ia64/xen-unstable

changeset 1042:3a01e5ddf625

bitkeeper revision 1.681 (400bf7cdMKvYPiShFBsSoD7tY6SGXA)

cciss driver compiles... not tested (no h/w) and will definitely not
"work" out of the box (still need to register the blkdev stuff + fix
xenolinux), but hopefully enough to start testing...
author smh22@labyrinth.cl.cam.ac.uk
date Mon Jan 19 15:29:17 2004 +0000 (2004-01-19)
parents 7c3b17193b4b
children eb4a4fe67cdc
files .rootkeys BitKeeper/etc/ignore xen/drivers/block/cciss.c xen/drivers/block/cciss.h xen/drivers/block/cciss_cmd.h xen/drivers/block/cciss_scsi.c xen/drivers/block/cciss_scsi.h xen/drivers/block/ll_rw_blk.c xen/include/asm-i386/io.h xen/include/xeno/blkdev.h
line diff
     1.1 --- a/.rootkeys	Fri Jan 16 19:29:56 2004 +0000
     1.2 +++ b/.rootkeys	Mon Jan 19 15:29:17 2004 +0000
     1.3 @@ -153,6 +153,11 @@ 3ddb79bd3zgV33PHdt-cgh3sxcb1hw xen/commo
     1.4  3ddb79c0ppNeJtjC4va8j41ADCnchA xen/drivers/Makefile
     1.5  3ddb79beWzgPS8ozf2BL2g3ZkiWhhQ xen/drivers/block/Makefile
     1.6  3ddb79be04dyXzyXqDbMRS_1funwXQ xen/drivers/block/blkpg.c
     1.7 +400be8220k_K_8-GFqi_KI7DtnG4wg xen/drivers/block/cciss.c
     1.8 +400be822U6rx3myRzz2fCoqMbQTKjQ xen/drivers/block/cciss.h
     1.9 +400be822MTFR0Ue17PyB6X6vU5lmzw xen/drivers/block/cciss_cmd.h
    1.10 +400be822o5kwrfC5oI8PY4A-GoXeoA xen/drivers/block/cciss_scsi.c
    1.11 +400be822HEA8ZLcYGxSl_CprccJDkg xen/drivers/block/cciss_scsi.h
    1.12  3ddb79beME_0abStePF6fU8XLuQnWw xen/drivers/block/elevator.c
    1.13  3ddb79beNQVrdGyoI4njXhgAjD6a4A xen/drivers/block/genhd.c
    1.14  3ddb79beyWwLRP_BiM2t1JKgr_plEw xen/drivers/block/ll_rw_blk.c
     2.1 --- a/BitKeeper/etc/ignore	Fri Jan 16 19:29:56 2004 +0000
     2.2 +++ b/BitKeeper/etc/ignore	Mon Jan 19 15:29:17 2004 +0000
     2.3 @@ -535,3 +535,6 @@ xen/drivers/scsi/sym53c8xx_2/sym_malloc.
     2.4  xen/drivers/scsi/sym53c8xx_2/sym_misc.o
     2.5  xen/drivers/scsi/sym53c8xx_2/sym_nvram.o
     2.6  xen/arch/i386/flushtlb.o
     2.7 +xen/common/rbtree.o
     2.8 +xen/drivers/block/cciss.o
     2.9 +xen/drivers/block/cciss_scsi.o
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/xen/drivers/block/cciss.c	Mon Jan 19 15:29:17 2004 +0000
     3.3 @@ -0,0 +1,3393 @@
     3.4 +/*
     3.5 + *    Disk Array driver for HP SA 5xxx and 6xxx Controllers
     3.6 + *    Copyright 2000, 2002 Hewlett-Packard Development Company, L.P. 
     3.7 + *
     3.8 + *    This program is free software; you can redistribute it and/or modify
     3.9 + *    it under the terms of the GNU General Public License as published by
    3.10 + *    the Free Software Foundation; either version 2 of the License, or
    3.11 + *    (at your option) any later version.
    3.12 + *
    3.13 + *    This program is distributed in the hope that it will be useful,
    3.14 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
    3.15 + *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
    3.16 + *    NON INFRINGEMENT.  See the GNU General Public License for more details.
    3.17 + *
    3.18 + *    You should have received a copy of the GNU General Public License
    3.19 + *    along with this program; if not, write to the Free Software
    3.20 + *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
    3.21 + *
    3.22 + *    Questions/Comments/Bugfixes to Cciss-discuss@lists.sourceforge.net
    3.23 + *
    3.24 + */
    3.25 +
    3.26 +#include <xeno/config.h>	/* CONFIG_PROC_FS */
    3.27 +#include <xeno/module.h>
    3.28 +#include <xeno/version.h>
    3.29 +#include <xeno/types.h>
    3.30 +#include <xeno/pci.h>
    3.31 +#include <xeno/kernel.h>
    3.32 +#include <xeno/slab.h>
    3.33 +#include <xeno/delay.h>
    3.34 +#include <xeno/major.h>
    3.35 +//#include <xeno/fs.h>
    3.36 +#include <xeno/blkpg.h>
    3.37 +#include <xeno/interrupt.h>
    3.38 +#include <xeno/timer.h>
    3.39 +//#include <xeno/proc_fs.h>
    3.40 +#include <xeno/init.h> 
    3.41 +#include <xeno/hdreg.h>
    3.42 +#include <xeno/spinlock.h>
    3.43 +#include <asm/uaccess.h>
    3.44 +#include <asm/io.h>
    3.45 +//#include <xeno/smp_lock.h>
    3.46 +
    3.47 +#include <xeno/blk.h>
    3.48 +#include <xeno/blkdev.h>
    3.49 +#include <xeno/genhd.h>
    3.50 +
    3.51 +#include <asm/irq.h>
    3.52 +#include <asm/byteorder.h>
    3.53 +
    3.54 +#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
    3.55 +#define DRIVER_NAME "HP CISS Driver (v 2.4.50)"
    3.56 +#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,4,50)
    3.57 +
    3.58 +/* Embedded module documentation macros - see modules.h */
    3.59 +MODULE_AUTHOR("Hewlett-Packard Company");
    3.60 +MODULE_DESCRIPTION("Driver for HP SA5xxx SA6xxx Controllers version 2.4.50");
    3.61 +MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400 6i"); 
    3.62 +MODULE_LICENSE("GPL");
    3.63 +
    3.64 +#include "cciss_cmd.h"
    3.65 +#include "cciss.h"
    3.66 +//#include <xeno/cciss_ioctl.h>
    3.67 +
    3.68 +/* define the PCI info for the cards we can control */
    3.69 +const struct pci_device_id cciss_pci_device_id[] = {
    3.70 +	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
    3.71 +			0x0E11, 0x4070, 0, 0, 0},
    3.72 +	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
    3.73 +                        0x0E11, 0x4080, 0, 0, 0},
    3.74 +	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
    3.75 +                        0x0E11, 0x4082, 0, 0, 0},
    3.76 +	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
    3.77 +                        0x0E11, 0x4083, 0, 0, 0},
    3.78 +	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
    3.79 +                        0x0E11, 0x409A, 0, 0, 0},
    3.80 +	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
    3.81 +                        0x0E11, 0x409B, 0, 0, 0},
    3.82 +	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
    3.83 +                        0x0E11, 0x409C, 0, 0, 0},
    3.84 +	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
    3.85 +                        0x0E11, 0x409D, 0, 0, 0},
    3.86 +	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
    3.87 +                        0x0E11, 0x4091, 0, 0, 0},
    3.88 +	{0,}
    3.89 +};
    3.90 +MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
    3.91 +
    3.92 +#define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
    3.93 +
    3.94 +/*  board_id = Subsystem Device ID & Vendor ID
    3.95 + *  product = Marketing Name for the board
    3.96 + *  access = Address of the struct of function pointers 
    3.97 + */
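          +/*
          + * For illustration (an editorial example, not part of the original source):
          + * the first entry of cciss_pci_device_id[] above has subsystem vendor
          + * 0x0E11 and subsystem device 0x4070; packed as (device << 16) | vendor
          + * they form board_id 0x40700E11, the "Smart Array 5300" entry below.
          + */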
    3.98 +static struct board_type products[] = {
    3.99 +	{ 0x40700E11, "Smart Array 5300", &SA5_access},
   3.100 +	{ 0x40800E11, "Smart Array 5i", &SA5B_access},
   3.101 +	{ 0x40820E11, "Smart Array 532", &SA5B_access},
   3.102 +	{ 0x40830E11, "Smart Array 5312", &SA5B_access},
   3.103 +	{ 0x409A0E11, "Smart Array 641", &SA5_access},
   3.104 +	{ 0x409B0E11, "Smart Array 642", &SA5_access},
   3.105 +	{ 0x409C0E11, "Smart Array 6400", &SA5_access},
   3.106 +	{ 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
   3.107 +	{ 0x40910E11, "Smart Array 6i", &SA5_access},
   3.108 +};
   3.109 +
    3.110 +/* How long to wait (in milliseconds) for the board to go into simple mode */
   3.111 +#define MAX_CONFIG_WAIT 30000 
   3.112 +#define MAX_IOCTL_CONFIG_WAIT 1000
   3.113 +
    3.114 +/* define how many times we will retry a command after a bus reset */
   3.115 +#define MAX_CMD_RETRIES 3
   3.116 +
   3.117 +#define READ_AHEAD 	 128
   3.118 +#define NR_CMDS		 128 /* #commands that can be outstanding */
   3.119 +#define MAX_CTLR	 32 
   3.120 +
   3.121 +/* No sense in giving up our preallocated major numbers */
   3.122 +#if MAX_CTLR < 8
    3.123 +#error "cciss.c: MAX_CTLR must be 8 or greater"
   3.124 +#endif
   3.125 +
    3.126 +/* Originally the cciss driver only supported 8 major numbers */
   3.127 +#define MAX_CTLR_ORIG  COMPAQ_CISS_MAJOR7 - COMPAQ_CISS_MAJOR + 1
   3.128 +
   3.129 +#define CCISS_DMA_MASK 0xFFFFFFFFFFFFFFFF /* 64 bit DMA */
   3.130 +
   3.131 +#ifdef CONFIG_CISS_MONITOR_THREAD
   3.132 +static int cciss_monitor(void *ctlr);
   3.133 +static int start_monitor_thread(ctlr_info_t *h, unsigned char *cmd, 
   3.134 +		unsigned long count, int (*cciss_monitor)(void *), int *rc);
   3.135 +static u32 heartbeat_timer = 0;
   3.136 +#else
   3.137 +#define cciss_monitor(x)
   3.138 +#define kill_monitor_thead(x)
   3.139 +#endif
   3.140 +
   3.141 +static ctlr_info_t *hba[MAX_CTLR];
   3.142 +static int map_major_to_ctlr[MAX_BLKDEV] = {0}; /* gets ctlr num from maj num */
   3.143 +//static struct proc_dir_entry *proc_cciss;
   3.144 +
   3.145 +static void do_cciss_request(request_queue_t *q);
   3.146 +static int cciss_open(struct inode *inode, struct file *filep);
   3.147 +static int cciss_release(struct inode *inode, struct file *filep);
   3.148 +static int cciss_ioctl(struct inode *inode, struct file *filep, 
   3.149 +		unsigned int cmd, unsigned long arg);
   3.150 +
   3.151 +static int revalidate_logvol(kdev_t dev, int maxusage);
   3.152 +static int frevalidate_logvol(kdev_t dev);
   3.153 +static int deregister_disk(int ctlr, int logvol);
    3.154 +static int register_new_disk(int ctlr, int opened_vol, __u64 requested_lun);
    3.155 +static int cciss_rescan_disk(int ctlr, int logvol);
   3.156 +
   3.157 +static void cciss_getgeometry(int cntl_num);
   3.158 +
   3.159 +static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c);
   3.160 +static void start_io( ctlr_info_t *h);
   3.161 +
   3.162 +
   3.163 +#ifdef CONFIG_PROC_FS
   3.164 +static int cciss_proc_get_info(char *buffer, char **start, off_t offset, 
   3.165 +		int length, int *eof, void *data);
   3.166 +static void cciss_procinit(int i);
   3.167 +
   3.168 +XXX
   3.169 +#else
   3.170 +/*static int cciss_proc_get_info(char *buffer, char **start, off_t offset, 
   3.171 +		int length, int *eof, void *data) { return 0;}
   3.172 +*/
   3.173 +static void cciss_procinit(int i) {}
   3.174 +
   3.175 +#endif /* CONFIG_PROC_FS */
   3.176 +
   3.177 +
   3.178 +static struct block_device_operations cciss_fops  = {
   3.179 +	//owner:			THIS_MODULE,
   3.180 +	open:			cciss_open, 
   3.181 +	release:        	cciss_release,
   3.182 +        ioctl:			cciss_ioctl,
   3.183 +	revalidate:		frevalidate_logvol,
   3.184 +};
   3.185 +
   3.186 +#include "cciss_scsi.c"		/* For SCSI tape support */
   3.187 +
   3.188 +#define ENG_GIG	1048576000
   3.189 +#define ENG_GIG_FACTOR (ENG_GIG/512)
   3.190 +#define	RAID_UNKNOWN 6
   3.191 +static const char *raid_label[] = {"0","4","1(0+1)","5","5+1","ADG",
   3.192 +				   "UNKNOWN"};
   3.193 +/*
   3.194 + * Report information about this controller.
   3.195 + */
   3.196 +#ifdef CONFIG_PROC_FS
   3.197 +static int cciss_proc_get_info(char *buffer, char **start, off_t offset, 
   3.198 +		int length, int *eof, void *data)
   3.199 +{
   3.200 +	off_t pos = 0;
   3.201 +	off_t len = 0;
   3.202 +	int size, i, ctlr;
   3.203 +	ctlr_info_t *h = (ctlr_info_t*)data;
   3.204 +	drive_info_struct *drv;
   3.205 +	unsigned long flags;
   3.206 +	unsigned int vol_sz, vol_sz_frac;
   3.207 +
   3.208 +	spin_lock_irqsave(&io_request_lock, flags);
   3.209 +	if (h->busy_configuring) {
   3.210 +		spin_unlock_irqrestore(&io_request_lock, flags);
   3.211 +		return -EBUSY;
   3.212 +	}
   3.213 +	h->busy_configuring = 1;
   3.214 +	spin_unlock_irqrestore(&io_request_lock, flags);
   3.215 +		
   3.216 +	ctlr = h->ctlr;
   3.217 +	size = sprintf(buffer, "%s: HP %s Controller\n"
   3.218 + 		"Board ID: 0x%08lx\n"
   3.219 +		"Firmware Version: %c%c%c%c\n"
   3.220 + 		"IRQ: %d\n"
   3.221 + 		"Logical drives: %d\n"
   3.222 + 		"Current Q depth: %d\n"
   3.223 + 		"Current # commands on controller: %d\n"
   3.224 + 		"Max Q depth since init: %d\n"
   3.225 +		"Max # commands on controller since init: %d\n"
   3.226 +		"Max SG entries since init: %d\n"
   3.227 +		MONITOR_PERIOD_PATTERN 
   3.228 +		MONITOR_DEADLINE_PATTERN
   3.229 +		MONITOR_STATUS_PATTERN 
   3.230 +		"\n",
   3.231 +  		h->devname,
   3.232 +  		h->product_name,
   3.233 +  		(unsigned long)h->board_id,
   3.234 +  		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
   3.235 +  		(unsigned int)h->intr,
   3.236 +  		h->num_luns, 
   3.237 +  		h->Qdepth, h->commands_outstanding,
   3.238 +		h->maxQsinceinit, h->max_outstanding, h->maxSG,
   3.239 +		MONITOR_PERIOD_VALUE(h),
   3.240 +		MONITOR_DEADLINE_VALUE(h),
   3.241 +		CTLR_STATUS(h));
   3.242 +  
   3.243 +	pos += size; len += size;
   3.244 +	cciss_proc_tape_report(ctlr, buffer, &pos, &len);
   3.245 +	for(i=0; i<=h->highest_lun; i++) {
   3.246 +		drv = &h->drv[i];
   3.247 +		if (drv->nr_blocks == 0)
   3.248 +			continue;
   3.249 +		vol_sz = drv->nr_blocks/ENG_GIG_FACTOR; 
   3.250 +		vol_sz_frac = (drv->nr_blocks%ENG_GIG_FACTOR)*100/ENG_GIG_FACTOR;
   3.251 +
   3.252 +		if (drv->raid_level > 5)
   3.253 +			drv->raid_level = RAID_UNKNOWN;
   3.254 +		size = sprintf(buffer+len, "cciss/c%dd%d:"
   3.255 +				"\t%4d.%02dGB\tRAID %s\n",
   3.256 +		       		 ctlr, i, vol_sz,vol_sz_frac,
   3.257 +				 raid_label[drv->raid_level]);
   3.258 +		pos += size, len += size;
   3.259 +        }
   3.260 +
   3.261 +	*eof = 1;
   3.262 +	*start = buffer+offset;
   3.263 +	len -= offset;
   3.264 +	if (len>length)
   3.265 +		len = length;
   3.266 +	h->busy_configuring = 0;
   3.267 +	return len;
   3.268 +}
   3.269 +
   3.270 +static int
   3.271 +cciss_proc_write(struct file *file, const char *buffer,
   3.272 +			unsigned long count, void *data)
   3.273 +{
   3.274 +	unsigned char cmd[80];
   3.275 +	int len;
   3.276 +	ctlr_info_t *h = (ctlr_info_t *) data;
   3.277 +	int rc;
   3.278 +
   3.279 +	if (count > sizeof(cmd)-1) 
   3.280 +		return -EINVAL;
   3.281 +	if (copy_from_user(cmd, buffer, count)) 
   3.282 +		return -EFAULT;
   3.283 +	cmd[count] = '\0';
   3.284 +	len = strlen(cmd);	
   3.285 +	if (cmd[len-1] == '\n')
   3.286 +		cmd[--len] = '\0';
   3.287 +
   3.288 +#	ifdef CONFIG_CISS_SCSI_TAPE
   3.289 +		if (strcmp("engage scsi", cmd)==0) {
   3.290 +			rc = cciss_engage_scsi(h->ctlr);
   3.291 +			if (rc != 0) 
   3.292 +				return -rc;
   3.293 +			return count;
   3.294 +		}
   3.295 +		/* might be nice to have "disengage" too, but it's not
   3.296 +		   safely possible. (only 1 module use count, lock issues.) */
   3.297 +#	endif
   3.298 +
   3.299 +	if (START_MONITOR_THREAD(h, cmd, count, cciss_monitor, &rc) == 0)
   3.300 +		return rc;
   3.301 +	
   3.302 +	return -EINVAL;
   3.303 +}
   3.304 +
   3.305 +/*
   3.306 + * Get us a file in /proc/cciss that says something about each controller.
   3.307 + * Create /proc/cciss if it doesn't exist yet.
   3.308 + */
   3.309 +static void __init cciss_procinit(int i)
   3.310 +{
   3.311 +	struct proc_dir_entry *pde;
   3.312 +
   3.313 +	if (proc_cciss == NULL) {
   3.314 +		proc_cciss = proc_mkdir("cciss", proc_root_driver);
   3.315 +		if (!proc_cciss) {
   3.316 +			printk("cciss:  proc_mkdir failed\n");
   3.317 +			return;
   3.318 +		}
   3.319 +	}
   3.320 +
   3.321 +	pde = create_proc_read_entry(hba[i]->devname,
   3.322 +		S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
   3.323 +		proc_cciss, cciss_proc_get_info, hba[i]);
   3.324 +	pde->write_proc = cciss_proc_write;
   3.325 +}
   3.326 +#endif /* CONFIG_PROC_FS */
   3.327 +
   3.328 +/* 
   3.329 + * For operations that cannot sleep, a command block is allocated at init, 
   3.330 + * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
    3.331 + * which ones are free or in use.  For operations that can tolerate kmalloc 
    3.332 + * possibly sleeping, this routine can be called with get_from_pool set to 0. 
    3.333 + * cmd_free() MUST then be called with got_from_pool set to 0 if cmd_alloc() was. 
   3.334 + */ 
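          +/*
          + * A minimal usage sketch (illustrative only, not code from this changeset):
          + * a caller that must not sleep takes a slot from the preallocated pool and
          + * releases it the same way, e.g.
          + *
          + *	CommandList_struct *c = cmd_alloc(h, 1);   -- from the bitmap pool
          + *	if (c == NULL)
          + *		return;
          + *	... fill in c->Request, queue it, wait for it to complete ...
          + *	cmd_free(h, c, 1);
          + *
          + * A sleepable path pairs cmd_alloc(h, 0) with cmd_free(h, c, 0), which
          + * allocate and free via pci_alloc_consistent()/pci_free_consistent().
          + */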
   3.335 +static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
   3.336 +{
   3.337 +	CommandList_struct *c;
   3.338 +	int i; 
   3.339 +	u64bit temp64;
   3.340 +	dma_addr_t cmd_dma_handle, err_dma_handle;
   3.341 +
   3.342 +	if (!get_from_pool) {
   3.343 +		c = (CommandList_struct *) pci_alloc_consistent(
   3.344 +			h->pdev, sizeof(CommandList_struct), &cmd_dma_handle); 
   3.345 +        	if (c==NULL)
   3.346 +                 	return NULL;
   3.347 +		memset(c, 0, sizeof(CommandList_struct));
   3.348 +
   3.349 +		c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
   3.350 +					h->pdev, sizeof(ErrorInfo_struct), 
   3.351 +					&err_dma_handle);
   3.352 +	
   3.353 +		if (c->err_info == NULL)
   3.354 +		{
   3.355 +			pci_free_consistent(h->pdev, 
   3.356 +				sizeof(CommandList_struct), c, cmd_dma_handle);
   3.357 +			return NULL;
   3.358 +		}
   3.359 +		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
    3.360 +	} else /* get it out of the controller's pool */ 
   3.361 +	{
   3.362 +	     	do {
   3.363 +                	i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
   3.364 +                        if (i == NR_CMDS)
   3.365 +                                return NULL;
   3.366 +                } while(test_and_set_bit(i%32, h->cmd_pool_bits+(i/32)) != 0);
   3.367 +#ifdef CCISS_DEBUG
   3.368 +		printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
   3.369 +#endif
   3.370 +                c = h->cmd_pool + i;
   3.371 +		memset(c, 0, sizeof(CommandList_struct));
   3.372 +		cmd_dma_handle = h->cmd_pool_dhandle 
   3.373 +					+ i*sizeof(CommandList_struct);
   3.374 +		c->err_info = h->errinfo_pool + i;
   3.375 +		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
   3.376 +		err_dma_handle = h->errinfo_pool_dhandle 
   3.377 +					+ i*sizeof(ErrorInfo_struct);
   3.378 +                h->nr_allocs++;
   3.379 +        }
   3.380 +
   3.381 +	c->busaddr = (__u32) cmd_dma_handle;
   3.382 +	temp64.val = (__u64) err_dma_handle;	
   3.383 +	c->ErrDesc.Addr.lower = temp64.val32.lower;
   3.384 +	c->ErrDesc.Addr.upper = temp64.val32.upper;
   3.385 +	c->ErrDesc.Len = sizeof(ErrorInfo_struct);
   3.386 +	
   3.387 +	c->ctlr = h->ctlr;
   3.388 +        return c;
   3.389 +
   3.390 +
   3.391 +}
   3.392 +
   3.393 +/* 
   3.394 + * Frees a command block that was previously allocated with cmd_alloc(). 
   3.395 + */
   3.396 +static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
   3.397 +{
   3.398 +	int i;
   3.399 +	u64bit temp64;
   3.400 +
   3.401 +	if (!got_from_pool) { 
   3.402 +		temp64.val32.lower = c->ErrDesc.Addr.lower;
   3.403 +		temp64.val32.upper = c->ErrDesc.Addr.upper;
   3.404 +		pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct), 
   3.405 +			c->err_info, (dma_addr_t) temp64.val);
   3.406 +		pci_free_consistent(h->pdev, sizeof(CommandList_struct), 
   3.407 +			c, (dma_addr_t) c->busaddr);
   3.408 +	} else 
   3.409 +	{
   3.410 +		i = c - h->cmd_pool;
   3.411 +		clear_bit(i%32, h->cmd_pool_bits+(i/32));
   3.412 +                h->nr_frees++;
   3.413 +        }
   3.414 +}
   3.415 +
   3.416 +/*  
   3.417 + * fills in the disk information. 
   3.418 + */
   3.419 +static void cciss_geninit( int ctlr)
   3.420 +{
   3.421 +	drive_info_struct *drv;
   3.422 +	int i,j;
   3.423 +	
   3.424 +	/* Loop through each real device */ 
   3.425 +	hba[ctlr]->gendisk.nr_real = 0; 
   3.426 +	for(i=0; i< NWD; i++) {
   3.427 +		drv = &(hba[ctlr]->drv[i]);
   3.428 +		if (!(drv->nr_blocks))
   3.429 +			continue;
   3.430 +		hba[ctlr]->hd[i << NWD_SHIFT].nr_sects = 
   3.431 +		hba[ctlr]->sizes[i << NWD_SHIFT] = drv->nr_blocks;
   3.432 +
   3.433 +		/* for each partition */ 
   3.434 +		for(j=0; j<MAX_PART; j++) {
   3.435 +			hba[ctlr]->blocksizes[(i<<NWD_SHIFT) + j] = 1024; 
   3.436 +
   3.437 +			hba[ctlr]->hardsizes[ (i<<NWD_SHIFT) + j] = 
   3.438 +				drv->block_size;
   3.439 +		}
   3.440 +	}
   3.441 +	hba[ctlr]->gendisk.nr_real = hba[ctlr]->highest_lun+1;
   3.442 +}
   3.443 +/*
   3.444 + * Open.  Make sure the device is really there.
   3.445 + */
   3.446 +static int cciss_open(struct inode *inode, struct file *filep)
   3.447 +{
   3.448 + 	int ctlr = map_major_to_ctlr[MAJOR(inode->i_rdev)];
   3.449 +	int dsk  = MINOR(inode->i_rdev) >> NWD_SHIFT;
   3.450 +
   3.451 +#ifdef CCISS_DEBUG
   3.452 +	printk(KERN_DEBUG "cciss_open %x (%x:%x)\n", inode->i_rdev, ctlr, dsk);
   3.453 +#endif /* CCISS_DEBUG */ 
   3.454 +
   3.455 +	if (ctlr > MAX_CTLR || hba[ctlr] == NULL || !CTLR_IS_ALIVE(hba[ctlr]))
   3.456 +		return -ENXIO;
   3.457 +	/*
    3.458 +	 * Root is allowed to open raw volume zero even if it's not configured
    3.459 +	 * so array config can still work. Root is also allowed to open any
    3.460 +	 * volume that has a LUN ID, so it can issue IOCTLs to reread the
    3.461 +	 * disk information.  I don't think I really like this,
    3.462 +	 * but I'm already using way too many device nodes to claim another
    3.463 +	 * one for "raw controller".
   3.464 +	 */
   3.465 +	if (hba[ctlr]->sizes[MINOR(inode->i_rdev)] == 0) { /* not online? */
   3.466 +		if (MINOR(inode->i_rdev) != 0) {	 /* not node 0? */
    3.467 +			/* if not node 0, make sure it is partition 0 */
   3.468 +			if (MINOR(inode->i_rdev) & 0x0f) {
   3.469 +				return -ENXIO;
   3.470 +				/* if it is, make sure we have a LUN ID */
   3.471 +			} else if (hba[ctlr]->drv[MINOR(inode->i_rdev)
   3.472 +					>> NWD_SHIFT].LunID == 0) {
   3.473 +				return -ENXIO;
   3.474 +			}
   3.475 +		}
   3.476 +		if (!capable(CAP_SYS_ADMIN))
   3.477 +			return -EPERM;
   3.478 +	}
   3.479 +
   3.480 +	hba[ctlr]->drv[dsk].usage_count++;
   3.481 +	hba[ctlr]->usage_count++;
   3.482 +	return 0;
   3.483 +}
   3.484 +/*
   3.485 + * Close.  Sync first.
   3.486 + */
   3.487 +static int cciss_release(struct inode *inode, struct file *filep)
   3.488 +{
   3.489 +	int ctlr = map_major_to_ctlr[MAJOR(inode->i_rdev)];
   3.490 +	int dsk  = MINOR(inode->i_rdev) >> NWD_SHIFT;
   3.491 +
   3.492 +#ifdef CCISS_DEBUG
   3.493 +	printk(KERN_DEBUG "cciss_release %x (%x:%x)\n", inode->i_rdev, ctlr, dsk);
   3.494 +#endif /* CCISS_DEBUG */
   3.495 +
   3.496 +	/* fsync_dev(inode->i_rdev); */
   3.497 +
   3.498 +	hba[ctlr]->drv[dsk].usage_count--;
   3.499 +	hba[ctlr]->usage_count--;
   3.500 +	return 0;
   3.501 +}
   3.502 +
   3.503 +/*
   3.504 + * ioctl 
   3.505 + */
   3.506 +static int cciss_ioctl(struct inode *inode, struct file *filep, 
   3.507 +		unsigned int cmd, unsigned long arg)
   3.508 +{
   3.509 +	//int ctlr = map_major_to_ctlr[MAJOR(inode->i_rdev)];
   3.510 +
   3.511 +	//int dsk  = MINOR(inode->i_rdev) >> NWD_SHIFT;
   3.512 +
   3.513 +printk(KERN_ALERT "cciss_ioctl: Called BUT NOT SUPPORTED cmd=%x %lx\n", cmd, arg);
   3.514 +
   3.515 +return -EBADRQC;
   3.516 +
   3.517 +#if 0
   3.518 +
   3.519 +#ifdef CCISS_DEBUG
   3.520 +	printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
   3.521 +#endif /* CCISS_DEBUG */ 
   3.522 +	
   3.523 +	switch(cmd) {
   3.524 +	   case HDIO_GETGEO:
   3.525 +	   {
   3.526 +		struct hd_geometry driver_geo;
   3.527 +		if (hba[ctlr]->drv[dsk].cylinders) {
   3.528 +			driver_geo.heads = hba[ctlr]->drv[dsk].heads;
   3.529 +			driver_geo.sectors = hba[ctlr]->drv[dsk].sectors;
   3.530 +			driver_geo.cylinders = hba[ctlr]->drv[dsk].cylinders;
   3.531 +		} else {
   3.532 +			driver_geo.heads = 0xff;
   3.533 +			driver_geo.sectors = 0x3f;
   3.534 +			driver_geo.cylinders = 
   3.535 +				hba[ctlr]->drv[dsk].nr_blocks / (0xff*0x3f);
   3.536 +		}
   3.537 +		driver_geo.start=
   3.538 +			hba[ctlr]->hd[MINOR(inode->i_rdev)].start_sect;
   3.539 +		if (copy_to_user((void *) arg, &driver_geo,
   3.540 +				sizeof( struct hd_geometry)))
   3.541 +			return  -EFAULT;
   3.542 +		return 0;
   3.543 +	   }
   3.544 +	case HDIO_GETGEO_BIG:
   3.545 +	{
   3.546 +		struct hd_big_geometry driver_geo;
   3.547 +		if (hba[ctlr]->drv[dsk].cylinders) {
   3.548 +			driver_geo.heads = hba[ctlr]->drv[dsk].heads;
   3.549 +			driver_geo.sectors = hba[ctlr]->drv[dsk].sectors;
   3.550 +			driver_geo.cylinders = hba[ctlr]->drv[dsk].cylinders;
   3.551 +		} else {
   3.552 +			driver_geo.heads = 0xff;
   3.553 +			driver_geo.sectors = 0x3f;
   3.554 +			driver_geo.cylinders = 
   3.555 +				hba[ctlr]->drv[dsk].nr_blocks / (0xff*0x3f);
   3.556 +		}
   3.557 +		driver_geo.start= 
   3.558 +		hba[ctlr]->hd[MINOR(inode->i_rdev)].start_sect;
   3.559 +		if (copy_to_user((void *) arg, &driver_geo,  
   3.560 +				sizeof( struct hd_big_geometry)))
   3.561 +			return  -EFAULT;
   3.562 +		return 0;
   3.563 +	}
   3.564 +	case BLKRRPART:
   3.565 +		if (!capable(CAP_SYS_ADMIN))
   3.566 +			return -EPERM;
   3.567 +		return revalidate_logvol(inode->i_rdev, 1);
   3.568 +	case BLKGETSIZE:
   3.569 +	case BLKGETSIZE64:
   3.570 +	case BLKFLSBUF:
   3.571 +	case BLKBSZSET:
   3.572 +	case BLKBSZGET:
   3.573 +	case BLKROSET:
   3.574 +	case BLKROGET:
   3.575 +	case BLKRASET:
   3.576 +	case BLKRAGET:
   3.577 +	case BLKPG:
   3.578 +	case BLKELVGET:
   3.579 +	case BLKELVSET:
   3.580 +		return blk_ioctl(inode->i_rdev, cmd, arg);
   3.581 +	case CCISS_GETPCIINFO:
   3.582 +	{
   3.583 +		cciss_pci_info_struct pciinfo;
   3.584 +
   3.585 +		if (!arg) 
   3.586 +			return -EINVAL;
   3.587 +		pciinfo.bus = hba[ctlr]->pdev->bus->number;
   3.588 +		pciinfo.dev_fn = hba[ctlr]->pdev->devfn;
   3.589 +		pciinfo.board_id = hba[ctlr]->board_id;
   3.590 +		if (copy_to_user((void *) arg, &pciinfo,  sizeof( cciss_pci_info_struct )))
   3.591 +			return  -EFAULT;
   3.592 +		return 0;
   3.593 +	}	
   3.594 +	case CCISS_GETINTINFO:
   3.595 +	{
   3.596 +		cciss_coalint_struct intinfo;
   3.597 +		ctlr_info_t *c = hba[ctlr];
   3.598 +
   3.599 +		if (!arg) 
   3.600 +			return -EINVAL;
   3.601 +		intinfo.delay = readl(&c->cfgtable->HostWrite.CoalIntDelay);
   3.602 +		intinfo.count = readl(&c->cfgtable->HostWrite.CoalIntCount);
   3.603 +		if (copy_to_user((void *) arg, &intinfo, sizeof( cciss_coalint_struct )))
   3.604 +			return -EFAULT;
   3.605 +                return 0;
   3.606 +        }
   3.607 +	case CCISS_SETINTINFO:
   3.608 +        {
   3.609 +                cciss_coalint_struct intinfo;
   3.610 +                ctlr_info_t *c = hba[ctlr];
   3.611 +		unsigned long flags;
   3.612 +		int i;
   3.613 +
   3.614 +		if (!arg) 
   3.615 +			return -EINVAL;	
   3.616 +		if (!capable(CAP_SYS_ADMIN)) 
   3.617 +			return -EPERM;
   3.618 +		if (copy_from_user(&intinfo, (void *) arg, sizeof( cciss_coalint_struct)))
   3.619 +			return -EFAULT;
   3.620 +		if ( (intinfo.delay == 0 ) && (intinfo.count == 0)) {
   3.621 +			return -EINVAL;
   3.622 +		}
   3.623 +
   3.624 +		spin_lock_irqsave(&io_request_lock, flags);
   3.625 +		/* Can only safely update if no commands outstanding */ 
   3.626 +		if (c->commands_outstanding > 0 ) {
   3.627 +			spin_unlock_irqrestore(&io_request_lock, flags);
   3.628 +			return -EINVAL;
   3.629 +		}
   3.630 +		/* Update the field, and then ring the doorbell */ 
   3.631 +		writel( intinfo.delay, 
   3.632 +			&(c->cfgtable->HostWrite.CoalIntDelay));
   3.633 +		writel( intinfo.count, 
   3.634 +                        &(c->cfgtable->HostWrite.CoalIntCount));
   3.635 +		writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
   3.636 +
   3.637 +		for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
   3.638 +			if (!(readl(c->vaddr + SA5_DOORBELL) 
   3.639 +					& CFGTBL_ChangeReq))
   3.640 +				break;
   3.641 +			/* delay and try again */
   3.642 +			udelay(1000);
   3.643 +		}	
   3.644 +		spin_unlock_irqrestore(&io_request_lock, flags);
   3.645 +		if (i >= MAX_IOCTL_CONFIG_WAIT)
   3.646 +			/* there is an unlikely case where this can happen,
   3.647 +			 * involving hot replacing a failed 144 GB drive in a 
   3.648 +			 * RAID 5 set just as we attempt this ioctl. */
   3.649 +			return -EAGAIN;
   3.650 +                return 0;
   3.651 +        }
   3.652 +	case CCISS_GETNODENAME:
   3.653 +        {
   3.654 +                NodeName_type NodeName;
   3.655 +                ctlr_info_t *c = hba[ctlr];
   3.656 +		int i; 
   3.657 +
   3.658 +		if (!arg) 
   3.659 +			return -EINVAL;
   3.660 +		for(i=0;i<16;i++)
   3.661 +			NodeName[i] = readb(&c->cfgtable->ServerName[i]);
   3.662 +                if (copy_to_user((void *) arg, NodeName, sizeof( NodeName_type)))
   3.663 +                	return  -EFAULT;
   3.664 +                return 0;
   3.665 +        }
   3.666 +	case CCISS_SETNODENAME:
   3.667 +	{
   3.668 +		NodeName_type NodeName;
   3.669 +		ctlr_info_t *c = hba[ctlr];
   3.670 +		unsigned long flags;
   3.671 +		int i;
   3.672 +
   3.673 +		if (!arg) 
   3.674 +			return -EINVAL;
   3.675 +		if (!capable(CAP_SYS_ADMIN)) 
   3.676 +			return -EPERM;
   3.677 +		
   3.678 +		if (copy_from_user(NodeName, (void *) arg, sizeof( NodeName_type)))
   3.679 +			return -EFAULT;
   3.680 +
   3.681 +		spin_lock_irqsave(&io_request_lock, flags);
   3.682 +
   3.683 +			/* Update the field, and then ring the doorbell */ 
   3.684 +		for(i=0;i<16;i++)
   3.685 +			writeb( NodeName[i], &c->cfgtable->ServerName[i]);
   3.686 +			
   3.687 +		writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
   3.688 +
   3.689 +		for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
   3.690 +			if (!(readl(c->vaddr + SA5_DOORBELL) 
   3.691 +					& CFGTBL_ChangeReq))
   3.692 +				break;
   3.693 +			/* delay and try again */
   3.694 +			udelay(1000);
   3.695 +		}	
   3.696 +		spin_unlock_irqrestore(&io_request_lock, flags);
   3.697 +		if (i >= MAX_IOCTL_CONFIG_WAIT)
   3.698 +			/* there is an unlikely case where this can happen,
   3.699 +			 * involving hot replacing a failed 144 GB drive in a 
   3.700 +			 * RAID 5 set just as we attempt this ioctl. */
   3.701 +			return -EAGAIN;
   3.702 +                return 0;
   3.703 +        }
   3.704 +
   3.705 +	case CCISS_GETHEARTBEAT:
   3.706 +        {
   3.707 +                Heartbeat_type heartbeat;
   3.708 +                ctlr_info_t *c = hba[ctlr];
   3.709 +
   3.710 +		if (!arg) 
   3.711 +			return -EINVAL;
   3.712 +                heartbeat = readl(&c->cfgtable->HeartBeat);
   3.713 +                if (copy_to_user((void *) arg, &heartbeat, sizeof( Heartbeat_type)))
   3.714 +                	return -EFAULT;
   3.715 +                return 0;
   3.716 +        }
   3.717 +	case CCISS_GETBUSTYPES:
   3.718 +        {
   3.719 +                BusTypes_type BusTypes;
   3.720 +                ctlr_info_t *c = hba[ctlr];
   3.721 +
   3.722 +		if (!arg) 
   3.723 +			return -EINVAL;
   3.724 +                BusTypes = readl(&c->cfgtable->BusTypes);
   3.725 +                if (copy_to_user((void *) arg, &BusTypes, sizeof( BusTypes_type) ))
   3.726 +                	return  -EFAULT;
   3.727 +                return 0;
   3.728 +        }
   3.729 +	case CCISS_GETFIRMVER:
   3.730 +        {
   3.731 +		FirmwareVer_type firmware;
   3.732 +
   3.733 +		if (!arg) 
   3.734 +			return -EINVAL;
   3.735 +		memcpy(firmware, hba[ctlr]->firm_ver, 4);
   3.736 +
   3.737 +                if (copy_to_user((void *) arg, firmware, sizeof( FirmwareVer_type)))
   3.738 +                	return -EFAULT;
   3.739 +                return 0;
   3.740 +        }
   3.741 +        case CCISS_GETDRIVVER:
   3.742 +        {
   3.743 +		DriverVer_type DriverVer = DRIVER_VERSION;
   3.744 +
   3.745 +                if (!arg) 
   3.746 +			return -EINVAL;
   3.747 +
   3.748 +                if (copy_to_user((void *) arg, &DriverVer, sizeof( DriverVer_type) ))
   3.749 +                	return -EFAULT;
   3.750 +                return 0;
   3.751 +        }
   3.752 +	case CCISS_RESCANDISK:
   3.753 +	{
   3.754 +		return cciss_rescan_disk(ctlr, dsk);
   3.755 +	}
   3.756 +	case CCISS_DEREGDISK:
   3.757 +		return deregister_disk(ctlr,dsk);
   3.758 +
   3.759 +	case CCISS_REGNEWD:
   3.760 +		return register_new_disk(ctlr, dsk, 0);
   3.761 +	case CCISS_REGNEWDISK:
   3.762 +	{
   3.763 +		__u64 new_logvol;
   3.764 +
   3.765 +		if (!arg) 
   3.766 +			return -EINVAL;
   3.767 +		if (copy_from_user(&new_logvol, (void *) arg, 
   3.768 +			sizeof( __u64)))
   3.769 +			return -EFAULT;
   3.770 +		return register_new_disk(ctlr, dsk, new_logvol);
   3.771 +	}
   3.772 +	case CCISS_GETLUNINFO:
   3.773 +	{
   3.774 +		LogvolInfo_struct luninfo;
   3.775 +		int num_parts = 0;
   3.776 +		int i, start;
   3.777 +
   3.778 +		luninfo.LunID = hba[ctlr]->drv[dsk].LunID;
   3.779 +		luninfo.num_opens = hba[ctlr]->drv[dsk].usage_count;
   3.780 +
   3.781 +		/* count partitions 1 to 15 with sizes > 0 */
   3.782 +  		start = (dsk << NWD_SHIFT);
   3.783 +		for(i=1; i <MAX_PART; i++) {
   3.784 +			int minor = start+i;
   3.785 +			if (hba[ctlr]->sizes[minor] != 0)
   3.786 +				num_parts++;
   3.787 +		}
   3.788 +		luninfo.num_parts = num_parts;
   3.789 +		if (copy_to_user((void *) arg, &luninfo,
   3.790 +				sizeof( LogvolInfo_struct) ))
   3.791 +			return -EFAULT;
   3.792 +		return 0;
   3.793 +	}
   3.794 +#if 0
   3.795 +	case CCISS_PASSTHRU:
   3.796 +	{
   3.797 +		IOCTL_Command_struct iocommand;
   3.798 +		ctlr_info_t *h = hba[ctlr];
   3.799 +		CommandList_struct *c;
   3.800 +		char 	*buff = NULL;
   3.801 +		u64bit	temp64;
   3.802 +		unsigned long flags;
   3.803 +		DECLARE_COMPLETION(wait);
   3.804 +
   3.805 +		if (!arg) 
   3.806 +			return -EINVAL;
   3.807 +	
   3.808 +		if (!capable(CAP_SYS_RAWIO)) 
   3.809 +			return -EPERM;
   3.810 +
   3.811 +		if (copy_from_user(&iocommand, (void *) arg, sizeof( IOCTL_Command_struct) ))
   3.812 +			return -EFAULT;
   3.813 +		if ((iocommand.buf_size < 1) && 
   3.814 +				(iocommand.Request.Type.Direction 
   3.815 +				 	!= XFER_NONE)) {	
   3.816 +			return -EINVAL;
   3.817 +		} 
   3.818 +		/* Check kmalloc limits */
   3.819 +		if (iocommand.buf_size > 128000)
   3.820 +			return -EINVAL;
   3.821 +		if (iocommand.buf_size > 0) {
   3.822 +			buff =  kmalloc(iocommand.buf_size, GFP_KERNEL);
   3.823 +			if (buff == NULL) 
   3.824 +				return -ENOMEM;
   3.825 +		}
   3.826 +		if (iocommand.Request.Type.Direction == XFER_WRITE) {
   3.827 +			/* Copy the data into the buffer we created */ 
   3.828 +			if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
   3.829 +			{
   3.830 +				kfree(buff);
   3.831 +				return -EFAULT;
   3.832 +			}
   3.833 +		}
   3.834 +		if ((c = cmd_alloc(h , 0)) == NULL) {
   3.835 +			kfree(buff);
   3.836 +			return -ENOMEM;
   3.837 +		}
   3.838 +			/* Fill in the command type */
   3.839 +		c->cmd_type = CMD_IOCTL_PEND;
   3.840 +			/* Fill in Command Header */
   3.841 +		c->Header.ReplyQueue = 0;  /* unused in simple mode */
   3.842 +		if (iocommand.buf_size > 0) { 	/* buffer to fill */
   3.843 +			c->Header.SGList = 1;
   3.844 +			c->Header.SGTotal= 1;
   3.845 +		} else	{  /* no buffers to fill  */
   3.846 +			c->Header.SGList = 0;
   3.847 +                	c->Header.SGTotal= 0;
   3.848 +		}
   3.849 +		c->Header.LUN = iocommand.LUN_info;
   3.850 +		c->Header.Tag.lower = c->busaddr;  /* use the kernel address */
   3.851 +						/* the cmd block for tag */
   3.852 +		
   3.853 +		/* Fill in Request block */
   3.854 +		c->Request = iocommand.Request; 
   3.855 +	
   3.856 +		/* Fill in the scatter gather information */
   3.857 +		if (iocommand.buf_size > 0 ) {
   3.858 +			temp64.val = pci_map_single( h->pdev, buff,
   3.859 +                                        iocommand.buf_size, 
   3.860 +                                PCI_DMA_BIDIRECTIONAL);	
   3.861 +			c->SG[0].Addr.lower = temp64.val32.lower;
   3.862 +			c->SG[0].Addr.upper = temp64.val32.upper;
   3.863 +			c->SG[0].Len = iocommand.buf_size;
   3.864 +			c->SG[0].Ext = 0;  /* we are not chaining */
   3.865 +		}
   3.866 +		c->waiting = &wait;
   3.867 +
   3.868 +		/* Put the request on the tail of the request queue */
   3.869 +		spin_lock_irqsave(&io_request_lock, flags);
   3.870 +		addQ(&h->reqQ, c);
   3.871 +		h->Qdepth++;
   3.872 +		start_io(h);
   3.873 +		spin_unlock_irqrestore(&io_request_lock, flags);
   3.874 +
   3.875 +		wait_for_completion(&wait);
   3.876 +
   3.877 +		/* unlock the buffers from DMA */
   3.878 +		temp64.val32.lower = c->SG[0].Addr.lower;
   3.879 +                temp64.val32.upper = c->SG[0].Addr.upper;
   3.880 +                pci_unmap_single( h->pdev, (dma_addr_t) temp64.val,
   3.881 +                	iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
   3.882 +
   3.883 +		/* Copy the error information out */ 
   3.884 +		iocommand.error_info = *(c->err_info);
   3.885 +		if (copy_to_user((void *) arg, &iocommand, 
   3.886 +				sizeof( IOCTL_Command_struct) ) ) {
   3.887 +			kfree(buff);
   3.888 +			cmd_free(h, c, 0);
   3.889 +			return( -EFAULT);
   3.890 +		} 	
   3.891 +
   3.892 +		if (iocommand.Request.Type.Direction == XFER_READ) {
   3.893 +                        /* Copy the data out of the buffer we created */
   3.894 +                        if (copy_to_user(iocommand.buf, buff, 
   3.895 +						iocommand.buf_size)) {
   3.896 +                        	kfree(buff);
   3.897 +				cmd_free(h, c, 0);
   3.898 +				return -EFAULT;
   3.899 +			}
   3.900 +                }
   3.901 +                kfree(buff);
   3.902 +		cmd_free(h, c, 0);
   3.903 +                return 0;
   3.904 +	} 
   3.905 +	case CCISS_BIG_PASSTHRU:
   3.906 +	{
   3.907 +		BIG_IOCTL_Command_struct iocommand;
   3.908 +		ctlr_info_t *h = hba[ctlr];
   3.909 +		CommandList_struct *c;
   3.910 +		char 	*buff[MAXSGENTRIES] = {NULL,};
   3.911 +		int	buff_size[MAXSGENTRIES] = {0,};
   3.912 +		u64bit	temp64;
   3.913 +		unsigned long flags;
   3.914 +		BYTE sg_used = 0;
   3.915 +		int status = 0;
   3.916 +		int i;
   3.917 +		DECLARE_COMPLETION(wait);
   3.918 +
   3.919 +		if (!arg) 
   3.920 +			return -EINVAL;
   3.921 +		
   3.922 +		if (!capable(CAP_SYS_RAWIO)) 
   3.923 +			return -EPERM;
   3.924 +
   3.925 +		if (copy_from_user(&iocommand, (void *) arg, sizeof( BIG_IOCTL_Command_struct) ))
   3.926 +			return -EFAULT;
   3.927 +		if ((iocommand.buf_size < 1) && 
   3.928 +			(iocommand.Request.Type.Direction != XFER_NONE)) {
   3.929 +			return -EINVAL;
   3.930 +		} 
   3.931 +		/* Check kmalloc limits  using all SGs */
   3.932 +		if (iocommand.malloc_size > MAX_KMALLOC_SIZE)
   3.933 +			return -EINVAL;
   3.934 +		if (iocommand.buf_size > iocommand.malloc_size * MAXSGENTRIES)
   3.935 +			return -EINVAL;
   3.936 +		if (iocommand.buf_size > 0) {
   3.937 +			__u32   size_left_alloc = iocommand.buf_size;
   3.938 +			BYTE    *data_ptr = (BYTE *) iocommand.buf;
   3.939 +			while (size_left_alloc > 0) {
   3.940 +				buff_size[sg_used] = (size_left_alloc 
   3.941 +							> iocommand.malloc_size)
   3.942 +					? iocommand.malloc_size : size_left_alloc;
   3.943 +				buff[sg_used] = kmalloc( buff_size[sg_used], 
   3.944 +						GFP_KERNEL);
   3.945 +				if (buff[sg_used] == NULL) {
   3.946 +					status = -ENOMEM;
   3.947 +					goto cleanup1;
   3.948 +				}
   3.949 +				if (iocommand.Request.Type.Direction == 
   3.950 +						XFER_WRITE)
   3.951 +				   /* Copy the data into the buffer created */
   3.952 +				   if (copy_from_user(buff[sg_used], data_ptr, 
   3.953 +						buff_size[sg_used])) {
   3.954 +					status = -ENOMEM;
   3.955 +					goto cleanup1;			
   3.956 +				   }
   3.957 +				size_left_alloc -= buff_size[sg_used];
   3.958 +				data_ptr += buff_size[sg_used];
   3.959 +				sg_used++;
   3.960 +			}
   3.961 +			
   3.962 +		}
   3.963 +		if ((c = cmd_alloc(h , 0)) == NULL) {
   3.964 +			status = -ENOMEM;
   3.965 +			goto cleanup1;	
   3.966 +		}
   3.967 +		/* Fill in the command type */
   3.968 +		c->cmd_type = CMD_IOCTL_PEND;
   3.969 +		/* Fill in Command Header */
   3.970 +		c->Header.ReplyQueue = 0;  /* unused in simple mode */
   3.971 +		
   3.972 +		if (iocommand.buf_size > 0) { 	/* buffer to fill */
   3.973 +			c->Header.SGList = sg_used;
   3.974 +			c->Header.SGTotal= sg_used;
   3.975 +		} else	{	/* no buffers to fill */
   3.976 +			c->Header.SGList = 0;
   3.977 +			c->Header.SGTotal= 0;
   3.978 +		}
   3.979 +		c->Header.LUN = iocommand.LUN_info;
   3.980 +		c->Header.Tag.lower = c->busaddr;  /* use the kernel address */
   3.981 +						/* the cmd block for tag */
   3.982 +		
   3.983 +	/* Fill in Request block */
   3.984 +	c->Request = iocommand.Request; 
   3.985 +	/* Fill in the scatter gather information */
   3.986 +	if (iocommand.buf_size > 0 ) {
   3.987 +		int i;
   3.988 +		for(i=0; i< sg_used; i++) {
   3.989 +			temp64.val = pci_map_single( h->pdev, buff[i], 
   3.990 +					buff_size[i], 
   3.991 +					PCI_DMA_BIDIRECTIONAL);
   3.992 +
   3.993 +			c->SG[i].Addr.lower = temp64.val32.lower;
   3.994 +			c->SG[i].Addr.upper = temp64.val32.upper;
   3.995 +			c->SG[i].Len = buff_size[i];
   3.996 +			c->SG[i].Ext = 0;  /* we are not chaining */
   3.997 +		}
   3.998 +	}
   3.999 +	c->waiting = &wait;
  3.1000 +	/* Put the request on the tail of the request queue */
  3.1001 +	spin_lock_irqsave(&io_request_lock, flags);
  3.1002 +	addQ(&h->reqQ, c);
  3.1003 +	h->Qdepth++;
  3.1004 +	start_io(h);
  3.1005 +	spin_unlock_irqrestore(&io_request_lock, flags);
  3.1006 +	wait_for_completion(&wait);
  3.1007 +	/* unlock the buffers from DMA */
  3.1008 +	for(i=0; i< sg_used; i++) {
  3.1009 +		temp64.val32.lower = c->SG[i].Addr.lower;
  3.1010 +		temp64.val32.upper = c->SG[i].Addr.upper;
  3.1011 +		pci_unmap_single( h->pdev, (dma_addr_t) temp64.val,
  3.1012 +				buff_size[i], PCI_DMA_BIDIRECTIONAL);
  3.1013 +	}
  3.1014 +	/* Copy the error information out */
  3.1015 +		iocommand.error_info = *(c->err_info);
  3.1016 +		if (copy_to_user((void *) arg, &iocommand, 
  3.1017 +					sizeof( IOCTL_Command_struct) ) ) {
  3.1018 +				cmd_free(h, c, 0);
  3.1019 +				status = -EFAULT;
  3.1020 +				goto cleanup1;
  3.1021 +		}
  3.1022 +		if (iocommand.Request.Type.Direction == XFER_READ) {
  3.1023 +		/* Copy the data out of the buffer we created */
  3.1024 +			BYTE *ptr = (BYTE  *) iocommand.buf;
  3.1025 +	        	for(i=0; i< sg_used; i++) {
  3.1026 +				if (copy_to_user(ptr, buff[i], buff_size[i])) {
  3.1027 +					cmd_free(h, c, 0);
  3.1028 +					status = -EFAULT;
  3.1029 +					goto cleanup1;
  3.1030 +
  3.1031 +				}
  3.1032 +				ptr += buff_size[i];
  3.1033 +			}
  3.1034 +		}
  3.1035 +		cmd_free(h, c, 0);
  3.1036 +		status = 0;
  3.1037 +		
  3.1038 +
  3.1039 +cleanup1:
  3.1040 +		for(i=0; i< sg_used; i++) {
  3.1041 +			if (buff[i] != NULL)
  3.1042 +				kfree(buff[i]);
  3.1043 +		}
  3.1044 +		return status;
  3.1045 +	}
  3.1046 +#endif //PASSTHROUGH
  3.1047 +
  3.1048 +	default:
  3.1049 +		return -EBADRQC;
  3.1050 +	}
  3.1051 +
  3.1052 +#endif
  3.1053 +	
  3.1054 +}
  3.1055 +
  3.1056 +/* Borrowed and adapted from sd.c */
  3.1057 +static int revalidate_logvol(kdev_t dev, int maxusage)
  3.1058 +{
  3.1059 +        int ctlr, target;
  3.1060 +        struct gendisk *gdev;
  3.1061 +        unsigned long flags;
  3.1062 +        int max_p;
  3.1063 +        int start;
  3.1064 +        int i;
  3.1065 +
  3.1066 +        target = MINOR(dev) >> NWD_SHIFT;
  3.1067 +	ctlr = map_major_to_ctlr[MAJOR(dev)];
  3.1068 +        gdev = &(hba[ctlr]->gendisk);
  3.1069 +
  3.1070 +        spin_lock_irqsave(&io_request_lock, flags);
  3.1071 +        if (hba[ctlr]->drv[target].usage_count > maxusage) {
  3.1072 +                spin_unlock_irqrestore(&io_request_lock, flags);
  3.1073 +                printk(KERN_WARNING "cciss: Device busy for "
  3.1074 +                        "revalidation (usage=%d)\n",
  3.1075 +                        hba[ctlr]->drv[target].usage_count);
  3.1076 +                return -EBUSY;
  3.1077 +        }
  3.1078 +        hba[ctlr]->drv[target].usage_count++;
  3.1079 +        spin_unlock_irqrestore(&io_request_lock, flags);
  3.1080 +
  3.1081 +        max_p = gdev->max_p;
  3.1082 +        start = target << gdev->minor_shift;
  3.1083 +
  3.1084 +        for(i=max_p-1; i>=0; i--) {
  3.1085 +                int minor = start+i;
  3.1086 +                invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
  3.1087 +                gdev->part[minor].start_sect = 0;
  3.1088 +                gdev->part[minor].nr_sects = 0;
  3.1089 +
  3.1090 +                /* reset the blocksize so we can read the partition table */
  3.1091 +                blksize_size[hba[ctlr]->major][minor] = 1024;
  3.1092 +        }
  3.1093 +	/* setup partitions per disk */
  3.1094 +	grok_partitions(gdev, target, MAX_PART, 
  3.1095 +			hba[ctlr]->drv[target].nr_blocks);
  3.1096 +        hba[ctlr]->drv[target].usage_count--;
  3.1097 +        return 0;
  3.1098 +}
  3.1099 +
  3.1100 +static int frevalidate_logvol(kdev_t dev)
  3.1101 +{
  3.1102 +#ifdef CCISS_DEBUG
  3.1103 +	printk(KERN_DEBUG "cciss: frevalidate has been called\n");
  3.1104 +#endif /* CCISS_DEBUG */ 
  3.1105 +	return revalidate_logvol(dev, 0);
  3.1106 +}
  3.1107 +static int deregister_disk(int ctlr, int logvol)
  3.1108 +{
  3.1109 +	unsigned long flags;
  3.1110 +	struct gendisk *gdev = &(hba[ctlr]->gendisk);
  3.1111 +	ctlr_info_t  *h = hba[ctlr];
  3.1112 +	int start, max_p, i;
  3.1113 +
  3.1114 +	if (!capable(CAP_SYS_RAWIO))
  3.1115 +		return -EPERM;
  3.1116 +
  3.1117 +	spin_lock_irqsave(&io_request_lock, flags);
   3.1118 +	/* make sure the logical volume is NOT in use */
  3.1119 +	if (h->drv[logvol].usage_count > 1 || h->busy_configuring) {
  3.1120 +		spin_unlock_irqrestore(&io_request_lock, flags);
  3.1121 +		return -EBUSY;
  3.1122 +	}
  3.1123 +	h->busy_configuring = 1;
  3.1124 +	spin_unlock_irqrestore(&io_request_lock, flags);
  3.1125 +
  3.1126 +	/* invalidate the devices and deregister the disk */
  3.1127 +	max_p = gdev->max_p;
  3.1128 +	start = logvol << gdev->minor_shift;
  3.1129 +	for (i=max_p-1; i>=0; i--) {
  3.1130 +		int minor = start+i;
  3.1131 +		/* printk("invalidating( %d %d)\n", ctlr, minor); */
  3.1132 +		invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
  3.1133 +		/* so open will now fail */
  3.1134 +		h->sizes[minor] = 0;
  3.1135 +		/* so it will no longer appear in /proc/partitions */
  3.1136 +		gdev->part[minor].start_sect = 0;
  3.1137 +		gdev->part[minor].nr_sects = 0;
  3.1138 +	}
  3.1139 +	/* check to see if it was the last disk */
  3.1140 +	if (logvol == h->highest_lun) {
   3.1141 +		/* if so, find the new highest lun */
  3.1142 +		int i, newhighest =-1;
  3.1143 +		for(i=0; i<h->highest_lun; i++) {
  3.1144 +			/* if the disk has size > 0, it is available */
  3.1145 +			if (h->sizes[i << gdev->minor_shift] != 0)
  3.1146 +				newhighest = i;
  3.1147 +		}
  3.1148 +		h->highest_lun = newhighest;
  3.1149 +
  3.1150 +	}
  3.1151 +	--h->num_luns;
  3.1152 +	gdev->nr_real = h->highest_lun+1;
  3.1153 +	/* zero out the disk size info */
  3.1154 +	h->drv[logvol].nr_blocks = 0;
  3.1155 +	h->drv[logvol].block_size = 0;
  3.1156 +	h->drv[logvol].cylinders = 0;
  3.1157 +	h->drv[logvol].LunID = 0;
  3.1158 +	h->busy_configuring = 0;
  3.1159 +	return 0;
  3.1160 +}
  3.1161 +static int sendcmd_withirq(__u8	cmd,
  3.1162 +	int	ctlr,
  3.1163 +	void	*buff,
  3.1164 +	size_t	size,
  3.1165 +	unsigned int use_unit_num,
  3.1166 +	unsigned int log_unit,
  3.1167 +	__u8	page_code,
  3.1168 +	__u8 cmdtype)
  3.1169 +{
  3.1170 +	ctlr_info_t *h = hba[ctlr];
  3.1171 +	CommandList_struct *c;
  3.1172 +	u64bit	buff_dma_handle;
  3.1173 +	unsigned long flags;
  3.1174 +	int return_status = IO_OK;
  3.1175 +#if 0
  3.1176 +	DECLARE_COMPLETION(wait);
  3.1177 +#else
  3.1178 +	/* XXX SMH: no waiting for us ... spin instead */
  3.1179 +	int wait = 1; 
  3.1180 +	int usecs = 0; 
  3.1181 +#endif
  3.1182 +
  3.1183 +	if ((c = cmd_alloc(h , 0)) == NULL)
  3.1184 +		return -ENOMEM;
  3.1185 +	c->cmd_type = CMD_IOCTL_PEND;
  3.1186 +	/* Fill in Command Header */
  3.1187 +	c->Header.ReplyQueue = 0;  /* unused in simple mode */
  3.1188 +	if (buff != NULL) { 	/* buffer to fill */
  3.1189 +		c->Header.SGList = 1;
  3.1190 +		c->Header.SGTotal= 1;
  3.1191 +	} else {
  3.1192 +		/* no buffers to fill */
  3.1193 +		c->Header.SGList = 0;
  3.1194 +		c->Header.SGTotal= 0;
  3.1195 +	}
  3.1196 +	c->Header.Tag.lower = c->busaddr;  /* tag is phys addr of cmd */
  3.1197 +	/* Fill in Request block */
  3.1198 +	c->Request.CDB[0] = cmd;
  3.1199 +	c->Request.Type.Type = cmdtype;
  3.1200 +	if (cmdtype == TYPE_CMD) {
  3.1201 +	switch (cmd) {
  3.1202 +		case  CISS_INQUIRY:
   3.1203 +			/* If the logical unit number is 0 then this is
   3.1204 +				going to the controller, so it's a physical
   3.1205 +				command: mode = 0, target = 0,
   3.1206 +				and we have nothing to write.
   3.1207 +				Otherwise:
   3.1208 +				mode = 1, target = LUNID
   3.1209 +			*/
  3.1210 +			if (use_unit_num != 0) {
  3.1211 +				c->Header.LUN.LogDev.VolId =
  3.1212 +					hba[ctlr]->drv[log_unit].LunID;
  3.1213 +				c->Header.LUN.LogDev.Mode = 1;
  3.1214 +			}
  3.1215 +			if (page_code != 0) {
  3.1216 +				c->Request.CDB[1] = 0x01;
  3.1217 +				c->Request.CDB[2] = page_code;
  3.1218 +			}
  3.1219 +			c->Request.CDBLen = 6;
  3.1220 +			c->Request.Type.Attribute = ATTR_SIMPLE;
  3.1221 +			c->Request.Type.Direction = XFER_READ; /* Read */
  3.1222 +			c->Request.Timeout = 0; /* Don't time out */
  3.1223 +			c->Request.CDB[4] = size  & 0xFF;
  3.1224 +		break;
  3.1225 +		case CISS_REPORT_LOG:
   3.1226 +			/* Talking to the controller, so it's a physical
   3.1227 +				command: mode = 0, target = 0,
   3.1228 +				and we have nothing to write.
   3.1229 +			*/
  3.1230 +			c->Request.CDBLen = 12;
  3.1231 +			c->Request.Type.Attribute = ATTR_SIMPLE;
  3.1232 +			c->Request.Type.Direction = XFER_READ; /* Read */
  3.1233 +			c->Request.Timeout = 0; /* Don't time out */
  3.1234 +			c->Request.CDB[6] = (size >> 24) & 0xFF;  /* MSB */
  3.1235 +			c->Request.CDB[7] = (size >> 16) & 0xFF;
  3.1236 +			c->Request.CDB[8] = (size >> 8) & 0xFF;
  3.1237 +			c->Request.CDB[9] = size & 0xFF;
  3.1238 +		break;
  3.1239 +		case CCISS_READ_CAPACITY:
  3.1240 +			c->Header.LUN.LogDev.VolId=
  3.1241 +				hba[ctlr]->drv[log_unit].LunID;
  3.1242 +			c->Header.LUN.LogDev.Mode = 1;
  3.1243 +			c->Request.CDBLen = 10;
  3.1244 +			c->Request.Type.Attribute = ATTR_SIMPLE;
  3.1245 +			c->Request.Type.Direction = XFER_READ; /* Read */
  3.1246 +			c->Request.Timeout = 0; /* Don't time out */
  3.1247 +		break;
  3.1248 +		default:
  3.1249 +			printk(KERN_WARNING
   3.1250 +				"cciss:  Unknown Command 0x%x attempted\n", cmd);
  3.1251 +			cmd_free(h, c, 1);
  3.1252 +			return IO_ERROR;
  3.1253 +		}
  3.1254 +	} else if (cmdtype == TYPE_MSG) {
  3.1255 +		switch (cmd) {
  3.1256 +		case 3: /* No-Op message */
  3.1257 +			c->Request.CDBLen = 1;
  3.1258 +			c->Request.Type.Attribute = ATTR_SIMPLE;
  3.1259 +			c->Request.Type.Direction = XFER_WRITE;
  3.1260 +			c->Request.Timeout = 0;
  3.1261 +			c->Request.CDB[0] = cmd;
  3.1262 +			break;
  3.1263 +		default:
  3.1264 +			printk(KERN_WARNING
  3.1265 +				"cciss%d: unknown message type %d\n",
  3.1266 +					ctlr, cmd);
  3.1267 +			cmd_free(h, c, 1);
  3.1268 +			return IO_ERROR;
  3.1269 +		}
  3.1270 +	} else {
  3.1271 +		printk(KERN_WARNING
  3.1272 +			"cciss%d: unknown command type %d\n", ctlr, cmdtype);
  3.1273 +		cmd_free(h, c, 1);
  3.1274 +		return IO_ERROR;
  3.1275 +	}
  3.1276 +
  3.1277 +	/* Fill in the scatter gather information */
  3.1278 +	if (size > 0) {
  3.1279 +		buff_dma_handle.val = (__u64) pci_map_single( h->pdev,
  3.1280 +			buff, size, PCI_DMA_BIDIRECTIONAL);
  3.1281 +		c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
  3.1282 +		c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
  3.1283 +		c->SG[0].Len = size;
  3.1284 +		c->SG[0].Ext = 0;  /* we are not chaining */
  3.1285 +	}
  3.1286 +
  3.1287 +resend_cmd2:
  3.1288 +
  3.1289 +#if 0
  3.1290 +	c->waiting = &wait;
  3.1291 +#else 
  3.1292 +	/* XXX SMH: we spin instead of waiting... */
  3.1293 +	c->waiting = (void *)&wait; 
  3.1294 +#endif
  3.1295 +
  3.1296 +	/* Put the request on the tail of the queue and send it */
  3.1297 +	spin_lock_irqsave(&io_request_lock, flags);
  3.1298 +	addQ(&h->reqQ, c);
  3.1299 +	h->Qdepth++;
  3.1300 +	start_io(h);
  3.1301 +	spin_unlock_irqrestore(&io_request_lock, flags);
  3.1302 +
  3.1303 +#if 0 
  3.1304 +	wait_for_completion(&wait);
  3.1305 +#else
  3.1306 +	/* XXX SMH: spin instead of waiting on wait queue */
  3.1307 +	while(wait) { 
  3.1308 +		do_softirq(); 
  3.1309 +		udelay(500); 
  3.1310 +		usecs += 500; 
  3.1311 +		if(usecs > 1000000) { 
  3.1312 +			printk("cciss: still waiting...!\n"); 
  3.1313 +			usecs = 0; 
  3.1314 +		}
  3.1315 +	}
  3.1316 +#endif
  3.1317 +
  3.1318 +
  3.1319 +	if (c->err_info->CommandStatus != 0) {
  3.1320 +		/* an error has occurred */
  3.1321 +		switch (c->err_info->CommandStatus) {
  3.1322 +			case CMD_TARGET_STATUS:
  3.1323 +				printk(KERN_WARNING "cciss: cmd %p has "
  3.1324 +					" completed with errors\n", c);
  3.1325 +				if (c->err_info->ScsiStatus) {
  3.1326 +					printk(KERN_WARNING "cciss: cmd %p "
  3.1327 +					"has SCSI Status = %x\n", c,
  3.1328 +						c->err_info->ScsiStatus);
  3.1329 +				}
  3.1330 +			break;
  3.1331 +			case CMD_DATA_UNDERRUN:
  3.1332 +			case CMD_DATA_OVERRUN:
   3.1333 +			/* expected for inquiry and report LUN commands */
  3.1334 +			break;
  3.1335 +			case CMD_INVALID:
  3.1336 +				printk(KERN_WARNING "cciss: cmd %p is "
  3.1337 +					"reported invalid\n", c);
  3.1338 +				return_status = IO_ERROR;
  3.1339 +			break;
  3.1340 +			case CMD_PROTOCOL_ERR:
  3.1341 +				printk(KERN_WARNING "cciss: cmd %p has "
  3.1342 +					"protocol error \n", c);
  3.1343 +				return_status = IO_ERROR;
  3.1344 +			break;
  3.1345 +			case CMD_HARDWARE_ERR:
  3.1346 +				printk(KERN_WARNING "cciss: cmd %p had "
  3.1347 +					" hardware error\n", c);
  3.1348 +				return_status = IO_ERROR;
  3.1349 +				break;
  3.1350 +			case CMD_CONNECTION_LOST:
  3.1351 +				printk(KERN_WARNING "cciss: cmd %p had "
  3.1352 +					"connection lost\n", c);
  3.1353 +				return_status = IO_ERROR;
  3.1354 +			break;
  3.1355 +			case CMD_ABORTED:
  3.1356 +				printk(KERN_WARNING "cciss: cmd %p was "
  3.1357 +					"aborted\n", c);
  3.1358 +				return_status = IO_ERROR;
  3.1359 +			break;
  3.1360 +			case CMD_ABORT_FAILED:
  3.1361 +				printk(KERN_WARNING "cciss: cmd %p reports "
  3.1362 +					"abort failed\n", c);
  3.1363 +				return_status = IO_ERROR;
  3.1364 +			break;
  3.1365 +			case CMD_UNSOLICITED_ABORT:
  3.1366 +				printk(KERN_WARNING "cciss: cmd %p aborted "
   3.1367 +					"due to an unsolicited abort\n", c);
  3.1368 +				if (c->retry_count < MAX_CMD_RETRIES) 
  3.1369 +				{ 
  3.1370 +					printk(KERN_WARNING "retrying cmd\n"); 
  3.1371 +					c->retry_count++; 
  3.1372 +					/* erase the old error */ 
  3.1373 +					/* information */ 
  3.1374 +					memset(c->err_info, 0, 
  3.1375 +						sizeof(ErrorInfo_struct)); 
  3.1376 +					return_status = IO_OK;
  3.1377 +#if 0
  3.1378 +					INIT_COMPLETION(wait);
  3.1379 +#else 
  3.1380 +					/* XXX SMH: spin instead of waiting. */
  3.1381 +					wait = 0; 
  3.1382 +#endif
  3.1383 +					goto resend_cmd2;
  3.1384 +					
  3.1385 +				}
  3.1386 +				return_status = IO_ERROR;
  3.1387 +			break;
  3.1388 +			default:
  3.1389 +				printk(KERN_WARNING "cciss: cmd %p returned "
  3.1390 +					"unknown status %x\n", c,
  3.1391 +						c->err_info->CommandStatus);
  3.1392 +				return_status = IO_ERROR;
  3.1393 +		}
  3.1394 +	}
  3.1395 +
  3.1396 +	/* unlock the buffers from DMA */
  3.1397 +	pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
  3.1398 +			size, PCI_DMA_BIDIRECTIONAL);
  3.1399 +	cmd_free(h, c, 0);
  3.1400 +	return return_status;
  3.1401 +}
  3.1402 +static int register_new_disk(int ctlr, int opened_vol, __u64 requested_lun)
  3.1403 +{
  3.1404 +	struct gendisk *gdev = &(hba[ctlr]->gendisk);
  3.1405 +	ctlr_info_t  *h = hba[ctlr];
  3.1406 +	int start, max_p, i;
  3.1407 +	int num_luns;
  3.1408 +	int logvol;
  3.1409 +	int new_lun_found = 0;
  3.1410 +	int new_lun_index = 0;
  3.1411 +	int free_index_found = 0;
  3.1412 +	int free_index = 0;
  3.1413 +	ReportLunData_struct *ld_buff;
  3.1414 +	ReadCapdata_struct *size_buff;
  3.1415 +	InquiryData_struct *inq_buff;
  3.1416 +	int return_code;
  3.1417 +	int listlength = 0;
  3.1418 +	__u32 lunid = 0;
  3.1419 +	unsigned int block_size;
  3.1420 +	unsigned int total_size;
  3.1421 +	unsigned long flags;
  3.1422 +	int req_lunid = (int) (requested_lun & (__u64) 0xffffffff);
  3.1423 +
  3.1424 +	if (!capable(CAP_SYS_RAWIO))
  3.1425 +		return -EPERM;
  3.1426 +	/* if we have no space in our disk array left to add anything */
  3.1427 +	spin_lock_irqsave(&io_request_lock, flags);
  3.1428 +	if (h->num_luns >= CISS_MAX_LUN) {
  3.1429 +		spin_unlock_irqrestore(&io_request_lock, flags);
  3.1430 +		return -EINVAL;
  3.1431 +	}
  3.1432 +	if (h->busy_configuring) {
  3.1433 +		spin_unlock_irqrestore(&io_request_lock, flags);
  3.1434 +		return -EBUSY;
  3.1435 +	}
  3.1436 +	h->busy_configuring = 1;
  3.1437 +	spin_unlock_irqrestore(&io_request_lock, flags);
  3.1438 +
  3.1439 +	ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
  3.1440 +	if (ld_buff == NULL) {
  3.1441 +		printk(KERN_ERR "cciss: out of memory\n");
  3.1442 +		h->busy_configuring = 0;
  3.1443 +		return -ENOMEM;
  3.1444 +	}
  3.1445 +	memset(ld_buff, 0, sizeof(ReportLunData_struct));
  3.1446 +	size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
  3.1447 +	if (size_buff == NULL) {
  3.1448 +		printk(KERN_ERR "cciss: out of memory\n");
  3.1449 +		kfree(ld_buff);
  3.1450 +		h->busy_configuring = 0;
  3.1451 +		return -ENOMEM;
  3.1452 +	}
  3.1453 +	inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
  3.1454 +	if (inq_buff == NULL) {
  3.1455 +		printk(KERN_ERR "cciss: out of memory\n");
  3.1456 +		kfree(ld_buff);
  3.1457 +		kfree(size_buff);
  3.1458 +		h->busy_configuring = 0;
  3.1459 +		return -ENOMEM;
  3.1460 +	}
  3.1461 +
  3.1462 +	return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
  3.1463 +			sizeof(ReportLunData_struct), 0, 0, 0, TYPE_CMD);
  3.1464 +
  3.1465 +	if (return_code == IO_OK) {
  3.1466 +		listlength = be32_to_cpu(*((__u32 *) &ld_buff->LUNListLength[0]));
  3.1467 +	} else {
  3.1468 +		/* reading number of logical volumes failed */
  3.1469 +		printk(KERN_WARNING "cciss: report logical volume"
  3.1470 +			" command failed\n");
  3.1471 +		listlength = 0;
  3.1472 +		h->busy_configuring = 0;
  3.1473 +		return -1;
  3.1474 +	}
   3.1475 +	num_luns = listlength / 8; /* 8 bytes per entry */
  3.1476 +	if (num_luns > CISS_MAX_LUN)
  3.1477 +		num_luns = CISS_MAX_LUN;
  3.1478 +
  3.1479 +#ifdef CCISS_DEBUG
  3.1480 +	printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
  3.1481 +		ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
  3.1482 +		ld_buff->LUNListLength[3],  num_luns);
  3.1483 +#endif
  3.1484 +	for(i=0; i<  num_luns; i++) {
  3.1485 +		int j;
  3.1486 +		int lunID_found = 0;
  3.1487 +
  3.1488 +		lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
  3.1489 +		lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
  3.1490 +		lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
  3.1491 +		lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
  3.1492 +
  3.1493 +		/* check to see if this is a new lun */
  3.1494 +		for(j=0; j <= h->highest_lun; j++) {
  3.1495 +#ifdef CCISS_DEBUG
  3.1496 +			printk("Checking %d %x against %x\n", j,h->drv[j].LunID,
  3.1497 +						lunid);
  3.1498 +#endif /* CCISS_DEBUG */
  3.1499 +			if (h->drv[j].LunID == lunid) {
  3.1500 +				lunID_found = 1;
  3.1501 +				break;
  3.1502 +			}
  3.1503 +
  3.1504 +		}
  3.1505 +		if (lunID_found == 1)
  3.1506 +			continue;
  3.1507 +		else {	/* new lun found */
  3.1508 +			
  3.1509 +#ifdef CCISS_DEBUG
  3.1510 +			printk("new lun found at %d\n", i);
  3.1511 +#endif /* CCISS_DEBUG */
  3.1512 +			if (req_lunid)  /* we are looking for a specific lun */
  3.1513 +			{
  3.1514 +				if (lunid != req_lunid)
  3.1515 +				{
  3.1516 +#ifdef CCISS_DEBUG
  3.1517 +					printk("new lun %x is not %x\n",
  3.1518 +							lunid, req_lunid);
  3.1519 +#endif /* CCISS_DEBUG */
  3.1520 +					continue;
  3.1521 +				}
  3.1522 +			}
  3.1523 +			new_lun_index = i;
  3.1524 +			new_lun_found = 1;
  3.1525 +			break;
  3.1526 +		}
  3.1527 +	}
  3.1528 +	if (!new_lun_found) {
  3.1529 +		printk(KERN_DEBUG "cciss:  New Logical Volume not found\n");
  3.1530 +		h->busy_configuring = 0;
  3.1531 +		return -1;
  3.1532 +	}
  3.1533 +	/* Now find the free index 	*/
  3.1534 +	for(i=0; i <CISS_MAX_LUN; i++) {
  3.1535 +#ifdef CCISS_DEBUG
  3.1536 +		printk("Checking Index %d\n", i);
  3.1537 +#endif /* CCISS_DEBUG */
  3.1538 +		if (hba[ctlr]->drv[i].LunID == 0) {
  3.1539 +#ifdef CCISS_DEBUG
  3.1540 +			printk("free index found at %d\n", i);
  3.1541 +#endif /* CCISS_DEBUG */
  3.1542 +			free_index_found = 1;
  3.1543 +			free_index = i;
  3.1544 +			break;
  3.1545 +		}
  3.1546 +	}
  3.1547 +	if (!free_index_found) {
  3.1548 +		printk(KERN_WARNING "cciss: unable to find free slot for disk\n");
  3.1549 +		h->busy_configuring = 0;
  3.1550 +		return -1;
  3.1551 +	}
  3.1552 +
  3.1553 +	logvol = free_index;
  3.1554 +	hba[ctlr]->drv[logvol].LunID = lunid;
   3.1555 +	/* there could be gaps in lun numbers, track highest */
  3.1556 +	if (hba[ctlr]->highest_lun < logvol)
  3.1557 +		hba[ctlr]->highest_lun = logvol;
  3.1558 +
  3.1559 +	memset(size_buff, 0, sizeof(ReadCapdata_struct));
  3.1560 +	return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr,
  3.1561 +			size_buff, sizeof(ReadCapdata_struct), 1,
  3.1562 +			logvol, 0, TYPE_CMD);
  3.1563 +	if (return_code == IO_OK) {
  3.1564 +		total_size = (0xff &
  3.1565 +			(unsigned int) size_buff->total_size[0]) << 24;
  3.1566 +		total_size |= (0xff &
  3.1567 +			(unsigned int) size_buff->total_size[1]) << 16;
  3.1568 +		total_size |= (0xff &
  3.1569 +			(unsigned int) size_buff->total_size[2]) << 8;
  3.1570 +		total_size |= (0xff &
  3.1571 +			(unsigned int) size_buff->total_size[3]);
  3.1572 +		total_size++; /* command returns highest block address */
  3.1573 +
  3.1574 +		block_size = (0xff &
  3.1575 +			(unsigned int) size_buff->block_size[0]) << 24;
  3.1576 +		block_size |= (0xff &
  3.1577 +			(unsigned int) size_buff->block_size[1]) << 16;
  3.1578 +		block_size |= (0xff &
  3.1579 +			(unsigned int) size_buff->block_size[2]) << 8;
  3.1580 +		block_size |= (0xff &
  3.1581 +			(unsigned int) size_buff->block_size[3]);
  3.1582 +	} else {
  3.1583 +		/* read capacity command failed */
  3.1584 +		printk(KERN_WARNING "cciss: read capacity failed\n");
  3.1585 +		total_size = 0;
  3.1586 +		block_size = BLOCK_SIZE;
  3.1587 +	}
  3.1588 +	printk(KERN_INFO "      blocks= %d block_size= %d\n",
  3.1589 +					total_size, block_size);
  3.1590 +	/* Execute the command to read the disk geometry */
  3.1591 +	memset(inq_buff, 0, sizeof(InquiryData_struct));
  3.1592 +	return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
  3.1593 +		sizeof(InquiryData_struct), 1, logvol ,0xC1, TYPE_CMD);
  3.1594 +	if (return_code == IO_OK) {
  3.1595 +		if (inq_buff->data_byte[8] == 0xFF) {
  3.1596 +			printk(KERN_WARNING
  3.1597 +			"cciss: reading geometry failed, "
  3.1598 +			"volume does not support reading geometry\n");
  3.1599 +
  3.1600 +			hba[ctlr]->drv[logvol].block_size = block_size;
  3.1601 +			hba[ctlr]->drv[logvol].nr_blocks = total_size;
  3.1602 +			hba[ctlr]->drv[logvol].heads = 255;
  3.1603 +			hba[ctlr]->drv[logvol].sectors = 32; /* secs/trk */
  3.1604 +			hba[ctlr]->drv[logvol].cylinders = total_size / 255 /32;
  3.1605 +			hba[ctlr]->drv[logvol].raid_level = RAID_UNKNOWN;
  3.1606 +		} else {
  3.1607 +			hba[ctlr]->drv[logvol].block_size = block_size;
  3.1608 +			hba[ctlr]->drv[logvol].nr_blocks = total_size;
  3.1609 +			hba[ctlr]->drv[logvol].heads = inq_buff->data_byte[6];
  3.1610 +			hba[ctlr]->drv[logvol].sectors = inq_buff->data_byte[7];
  3.1611 +			hba[ctlr]->drv[logvol].cylinders =
  3.1612 +				(inq_buff->data_byte[4] & 0xff) << 8;
  3.1613 +			hba[ctlr]->drv[logvol].cylinders +=
  3.1614 +				inq_buff->data_byte[5];
  3.1615 +			hba[ctlr]->drv[logvol].raid_level = 
  3.1616 +				inq_buff->data_byte[8];
  3.1617 +		}
  3.1618 +	} else {
  3.1619 +		/* Get geometry failed */
  3.1620 +		printk(KERN_WARNING "cciss: reading geometry failed, "
  3.1621 +			"continuing with default geometry\n");
  3.1622 +
  3.1623 +		hba[ctlr]->drv[logvol].block_size = block_size;
  3.1624 +		hba[ctlr]->drv[logvol].nr_blocks = total_size;
  3.1625 +		hba[ctlr]->drv[logvol].heads = 255;
  3.1626 +		hba[ctlr]->drv[logvol].sectors = 32; /* Sectors per track */
  3.1627 +		hba[ctlr]->drv[logvol].cylinders = total_size / 255 / 32;
  3.1628 +	}
  3.1629 +	if (hba[ctlr]->drv[logvol].raid_level > 5)
  3.1630 +		hba[ctlr]->drv[logvol].raid_level = RAID_UNKNOWN;
  3.1631 +	printk(KERN_INFO "      heads= %d, sectors= %d, cylinders= %d RAID %s\n\n",
  3.1632 +		hba[ctlr]->drv[logvol].heads,
  3.1633 +		hba[ctlr]->drv[logvol].sectors,
  3.1634 +		hba[ctlr]->drv[logvol].cylinders, 
  3.1635 +		raid_label[hba[ctlr]->drv[logvol].raid_level]);
  3.1636 +
  3.1637 +	/* special case for c?d0, which may be opened even when
  3.1638 +	   it does not "exist".  In that case, don't mess with usage count.
  3.1639 +	   Also, /dev/c1d1 could be used to re-add c0d0 so we can't just 
  3.1640 +	   check whether logvol == 0, must check logvol != opened_vol */
  3.1641 +	if (logvol != opened_vol)
  3.1642 +		hba[ctlr]->drv[logvol].usage_count = 0;
  3.1643 +
  3.1644 +	max_p = gdev->max_p;
  3.1645 +	start = logvol<< gdev->minor_shift;
  3.1646 +	hba[ctlr]->hd[start].nr_sects = total_size;
  3.1647 +	hba[ctlr]->sizes[start] = total_size;
  3.1648 +
  3.1649 +	for(i=max_p-1; i>=0; i--) {
  3.1650 +		int minor = start+i;
  3.1651 +		invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
  3.1652 +		gdev->part[minor].start_sect = 0;
  3.1653 +		gdev->part[minor].nr_sects = 0;
  3.1654 +
  3.1655 +		/* reset the blocksize so we can read the partition table */
  3.1656 +		blksize_size[hba[ctlr]->major][minor] = block_size;
  3.1657 +		hba[ctlr]->hardsizes[minor] = block_size;
  3.1658 +	}
  3.1659 +
  3.1660 +	++hba[ctlr]->num_luns;
  3.1661 +	gdev->nr_real = hba[ctlr]->highest_lun + 1;
  3.1662 +	/* setup partitions per disk */
  3.1663 +	grok_partitions(gdev, logvol, MAX_PART,
  3.1664 +			hba[ctlr]->drv[logvol].nr_blocks);
  3.1665 +	kfree(ld_buff);
  3.1666 +	kfree(size_buff);
  3.1667 +	kfree(inq_buff);
  3.1668 +	h->busy_configuring = 0;
  3.1669 +	return logvol;
  3.1670 +}
  3.1671 +
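register_new_disk() above assembles total_size and block_size from 4-byte big-endian buffers with a repeated shift-and-OR pattern (the LunID bytes are combined the same way, but with byte 3 as the MSB). A minimal standalone sketch of that byte assembly, compiled outside the kernel; the helper name and sample bytes are made up for illustration:

	#include <stdio.h>

	/* Assemble a 32-bit value from a 4-byte big-endian buffer, mirroring
	 * the shift-and-OR pattern used for the READ CAPACITY total_size and
	 * block_size fields above. */
	static unsigned int get_be32(const unsigned char b[4])
	{
		return ((unsigned int)b[0] << 24) |
		       ((unsigned int)b[1] << 16) |
		       ((unsigned int)b[2] <<  8) |
		        (unsigned int)b[3];
	}

	int main(void)
	{
		unsigned char total_size[4] = { 0x00, 0x10, 0x00, 0x00 };

		/* READ CAPACITY reports the highest block address, hence the +1 */
		printf("blocks = %u\n", get_be32(total_size) + 1);
		return 0;
	}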
  3.1672 +static int cciss_rescan_disk(int ctlr, int logvol)
  3.1673 +{
  3.1674 +	struct gendisk *gdev = &(hba[ctlr]->gendisk);
  3.1675 +	int start, max_p, i;
  3.1676 +	ReadCapdata_struct *size_buff;
  3.1677 +	InquiryData_struct *inq_buff;
  3.1678 +	int return_code;
  3.1679 +	unsigned int block_size;
  3.1680 +	unsigned int total_size;
  3.1681 +
  3.1682 +	if (!capable(CAP_SYS_RAWIO))
  3.1683 +		return -EPERM;
  3.1684 +	if (hba[ctlr]->sizes[logvol << NWD_SHIFT] != 0) {
   3.1685 +		/* disk is possibly online, just return a warning */
  3.1686 +		return 1;
  3.1687 +	}
  3.1688 +	size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
  3.1689 +	if (size_buff == NULL) {
  3.1690 +		printk(KERN_ERR "cciss: out of memory\n");
  3.1691 +		return -1;
  3.1692 +	}
  3.1693 +	inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
  3.1694 +	if (inq_buff == NULL) {
  3.1695 +		printk(KERN_ERR "cciss: out of memory\n");
  3.1696 +		kfree(size_buff);
  3.1697 +		return -1;
  3.1698 +	}
  3.1699 +	memset(size_buff, 0, sizeof(ReadCapdata_struct));
  3.1700 +	return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, size_buff,
  3.1701 +				sizeof( ReadCapdata_struct), 1, logvol, 0, 
  3.1702 +				TYPE_CMD);
  3.1703 +	if (return_code == IO_OK) {
  3.1704 +		total_size = (0xff &
  3.1705 +			(unsigned int)(size_buff->total_size[0])) << 24;
  3.1706 +		total_size |= (0xff &
  3.1707 +				(unsigned int)(size_buff->total_size[1])) << 16;
  3.1708 +		total_size |= (0xff &
  3.1709 +				(unsigned int)(size_buff->total_size[2])) << 8;
  3.1710 +		total_size |= (0xff & (unsigned int)
  3.1711 +				(size_buff->total_size[3]));
  3.1712 +		total_size++; /* command returns highest block address */
  3.1713 +
  3.1714 +		block_size = (0xff &
  3.1715 +				(unsigned int)(size_buff->block_size[0])) << 24;
  3.1716 +		block_size |= (0xff &
  3.1717 +				(unsigned int)(size_buff->block_size[1])) << 16;
  3.1718 +		block_size |= (0xff &
  3.1719 +				(unsigned int)(size_buff->block_size[2])) << 8;
  3.1720 +		block_size |= (0xff &
  3.1721 +				(unsigned int)(size_buff->block_size[3]));
  3.1722 +	} else { /* read capacity command failed */
  3.1723 +		printk(KERN_WARNING "cciss: read capacity failed\n");
  3.1724 +		total_size = block_size = 0;
  3.1725 +	}
  3.1726 +	printk(KERN_INFO "      blocks= %d block_size= %d\n",
  3.1727 +					total_size, block_size);
  3.1728 +	/* Execute the command to read the disk geometry */
  3.1729 +	memset(inq_buff, 0, sizeof(InquiryData_struct));
  3.1730 +	return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
  3.1731 +			sizeof(InquiryData_struct), 1, logvol ,0xC1, TYPE_CMD);
  3.1732 +	if (return_code == IO_OK) {
  3.1733 +		if (inq_buff->data_byte[8] == 0xFF) {
  3.1734 +			printk(KERN_WARNING "cciss: reading geometry failed, "
  3.1735 +				"volume does not support reading geometry\n");
  3.1736 +
  3.1737 +			hba[ctlr]->drv[logvol].nr_blocks = total_size;
  3.1738 +			hba[ctlr]->drv[logvol].heads = 255;
  3.1739 +			hba[ctlr]->drv[logvol].sectors = 32; /* Sectors/track */
  3.1740 +			hba[ctlr]->drv[logvol].cylinders = total_size / 255 /32;
  3.1741 +		} else {
  3.1742 +			hba[ctlr]->drv[logvol].nr_blocks = total_size;
  3.1743 +			hba[ctlr]->drv[logvol].heads = inq_buff->data_byte[6];
  3.1744 +			hba[ctlr]->drv[logvol].sectors = inq_buff->data_byte[7];
  3.1745 +			hba[ctlr]->drv[logvol].cylinders =
  3.1746 +				(inq_buff->data_byte[4] & 0xff) << 8;
  3.1747 +			hba[ctlr]->drv[logvol].cylinders +=
  3.1748 +				inq_buff->data_byte[5];
  3.1749 +		}
  3.1750 +	} else { /* Get geometry failed */
  3.1751 +		printk(KERN_WARNING "cciss: reading geometry failed, "
  3.1752 +				"continuing with default geometry\n");
  3.1753 +
  3.1754 +		hba[ctlr]->drv[logvol].nr_blocks = total_size;
  3.1755 +		hba[ctlr]->drv[logvol].heads = 255;
  3.1756 +		hba[ctlr]->drv[logvol].sectors = 32; /* Sectors / track */
  3.1757 +		hba[ctlr]->drv[logvol].cylinders = total_size / 255 /32;
  3.1758 +	}
  3.1759 +
  3.1760 +	printk(KERN_INFO "      heads= %d, sectors= %d, cylinders= %d \n\n", 
  3.1761 +		hba[ctlr]->drv[logvol].heads,
  3.1762 +		hba[ctlr]->drv[logvol].sectors,
  3.1763 +		hba[ctlr]->drv[logvol].cylinders);
  3.1764 +	max_p = gdev->max_p;
  3.1765 +	start = logvol<< gdev->minor_shift;
  3.1766 +	hba[ctlr]->hd[start].nr_sects = hba[ctlr]->sizes[start]= total_size;
  3.1767 +
  3.1768 +	for (i=max_p-1; i>=0; i--) {
  3.1769 +		int minor = start+i;
  3.1770 +		invalidate_device(MKDEV(hba[ctlr]->major, minor), 1);
  3.1771 +		gdev->part[minor].start_sect = 0;
  3.1772 +		gdev->part[minor].nr_sects = 0;
  3.1773 +
  3.1774 +		/* reset the blocksize so we can read the partition table */
  3.1775 +		blksize_size[hba[ctlr]->major][minor] = block_size;
  3.1776 +		hba[ctlr]->hardsizes[minor] = block_size;
  3.1777 +	}
  3.1778 +
  3.1779 +	/* setup partitions per disk */
  3.1780 +	grok_partitions(gdev, logvol, MAX_PART,
  3.1781 +			hba[ctlr]->drv[logvol].nr_blocks );
  3.1782 +
  3.1783 +	kfree(size_buff);
  3.1784 +	kfree(inq_buff);
  3.1785 +	return 0;
  3.1786 +}
  3.1787 +/*
  3.1788 + *   Wait polling for a command to complete.
  3.1789 + *   The memory mapped FIFO is polled for the completion.
  3.1790 + *   Used only at init time, interrupts disabled.
  3.1791 + */
  3.1792 +static unsigned long pollcomplete(int ctlr)
  3.1793 +{
  3.1794 +	unsigned long done;
  3.1795 +	int i;
  3.1796 +
  3.1797 +	/* Wait (up to 20 seconds) for a command to complete */
  3.1798 +
  3.1799 +        for (i = 20 * HZ; i > 0; i--) {
  3.1800 +		done = hba[ctlr]->access.command_completed(hba[ctlr]);
  3.1801 +		if (done == FIFO_EMPTY) {
  3.1802 +			set_current_state(TASK_UNINTERRUPTIBLE);
  3.1803 +			schedule_timeout(1);
  3.1804 +		} else
  3.1805 +			return done;
  3.1806 +	}
  3.1807 +	/* Invalid address to tell caller we ran out of time */
  3.1808 +	return 1;
  3.1809 +}
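pollcomplete() relies on a sentinel return value: anything other than 1 is treated as a completed command tag, and 1 means the 20-second budget expired (sendcmd() checks for this with `complete != 1`). A small userspace sketch of the same bounded-poll pattern; the FIFO_EMPTY value and the stand-in completion source are assumptions of the sketch, not the driver's definitions:

	#include <stdio.h>

	#define FIFO_EMPTY   0xffffffffUL  /* assumed value for the sketch */
	#define POLL_TIMEOUT 1UL           /* sentinel: never a valid command address */

	/* Stand-in for access.command_completed(): returns FIFO_EMPTY until a
	 * fake completion becomes available. */
	static unsigned long fake_completed(int *countdown)
	{
		return (--*countdown > 0) ? FIFO_EMPTY : 0x1000UL;
	}

	/* Bounded poll mirroring pollcomplete(): spin a fixed number of
	 * iterations and return the sentinel 1 if nothing ever completes. */
	static unsigned long poll_for_completion(int *src, int max_iters)
	{
		unsigned long done;
		int i;

		for (i = max_iters; i > 0; i--) {
			done = fake_completed(src);
			if (done != FIFO_EMPTY)
				return done;
			/* the driver sleeps one tick here via schedule_timeout(1) */
		}
		return POLL_TIMEOUT;
	}

	int main(void)
	{
		int countdown = 5;

		printf("completion tag = %#lx\n",
		       poll_for_completion(&countdown, 100));
		return 0;
	}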
  3.1810 +/*
  3.1811 + * Send a command to the controller, and wait for it to complete.  
  3.1812 + * Only used at init time. 
  3.1813 + */
  3.1814 +static int sendcmd(
  3.1815 +	__u8	cmd,
  3.1816 +	int	ctlr,
  3.1817 +	void	*buff,
  3.1818 +	size_t	size,
  3.1819 +	unsigned int use_unit_num, /* 0: address the controller,
  3.1820 +				      1: address logical volume log_unit,
  3.1821 +				      2: periph device address is scsi3addr */
  3.1822 +	unsigned int log_unit,
  3.1823 +	__u8	page_code,
  3.1824 +	unsigned char *scsi3addr)
  3.1825 +{
  3.1826 +	CommandList_struct *c;
  3.1827 +	int i;
  3.1828 +	unsigned long complete;
  3.1829 +	ctlr_info_t *info_p= hba[ctlr];
  3.1830 +	u64bit buff_dma_handle;
  3.1831 +	int status = IO_OK;
  3.1832 +
  3.1833 +	c = cmd_alloc(info_p, 1);
  3.1834 +	if (c == NULL) {
  3.1835 +		printk(KERN_WARNING "cciss: unable to get memory");
  3.1836 +		return IO_ERROR;
  3.1837 +	}
  3.1838 +	/* Fill in Command Header */
  3.1839 +	c->Header.ReplyQueue = 0;  /* unused in simple mode */
  3.1840 +	if (buff != NULL) { 	/* buffer to fill */
  3.1841 +		c->Header.SGList = 1;
  3.1842 +		c->Header.SGTotal= 1;
  3.1843 +	} else	{	/* no buffers to fill  */
  3.1844 +		c->Header.SGList = 0;
  3.1845 +                c->Header.SGTotal= 0;
  3.1846 +	}
  3.1847 +	c->Header.Tag.lower = c->busaddr;  /* use the kernel address */
  3.1848 +					   /* the cmd block for tag */
  3.1849 +	/* Fill in Request block */
  3.1850 +	switch (cmd) {
  3.1851 +		case  CISS_INQUIRY:
   3.1852 +			/* If use_unit_num is 0 this is addressed to the
   3.1853 +				controller, so it's a physical command:
   3.1854 +				mode = 0, target = 0,
   3.1855 +				and we have nothing to write.
   3.1856 +				Otherwise, if use_unit_num == 1,
   3.1857 +				mode = 1 (volume set addressing), target = LUNID;
   3.1858 +				if use_unit_num == 2,
   3.1859 +				mode = 0 (periph dev addr), target = scsi3addr.
   3.1860 +			*/
  3.1861 +			if (use_unit_num == 1) {
  3.1862 +				c->Header.LUN.LogDev.VolId=
  3.1863 +                                	hba[ctlr]->drv[log_unit].LunID;
  3.1864 +                        	c->Header.LUN.LogDev.Mode = 1;
  3.1865 +			}
  3.1866 +			else if (use_unit_num == 2) {
  3.1867 +				memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
  3.1868 +				c->Header.LUN.LogDev.Mode = 0; 
  3.1869 +							/* phys dev addr */
  3.1870 +			}
  3.1871 +
  3.1872 +			/* are we trying to read a vital product page */
  3.1873 +			if (page_code != 0) {
  3.1874 +				c->Request.CDB[1] = 0x01;
  3.1875 +				c->Request.CDB[2] = page_code;
  3.1876 +			}
  3.1877 +			c->Request.CDBLen = 6;
  3.1878 +			c->Request.Type.Type =  TYPE_CMD; /* It is a command. */
  3.1879 +			c->Request.Type.Attribute = ATTR_SIMPLE;  
  3.1880 +			c->Request.Type.Direction = XFER_READ; /* Read */
  3.1881 +			c->Request.Timeout = 0; /* Don't time out */
  3.1882 +			c->Request.CDB[0] =  CISS_INQUIRY;
  3.1883 +			c->Request.CDB[4] = size  & 0xFF;  
  3.1884 +		break;
  3.1885 +		case CISS_REPORT_LOG:
  3.1886 +		case CISS_REPORT_PHYS:
   3.1887 +                        /* Talking to the controller, so it's a physical command:
   3.1888 +                                mode = 0, target = 0.
  3.1889 +                                So we have nothing to write.
  3.1890 +                        */
  3.1891 +                        c->Request.CDBLen = 12;
  3.1892 +                        c->Request.Type.Type =  TYPE_CMD; /* It is a command. */
  3.1893 +                        c->Request.Type.Attribute = ATTR_SIMPLE; 
  3.1894 +                        c->Request.Type.Direction = XFER_READ; /* Read */
  3.1895 +                        c->Request.Timeout = 0; /* Don't time out */
  3.1896 +			c->Request.CDB[0] = cmd;
  3.1897 +                        c->Request.CDB[6] = (size >> 24) & 0xFF;  /* MSB */
  3.1898 +                        c->Request.CDB[7] = (size >> 16) & 0xFF;
  3.1899 +                        c->Request.CDB[8] = (size >> 8) & 0xFF;
  3.1900 +                        c->Request.CDB[9] = size & 0xFF;
  3.1901 +                break;
  3.1902 +
  3.1903 +		case CCISS_READ_CAPACITY:
  3.1904 +			c->Header.LUN.LogDev.VolId= 
  3.1905 +				hba[ctlr]->drv[log_unit].LunID;
  3.1906 +			c->Header.LUN.LogDev.Mode = 1;
  3.1907 +			c->Request.CDBLen = 10;
  3.1908 +                        c->Request.Type.Type =  TYPE_CMD; /* It is a command. */
  3.1909 +                        c->Request.Type.Attribute = ATTR_SIMPLE; 
  3.1910 +                        c->Request.Type.Direction = XFER_READ; /* Read */
  3.1911 +                        c->Request.Timeout = 0; /* Don't time out */
  3.1912 +                        c->Request.CDB[0] = CCISS_READ_CAPACITY;
  3.1913 +		break;
  3.1914 +		case CCISS_CACHE_FLUSH:
  3.1915 +			c->Request.CDBLen = 12;
  3.1916 +			c->Request.Type.Type =  TYPE_CMD; /* It is a command. */
  3.1917 +			c->Request.Type.Attribute = ATTR_SIMPLE;
  3.1918 +			c->Request.Type.Direction = XFER_WRITE; /* No data */
  3.1919 +			c->Request.Timeout = 0; /* Don't time out */
  3.1920 +			c->Request.CDB[0] = BMIC_WRITE;  /* BMIC Passthru */
  3.1921 +			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
  3.1922 +		break;
  3.1923 +		default:
  3.1924 +			printk(KERN_WARNING
   3.1925 +				"cciss: unknown command 0x%x attempted\n",
  3.1926 +				  cmd);
  3.1927 +			cmd_free(info_p, c, 1);
  3.1928 +			return IO_ERROR;
  3.1929 +	};
  3.1930 +	/* Fill in the scatter gather information */
  3.1931 +	if (size > 0) {
  3.1932 +		buff_dma_handle.val = (__u64) pci_map_single( info_p->pdev, 
  3.1933 +			buff, size, PCI_DMA_BIDIRECTIONAL);
  3.1934 +		c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
  3.1935 +		c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
  3.1936 +		c->SG[0].Len = size;
  3.1937 +		c->SG[0].Ext = 0;  /* we are not chaining */
  3.1938 +	}
  3.1939 +resend_cmd1:
  3.1940 +	/*
  3.1941 +         * Disable interrupt
  3.1942 +         */
  3.1943 +#ifdef CCISS_DEBUG
  3.1944 +	printk(KERN_DEBUG "cciss: turning intr off\n");
  3.1945 +#endif /* CCISS_DEBUG */ 
  3.1946 +        info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
  3.1947 +	
  3.1948 +	/* Make sure there is room in the command FIFO */
  3.1949 +        /* Actually it should be completely empty at this time. */
  3.1950 +        for (i = 200000; i > 0; i--) {
  3.1951 +		/* if fifo isn't full go */
  3.1952 +                if (!(info_p->access.fifo_full(info_p))) {
  3.1953 +			
  3.1954 +                        break;
  3.1955 +                }
  3.1956 +                udelay(10);
  3.1957 +                printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
  3.1958 +                        " waiting!\n", ctlr);
  3.1959 +        }
  3.1960 +        /*
  3.1961 +         * Send the cmd
  3.1962 +         */
  3.1963 +        info_p->access.submit_command(info_p, c);
  3.1964 +        complete = pollcomplete(ctlr);
  3.1965 +
  3.1966 +#ifdef CCISS_DEBUG
  3.1967 +	printk(KERN_DEBUG "cciss: command completed\n");
  3.1968 +#endif /* CCISS_DEBUG */
  3.1969 +
  3.1970 +	if (complete != 1) {
  3.1971 +		if ( (complete & CISS_ERROR_BIT)
  3.1972 +		     && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
   3.1973 +			/* if data overrun or underrun on Report command
  3.1974 +				ignore it 
  3.1975 +			*/
  3.1976 +			if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
  3.1977 +			     (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
  3.1978 +			     (c->Request.CDB[0] == CISS_INQUIRY)) &&
  3.1979 +				((c->err_info->CommandStatus == 
  3.1980 +					CMD_DATA_OVERRUN) || 
  3.1981 +				 (c->err_info->CommandStatus == 
  3.1982 +					CMD_DATA_UNDERRUN)
  3.1983 +			 	)) {
  3.1984 +				complete = c->busaddr;
  3.1985 +			} else {
  3.1986 +				if (c->err_info->CommandStatus == 
  3.1987 +						CMD_UNSOLICITED_ABORT) {
  3.1988 +					printk(KERN_WARNING "cciss: "
   3.1989 +						"cmd %p aborted due "
  3.1990 +					"to an unsolicited abort \n", c); 
  3.1991 +					if (c->retry_count < MAX_CMD_RETRIES) {
  3.1992 +						printk(KERN_WARNING
  3.1993 +						   "retrying cmd\n");
  3.1994 +						c->retry_count++;
  3.1995 +						/* erase the old error */
  3.1996 +						/* information */
  3.1997 +						memset(c->err_info, 0, 
  3.1998 +						   sizeof(ErrorInfo_struct));
  3.1999 +						goto resend_cmd1;
  3.2000 +					} else {
  3.2001 +						printk(KERN_WARNING
   3.2002 +						   "retried too many times\n");
  3.2003 +						status = IO_ERROR;
  3.2004 +						goto cleanup1;
  3.2005 +					}
  3.2006 +				}
  3.2007 +				printk(KERN_WARNING "cciss cciss%d: sendcmd"
  3.2008 +				" Error %x \n", ctlr, 
  3.2009 +					c->err_info->CommandStatus); 
  3.2010 +				printk(KERN_WARNING "cciss cciss%d: sendcmd"
  3.2011 +				" offensive info\n"
  3.2012 +				"  size %x\n   num %x   value %x\n", ctlr,
  3.2013 +				  c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
  3.2014 +				  c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
  3.2015 +				  c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
  3.2016 +				status = IO_ERROR;
  3.2017 +				goto cleanup1;
  3.2018 +			}
  3.2019 +		}
  3.2020 +                if (complete != c->busaddr) {
  3.2021 +                        printk( KERN_WARNING "cciss cciss%d: SendCmd "
  3.2022 +                      "Invalid command list address returned! (%lx)\n",
  3.2023 +                                ctlr, complete);
  3.2024 +                        status = IO_ERROR;
  3.2025 +			goto cleanup1;
  3.2026 +                }
  3.2027 +        } else {
  3.2028 +                printk( KERN_WARNING
   3.2029 +                        "cciss cciss%d: SendCmd timed out, "
  3.2030 +                        "No command list address returned!\n",
  3.2031 +                        ctlr);
  3.2032 +                status = IO_ERROR;
  3.2033 +        }
  3.2034 +		
  3.2035 +cleanup1:	
  3.2036 +	/* unlock the data buffer from DMA */
  3.2037 +	pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
  3.2038 +                                size, PCI_DMA_BIDIRECTIONAL);
  3.2039 +	cmd_free(info_p, c, 1);
  3.2040 +        return status;
  3.2041 +} 
  3.2042 +/*
  3.2043 + * Map (physical) PCI mem into (virtual) kernel space
  3.2044 + */
  3.2045 +static ulong remap_pci_mem(ulong base, ulong size)
  3.2046 +{
  3.2047 +        ulong page_base        = ((ulong) base) & PAGE_MASK;
  3.2048 +        ulong page_offs        = ((ulong) base) - page_base;
  3.2049 +        ulong page_remapped    = (ulong) ioremap(page_base, page_offs+size);
  3.2050 +
  3.2051 +        return (ulong) (page_remapped ? (page_remapped + page_offs) : 0UL);
  3.2052 +}
  3.2053 +
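remap_pci_mem() has to hand ioremap() a page-aligned physical base, so it rounds the address down and adds the lost intra-page offset back onto the returned virtual address. The arithmetic in isolation, with ioremap() replaced by a stand-in and a made-up BAR address:

	#include <stdio.h>

	#define SKETCH_PAGE_SIZE 4096UL
	#define SKETCH_PAGE_MASK (~(SKETCH_PAGE_SIZE - 1))

	/* Stand-in for ioremap(): just echo the page-aligned base so the
	 * offset arithmetic can be shown without kernel mappings. */
	static unsigned long fake_ioremap(unsigned long page_base, unsigned long len)
	{
		(void)len;
		return page_base;
	}

	/* Same arithmetic as remap_pci_mem(): map from a page boundary and
	 * add the intra-page offset back onto the returned address. */
	static unsigned long sketch_remap(unsigned long base, unsigned long size)
	{
		unsigned long page_base = base & SKETCH_PAGE_MASK;
		unsigned long page_offs = base - page_base;
		unsigned long remapped  = fake_ioremap(page_base, page_offs + size);

		return remapped ? remapped + page_offs : 0UL;
	}

	int main(void)
	{
		/* a made-up, non-page-aligned BAR address */
		printf("%#lx\n", sketch_remap(0xfebf1080UL, 200));
		return 0;
	}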
  3.2054 +/*
  3.2055 + * Enqueuing and dequeuing functions for cmdlists.
  3.2056 + */
  3.2057 +static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
  3.2058 +{
  3.2059 +        if (*Qptr == NULL) {
  3.2060 +                *Qptr = c;
  3.2061 +                c->next = c->prev = c;
  3.2062 +        } else {
  3.2063 +                c->prev = (*Qptr)->prev;
  3.2064 +                c->next = (*Qptr);
  3.2065 +                (*Qptr)->prev->next = c;
  3.2066 +                (*Qptr)->prev = c;
  3.2067 +        }
  3.2068 +}
  3.2069 +
  3.2070 +static inline CommandList_struct *removeQ(CommandList_struct **Qptr, 
  3.2071 +						CommandList_struct *c)
  3.2072 +{
  3.2073 +        if (c && c->next != c) {
  3.2074 +                if (*Qptr == c) *Qptr = c->next;
  3.2075 +                c->prev->next = c->next;
  3.2076 +                c->next->prev = c->prev;
  3.2077 +        } else {
  3.2078 +                *Qptr = NULL;
  3.2079 +        }
  3.2080 +        return c;
  3.2081 +}
  3.2082 +
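addQ()/removeQ() keep each per-controller queue as a circular doubly-linked list: the queue pointer names one element, a single-element queue points at itself, and removing the last element NULLs the queue. A self-contained userspace model with the same pointer manipulation; the struct and its tag field are invented for the sketch:

	#include <stdio.h>

	struct cmd {
		int tag;
		struct cmd *next, *prev;
	};

	/* Insert c at the tail of the circular queue *q. */
	static void addQ(struct cmd **q, struct cmd *c)
	{
		if (*q == NULL) {
			*q = c;
			c->next = c->prev = c;
		} else {
			c->prev = (*q)->prev;
			c->next = *q;
			(*q)->prev->next = c;
			(*q)->prev = c;
		}
	}

	/* Unlink c from the circular queue; an empty queue becomes NULL. */
	static struct cmd *removeQ(struct cmd **q, struct cmd *c)
	{
		if (c && c->next != c) {
			if (*q == c)
				*q = c->next;
			c->prev->next = c->next;
			c->next->prev = c->prev;
		} else {
			*q = NULL;	/* last element removed */
		}
		return c;
	}

	int main(void)
	{
		struct cmd a = { 1 }, b = { 2 };
		struct cmd *q = NULL;

		addQ(&q, &a);
		addQ(&q, &b);
		while (q)
			printf("completed tag %d\n", removeQ(&q, q)->tag);
		return 0;
	}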
  3.2083 +/* 
   3.2084 + * Takes jobs off the Q and sends them to the hardware, then puts them on
   3.2085 + * the completion Q to wait for completion.
  3.2086 + */ 
  3.2087 +static void start_io( ctlr_info_t *h)
  3.2088 +{
  3.2089 +	CommandList_struct *c;
  3.2090 +	
  3.2091 +	while(( c = h->reqQ) != NULL ) {
  3.2092 +		/* can't do anything if fifo is full */
  3.2093 +		if ((h->access.fifo_full(h))) {
  3.2094 +			printk(KERN_WARNING "cciss: fifo full \n");
  3.2095 +			return;
  3.2096 +		}
   3.2097 +		/* Get the first entry from the Request Q */
  3.2098 +		removeQ(&(h->reqQ), c);
  3.2099 +		h->Qdepth--;
  3.2100 +	
  3.2101 +		/* Tell the controller execute command */ 
  3.2102 +		h->access.submit_command(h, c);
  3.2103 +		
  3.2104 +		/* Put job onto the completed Q */ 
  3.2105 +		addQ (&(h->cmpQ), c); 
  3.2106 +	}
  3.2107 +}
  3.2108 +
  3.2109 +static inline void complete_buffers( struct buffer_head *bh, int status)
  3.2110 +{
  3.2111 +	struct buffer_head *xbh;
  3.2112 +	
  3.2113 +	while(bh) {
  3.2114 +		xbh = bh->b_reqnext; 
  3.2115 +		bh->b_reqnext = NULL; 
  3.2116 +		blk_finished_io(bh->b_size >> 9);
  3.2117 +		bh->b_end_io(bh, status);
  3.2118 +		bh = xbh;
  3.2119 +	}
  3.2120 +} 
  3.2121 +/* This code assumes io_request_lock is already held */
  3.2122 +/* Zeros out the error record and then resends the command back */
  3.2123 +/* to the controller */ 
  3.2124 +static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
  3.2125 +{
  3.2126 +	/* erase the old error information */
  3.2127 +	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
  3.2128 +
  3.2129 +	/* add it to software queue and then send it to the controller */
  3.2130 +	addQ(&(h->reqQ),c);
  3.2131 +	h->Qdepth++;
  3.2132 +	if (h->Qdepth > h->maxQsinceinit)
  3.2133 +		h->maxQsinceinit = h->Qdepth; 
  3.2134 +
  3.2135 +	start_io(h);
  3.2136 +}
  3.2137 +/* checks the status of the job and calls complete buffers to mark all 
  3.2138 + * buffers for the completed job. 
  3.2139 + */ 
  3.2140 +static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd, 
  3.2141 +		int timeout)
  3.2142 +{
  3.2143 +	int status = 1;
  3.2144 +	int retry_cmd = 0;
  3.2145 +	int i, ddir;
  3.2146 +	u64bit temp64;
  3.2147 +		
  3.2148 +	if (timeout)
  3.2149 +		status = 0; 
  3.2150 +
  3.2151 +	if (cmd->err_info->CommandStatus != 0) { 
  3.2152 +		/* an error has occurred */ 
  3.2153 +		switch (cmd->err_info->CommandStatus) {
  3.2154 +			unsigned char sense_key;
  3.2155 +			case CMD_TARGET_STATUS:
  3.2156 +				status = 0;
  3.2157 +			
  3.2158 +				if (cmd->err_info->ScsiStatus == 0x02) {
  3.2159 +					printk(KERN_WARNING "cciss: cmd %p "
  3.2160 +						"has CHECK CONDITION,"
  3.2161 +						" sense key = 0x%x\n", cmd,
  3.2162 +						cmd->err_info->SenseInfo[2]);
  3.2163 +					/* check the sense key */
  3.2164 +					sense_key = 0xf & 
  3.2165 +						cmd->err_info->SenseInfo[2];
  3.2166 +					/* recovered error */
  3.2167 +					if ( sense_key == 0x1)
  3.2168 +						status = 1;
  3.2169 +				} else {
  3.2170 +					printk(KERN_WARNING "cciss: cmd %p "
  3.2171 +						"has SCSI Status 0x%x\n",
  3.2172 +						cmd, cmd->err_info->ScsiStatus);
  3.2173 +				}
  3.2174 +			break;
  3.2175 +			case CMD_DATA_UNDERRUN:
  3.2176 +				printk(KERN_WARNING "cciss: cmd %p has"
  3.2177 +					" completed with data underrun "
  3.2178 +					"reported\n", cmd);
  3.2179 +			break;
  3.2180 +			case CMD_DATA_OVERRUN:
  3.2181 +				printk(KERN_WARNING "cciss: cmd %p has"
  3.2182 +					" completed with data overrun "
  3.2183 +					"reported\n", cmd);
  3.2184 +			break;
  3.2185 +			case CMD_INVALID:
  3.2186 +				printk(KERN_WARNING "cciss: cmd %p is "
  3.2187 +					"reported invalid\n", cmd);
  3.2188 +				status = 0;
  3.2189 +			break;
  3.2190 +			case CMD_PROTOCOL_ERR:
  3.2191 +                                printk(KERN_WARNING "cciss: cmd %p has "
  3.2192 +					"protocol error \n", cmd);
  3.2193 +                                status = 0;
  3.2194 +                        break;
  3.2195 +			case CMD_HARDWARE_ERR:
  3.2196 +                                printk(KERN_WARNING "cciss: cmd %p had " 
  3.2197 +                                        " hardware error\n", cmd);
  3.2198 +                                status = 0;
  3.2199 +                        break;
  3.2200 +			case CMD_CONNECTION_LOST:
  3.2201 +				printk(KERN_WARNING "cciss: cmd %p had "
  3.2202 +					"connection lost\n", cmd);
  3.2203 +				status=0;
  3.2204 +			break;
  3.2205 +			case CMD_ABORTED:
  3.2206 +				printk(KERN_WARNING "cciss: cmd %p was "
  3.2207 +					"aborted\n", cmd);
  3.2208 +				status=0;
  3.2209 +			break;
  3.2210 +			case CMD_ABORT_FAILED:
  3.2211 +				printk(KERN_WARNING "cciss: cmd %p reports "
  3.2212 +					"abort failed\n", cmd);
  3.2213 +				status=0;
  3.2214 +			break;
  3.2215 +			case CMD_UNSOLICITED_ABORT:
   3.2216 +				printk(KERN_WARNING "cciss: cmd %p aborted due "
  3.2217 +					"to an unsolicited abort \n",
  3.2218 +				       	cmd);
  3.2219 +				if (cmd->retry_count < MAX_CMD_RETRIES) {
  3.2220 +					retry_cmd=1;
  3.2221 +					printk(KERN_WARNING
  3.2222 +						"retrying cmd\n");
  3.2223 +					cmd->retry_count++;
  3.2224 +				} else {
  3.2225 +					printk(KERN_WARNING
   3.2226 +					"retried too many times\n");
  3.2227 +				}
  3.2228 +				status=0;
  3.2229 +			break;
  3.2230 +			case CMD_TIMEOUT:
   3.2231 +				printk(KERN_WARNING "cciss: cmd %p timed out\n",
  3.2232 +					cmd);
  3.2233 +				status=0;
  3.2234 +			break;
  3.2235 +			default:
  3.2236 +				printk(KERN_WARNING "cciss: cmd %p returned "
  3.2237 +					"unknown status %x\n", cmd, 
  3.2238 +						cmd->err_info->CommandStatus); 
  3.2239 +				status=0;
  3.2240 +		}
  3.2241 +	}
  3.2242 +	/* We need to return this command */
  3.2243 +	if (retry_cmd) {
  3.2244 +		resend_cciss_cmd(h,cmd);
  3.2245 +		return;
  3.2246 +	}	
  3.2247 +	/* command did not need to be retried */
  3.2248 +	/* unmap the DMA mapping for all the scatter gather elements */
  3.2249 +	if (cmd->Request.Type.Direction == XFER_READ)
  3.2250 +		ddir = PCI_DMA_FROMDEVICE;
  3.2251 +	else
  3.2252 +		ddir = PCI_DMA_TODEVICE;
  3.2253 +	for(i=0; i<cmd->Header.SGList; i++) {
  3.2254 +		temp64.val32.lower = cmd->SG[i].Addr.lower;
  3.2255 +		temp64.val32.upper = cmd->SG[i].Addr.upper;
  3.2256 +		pci_unmap_page(hba[cmd->ctlr]->pdev,
  3.2257 +			temp64.val, cmd->SG[i].Len, ddir);
  3.2258 +	}
  3.2259 +	complete_buffers(cmd->rq->bh, status);
  3.2260 +#ifdef CCISS_DEBUG
  3.2261 +	printk("Done with %p\n", cmd->rq);
  3.2262 +#endif /* CCISS_DEBUG */ 
  3.2263 +	end_that_request_last(cmd->rq);
  3.2264 +	cmd_free(h,cmd,1);
  3.2265 +}
  3.2266 +
  3.2267 +
  3.2268 +static inline int cpq_new_segment(request_queue_t *q, struct request *rq,
  3.2269 +                                  int max_segments)
  3.2270 +{
  3.2271 +        if (rq->nr_segments < MAXSGENTRIES) {
  3.2272 +                rq->nr_segments++;
  3.2273 +                return 1;
  3.2274 +        }
  3.2275 +        return 0;
  3.2276 +}
  3.2277 +
  3.2278 +static int cpq_back_merge_fn(request_queue_t *q, struct request *rq,
  3.2279 +                             struct buffer_head *bh, int max_segments)
  3.2280 +{
  3.2281 +	if (blk_seg_merge_ok(rq->bhtail, bh))	
  3.2282 +                return 1;
  3.2283 +        return cpq_new_segment(q, rq, max_segments);
  3.2284 +}
  3.2285 +
  3.2286 +static int cpq_front_merge_fn(request_queue_t *q, struct request *rq,
  3.2287 +                             struct buffer_head *bh, int max_segments)
  3.2288 +{
  3.2289 +	if (blk_seg_merge_ok(bh, rq->bh))
  3.2290 +                return 1;
  3.2291 +        return cpq_new_segment(q, rq, max_segments);
  3.2292 +}
  3.2293 +
  3.2294 +static int cpq_merge_requests_fn(request_queue_t *q, struct request *rq,
  3.2295 +                                 struct request *nxt, int max_segments)
  3.2296 +{
  3.2297 +        int total_segments = rq->nr_segments + nxt->nr_segments;
  3.2298 +
  3.2299 +	if (blk_seg_merge_ok(rq->bhtail, nxt->bh))
  3.2300 +                total_segments--;
  3.2301 +
  3.2302 +        if (total_segments > MAXSGENTRIES)
  3.2303 +                return 0;
  3.2304 +
  3.2305 +        rq->nr_segments = total_segments;
  3.2306 +        return 1;
  3.2307 +}
  3.2308 +
  3.2309 +/* 
  3.2310 + * Get a request and submit it to the controller. 
  3.2311 + * Currently we do one request at a time.  Ideally we would like to send
  3.2312 + * everything to the controller on the first call, but there is a danger
   3.2313 + * of holding the io_request_lock for too long.
  3.2314 + */
  3.2315 +static void do_cciss_request(request_queue_t *q)
  3.2316 +{
  3.2317 +	ctlr_info_t *h= q->queuedata; 
  3.2318 +	CommandList_struct *c;
  3.2319 +	int log_unit, start_blk, seg;
  3.2320 +	unsigned long long lastdataend;
  3.2321 +	struct buffer_head *bh;
  3.2322 +	struct list_head *queue_head = &q->queue_head;
  3.2323 +	struct request *creq;
  3.2324 +	u64bit temp64;
  3.2325 +	struct scatterlist tmp_sg[MAXSGENTRIES];
  3.2326 +	int i, ddir;
  3.2327 +
  3.2328 +	if (q->plugged)
  3.2329 +		goto startio;
  3.2330 +
  3.2331 +next:
  3.2332 +	if (list_empty(queue_head))
  3.2333 +		goto startio;
  3.2334 +
  3.2335 +	creq =	blkdev_entry_next_request(queue_head); 
  3.2336 +	if (creq->nr_segments > MAXSGENTRIES)
  3.2337 +                BUG();
  3.2338 +
  3.2339 +	if( h->ctlr != map_major_to_ctlr[MAJOR(creq->rq_dev)] ) {
  3.2340 +                printk(KERN_WARNING "doreq cmd for %d, %x at %p\n",
  3.2341 +                                h->ctlr, creq->rq_dev, creq);
  3.2342 +                blkdev_dequeue_request(creq);
  3.2343 +                complete_buffers(creq->bh, 0);
  3.2344 +		end_that_request_last(creq);
  3.2345 +		goto startio;
  3.2346 +        }
  3.2347 +
  3.2348 +	/* make sure controller is alive. */
  3.2349 +	if (!CTLR_IS_ALIVE(h)) {
  3.2350 +                printk(KERN_WARNING "cciss%d: I/O quit ", h->ctlr);
  3.2351 +                blkdev_dequeue_request(creq);
  3.2352 +                complete_buffers(creq->bh, 0);
  3.2353 +		end_that_request_last(creq);
  3.2354 +		return;
  3.2355 +	}
  3.2356 +
  3.2357 +	if (( c = cmd_alloc(h, 1)) == NULL)
  3.2358 +		goto startio;
  3.2359 +
  3.2360 +	blkdev_dequeue_request(creq);
  3.2361 +
  3.2362 +	spin_unlock_irq(&io_request_lock);
  3.2363 +
  3.2364 +	c->cmd_type = CMD_RWREQ;      
  3.2365 +	c->rq = creq;
  3.2366 +	bh = creq->bh;
  3.2367 +	
  3.2368 +	/* fill in the request */ 
  3.2369 +	log_unit = MINOR(creq->rq_dev) >> NWD_SHIFT; 
  3.2370 +	c->Header.ReplyQueue = 0;  /* unused in simple mode */
  3.2371 +	c->Header.Tag.lower = c->busaddr;  /* use the physical address */
  3.2372 +					/* the cmd block for tag */
  3.2373 +	c->Header.LUN.LogDev.VolId= hba[h->ctlr]->drv[log_unit].LunID;
  3.2374 +	c->Header.LUN.LogDev.Mode = 1;
  3.2375 +	c->Request.CDBLen = 10; /* 12 byte commands not in FW yet. */
  3.2376 +	c->Request.Type.Type =  TYPE_CMD; /* It is a command.  */
  3.2377 +	c->Request.Type.Attribute = ATTR_SIMPLE; 
  3.2378 +	c->Request.Type.Direction = 
  3.2379 +		(creq->cmd == READ) ? XFER_READ: XFER_WRITE; 
  3.2380 +	c->Request.Timeout = 0; /* Don't time out */
  3.2381 +	c->Request.CDB[0] = (creq->cmd == READ) ? CCISS_READ : CCISS_WRITE;
  3.2382 +	start_blk = hba[h->ctlr]->hd[MINOR(creq->rq_dev)].start_sect + creq->sector;
  3.2383 +#ifdef CCISS_DEBUG
  3.2384 +	if (bh == NULL)
  3.2385 +		panic("cciss: bh== NULL?");
  3.2386 +	printk(KERN_DEBUG "cciss: sector =%d nr_sectors=%d\n",(int) creq->sector,
  3.2387 +		(int) creq->nr_sectors);	
  3.2388 +#endif /* CCISS_DEBUG */
  3.2389 +	seg = 0;
  3.2390 +	lastdataend = ~0ULL;
  3.2391 +	while(bh) {
  3.2392 +//		if (bh_phys(bh) == lastdataend)
  3.2393 +		if ((page_to_phys(bh->b_page) + bh_offset(bh))== lastdataend)
  3.2394 +		{  /* tack it on to the last segment */
  3.2395 +			tmp_sg[seg-1].length +=bh->b_size;
  3.2396 +			lastdataend += bh->b_size;
  3.2397 +		} else {
  3.2398 +			if (seg == MAXSGENTRIES)
  3.2399 +				BUG();
  3.2400 +			tmp_sg[seg].page = bh->b_page;
  3.2401 +			tmp_sg[seg].length = bh->b_size;
  3.2402 +			tmp_sg[seg].offset = bh_offset(bh);
  3.2403 +			lastdataend = bh_phys(bh) + bh->b_size;
  3.2404 +			seg++;
  3.2405 +		}
  3.2406 +		bh = bh->b_reqnext;
  3.2407 +	}
  3.2408 +
  3.2409 +	/* get the DMA records for the setup */ 
  3.2410 +	if (c->Request.Type.Direction == XFER_READ)
  3.2411 +		ddir = PCI_DMA_FROMDEVICE;
  3.2412 +	else
  3.2413 +		ddir = PCI_DMA_TODEVICE;
  3.2414 +	for (i=0; i<seg; i++) {
  3.2415 +		c->SG[i].Len = tmp_sg[i].length;
  3.2416 +		temp64.val = pci_map_page(h->pdev, tmp_sg[i].page,
  3.2417 +			    tmp_sg[i].offset, tmp_sg[i].length, ddir);
  3.2418 +		c->SG[i].Addr.lower = temp64.val32.lower;
  3.2419 +                c->SG[i].Addr.upper = temp64.val32.upper;
  3.2420 +                c->SG[i].Ext = 0;  /* we are not chaining */
  3.2421 +	}
  3.2422 +	/* track how many SG entries we are using */ 
  3.2423 +	if (seg > h->maxSG)
  3.2424 +		h->maxSG = seg; 
  3.2425 +
  3.2426 +#ifdef CCISS_DEBUG
   3.2427 +	printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", (int)creq->nr_sectors, seg);
  3.2428 +#endif /* CCISS_DEBUG */
  3.2429 +
  3.2430 +	c->Header.SGList = c->Header.SGTotal = seg;
  3.2431 +	c->Request.CDB[1]= 0;
  3.2432 +	c->Request.CDB[2]= (start_blk >> 24) & 0xff;	/* MSB */
  3.2433 +	c->Request.CDB[3]= (start_blk >> 16) & 0xff;
  3.2434 +	c->Request.CDB[4]= (start_blk >>  8) & 0xff;
  3.2435 +	c->Request.CDB[5]= start_blk & 0xff;
  3.2436 +	c->Request.CDB[6]= 0; /* (sect >> 24) & 0xff; MSB */
  3.2437 +	c->Request.CDB[7]= (creq->nr_sectors >>  8) & 0xff; 
  3.2438 +	c->Request.CDB[8]= creq->nr_sectors & 0xff; 
  3.2439 +	c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
  3.2440 +
  3.2441 +	spin_lock_irq(&io_request_lock);
  3.2442 +
  3.2443 +	addQ(&(h->reqQ),c);
  3.2444 +	h->Qdepth++;
  3.2445 +	if (h->Qdepth > h->maxQsinceinit)
  3.2446 +		h->maxQsinceinit = h->Qdepth; 
  3.2447 +
  3.2448 +	goto next;
  3.2449 +
  3.2450 +startio:
  3.2451 +	start_io(h);
  3.2452 +}
  3.2453 +
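The buffer_head walk in do_cciss_request() folds physically contiguous buffers into the previous scatter-gather entry and starts a new entry otherwise, up to MAXSGENTRIES (the driver BUG()s past that). A simplified standalone model of that coalescing step; the buffer addresses and lengths are invented, and DMA mapping is omitted:

	#include <stdio.h>

	#define MAXSG 4

	struct buf { unsigned long phys; unsigned int len; };
	struct sg  { unsigned long phys; unsigned int len; };

	/* Merge physically contiguous buffers into scatter-gather entries,
	 * mirroring the lastdataend bookkeeping in do_cciss_request(). */
	static int build_sg(const struct buf *b, int n, struct sg *sg)
	{
		unsigned long lastdataend = ~0UL;
		int seg = 0, i;

		for (i = 0; i < n; i++) {
			if (b[i].phys == lastdataend) {
				sg[seg - 1].len += b[i].len;	/* tack onto last segment */
				lastdataend += b[i].len;
			} else {
				sg[seg].phys = b[i].phys;
				sg[seg].len  = b[i].len;
				lastdataend  = b[i].phys + b[i].len;
				seg++;
			}
		}
		return seg;
	}

	int main(void)
	{
		struct buf bufs[] = {
			{ 0x1000, 512 }, { 0x1200, 512 },	/* contiguous pair */
			{ 0x8000, 512 },			/* separate segment */
		};
		struct sg sg[MAXSG];
		int i, n = build_sg(bufs, 3, sg);

		for (i = 0; i < n; i++)
			printf("seg %d: phys=%#lx len=%u\n", i, sg[i].phys, sg[i].len);
		return 0;
	}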
  3.2454 +static void do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
  3.2455 +{
  3.2456 +	ctlr_info_t *h = dev_id;
  3.2457 +	CommandList_struct *c;
  3.2458 +	unsigned long flags;
  3.2459 +	__u32 a, a1;
  3.2460 +
  3.2461 +
  3.2462 +	/* Is this interrupt for us? */
  3.2463 +	if ((h->access.intr_pending(h) == 0) || (h->interrupts_enabled == 0))
  3.2464 +		return;
  3.2465 +
  3.2466 +	/*
  3.2467 +	 * If there are completed commands in the completion queue,
  3.2468 +	 * we had better do something about it.
  3.2469 +	 */
  3.2470 +	spin_lock_irqsave(&io_request_lock, flags);
  3.2471 +	while( h->access.intr_pending(h)) {
  3.2472 +		while((a = h->access.command_completed(h)) != FIFO_EMPTY) {
  3.2473 +			a1 = a;
  3.2474 +			a &= ~3;
  3.2475 +			if ((c = h->cmpQ) == NULL) {  
  3.2476 +				printk(KERN_WARNING "cciss: Completion of %08lx ignored\n", (unsigned long)a1);
  3.2477 +				continue;	
  3.2478 +			} 
  3.2479 +			while(c->busaddr != a) {
  3.2480 +				c = c->next;
  3.2481 +				if (c == h->cmpQ) 
  3.2482 +					break;
  3.2483 +			}
  3.2484 +			/*
  3.2485 +			 * If we've found the command, take it off the
  3.2486 +			 * completion Q and free it
  3.2487 +			 */
  3.2488 +			 if (c->busaddr == a) {
  3.2489 +				removeQ(&h->cmpQ, c);
  3.2490 +				if (c->cmd_type == CMD_RWREQ) {
  3.2491 +					complete_command(h, c, 0);
  3.2492 +				} else if (c->cmd_type == CMD_IOCTL_PEND) {
  3.2493 +#if 0
  3.2494 +					complete(c->waiting);
  3.2495 +#else
  3.2496 +					/* XXX SMH: use a flag to signal */
  3.2497 +					if(*(int *)(c->waiting) != 0) 
  3.2498 +						*(int *)(c->waiting) = 0; 
  3.2499 +#endif
  3.2500 +				}
  3.2501 +#				ifdef CONFIG_CISS_SCSI_TAPE
  3.2502 +				else if (c->cmd_type == CMD_SCSI) {
  3.2503 +					complete_scsi_command(c, 0, a1);
  3.2504 +				}
  3.2505 +#				endif
  3.2506 +				continue;
  3.2507 +			}
  3.2508 +		}
  3.2509 +	}
  3.2510 +	/*
  3.2511 +	 * See if we can queue up some more IO
  3.2512 +	 */
  3.2513 +	do_cciss_request(BLK_DEFAULT_QUEUE(h->major));
  3.2514 +	spin_unlock_irqrestore(&io_request_lock, flags);
  3.2515 +}
  3.2516 +/* 
   3.2517 + *  We cannot read the structure directly; for portability we must use
  3.2518 + *   the io functions.
  3.2519 + *   This is for debug only. 
  3.2520 + */
  3.2521 +#ifdef CCISS_DEBUG
  3.2522 +static void print_cfg_table( CfgTable_struct *tb)
  3.2523 +{
  3.2524 +	int i;
  3.2525 +	char temp_name[17];
  3.2526 +
  3.2527 +	printk("Controller Configuration information\n");
  3.2528 +	printk("------------------------------------\n");
  3.2529 +	for(i=0;i<4;i++)
  3.2530 +		temp_name[i] = readb(&(tb->Signature[i]));
  3.2531 +	temp_name[4]='\0';
  3.2532 +	printk("   Signature = %s\n", temp_name); 
  3.2533 +	printk("   Spec Number = %d\n", readl(&(tb->SpecValence)));
  3.2534 +	printk("   Transport methods supported = 0x%x\n", 
  3.2535 +				readl(&(tb-> TransportSupport)));
  3.2536 +	printk("   Transport methods active = 0x%x\n", 
  3.2537 +				readl(&(tb->TransportActive)));
  3.2538 +	printk("   Requested transport Method = 0x%x\n", 
  3.2539 +			readl(&(tb->HostWrite.TransportRequest)));
   3.2540 +	printk("   Coalesce Interrupt Delay = 0x%x\n", 
  3.2541 +			readl(&(tb->HostWrite.CoalIntDelay)));
   3.2542 +	printk("   Coalesce Interrupt Count = 0x%x\n", 
  3.2543 +			readl(&(tb->HostWrite.CoalIntCount)));
   3.2544 +	printk("   Max outstanding commands = %d\n", 
  3.2545 +			readl(&(tb->CmdsOutMax)));
  3.2546 +	printk("   Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
  3.2547 +	for(i=0;i<16;i++)
  3.2548 +		temp_name[i] = readb(&(tb->ServerName[i]));
  3.2549 +	temp_name[16] = '\0';
  3.2550 +	printk("   Server Name = %s\n", temp_name);
  3.2551 +	printk("   Heartbeat Counter = 0x%x\n\n\n", 
  3.2552 +			readl(&(tb->HeartBeat)));
  3.2553 +}
  3.2554 +#endif /* CCISS_DEBUG */ 
  3.2555 +
  3.2556 +static void release_io_mem(ctlr_info_t *c)
  3.2557 +{
  3.2558 +	/* if IO mem was not protected do nothing */
  3.2559 +	if (c->io_mem_addr == 0)
  3.2560 +		return;
  3.2561 +	release_region(c->io_mem_addr, c->io_mem_length);
  3.2562 +	c->io_mem_addr = 0;
  3.2563 +	c->io_mem_length = 0;
  3.2564 +}
  3.2565 +static int find_PCI_BAR_index(struct pci_dev *pdev,
  3.2566 +               unsigned long pci_bar_addr)
  3.2567 +{
  3.2568 +	int i, offset, mem_type, bar_type;
  3.2569 +	if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
  3.2570 +		return 0;
  3.2571 +	offset = 0;
  3.2572 +	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
  3.2573 +		bar_type = pci_resource_flags(pdev, i) &
  3.2574 +			PCI_BASE_ADDRESS_SPACE; 
  3.2575 +		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
  3.2576 +			offset += 4;
  3.2577 +		else {
  3.2578 +			mem_type = pci_resource_flags(pdev, i) &
  3.2579 +				PCI_BASE_ADDRESS_MEM_TYPE_MASK; 
  3.2580 +			switch (mem_type) {
  3.2581 +				case PCI_BASE_ADDRESS_MEM_TYPE_32:
  3.2582 +				case PCI_BASE_ADDRESS_MEM_TYPE_1M:
  3.2583 +					offset += 4; /* 32 bit */
  3.2584 +					break;
  3.2585 +				case PCI_BASE_ADDRESS_MEM_TYPE_64:
  3.2586 +					offset += 8;
  3.2587 +					break;
  3.2588 +				default: /* reserved in PCI 2.2 */
  3.2589 +					printk(KERN_WARNING "Base address is invalid\n");
  3.2590 +					return -1;	
  3.2591 +				break;
  3.2592 +			}
  3.2593 +		}
  3.2594 +		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
  3.2595 +			return i+1;
  3.2596 +	}
  3.2597 +	return -1;
  3.2598 +}
  3.2599 +			
  3.2600 +static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
  3.2601 +{
  3.2602 +	ushort subsystem_vendor_id, subsystem_device_id, command;
  3.2603 +	unchar irq = pdev->irq;
  3.2604 +	__u32 board_id;
  3.2605 +	__u64 cfg_offset;
  3.2606 +	__u32 cfg_base_addr;
  3.2607 +	__u64 cfg_base_addr_index;
  3.2608 +	int i;
  3.2609 +
  3.2610 +	/* check to see if controller has been disabled */
  3.2611 +	/* BEFORE we try to enable it */
  3.2612 +	(void) pci_read_config_word(pdev, PCI_COMMAND,&command);
  3.2613 +	if (!(command & 0x02)) {
  3.2614 +		printk(KERN_WARNING "cciss: controller appears to be disabled\n");
  3.2615 +		return -1;
  3.2616 +	}
  3.2617 +	if (pci_enable_device(pdev)) {
  3.2618 +		printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
  3.2619 +		return -1;
  3.2620 +	}
  3.2621 +	if (pci_set_dma_mask(pdev, CCISS_DMA_MASK ) != 0) {
  3.2622 +		printk(KERN_ERR "cciss:  Unable to set DMA mask\n");
  3.2623 +		return -1;
  3.2624 +	}
  3.2625 +	
  3.2626 +	subsystem_vendor_id = pdev->subsystem_vendor;
  3.2627 +	subsystem_device_id = pdev->subsystem_device;
  3.2628 +	board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
  3.2629 +					subsystem_vendor_id );
  3.2630 +
  3.2631 +
  3.2632 +	/* search for our IO range so we can protect it */
  3.2633 +	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
  3.2634 +		/* is this an IO range */
  3.2635 +		if (pci_resource_flags(pdev, i) & 0x01) {
  3.2636 +			c->io_mem_addr = pci_resource_start(pdev, i);
  3.2637 +			c->io_mem_length = pci_resource_end(pdev, i) -
  3.2638 +				pci_resource_start(pdev, i) + 1; 
  3.2639 +#ifdef CCISS_DEBUG
  3.2640 +			printk("IO value found base_addr[%d] %lx %lx\n", i,
  3.2641 +				c->io_mem_addr, c->io_mem_length);
  3.2642 +#endif /* CCISS_DEBUG */
  3.2643 +			/* register the IO range */
  3.2644 +			if (!request_region( c->io_mem_addr,
  3.2645 +                                        c->io_mem_length, "cciss")) {
  3.2646 +				printk(KERN_WARNING 
  3.2647 +					"cciss I/O memory range already in "
  3.2648 +					"use addr=%lx length=%ld\n",
  3.2649 +				c->io_mem_addr, c->io_mem_length);
  3.2650 +				c->io_mem_addr= 0;
  3.2651 +				c->io_mem_length = 0;
  3.2652 +			}
  3.2653 +			break;
  3.2654 +		}
  3.2655 +	}
  3.2656 +
  3.2657 +#ifdef CCISS_DEBUG
  3.2658 +	printk("command = %x\n", command);
  3.2659 +	printk("irq = %x\n", irq);
  3.2660 +	printk("board_id = %x\n", board_id);
  3.2661 +#endif /* CCISS_DEBUG */ 
  3.2662 +
  3.2663 +	c->intr = irq;
  3.2664 +
  3.2665 +	/*
   3.2666 +	 * Memory base addr is the first addr; the second points to the config
  3.2667 +         *   table
  3.2668 +	 */
  3.2669 +
  3.2670 +	c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
  3.2671 +#ifdef CCISS_DEBUG
  3.2672 +	printk("address 0 = %x\n", c->paddr);
  3.2673 +#endif /* CCISS_DEBUG */ 
  3.2674 +	c->vaddr = remap_pci_mem(c->paddr, 200);
  3.2675 +
  3.2676 +	/* get the address index number */
  3.2677 +	cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
  3.2678 +	/* I am not prepared to deal with a 64 bit address value */
  3.2679 +	cfg_base_addr &= (__u32) 0x0000ffff;
  3.2680 +#ifdef CCISS_DEBUG
  3.2681 +	printk("cfg base address = %x\n", cfg_base_addr);
  3.2682 +#endif /* CCISS_DEBUG */
  3.2683 +	cfg_base_addr_index =
  3.2684 +		find_PCI_BAR_index(pdev, cfg_base_addr);
  3.2685 +#ifdef CCISS_DEBUG
  3.2686 +	printk("cfg base address index = %x\n", cfg_base_addr_index);
  3.2687 +#endif /* CCISS_DEBUG */
  3.2688 +	if (cfg_base_addr_index == -1) {
  3.2689 +		printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
  3.2690 +		release_io_mem(hba[i]);
  3.2691 +		return -1;
  3.2692 +	}
  3.2693 +
  3.2694 +	cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
  3.2695 +#ifdef CCISS_DEBUG
  3.2696 +	printk("cfg offset = %x\n", cfg_offset);
  3.2697 +#endif /* CCISS_DEBUG */
  3.2698 +	c->cfgtable = (CfgTable_struct *) 
  3.2699 +		remap_pci_mem(pci_resource_start(pdev, cfg_base_addr_index)
  3.2700 +				+ cfg_offset, sizeof(CfgTable_struct));
  3.2701 +	c->board_id = board_id;
  3.2702 +
  3.2703 +#ifdef CCISS_DEBUG
  3.2704 +	print_cfg_table(c->cfgtable); 
  3.2705 +#endif /* CCISS_DEBUG */
  3.2706 +
  3.2707 +	for(i=0; i<NR_PRODUCTS; i++) {
  3.2708 +		if (board_id == products[i].board_id) {
  3.2709 +			c->product_name = products[i].product_name;
  3.2710 +			c->access = *(products[i].access);
  3.2711 +			break;
  3.2712 +		}
  3.2713 +	}
  3.2714 +	if (i == NR_PRODUCTS) {
  3.2715 +		printk(KERN_WARNING "cciss: Sorry, I don't know how"
  3.2716 +			" to access the Smart Array controller %08lx\n", 
  3.2717 +				(unsigned long)board_id);
  3.2718 +		return -1;
  3.2719 +	}
  3.2720 +	if (  (readb(&c->cfgtable->Signature[0]) != 'C') ||
  3.2721 +	      (readb(&c->cfgtable->Signature[1]) != 'I') ||
  3.2722 +	      (readb(&c->cfgtable->Signature[2]) != 'S') ||
  3.2723 +	      (readb(&c->cfgtable->Signature[3]) != 'S') ) {
  3.2724 +		printk("Does not appear to be a valid CISS config table\n");
  3.2725 +		return -1;
  3.2726 +	}
  3.2727 +#ifdef CCISS_DEBUG
  3.2728 +	printk("Trying to put board into Simple mode\n");
  3.2729 +#endif /* CCISS_DEBUG */ 
  3.2730 +	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
  3.2731 +	/* Update the field, and then ring the doorbell */ 
  3.2732 +	writel( CFGTBL_Trans_Simple, 
  3.2733 +		&(c->cfgtable->HostWrite.TransportRequest));
  3.2734 +	writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
  3.2735 +
   3.2736 +	/* Here, we wait, possibly for a long time (4 secs or more). 
  3.2737 +	 * In some unlikely cases, (e.g. A failed 144 GB drive in a 
  3.2738 +	 * RAID 5 set was hot replaced just as we're coming in here) it 
  3.2739 +	 * can take that long.  Normally (almost always) we will wait 
  3.2740 +	 * less than 1 sec. */
  3.2741 +	for(i=0;i<MAX_CONFIG_WAIT;i++) {
  3.2742 +		if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
  3.2743 +			break;
  3.2744 +		/* delay and try again */
  3.2745 +		set_current_state(TASK_INTERRUPTIBLE);
  3.2746 +		schedule_timeout(1);
  3.2747 +	}	
  3.2748 +
  3.2749 +#ifdef CCISS_DEBUG
  3.2750 +	printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
  3.2751 +#endif /* CCISS_DEBUG */
  3.2752 +#ifdef CCISS_DEBUG
  3.2753 +	print_cfg_table(c->cfgtable);	
  3.2754 +#endif /* CCISS_DEBUG */ 
  3.2755 +
  3.2756 +	if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
  3.2757 +		printk(KERN_WARNING "cciss: unable to get board into"
  3.2758 +					" simple mode\n");
  3.2759 +		return -1;
  3.2760 +	}
  3.2761 +	return 0;
  3.2762 +
  3.2763 +}
  3.2764 +
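cciss_pci_init() builds board_id from the PCI subsystem IDs (device ID in the high 16 bits, vendor ID in the low 16) and then matches it against the products[] table. The packing in isolation; the ID values below are placeholders, not a claim about any particular board:

	#include <stdio.h>

	int main(void)
	{
		unsigned short subsystem_vendor_id = 0x0e11;	/* placeholder */
		unsigned short subsystem_device_id = 0x4070;	/* placeholder */

		/* subsystem device ID in the high half, vendor ID in the low half */
		unsigned int board_id =
			(((unsigned int)subsystem_device_id << 16) & 0xffff0000) |
			subsystem_vendor_id;

		printf("board_id = 0x%08x\n", board_id);
		return 0;
	}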
  3.2765 +/* 
  3.2766 + * Gets information about the local volumes attached to the controller. 
  3.2767 + */ 
  3.2768 +static void cciss_getgeometry(int cntl_num)
  3.2769 +{
  3.2770 +	ReportLunData_struct *ld_buff;
  3.2771 +	ReadCapdata_struct *size_buff;
  3.2772 +	InquiryData_struct *inq_buff;
  3.2773 +	int return_code;
  3.2774 +	int i;
  3.2775 +	int listlength = 0;
  3.2776 +	__u32 lunid = 0;
  3.2777 +	int block_size;
  3.2778 +	int total_size; 
  3.2779 +
  3.2780 +	ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
  3.2781 +	if (ld_buff == NULL) {
  3.2782 +		printk(KERN_ERR "cciss: out of memory\n");
  3.2783 +		return;
  3.2784 +	}
  3.2785 +	memset(ld_buff, 0, sizeof(ReportLunData_struct));
  3.2786 +	size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
  3.2787 +        if (size_buff == NULL) {
  3.2788 +                printk(KERN_ERR "cciss: out of memory\n");
  3.2789 +		kfree(ld_buff);
  3.2790 +                return;
  3.2791 +        }
  3.2792 +	inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
  3.2793 +        if (inq_buff == NULL) {
  3.2794 +                printk(KERN_ERR "cciss: out of memory\n");
  3.2795 +                kfree(ld_buff);
  3.2796 +		kfree(size_buff);
  3.2797 +                return;
  3.2798 +        }
  3.2799 +	/* Get the firmware version */ 
  3.2800 +	return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff, 
  3.2801 +		sizeof(InquiryData_struct), 0, 0 ,0, NULL);
  3.2802 +	if (return_code == IO_OK) {
  3.2803 +		hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
  3.2804 +		hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
  3.2805 +		hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
  3.2806 +		hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
  3.2807 +	} else  {	/* send command failed */
  3.2808 +		printk(KERN_WARNING "cciss: unable to determine firmware"
  3.2809 +			" version of controller\n");
  3.2810 +	}
  3.2811 +	/* Get the number of logical volumes */ 
  3.2812 +	return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff, 
  3.2813 +			sizeof(ReportLunData_struct), 0, 0, 0, NULL);
  3.2814 +
  3.2815 +	if (return_code == IO_OK) {
  3.2816 +#ifdef CCISS_DEBUG
  3.2817 +		printk("LUN Data\n--------------------------\n");
  3.2818 +#endif /* CCISS_DEBUG */ 
  3.2819 +
  3.2820 +		listlength = be32_to_cpu(*((__u32 *) &ld_buff->LUNListLength[0]));
  3.2821 +	} else { /* reading number of logical volumes failed */
  3.2822 +		printk(KERN_WARNING "cciss: report logical volume"
  3.2823 +			" command failed\n");
  3.2824 +		listlength = 0;
  3.2825 +	}
   3.2826 +	hba[cntl_num]->num_luns = listlength / 8; /* 8 bytes per entry */
  3.2827 +	if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
   3.2828 +		printk(KERN_ERR "cciss: only %d logical volumes supported\n",
  3.2829 +			CISS_MAX_LUN);
  3.2830 +		hba[cntl_num]->num_luns = CISS_MAX_LUN;
  3.2831 +	}
  3.2832 +#ifdef CCISS_DEBUG
  3.2833 +	printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
  3.2834 +		ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
  3.2835 +		ld_buff->LUNListLength[3],  hba[cntl_num]->num_luns);
  3.2836 +#endif /* CCISS_DEBUG */
  3.2837 +
  3.2838 +	hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
  3.2839 +	for(i=0; i<  hba[cntl_num]->num_luns; i++) {
  3.2840 +	  	lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
  3.2841 +        	lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
  3.2842 +        	lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
  3.2843 +        	lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
  3.2844 +		hba[cntl_num]->drv[i].LunID = lunid;
  3.2845 +
  3.2846 +#ifdef CCISS_DEBUG
  3.2847 +	  	printk(KERN_DEBUG "LUN[%d]:  %x %x %x %x = %x\n", i, 
  3.2848 +		ld_buff->LUN[i][0], ld_buff->LUN[i][1],ld_buff->LUN[i][2], 
  3.2849 +		ld_buff->LUN[i][3], hba[cntl_num]->drv[i].LunID);
  3.2850 +#endif /* CCISS_DEBUG */
  3.2851 +
  3.2852 +	  	memset(size_buff, 0, sizeof(ReadCapdata_struct));
  3.2853 +	  	return_code = sendcmd(CCISS_READ_CAPACITY, cntl_num, size_buff, 
  3.2854 +				sizeof( ReadCapdata_struct), 1, i, 0, NULL);
  3.2855 +	  	if (return_code == IO_OK) {
  3.2856 +			total_size = (0xff & 
  3.2857 +				(unsigned int)(size_buff->total_size[0])) << 24;
  3.2858 +			total_size |= (0xff & 
  3.2859 +				(unsigned int)(size_buff->total_size[1])) << 16;
  3.2860 +			total_size |= (0xff & 
  3.2861 +				(unsigned int)(size_buff->total_size[2])) << 8;
  3.2862 +			total_size |= (0xff & (unsigned int)
  3.2863 +				(size_buff->total_size[3])); 
  3.2864 +			total_size++; 	/* command returns highest */
  3.2865 +					/* block address */
  3.2866 +
  3.2867 +			block_size = (0xff & 
  3.2868 +				(unsigned int)(size_buff->block_size[0])) << 24;
  3.2869 +                	block_size |= (0xff & 
  3.2870 +				(unsigned int)(size_buff->block_size[1])) << 16;
  3.2871 +                	block_size |= (0xff & 
  3.2872 +				(unsigned int)(size_buff->block_size[2])) << 8;
  3.2873 +                	block_size |= (0xff & 
  3.2874 +				(unsigned int)(size_buff->block_size[3]));
  3.2875 +		} else {	/* read capacity command failed */ 
  3.2876 +			printk(KERN_WARNING "cciss: read capacity failed\n");
  3.2877 +			total_size = block_size = 0; 
  3.2878 +		}	
  3.2879 +		printk(KERN_INFO "      blocks= %d block_size= %d\n", 
  3.2880 +					total_size, block_size);
  3.2881 +
  3.2882 +		/* Execute the command to read the disk geometry */
  3.2883 +		memset(inq_buff, 0, sizeof(InquiryData_struct));
  3.2884 +		return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
  3.2885 +			sizeof(InquiryData_struct), 1, i, 0xC1, NULL );
  3.2886 +	  	if (return_code == IO_OK) {
  3.2887 +			if (inq_buff->data_byte[8] == 0xFF) {
  3.2888 +			   printk(KERN_WARNING "cciss: reading geometry failed, volume does not support reading geometry\n");
  3.2889 +
  3.2890 +                           hba[cntl_num]->drv[i].block_size = block_size;
  3.2891 +                           hba[cntl_num]->drv[i].nr_blocks = total_size;
  3.2892 +                           hba[cntl_num]->drv[i].heads = 255;
  3.2893 +                           hba[cntl_num]->drv[i].sectors = 32; /* Sectors */
  3.2894 +			   					/* per track */
  3.2895 +                           hba[cntl_num]->drv[i].cylinders = total_size 
  3.2896 +				   				/ 255 / 32;
  3.2897 +			} else {
  3.2898 +
  3.2899 +		 	   hba[cntl_num]->drv[i].block_size = block_size;
  3.2900 +                           hba[cntl_num]->drv[i].nr_blocks = total_size;
  3.2901 +                           hba[cntl_num]->drv[i].heads = 
  3.2902 +					inq_buff->data_byte[6]; 
  3.2903 +                           hba[cntl_num]->drv[i].sectors = 
  3.2904 +					inq_buff->data_byte[7]; 
  3.2905 +			   hba[cntl_num]->drv[i].cylinders = 
  3.2906 +					(inq_buff->data_byte[4] & 0xff) << 8;
  3.2907 +			   hba[cntl_num]->drv[i].cylinders += 
  3.2908 +                                        inq_buff->data_byte[5];
  3.2909 +                           hba[cntl_num]->drv[i].raid_level = 
  3.2910 +					inq_buff->data_byte[8]; 
  3.2911 +			}
  3.2912 +		}
  3.2913 +		else {	/* Get geometry failed */
  3.2914 +			printk(KERN_WARNING "cciss: reading geometry failed, continuing with default geometry\n"); 
  3.2915 +
  3.2916 +			hba[cntl_num]->drv[i].block_size = block_size;
  3.2917 +			hba[cntl_num]->drv[i].nr_blocks = total_size;
  3.2918 +			hba[cntl_num]->drv[i].heads = 255;
  3.2919 +			hba[cntl_num]->drv[i].sectors = 32; 	/* Sectors */
  3.2920 +								/* per track */
  3.2921 +			hba[cntl_num]->drv[i].cylinders = total_size / 255 / 32;
  3.2922 +		}
  3.2923 +		if (hba[cntl_num]->drv[i].raid_level > 5)
  3.2924 +			hba[cntl_num]->drv[i].raid_level = RAID_UNKNOWN;
  3.2925 +		printk(KERN_INFO "      heads= %d, sectors= %d, cylinders= %d RAID %s\n\n",
  3.2926 +			hba[cntl_num]->drv[i].heads, 
  3.2927 +			hba[cntl_num]->drv[i].sectors,
  3.2928 +			hba[cntl_num]->drv[i].cylinders,
  3.2929 +			raid_label[hba[cntl_num]->drv[i].raid_level]); 
  3.2930 +	}
  3.2931 +	kfree(ld_buff);
  3.2932 +	kfree(size_buff);
  3.2933 +	kfree(inq_buff);
  3.2934 +}	
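The shift-and-or sequences in cciss_getgeometry() unpack 4-byte big-endian fields returned by READ CAPACITY (total_size, block_size), while LunID is assembled in the opposite byte order from the REPORT LUNS entries. A small helper makes the big-endian pattern explicit; it is illustrative only and does not exist in this driver:

	/* illustrative: assemble a big-endian 32-bit field, as done inline
	 * above for size_buff->total_size and size_buff->block_size */
	static inline unsigned int cciss_get_be32(const unsigned char *p)
	{
		return ((unsigned int)p[0] << 24) | ((unsigned int)p[1] << 16) |
		       ((unsigned int)p[2] <<  8) |  (unsigned int)p[3];
	}
	/* e.g.: total_size = cciss_get_be32(size_buff->total_size) + 1; */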
  3.2935 +
  3.2936 +/* Function to find the first free pointer into our hba[] array */
  3.2937 +/* Returns -1 if no free entries are left.  */
  3.2938 +static int alloc_cciss_hba(void)
  3.2939 +{
  3.2940 +	int i;
  3.2941 +	for(i=0; i< MAX_CTLR; i++) {
  3.2942 +		if (hba[i] == NULL) {
  3.2943 +			hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
  3.2944 +			if (hba[i]==NULL) {
  3.2945 +				printk(KERN_ERR "cciss: out of memory.\n");
  3.2946 +				return -1;
  3.2947 +			}
  3.2948 +			return i;
  3.2949 +		}
  3.2950 +	}
  3.2951 +	printk(KERN_WARNING 
  3.2952 +		"cciss: This driver supports a maximum of %d controllers.\n"
  3.2953 +		"You can change this value in cciss.c and recompile.\n",
  3.2954 +		MAX_CTLR);
  3.2955 +	return -1;
  3.2956 +}
  3.2957 +
  3.2958 +static void free_hba(int i)
  3.2959 +{
  3.2960 +	kfree(hba[i]);
  3.2961 +	hba[i]=NULL;
  3.2962 +}
  3.2963 +#ifdef CONFIG_CISS_MONITOR_THREAD
  3.2964 +static void fail_all_cmds(unsigned long ctlr)
  3.2965 +{
  3.2966 +	/* If we get here, the board is apparently dead. */
  3.2967 +	ctlr_info_t *h = hba[ctlr];
  3.2968 +	CommandList_struct *c;
  3.2969 +	unsigned long flags;
  3.2970 +
  3.2971 +	printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
  3.2972 +	h->alive = 0;	/* the controller apparently died... */ 
  3.2973 +
  3.2974 +	spin_lock_irqsave(&io_request_lock, flags);
  3.2975 +
  3.2976 +	pci_disable_device(h->pdev); /* Make sure it is really dead. */
  3.2977 +
  3.2978 +	/* move everything off the request queue onto the completed queue */
  3.2979 +	while( (c = h->reqQ) != NULL ) {
  3.2980 +		removeQ(&(h->reqQ), c);
  3.2981 +		h->Qdepth--;
  3.2982 +		addQ (&(h->cmpQ), c); 
  3.2983 +	}
  3.2984 +
  3.2985 +	/* Now, fail everything on the completed queue with a HW error */
  3.2986 +	while( (c = h->cmpQ) != NULL ) {
  3.2987 +		removeQ(&h->cmpQ, c);
  3.2988 +		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
  3.2989 +		if (c->cmd_type == CMD_RWREQ) {
  3.2990 +			complete_command(h, c, 0);
  3.2991 +		} else if (c->cmd_type == CMD_IOCTL_PEND)
  3.2992 +			complete(c->waiting);
  3.2993 +#		ifdef CONFIG_CISS_SCSI_TAPE
  3.2994 +			else if (c->cmd_type == CMD_SCSI)
  3.2995 +				complete_scsi_command(c, 0, 0);
  3.2996 +#		endif
  3.2997 +	}
  3.2998 +	spin_unlock_irqrestore(&io_request_lock, flags);
  3.2999 +	return;
  3.3000 +}
  3.3001 +static int cciss_monitor(void *ctlr)
  3.3002 +{
  3.3003 +	/* If the board fails, we ought to detect that.  So we periodically 
  3.3004 +	send down a No-Op message and expect it to complete quickly.  If it 
  3.3005 +	doesn't, then we assume the board is dead, and fail all commands.  
  3.3006 +	This is useful mostly in a multipath configuration, so that failover
  3.3007 +	will happen. */
  3.3008 +
  3.3009 +	int rc;
  3.3010 +	ctlr_info_t *h = (ctlr_info_t *) ctlr;
  3.3011 +	unsigned long flags;
  3.3012 +	u32 current_timer;
  3.3013 +
  3.3014 +	daemonize();
  3.3015 +	exit_files(current);
  3.3016 +	reparent_to_init();
  3.3017 +
  3.3018 +	printk("cciss%d: Monitor thread starting.\n", h->ctlr); 
  3.3019 +
  3.3020 +	/* only listen to signals if the HA was loaded as a module.  */
  3.3021 +#define SHUTDOWN_SIGS   (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM))
  3.3022 +	siginitsetinv(&current->blocked, SHUTDOWN_SIGS);
  3.3023 +	sprintf(current->comm, "ccissmon%d", h->ctlr);
  3.3024 +	h->monitor_thread = current;
  3.3025 +
  3.3026 +	init_timer(&h->watchdog); 
  3.3027 +	h->watchdog.function = fail_all_cmds;
  3.3028 +	h->watchdog.data = (unsigned long) h->ctlr;
  3.3029 +	while (1) {
  3.3030 +  		/* check heartbeat timer */
  3.3031 +                current_timer = readl(&h->cfgtable->HeartBeat);
  3.3032 +  		current_timer &= 0x0fffffff;
  3.3033 +  		if (heartbeat_timer == current_timer) {
  3.3034 +  			fail_all_cmds(h->ctlr);
  3.3035 +  			break;
  3.3036 +  		}
  3.3037 +  		else
  3.3038 +  			heartbeat_timer = current_timer;
  3.3039 +
  3.3040 +		set_current_state(TASK_UNINTERRUPTIBLE);
  3.3041 +		schedule_timeout(h->monitor_period * HZ);
  3.3042 +		h->watchdog.expires = jiffies + HZ * h->monitor_deadline;
  3.3043 +		add_timer(&h->watchdog);
  3.3044 +		/* send down a trivial command (no op message) to ctlr */
  3.3045 +		rc = sendcmd_withirq(3, h->ctlr, NULL, 0, 0, 0, 0, TYPE_MSG);
  3.3046 +		del_timer(&h->watchdog);
  3.3047 +		if (!CTLR_IS_ALIVE(h))
  3.3048 +			break;
  3.3049 +		if (signal_pending(current)) {
  3.3050 +			printk(KERN_WARNING "%s received signal.\n",
  3.3051 +				current->comm);
  3.3052 +			break;
  3.3053 +		}
  3.3054 +		if (h->monitor_period == 0) /* zero period means exit thread */
  3.3055 +			break;
  3.3056 +	}
  3.3057 +	printk(KERN_INFO "%s exiting.\n", current->comm);
  3.3058 +	spin_lock_irqsave(&io_request_lock, flags);
  3.3059 +	h->monitor_started = 0;
  3.3060 +	h->monitor_thread = NULL;
  3.3061 +	spin_unlock_irqrestore(&io_request_lock, flags);
  3.3062 +	return 0;
  3.3063 +}
  3.3064 +static int start_monitor_thread(ctlr_info_t *h, unsigned char *cmd, 
  3.3065 +		unsigned long count, int (*cciss_monitor)(void *), int *rc)
  3.3066 +{
  3.3067 +	unsigned long flags;
  3.3068 +	unsigned int new_period, old_period, new_deadline, old_deadline;
  3.3069 +
  3.3070 +	if (strncmp("monitor", cmd, 7) == 0) {
  3.3071 +		new_period = simple_strtol(cmd + 8, NULL, 10);
  3.3072 +		spin_lock_irqsave(&io_request_lock, flags);
  3.3073 +		new_deadline = h->monitor_deadline;
  3.3074 +		spin_unlock_irqrestore(&io_request_lock, flags);
  3.3075 +	} else if (strncmp("deadline", cmd, 8) == 0) {
  3.3076 +		new_deadline = simple_strtol(cmd + 9, NULL, 10);
  3.3077 +		spin_lock_irqsave(&io_request_lock, flags);
  3.3078 +		new_period = h->monitor_period;
  3.3079 +		spin_unlock_irqrestore(&io_request_lock, flags);
  3.3080 +	} else
  3.3081 +		return -1;
  3.3082 +	if (new_period != 0 && new_period < CCISS_MIN_PERIOD)
  3.3083 +		new_period = CCISS_MIN_PERIOD;
  3.3084 +	if (new_period > CCISS_MAX_PERIOD)
  3.3085 +		new_period = CCISS_MAX_PERIOD;
  3.3086 +	if (new_deadline >= new_period) {
  3.3087 +		new_deadline = new_period - 5;
  3.3088 +		printk(KERN_INFO "setting deadline to %d\n", new_deadline);
  3.3089 +	}
  3.3090 +	spin_lock_irqsave(&io_request_lock, flags);
  3.3091 +	if (h->monitor_started != 0)  {
  3.3092 +		old_period = h->monitor_period;
  3.3093 +		old_deadline = h->monitor_deadline;
  3.3094 +		h->monitor_period = new_period;
  3.3095 +		h->monitor_deadline = new_deadline;
  3.3096 +		spin_unlock_irqrestore(&io_request_lock, flags);
  3.3097 +		if (new_period == 0) {
  3.3098 +			printk(KERN_INFO "cciss%d: stopping monitor thread\n",
  3.3099 +				h->ctlr);
  3.3100 +			*rc = count;
  3.3101 +			return 0;
  3.3102 +		}
  3.3103 +		if (new_period != old_period) 
  3.3104 +			printk(KERN_INFO "cciss%d: adjusting monitor thread "
  3.3105 +				"period from %d to %d seconds\n",
  3.3106 +				h->ctlr, old_period, new_period);
  3.3107 +		if (new_deadline != old_deadline) 
  3.3108 +			printk(KERN_INFO "cciss%d: adjusting monitor thread "
  3.3109 +				"deadline from %d to %d seconds\n",
  3.3110 +				h->ctlr, old_deadline, new_deadline);
  3.3111 +		*rc = count;
  3.3112 +		return 0;
  3.3113 +	}
  3.3114 +	h->monitor_started = 1;
  3.3115 +	h->monitor_period = new_period;
  3.3116 +	h->monitor_deadline = new_deadline;
  3.3117 +	spin_unlock_irqrestore(&io_request_lock, flags);
  3.3118 +	kernel_thread(cciss_monitor, h, 0);
  3.3119 +	*rc = count;
  3.3120 +	return 0;
  3.3121 +}
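start_monitor_thread() parses a text command of the form "monitor <seconds>" or "deadline <seconds>". A hedged usage sketch (the command buffer below is illustrative; how the string reaches the driver, e.g. via a /proc write handler, is outside this hunk):

	unsigned char cmd[] = "monitor 60";	/* re-check the board every 60s */
	int rc = 0;

	/* starts the thread, or adjusts period/deadline if already running;
	 * "monitor 0" asks a running thread to exit */
	if (START_MONITOR_THREAD(h, cmd, sizeof(cmd), cciss_monitor, &rc) == 0)
		return rc;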
  3.3122 +
  3.3123 +static void kill_monitor_thread(ctlr_info_t *h)
  3.3124 +{
  3.3125 +	if (h->monitor_thread)
  3.3126 +		send_sig(SIGKILL, h->monitor_thread, 1);
  3.3127 +}
  3.3128 +#else
  3.3129 +#define kill_monitor_thread(h)
  3.3130 +#endif
  3.3131 +/*
  3.3132 + *  This is it.  Find all the controllers and register them.  I really hate
  3.3133 + *  stealing all these major device numbers.
  3.3134 + *  Returns the number of block devices registered.
  3.3135 + */
  3.3136 +static int __init cciss_init_one(struct pci_dev *pdev,
  3.3137 +	const struct pci_device_id *ent)
  3.3138 +{
  3.3139 +	request_queue_t *q;
  3.3140 +	int i;
  3.3141 +	int j;
  3.3142 +	int rc;
  3.3143 +
  3.3144 +	printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
  3.3145 +			" bus %d dev %d func %d\n",
  3.3146 +		pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
  3.3147 +			PCI_FUNC(pdev->devfn));
  3.3148 +	i = alloc_cciss_hba();
  3.3149 +	if (i < 0 ) 
  3.3150 +		return -1;
  3.3151 +	memset(hba[i], 0, sizeof(ctlr_info_t));
  3.3152 +	if (cciss_pci_init(hba[i], pdev) != 0) {
  3.3153 +		free_hba(i);
  3.3154 +		return -1;
  3.3155 +	}
  3.3156 +	sprintf(hba[i]->devname, "cciss%d", i);
  3.3157 +	hba[i]->ctlr = i;
  3.3158 +
  3.3159 +	/* register with the major number, or get a dynamic major number */
  3.3160 +	/* by passing 0 as argument */
  3.3161 +
  3.3162 +	if (i < MAX_CTLR_ORIG)
  3.3163 +		hba[i]->major = MAJOR_NR + i;
  3.3164 +
  3.3165 +	hba[i]->pdev = pdev;
  3.3166 +	ASSERT_CTLR_ALIVE(hba[i]);
  3.3167 +
  3.3168 +#if 0
  3.3169 +	rc = (register_blkdev(hba[i]->major, hba[i]->devname, &cciss_fops));
  3.3170 +	if (rc < 0) {
  3.3171 +		printk(KERN_ERR "cciss:  Unable to get major number "
  3.3172 +			"%d for %s\n", hba[i]->major, hba[i]->devname);
  3.3173 +		release_io_mem(hba[i]);
  3.3174 +		free_hba(i);
  3.3175 +		return -1;
  3.3176 +	} else 
  3.3177 +                {
  3.3178 +		if (i < MAX_CTLR_ORIG) {
  3.3179 +			hba[i]->major = MAJOR_NR + i;
  3.3180 +			map_major_to_ctlr[MAJOR_NR + i] = i;
  3.3181 +		} else {
  3.3182 +			hba[i]->major = rc;
  3.3183 +			map_major_to_ctlr[rc] = i;
  3.3184 +		}
  3.3185 +	}
  3.3186 +
  3.3187 +XXXX  Need to register this...
  3.3188 +
  3.3189 +#endif
  3.3190 +
  3.3191 +	/* make sure the board interrupts are off */
  3.3192 +	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
  3.3193 +	if (request_irq(hba[i]->intr, do_cciss_intr, 
  3.3194 +		SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM, 
  3.3195 +			hba[i]->devname, hba[i])) {
  3.3196 +
  3.3197 +		printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
  3.3198 +			hba[i]->intr, hba[i]->devname);
  3.3199 +		unregister_blkdev( hba[i]->major, hba[i]->devname);
  3.3200 +		map_major_to_ctlr[hba[i]->major] = 0;
  3.3201 +		release_io_mem(hba[i]);
  3.3202 +		free_hba(i);
  3.3203 +		return -1;
  3.3204 +	}
  3.3205 +	hba[i]->cmd_pool_bits = (__u32*)kmalloc(
  3.3206 +        	((NR_CMDS+31)/32)*sizeof(__u32), GFP_KERNEL);
  3.3207 +	hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
  3.3208 +		hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct), 
  3.3209 +		&(hba[i]->cmd_pool_dhandle));
  3.3210 +	hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
  3.3211 +		hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct), 
  3.3212 +		&(hba[i]->errinfo_pool_dhandle));
  3.3213 +	if ((hba[i]->cmd_pool_bits == NULL) 
  3.3214 +		|| (hba[i]->cmd_pool == NULL)
  3.3215 +		|| (hba[i]->errinfo_pool == NULL)) {
  3.3216 +
  3.3217 +		if (hba[i]->cmd_pool_bits)
  3.3218 +                	kfree(hba[i]->cmd_pool_bits);
  3.3219 +                if (hba[i]->cmd_pool)
  3.3220 +                	pci_free_consistent(hba[i]->pdev,  
  3.3221 +				NR_CMDS * sizeof(CommandList_struct), 
  3.3222 +				hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);	
  3.3223 +		if (hba[i]->errinfo_pool)
  3.3224 +			pci_free_consistent(hba[i]->pdev,
  3.3225 +				NR_CMDS * sizeof( ErrorInfo_struct),
  3.3226 +				hba[i]->errinfo_pool, 
  3.3227 +				hba[i]->errinfo_pool_dhandle);
  3.3228 +                free_irq(hba[i]->intr, hba[i]);
  3.3229 +                unregister_blkdev(hba[i]->major, hba[i]->devname);
  3.3230 +		map_major_to_ctlr[hba[i]->major] = 0;
  3.3231 +		release_io_mem(hba[i]);
  3.3232 +		free_hba(i);
  3.3233 +                printk(KERN_ERR "cciss: out of memory\n");
  3.3234 +		return -1;
  3.3235 +	}
  3.3236 +
  3.3237 +	/* Initialize the pdev driver private data. 
  3.3238 +		have it point to hba[i].  */
  3.3239 +	pci_set_drvdata(pdev, hba[i]);
  3.3240 +	/* command and error info recs zeroed out before 
  3.3241 +			they are used */
  3.3242 +        memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+31)/32)*sizeof(__u32));
  3.3243 +
  3.3244 +#ifdef CCISS_DEBUG	
  3.3245 +	printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
  3.3246 +#endif /* CCISS_DEBUG */
  3.3247 +
  3.3248 +	cciss_getgeometry(i);
  3.3249 +
  3.3250 +	cciss_find_non_disk_devices(i);	/* find our tape drives, if any */
  3.3251 +
  3.3252 +	/* Turn the interrupts on so we can service requests */
  3.3253 +	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
  3.3254 +
  3.3255 +	cciss_procinit(i);
  3.3256 +
  3.3257 +	q = BLK_DEFAULT_QUEUE(hba[i]->major);
  3.3258 +	q->queuedata = hba[i];
  3.3259 +	blk_init_queue(q, do_cciss_request);
  3.3260 +#if 0
  3.3261 +	// XXX SMH; no bounce support for us yet 
  3.3262 +	blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
  3.3263 +#endif
  3.3264 +	blk_queue_headactive(q, 0);		
  3.3265 +
  3.3266 +	/* fill in the other Kernel structs */
  3.3267 +	blksize_size[hba[i]->major] = hba[i]->blocksizes;
  3.3268 +        hardsect_size[hba[i]->major] = hba[i]->hardsizes;
  3.3269 +        read_ahead[hba[i]->major] = READ_AHEAD;
  3.3270 +
  3.3271 +	/* Set the pointers to queue functions */ 
  3.3272 +	q->back_merge_fn = cpq_back_merge_fn;
  3.3273 +        q->front_merge_fn = cpq_front_merge_fn;
  3.3274 +        q->merge_requests_fn = cpq_merge_requests_fn;
  3.3275 +
  3.3276 +
  3.3277 +	/* Fill in the gendisk data */ 	
  3.3278 +	hba[i]->gendisk.major = hba[i]->major;
  3.3279 +	hba[i]->gendisk.major_name = "cciss";
  3.3280 +	hba[i]->gendisk.minor_shift = NWD_SHIFT;
  3.3281 +	hba[i]->gendisk.max_p = MAX_PART;
  3.3282 +	hba[i]->gendisk.part = hba[i]->hd;
  3.3283 +	hba[i]->gendisk.sizes = hba[i]->sizes;
  3.3284 +	hba[i]->gendisk.nr_real = hba[i]->highest_lun+1;
  3.3285 +	hba[i]->gendisk.fops = &cciss_fops;
  3.3286 +
  3.3287 +	/* Get on the disk list */ 
  3.3288 +	add_gendisk(&(hba[i]->gendisk));
  3.3289 +
  3.3290 +	cciss_geninit(i);
  3.3291 +	for(j=0; j<NWD; j++)
  3.3292 +		register_disk(&(hba[i]->gendisk),
  3.3293 +			MKDEV(hba[i]->major, j <<4), 
  3.3294 +			MAX_PART, &cciss_fops, 
  3.3295 +			hba[i]->drv[j].nr_blocks);
  3.3296 +
  3.3297 +	cciss_register_scsi(i, 1);  /* hook ourself into SCSI subsystem */
  3.3298 +
  3.3299 +	return 1;
  3.3300 +}
  3.3301 +
  3.3302 +static void __devexit cciss_remove_one (struct pci_dev *pdev)
  3.3303 +{
  3.3304 +	ctlr_info_t *tmp_ptr;
  3.3305 +	int i;
  3.3306 +	char flush_buf[4];
  3.3307 +	int return_code; 
  3.3308 +
  3.3309 +	if (pci_get_drvdata(pdev) == NULL) {
  3.3310 +		printk( KERN_ERR "cciss: Unable to remove device \n");
  3.3311 +		return;
  3.3312 +	}
  3.3313 +	tmp_ptr = pci_get_drvdata(pdev);
  3.3314 +	i = tmp_ptr->ctlr;
  3.3315 +	if (hba[i] == NULL) {
  3.3316 +		printk(KERN_ERR "cciss: device appears to "
  3.3317 +			"already be removed \n");
  3.3318 +		return;
  3.3319 +	}
  3.3320 +	kill_monitor_thread(hba[i]);
  3.3321 +	/* no sense in trying to flush a dead board's cache. */
  3.3322 +	if (CTLR_IS_ALIVE(hba[i])) {
  3.3323 +		/* Turn board interrupts off and flush the cache */
  3.3324 +		/* write all data in the battery backed cache to disks */
  3.3325 +		memset(flush_buf, 0, 4);
  3.3326 +		return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf,
  3.3327 +					4, 0, 0, 0, NULL);
  3.3328 +		if (return_code != IO_OK)
  3.3329 +			printk(KERN_WARNING
  3.3330 +				"cciss%d: Error flushing cache\n", i);
  3.3331 +	}
  3.3332 +	free_irq(hba[i]->intr, hba[i]);
  3.3333 +	pci_set_drvdata(pdev, NULL);
  3.3334 +	iounmap((void*)hba[i]->vaddr);
  3.3335 +	cciss_unregister_scsi(i);  /* unhook from SCSI subsystem */
  3.3336 +	unregister_blkdev(hba[i]->major, hba[i]->devname);
  3.3337 +	map_major_to_ctlr[hba[i]->major] = 0;
  3.3338 +	//remove_proc_entry(hba[i]->devname, proc_cciss);	
  3.3339 +	
  3.3340 +
  3.3341 +	/* remove it from the disk list */
  3.3342 +	del_gendisk(&(hba[i]->gendisk));
  3.3343 +
  3.3344 +	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct), 
  3.3345 +		hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
  3.3346 +	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
  3.3347 +		hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
  3.3348 +	kfree(hba[i]->cmd_pool_bits);
  3.3349 +	release_io_mem(hba[i]);
  3.3350 +	free_hba(i);
  3.3351 +}	
  3.3352 +
  3.3353 +static struct pci_driver cciss_pci_driver = {
  3.3354 +	name:   "cciss",
  3.3355 +	probe:  cciss_init_one,
  3.3356 +	remove:  __devexit_p(cciss_remove_one),
  3.3357 +	id_table:  cciss_pci_device_id, /* id_table */
  3.3358 +};
  3.3359 +
  3.3360 +/*
  3.3361 + *  This is it.  Register the PCI driver information for the cards we control;
  3.3362 + *  the OS will call our registered routines when it finds one of our cards.
  3.3363 + */
  3.3364 +int __init cciss_init(void)
  3.3365 +{
  3.3366 +
  3.3367 +	printk(KERN_INFO DRIVER_NAME "\n");
  3.3368 +	/* Register for our PCI devices */
  3.3369 +	return pci_module_init(&cciss_pci_driver);
  3.3370 +}
  3.3371 +
  3.3372 +EXPORT_NO_SYMBOLS;
  3.3373 +static int __init init_cciss_module(void)
  3.3374 +{
  3.3375 +
  3.3376 +	return cciss_init();
  3.3377 +}
  3.3378 +
  3.3379 +static void __exit cleanup_cciss_module(void)
  3.3380 +{
  3.3381 +	int i;
  3.3382 +
  3.3383 +	pci_unregister_driver(&cciss_pci_driver);
  3.3384 +	/* double check that all controller entries have been removed */
  3.3385 +	for (i=0; i< MAX_CTLR; i++) {
  3.3386 +		if (hba[i] != NULL) {
  3.3387 +			printk(KERN_WARNING "cciss: had to remove"
  3.3388 +					" controller %d\n", i);
  3.3389 +			cciss_remove_one(hba[i]->pdev);
  3.3390 +		}
  3.3391 +	}
  3.3392 +	//remove_proc_entry("cciss", proc_root_driver);
  3.3393 +}
  3.3394 +
  3.3395 +module_init(init_cciss_module);
  3.3396 +module_exit(cleanup_cciss_module);
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/drivers/block/cciss.h	Mon Jan 19 15:29:17 2004 +0000
     4.3 @@ -0,0 +1,293 @@
     4.4 +#ifndef CCISS_H
     4.5 +#define CCISS_H
     4.6 +
     4.7 +#include <linux/genhd.h>
     4.8 +
     4.9 +#include "cciss_cmd.h"
    4.10 +
    4.11 +
    4.12 +#define NWD		16
    4.13 +#define NWD_SHIFT	4
    4.14 +#define MAX_PART	16
    4.15 +
    4.16 +#define IO_OK		0
    4.17 +#define IO_ERROR	1
    4.18 +
    4.19 +#define MAJOR_NR COMPAQ_CISS_MAJOR 
    4.20 +
    4.21 +struct ctlr_info;
    4.22 +typedef struct ctlr_info ctlr_info_t;
    4.23 +
    4.24 +struct access_method {
    4.25 +	void (*submit_command)(ctlr_info_t *h, CommandList_struct *c);
    4.26 +	void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
    4.27 +	unsigned long (*fifo_full)(ctlr_info_t *h);
    4.28 +	unsigned long (*intr_pending)(ctlr_info_t *h);
    4.29 +	unsigned long (*command_completed)(ctlr_info_t *h);
    4.30 +};
    4.31 +typedef struct _drive_info_struct
    4.32 +{
    4.33 + 	__u32   	LunID;	
    4.34 +	int 		usage_count;
    4.35 +	unsigned int 	nr_blocks;
    4.36 +	int		block_size;
    4.37 +	int 		heads;
    4.38 +	int		sectors;
    4.39 +	int 		cylinders;
    4.40 +	int 		raid_level;
    4.41 +} drive_info_struct;
    4.42 +
    4.43 +struct ctlr_info 
    4.44 +{
    4.45 +	int	ctlr;
    4.46 +	int	major;
    4.47 +	char	devname[8];
    4.48 +	char    *product_name;
    4.49 +	char	firm_ver[4]; // Firmware version 
    4.50 +	struct pci_dev *pdev;
    4.51 +	__u32	board_id;
    4.52 +	unsigned long vaddr;
    4.53 +	unsigned long paddr;	
    4.54 +	unsigned long io_mem_addr;
    4.55 +	unsigned long io_mem_length;
    4.56 +	CfgTable_struct *cfgtable;
    4.57 +	int	intr;
    4.58 +	int	interrupts_enabled;
    4.59 +	int 	max_commands;
    4.60 +	int	commands_outstanding;
    4.61 +	int 	max_outstanding; /* Debug */ 
    4.62 +	int	num_luns;
    4.63 +	int 	highest_lun;
    4.64 +	int	usage_count;  /* number of opens on all minor devices */
    4.65 +
    4.66 +	// information about each logical volume
    4.67 +	drive_info_struct drv[CISS_MAX_LUN];
    4.68 +
    4.69 +	struct access_method access;
    4.70 +
    4.71 +	/* queue and queue Info */ 
    4.72 +	CommandList_struct *reqQ;
    4.73 +	CommandList_struct  *cmpQ;
    4.74 +	unsigned int Qdepth;
    4.75 +	unsigned int maxQsinceinit;
    4.76 +	unsigned int maxSG;
    4.77 +
    4.78 +	/* pointers to command and error info pool */
    4.79 +	CommandList_struct 	*cmd_pool;
    4.80 +	dma_addr_t		cmd_pool_dhandle; 
    4.81 +	ErrorInfo_struct 	*errinfo_pool;
    4.82 +	dma_addr_t		errinfo_pool_dhandle; 
    4.83 +        __u32   		*cmd_pool_bits;
    4.84 +	int			nr_allocs;
    4.85 +	int			nr_frees; 
    4.86 +
    4.87 +	// Disk structures we need to pass back
    4.88 +	struct gendisk   gendisk;
    4.89 +	   // indexed by minor numbers
    4.90 +	struct hd_struct hd[256];
    4.91 +	int              sizes[256];
    4.92 +	int              blocksizes[256];
    4.93 +	int              hardsizes[256];
    4.94 +	int busy_configuring;
    4.95 +#ifdef CONFIG_CISS_SCSI_TAPE
    4.96 +	void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
    4.97 +#endif
    4.98 +#ifdef CONFIG_CISS_MONITOR_THREAD
    4.99 +	struct timer_list watchdog;
   4.100 +	struct task_struct *monitor_thread; 
   4.101 +	unsigned int monitor_period;
   4.102 +	unsigned int monitor_deadline;
   4.103 +	unsigned char alive;
   4.104 +	unsigned char monitor_started;
   4.105 +#define CCISS_MIN_PERIOD 10
   4.106 +#define CCISS_MAX_PERIOD 3600 
   4.107 +#define CTLR_IS_ALIVE(h) (h->alive)
   4.108 +#define ASSERT_CTLR_ALIVE(h) {	h->alive = 1; \
   4.109 +				h->monitor_period = 0; \
   4.110 +				h->monitor_started = 0; }
   4.111 +#define MONITOR_STATUS_PATTERN "Status: %s\n"
   4.112 +#define CTLR_STATUS(h) CTLR_IS_ALIVE(h) ? "operational" : "failed"
   4.113 +#define MONITOR_PERIOD_PATTERN "Monitor thread period: %d\n"
   4.114 +#define MONITOR_PERIOD_VALUE(h) (h->monitor_period)
   4.115 +#define MONITOR_DEADLINE_PATTERN "Monitor thread deadline: %d\n"
   4.116 +#define MONITOR_DEADLINE_VALUE(h) (h->monitor_deadline)
   4.117 +#define START_MONITOR_THREAD(h, cmd, count, cciss_monitor, rc) \
   4.118 +	start_monitor_thread(h, cmd, count, cciss_monitor, rc)
   4.119 +#else
   4.120 +
   4.121 +#define MONITOR_PERIOD_PATTERN "%s"
   4.122 +#define MONITOR_PERIOD_VALUE(h) ""
   4.123 +#define MONITOR_DEADLINE_PATTERN "%s"
   4.124 +#define MONITOR_DEADLINE_VALUE(h) ""
   4.125 +#define MONITOR_STATUS_PATTERN "%s\n"
   4.126 +#define CTLR_STATUS(h) ""
   4.127 +#define CTLR_IS_ALIVE(h) (1)
   4.128 +#define ASSERT_CTLR_ALIVE(h)
   4.129 +#define START_MONITOR_THREAD(a,b,c,d,rc) (*rc == 0)
   4.130 +
   4.131 +#endif
   4.132 +};
   4.133 +
   4.134 +/* Defining the different access methods */
   4.135 +/*
   4.136 + * Memory mapped FIFO interface (SMART 53xx cards)
   4.137 + */
   4.138 +#define SA5_DOORBELL	0x20
   4.139 +#define SA5_REQUEST_PORT_OFFSET	0x40
   4.140 +#define SA5_REPLY_INTR_MASK_OFFSET	0x34
   4.141 +#define SA5_REPLY_PORT_OFFSET		0x44
   4.142 +#define SA5_INTR_STATUS		0x30
   4.143 +
   4.144 +#define SA5_CTCFG_OFFSET	0xB4
   4.145 +#define SA5_CTMEM_OFFSET	0xB8
   4.146 +
   4.147 +#define SA5_INTR_OFF		0x08
   4.148 +#define SA5B_INTR_OFF		0x04
   4.149 +#define SA5_INTR_PENDING	0x08
   4.150 +#define SA5B_INTR_PENDING	0x04
   4.151 +#define FIFO_EMPTY		0xffffffff	
   4.152 +
   4.153 +#define  CISS_ERROR_BIT		0x02
   4.154 +
   4.155 +#define CCISS_INTR_ON 	1 
   4.156 +#define CCISS_INTR_OFF	0
   4.157 +/* 
   4.158 +	Send the command to the hardware 
   4.159 +*/
   4.160 +static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) 
   4.161 +{
   4.162 +#ifdef CCISS_DEBUG
   4.163 +	 printk("Sending %x - down to controller\n", c->busaddr );
   4.164 +#endif /* CCISS_DEBUG */ 
   4.165 +         writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
   4.166 +	 h->commands_outstanding++;
   4.167 +	 if ( h->commands_outstanding > h->max_outstanding)
   4.168 +		h->max_outstanding = h->commands_outstanding;
   4.169 +}
   4.170 +
   4.171 +/*  
   4.172 + *  This card is the opposite of the other cards.  
   4.173 + *   0 turns interrupts on... 
   4.174 + *   0x08 turns them off... 
   4.175 + */
   4.176 +static void SA5_intr_mask(ctlr_info_t *h, unsigned long val)
   4.177 +{
   4.178 +	if (val) 
   4.179 +	{ /* Turn interrupts on */
   4.180 +		h->interrupts_enabled = 1;
   4.181 +		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
   4.182 +	} else /* Turn them off */
   4.183 +	{
   4.184 +		h->interrupts_enabled = 0;
   4.185 +        	writel( SA5_INTR_OFF, 
   4.186 +			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
   4.187 +	}
   4.188 +}
   4.189 +/*
   4.190 + *  This card is the opposite of the other cards.
   4.191 + *   0 turns interrupts on...
   4.192 + *   0x04 turns them off...
   4.193 + */
   4.194 +static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
   4.195 +{
   4.196 +        if (val)
   4.197 +        { /* Turn interrupts on */
   4.198 +		h->interrupts_enabled = 1;
   4.199 +                writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
   4.200 +        } else /* Turn them off */
   4.201 +        {
   4.202 +		h->interrupts_enabled = 0;
   4.203 +                writel( SA5B_INTR_OFF,
   4.204 +                        h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
   4.205 +        }
   4.206 +}
   4.207 +/*
   4.208 + *  Returns true if fifo is full.  
   4.209 + * 
   4.210 + */ 
   4.211 +static unsigned long SA5_fifo_full(ctlr_info_t *h)
   4.212 +{
   4.213 +	if( h->commands_outstanding >= h->max_commands)
   4.214 +		return(1);
   4.215 +	else 
   4.216 +		return(0);
   4.217 +
   4.218 +}
   4.219 +/* 
   4.220 + *   returns value read from hardware. 
   4.221 + *     returns FIFO_EMPTY if there is nothing to read 
   4.222 + */ 
   4.223 +static unsigned long SA5_completed(ctlr_info_t *h)
   4.224 +{
   4.225 +	unsigned long register_value 
   4.226 +		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
   4.227 +	if(register_value != FIFO_EMPTY)
   4.228 +	{
   4.229 +		h->commands_outstanding--;
   4.230 +#ifdef CCISS_DEBUG
   4.231 +		printk("cciss:  Read %lx back from board\n", register_value);
   4.232 +#endif /* CCISS_DEBUG */ 
   4.233 +	} 
   4.234 +#ifdef CCISS_DEBUG
   4.235 +	else
   4.236 +	{
   4.237 +		printk("cciss:  FIFO Empty read\n");
   4.238 +	}
   4.239 +#endif 
   4.240 +	return ( register_value); 
   4.241 +
   4.242 +}
   4.243 +/*
   4.244 + *	Returns true if an interrupt is pending.
   4.245 + */
   4.246 +static unsigned long SA5_intr_pending(ctlr_info_t *h)
   4.247 +{
   4.248 +	unsigned long register_value  = 
   4.249 +		readl(h->vaddr + SA5_INTR_STATUS);
   4.250 +#ifdef CCISS_DEBUG
   4.251 +	printk("cciss: intr_pending %lx\n", register_value);
   4.252 +#endif  /* CCISS_DEBUG */
   4.253 +	if( register_value &  SA5_INTR_PENDING) 
   4.254 +		return  1;	
   4.255 +	return 0 ;
   4.256 +}
   4.257 +
   4.258 +/*
   4.259 + *      Returns true if an interrupt is pending.
   4.260 + */
   4.261 +static unsigned long SA5B_intr_pending(ctlr_info_t *h)
   4.262 +{
   4.263 +        unsigned long register_value  =
   4.264 +                readl(h->vaddr + SA5_INTR_STATUS);
   4.265 +#ifdef CCISS_DEBUG
   4.266 +        printk("cciss: intr_pending %lx\n", register_value);
   4.267 +#endif  /* CCISS_DEBUG */
   4.268 +        if( register_value &  SA5B_INTR_PENDING)
   4.269 +                return  1;
   4.270 +        return 0 ;
   4.271 +}
   4.272 +
   4.273 +
   4.274 +static struct access_method SA5_access = {
   4.275 +	SA5_submit_command,
   4.276 +	SA5_intr_mask,
   4.277 +	SA5_fifo_full,
   4.278 +	SA5_intr_pending,
   4.279 +	SA5_completed,
   4.280 +};
   4.281 +
   4.282 +static struct access_method SA5B_access = {
   4.283 +        SA5_submit_command,
   4.284 +        SA5B_intr_mask,
   4.285 +        SA5_fifo_full,
   4.286 +        SA5B_intr_pending,
   4.287 +        SA5_completed,
   4.288 +};
   4.289 +
   4.290 +struct board_type {
   4.291 +	__u32	board_id;
   4.292 +	char	*product_name;
   4.293 +	struct access_method *access;
   4.294 +};
   4.295 +#endif /* CCISS_H */
   4.296 +
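The access_method tables above are what keep the core driver independent of the SA5 vs. SA5B register layouts: a board_type entry (the product table itself lives in cciss.c, outside this hunk) selects SA5_access or SA5B_access, and cciss.c then calls through h->access, as the set_intr_mask() calls in cciss_init_one() do. A minimal, purely illustrative dispatch sketch:

	h->access = SA5_access;			/* normally chosen via board_type */
	h->access.set_intr_mask(h, CCISS_INTR_OFF);
	h->access.submit_command(h, c);		/* post a CommandList_struct */
	if (h->access.intr_pending(h)) {
		unsigned long tag = h->access.command_completed(h);
		/* tag == FIFO_EMPTY means nothing has completed yet */
	}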
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/drivers/block/cciss_cmd.h	Mon Jan 19 15:29:17 2004 +0000
     5.3 @@ -0,0 +1,271 @@
     5.4 +#ifndef CCISS_CMD_H
     5.5 +#define CCISS_CMD_H
     5.6 +/* ########################################################################## */
     5.7 +/* DEFINES                                                                    */
     5.8 +/* ########################################################################## */
     5.9 +#define CISS_VERSION "1.00"
    5.10 +
    5.11 +/* general boundary definitions */
    5.12 +#define SENSEINFOBYTES          32	/* note that this value may vary */
    5.13 +					/* between host implementations  */
    5.14 +#define MAXSGENTRIES            31
    5.15 +#define MAXREPLYQS              256
    5.16 +
    5.17 +/* Command Status value */
    5.18 +#define CMD_SUCCESS             0x0000
    5.19 +#define CMD_TARGET_STATUS       0x0001
    5.20 +#define CMD_DATA_UNDERRUN       0x0002
    5.21 +#define CMD_DATA_OVERRUN        0x0003
    5.22 +#define CMD_INVALID             0x0004
    5.23 +#define CMD_PROTOCOL_ERR        0x0005
    5.24 +#define CMD_HARDWARE_ERR        0x0006
    5.25 +#define CMD_CONNECTION_LOST     0x0007
    5.26 +#define CMD_ABORTED             0x0008
    5.27 +#define CMD_ABORT_FAILED        0x0009
    5.28 +#define CMD_UNSOLICITED_ABORT   0x000A
    5.29 +#define CMD_TIMEOUT             0x000B
    5.30 +#define CMD_UNABORTABLE		0x000C
    5.31 +
    5.32 +/* transfer direction */
    5.33 +#define XFER_NONE               0x00
    5.34 +#define XFER_WRITE              0x01
    5.35 +#define XFER_READ               0x02
    5.36 +#define XFER_RSVD               0x03
    5.37 +
    5.38 +/* task attribute */
    5.39 +#define ATTR_UNTAGGED           0x00
    5.40 +#define ATTR_SIMPLE             0x04
    5.41 +#define ATTR_HEADOFQUEUE        0x05
    5.42 +#define ATTR_ORDERED            0x06
    5.43 +#define ATTR_ACA                0x07
    5.44 +
    5.45 +/* cdb type */
    5.46 +#define TYPE_CMD				0x00
    5.47 +#define TYPE_MSG				0x01
    5.48 +
    5.49 +/* config space register offsets */
    5.50 +#define CFG_VENDORID            0x00
    5.51 +#define CFG_DEVICEID            0x02
    5.52 +#define CFG_I2OBAR              0x10
    5.53 +#define CFG_MEM1BAR             0x14
    5.54 +
    5.55 +/* i2o space register offsets */
    5.56 +#define I2O_IBDB_SET            0x20
    5.57 +#define I2O_IBDB_CLEAR          0x70
    5.58 +#define I2O_INT_STATUS          0x30
    5.59 +#define I2O_INT_MASK            0x34
    5.60 +#define I2O_IBPOST_Q            0x40
    5.61 +#define I2O_OBPOST_Q            0x44
    5.62 +
    5.63 +/* Configuration Table */
    5.64 +#define CFGTBL_ChangeReq        0x00000001l
    5.65 +#define CFGTBL_AccCmds          0x00000001l
    5.66 +
    5.67 +#define CFGTBL_Trans_Simple     0x00000002l
    5.68 +
    5.69 +#define CFGTBL_BusType_Ultra2   0x00000001l
    5.70 +#define CFGTBL_BusType_Ultra3   0x00000002l
    5.71 +#define CFGTBL_BusType_Fibre1G  0x00000100l
    5.72 +#define CFGTBL_BusType_Fibre2G  0x00000200l
    5.73 +typedef struct _vals32
    5.74 +{
    5.75 +        __u32   lower;
    5.76 +        __u32   upper;
    5.77 +} vals32;
    5.78 +
    5.79 +typedef union _u64bit
    5.80 +{
    5.81 +   vals32	val32;
    5.82 +   __u64	val;
    5.83 +} u64bit;
    5.84 +
    5.85 +/*  Type defs used in the following structs */
    5.86 +typedef	__u8 BYTE;
    5.87 +typedef	__u16 WORD;
    5.88 +typedef __u16 HWORD;
    5.89 +typedef	__u32 DWORD; 
    5.90 +typedef vals32 QWORD; 
    5.91 +
    5.92 +/* ######################################################################### */
    5.93 +/* STRUCTURES                                                                */
    5.94 +/* ######################################################################### */
    5.95 +#define CISS_MAX_LUN	16	
    5.96 +#define CISS_MAX_PHYS_LUN	1024
    5.97 +/* SCSI-3 Commands */
    5.98 +
    5.99 +#pragma pack(1)	
   5.100 +
   5.101 +#define CISS_INQUIRY 0x12
   5.102 +/* Data returned */
   5.103 +typedef struct _InquiryData_struct
   5.104 +{
   5.105 +  BYTE data_byte[36];
   5.106 +} InquiryData_struct;
   5.107 +
   5.108 +#define CISS_REPORT_LOG 0xc2    /* Report Logical LUNs */
   5.109 +#define CISS_REPORT_PHYS 0xc3   /* Report Physical LUNs */
   5.110 +/*  Data returned */
   5.111 +typedef struct _ReportLUNdata_struct
   5.112 +{
   5.113 +  BYTE LUNListLength[4];
   5.114 +  DWORD reserved;
   5.115 +  BYTE LUN[CISS_MAX_LUN][8];
   5.116 +} ReportLunData_struct;
   5.117 +
   5.118 +#define CCISS_READ_CAPACITY 0x25 /* Read Capacity */ 
   5.119 +typedef struct _ReadCapdata_struct
   5.120 +{
   5.121 +  BYTE total_size[4];	/* Total size in blocks */
   5.122 +  BYTE block_size[4];	/* Size of blocks in bytes */
   5.123 +} ReadCapdata_struct;
   5.124 +
   5.125 +/* 12 byte commands not implemented in firmware yet.  */
   5.126 +/* #define CCISS_READ 	0xa8	Read(12)	*/
   5.127 +/* #define CCISS_WRITE	0xaa	Write(12) 	*/
   5.128 + #define CCISS_READ   0x28    /* Read(10) */
   5.129 + #define CCISS_WRITE  0x2a    /* Write(10) */
   5.130 +
   5.131 +/* BMIC commands */
   5.132 +#define BMIC_READ 0x26
   5.133 +#define BMIC_WRITE 0x27
   5.134 +#define BMIC_CACHE_FLUSH 0xc2
   5.135 +#define CCISS_CACHE_FLUSH 0x01	/* 0xC2 was already being used by CCISS */
   5.136 +
   5.137 +/* Command List Structure */
   5.138 +typedef union _SCSI3Addr_struct {
   5.139 +   struct {
   5.140 +    BYTE Dev;
   5.141 +    BYTE Bus:6;
   5.142 +    BYTE Mode:2;        /* b00 */
   5.143 +  } PeripDev;
   5.144 +   struct {
   5.145 +    BYTE DevLSB;
   5.146 +    BYTE DevMSB:6;
   5.147 +    BYTE Mode:2;        /* b01 */
   5.148 +  } LogDev;
   5.149 +   struct {
   5.150 +    BYTE Dev:5;
   5.151 +    BYTE Bus:3;
   5.152 +    BYTE Targ:6;
   5.153 +    BYTE Mode:2;        /* b10 */
   5.154 +  } LogUnit;
   5.155 +} SCSI3Addr_struct;
   5.156 +
   5.157 +typedef struct _PhysDevAddr_struct {
   5.158 +  DWORD             TargetId:24;
   5.159 +  DWORD             Bus:6;
   5.160 +  DWORD             Mode:2;
   5.161 +  SCSI3Addr_struct  Target[2]; /* 2 level target device addr */
   5.162 +} PhysDevAddr_struct;
   5.163 +  
   5.164 +typedef struct _LogDevAddr_struct {
   5.165 +  DWORD            VolId:30;
   5.166 +  DWORD            Mode:2;
   5.167 +  BYTE             reserved[4];
   5.168 +} LogDevAddr_struct;
   5.169 +
   5.170 +typedef union _LUNAddr_struct {
   5.171 +  BYTE               LunAddrBytes[8];
   5.172 +  SCSI3Addr_struct   SCSI3Lun[4];
   5.173 +  PhysDevAddr_struct PhysDev;
   5.174 +  LogDevAddr_struct  LogDev;
   5.175 +} LUNAddr_struct;
   5.176 +
   5.177 +typedef struct _CommandListHeader_struct {
   5.178 +  BYTE              ReplyQueue;
   5.179 +  BYTE              SGList;
   5.180 +  HWORD             SGTotal;
   5.181 +  QWORD             Tag;
   5.182 +  LUNAddr_struct    LUN;
   5.183 +} CommandListHeader_struct;
   5.184 +typedef struct _RequestBlock_struct {
   5.185 +  BYTE   CDBLen;
   5.186 +  struct {
   5.187 +    BYTE Type:3;
   5.188 +    BYTE Attribute:3;
   5.189 +    BYTE Direction:2;
   5.190 +  } Type;
   5.191 +  HWORD  Timeout;
   5.192 +  BYTE   CDB[16];
   5.193 +} RequestBlock_struct;
   5.194 +typedef struct _ErrDescriptor_struct {
   5.195 +  QWORD  Addr;
   5.196 +  DWORD  Len;
   5.197 +} ErrDescriptor_struct;
   5.198 +typedef struct _SGDescriptor_struct {
   5.199 +  QWORD  Addr;
   5.200 +  DWORD  Len;
   5.201 +  DWORD  Ext;
   5.202 +} SGDescriptor_struct;
   5.203 +
   5.204 +typedef union _MoreErrInfo_struct{
   5.205 +  struct {
   5.206 +    BYTE  Reserved[3];
   5.207 +    BYTE  Type;
   5.208 +    DWORD ErrorInfo;
   5.209 +  }Common_Info;
   5.210 +  struct{
   5.211 +    BYTE  Reserved[2];
   5.212 +    BYTE  offense_size;	/* size of offending entry 	*/
   5.213 +    BYTE  offense_num; 	/* byte # of offense 0-base 	*/
   5.214 +    DWORD offense_value;
   5.215 +  }Invalid_Cmd;
   5.216 +}MoreErrInfo_struct;
   5.217 +typedef struct _ErrorInfo_struct {
   5.218 +  BYTE               ScsiStatus;
   5.219 +  BYTE               SenseLen;
   5.220 +  HWORD              CommandStatus;
   5.221 +  DWORD              ResidualCnt;
   5.222 +  MoreErrInfo_struct MoreErrInfo;
   5.223 +  BYTE               SenseInfo[SENSEINFOBYTES];
   5.224 +} ErrorInfo_struct;
   5.225 +
   5.226 +/* Command types */
   5.227 +#define CMD_RWREQ       0x00
   5.228 +#define CMD_IOCTL_PEND  0x01
   5.229 +#define CMD_SCSI	0x03
   5.230 +#define CMD_MSG_DONE	0x04
   5.231 +#define CMD_MSG_TIMEOUT 0x05
   5.232 +
   5.233 +typedef struct _CommandList_struct {
   5.234 +  CommandListHeader_struct Header;
   5.235 +  RequestBlock_struct      Request;
   5.236 +  ErrDescriptor_struct     ErrDesc;
   5.237 +  SGDescriptor_struct      SG[MAXSGENTRIES];
   5.238 +	/* information associated with the command */ 
   5.239 +  __u32			   busaddr; /* physical address of this record */
   5.240 +  ErrorInfo_struct * 	   err_info; /* pointer to the allocated mem */ 
   5.241 +  int			   ctlr;
   5.242 +  int			   cmd_type; 
   5.243 +  struct _CommandList_struct *prev;
   5.244 +  struct _CommandList_struct *next;
   5.245 +  struct request *	   rq;
   5.246 +  struct completion *waiting;
   5.247 +  int	 retry_count;
   5.248 +#ifdef CONFIG_CISS_SCSI_TAPE
   5.249 +  void * scsi_cmd;
   5.250 +#endif
   5.251 +} CommandList_struct;
   5.252 +
   5.253 +/* Configuration Table Structure */
   5.254 +typedef struct _HostWrite_struct {
   5.255 +  DWORD TransportRequest;
   5.256 +  DWORD Reserved;
   5.257 +  DWORD CoalIntDelay;
   5.258 +  DWORD CoalIntCount;
   5.259 +} HostWrite_struct;
   5.260 +
   5.261 +typedef struct _CfgTable_struct {
   5.262 +  BYTE             Signature[4];
   5.263 +  DWORD            SpecValence;
   5.264 +  DWORD            TransportSupport;
   5.265 +  DWORD            TransportActive;
   5.266 +  HostWrite_struct HostWrite;
   5.267 +  DWORD            CmdsOutMax;
   5.268 +  DWORD            BusTypes;
   5.269 +  DWORD            Reserved; 
   5.270 +  BYTE             ServerName[16];
   5.271 +  DWORD            HeartBeat;
   5.272 +} CfgTable_struct;
   5.273 +#pragma pack()	 
   5.274 +#endif /* CCISS_CMD_H */
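To make the request defines above concrete, a sketch of how they typically combine when building a 10-byte read for a logical volume; the CDB byte layout follows the standard SCSI Read(10) format, and this is not copied from the driver's request-building code:

	c->Request.CDBLen = 10;
	c->Request.Type.Type      = TYPE_CMD;	/* command, not message */
	c->Request.Type.Attribute = ATTR_SIMPLE;
	c->Request.Type.Direction = XFER_READ;
	c->Request.CDB[0] = CCISS_READ;		/* Read(10) */
	/* LBA then goes in CDB[2..5], block count in CDB[7..8] */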
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/drivers/block/cciss_scsi.c	Mon Jan 19 15:29:17 2004 +0000
     6.3 @@ -0,0 +1,1590 @@
     6.4 +/*
     6.5 + *    Disk Array driver for HP SA 5xxx and 6xxx Controllers, SCSI Tape module
     6.6 + *    Copyright 2001, 2002 Hewlett-Packard Development Company, L.P.
     6.7 + *    
     6.8 + *    This program is free software; you can redistribute it and/or modify
     6.9 + *    it under the terms of the GNU General Public License as published by
    6.10 + *    the Free Software Foundation; either version 2 of the License, or
    6.11 + *    (at your option) any later version.
    6.12 + *
    6.13 + *    This program is distributed in the hope that it will be useful,
    6.14 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
    6.15 + *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
    6.16 + *    NON INFRINGEMENT.  See the GNU General Public License for more details.
    6.17 + *
    6.18 + *    You should have received a copy of the GNU General Public License
    6.19 + *    along with this program; if not, write to the Free Software
    6.20 + *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
    6.21 + *
    6.22 + *    Questions/Comments/Bugfixes to Cciss-discuss@lists.sourceforge.net 
    6.23 + *
    6.24 + *    Author: Stephen M. Cameron
    6.25 + */
    6.26 +#ifdef CONFIG_CISS_SCSI_TAPE
    6.27 +
    6.28 +/* Here we have code to present the driver as a scsi driver
    6.29 +   as it is simultaneously presented as a block driver.  The
    6.30 +   reason for doing this is to allow access to SCSI tape drives
    6.31 +   through the array controller.  Note in particular, neither
    6.32 +   physical nor logical disks are presented through the scsi layer. */
    6.33 +
    6.34 +#include "../scsi/scsi.h"
    6.35 +#include "../scsi/hosts.h"
    6.36 +#include <asm/atomic.h>
    6.37 +#include <linux/timer.h>
    6.38 +
    6.39 +#include "cciss_scsi.h"
    6.40 +
    6.41 +/* some prototypes... */
    6.42 +static int sendcmd(
    6.43 +	__u8	cmd,
    6.44 +	int	ctlr,
    6.45 +	void	*buff,
    6.46 +	size_t	size,
    6.47 +	unsigned int use_unit_num, /* 0: address the controller,
    6.48 +				      1: address logical volume log_unit,
    6.49 +				      2: address is in scsi3addr */
    6.50 +	unsigned int log_unit,
    6.51 +	__u8	page_code,
    6.52 +	unsigned char *scsi3addr );
    6.53 +
    6.54 +
    6.55 +int __init cciss_scsi_detect(Scsi_Host_Template *tpnt);
    6.56 +int cciss_scsi_release(struct Scsi_Host *sh);
    6.57 +const char *cciss_scsi_info(struct Scsi_Host *sa);
    6.58 +
    6.59 +int cciss_scsi_proc_info(
    6.60 +		char *buffer, /* data buffer */
    6.61 +		char **start, 	   /* where data in buffer starts */
    6.62 +		off_t offset,	   /* offset from start of imaginary file */
    6.63 +		int length, 	   /* length of data in buffer */
    6.64 +		int hostnum, 	   /* which host adapter (always zero for me) */
    6.65 +		int func);	   /* 0 == read, 1 == write */
    6.66 +
    6.67 +int cciss_scsi_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *));
    6.68 +#if 0
    6.69 +int cciss_scsi_abort(Scsi_Cmnd *cmd);
    6.70 +#if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
    6.71 +int cciss_scsi_reset(Scsi_Cmnd *cmd, unsigned int reset_flags);
    6.72 +#else
    6.73 +int cciss_scsi_reset(Scsi_Cmnd *cmd);
    6.74 +#endif
    6.75 +#endif
    6.76 +
    6.77 +static struct cciss_scsi_hba_t ccissscsi[MAX_CTLR];
    6.78 +
    6.79 +/* We need one Scsi_Host_Template *per controller* instead of
    6.80 +   the usual one Scsi_Host_Template per controller *type*. This
    6.81 +   is so PCI hot plug could have a remote possibility of still
    6.82 +   working even with the SCSI system.  It's so
    6.83 +   scsi_unregister_module will differentiate the controllers.
    6.84 +   When register_scsi_module is called, each host template is
    6.85 +   customized (name change) in cciss_register_scsi()
    6.86 +   (that's called from cciss.c:cciss_init_one()) */
    6.87 +
    6.88 +static
    6.89 +Scsi_Host_Template driver_template[MAX_CTLR];
    6.90 +
    6.91 +#pragma pack(1)
    6.92 +struct cciss_scsi_cmd_stack_elem_t {
    6.93 +	CommandList_struct cmd;
    6.94 +	ErrorInfo_struct Err;
    6.95 +	__u32 busaddr; // 32 bits always, must fit through cmd register.
    6.96 +};
    6.97 +
    6.98 +#pragma pack()
    6.99 +
   6.100 +#define CMD_STACK_SIZE (SCSI_CCISS_CAN_QUEUE * \
   6.101 +		CCISS_MAX_SCSI_DEVS_PER_HBA + 2)
   6.102 +			// plus two for init time usage
   6.103 +
   6.104 +#pragma pack(1)
   6.105 +struct cciss_scsi_cmd_stack_t {
   6.106 +	struct cciss_scsi_cmd_stack_elem_t *pool;
   6.107 +	struct cciss_scsi_cmd_stack_elem_t *elem[CMD_STACK_SIZE];
   6.108 +	dma_addr_t cmd_pool_handle;
   6.109 +	int top;
   6.110 +};
   6.111 +#pragma pack()
   6.112 +
   6.113 +struct cciss_scsi_adapter_data_t {
   6.114 +	struct Scsi_Host *scsi_host;
   6.115 +	struct cciss_scsi_cmd_stack_t cmd_stack;
   6.116 +	int registered;
   6.117 +	spinlock_t lock; // to protect ccissscsi[ctlr];
   6.118 +};
   6.119 +#if 1
   6.120 +#define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \
   6.121 +	&(((struct cciss_scsi_adapter_data_t *) \
   6.122 +	hba[ctlr]->scsi_ctlr)->lock), flags);
   6.123 +#define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \
   6.124 +	&(((struct cciss_scsi_adapter_data_t *) \
   6.125 +	hba[ctlr]->scsi_ctlr)->lock), flags);
   6.126 +#else
   6.127 +#define CPQ_TAPE_LOCK(x,y)
   6.128 +#define CPQ_TAPE_UNLOCK(x,y)
   6.129 +#endif
   6.130 +
   6.131 +static CommandList_struct *
   6.132 +scsi_cmd_alloc(ctlr_info_t *h)
   6.133 +{
   6.134 +	/* assume only one process in here at a time, locking done by caller. */
   6.135 +
   6.136 +	/* take the top memory chunk off the stack and return it, if any. */
   6.137 +	struct cciss_scsi_cmd_stack_elem_t *c;
   6.138 +	struct cciss_scsi_adapter_data_t *sa;
   6.139 +	struct cciss_scsi_cmd_stack_t *stk;
   6.140 +	u64bit temp64;
   6.141 +
   6.142 +	sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
   6.143 +	stk = &sa->cmd_stack;
   6.144 +
   6.145 +	if (stk->top < 0)
   6.146 +		return NULL;
   6.147 +	c = stk->elem[stk->top];
   6.148 +	memset(&c->cmd, 0, sizeof(c->cmd));
   6.149 +	memset(&c->Err, 0, sizeof(c->Err));
   6.150 +	/* set physical addr of cmd and addr of scsi parameters */
   6.151 +	c->cmd.busaddr = c->busaddr;
   6.152 +
   6.153 +	temp64.val = (__u64) (c->busaddr + sizeof(CommandList_struct));
   6.154 +	stk->top--;
   6.155 +	c->cmd.ErrDesc.Addr.lower = temp64.val32.lower;
   6.156 +	c->cmd.ErrDesc.Addr.upper = temp64.val32.upper;
   6.157 +	c->cmd.ErrDesc.Len = sizeof(ErrorInfo_struct);
   6.158 +
   6.159 +	c->cmd.ctlr = h->ctlr;
   6.160 +	c->cmd.err_info = &c->Err;
   6.161 +
   6.162 +	return (CommandList_struct *) c;
   6.163 +}
   6.164 +
   6.165 +static void
   6.166 +scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
   6.167 +{
   6.168 +	/* assume only one process in here at a time, locking done by caller. */
   6.169 +	/* drop the free memory chunk on top of the stack. */
   6.170 +
   6.171 +	struct cciss_scsi_adapter_data_t *sa;
   6.172 +	struct cciss_scsi_cmd_stack_t *stk;
   6.173 +
   6.174 +	sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
   6.175 +	stk = &sa->cmd_stack;
   6.176 +	if (stk->top >= CMD_STACK_SIZE) {
   6.177 +		printk("cciss: scsi_cmd_free called too many times.\n");
   6.178 +		BUG();
   6.179 +	}
   6.180 +	stk->top++;
   6.181 +	stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) cmd;
   6.182 +}
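scsi_cmd_alloc() and scsi_cmd_free() implement a LIFO free list over one pci_alloc_consistent() block; locking is left to the caller. A usage sketch with the CPQ_TAPE_LOCK macros defined above (illustrative, error handling abbreviated):

	CommandList_struct *c;
	unsigned long flags;

	CPQ_TAPE_LOCK(ctlr, flags);
	c = scsi_cmd_alloc(hba[ctlr]);		/* NULL when the stack is empty */
	CPQ_TAPE_UNLOCK(ctlr, flags);
	if (c == NULL)
		return;
	/* ... fill in c->Header / c->Request and submit to the board ... */
	CPQ_TAPE_LOCK(ctlr, flags);
	scsi_cmd_free(hba[ctlr], c);
	CPQ_TAPE_UNLOCK(ctlr, flags);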
   6.183 +
   6.184 +static int
   6.185 +scsi_cmd_stack_setup(int ctlr)
   6.186 +{
   6.187 +	int i;
   6.188 +	struct cciss_scsi_adapter_data_t *sa;
   6.189 +	struct cciss_scsi_cmd_stack_t *stk;
   6.190 +	size_t size;
   6.191 +
   6.192 +	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
   6.193 +	stk = &sa->cmd_stack;
   6.194 +	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
   6.195 +
   6.196 +	stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
   6.197 +		pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle);
   6.198 +
   6.199 +	if (stk->pool == NULL) {
   6.200 +		printk("stk->pool is null\n");
   6.201 +		return -1;
   6.202 +	}
   6.203 +
   6.204 +	for (i=0; i<CMD_STACK_SIZE; i++) {
   6.205 +		stk->elem[i] = &stk->pool[i];
   6.206 +		stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle +
   6.207 +			(sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
   6.208 +	}
   6.209 +	stk->top = CMD_STACK_SIZE-1;
   6.210 +	return 0;
   6.211 +}
   6.212 +
   6.213 +static void
   6.214 +scsi_cmd_stack_free(int ctlr)
   6.215 +{
   6.216 +	struct cciss_scsi_adapter_data_t *sa;
   6.217 +	struct cciss_scsi_cmd_stack_t *stk;
   6.218 +	size_t size;
   6.219 +
   6.220 +	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
   6.221 +	stk = &sa->cmd_stack;
   6.222 +	if (stk->top != CMD_STACK_SIZE-1) {
   6.223 +		printk( "cciss: %d scsi commands are still outstanding.\n",
   6.224 +			CMD_STACK_SIZE - stk->top);
   6.225 +		// BUG();
   6.226 +		printk("WE HAVE A BUG HERE!!! stk=0x%08x\n",
   6.227 +			(unsigned int) stk);
   6.228 +	}
   6.229 +	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
   6.230 +
   6.231 +	pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle);
   6.232 +	stk->pool = NULL;
   6.233 +}
   6.234 +
   6.235 +/* scsi_device_types comes from scsi.h */
   6.236 +#define DEVICETYPE(n) (n<0 || n>MAX_SCSI_DEVICE_CODE) ? \
   6.237 +	"Unknown" : scsi_device_types[n]
   6.238 +
   6.239 +#if 0
   6.240 +static int xmargin=8;
   6.241 +static int amargin=60;
   6.242 +
   6.243 +static void
   6.244 +print_bytes (unsigned char *c, int len, int hex, int ascii)
   6.245 +{
   6.246 +
   6.247 +	int i;
   6.248 +	unsigned char *x;
   6.249 +
   6.250 +	if (hex)
   6.251 +	{
   6.252 +		x = c;
   6.253 +		for (i=0;i<len;i++)
   6.254 +		{
   6.255 +			if ((i % xmargin) == 0 && i>0) printk("\n");
   6.256 +			if ((i % xmargin) == 0) printk("0x%04x:", i);
   6.257 +			printk(" %02x", *x);
   6.258 +			x++;
   6.259 +		}
   6.260 +		printk("\n");
   6.261 +	}
   6.262 +	if (ascii)
   6.263 +	{
   6.264 +		x = c;
   6.265 +		for (i=0;i<len;i++)
   6.266 +		{
   6.267 +			if ((i % amargin) == 0 && i>0) printk("\n");
   6.268 +			if ((i % amargin) == 0) printk("0x%04x:", i);
   6.269 +			if (*x > 26 && *x < 128) printk("%c", *x);
   6.270 +			else printk(".");
   6.271 +			x++;
   6.272 +		}
   6.273 +		printk("\n");
   6.274 +	}
   6.275 +}
   6.276 +
   6.277 +static void
   6.278 +print_cmd(CommandList_struct *cp)
   6.279 +{
   6.280 +	printk("queue:%d\n", cp->Header.ReplyQueue);
   6.281 +	printk("sglist:%d\n", cp->Header.SGList);
   6.282 +	printk("sgtot:%d\n", cp->Header.SGTotal);
   6.283 +	printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
   6.284 +			cp->Header.Tag.lower);
   6.285 +	printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
   6.286 +		cp->Header.LUN.LunAddrBytes[0],
   6.287 +		cp->Header.LUN.LunAddrBytes[1],
   6.288 +		cp->Header.LUN.LunAddrBytes[2],
   6.289 +		cp->Header.LUN.LunAddrBytes[3],
   6.290 +		cp->Header.LUN.LunAddrBytes[4],
   6.291 +		cp->Header.LUN.LunAddrBytes[5],
   6.292 +		cp->Header.LUN.LunAddrBytes[6],
   6.293 +		cp->Header.LUN.LunAddrBytes[7]);
   6.294 +	printk("CDBLen:%d\n", cp->Request.CDBLen);
   6.295 +	printk("Type:%d\n",cp->Request.Type.Type);
   6.296 +	printk("Attr:%d\n",cp->Request.Type.Attribute);
   6.297 +	printk(" Dir:%d\n",cp->Request.Type.Direction);
   6.298 +	printk("Timeout:%d\n",cp->Request.Timeout);
   6.299 +	printk( "CDB: %02x %02x %02x %02x %02x %02x %02x %02x"
   6.300 +		" %02x %02x %02x %02x %02x %02x %02x %02x\n",
   6.301 +		cp->Request.CDB[0], cp->Request.CDB[1],
   6.302 +		cp->Request.CDB[2], cp->Request.CDB[3],
   6.303 +		cp->Request.CDB[4], cp->Request.CDB[5],
   6.304 +		cp->Request.CDB[6], cp->Request.CDB[7],
   6.305 +		cp->Request.CDB[8], cp->Request.CDB[9],
   6.306 +		cp->Request.CDB[10], cp->Request.CDB[11],
   6.307 +		cp->Request.CDB[12], cp->Request.CDB[13],
   6.308 +		cp->Request.CDB[14], cp->Request.CDB[15]),
   6.309 +	printk("edesc.Addr: 0x%08x/0%08x, Len  = %d\n",
   6.310 +		cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
   6.311 +			cp->ErrDesc.Len);
   6.312 +	printk("sgs..........Errorinfo:\n");
   6.313 +	printk("scsistatus:%d\n", cp->err_info->ScsiStatus);
   6.314 +	printk("senselen:%d\n", cp->err_info->SenseLen);
   6.315 +	printk("cmd status:%d\n", cp->err_info->CommandStatus);
   6.316 +	printk("resid cnt:%d\n", cp->err_info->ResidualCnt);
   6.317 +	printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
   6.318 +	printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
   6.319 +	printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
   6.320 +
   6.321 +}
   6.322 +
   6.323 +#endif
   6.324 +
   6.325 +static int
   6.326 +find_bus_target_lun(int ctlr, int *bus, int *target, int *lun)
   6.327 +{
   6.328 +	/* finds an unused bus, target, lun for a new device */
   6.329 +	/* assumes hba[ctlr]->scsi_ctlr->lock is held */
   6.330 +	int i, found=0;
   6.331 +	unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA];
   6.332 +
   6.333 +	memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA);
   6.334 +
   6.335 +#	if SELF_SCSI_ID >= 0
   6.336 +		target_taken[SELF_SCSI_ID] = 1;
   6.337 +#	endif
   6.338 +	for (i=0;i<ccissscsi[ctlr].ndevices;i++)
   6.339 +		target_taken[ccissscsi[ctlr].dev[i].target] = 1;
   6.340 +
   6.341 +	for (i=0;i<CCISS_MAX_SCSI_DEVS_PER_HBA;i++) {
   6.342 +		if (!target_taken[i]) {
   6.343 +			*bus = 0; *target=i; *lun = 0; found=1;
   6.344 +			break;
   6.345 +		}
   6.346 +	}
   6.347 +	return (!found);
   6.348 +}
   6.349 +
   6.350 +static int
   6.351 +cciss_scsi_add_entry(int ctlr, int hostno,
   6.352 +		unsigned char *scsi3addr, int devtype)
   6.353 +{
   6.354 +	/* assumes hba[ctlr]->scsi_ctlr->lock is held */
   6.355 +	int n = ccissscsi[ctlr].ndevices;
   6.356 +	struct cciss_scsi_dev_t *sd;
   6.357 +
   6.358 +	if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
   6.359 +		printk("cciss%d: Too many devices, "
   6.360 +			"some will be inaccessible.\n", ctlr);
   6.361 +		return -1;
   6.362 +	}
   6.363 +	sd = &ccissscsi[ctlr].dev[n];
   6.364 +	if (find_bus_target_lun(ctlr, &sd->bus, &sd->target, &sd->lun) != 0)
   6.365 +		return -1;
   6.366 +	memcpy(&sd->scsi3addr[0], scsi3addr, 8);
   6.367 +	sd->devtype = devtype;
   6.368 +	ccissscsi[ctlr].ndevices++;
   6.369 +
    6.370 +	/* initially (before registering with the scsi layer) we don't
    6.371 +	   know our hostno, and we don't want to print anything the first
    6.372 +	   time anyway (the scsi layer's inquiries will show that info) */
   6.373 +	if (hostno != -1)
   6.374 +		printk("cciss%d: %s device c%db%dt%dl%d added.\n",
   6.375 +			ctlr, DEVICETYPE(sd->devtype), hostno,
   6.376 +			sd->bus, sd->target, sd->lun);
   6.377 +	return 0;
   6.378 +}
   6.379 +
   6.380 +static void
   6.381 +cciss_scsi_remove_entry(int ctlr, int hostno, int entry)
   6.382 +{
   6.383 +	/* assumes hba[ctlr]->scsi_ctlr->lock is held */
   6.384 +	int i;
   6.385 +	struct cciss_scsi_dev_t sd;
   6.386 +
   6.387 +	if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
   6.388 +	sd = ccissscsi[ctlr].dev[entry];
   6.389 +	for (i=entry;i<ccissscsi[ctlr].ndevices-1;i++)
   6.390 +		ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1];
   6.391 +	ccissscsi[ctlr].ndevices--;
   6.392 +	printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
   6.393 +		ctlr, DEVICETYPE(sd.devtype), hostno,
   6.394 +			sd.bus, sd.target, sd.lun);
   6.395 +}
   6.396 +
   6.397 +
   6.398 +#define SCSI3ADDR_EQ(a,b) ( \
   6.399 +	(a)[7] == (b)[7] && \
   6.400 +	(a)[6] == (b)[6] && \
   6.401 +	(a)[5] == (b)[5] && \
   6.402 +	(a)[4] == (b)[4] && \
   6.403 +	(a)[3] == (b)[3] && \
   6.404 +	(a)[2] == (b)[2] && \
   6.405 +	(a)[1] == (b)[1] && \
   6.406 +	(a)[0] == (b)[0])
   6.407 +
   6.408 +static int
   6.409 +adjust_cciss_scsi_table(int ctlr, int hostno,
   6.410 +	struct cciss_scsi_dev_t sd[], int nsds)
   6.411 +{
   6.412 +	/* sd contains scsi3 addresses and devtypes, but
    6.413 +	   bus, target and lun are not filled in.  This function
   6.414 +	   takes what's in sd to be the current and adjusts
   6.415 +	   ccissscsi[] to be in line with what's in sd. */
   6.416 +
   6.417 +	int i,j, found, changes=0;
   6.418 +	struct cciss_scsi_dev_t *csd;
   6.419 +	unsigned long flags;
   6.420 +
   6.421 +	CPQ_TAPE_LOCK(ctlr, flags);
   6.422 +
   6.423 +	/* find any devices in ccissscsi[] that are not in
   6.424 +	   sd[] and remove them from ccissscsi[] */
   6.425 +
   6.426 +	i = 0;
   6.427 +	while(i<ccissscsi[ctlr].ndevices) {
   6.428 +		csd = &ccissscsi[ctlr].dev[i];
   6.429 +		found=0;
   6.430 +		for (j=0;j<nsds;j++) {
   6.431 +			if (SCSI3ADDR_EQ(sd[j].scsi3addr,
   6.432 +				csd->scsi3addr)) {
   6.433 +				if (sd[j].devtype == csd->devtype)
   6.434 +					found=2;
   6.435 +				else
   6.436 +					found=1;
   6.437 +				break;
   6.438 +			}
   6.439 +		}
   6.440 +
   6.441 +		if (found == 0) { /* device no longer present. */
   6.442 +			changes++;
   6.443 +			/* printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
   6.444 +				ctlr, DEVICETYPE(csd->devtype), hostno,
   6.445 +					csd->bus, csd->target, csd->lun); */
   6.446 +			cciss_scsi_remove_entry(ctlr, hostno, i);
   6.447 +			/* note, i not incremented */
   6.448 +		}
   6.449 +		else if (found == 1) { /* device is different kind */
   6.450 +			changes++;
   6.451 +			printk("cciss%d: device c%db%dt%dl%d type changed "
   6.452 +				"(device type now %s).\n",
   6.453 +				ctlr, hostno, csd->bus, csd->target, csd->lun,
   6.454 +					DEVICETYPE(csd->devtype));
   6.455 +			csd->devtype = sd[j].devtype;
   6.456 +			i++;	/* so just move along. */
   6.457 +		} else 		/* device is same as it ever was, */
   6.458 +			i++;	/* so just move along. */
   6.459 +	}
   6.460 +
   6.461 +	/* Now, make sure every device listed in sd[] is also
   6.462 + 	   listed in ccissscsi[], adding them if they aren't found */
   6.463 +
   6.464 +	for (i=0;i<nsds;i++) {
   6.465 +		found=0;
   6.466 +		for (j=0;j<ccissscsi[ctlr].ndevices;j++) {
   6.467 +			csd = &ccissscsi[ctlr].dev[j];
   6.468 +			if (SCSI3ADDR_EQ(sd[i].scsi3addr,
   6.469 +				csd->scsi3addr)) {
   6.470 +				if (sd[i].devtype == csd->devtype)
   6.471 +					found=2;	/* found device */
   6.472 +				else
   6.473 +					found=1; 	/* found a bug. */
   6.474 +				break;
   6.475 +			}
   6.476 +		}
   6.477 +		if (!found) {
   6.478 +			changes++;
   6.479 +			if (cciss_scsi_add_entry(ctlr, hostno,
   6.480 +				&sd[i].scsi3addr[0], sd[i].devtype) != 0)
   6.481 +				break;
   6.482 +		} else if (found == 1) {
   6.483 +			/* should never happen... */
   6.484 +			changes++;
   6.485 +			printk("cciss%d: device unexpectedly changed type\n",
   6.486 +				ctlr);
   6.487 +			/* but if it does happen, we just ignore that device */
   6.488 +		}
   6.489 +	}
   6.490 +	CPQ_TAPE_UNLOCK(ctlr, flags);
   6.491 +
   6.492 +	if (!changes)
   6.493 +		printk("cciss%d: No device changes detected.\n", ctlr);
   6.494 +
   6.495 +	return 0;
   6.496 +}
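          +
          +/* In other words: pass one walks ccissscsi[] and drops (or retypes)
          +   anything the controller no longer reports, pass two walks the fresh
          +   sd[] list and adds anything not already in the table.  A tape that
          +   was unplugged simply vanishes from the table on the next rescan,
          +   while a new one is given the first free target number by
          +   find_bus_target_lun(). */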
   6.497 +
   6.498 +static int
   6.499 +lookup_scsi3addr(int ctlr, int bus, int target, int lun, char *scsi3addr)
   6.500 +{
   6.501 +	int i;
   6.502 +	struct cciss_scsi_dev_t *sd;
   6.503 +	unsigned long flags;
   6.504 +
   6.505 +	CPQ_TAPE_LOCK(ctlr, flags);
   6.506 +	for (i=0;i<ccissscsi[ctlr].ndevices;i++) {
   6.507 +		sd = &ccissscsi[ctlr].dev[i];
   6.508 +		if (sd->bus == bus &&
   6.509 +		    sd->target == target &&
   6.510 +		    sd->lun == lun) {
   6.511 +			memcpy(scsi3addr, &sd->scsi3addr[0], 8);
   6.512 +			CPQ_TAPE_UNLOCK(ctlr, flags);
   6.513 +			return 0;
   6.514 +		}
   6.515 +	}
   6.516 +	CPQ_TAPE_UNLOCK(ctlr, flags);
   6.517 +	return -1;
   6.518 +}
   6.519 +
   6.520 +
   6.521 +static void
   6.522 +cciss_find_non_disk_devices(int cntl_num)
   6.523 +{
   6.524 +	ReportLunData_struct *ld_buff;
   6.525 +	InquiryData_struct *inq_buff;
   6.526 +	int return_code;
   6.527 +	int i;
   6.528 +	int listlength = 0;
   6.529 +	int num_luns;
   6.530 +	unsigned char scsi3addr[8];
   6.531 +	unsigned long flags;
   6.532 +	int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
   6.533 +
   6.534 +	hba[cntl_num]->scsi_ctlr = (void *)
   6.535 +		kmalloc(sizeof(struct cciss_scsi_adapter_data_t),
   6.536 +			GFP_KERNEL);
   6.537 +	if (hba[cntl_num]->scsi_ctlr == NULL)
   6.538 +		return;
   6.539 +
   6.540 +	((struct cciss_scsi_adapter_data_t *)
   6.541 +		hba[cntl_num]->scsi_ctlr)->scsi_host = NULL;
   6.542 +	((struct cciss_scsi_adapter_data_t *)
   6.543 +		hba[cntl_num]->scsi_ctlr)->lock = SPIN_LOCK_UNLOCKED;
   6.544 +	((struct cciss_scsi_adapter_data_t *)
   6.545 +		hba[cntl_num]->scsi_ctlr)->registered = 0;
   6.546 +
   6.547 +	if (scsi_cmd_stack_setup(cntl_num) != 0) {
    6.548 +		printk("cciss: scsi_cmd_stack_setup returned non-zero!\n");
   6.549 +		return;
   6.550 +	}
   6.551 +
   6.552 +	ld_buff = kmalloc(reportlunsize, GFP_KERNEL);
   6.553 +	if (ld_buff == NULL) {
   6.554 +		printk(KERN_ERR "cciss: out of memory\n");
   6.555 +		return;
   6.556 +	}
   6.557 +	memset(ld_buff, 0, sizeof(ReportLunData_struct));
   6.558 +	inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
   6.559 +	if (inq_buff == NULL) {
   6.560 +		printk(KERN_ERR "cciss: out of memory\n");
   6.561 +		kfree(ld_buff);
   6.562 +		return;
   6.563 +	}
   6.564 +
   6.565 +	/* Get the physical luns */
   6.566 +	return_code = sendcmd(CISS_REPORT_PHYS, cntl_num, ld_buff,
   6.567 +			reportlunsize, 0, 0, 0, NULL );
   6.568 +
   6.569 +	if( return_code == IO_OK) {
   6.570 +		unsigned char *c = &ld_buff->LUNListLength[0];
   6.571 +		listlength = (c[0] << 24) | (c[1] << 16) | (c[2] << 8) | c[3];
   6.572 +	}
   6.573 +	else {  /* getting report of physical luns failed */
   6.574 +		printk(KERN_WARNING "cciss: report physical luns"
   6.575 +			" command failed\n");
   6.576 +		listlength = 0;
   6.577 +	}
   6.578 +
   6.579 +	CPQ_TAPE_LOCK(cntl_num, flags);
   6.580 +	ccissscsi[cntl_num].ndevices = 0;
    6.581 +	num_luns = listlength / 8; // 8 bytes per entry
   6.582 +	/* printk("Found %d LUNs\n", num_luns); */
   6.583 +
   6.584 +	if (num_luns > CISS_MAX_PHYS_LUN)
   6.585 +	{
   6.586 +		printk(KERN_WARNING
   6.587 +			"cciss: Maximum physical LUNs (%d) exceeded.  "
   6.588 +			"%d LUNs ignored.\n", CISS_MAX_PHYS_LUN,
   6.589 +			num_luns - CISS_MAX_PHYS_LUN);
   6.590 +		num_luns = CISS_MAX_PHYS_LUN;
   6.591 +	}
   6.592 +
   6.593 +	for(i=0; i<num_luns; i++) {
   6.594 +		/* Execute an inquiry to figure the device type */
   6.595 +		memset(inq_buff, 0, sizeof(InquiryData_struct));
   6.596 +		memcpy(scsi3addr, ld_buff->LUN[i], 8); /* ugly... */
   6.597 +		return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
   6.598 +			sizeof(InquiryData_struct), 2, 0 ,0, scsi3addr );
   6.599 +	  	if (return_code == IO_OK) {
   6.600 +			if(inq_buff->data_byte[8] == 0xFF)
   6.601 +			{
   6.602 +			   printk(KERN_WARNING "cciss: inquiry failed\n");
   6.603 +			} else {
   6.604 +			   int devtype;
   6.605 +
   6.606 +			   /* printk("Inquiry...\n");
   6.607 +			   print_bytes((unsigned char *) inq_buff, 36, 1, 1); */
   6.608 +			   devtype = (inq_buff->data_byte[0] & 0x1f);
   6.609 +
   6.610 +			   switch (devtype)
   6.611 +			   {
   6.612 +			    case 0x01: /* sequential access, (tape) */
   6.613 +			    case 0x08: /* medium changer */
    6.614 +					  /* these are the only kinds of */
    6.615 +					  /* devices we want to expose here. */
   6.616 +				if (cciss_scsi_add_entry(cntl_num, -1,
   6.617 +					(unsigned char *) ld_buff->LUN[i],
   6.618 +					devtype) != 0)
   6.619 +						i=num_luns; // leave loop
   6.620 +				break;
   6.621 +			    default:
   6.622 +				break;
   6.623 +			   }
   6.624 +
   6.625 +			}
   6.626 +		}
   6.627 +		else printk("cciss: inquiry failed.\n");
   6.628 +	}
   6.629 +#if 0
   6.630 +	for (i=0;i<ccissscsi[cntl_num].ndevices;i++)
   6.631 +		printk("Tape device presented at c%db%dt%dl%d\n",
   6.632 +			cntl_num, // <-- this is wrong
   6.633 +			ccissscsi[cntl_num].dev[i].bus,
   6.634 +			ccissscsi[cntl_num].dev[i].target,
   6.635 +			ccissscsi[cntl_num].dev[i].lun);
   6.636 +#endif
   6.637 +	CPQ_TAPE_UNLOCK(cntl_num, flags);
   6.638 +	kfree(ld_buff);
   6.639 +	kfree(inq_buff);
   6.640 +	return;
   6.641 +}
   6.642 +
   6.643 +static void
   6.644 +complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
   6.645 +{
   6.646 +	Scsi_Cmnd *cmd;
   6.647 +	ctlr_info_t *ctlr;
   6.648 +	u64bit addr64;
   6.649 +	ErrorInfo_struct *ei;
   6.650 +
   6.651 +	ei = cp->err_info;
   6.652 +
   6.653 +	/* First, see if it was a message rather than a command */
   6.654 +	if (cp->Request.Type.Type == TYPE_MSG)  {
   6.655 +		cp->cmd_type = CMD_MSG_DONE;
   6.656 +		return;
   6.657 +	}
   6.658 +
    6.659 +	/* we stashed a ptr to the scsi cmd in cp->scsi_cmd when queueing */
   6.660 +	cmd = (Scsi_Cmnd *) cp->scsi_cmd;
   6.661 +	ctlr = hba[cp->ctlr];
   6.662 +
   6.663 +	/* undo the DMA mappings */
   6.664 +
   6.665 +	if (cmd->use_sg) {
   6.666 +		pci_unmap_sg(ctlr->pdev,
   6.667 +			cmd->buffer, cmd->use_sg,
   6.668 +				scsi_to_pci_dma_dir(cmd->sc_data_direction));
   6.669 +	}
   6.670 +	else if (cmd->request_bufflen) {
   6.671 +		addr64.val32.lower = cp->SG[0].Addr.lower;
   6.672 +		addr64.val32.upper = cp->SG[0].Addr.upper;
   6.673 +		pci_unmap_single(ctlr->pdev, (dma_addr_t) addr64.val,
   6.674 +			cmd->request_bufflen,
   6.675 +				scsi_to_pci_dma_dir(cmd->sc_data_direction));
   6.676 +	}
   6.677 +
   6.678 +	cmd->result = (DID_OK << 16); 		/* host byte */
   6.679 +	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
   6.680 +	/* cmd->result |= (GOOD < 1); */		/* status byte */
   6.681 +
   6.682 +	cmd->result |= (ei->ScsiStatus);
   6.683 +	/* printk("Scsistatus is 0x%02x\n", ei->ScsiStatus);  */
   6.684 +
   6.685 +	/* copy the sense data whether we need to or not. */
   6.686 +
   6.687 +	memcpy(cmd->sense_buffer, ei->SenseInfo,
   6.688 +		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
   6.689 +			SCSI_SENSE_BUFFERSIZE :
   6.690 +			ei->SenseLen);
   6.691 +	cmd->resid = ei->ResidualCnt;
   6.692 +
   6.693 +	if(ei->CommandStatus != 0)
   6.694 +	{ /* an error has occurred */
   6.695 +		switch(ei->CommandStatus)
   6.696 +		{
   6.697 +			case CMD_TARGET_STATUS:
   6.698 +				/* Pass it up to the upper layers... */
   6.699 +				if( ei->ScsiStatus)
   6.700 +					cmd->result |= (ei->ScsiStatus < 1);
   6.701 +				else {  /* scsi status is zero??? How??? */
   6.702 +
   6.703 +	/* Ordinarily, this case should never happen, but there is a bug
   6.704 +	   in some released firmware revisions that allows it to happen
   6.705 +	   if, for example, a 4100 backplane loses power and the tape
   6.706 +	   drive is in it.  We assume that it's a fatal error of some
   6.707 +	   kind because we can't show that it wasn't. We will make it
   6.708 +	   look like selection timeout since that is the most common
   6.709 +	   reason for this to occur, and it's severe enough. */
   6.710 +
   6.711 +					cmd->result = DID_NO_CONNECT << 16;
   6.712 +				}
   6.713 +			break;
   6.714 +			case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
   6.715 +			break;
   6.716 +			case CMD_DATA_OVERRUN:
   6.717 +				printk(KERN_WARNING "cciss: cp %p has"
   6.718 +					" completed with data overrun "
   6.719 +					"reported\n", cp);
   6.720 +			break;
   6.721 +			case CMD_INVALID: {
   6.722 +				/* print_bytes(cp, sizeof(*cp), 1, 0);
   6.723 +				print_cmd(cp); */
   6.724 +     /* We get CMD_INVALID if you address a non-existent tape drive instead
   6.725 +	of a selection timeout (no response).  You will see this if you yank
   6.726 +	out a tape drive, then try to access it. This is kind of a shame
   6.727 +	because it means that any other CMD_INVALID (e.g. driver bug) will
   6.728 +	get interpreted as a missing target. */
   6.729 +				cmd->result = DID_NO_CONNECT << 16;
   6.730 +				}
   6.731 +			break;
   6.732 +			case CMD_PROTOCOL_ERR:
   6.733 +				printk(KERN_WARNING "cciss: cp %p has "
   6.734 +					"protocol error \n", cp);
   6.735 +			break;
   6.736 +			case CMD_HARDWARE_ERR:
   6.737 +				cmd->result = DID_ERROR << 16;
   6.738 +				printk(KERN_WARNING "cciss: cp %p had "
   6.739 +					" hardware error\n", cp);
   6.740 +			break;
   6.741 +			case CMD_CONNECTION_LOST:
   6.742 +				cmd->result = DID_ERROR << 16;
   6.743 +				printk(KERN_WARNING "cciss: cp %p had "
   6.744 +					"connection lost\n", cp);
   6.745 +			break;
   6.746 +			case CMD_ABORTED:
   6.747 +				cmd->result = DID_ABORT << 16;
   6.748 +				printk(KERN_WARNING "cciss: cp %p was "
   6.749 +					"aborted\n", cp);
   6.750 +			break;
   6.751 +			case CMD_ABORT_FAILED:
   6.752 +				cmd->result = DID_ERROR << 16;
   6.753 +				printk(KERN_WARNING "cciss: cp %p reports "
   6.754 +					"abort failed\n", cp);
   6.755 +			break;
   6.756 +			case CMD_UNSOLICITED_ABORT:
   6.757 +				cmd->result = DID_ABORT << 16;
   6.758 +				printk(KERN_WARNING "cciss: cp %p aborted "
    6.759 +					"due to an unsolicited abort\n", cp);
   6.760 +			break;
   6.761 +			case CMD_TIMEOUT:
   6.762 +				cmd->result = DID_TIME_OUT << 16;
    6.763 +				printk(KERN_WARNING "cciss: cp %p timed out\n",
   6.764 +					cp);
   6.765 +			break;
   6.766 +			default:
   6.767 +				cmd->result = DID_ERROR << 16;
   6.768 +				printk(KERN_WARNING "cciss: cp %p returned "
   6.769 +					"unknown status %x\n", cp,
   6.770 +						ei->CommandStatus);
   6.771 +		}
   6.772 +	}
   6.773 +	cmd->scsi_done(cmd);
   6.774 +	scsi_cmd_free(ctlr, cp);
   6.775 +}
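          +
          +/* For reference, cmd->result as assembled above packs the host byte into
          +   bits 16-23, the message byte into bits 8-15 and the SCSI status byte
          +   into bits 0-7: a command that completes cleanly carries
          +   (DID_OK << 16) | (COMMAND_COMPLETE << 8) | ScsiStatus, while e.g. a
          +   vanished target is reported as (DID_NO_CONNECT << 16) with no status. */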
   6.776 +
   6.777 +/* cciss_scsi_detect is called from the scsi mid layer.
   6.778 +   The scsi mid layer (scsi_register_module) is
   6.779 +   called from cciss.c:cciss_init_one().  */
   6.780 +
   6.781 +int __init
   6.782 +cciss_scsi_detect(Scsi_Host_Template *tpnt)
   6.783 +{
   6.784 +	int i;
   6.785 +	struct Scsi_Host *sh;
   6.786 +
   6.787 +	/* Tell the kernel we want to be a SCSI driver... */
   6.788 +	sh = scsi_register(tpnt, sizeof(struct ctlr_info *));
   6.789 +	if (sh == NULL) return 0;
   6.790 +
   6.791 +	sh->io_port = 0;	// good enough?  FIXME,
   6.792 +	sh->n_io_port = 0;	// I don't think we use these two...
   6.793 +
   6.794 +	sh->this_id = SELF_SCSI_ID;
   6.795 +
   6.796 +	i = simple_strtol((char *)&tpnt->name[5], NULL, 10);
   6.797 +
   6.798 +	if (i<0 || i>=MAX_CTLR || hba[i] == NULL) {
   6.799 +		/* we didn't find ourself... we shouldn't get here. */
   6.800 +		printk("cciss_scsi_detect: could not find ourself in hba[]\n");
   6.801 +		return 0;
   6.802 +	}
   6.803 +
   6.804 +	((struct cciss_scsi_adapter_data_t *)
   6.805 +		hba[i]->scsi_ctlr)->scsi_host = (void *) sh;
   6.806 +	sh->hostdata[0] = (unsigned long) hba[i];
   6.807 +	sh->irq = hba[i]->intr;
   6.808 +	sh->unique_id = sh->irq;
   6.809 +	scsi_set_pci_device(sh, hba[i]->pdev);
   6.810 +
   6.811 +	return 1;	/* Say we have 1 scsi adapter, this will be */
   6.812 +			/* called multiple times, once for each adapter */
   6.813 +			/* from cciss.c:cciss_init_one().  We do it this */
   6.814 +			/* way for PCI-hot plug reasons. (we don't know how */
   6.815 +			/* many adapters we have total, so we say we have */
   6.816 +			/* 1, each of a unique type.) */
   6.817 +}
   6.818 +
   6.819 +static void __exit cleanup_cciss_module(void);
   6.820 +int
   6.821 +cciss_scsi_release(struct Scsi_Host *sh)
   6.822 +{
   6.823 +	return 0;
   6.824 +}
   6.825 +
   6.826 +static void
   6.827 +cciss_unmap_one(struct pci_dev *pdev,
   6.828 +		CommandList_struct *cp,
   6.829 +		size_t buflen,
   6.830 +		int data_direction)
   6.831 +{
   6.832 +	u64bit addr64;
   6.833 +
   6.834 +	addr64.val32.lower = cp->SG[0].Addr.lower;
   6.835 +	addr64.val32.upper = cp->SG[0].Addr.upper;
   6.836 +	pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction);
   6.837 +}
   6.838 +
   6.839 +static void
   6.840 +cciss_map_one(struct pci_dev *pdev,
   6.841 +		CommandList_struct *cp,
   6.842 +		unsigned char *buf,
   6.843 +		size_t buflen,
   6.844 +		int data_direction)
   6.845 +{
   6.846 +	__u64 addr64;
   6.847 +
   6.848 +	addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
   6.849 +	cp->SG[0].Addr.lower =
   6.850 +	  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
   6.851 +	cp->SG[0].Addr.upper =
   6.852 +	  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
   6.853 +	cp->SG[0].Len = buflen;
   6.854 +	cp->Header.SGList = (__u8) 1;   /* no. SGs contig in this cmd */
   6.855 +	cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
   6.856 +}
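          +
          +/* Example of the 64-bit address split done in cciss_map_one(): a DMA
          +   mapping that comes back as 0x123456789a is stored as
          +   Addr.lower = 0x3456789a and Addr.upper = 0x12, since the controller
          +   wants the bus address as two 32-bit halves. */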
   6.857 +
   6.858 +static int
   6.859 +cciss_scsi_do_simple_cmd(ctlr_info_t *c,
   6.860 +			CommandList_struct *cp,
   6.861 +			unsigned char *scsi3addr,
   6.862 +			unsigned char *cdb,
   6.863 +			unsigned char cdblen,
   6.864 +			unsigned char *buf, int bufsize,
   6.865 +			int direction)
   6.866 +{
   6.867 +	unsigned long flags;
   6.868 +	DECLARE_COMPLETION(wait);
   6.869 +
   6.870 +	cp->cmd_type = CMD_IOCTL_PEND;		// treat this like an ioctl
   6.871 +	cp->scsi_cmd = NULL;
   6.872 +	cp->Header.ReplyQueue = 0;  // unused in simple mode
   6.873 +	memcpy(&cp->Header.LUN, scsi3addr, sizeof(cp->Header.LUN));
    6.874 +	cp->Header.Tag.lower = cp->busaddr;  // use bus address of cmd as tag
   6.875 +	// Fill in the request block...
   6.876 +
   6.877 +	/* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n",
   6.878 +		scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
   6.879 +		scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */
   6.880 +
   6.881 +	memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
   6.882 +	memcpy(cp->Request.CDB, cdb, cdblen);
   6.883 +	cp->Request.Timeout = 0;	// No timeout
   6.884 +	cp->Request.CDBLen = cdblen;
   6.885 +	cp->Request.Type.Type = TYPE_CMD;
   6.886 +	cp->Request.Type.Attribute = ATTR_SIMPLE;
   6.887 +	cp->Request.Type.Direction = direction;
   6.888 +
   6.889 +	/* Fill in the SG list and do dma mapping */
   6.890 +	cciss_map_one(c->pdev, cp,
   6.891 +			(unsigned char *) buf, bufsize,
   6.892 +			scsi_to_pci_dma_dir(SCSI_DATA_READ));
   6.893 +
   6.894 +	cp->waiting = &wait;
   6.895 +
   6.896 +	/* Put the request on the tail of the request queue */
   6.897 +	spin_lock_irqsave(&io_request_lock, flags);
   6.898 +	addQ(&c->reqQ, cp);
   6.899 +	c->Qdepth++;
   6.900 +	start_io(c);
   6.901 +	spin_unlock_irqrestore(&io_request_lock, flags);
   6.902 +
   6.903 +	wait_for_completion(&wait);
   6.904 +
   6.905 +	/* undo the dma mapping */
   6.906 +	cciss_unmap_one(c->pdev, cp, bufsize,
   6.907 +				scsi_to_pci_dma_dir(SCSI_DATA_READ));
   6.908 +
   6.909 +	return(0);
   6.910 +}
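          +
          +/* cciss_scsi_do_simple_cmd() is the synchronous helper used by the
          +   internal commands below: the caller allocates a command with
          +   scsi_cmd_alloc(), builds a CDB, calls this routine (which queues the
          +   command and sleeps on the completion), then inspects cp->err_info
          +   before freeing the command -- see cciss_scsi_do_inquiry() and
          +   cciss_scsi_do_report_phys_luns(). */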
   6.911 +
   6.912 +static void
   6.913 +cciss_scsi_interpret_error(CommandList_struct *cp)
   6.914 +{
   6.915 +	ErrorInfo_struct *ei;
   6.916 +
   6.917 +	ei = cp->err_info;
   6.918 +	switch(ei->CommandStatus)
   6.919 +	{
   6.920 +		case CMD_TARGET_STATUS:
   6.921 +			printk(KERN_WARNING "cciss: cmd %p has "
   6.922 +				"completed with errors\n", cp);
   6.923 +			printk(KERN_WARNING "cciss: cmd %p "
   6.924 +				"has SCSI Status = %x\n",
   6.925 +					cp,
   6.926 +					ei->ScsiStatus);
   6.927 +			if (ei->ScsiStatus == 0)
   6.928 +				printk(KERN_WARNING
    6.929 +				"cciss: SCSI status is abnormally zero.  "
   6.930 +				"(probably indicates selection timeout "
   6.931 +				"reported incorrectly due to a known "
   6.932 +				"firmware bug, circa July, 2001.)\n");
   6.933 +		break;
   6.934 +		case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
   6.935 +			printk("UNDERRUN\n");
   6.936 +		break;
   6.937 +		case CMD_DATA_OVERRUN:
   6.938 +			printk(KERN_WARNING "cciss: cp %p has"
   6.939 +				" completed with data overrun "
   6.940 +				"reported\n", cp);
   6.941 +		break;
   6.942 +		case CMD_INVALID: {
   6.943 +			/* controller unfortunately reports SCSI passthru's */
   6.944 +			/* to non-existent targets as invalid commands. */
    6.945 +			printk(KERN_WARNING "cciss: cp %p is reported invalid "
    6.946 +				"(probably means target device no longer present)\n",
    6.947 +				cp);
   6.948 +			/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
   6.949 +			print_cmd(cp);  */
   6.950 +			}
   6.951 +		break;
   6.952 +		case CMD_PROTOCOL_ERR:
   6.953 +			printk(KERN_WARNING "cciss: cp %p has "
   6.954 +				"protocol error \n", cp);
   6.955 +		break;
   6.956 +		case CMD_HARDWARE_ERR:
   6.957 +			/* cmd->result = DID_ERROR << 16; */
   6.958 +			printk(KERN_WARNING "cciss: cp %p had "
   6.959 +				" hardware error\n", cp);
   6.960 +		break;
   6.961 +		case CMD_CONNECTION_LOST:
   6.962 +			printk(KERN_WARNING "cciss: cp %p had "
   6.963 +				"connection lost\n", cp);
   6.964 +		break;
   6.965 +		case CMD_ABORTED:
   6.966 +			printk(KERN_WARNING "cciss: cp %p was "
   6.967 +				"aborted\n", cp);
   6.968 +		break;
   6.969 +		case CMD_ABORT_FAILED:
   6.970 +			printk(KERN_WARNING "cciss: cp %p reports "
   6.971 +				"abort failed\n", cp);
   6.972 +		break;
   6.973 +		case CMD_UNSOLICITED_ABORT:
   6.974 +			printk(KERN_WARNING "cciss: cp %p aborted "
    6.975 +				"due to an unsolicited abort\n", cp);
   6.976 +		break;
   6.977 +		case CMD_TIMEOUT:
    6.978 +			printk(KERN_WARNING "cciss: cp %p timed out\n",
   6.979 +				cp);
   6.980 +		break;
   6.981 +		default:
   6.982 +			printk(KERN_WARNING "cciss: cp %p returned "
   6.983 +				"unknown status %x\n", cp,
   6.984 +					ei->CommandStatus);
   6.985 +	}
   6.986 +}
   6.987 +
   6.988 +static int
   6.989 +cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
   6.990 +		 InquiryData_struct *buf)
   6.991 +{
   6.992 +	int rc;
   6.993 +	CommandList_struct *cp;
   6.994 +	char cdb[6];
   6.995 +	ErrorInfo_struct *ei;
   6.996 +
   6.997 +	cp = scsi_cmd_alloc(c);
    6.998 +	if (cp == NULL) {			/* trouble... */
    6.999 +		printk("cmd_alloc returned NULL!\n");
   6.1000 +		return -1;
   6.1001 +	}
   6.1002 +	ei = cp->err_info;
   6.1003 +
  6.1004 +
  6.1005 +	cdb[0] = CISS_INQUIRY;
  6.1006 +	cdb[1] = 0;
  6.1007 +	cdb[2] = 0;
  6.1008 +	cdb[3] = 0;
  6.1009 +	cdb[4] = sizeof(*buf) & 0xff;
  6.1010 +	cdb[5] = 0;
  6.1011 +	rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb,
  6.1012 +				6, (unsigned char *) buf,
  6.1013 +				sizeof(*buf), XFER_READ);
  6.1014 +
  6.1015 +	if (rc != 0) return rc; /* something went wrong */
  6.1016 +
  6.1017 +	if (ei->CommandStatus != 0 &&
  6.1018 +	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
  6.1019 +		cciss_scsi_interpret_error(cp);
  6.1020 +		scsi_cmd_free(c, cp);
  6.1021 +		return -1;
  6.1022 +	}
  6.1023 +	scsi_cmd_free(c, cp);
  6.1024 +	return 0;
  6.1025 +}
  6.1026 +
  6.1027 +static int
  6.1028 +cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
  6.1029 +		ReportLunData_struct *buf, int bufsize)
  6.1030 +{
  6.1031 +	int rc;
  6.1032 +	CommandList_struct *cp;
  6.1033 +	unsigned char cdb[12];
  6.1034 +	unsigned char scsi3addr[8];
  6.1035 +	ErrorInfo_struct *ei;
  6.1036 +
  6.1037 +	cp = scsi_cmd_alloc(c);
  6.1038 +	if (cp == NULL) {			/* trouble... */
  6.1039 +		printk("cmd_alloc returned NULL!\n");
  6.1040 +		return -1;
  6.1041 +	}
  6.1042 +
  6.1043 +	memset(&scsi3addr[0], 0, 8); /* address the controller */
  6.1044 +	cdb[0] = CISS_REPORT_PHYS;
  6.1045 +	cdb[1] = 0;
  6.1046 +	cdb[2] = 0;
  6.1047 +	cdb[3] = 0;
  6.1048 +	cdb[4] = 0;
  6.1049 +	cdb[5] = 0;
  6.1050 +	cdb[6] = (bufsize >> 24) & 0xFF;  //MSB
  6.1051 +	cdb[7] = (bufsize >> 16) & 0xFF;
  6.1052 +	cdb[8] = (bufsize >> 8) & 0xFF;
  6.1053 +	cdb[9] = bufsize & 0xFF;
  6.1054 +	cdb[10] = 0;
  6.1055 +	cdb[11] = 0;
  6.1056 +
  6.1057 +	rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr,
  6.1058 +				cdb, 12,
  6.1059 +				(unsigned char *) buf,
  6.1060 +				bufsize, XFER_READ);
  6.1061 +
  6.1062 +	if (rc != 0) return rc; /* something went wrong */
  6.1063 +
  6.1064 +	ei = cp->err_info;
  6.1065 +	if (ei->CommandStatus != 0 &&
  6.1066 +	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
  6.1067 +		cciss_scsi_interpret_error(cp);
  6.1068 +		scsi_cmd_free(c, cp);
  6.1069 +		return -1;
  6.1070 +	}
  6.1071 +	scsi_cmd_free(c, cp);
  6.1072 +	return 0;
  6.1073 +}
  6.1074 +
  6.1075 +static void
  6.1076 +cciss_update_non_disk_devices(int cntl_num, int hostno)
  6.1077 +{
  6.1078 +	/* the idea here is we could get notified from /proc
  6.1079 +	   that some devices have changed, so we do a report
  6.1080 +	   physical luns cmd, and adjust our list of devices
  6.1081 +	   accordingly.  (We can't rely on the scsi-mid layer just
  6.1082 +	   doing inquiries, because the "busses" that the scsi
  6.1083 +	   mid-layer probes are totally fabricated by this driver,
   6.1084 +	   so new devices wouldn't show up.)
  6.1085 +
  6.1086 +	   the scsi3addr's of devices won't change so long as the
  6.1087 +	   adapter is not reset.  That means we can rescan and
  6.1088 +	   tell which devices we already know about, vs. new
  6.1089 +	   devices, vs.  disappearing devices.
  6.1090 +
  6.1091 +	   Also, if you yank out a tape drive, then put in a disk
   6.1092 +	   in its place (say, a configured volume from another
  6.1093 +	   array controller for instance)  _don't_ poke this driver
  6.1094 +	   (so it thinks it's still a tape, but _do_ poke the scsi
  6.1095 +	   mid layer, so it does an inquiry... the scsi mid layer
  6.1096 +	   could see the physical disk.  This would be bad.  Need to
  6.1097 +	   think about how to prevent that.  One idea would be to
   6.1098 +	   snoop all scsi responses and if an inquiry response comes
   6.1099 +	   back that reports a disk, chuck it and return selection
   6.1100 +	   timeout instead and adjust our table...  Not sure I like
  6.1101 +	   that though.
  6.1102 +
  6.1103 +	 */
  6.1104 +
  6.1105 +	ReportLunData_struct *ld_buff;
  6.1106 +	InquiryData_struct *inq_buff;
  6.1107 +	unsigned char scsi3addr[8];
  6.1108 +	ctlr_info_t *c;
  6.1109 +	__u32 num_luns=0;
  6.1110 +	unsigned char *ch;
  6.1111 +	/* unsigned char found[CCISS_MAX_SCSI_DEVS_PER_HBA]; */
  6.1112 +	struct cciss_scsi_dev_t currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
  6.1113 +	int ncurrent=0;
  6.1114 +	int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
  6.1115 +	int i;
  6.1116 +
  6.1117 +	c = (ctlr_info_t *) hba[cntl_num];
  6.1118 +	ld_buff = kmalloc(reportlunsize, GFP_KERNEL);
  6.1119 +	if (ld_buff == NULL) {
  6.1120 +		printk(KERN_ERR "cciss: out of memory\n");
  6.1121 +		return;
  6.1122 +	}
  6.1123 +	memset(ld_buff, 0, reportlunsize);
  6.1124 +	inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
  6.1125 +	if (inq_buff == NULL) {
  6.1126 +		printk(KERN_ERR "cciss: out of memory\n");
  6.1127 +		kfree(ld_buff);
  6.1128 +		return;
  6.1129 +	}
  6.1130 +
  6.1131 +	if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
  6.1132 +		ch = &ld_buff->LUNListLength[0];
  6.1133 +		num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
  6.1134 +		if (num_luns > CISS_MAX_PHYS_LUN) {
  6.1135 +			printk(KERN_WARNING
  6.1136 +				"cciss: Maximum physical LUNs (%d) exceeded.  "
  6.1137 +				"%d LUNs ignored.\n", CISS_MAX_PHYS_LUN,
  6.1138 +				num_luns - CISS_MAX_PHYS_LUN);
  6.1139 +			num_luns = CISS_MAX_PHYS_LUN;
  6.1140 +		}
  6.1141 +	}
  6.1142 +	else {
  6.1143 +		printk(KERN_ERR  "cciss: Report physical LUNs failed.\n");
   6.1144 +		kfree(inq_buff); kfree(ld_buff); return;
  6.1145 +	}
  6.1146 +
  6.1147 +
  6.1148 +	/* adjust our table of devices */
  6.1149 +	for(i=0; i<num_luns; i++)
  6.1150 +	{
  6.1151 +		int devtype;
  6.1152 +
  6.1153 +		/* for each physical lun, do an inquiry */
  6.1154 +		memset(inq_buff, 0, sizeof(InquiryData_struct));
  6.1155 +		memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
  6.1156 +
  6.1157 +		if (cciss_scsi_do_inquiry(hba[cntl_num],
  6.1158 +			scsi3addr, inq_buff) != 0)
  6.1159 +		{
  6.1160 +			/* Inquiry failed (msg printed already) */
  6.1161 +			devtype = 0; /* so we will skip this device. */
  6.1162 +		} else /* what kind of device is this? */
  6.1163 +			devtype = (inq_buff->data_byte[0] & 0x1f);
  6.1164 +
  6.1165 +		switch (devtype)
  6.1166 +		{
  6.1167 +		  case 0x01: /* sequential access, (tape) */
  6.1168 +		  case 0x08: /* medium changer */
  6.1169 +			if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
  6.1170 +				printk(KERN_INFO "cciss%d: %s ignored, "
  6.1171 +					"too many devices.\n", cntl_num,
  6.1172 +					DEVICETYPE(devtype));
  6.1173 +				break;
  6.1174 +			}
  6.1175 +			memcpy(&currentsd[ncurrent].scsi3addr[0],
  6.1176 +				&scsi3addr[0], 8);
  6.1177 +			currentsd[ncurrent].devtype = devtype;
  6.1178 +			currentsd[ncurrent].bus = -1;
  6.1179 +			currentsd[ncurrent].target = -1;
  6.1180 +			currentsd[ncurrent].lun = -1;
  6.1181 +			ncurrent++;
  6.1182 +			break;
  6.1183 +		  default:
  6.1184 +			break;
  6.1185 +		}
  6.1186 +	}
  6.1187 +
  6.1188 +	adjust_cciss_scsi_table(cntl_num, hostno, currentsd, ncurrent);
  6.1189 +
  6.1190 +	kfree(inq_buff);
  6.1191 +	kfree(ld_buff);
  6.1192 +	return;
  6.1193 +}
  6.1194 +
  6.1195 +static int
  6.1196 +is_keyword(char *ptr, int len, char *verb)  // Thanks to ncr53c8xx.c
  6.1197 +{
  6.1198 +	int verb_len = strlen(verb);
  6.1199 +	if (len >= verb_len && !memcmp(verb,ptr,verb_len))
  6.1200 +		return verb_len;
  6.1201 +	else
  6.1202 +		return 0;
  6.1203 +}
  6.1204 +
  6.1205 +static int
  6.1206 +cciss_scsi_user_command(int ctlr, int hostno, char *buffer, int length)
  6.1207 +{
  6.1208 +	int arg_len;
  6.1209 +
  6.1210 +	if ((arg_len = is_keyword(buffer, length, "rescan")) != 0)
  6.1211 +		cciss_update_non_disk_devices(ctlr, hostno);
  6.1212 +	else
  6.1213 +		return -EINVAL;
  6.1214 +	return length;
  6.1215 +}
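          +
          +/* The only verb understood so far is "rescan"; the expectation is that
          +   something like
          +       echo rescan > /proc/scsi/cciss0/N
          +   (where N is the host number assigned by the scsi mid layer) lands
          +   here via cciss_scsi_proc_info() below and triggers
          +   cciss_update_non_disk_devices(). */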
  6.1216 +
  6.1217 +/* It's a pity that we need this, but, we do... */
  6.1218 +extern struct Scsi_Host *scsi_hostlist;  /* from ../scsi/hosts.c */
  6.1219 +
  6.1220 +int
  6.1221 +cciss_scsi_proc_info(char *buffer, /* data buffer */
  6.1222 +		char **start, 	   /* where data in buffer starts */
  6.1223 +		off_t offset,	   /* offset from start of imaginary file */
  6.1224 +		int length, 	   /* length of data in buffer */
  6.1225 +		int hostnum, 	   /* which host adapter (always zero for me) */
  6.1226 +		int func)	   /* 0 == read, 1 == write */
  6.1227 +{
  6.1228 +
  6.1229 +	int buflen, datalen;
  6.1230 +	struct Scsi_Host *sh;
  6.1231 +	int found;
  6.1232 +	ctlr_info_t *ci;
  6.1233 +	int cntl_num;
  6.1234 +
   6.1235 +	/* Let's see if we can find our Scsi_Host...
   6.1236 +	   this might be kind of "bad", searching scsi_hostlist this way
  6.1237 +	   but how else can we find the scsi host?  I think I've seen
  6.1238 +	   this coded both ways, (circular list and null terminated list)
  6.1239 +	   I coded it to work either way, since I wasn't sure.  */
  6.1240 +
  6.1241 +	sh = scsi_hostlist;
  6.1242 +	found=0;
  6.1243 +	do {
  6.1244 +		if (sh == NULL) break;
  6.1245 +		if (sh->host_no == hostnum) {
  6.1246 +			found++;
  6.1247 +			break;
  6.1248 +		}
  6.1249 +		sh = sh->next;
  6.1250 +	} while (sh != scsi_hostlist && sh != NULL);
  6.1251 +
  6.1252 +	if (sh == NULL || found == 0) /* This really shouldn't ever happen. */
  6.1253 +		return -EINVAL;
  6.1254 +
  6.1255 +	ci = (ctlr_info_t *) sh->hostdata[0];
  6.1256 +	if (ci == NULL)  /* This really shouldn't ever happen. */
  6.1257 +		return -EINVAL;
  6.1258 +
  6.1259 +	cntl_num = ci->ctlr;	/* Get our index into the hba[] array */
  6.1260 +
  6.1261 +	if (func == 0) {	/* User is reading from /proc/scsi/ciss*?/?*  */
  6.1262 +		buflen = sprintf(buffer, "hostnum=%d\n", hostnum);
  6.1263 +
  6.1264 +		datalen = buflen - offset;
  6.1265 +		if (datalen < 0) { 	/* they're reading past EOF. */
  6.1266 +			datalen = 0;
  6.1267 +			*start = buffer+buflen;
  6.1268 +		} else
  6.1269 +			*start = buffer + offset;
  6.1270 +		return(datalen);
  6.1271 +	} else 	/* User is writing to /proc/scsi/cciss*?/?*  ... */
  6.1272 +		return cciss_scsi_user_command(cntl_num, hostnum,
  6.1273 +			buffer, length);
  6.1274 +}
  6.1275 +
  6.1276 +/* this is via the generic proc support */
  6.1277 +const char *
  6.1278 +cciss_scsi_info(struct Scsi_Host *sa)
  6.1279 +{
  6.1280 +	static char buf[300];
  6.1281 +	ctlr_info_t *ci;
  6.1282 +
  6.1283 +	/* probably need to work on putting a bit more info in here... */
  6.1284 +	/* this is output via the /proc filesystem. */
  6.1285 +
  6.1286 +	ci = (ctlr_info_t *) sa->hostdata[0];
  6.1287 +
  6.1288 +	sprintf(buf, "%s %c%c%c%c\n",
  6.1289 +		ci->product_name,
  6.1290 +		ci->firm_ver[0],
  6.1291 +		ci->firm_ver[1],
  6.1292 +		ci->firm_ver[2],
  6.1293 +		ci->firm_ver[3]);
  6.1294 +
  6.1295 +	return buf;
  6.1296 +}
  6.1297 +
  6.1298 +
  6.1299 +/* cciss_scatter_gather takes a Scsi_Cmnd, (cmd), and does the pci
  6.1300 +   dma mapping  and fills in the scatter gather entries of the
  6.1301 +   cciss command, cp. */
  6.1302 +
  6.1303 +static void
  6.1304 +cciss_scatter_gather(struct pci_dev *pdev,
  6.1305 +		CommandList_struct *cp,
  6.1306 +		Scsi_Cmnd *cmd)
  6.1307 +{
  6.1308 +	unsigned int use_sg, nsegs=0, len;
  6.1309 +	struct scatterlist *scatter = (struct scatterlist *) cmd->buffer;
  6.1310 +	__u64 addr64;
  6.1311 +
  6.1312 +	/* is it just one virtual address? */
  6.1313 +	if (!cmd->use_sg) {
  6.1314 +		if (cmd->request_bufflen) {	/* anything to xfer? */
  6.1315 +
  6.1316 +			addr64 = (__u64) pci_map_single(pdev,
  6.1317 +				cmd->request_buffer,
  6.1318 +				cmd->request_bufflen,
  6.1319 +				scsi_to_pci_dma_dir(cmd->sc_data_direction));
  6.1320 +
  6.1321 +			cp->SG[0].Addr.lower =
  6.1322 +			  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
  6.1323 +			cp->SG[0].Addr.upper =
  6.1324 +			  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
  6.1325 +			cp->SG[0].Len = cmd->request_bufflen;
  6.1326 +			nsegs=1;
  6.1327 +		}
  6.1328 +	} /* else, must be a list of virtual addresses.... */
  6.1329 +	else if (cmd->use_sg <= MAXSGENTRIES) {	/* not too many addrs? */
  6.1330 +
  6.1331 +		use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg,
  6.1332 +			scsi_to_pci_dma_dir(cmd->sc_data_direction));
  6.1333 +
  6.1334 +		for (nsegs=0; nsegs < use_sg; nsegs++) {
  6.1335 +			addr64 = (__u64) sg_dma_address(&scatter[nsegs]);
  6.1336 +			len  = sg_dma_len(&scatter[nsegs]);
  6.1337 +			cp->SG[nsegs].Addr.lower =
  6.1338 +			  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
  6.1339 +			cp->SG[nsegs].Addr.upper =
  6.1340 +			  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
  6.1341 +			cp->SG[nsegs].Len = len;
  6.1342 +			cp->SG[nsegs].Ext = 0;  // we are not chaining
  6.1343 +		}
  6.1344 +	} else BUG();
  6.1345 +
  6.1346 +	cp->Header.SGList = (__u8) nsegs;   /* no. SGs contig in this cmd */
  6.1347 +	cp->Header.SGTotal = (__u16) nsegs; /* total sgs in this cmd list */
  6.1348 +	return;
  6.1349 +}
  6.1350 +
  6.1351 +
  6.1352 +int
  6.1353 +cciss_scsi_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *))
  6.1354 +{
  6.1355 +	ctlr_info_t **c;
  6.1356 +	int ctlr, rc;
  6.1357 +	unsigned char scsi3addr[8];
  6.1358 +	CommandList_struct *cp;
  6.1359 +
  6.1360 +	// Get the ptr to our adapter structure (hba[i]) out of cmd->host.
  6.1361 +	// We violate cmd->host privacy here.  (Is there another way?)
  6.1362 +	c = (ctlr_info_t **) &cmd->host->hostdata[0];
  6.1363 +	ctlr = (*c)->ctlr;
  6.1364 +
  6.1365 +	rc = lookup_scsi3addr(ctlr, cmd->channel, cmd->target, cmd->lun,
  6.1366 +			scsi3addr);
  6.1367 +	if (rc != 0) {
  6.1368 +		/* the scsi nexus does not match any that we presented... */
  6.1369 +		/* pretend to mid layer that we got selection timeout */
  6.1370 +		cmd->result = DID_NO_CONNECT << 16;
  6.1371 +		done(cmd);
  6.1372 +		/* we might want to think about registering controller itself
  6.1373 +		   as a processor device on the bus so sg binds to it. */
  6.1374 +		return 0;
  6.1375 +	}
  6.1376 +
  6.1377 +	// printk("cciss_queue_command, p=%p, cmd=0x%02x, c%db%dt%dl%d\n",
  6.1378 +	//	cmd, cmd->cmnd[0], ctlr, cmd->channel, cmd->target, cmd->lun);
  6.1379 +
  6.1380 +	/* Ok, we have a reasonable scsi nexus, so send the cmd down, and
  6.1381 +		see what the device thinks of it. */
  6.1382 +
  6.1383 +	cp = scsi_cmd_alloc(*c);
  6.1384 +	if (cp == NULL) {			/* trouble... */
  6.1385 +		printk("scsi_cmd_alloc returned NULL!\n");
  6.1386 +		/* FIXME: next 3 lines are -> BAD! <- */
  6.1387 +		cmd->result = DID_NO_CONNECT << 16;
  6.1388 +		done(cmd);
  6.1389 +		return 0;
  6.1390 +	}
  6.1391 +
  6.1392 +	// Fill in the command list header
  6.1393 +
  6.1394 +	cmd->scsi_done = done;    // save this for use by completion code
  6.1395 +
  6.1396 +	// save cp in case we have to abort it
  6.1397 +	cmd->host_scribble = (unsigned char *) cp;
  6.1398 +
  6.1399 +	cp->cmd_type = CMD_SCSI;
  6.1400 +	cp->scsi_cmd = cmd;
  6.1401 +	cp->Header.ReplyQueue = 0;  // unused in simple mode
  6.1402 +	memcpy(&cp->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
   6.1403 +	cp->Header.Tag.lower = cp->busaddr;  // use bus address of cmd as tag
  6.1404 +
  6.1405 +	// Fill in the request block...
  6.1406 +
  6.1407 +	cp->Request.Timeout = 0; // No timeout 
  6.1408 +	memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
  6.1409 +	if (cmd->cmd_len > sizeof(cp->Request.CDB)) BUG();
  6.1410 +	cp->Request.CDBLen = cmd->cmd_len;
  6.1411 +	memcpy(cp->Request.CDB, cmd->cmnd, cmd->cmd_len);
  6.1412 +	cp->Request.Type.Type = TYPE_CMD;
  6.1413 +	cp->Request.Type.Attribute = ATTR_SIMPLE;
  6.1414 +	switch(cmd->sc_data_direction)
  6.1415 +	{
  6.1416 +	  case SCSI_DATA_WRITE: cp->Request.Type.Direction = XFER_WRITE; break;
  6.1417 +	  case SCSI_DATA_READ: cp->Request.Type.Direction = XFER_READ; break;
  6.1418 +	  case SCSI_DATA_NONE: cp->Request.Type.Direction = XFER_NONE; break;
  6.1419 +
  6.1420 +	  case SCSI_DATA_UNKNOWN:
  6.1421 +		// This can happen if a buggy application does a scsi passthru
  6.1422 +		// and sets both inlen and outlen to non-zero. ( see
  6.1423 +		// ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
  6.1424 +
  6.1425 +		cp->Request.Type.Direction = XFER_RSVD;
  6.1426 +		// This is technically wrong, and cciss controllers should
  6.1427 +		// reject it with CMD_INVALID, which is the most correct
  6.1428 +		// response, but non-fibre backends appear to let it
  6.1429 +		// slide by, and give the same results as if this field
  6.1430 +		// were set correctly.  Either way is acceptable for
  6.1431 +		// our purposes here.
  6.1432 +
  6.1433 +		break;
  6.1434 +
  6.1435 +	  default:
  6.1436 +		printk("cciss: unknown data direction: %d\n",
  6.1437 +			cmd->sc_data_direction);
  6.1438 +		BUG();
  6.1439 +		break;
  6.1440 +	}
  6.1441 +
  6.1442 +	cciss_scatter_gather((*c)->pdev, cp, cmd); // Fill the SG list
  6.1443 +
  6.1444 +	/* Put the request on the tail of the request queue */
  6.1445 +
  6.1446 +	addQ(&(*c)->reqQ, cp);
  6.1447 +	(*c)->Qdepth++;
  6.1448 +	start_io(*c);
  6.1449 +
  6.1450 +	/* the cmd'll come back via intr handler in complete_scsi_command()  */
  6.1451 +	return 0;
  6.1452 +}
  6.1453 +
  6.1454 +static void
  6.1455 +init_driver_template(int ctlr)
  6.1456 +{
  6.1457 +	memset(&driver_template[ctlr], 0, sizeof(driver_template[ctlr]));
  6.1458 +	driver_template[ctlr].name = ccissscsi[ctlr].name;
  6.1459 +	driver_template[ctlr].proc_name = ccissscsi[ctlr].name;
  6.1460 +	driver_template[ctlr].detect = cciss_scsi_detect;
  6.1461 +	driver_template[ctlr].release = cciss_scsi_release;
  6.1462 +	driver_template[ctlr].proc_info = cciss_scsi_proc_info;
  6.1463 +	driver_template[ctlr].queuecommand = cciss_scsi_queue_command;
  6.1464 +	driver_template[ctlr].eh_abort_handler = NULL;
  6.1465 +	driver_template[ctlr].eh_device_reset_handler = NULL;
  6.1466 +	driver_template[ctlr].bios_param = scsicam_bios_param;
  6.1467 +	driver_template[ctlr].can_queue = SCSI_CCISS_CAN_QUEUE;
  6.1468 +	driver_template[ctlr].this_id = SELF_SCSI_ID;
  6.1469 +	driver_template[ctlr].sg_tablesize = MAXSGENTRIES;
  6.1470 +	driver_template[ctlr].cmd_per_lun = 1;
  6.1471 +	driver_template[ctlr].use_new_eh_code = 1;
  6.1472 +	driver_template[ctlr].use_clustering = DISABLE_CLUSTERING;
  6.1473 +	driver_template[ctlr].module = THIS_MODULE;
  6.1474 +
  6.1475 +	/* set scsi_host to NULL so our detect routine will
  6.1476 +	   find us on register */
  6.1477 +
  6.1478 +	((struct cciss_scsi_adapter_data_t *)
  6.1479 +		hba[ctlr]->scsi_ctlr)->scsi_host = NULL;
  6.1480 +
  6.1481 +}
  6.1482 +
  6.1483 +static void
  6.1484 +cciss_unregister_scsi(int ctlr)
  6.1485 +{
  6.1486 +	struct cciss_scsi_adapter_data_t *sa;
  6.1487 +	struct cciss_scsi_cmd_stack_t *stk;
  6.1488 +	unsigned long flags;
  6.1489 +
  6.1490 +	/* we are being forcibly unloaded, and may not refuse. */
  6.1491 +
  6.1492 +	spin_lock_irqsave(&io_request_lock, flags);
  6.1493 +	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
  6.1494 +	stk = &sa->cmd_stack;
  6.1495 +
  6.1496 +	/* if we weren't ever actually registered, don't unregister */
  6.1497 +	if (((struct cciss_scsi_adapter_data_t *)
  6.1498 +		hba[ctlr]->scsi_ctlr)->registered) {
  6.1499 +		spin_unlock_irqrestore(&io_request_lock, flags);
  6.1500 +		scsi_unregister_module(MODULE_SCSI_HA, &driver_template[ctlr]);
  6.1501 +		spin_lock_irqsave(&io_request_lock, flags);
  6.1502 +	}
  6.1503 +	init_driver_template(ctlr);
  6.1504 +	scsi_cmd_stack_free(ctlr);
  6.1505 +	kfree(hba[ctlr]->scsi_ctlr);
  6.1506 +	spin_unlock_irqrestore(&io_request_lock, flags);
  6.1507 +}
  6.1508 +
  6.1509 +static int
  6.1510 +cciss_register_scsi(int ctlr, int this_is_init_time)
  6.1511 +{
  6.1512 +	unsigned long flags;
  6.1513 +
  6.1514 +	CPQ_TAPE_LOCK(ctlr, flags);
  6.1515 +
  6.1516 +	sprintf( ccissscsi[ctlr].name, "cciss%d", ctlr );
  6.1517 +	
  6.1518 +	init_driver_template(ctlr);
  6.1519 +
  6.1520 +	/* Since this is really a block driver, the SCSI core may not be
  6.1521 +	   initialized yet, in which case, calling scsi_register_module
   6.1522 +	   would hang.  Instead, we will do it later, via the /proc filesystem
   6.1523 +	   and rc scripts, when we know the SCSI core is good to go. */
  6.1524 +
  6.1525 +	if (this_is_init_time) {
  6.1526 +		CPQ_TAPE_UNLOCK(ctlr, flags);
  6.1527 +		return 0;
  6.1528 +	}
  6.1529 +
  6.1530 +	/* Only register if SCSI devices are detected. */
  6.1531 +	if (ccissscsi[ctlr].ndevices != 0) {
  6.1532 +		((struct cciss_scsi_adapter_data_t *)
  6.1533 +			hba[ctlr]->scsi_ctlr)->registered = 1;
  6.1534 +		CPQ_TAPE_UNLOCK(ctlr, flags);
  6.1535 +		return scsi_register_module(MODULE_SCSI_HA,
  6.1536 +			&driver_template[ctlr]);
  6.1537 +	}
  6.1538 +	CPQ_TAPE_UNLOCK(ctlr, flags);
  6.1539 +	printk(KERN_INFO
  6.1540 +		"cciss%d: No appropriate SCSI device detected, "
  6.1541 +		"SCSI subsystem not engaged.\n", ctlr);
  6.1542 +	return 0;
  6.1543 +}
  6.1544 +
  6.1545 +static int
  6.1546 +cciss_engage_scsi(int ctlr)
  6.1547 +{
  6.1548 +	struct cciss_scsi_adapter_data_t *sa;
  6.1549 +	struct cciss_scsi_cmd_stack_t *stk;
  6.1550 +	unsigned long flags;
  6.1551 +
  6.1552 +	spin_lock_irqsave(&io_request_lock, flags);
  6.1553 +	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
  6.1554 +	stk = &sa->cmd_stack;
  6.1555 +
  6.1556 +	if (((struct cciss_scsi_adapter_data_t *)
  6.1557 +		hba[ctlr]->scsi_ctlr)->registered) {
  6.1558 +		printk("cciss%d: SCSI subsystem already engaged.\n", ctlr);
  6.1559 +		spin_unlock_irqrestore(&io_request_lock, flags);
  6.1560 +		return ENXIO;
  6.1561 +	}
  6.1562 +	spin_unlock_irqrestore(&io_request_lock, flags);
  6.1563 +	cciss_update_non_disk_devices(ctlr, -1);
  6.1564 +	cciss_register_scsi(ctlr, 0);
  6.1565 +	return 0;
  6.1566 +}
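          +
          +/* cciss_engage_scsi() is the deferred half of the "register later"
          +   scheme described in cciss_register_scsi() above: it is intended to be
          +   invoked after boot (presumably from a /proc write handled in cciss.c)
          +   once the SCSI core is known to be up, at which point it rescans for
          +   tape/changer devices and registers with the mid layer. */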
  6.1567 +
  6.1568 +static void
  6.1569 +cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len)
  6.1570 +{
  6.1571 +	int size;
   6.1572 +	unsigned long flags;
  6.1573 +
  6.1574 +	*pos = *pos -1; *len = *len - 1; // cut off the last trailing newline
  6.1575 +
  6.1576 +	CPQ_TAPE_LOCK(ctlr, flags);
  6.1577 +	size = sprintf(buffer + *len,
  6.1578 +		"Sequential access devices: %d\n\n",
  6.1579 +		 ccissscsi[ctlr].ndevices);
  6.1580 +	CPQ_TAPE_UNLOCK(ctlr, flags);
  6.1581 +	*pos += size; *len += size;
  6.1582 +}
  6.1583 +
  6.1584 +#else /* no CONFIG_CISS_SCSI_TAPE */
  6.1585 +
  6.1586 +/* If no tape support, then these become defined out of existence */
  6.1587 +
  6.1588 +#define cciss_find_non_disk_devices(cntl_num)
  6.1589 +#define cciss_unregister_scsi(ctlr)
  6.1590 +#define cciss_register_scsi(ctlr, this_is_init_time)
  6.1591 +#define cciss_proc_tape_report(ctlr, buffer, pos, len)
  6.1592 +
  6.1593 +#endif /* CONFIG_CISS_SCSI_TAPE */
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen/drivers/block/cciss_scsi.h	Mon Jan 19 15:29:17 2004 +0000
     7.3 @@ -0,0 +1,99 @@
     7.4 +/*
     7.5 + *    Disk Array driver for HP SA 5xxx and 6xxx Controllers, SCSI Tape module
     7.6 + *    Copyright 2001, 2002 Hewlett-Packard Development Company, L.P.
     7.7 + *
     7.8 + *    This program is free software; you can redistribute it and/or modify
     7.9 + *    it under the terms of the GNU General Public License as published by
    7.10 + *    the Free Software Foundation; either version 2 of the License, or
    7.11 + *    (at your option) any later version.
    7.12 + *
    7.13 + *    This program is distributed in the hope that it will be useful,
    7.14 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
    7.15 + *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
    7.16 + *    NON INFRINGEMENT.  See the GNU General Public License for more details.
    7.17 + *
    7.18 + *    You should have received a copy of the GNU General Public License
    7.19 + *    along with this program; if not, write to the Free Software
    7.20 + *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
    7.21 + *
    7.22 + *    Questions/Comments/Bugfixes to Cciss-discuss@lists.sourceforge.net
    7.23 + *
    7.24 + */
    7.25 +#ifdef CONFIG_CISS_SCSI_TAPE
    7.26 +#ifndef _CCISS_SCSI_H_
    7.27 +#define _CCISS_SCSI_H_
    7.28 +
    7.29 +#include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */
    7.30 +
    7.31 +		// the scsi id of the adapter...
    7.32 +#define SELF_SCSI_ID -1 
     7.33 +		// In case we ever want to present the controller so sg will
    7.34 +		// bind to it.  The scsi bus that's presented by the
    7.35 +		// driver to the OS is fabricated.  The "real" scsi-3
    7.36 +		// bus the hardware presents is fabricated too.
    7.37 +		// The actual, honest-to-goodness physical
    7.38 +		// bus that the devices are attached to is not
     7.39 +		// addressable natively, and may in fact turn
    7.40 +		// out to be not scsi at all.
    7.41 +
    7.42 +#define SCSI_CCISS_CAN_QUEUE 2
    7.43 +
    7.44 +/* this notation works fine for static initializations (as is the usual
    7.45 +   case for linux scsi drivers), but not so well for dynamic settings,
    7.46 +   so, if you change this, you also have to change cciss_unregister_scsi()
    7.47 +   in cciss_scsi.c  */
    7.48 +#define CCISS_SCSI {    \
    7.49 +	name:			"",				\
    7.50 +	detect:			cciss_scsi_detect,		\
    7.51 +	release:		cciss_scsi_release,		\
    7.52 +	proc_info:		cciss_scsi_proc_info,		\
    7.53 +	queuecommand:   	cciss_scsi_queue_command,	\
    7.54 +	bios_param:     	scsicam_bios_param,		\
    7.55 +	can_queue:      	SCSI_CCISS_CAN_QUEUE,		\
    7.56 +	this_id:		SELF_SCSI_ID,			\
    7.57 +	sg_tablesize:   	MAXSGENTRIES, 			\
    7.58 +	cmd_per_lun:		1,				\
    7.59 +	use_new_eh_code:	1,				\
    7.60 +	use_clustering:		DISABLE_CLUSTERING,\
    7.61 +}
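          +
          +/* (The same fields are also filled in at run time by
          +   init_driver_template() in cciss_scsi.c, so the two lists need to be
          +   kept in step.) */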
    7.62 +
    7.63 +/*
    7.64 +	info:			cciss_scsi_info,		\
    7.65 +
    7.66 +Note, cmd_per_lun could give us some trouble, so I'm setting it very low.
    7.67 +Likewise, SCSI_CCISS_CAN_QUEUE is set very conservatively.
    7.68 +
    7.69 +If the upper scsi layer tries to track how many commands we have
    7.70 +outstanding, it will be operating under the misapprehension that it is
    7.71 +the only one sending us requests.  We also have the block interface,
    7.72 +which is where most requests must surely come from, so the upper layer's
    7.73 +notion of how many requests we have outstanding will be wrong most or
    7.74 +all of the time.
    7.75 +
    7.76 +Note, the normal SCSI mid-layer error handling doesn't work well
    7.77 +for this driver because 1) it takes the io_request_lock before
    7.78 +calling error handlers and uses a local variable to store flags,
    7.79 +so the io_request_lock cannot be released and interrupts enabled
     7.80 +inside the error handlers, and 2) the error handlers cannot poll
    7.81 +for command completion because they might get commands from the
    7.82 +block half of the driver completing, and not know what to do
    7.83 +with them.  That's what we get for making a hybrid scsi/block
    7.84 +driver, I suppose.
    7.85 +
    7.86 +*/
    7.87 +
    7.88 +struct cciss_scsi_dev_t {
    7.89 +	int devtype;
    7.90 +	int bus, target, lun;		/* as presented to the OS */
    7.91 +	unsigned char scsi3addr[8];	/* as presented to the HW */
    7.92 +};
    7.93 +
    7.94 +struct cciss_scsi_hba_t {
    7.95 +	char name[32];
    7.96 +	int ndevices;
    7.97 +#define CCISS_MAX_SCSI_DEVS_PER_HBA 16
    7.98 +	struct cciss_scsi_dev_t dev[CCISS_MAX_SCSI_DEVS_PER_HBA];
    7.99 +};
   7.100 +
   7.101 +#endif /* _CCISS_SCSI_H_ */
   7.102 +#endif /* CONFIG_CISS_SCSI_TAPE */
     8.1 --- a/xen/drivers/block/ll_rw_blk.c	Fri Jan 16 19:29:56 2004 +0000
     8.2 +++ b/xen/drivers/block/ll_rw_blk.c	Mon Jan 19 15:29:17 2004 +0000
     8.3 @@ -117,6 +117,7 @@ int * max_readahead[MAX_BLKDEV];
     8.4   */
     8.5  int * max_sectors[MAX_BLKDEV];
     8.6  
     8.7 +
     8.8  static inline int get_max_sectors(kdev_t dev)
     8.9  {
    8.10  	if (!max_sectors[MAJOR(dev)])
    8.11 @@ -238,6 +239,21 @@ void blk_queue_make_request(request_queu
    8.12  	q->make_request_fn = mfn;
    8.13  }
    8.14  
    8.15 +
    8.16 +/*
    8.17 + * can we merge the two segments, or do we need to start a new one?
    8.18 + */
    8.19 +inline int blk_seg_merge_ok(struct buffer_head *bh, struct buffer_head *nxt)
    8.20 +{
    8.21 +	/*
     8.22 +	 * if bh and nxt are contiguous and don't cross a 4g boundary, it's ok
    8.23 +	 */
    8.24 +	if (BH_CONTIG(bh, nxt) && BH_PHYS_4G(bh, nxt))
    8.25 +		return 1;
    8.26 +
    8.27 +	return 0;
    8.28 +}
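          +
          +/*
          + * Example: two 512-byte buffer_heads whose data sit back to back in
          + * physical memory (and inside the same 4GB region) pass both tests and
          + * merge into a single scatter-gather segment, so the request's
          + * nr_segments does not grow; if either test fails, ll_new_segment()
          + * below decides whether the request can take one more segment.
          + */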
    8.29 +
    8.30  static inline int ll_new_segment(request_queue_t *q, struct request *req, int max_segments)
    8.31  {
    8.32  	if (req->nr_segments < max_segments) {
    8.33 @@ -250,16 +266,18 @@ static inline int ll_new_segment(request
    8.34  static int ll_back_merge_fn(request_queue_t *q, struct request *req, 
    8.35  			    struct buffer_head *bh, int max_segments)
    8.36  {
    8.37 -	if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
    8.38 +	if (blk_seg_merge_ok(req->bhtail, bh))
    8.39  		return 1;
    8.40 +
    8.41  	return ll_new_segment(q, req, max_segments);
    8.42  }
    8.43  
    8.44  static int ll_front_merge_fn(request_queue_t *q, struct request *req, 
    8.45  			     struct buffer_head *bh, int max_segments)
    8.46  {
    8.47 -	if (bh->b_data + bh->b_size == req->bh->b_data)
    8.48 +	if (blk_seg_merge_ok(bh, req->bh))
    8.49  		return 1;
    8.50 +
    8.51  	return ll_new_segment(q, req, max_segments);
    8.52  }
    8.53  
    8.54 @@ -268,7 +286,7 @@ static int ll_merge_requests_fn(request_
    8.55  {
    8.56  	int total_segments = req->nr_segments + next->nr_segments;
    8.57  
    8.58 -	if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
    8.59 +	if (blk_seg_merge_ok(req->bhtail, next->bh))
    8.60  		total_segments--;
    8.61      
    8.62  	if (total_segments > max_segments)
    8.63 @@ -1448,3 +1466,4 @@ EXPORT_SYMBOL(generic_make_request);
    8.64  EXPORT_SYMBOL(blkdev_release_request);
    8.65  EXPORT_SYMBOL(req_finished_io);
    8.66  EXPORT_SYMBOL(generic_unplug_device);
    8.67 +EXPORT_SYMBOL(blk_seg_merge_ok);
     9.1 --- a/xen/include/asm-i386/io.h	Fri Jan 16 19:29:56 2004 +0000
     9.2 +++ b/xen/include/asm-i386/io.h	Mon Jan 19 15:29:17 2004 +0000
     9.3 @@ -22,24 +22,56 @@
     9.4  //#define __io_phys(x) __pa(x)
     9.5  #endif
     9.6  
     9.7 -/*
     9.8 - * Change virtual addresses to physical addresses and vv.
     9.9 - * These are pretty trivial
    9.10 +
    9.11 +/**
    9.12 + *  virt_to_phys    -   map virtual addresses to physical
    9.13 + *  @address: address to remap
    9.14 + *
    9.15 + *  The returned physical address is the physical (CPU) mapping for
    9.16 + *  the memory address given. It is only valid to use this function on
    9.17 + *  addresses directly mapped or allocated via kmalloc.
    9.18 + *
    9.19 + *  This function does not give bus mappings for DMA transfers. In
    9.20 + *  almost all conceivable cases a device driver should not be using
    9.21 + *  this function
    9.22   */
    9.23 +
    9.24  static inline unsigned long virt_to_phys(volatile void * address)
    9.25  {
    9.26 -	return __pa(address);
    9.27 +    return __pa(address);
    9.28  }
    9.29  
    9.30 +/**
    9.31 + *  phys_to_virt    -   map physical address to virtual
    9.32 + *  @address: address to remap
    9.33 + *
    9.34 + *  The returned virtual address is a current CPU mapping for
    9.35 + *  the memory address given. It is only valid to use this function on
     9.36 + *  addresses that have a kernel mapping.
    9.37 + *
    9.38 + *  This function does not handle bus mappings for DMA transfers. In
    9.39 + *  almost all conceivable cases a device driver should not be using
     9.40 + *  this function.
    9.41 + */
    9.42 +
    9.43  static inline void * phys_to_virt(unsigned long address)
    9.44  {
    9.45 -	return __va(address);
    9.46 +    return __va(address);
    9.47  }
    9.48  
    9.49 +/*
    9.50 + * Change "struct pfn_info" to physical address.
    9.51 + */
    9.52 +#ifdef CONFIG_HIGHMEM64G
    9.53 +#define page_to_phys(page)  ((u64)(page - frame_table) << PAGE_SHIFT)
    9.54 +#else
    9.55 +#define page_to_phys(page)  ((page - frame_table) << PAGE_SHIFT)
    9.56 +#endif
    9.57 +
    9.58  #define page_to_pfn(_page)  ((unsigned long)((_page) - frame_table))
    9.59 -#define page_to_phys(_page) (page_to_pfn(_page) << PAGE_SHIFT)
    9.60  #define page_to_virt(_page) phys_to_virt(page_to_phys(_page))
    9.61  
    9.62 +
    9.63  extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
    9.64  
    9.65  static inline void * ioremap (unsigned long offset, unsigned long size)
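
page_to_phys() above now derives the physical address straight from the frame number (page minus frame_table) and, under CONFIG_HIGHMEM64G, widens the value to u64 before shifting so frames above 4 GiB are not truncated. The sketch below is not from the patch; it assumes 4 KiB pages (PAGE_SHIFT of 12) and uses invented frame numbers purely to show the arithmetic and why the 64-bit widening matters on a 32-bit build.

/* Minimal sketch, not from the patch: the page_to_phys() arithmetic.
 * phys = frame_number << PAGE_SHIFT.  A frame number at or above
 * 0x100000 (RAM above 4 GiB) must be widened to 64 bits before the
 * shift or the result wraps.  Frame numbers below are made up. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

int main(void)
{
	uint32_t pfn      = 0x12345;    /* hypothetical frame below 4 GiB */
	uint32_t high_pfn = 0x100200;   /* hypothetical frame above 4 GiB */

	/* low frame: a 32-bit shift is enough */
	printf("phys    = %#llx\n", (unsigned long long)pfn << PAGE_SHIFT);

	/* high frame: without widening, the 32-bit shift wraps to 0x200000 */
	printf("wrapped = %#x\n",   high_pfn << PAGE_SHIFT);
	printf("widened = %#llx\n", (unsigned long long)high_pfn << PAGE_SHIFT);
	return 0;
}
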
    10.1 --- a/xen/include/xeno/blkdev.h	Fri Jan 16 19:29:56 2004 +0000
    10.2 +++ b/xen/include/xeno/blkdev.h	Mon Jan 19 15:29:17 2004 +0000
    10.3 @@ -78,7 +78,8 @@ struct buffer_head {
    10.4          kdev_t b_dev;                   /* device (B_FREE = free) */
    10.5          unsigned long b_state;          /* buffer state bitmap (see above) */
    10.6          struct buffer_head *b_reqnext;  /* request queue */
    10.7 -        char *b_data;                  /* pointer to data block */
    10.8 +        char *b_data;                   /* pointer to data block */
    10.9 +        struct pfn_info *b_page;        /* the page this bh is mapped to */
   10.10          void (*b_end_io)(struct buffer_head *bh, int uptodate);
   10.11          pending_req_t *pending_req;
   10.12  };
   10.13 @@ -247,6 +248,8 @@ struct request_queue
   10.14  	 */
   10.15  	char			head_active;
   10.16  
    10.17 +	unsigned long		bounce_pfn;	/* XXX SMH: backported from 2.4.24 */
   10.18 +
   10.19  	/*
   10.20  	 * Is meant to protect the queue in the future instead of
   10.21  	 * io_request_lock
   10.22 @@ -261,6 +264,34 @@ struct request_queue
   10.23  #endif
   10.24  };
   10.25  
   10.26 +
   10.27 +
   10.28 +#ifdef CONFIG_HIGHMEM
   10.29 +extern struct buffer_head *create_bounce(int, struct buffer_head *);
   10.30 +extern inline struct buffer_head *blk_queue_bounce(request_queue_t *q, int rw,
   10.31 +						   struct buffer_head *bh)
   10.32 +{
   10.33 +	struct page *page = bh->b_page;
   10.34 +
   10.35 +#ifndef CONFIG_DISCONTIGMEM
   10.36 +	if (page - mem_map <= q->bounce_pfn)
   10.37 +#else
   10.38 +	if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_paddr >> PAGE_SHIFT) <= q->bounce_pfn)
   10.39 +#endif
   10.40 +		return bh;
   10.41 +
   10.42 +	return create_bounce(rw, bh);
   10.43 +}
   10.44 +#else
   10.45 +#define blk_queue_bounce(q, rw, bh)	(bh)
   10.46 +#endif
   10.47 +
   10.48 +#define bh_phys(bh)		(page_to_phys((bh)->b_page) + bh_offset((bh)))
   10.49 +
   10.50 +#define BH_CONTIG(b1, b2)	(bh_phys((b1)) + (b1)->b_size == bh_phys((b2)))
   10.51 +#define BH_PHYS_4G(b1, b2)	((bh_phys((b1)) | 0xffffffff) == ((bh_phys((b2)) + (b2)->b_size - 1) | 0xffffffff))
   10.52 +
   10.53 +
   10.54  struct blk_dev_struct {
   10.55  	/*
   10.56  	 * queue_proc has to be atomic
   10.57 @@ -300,6 +331,7 @@ extern void blk_cleanup_queue(request_qu
   10.58  extern void blk_queue_headactive(request_queue_t *, int);
   10.59  extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
   10.60  extern void generic_unplug_device(void *);
   10.61 +extern inline int blk_seg_merge_ok(struct buffer_head *, struct buffer_head *);
   10.62  
   10.63  extern int * blk_size[MAX_BLKDEV];
   10.64  
   10.65 @@ -313,6 +345,8 @@ extern int * max_sectors[MAX_BLKDEV];
   10.66  
   10.67  extern int * max_segments[MAX_BLKDEV];
   10.68  
   10.69 +extern int read_ahead[];
   10.70 +
   10.71  #define MAX_SEGMENTS 128
   10.72  #define MAX_SECTORS 255
   10.73  
   10.74 @@ -366,4 +400,6 @@ static inline unsigned int block_size(kd
   10.75  	return retval;
   10.76  }
   10.77  
   10.78 +
   10.79 +
   10.80  #endif
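
The blkdev.h additions give each buffer_head a backing frame (b_page), give each request queue a reachability limit (bounce_pfn), and let blk_queue_bounce() decide whether a buffer can be handed to the controller as-is or must first be copied into a low-memory bounce buffer. The sketch below is not from the patch; the DMA limit and frame numbers are invented, and it reduces that decision to its core comparison.

/* Minimal sketch, not from the patch: the blk_queue_bounce() decision.
 * A buffer needs a bounce copy only when its frame number lies above
 * the queue's bounce_pfn limit.  Limit and frames below are made up. */
#include <stdio.h>

static int needs_bounce(unsigned long pfn, unsigned long bounce_pfn)
{
	return pfn > bounce_pfn;   /* frame is beyond what the device can reach */
}

int main(void)
{
	unsigned long bounce_pfn = 0xfffff;   /* hypothetical 4 GiB DMA limit */

	printf("pfn 0x80000  -> bounce? %d\n", needs_bounce(0x80000UL,  bounce_pfn));
	printf("pfn 0x120000 -> bounce? %d\n", needs_bounce(0x120000UL, bounce_pfn));
	return 0;   /* prints 0 then 1 */
}

In the real helper the copy itself is done by create_bounce(); the fast path simply returns the original buffer_head when it is already reachable by the device.
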