ia64/xen-unstable

changeset 677:3946af49a538

bitkeeper revision 1.392 (3f3244ecABjhViAc-zlxaKkbsstr4Q)

Many files:
Upgraded our aacraid driver to latest Linux-ac version (Linux 2.4.21-ac4 with aacraid v1.1.2).
author kaf24@scramble.cl.cam.ac.uk
date Thu Aug 07 12:24:12 2003 +0000 (2003-08-07)
parents 00f3a45626b0
children 8ae2c6bc95ea
files xen/drivers/scsi/aacraid/Makefile xen/drivers/scsi/aacraid/README xen/drivers/scsi/aacraid/TODO xen/drivers/scsi/aacraid/aachba.c xen/drivers/scsi/aacraid/aacraid.h xen/drivers/scsi/aacraid/commctrl.c xen/drivers/scsi/aacraid/comminit.c xen/drivers/scsi/aacraid/commsup.c xen/drivers/scsi/aacraid/dpcsup.c xen/drivers/scsi/aacraid/linit.c xen/drivers/scsi/aacraid/rx.c xen/drivers/scsi/aacraid/sa.c
line diff
     1.1 --- a/xen/drivers/scsi/aacraid/Makefile	Wed Aug 06 20:53:30 2003 +0000
     1.2 +++ b/xen/drivers/scsi/aacraid/Makefile	Thu Aug 07 12:24:12 2003 +0000
     1.3 @@ -3,10 +3,6 @@ include $(BASEDIR)/Rules.mk
     1.4  
     1.5  CFLAGS	+= -I$(BASEDIR)/drivers/scsi
     1.6  
     1.7 -
     1.8 -# -y		:= linit.o aachba.o commctrl.o comminit.o commsup.o \
     1.9 -# 		   dpcsup.o rx.o sa.o
    1.10 -
    1.11  default: $(OBJS)
    1.12  	$(LD) -r -o aacraid.o $(OBJS)
    1.13  
     2.1 --- a/xen/drivers/scsi/aacraid/README	Wed Aug 06 20:53:30 2003 +0000
     2.2 +++ b/xen/drivers/scsi/aacraid/README	Thu Aug 07 12:24:12 2003 +0000
     2.3 @@ -18,6 +18,12 @@ Supported Cards/Chipsets
     2.4  	ADAPTEC 2120S
     2.5  	ADAPTEC 2200S
     2.6  	ADAPTEC 5400S
     2.7 +	Legend S220
     2.8 +	Legend S230
     2.9 +	Adaptec 3230S
    2.10 +	Adaptec 3240S
    2.11 +	ASR-2020S PCI-X
    2.12 +	AAR-2410SA SATA
    2.13  
    2.14  People
    2.15  -------------------------
    2.16 @@ -28,15 +34,22 @@ Deanna Bonds <deanna_bonds@adaptec.com> 
    2.17  					 added new ioctls, changed scsi interface to use new error handler,
    2.18  					 increased the number of fibs and outstanding commands to a container)
    2.19  
    2.20 +					(fixed 64bit and 64G memory model, changed confusing naming convention
    2.21 +					 where fibs that go to the hardware are consistently called hw_fibs and
    2.22 +					 not just fibs like the name of the driver tracking structure)
    2.23 +Mark Salyzyn <Mark_Salyzyn@adaptec.com> Fixed panic issues and added some new product ids for upcoming hbas.
    2.24 +
    2.25  Original Driver
    2.26  -------------------------
    2.27  Adaptec Unix OEM Product Group
    2.28  
    2.29  Mailing List
    2.30  -------------------------
    2.31 -None currently. Also note this is very different to Brian's original driver
    2.32 +linux-aacraid-devel@dell.com (Interested parties troll here)
    2.33 +http://mbserver.adaptec.com/ (Currently more Community Support than Devel Support)
    2.34 +Also note this is very different to Brian's original driver
    2.35  so don't expect him to support it.
    2.36 -Adaptec does support this driver.  Contact either tech support or deanna bonds.
    2.37 +Adaptec does support this driver.  Contact either tech support or Mark Salyzyn.
    2.38  
    2.39  Original by Brian Boerner February 2001
    2.40  Rewritten by Alan Cox, November 2001
     3.1 --- a/xen/drivers/scsi/aacraid/TODO	Wed Aug 06 20:53:30 2003 +0000
     3.2 +++ b/xen/drivers/scsi/aacraid/TODO	Thu Aug 07 12:24:12 2003 +0000
     3.3 @@ -2,3 +2,4 @@ o	Testing
     3.4  o	More testing
     3.5  o	Feature request: display the firmware/bios/etc revisions in the
     3.6  	/proc info
     3.7 +o	2.5.0 and beyond.
     4.1 --- a/xen/drivers/scsi/aacraid/aachba.c	Wed Aug 06 20:53:30 2003 +0000
     4.2 +++ b/xen/drivers/scsi/aacraid/aachba.c	Thu Aug 07 12:24:12 2003 +0000
     4.3 @@ -22,15 +22,17 @@
     4.4   *
     4.5   */
     4.6  
     4.7 -#include <xeno/config.h>
     4.8 -/*  #include <xeno/kernel.h> */
     4.9 -#include <xeno/init.h>
    4.10 -#include <xeno/sched.h>
    4.11 -#include <xeno/pci.h>
    4.12 -/*  #include <xeno/spinlock.h> */
    4.13 -/*  #include <xeno/slab.h> */
    4.14 -/*  #include <xeno/completion.h> */
    4.15 -/*  #include <asm/semaphore.h> */
    4.16 +#include <linux/config.h>
    4.17 +#include <linux/module.h>
    4.18 +#include <linux/kernel.h>
    4.19 +#include <linux/init.h>
    4.20 +#include <linux/types.h>
    4.21 +#include <linux/sched.h>
    4.22 +#include <linux/pci.h>
    4.23 +#include <linux/spinlock.h>
    4.24 +#include <linux/slab.h>
    4.25 +/*#include <linux/completion.h>*/
    4.26 +/*#include <asm/semaphore.h>*/
    4.27  #include <asm/uaccess.h>
    4.28  #define MAJOR_NR SCSI_DISK0_MAJOR	/* For DEVICE_NR() */
    4.29  #include <linux/blk.h>
    4.30 @@ -213,8 +215,7 @@ struct sense_data {
    4.31   *              M O D U L E   G L O B A L S
    4.32   */
    4.33   
    4.34 -static struct fsa_scsi_hba *fsa_dev[MAXIMUM_NUM_ADAPTERS]; /*  SCSI Device 
    4.35 -							       Instance Ptrs */
    4.36 +static struct fsa_scsi_hba *fsa_dev[MAXIMUM_NUM_ADAPTERS];	/*  SCSI Device Instance Pointers */
    4.37  static struct sense_data sense_data[MAXIMUM_NUM_CONTAINERS];
    4.38  static void get_sd_devname(int disknum, char *buffer);
    4.39  static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* sgmap);
    4.40 @@ -224,6 +225,15 @@ static int aac_send_srb_fib(Scsi_Cmnd* s
    4.41  static char *aac_get_status_string(u32 status);
    4.42  #endif
    4.43  
    4.44 +/*
    4.45 + *	Non dasd selection is handled entirely in aachba now
    4.46 + */	
    4.47 + 
    4.48 +MODULE_PARM(nondasd, "i");
    4.49 +MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
    4.50 +
    4.51 +static int nondasd = -1;
    4.52 +
    4.53  /**
    4.54   *	aac_get_containers	-	list containers
    4.55   *	@common: adapter to probe
    4.56 @@ -232,57 +242,59 @@ static char *aac_get_status_string(u32 s
    4.57   */
    4.58  int aac_get_containers(struct aac_dev *dev)
    4.59  {
    4.60 -    struct fsa_scsi_hba *fsa_dev_ptr;
    4.61 -    u32 index, status = 0;
    4.62 -    struct aac_query_mount *dinfo;
    4.63 -    struct aac_mount *dresp;
    4.64 -    struct fib * fibptr;
    4.65 -    unsigned instance;
    4.66 -    
    4.67 -    fsa_dev_ptr = &(dev->fsa_dev);
    4.68 -    instance = dev->scsi_host_ptr->unique_id;
    4.69 -    
    4.70 -    if (!(fibptr = fib_alloc(dev)))
    4.71 -	return -ENOMEM;
    4.72 -    
    4.73 -    for (index = 0; index < MAXIMUM_NUM_CONTAINERS; index++) {
    4.74 -	fib_init(fibptr);
    4.75 -	dinfo = (struct aac_query_mount *) fib_data(fibptr);
    4.76 -	
    4.77 -	dinfo->command = cpu_to_le32(VM_NameServe);
    4.78 -	dinfo->count = cpu_to_le32(index);
    4.79 -	dinfo->type = cpu_to_le32(FT_FILESYS);
    4.80 +	struct fsa_scsi_hba *fsa_dev_ptr;
    4.81 +	u32 index;
    4.82 +	int status = 0;
    4.83 +	struct aac_query_mount *dinfo;
    4.84 +	struct aac_mount *dresp;
    4.85 +	struct fib * fibptr;
    4.86 +	unsigned instance;
    4.87  
    4.88 -	status = fib_send(ContainerCommand,
    4.89 -			  fibptr,
    4.90 -			  sizeof (struct aac_query_mount),
    4.91 -			  FsaNormal,
    4.92 -			  1, 1,
    4.93 -			  NULL, NULL);
    4.94 -	if (status < 0 ) {
    4.95 -	    printk(KERN_WARNING "ProbeContainers: SendFIB failed.\n");
    4.96 -	    break;
    4.97 +	fsa_dev_ptr = &(dev->fsa_dev);
    4.98 +	instance = dev->scsi_host_ptr->unique_id;
    4.99 +
   4.100 +	if (!(fibptr = fib_alloc(dev)))
   4.101 +		return -ENOMEM;
   4.102 +
   4.103 +	for (index = 0; index < MAXIMUM_NUM_CONTAINERS; index++) {
   4.104 +		fib_init(fibptr);
   4.105 +		dinfo = (struct aac_query_mount *) fib_data(fibptr);
   4.106 +
   4.107 +		dinfo->command = cpu_to_le32(VM_NameServe);
   4.108 +		dinfo->count = cpu_to_le32(index);
   4.109 +		dinfo->type = cpu_to_le32(FT_FILESYS);
   4.110 +
   4.111 +		status = fib_send(ContainerCommand,
   4.112 +				    fibptr,
   4.113 +				    sizeof (struct aac_query_mount),
   4.114 +				    FsaNormal,
   4.115 +				    1, 1,
   4.116 +				    NULL, NULL);
   4.117 +		if (status < 0 ) {
   4.118 +			printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
   4.119 +			break;
   4.120 +		}
   4.121 +		dresp = (struct aac_mount *)fib_data(fibptr);
   4.122 +
   4.123 +		if ((le32_to_cpu(dresp->status) == ST_OK) &&
   4.124 +		    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
   4.125 +		    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
   4.126 +			fsa_dev_ptr->valid[index] = 1;
   4.127 +			fsa_dev_ptr->type[index] = le32_to_cpu(dresp->mnt[0].vol);
   4.128 +			fsa_dev_ptr->size[index] = le32_to_cpu(dresp->mnt[0].capacity);
   4.129 +			if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
   4.130 +				    fsa_dev_ptr->ro[index] = 1;
   4.131 +		}
   4.132 +		fib_complete(fibptr);
   4.133 +		/*
   4.134 +		 *	If there are no more containers, then stop asking.
   4.135 +		 */
   4.136 +		if ((index + 1) >= le32_to_cpu(dresp->count))
   4.137 +			break;
   4.138  	}
   4.139 -	dresp = (struct aac_mount *)fib_data(fibptr);
   4.140 -	
   4.141 -	if ((le32_to_cpu(dresp->status) == ST_OK) &&
   4.142 -	    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
   4.143 -	    fsa_dev_ptr->valid[index] = 1;
   4.144 -	    fsa_dev_ptr->type[index] = le32_to_cpu(dresp->mnt[0].vol);
   4.145 -	    fsa_dev_ptr->size[index] = le32_to_cpu(dresp->mnt[0].capacity);
   4.146 -	    if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
   4.147 -		fsa_dev_ptr->ro[index] = 1;
   4.148 -	}
   4.149 -	fib_complete(fibptr);
   4.150 -	/*
   4.151 -	 *	If there are no more containers, then stop asking.
   4.152 -	 */
   4.153 -	if ((index + 1) >= le32_to_cpu(dresp->count))
   4.154 -	    break;
   4.155 -    }
   4.156 -    fib_free(fibptr);
   4.157 -    fsa_dev[instance] = fsa_dev_ptr;
   4.158 -    return status;
   4.159 +	fib_free(fibptr);
   4.160 +	fsa_dev[instance] = fsa_dev_ptr;
   4.161 +	return status;
   4.162  }
   4.163  
   4.164  /**
   4.165 @@ -296,54 +308,55 @@ int aac_get_containers(struct aac_dev *d
   4.166   
   4.167  static int probe_container(struct aac_dev *dev, int cid)
   4.168  {
   4.169 -    struct fsa_scsi_hba *fsa_dev_ptr;
   4.170 -    int status;
   4.171 -    struct aac_query_mount *dinfo;
   4.172 -    struct aac_mount *dresp;
   4.173 -    struct fib * fibptr;
   4.174 -    unsigned instance;
   4.175 -    
   4.176 -    fsa_dev_ptr = &(dev->fsa_dev);
   4.177 -    instance = dev->scsi_host_ptr->unique_id;
   4.178 -    
   4.179 -    if (!(fibptr = fib_alloc(dev)))
   4.180 -	return -ENOMEM;
   4.181 -    
   4.182 -    fib_init(fibptr);
   4.183 -    
   4.184 -    dinfo = (struct aac_query_mount *)fib_data(fibptr);
   4.185 -    
   4.186 -    dinfo->command = cpu_to_le32(VM_NameServe);
   4.187 -    dinfo->count = cpu_to_le32(cid);
   4.188 -    dinfo->type = cpu_to_le32(FT_FILESYS);
   4.189 -    
   4.190 -    status = fib_send(ContainerCommand,
   4.191 -		      fibptr,
   4.192 -		      sizeof(struct aac_query_mount),
   4.193 -		      FsaNormal,
   4.194 -		      1, 1,
   4.195 -		      NULL, NULL);
   4.196 -    if (status < 0) {
   4.197 -	printk(KERN_WARNING "aacraid: probe_containers query failed.\n");
   4.198 -	goto error;
   4.199 -    }
   4.200 -    
   4.201 -    dresp = (struct aac_mount *) fib_data(fibptr);
   4.202 -    
   4.203 -    if ((le32_to_cpu(dresp->status) == ST_OK) &&
   4.204 -	(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
   4.205 -	fsa_dev_ptr->valid[cid] = 1;
   4.206 -	fsa_dev_ptr->type[cid] = le32_to_cpu(dresp->mnt[0].vol);
   4.207 -	fsa_dev_ptr->size[cid] = le32_to_cpu(dresp->mnt[0].capacity);
   4.208 -	if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
   4.209 -	    fsa_dev_ptr->ro[cid] = 1;
   4.210 -    }
   4.211 -    
   4.212 - error:
   4.213 -    fib_complete(fibptr);
   4.214 -    fib_free(fibptr);
   4.215 -    
   4.216 -    return status;
   4.217 +	struct fsa_scsi_hba *fsa_dev_ptr;
   4.218 +	int status;
   4.219 +	struct aac_query_mount *dinfo;
   4.220 +	struct aac_mount *dresp;
   4.221 +	struct fib * fibptr;
   4.222 +	unsigned instance;
   4.223 +
   4.224 +	fsa_dev_ptr = &(dev->fsa_dev);
   4.225 +	instance = dev->scsi_host_ptr->unique_id;
   4.226 +
   4.227 +	if (!(fibptr = fib_alloc(dev)))
   4.228 +		return -ENOMEM;
   4.229 +
   4.230 +	fib_init(fibptr);
   4.231 +
   4.232 +	dinfo = (struct aac_query_mount *)fib_data(fibptr);
   4.233 +
   4.234 +	dinfo->command = cpu_to_le32(VM_NameServe);
   4.235 +	dinfo->count = cpu_to_le32(cid);
   4.236 +	dinfo->type = cpu_to_le32(FT_FILESYS);
   4.237 +
   4.238 +	status = fib_send(ContainerCommand,
   4.239 +			    fibptr,
   4.240 +			    sizeof(struct aac_query_mount),
   4.241 +			    FsaNormal,
   4.242 +			    1, 1,
   4.243 +			    NULL, NULL);
   4.244 +	if (status < 0) {
   4.245 +		printk(KERN_WARNING "aacraid: probe_containers query failed.\n");
   4.246 +		goto error;
   4.247 +	}
   4.248 +
   4.249 +	dresp = (struct aac_mount *) fib_data(fibptr);
   4.250 +
   4.251 +	if ((le32_to_cpu(dresp->status) == ST_OK) &&
   4.252 +	    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
   4.253 +	    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
   4.254 +		fsa_dev_ptr->valid[cid] = 1;
   4.255 +		fsa_dev_ptr->type[cid] = le32_to_cpu(dresp->mnt[0].vol);
   4.256 +		fsa_dev_ptr->size[cid] = le32_to_cpu(dresp->mnt[0].capacity);
   4.257 +		if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
   4.258 +			fsa_dev_ptr->ro[cid] = 1;
   4.259 +	}
   4.260 +
   4.261 +error:
   4.262 +	fib_complete(fibptr);
   4.263 +	fib_free(fibptr);
   4.264 +
   4.265 +	return status;
   4.266  }
   4.267  
   4.268  /* Local Structure to set SCSI inquiry data strings */
   4.269 @@ -482,7 +495,6 @@ int aac_get_adapter_info(struct aac_dev*
   4.270  	struct aac_adapter_info* info;
   4.271  	int rcode;
   4.272  	u32 tmp;
   4.273 -
   4.274  	if (!(fibptr = fib_alloc(dev)))
   4.275  		return -ENOMEM;
   4.276  
   4.277 @@ -520,24 +532,54 @@ int aac_get_adapter_info(struct aac_dev*
   4.278  			dev->name, dev->id,
   4.279  			dev->adapter_info.serial[0],
   4.280  			dev->adapter_info.serial[1]);
   4.281 -	dev->pae_support = 0;
   4.282 +
   4.283  	dev->nondasd_support = 0;
   4.284 -	if( BITS_PER_LONG >= 64 && 
   4.285 -	  (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
   4.286 -		printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", 
   4.287 -		       dev->name, dev->id);
   4.288 +	dev->raid_scsi_mode = 0;
   4.289 +	if(dev->adapter_info.options & AAC_OPT_NONDASD){
   4.290 +		dev->nondasd_support = 1;
   4.291 +	}
   4.292 +
   4.293 +	/*
   4.294 +	 * If the firmware supports ROMB RAID/SCSI mode and we are currently
   4.295 +	 * in RAID/SCSI mode, set the flag. For now if in this mode we will
   4.296 +	 * force nondasd support on. If we decide to allow the non-dasd flag
    4.297 +	 * additional changes will have to be made to support
   4.298 +	 * RAID/SCSI.  the function aac_scsi_cmd in this module will have to be
   4.299 +	 * changed to support the new dev->raid_scsi_mode flag instead of
   4.300 +	 * leaching off of the dev->nondasd_support flag. Also in linit.c the
   4.301 +	 * function aac_detect will have to be modified where it sets up the
   4.302 +	 * max number of channels based on the aac->nondasd_support flag only.
   4.303 +	 */
   4.304 +	if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED)
   4.305 +		&& (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE))
   4.306 +	{
   4.307 +		dev->nondasd_support = 1;
   4.308 +		dev->raid_scsi_mode = 1;
   4.309 +	}
   4.310 +	if (dev->raid_scsi_mode != 0)
   4.311 +		printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",dev->name, dev->id);
   4.312 +		
   4.313 +	if (nondasd != -1)
   4.314 +		dev->nondasd_support = (nondasd!=0);
   4.315 +
   4.316 +	if(dev->nondasd_support != 0)
   4.317 +		printk(KERN_INFO "%s%d: Non-DASD support enabled\n",dev->name, dev->id);
   4.318 +
   4.319 +	dev->pae_support = 0;
   4.320 +	if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
   4.321  		dev->pae_support = 1;
   4.322  	}
   4.323  	/* TODO - dmb temporary until fw can set this bit  */
   4.324  	dev->pae_support = (BITS_PER_LONG >= 64);
   4.325 -	if(dev->pae_support != 0) {
   4.326 -		printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", 
   4.327 -		       dev->name, dev->id);
   4.328 +	if(dev->pae_support != 0) 
   4.329 +	{
   4.330 +		printk(KERN_INFO "%s%d: 64 Bit PAE enabled\n", dev->name, dev->id);
   4.331 +		pci_set_dma_mask(dev->pdev, (dma_addr_t)0xFFFFFFFFFFFFFFFFULL);
   4.332  	}
   4.333  
   4.334 -	if(dev->adapter_info.options & AAC_OPT_NONDASD){
   4.335 -		dev->nondasd_support = 1;
   4.336 -	}
   4.337 +	fib_complete(fibptr);
   4.338 +	fib_free(fibptr);
   4.339 +
   4.340  	return rcode;
   4.341  }
   4.342  
   4.343 @@ -556,7 +598,7 @@ static void read_callback(void *context,
   4.344  	cid =TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
   4.345  
   4.346  	lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
   4.347 -	dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %d, t = %ld.\n", smp_processor_id(), lba, jiffies));
   4.348 +	dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
   4.349  
   4.350  	if (fibptr == NULL)
   4.351  		BUG();
   4.352 @@ -601,7 +643,7 @@ static void write_callback(void *context
   4.353  	cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
   4.354  
   4.355  	lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
   4.356 -	dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %d, t = %ld.\n", smp_processor_id(), lba, jiffies));
   4.357 +	dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
   4.358  	if (fibptr == NULL)
   4.359  		BUG();
   4.360  
   4.361 @@ -687,8 +729,7 @@ int aac_read(Scsi_Cmnd * scsicmd, int ci
   4.362  		aac_build_sg64(scsicmd, &readcmd->sg);
   4.363  		if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
   4.364  			BUG();
   4.365 -		fibsize = sizeof(struct aac_read64) + 
   4.366 -		    ((readcmd->sg.count - 1) * sizeof (struct sgentry64));
   4.367 +		fibsize = sizeof(struct aac_read64) + ((readcmd->sg.count - 1) * sizeof (struct sgentry64));
   4.368  		/*
   4.369  		 *	Now send the Fib to the adapter
   4.370  		 */
   4.371 @@ -713,8 +754,7 @@ int aac_read(Scsi_Cmnd * scsicmd, int ci
   4.372  		aac_build_sg(scsicmd, &readcmd->sg);
   4.373  		if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
   4.374  			BUG();
   4.375 -		fibsize = sizeof(struct aac_read) + 
   4.376 -		    ((readcmd->sg.count - 1) * sizeof (struct sgentry));
   4.377 +		fibsize = sizeof(struct aac_read) + ((readcmd->sg.count - 1) * sizeof (struct sgentry));
   4.378  		/*
   4.379  		 *	Now send the Fib to the adapter
   4.380  		 */
   4.381 @@ -734,8 +774,7 @@ int aac_read(Scsi_Cmnd * scsicmd, int ci
   4.382  	if (status == -EINPROGRESS) 
   4.383  		return 0;
   4.384  		
   4.385 -	printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", 
   4.386 -	       status);
   4.387 +	printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status);
   4.388  	/*
   4.389  	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
   4.390  	 */
   4.391 @@ -770,8 +809,7 @@ static int aac_write(Scsi_Cmnd * scsicmd
   4.392  		lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
   4.393  		count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
   4.394  	}
   4.395 -	dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", 
   4.396 -		 smp_processor_id(), lba, jiffies));
   4.397 +	dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
   4.398  	/*
   4.399  	 *	Allocate and initialize a Fib then setup a BlockWrite command
   4.400  	 */
   4.401 @@ -796,8 +834,7 @@ static int aac_write(Scsi_Cmnd * scsicmd
   4.402  		aac_build_sg64(scsicmd, &writecmd->sg);
   4.403  		if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
   4.404  			BUG();
   4.405 -		fibsize = sizeof(struct aac_write64) + 
   4.406 -		    ((writecmd->sg.count - 1) * sizeof (struct sgentry64));
   4.407 +		fibsize = sizeof(struct aac_write64) + ((writecmd->sg.count - 1) * sizeof (struct sgentry64));
   4.408  		/*
   4.409  		 *	Now send the Fib to the adapter
   4.410  		 */
   4.411 @@ -825,8 +862,7 @@ static int aac_write(Scsi_Cmnd * scsicmd
   4.412  		aac_build_sg(scsicmd, &writecmd->sg);
   4.413  		if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
   4.414  			BUG();
   4.415 -		fibsize = sizeof(struct aac_write) + 
   4.416 -		    ((writecmd->sg.count - 1) * sizeof (struct sgentry));
   4.417 +		fibsize = sizeof(struct aac_write) + ((writecmd->sg.count - 1) * sizeof (struct sgentry));
   4.418  		/*
   4.419  		 *	Now send the Fib to the adapter
   4.420  		 */
   4.421 @@ -869,358 +905,351 @@ static int aac_write(Scsi_Cmnd * scsicmd
   4.422   
   4.423  int aac_scsi_cmd(Scsi_Cmnd * scsicmd)
   4.424  {
   4.425 -    u32 cid = 0;
   4.426 -    struct fsa_scsi_hba *fsa_dev_ptr;
   4.427 -    int cardtype;
   4.428 -    int ret;
   4.429 -    struct aac_dev *dev = (struct aac_dev *)scsicmd->host->hostdata;
   4.430 -    
   4.431 -    cardtype = dev->cardtype;
   4.432 +	u32 cid = 0;
   4.433 +	struct fsa_scsi_hba *fsa_dev_ptr;
   4.434 +	int cardtype;
   4.435 +	int ret;
   4.436 +	struct aac_dev *dev = (struct aac_dev *)scsicmd->host->hostdata;
   4.437 +	
   4.438 +	cardtype = dev->cardtype;
   4.439  
   4.440 -    fsa_dev_ptr = fsa_dev[scsicmd->host->unique_id];
   4.441 -    
   4.442 -    /*
   4.443 -     *	If the bus, target or lun is out of range, return fail
   4.444 -     *	Test does not apply to ID 16, the pseudo id for the controller
   4.445 -     *	itself.
   4.446 -     */
   4.447 -    if (scsicmd->target != scsicmd->host->this_id) {
   4.448 -	if ((scsicmd->channel == 0) ){
   4.449 -	    if( (scsicmd->target >= AAC_MAX_TARGET) || (scsicmd->lun != 0)){ 
   4.450 -		scsicmd->result = DID_NO_CONNECT << 16;
   4.451 -		__aac_io_done(scsicmd);
   4.452 -		return 0;
   4.453 -	    }
   4.454 -	    cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
   4.455 -	    
   4.456 -	    /*
   4.457 -	     *	If the target container doesn't exist, it may have
   4.458 -	     *	been newly created
   4.459 -	     */
   4.460 -	    if (fsa_dev_ptr->valid[cid] == 0) {
   4.461 -		switch (scsicmd->cmnd[0]) {
   4.462 -		case SS_INQUIR:
   4.463 -		case SS_RDCAP:
   4.464 -		case SS_TEST:
   4.465 -		    spin_unlock_irq(&io_request_lock);
   4.466 -		    probe_container(dev, cid);
   4.467 -		    spin_lock_irq(&io_request_lock);
   4.468 -		    if (fsa_dev_ptr->valid[cid] == 0) {
   4.469 -			scsicmd->result = DID_NO_CONNECT << 16;
   4.470 -			__aac_io_done(scsicmd);
   4.471 -			return 0;
   4.472 -		    }
   4.473 -		default:
   4.474 -		    break;
   4.475 +	fsa_dev_ptr = fsa_dev[scsicmd->host->unique_id];
   4.476 +
   4.477 +	/*
   4.478 +	 *	If the bus, target or lun is out of range, return fail
   4.479 +	 *	Test does not apply to ID 16, the pseudo id for the controller
   4.480 +	 *	itself.
   4.481 +	 */
   4.482 +	if (scsicmd->target != scsicmd->host->this_id) {
   4.483 +		if ((scsicmd->channel == 0) ){
   4.484 +			if( (scsicmd->target >= AAC_MAX_TARGET) || (scsicmd->lun != 0)){ 
   4.485 +				scsicmd->result = DID_NO_CONNECT << 16;
   4.486 +				__aac_io_done(scsicmd);
   4.487 +				return 0;
   4.488 +			}
   4.489 +			cid = TARGET_LUN_TO_CONTAINER(scsicmd->target, scsicmd->lun);
   4.490 +
   4.491 +			/*
   4.492 +			 *	If the target container doesn't exist, it may have
   4.493 +			 *	been newly created
   4.494 +			 */
   4.495 +			if (fsa_dev_ptr->valid[cid] == 0) {
   4.496 +				switch (scsicmd->cmnd[0]) {
   4.497 +				case SS_INQUIR:
   4.498 +				case SS_RDCAP:
   4.499 +				case SS_TEST:
   4.500 +					spin_unlock_irq(&io_request_lock);
   4.501 +					probe_container(dev, cid);
   4.502 +					spin_lock_irq(&io_request_lock);
   4.503 +					if (fsa_dev_ptr->valid[cid] == 0) {
   4.504 +						scsicmd->result = DID_NO_CONNECT << 16;
   4.505 +						__aac_io_done(scsicmd);
   4.506 +						return 0;
   4.507 +					}
   4.508 +				default:
   4.509 +					break;
   4.510 +				}
   4.511 +			}
   4.512 +			/*
   4.513 +			 *	If the target container still doesn't exist, 
   4.514 +			 *	return failure
   4.515 +			 */
   4.516 +			if (fsa_dev_ptr->valid[cid] == 0) {
   4.517 +				scsicmd->result = DID_BAD_TARGET << 16;
   4.518 +				__aac_io_done(scsicmd);
   4.519 +				return -1;
   4.520 +			}
   4.521 +		} else {  /* check for physical non-dasd devices */
   4.522 +			if(dev->nondasd_support == 1){
   4.523 +				return aac_send_srb_fib(scsicmd);
   4.524 +			} else {
   4.525 +				scsicmd->result = DID_NO_CONNECT << 16;
   4.526 +				__aac_io_done(scsicmd);
   4.527 +				return 0;
   4.528 +			}
   4.529  		}
   4.530 -	    }
   4.531 -	    /*
   4.532 -	     *	If the target container still doesn't exist, 
   4.533 -	     *	return failure
   4.534 -	     */
   4.535 -	    if (fsa_dev_ptr->valid[cid] == 0) {
   4.536 -		scsicmd->result = DID_BAD_TARGET << 16;
   4.537 +	}
   4.538 +	/*
   4.539 +	 * else Command for the controller itself
   4.540 +	 */
   4.541 +	else if ((scsicmd->cmnd[0] != SS_INQUIR) &&	/* only INQUIRY & TUR cmnd supported for controller */
   4.542 +		(scsicmd->cmnd[0] != SS_TEST)) 
   4.543 +	{
   4.544 +		dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
   4.545 +		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
   4.546 +		set_sense((u8 *) &sense_data[cid],
   4.547 +			    SENKEY_ILLEGAL,
   4.548 +			    SENCODE_INVALID_COMMAND,
   4.549 +			    ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
   4.550  		__aac_io_done(scsicmd);
   4.551  		return -1;
   4.552 -			}
   4.553 -	} else {  /* check for physical non-dasd devices */
   4.554 -	    if(dev->nondasd_support == 1){
   4.555 -		return aac_send_srb_fib(scsicmd);
   4.556 -	    } else {
   4.557 -		scsicmd->result = DID_NO_CONNECT << 16;
   4.558 +	}
   4.559 +
   4.560 +
   4.561 +	/* Handle commands here that don't really require going out to the adapter */
   4.562 +	switch (scsicmd->cmnd[0]) {
   4.563 +	case SS_INQUIR:
   4.564 +	{
   4.565 +		struct inquiry_data *inq_data_ptr;
   4.566 +
   4.567 +		dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->target));
   4.568 +		inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
   4.569 +		memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
   4.570 +
   4.571 +		inq_data_ptr->inqd_ver = 2;	/* claim compliance to SCSI-2 */
   4.572 +		inq_data_ptr->inqd_dtq = 0x80;	/* set RMB bit to one indicating that the medium is removable */
   4.573 +		inq_data_ptr->inqd_rdf = 2;	/* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
   4.574 +		inq_data_ptr->inqd_len = 31;
   4.575 +		/*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
   4.576 +		inq_data_ptr->inqd_pad2= 0x32 ;	 /*WBus16|Sync|CmdQue */
   4.577 +		/*
   4.578 +		 *	Set the Vendor, Product, and Revision Level
   4.579 +		 *	see: <vendor>.c i.e. aac.c
   4.580 +		 */
   4.581 +		setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr->type[cid]);
   4.582 +		if (scsicmd->target == scsicmd->host->this_id)
   4.583 +			inq_data_ptr->inqd_pdt = INQD_PDT_PROC;	/* Processor device */
   4.584 +		else
   4.585 +			inq_data_ptr->inqd_pdt = INQD_PDT_DA;	/* Direct/random access device */
   4.586 +		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
   4.587  		__aac_io_done(scsicmd);
   4.588  		return 0;
   4.589 -	    }
   4.590  	}
   4.591 -    }
   4.592 -    /*
   4.593 -     * else Command for the controller itself
   4.594 -     */
   4.595 -    else if ((scsicmd->cmnd[0] != SS_INQUIR) &&	
   4.596 -	     (scsicmd->cmnd[0] != SS_TEST)) 
   4.597 -    {
   4.598 -	/* only INQUIRY & TUR cmnd supported for controller */
   4.599 -	dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for "
   4.600 -		 "controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
   4.601 -	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 
   4.602 -	    CHECK_CONDITION;
   4.603 -	set_sense((u8 *) &sense_data[cid],
   4.604 -		  SENKEY_ILLEGAL,
   4.605 -		  SENCODE_INVALID_COMMAND,
   4.606 -		  ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
   4.607 -	__aac_io_done(scsicmd);
   4.608 -	return -1;
   4.609 -    }
   4.610 -    
   4.611 -    
   4.612 -    /* Handle commands here that don't require going out to the adapter */
   4.613 -    switch (scsicmd->cmnd[0]) {
   4.614 -    case SS_INQUIR:
   4.615 -    {
   4.616 -	struct inquiry_data *inq_data_ptr;
   4.617 -	
   4.618 -	dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->target));
   4.619 -	inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
   4.620 -	memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
   4.621 -	
   4.622 -	inq_data_ptr->inqd_ver = 2;	/* claim compliance to SCSI-2 */
   4.623 -	inq_data_ptr->inqd_dtq = 0x80;	/* set RMB bit to one indicating that the medium is removable */
   4.624 -	inq_data_ptr->inqd_rdf = 2;	/* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
   4.625 -	inq_data_ptr->inqd_len = 31;
   4.626 -	/*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
   4.627 -	inq_data_ptr->inqd_pad2= 0x32 ;	 /*WBus16|Sync|CmdQue */
   4.628 -	/*
   4.629 -	 *	Set the Vendor, Product, and Revision Level
   4.630 -	 *	see: <vendor>.c i.e. aac.c
   4.631 -	 */
   4.632 -	setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr->type[cid]);
   4.633 -	if (scsicmd->target == scsicmd->host->this_id)
   4.634 -	    inq_data_ptr->inqd_pdt = INQD_PDT_PROC;	/* Processor device */
   4.635 -	else
   4.636 -	    inq_data_ptr->inqd_pdt = INQD_PDT_DA;	/* Direct/random access device */
   4.637 -	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
   4.638 -	__aac_io_done(scsicmd);
   4.639 -	return 0;
   4.640 -    }
   4.641 -    case SS_RDCAP:
   4.642 -    {
   4.643 -	int capacity;
   4.644 -	char *cp;
   4.645 -	
   4.646 -	dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
   4.647 -	capacity = fsa_dev_ptr->size[cid] - 1;
   4.648 -	cp = scsicmd->request_buffer;
   4.649 -	cp[0] = (capacity >> 24) & 0xff;
   4.650 -	cp[1] = (capacity >> 16) & 0xff;
   4.651 -	cp[2] = (capacity >> 8) & 0xff;
   4.652 -	cp[3] = (capacity >> 0) & 0xff;
   4.653 -	cp[4] = 0;
   4.654 -	cp[5] = 0;
   4.655 -	cp[6] = 2;
   4.656 -	cp[7] = 0;
   4.657 -	
   4.658 -	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
   4.659 -	__aac_io_done(scsicmd);
   4.660 -	
   4.661 -	return 0;
   4.662 -    }
   4.663 -    
   4.664 -    case SS_MODESEN:
   4.665 -    {
   4.666 -	char *mode_buf;
   4.667 -	
   4.668 -	dprintk((KERN_DEBUG "MODE SENSE command.\n"));
   4.669 -	mode_buf = scsicmd->request_buffer;
   4.670 -	mode_buf[0] = 0;  /* Mode data length (MSB) */
   4.671 -	mode_buf[1] = 6;  /* Mode data length (LSB) */
   4.672 -	mode_buf[2] = 0;  /* Medium type - default */
   4.673 -	mode_buf[3] = 0;  /* Device-specific param, 
   4.674 -			     bit 8: 0/1 = write enabled/protected */
   4.675 -	mode_buf[4] = 0;  /* reserved */
   4.676 -	mode_buf[5] = 0;  /* reserved */
   4.677 -	mode_buf[6] = 0;  /* Block descriptor length (MSB) */
   4.678 -	mode_buf[7] = 0;  /* Block descriptor length (LSB) */
   4.679 -	
   4.680 -	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
   4.681 -	__aac_io_done(scsicmd);
   4.682 -	
   4.683 -	return 0;
   4.684 -    }
   4.685 -    case SS_REQSEN:
   4.686 -	dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
   4.687 -	memcpy(scsicmd->sense_buffer, &sense_data[cid], 
   4.688 -	       sizeof (struct sense_data));
   4.689 -	memset(&sense_data[cid], 0, sizeof (struct sense_data));
   4.690 -	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
   4.691 -	__aac_io_done(scsicmd);
   4.692 -	return (0);
   4.693 -	
   4.694 -    case SS_LOCK:
   4.695 -	dprintk((KERN_DEBUG "LOCK command.\n"));
   4.696 -	if (scsicmd->cmnd[4])
   4.697 -	    fsa_dev_ptr->locked[cid] = 1;
   4.698 -	else
   4.699 -	    fsa_dev_ptr->locked[cid] = 0;
   4.700 -	
   4.701 -	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
   4.702 -	__aac_io_done(scsicmd);
   4.703 -	return 0;
   4.704 +	case SS_RDCAP:
   4.705 +	{
   4.706 +		int capacity;
   4.707 +		char *cp;
   4.708 +
   4.709 +		dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
   4.710 +		capacity = fsa_dev_ptr->size[cid] - 1;
   4.711 +		cp = scsicmd->request_buffer;
   4.712 +		cp[0] = (capacity >> 24) & 0xff;
   4.713 +		cp[1] = (capacity >> 16) & 0xff;
   4.714 +		cp[2] = (capacity >> 8) & 0xff;
   4.715 +		cp[3] = (capacity >> 0) & 0xff;
   4.716 +		cp[4] = 0;
   4.717 +		cp[5] = 0;
   4.718 +		cp[6] = 2;
   4.719 +		cp[7] = 0;
   4.720 +
   4.721 +		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
   4.722 +		__aac_io_done(scsicmd);
   4.723 +
   4.724 +		return 0;
   4.725 +	}
   4.726 +
   4.727 +	case SS_MODESEN:
   4.728 +	{
   4.729 +		char *mode_buf;
   4.730 +
   4.731 +		dprintk((KERN_DEBUG "MODE SENSE command.\n"));
   4.732 +		mode_buf = scsicmd->request_buffer;
   4.733 +		mode_buf[0] = 0;	/* Mode data length (MSB) */
   4.734 +		mode_buf[1] = 6;	/* Mode data length (LSB) */
   4.735 +		mode_buf[2] = 0;	/* Medium type - default */
   4.736 +		mode_buf[3] = 0;	/* Device-specific param, bit 8: 0/1 = write enabled/protected */
   4.737 +		mode_buf[4] = 0;	/* reserved */
   4.738 +		mode_buf[5] = 0;	/* reserved */
   4.739 +		mode_buf[6] = 0;	/* Block descriptor length (MSB) */
   4.740 +		mode_buf[7] = 0;	/* Block descriptor length (LSB) */
   4.741 +
   4.742 +		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
   4.743 +		__aac_io_done(scsicmd);
   4.744 +
   4.745 +		return 0;
   4.746 +	}
   4.747 +	case SS_REQSEN:
   4.748 +		dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
   4.749 +		memcpy(scsicmd->sense_buffer, &sense_data[cid], sizeof (struct sense_data));
   4.750 +		memset(&sense_data[cid], 0, sizeof (struct sense_data));
   4.751 +		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
   4.752 +		__aac_io_done(scsicmd);
   4.753 +		return (0);
   4.754 +
   4.755 +	case SS_LOCK:
   4.756 +		dprintk((KERN_DEBUG "LOCK command.\n"));
   4.757 +		if (scsicmd->cmnd[4])
   4.758 +			fsa_dev_ptr->locked[cid] = 1;
   4.759 +		else
   4.760 +			fsa_dev_ptr->locked[cid] = 0;
   4.761 +
   4.762 +		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
   4.763 +		__aac_io_done(scsicmd);
   4.764 +		return 0;
   4.765  	/*
   4.766  	 *	These commands are all No-Ops
   4.767  	 */
   4.768 -    case SS_TEST:
   4.769 -    case SS_RESERV:
   4.770 -    case SS_RELES:
   4.771 -    case SS_REZERO:
   4.772 -    case SS_REASGN:
   4.773 -    case SS_SEEK:
   4.774 -    case SS_ST_SP:
   4.775 -	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
   4.776 -	__aac_io_done(scsicmd);
   4.777 -	return (0);
   4.778 -    }
   4.779 -    
   4.780 -    switch (scsicmd->cmnd[0]) 
   4.781 -    {
   4.782 -    case SS_READ:
   4.783 -    case SM_READ:
   4.784 -	/*
   4.785 -	 *	Hack to keep track of ordinal number of the device that
   4.786 -	 *	corresponds to a container. Needed to convert
   4.787 -	 *	containers to /dev/sd device names
   4.788 -	 */
   4.789 -	
   4.790 -	spin_unlock_irq(&io_request_lock);
   4.791 -	fsa_dev_ptr->devno[cid] = DEVICE_NR(scsicmd->request.rq_dev);
   4.792 -	ret = aac_read(scsicmd, cid);
   4.793 -	spin_lock_irq(&io_request_lock);
   4.794 -	return ret;
   4.795 -	
   4.796 -    case SS_WRITE:
   4.797 -    case SM_WRITE:
   4.798 -	spin_unlock_irq(&io_request_lock);
   4.799 -	ret = aac_write(scsicmd, cid);
   4.800 -	spin_lock_irq(&io_request_lock);
   4.801 -	return ret;
   4.802 -    default:
   4.803 -	/*
   4.804 -	 *	Unhandled commands
   4.805 -	 */
   4.806 -	printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", 
   4.807 -	       scsicmd->cmnd[0]);
   4.808 -	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 
   4.809 -	    CHECK_CONDITION;
   4.810 -	set_sense((u8 *) &sense_data[cid],
   4.811 -		  SENKEY_ILLEGAL, SENCODE_INVALID_COMMAND,
   4.812 -		  ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
   4.813 -	__aac_io_done(scsicmd);
   4.814 -	return -1;
   4.815 -    }
   4.816 +	case SS_TEST:
   4.817 +	case SS_RESERV:
   4.818 +	case SS_RELES:
   4.819 +	case SS_REZERO:
   4.820 +	case SS_REASGN:
   4.821 +	case SS_SEEK:
   4.822 +	case SS_ST_SP:
   4.823 +		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | GOOD;
   4.824 +		__aac_io_done(scsicmd);
   4.825 +		return (0);
   4.826 +	}
   4.827 +
   4.828 +	switch (scsicmd->cmnd[0]) 
   4.829 +	{
   4.830 +		case SS_READ:
   4.831 +		case SM_READ:
   4.832 +			/*
   4.833 +			 *	Hack to keep track of ordinal number of the device that
   4.834 +			 *	corresponds to a container. Needed to convert
   4.835 +			 *	containers to /dev/sd device names
   4.836 +			 */
   4.837 +			 
   4.838 +			spin_unlock_irq(&io_request_lock);
   4.839 +			fsa_dev_ptr->devno[cid] = DEVICE_NR(scsicmd->request.rq_dev);
   4.840 +			ret = aac_read(scsicmd, cid);
   4.841 +			spin_lock_irq(&io_request_lock);
   4.842 +			return ret;
   4.843 +
   4.844 +		case SS_WRITE:
   4.845 +		case SM_WRITE:
   4.846 +			spin_unlock_irq(&io_request_lock);
   4.847 +			ret = aac_write(scsicmd, cid);
   4.848 +			spin_lock_irq(&io_request_lock);
   4.849 +			return ret;
   4.850 +		default:
   4.851 +			/*
   4.852 +			 *	Unhandled commands
   4.853 +			 */
   4.854 +			printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]);
   4.855 +			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
   4.856 +			set_sense((u8 *) &sense_data[cid],
   4.857 +				SENKEY_ILLEGAL, SENCODE_INVALID_COMMAND,
   4.858 +			ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
   4.859 +			__aac_io_done(scsicmd);
   4.860 +			return -1;
   4.861 +	}
   4.862  }
   4.863  
   4.864  static int query_disk(struct aac_dev *dev, void *arg)
   4.865  {
   4.866 -    struct aac_query_disk qd;
   4.867 -    struct fsa_scsi_hba *fsa_dev_ptr;
   4.868 -    
   4.869 -    fsa_dev_ptr = &(dev->fsa_dev);
   4.870 -    if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
   4.871 -	return -EFAULT;
   4.872 -    if (qd.cnum == -1)
   4.873 -	qd.cnum = TARGET_LUN_TO_CONTAINER(qd.target, qd.lun);
   4.874 -    else if ((qd.bus == -1) && (qd.target == -1) && (qd.lun == -1)) 
   4.875 -    {
   4.876 -	if (qd.cnum < 0 || qd.cnum > MAXIMUM_NUM_CONTAINERS)
   4.877 -	    return -EINVAL;
   4.878 -	qd.instance = dev->scsi_host_ptr->host_no;
   4.879 -	qd.bus = 0;
   4.880 -	qd.target = CONTAINER_TO_TARGET(qd.cnum);
   4.881 -	qd.lun = CONTAINER_TO_LUN(qd.cnum);
   4.882 -    }
   4.883 -    else return -EINVAL;
   4.884 -    
   4.885 -    qd.valid = fsa_dev_ptr->valid[qd.cnum];
   4.886 -    qd.locked = fsa_dev_ptr->locked[qd.cnum];
   4.887 -    qd.deleted = fsa_dev_ptr->deleted[qd.cnum];
   4.888 -    
   4.889 -    if (fsa_dev_ptr->devno[qd.cnum] == -1)
   4.890 -	qd.unmapped = 1;
   4.891 -    else
   4.892 -	qd.unmapped = 0;
   4.893 -    
   4.894 -    get_sd_devname(fsa_dev_ptr->devno[qd.cnum], qd.name);
   4.895 -    
   4.896 -    if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
   4.897 -	return -EFAULT;
   4.898 -    return 0;
   4.899 +	struct aac_query_disk qd;
   4.900 +	struct fsa_scsi_hba *fsa_dev_ptr;
   4.901 +
   4.902 +	fsa_dev_ptr = &(dev->fsa_dev);
   4.903 +	if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
   4.904 +		return -EFAULT;
   4.905 +	if (qd.cnum == -1)
   4.906 +		qd.cnum = TARGET_LUN_TO_CONTAINER(qd.target, qd.lun);
   4.907 +	else if ((qd.bus == -1) && (qd.target == -1) && (qd.lun == -1)) 
   4.908 +	{
   4.909 +		if (qd.cnum < 0 || qd.cnum > MAXIMUM_NUM_CONTAINERS)
   4.910 +			return -EINVAL;
   4.911 +		qd.instance = dev->scsi_host_ptr->host_no;
   4.912 +		qd.bus = 0;
   4.913 +		qd.target = CONTAINER_TO_TARGET(qd.cnum);
   4.914 +		qd.lun = CONTAINER_TO_LUN(qd.cnum);
   4.915 +	}
   4.916 +	else return -EINVAL;
   4.917 +
   4.918 +	qd.valid = fsa_dev_ptr->valid[qd.cnum];
   4.919 +	qd.locked = fsa_dev_ptr->locked[qd.cnum];
   4.920 +	qd.deleted = fsa_dev_ptr->deleted[qd.cnum];
   4.921 +
   4.922 +	if (fsa_dev_ptr->devno[qd.cnum] == -1)
   4.923 +		qd.unmapped = 1;
   4.924 +	else
   4.925 +		qd.unmapped = 0;
   4.926 +
   4.927 +	get_sd_devname(fsa_dev_ptr->devno[qd.cnum], qd.name);
   4.928 +
   4.929 +	if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
   4.930 +		return -EFAULT;
   4.931 +	return 0;
   4.932  }
   4.933  
   4.934  static void get_sd_devname(int disknum, char *buffer)
   4.935  {
   4.936 -    if (disknum < 0) {
   4.937 -	sprintf(buffer, "%s", "");
   4.938 -	return;
   4.939 -    }
   4.940 -    
   4.941 -    if (disknum < 26)
   4.942 -	sprintf(buffer, "sd%c", 'a' + disknum);
   4.943 -    else {
   4.944 -	unsigned int min1;
   4.945 -	unsigned int min2;
   4.946 -	/*
   4.947 -	 * For larger numbers of disks, we need to go to a new
   4.948 -	 * naming scheme.
   4.949 -	 */
   4.950 -	min1 = disknum / 26;
   4.951 -	min2 = disknum % 26;
   4.952 -	sprintf(buffer, "sd%c%c", 'a' + min1 - 1, 'a' + min2);
   4.953 -    }
   4.954 +	if (disknum < 0) {
   4.955 +		sprintf(buffer, "%s", "");
   4.956 +		return;
   4.957 +	}
   4.958 +
   4.959 +	if (disknum < 26)
   4.960 +		sprintf(buffer, "sd%c", 'a' + disknum);
   4.961 +	else {
   4.962 +		unsigned int min1;
   4.963 +		unsigned int min2;
   4.964 +		/*
   4.965 +		 * For larger numbers of disks, we need to go to a new
   4.966 +		 * naming scheme.
   4.967 +		 */
   4.968 +		min1 = disknum / 26;
   4.969 +		min2 = disknum % 26;
   4.970 +		sprintf(buffer, "sd%c%c", 'a' + min1 - 1, 'a' + min2);
   4.971 +	}
   4.972  }
   4.973  
   4.974  static int force_delete_disk(struct aac_dev *dev, void *arg)
   4.975  {
   4.976 -    struct aac_delete_disk dd;
   4.977 -    struct fsa_scsi_hba *fsa_dev_ptr;
   4.978 -    
   4.979 -    fsa_dev_ptr = &(dev->fsa_dev);
   4.980 -    
   4.981 -    if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
   4.982 -	return -EFAULT;
   4.983 -    
   4.984 -    if (dd.cnum > MAXIMUM_NUM_CONTAINERS)
   4.985 -	return -EINVAL;
   4.986 -    /*
   4.987 -     *	Mark this container as being deleted.
   4.988 -     */
   4.989 -    fsa_dev_ptr->deleted[dd.cnum] = 1;
   4.990 -    /*
   4.991 -     *	Mark the container as no longer valid
   4.992 -     */
   4.993 -    fsa_dev_ptr->valid[dd.cnum] = 0;
   4.994 -    return 0;
   4.995 +	struct aac_delete_disk dd;
   4.996 +	struct fsa_scsi_hba *fsa_dev_ptr;
   4.997 +
   4.998 +	fsa_dev_ptr = &(dev->fsa_dev);
   4.999 +
  4.1000 +	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
  4.1001 +		return -EFAULT;
  4.1002 +
  4.1003 +	if (dd.cnum > MAXIMUM_NUM_CONTAINERS)
  4.1004 +		return -EINVAL;
  4.1005 +	/*
  4.1006 +	 *	Mark this container as being deleted.
  4.1007 +	 */
  4.1008 +	fsa_dev_ptr->deleted[dd.cnum] = 1;
  4.1009 +	/*
  4.1010 +	 *	Mark the container as no longer valid
  4.1011 +	 */
  4.1012 +	fsa_dev_ptr->valid[dd.cnum] = 0;
  4.1013 +	return 0;
  4.1014  }
  4.1015  
  4.1016  static int delete_disk(struct aac_dev *dev, void *arg)
  4.1017  {
  4.1018 -    struct aac_delete_disk dd;
  4.1019 -    struct fsa_scsi_hba *fsa_dev_ptr;
  4.1020 -
  4.1021 -    fsa_dev_ptr = &(dev->fsa_dev);
  4.1022 -
  4.1023 -    if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
  4.1024 -	return -EFAULT;
  4.1025 +	struct aac_delete_disk dd;
  4.1026 +	struct fsa_scsi_hba *fsa_dev_ptr;
  4.1027  
  4.1028 -    if (dd.cnum > MAXIMUM_NUM_CONTAINERS)
  4.1029 -	return -EINVAL;
  4.1030 -    /*
  4.1031 -     *	If the container is locked, it can not be deleted by the API.
  4.1032 -     */
  4.1033 -    if (fsa_dev_ptr->locked[dd.cnum])
  4.1034 -	return -EBUSY;
  4.1035 -    else {
  4.1036 +	fsa_dev_ptr = &(dev->fsa_dev);
  4.1037 +
  4.1038 +	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
  4.1039 +		return -EFAULT;
  4.1040 +
  4.1041 +	if (dd.cnum > MAXIMUM_NUM_CONTAINERS)
  4.1042 +		return -EINVAL;
  4.1043  	/*
  4.1044 -	 *	Mark the container as no longer being valid.
  4.1045 +	 *	If the container is locked, it can not be deleted by the API.
  4.1046  	 */
  4.1047 -	fsa_dev_ptr->valid[dd.cnum] = 0;
  4.1048 -	fsa_dev_ptr->devno[dd.cnum] = -1;
  4.1049 -	return 0;
  4.1050 -    }
  4.1051 +	if (fsa_dev_ptr->locked[dd.cnum])
  4.1052 +		return -EBUSY;
  4.1053 +	else {
  4.1054 +		/*
  4.1055 +		 *	Mark the container as no longer being valid.
  4.1056 +		 */
  4.1057 +		fsa_dev_ptr->valid[dd.cnum] = 0;
  4.1058 +		fsa_dev_ptr->devno[dd.cnum] = -1;
  4.1059 +		return 0;
  4.1060 +	}
  4.1061  }
  4.1062  
  4.1063  int aac_dev_ioctl(struct aac_dev *dev, int cmd, void *arg)
  4.1064  {
  4.1065 -    switch (cmd) {
  4.1066 -    case FSACTL_QUERY_DISK:
  4.1067 -	return query_disk(dev, arg);
  4.1068 -    case FSACTL_DELETE_DISK:
  4.1069 -	return delete_disk(dev, arg);
  4.1070 -    case FSACTL_FORCE_DELETE_DISK:
  4.1071 -	return force_delete_disk(dev, arg);
  4.1072 -    case 2131:
  4.1073 -	return aac_get_containers(dev);
  4.1074 -    default:
  4.1075 -	return -ENOTTY;
  4.1076 -    }
  4.1077 +	switch (cmd) {
  4.1078 +	case FSACTL_QUERY_DISK:
  4.1079 +		return query_disk(dev, arg);
  4.1080 +	case FSACTL_DELETE_DISK:
  4.1081 +		return delete_disk(dev, arg);
  4.1082 +	case FSACTL_FORCE_DELETE_DISK:
  4.1083 +		return force_delete_disk(dev, arg);
  4.1084 +	case 2131:
  4.1085 +		return aac_get_containers(dev);
  4.1086 +	default:
  4.1087 +		return -ENOTTY;
  4.1088 +	}
  4.1089  }
  4.1090  
  4.1091  /**
  4.1092 @@ -1235,160 +1264,189 @@ int aac_dev_ioctl(struct aac_dev *dev, i
  4.1093  
  4.1094  static void aac_srb_callback(void *context, struct fib * fibptr)
  4.1095  {
  4.1096 -    struct aac_dev *dev;
  4.1097 -    struct aac_srb_reply *srbreply;
  4.1098 -    Scsi_Cmnd *scsicmd;
  4.1099 -
  4.1100 -    scsicmd = (Scsi_Cmnd *) context;
  4.1101 -    dev = (struct aac_dev *)scsicmd->host->hostdata;
  4.1102 -
  4.1103 -    if (fibptr == NULL)
  4.1104 -	BUG();
  4.1105 -
  4.1106 -    srbreply = (struct aac_srb_reply *) fib_data(fibptr);
  4.1107 -
  4.1108 -    scsicmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
  4.1109 -    // calculate resid for sg 
  4.1110 -    scsicmd->resid = scsicmd->request_bufflen - srbreply->data_xfer_length;
  4.1111 -
  4.1112 -    if(scsicmd->use_sg)
  4.1113 -	pci_unmap_sg(dev->pdev, 
  4.1114 -		     (struct scatterlist *)scsicmd->buffer,
  4.1115 -		     scsicmd->use_sg,
  4.1116 -		     scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
  4.1117 -    else if(scsicmd->request_bufflen)
  4.1118 -	pci_unmap_single(dev->pdev, (ulong)scsicmd->SCp.ptr, 
  4.1119 -			 scsicmd->request_bufflen,
  4.1120 -			 scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
  4.1121 -
  4.1122 -    /*
  4.1123 -     * First check the fib status
  4.1124 -     */
  4.1125 +	struct aac_dev *dev;
  4.1126 +	struct aac_srb_reply *srbreply;
  4.1127 +	Scsi_Cmnd *scsicmd;
  4.1128  
  4.1129 -    if (le32_to_cpu(srbreply->status) != ST_OK){
  4.1130 -	int len;
  4.1131 -	printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", 
  4.1132 -	       le32_to_cpu(srbreply->status));
  4.1133 -	len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
  4.1134 -	    sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
  4.1135 -	scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | 
  4.1136 -	    CHECK_CONDITION;
  4.1137 -	memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
  4.1138 -    }
  4.1139 +	scsicmd = (Scsi_Cmnd *) context;
  4.1140 +	dev = (struct aac_dev *)scsicmd->host->hostdata;
  4.1141  
  4.1142 -    /*
  4.1143 -     * Next check the srb status
  4.1144 -     */
  4.1145 -    switch(le32_to_cpu(srbreply->srb_status)){
  4.1146 -    case SRB_STATUS_ERROR_RECOVERY:
  4.1147 -    case SRB_STATUS_PENDING:
  4.1148 -    case SRB_STATUS_SUCCESS:
  4.1149 -	if(scsicmd->cmnd[0] == INQUIRY ){
  4.1150 -	    u8 b;
  4.1151 -	    /* We can't expose disk devices because we can't tell whether they
  4.1152 -	     * are the raw container drives or stand alone drives
  4.1153 -	     */
  4.1154 -	    b = *(u8*)scsicmd->buffer;
  4.1155 -	    if( (b & 0x0f) == TYPE_DISK ){
  4.1156 +	if (fibptr == NULL)
  4.1157 +		BUG();
  4.1158 +
  4.1159 +	srbreply = (struct aac_srb_reply *) fib_data(fibptr);
  4.1160 +
  4.1161 +	scsicmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
  4.1162 +	// calculate resid for sg 
  4.1163 +	scsicmd->resid = scsicmd->request_bufflen - srbreply->data_xfer_length;
  4.1164 +
  4.1165 +	if(scsicmd->use_sg)
  4.1166 +		pci_unmap_sg(dev->pdev, 
  4.1167 +			(struct scatterlist *)scsicmd->buffer,
  4.1168 +			scsicmd->use_sg,
  4.1169 +			scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
  4.1170 +	else if(scsicmd->request_bufflen)
  4.1171 +		pci_unmap_single(dev->pdev, (ulong)scsicmd->SCp.ptr, scsicmd->request_bufflen,
  4.1172 +			scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
  4.1173 +
  4.1174 +	/*
  4.1175 +	 * First check the fib status
  4.1176 +	 */
  4.1177 +
  4.1178 +	if (le32_to_cpu(srbreply->status) != ST_OK){
  4.1179 +		int len;
  4.1180 +		printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
  4.1181 +		len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
  4.1182 +				sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
  4.1183 +		scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | CHECK_CONDITION;
  4.1184 +		memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
  4.1185 +	}
  4.1186 +
  4.1187 +	/*
  4.1188 +	 * Next check the srb status
  4.1189 +	 */
  4.1190 +	switch( (le32_to_cpu(srbreply->srb_status))&0x3f){
  4.1191 +	case SRB_STATUS_ERROR_RECOVERY:
  4.1192 +	case SRB_STATUS_PENDING:
  4.1193 +	case SRB_STATUS_SUCCESS:
  4.1194 +		if(scsicmd->cmnd[0] == INQUIRY ){
  4.1195 +			u8 b;
  4.1196 +			u8 b1;
  4.1197 +			/* We can't expose disk devices because we can't tell whether they
  4.1198 +			 * are the raw container drives or stand alone drives.  If they have
  4.1199 +			 * the removable bit set then we should expose them though.
  4.1200 +			 */
  4.1201 +			b = (*(u8*)scsicmd->buffer)&0x1f;
  4.1202 +			b1 = ((u8*)scsicmd->buffer)[1];
  4.1203 +			if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER 
  4.1204 +					|| (b==TYPE_DISK && (b1&0x80)) ){
  4.1205 +				scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
  4.1206 +			/*
  4.1207 +			 * We will allow disk devices if in RAID/SCSI mode and
  4.1208 +			 * the channel is 2
  4.1209 +			 */
  4.1210 +			} else if((dev->raid_scsi_mode)&&(scsicmd->channel == 2)){
  4.1211 +				scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
  4.1212 +			} else {
  4.1213 +				scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
  4.1214 +			}
  4.1215 +		} else {
  4.1216 +			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
  4.1217 +		}
  4.1218 +		break;
  4.1219 +	case SRB_STATUS_DATA_OVERRUN:
  4.1220 +		switch(scsicmd->cmnd[0]){
  4.1221 +		case  READ_6:
  4.1222 +		case  WRITE_6:
  4.1223 +		case  READ_10:
  4.1224 +		case  WRITE_10:
  4.1225 +		case  READ_12:
  4.1226 +		case  WRITE_12:
  4.1227 +			if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
  4.1228 +				printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
  4.1229 +			} else {
  4.1230 +				printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
  4.1231 +			}
  4.1232 +			scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
  4.1233 +			break;
  4.1234 +		case INQUIRY: {
  4.1235 +			u8 b;
  4.1236 +			u8 b1;
  4.1237 +			/* We can't expose disk devices because we can't tell whether they
  4.1238 +			* are the raw container drives or stand alone drives
  4.1239 +			*/
  4.1240 +			b = (*(u8*)scsicmd->buffer)&0x0f;
  4.1241 +			b1 = ((u8*)scsicmd->buffer)[1];
  4.1242 +			if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
  4.1243 +					|| (b==TYPE_DISK && (b1&0x80)) ){
  4.1244 +				scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
  4.1245 +			/*
  4.1246 +			 * We will allow disk devices if in RAID/SCSI mode and
  4.1247 +			 * the channel is 2
  4.1248 +			 */
  4.1249 +			} else if((dev->raid_scsi_mode)&&(scsicmd->channel == 2)){
  4.1250 +				scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
  4.1251 +			} else {
  4.1252 +				scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
  4.1253 +			}
  4.1254 +			break;
  4.1255 +		}
  4.1256 +		default:
  4.1257 +			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
  4.1258 +			break;
  4.1259 +		}
  4.1260 +		break;
  4.1261 +	case SRB_STATUS_ABORTED:
  4.1262 +		scsicmd->result = DID_ABORT << 16 | ABORT << 8;
  4.1263 +		break;
  4.1264 +	case SRB_STATUS_ABORT_FAILED:
  4.1265 +		// Not sure about this one - but assuming the hba was trying to abort for some reason
  4.1266 +		scsicmd->result = DID_ERROR << 16 | ABORT << 8;
  4.1267 +		break;
  4.1268 +	case SRB_STATUS_PARITY_ERROR:
  4.1269 +		scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
  4.1270 +		break;
  4.1271 +	case SRB_STATUS_NO_DEVICE:
  4.1272 +	case SRB_STATUS_INVALID_PATH_ID:
  4.1273 +	case SRB_STATUS_INVALID_TARGET_ID:
  4.1274 +	case SRB_STATUS_INVALID_LUN:
  4.1275 +	case SRB_STATUS_SELECTION_TIMEOUT:
  4.1276  		scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
  4.1277 -	    }
  4.1278 -	} else {
  4.1279 -	    scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
  4.1280 -	}
  4.1281 -	break;
  4.1282 -    case SRB_STATUS_DATA_OVERRUN:
  4.1283 -	switch(scsicmd->cmnd[0]){
  4.1284 -	case  READ_6:
  4.1285 -	case  WRITE_6:
  4.1286 -	case  READ_10:
  4.1287 -	case  WRITE_10:
  4.1288 -	case  READ_12:
  4.1289 -	case  WRITE_12:
  4.1290 -	    if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
  4.1291 -		printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
  4.1292 -	    } else {
  4.1293 -		printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
  4.1294 -	    }
  4.1295 -	    scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
  4.1296 -	    break;
  4.1297 +		break;
  4.1298 +
  4.1299 +	case SRB_STATUS_COMMAND_TIMEOUT:
  4.1300 +	case SRB_STATUS_TIMEOUT:
  4.1301 +		scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
  4.1302 +		break;
  4.1303 +
  4.1304 +	case SRB_STATUS_BUSY:
  4.1305 +		scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
  4.1306 +		break;
  4.1307 +
  4.1308 +	case SRB_STATUS_BUS_RESET:
  4.1309 +		scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
  4.1310 +		break;
  4.1311 +
  4.1312 +	case SRB_STATUS_MESSAGE_REJECTED:
  4.1313 +		scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
  4.1314 +		break;
  4.1315 +	case SRB_STATUS_REQUEST_FLUSHED:
  4.1316 +	case SRB_STATUS_ERROR:
  4.1317 +	case SRB_STATUS_INVALID_REQUEST:
  4.1318 +	case SRB_STATUS_REQUEST_SENSE_FAILED:
  4.1319 +	case SRB_STATUS_NO_HBA:
  4.1320 +	case SRB_STATUS_UNEXPECTED_BUS_FREE:
  4.1321 +	case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
  4.1322 +	case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
  4.1323 +	case SRB_STATUS_DELAYED_RETRY:
  4.1324 +	case SRB_STATUS_BAD_FUNCTION:
  4.1325 +	case SRB_STATUS_NOT_STARTED:
  4.1326 +	case SRB_STATUS_NOT_IN_USE:
  4.1327 +	case SRB_STATUS_FORCE_ABORT:
  4.1328 +	case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
  4.1329  	default:
  4.1330 -	    scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
  4.1331 -	    break;
  4.1332 +#ifdef AAC_DETAILED_STATUS_INFO
  4.1333 +		printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",le32_to_cpu(srbreply->srb_status&0x3f),aac_get_status_string(le32_to_cpu(srbreply->srb_status)), scsicmd->cmnd[0], le32_to_cpu(srbreply->scsi_status) );
  4.1334 +#endif
  4.1335 +		scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
  4.1336 +		break;
  4.1337  	}
  4.1338 -	break;
  4.1339 -    case SRB_STATUS_ABORTED:
  4.1340 -	scsicmd->result = DID_ABORT << 16 | ABORT << 8;
  4.1341 -	break;
  4.1342 -    case SRB_STATUS_ABORT_FAILED:
  4.1343 -	// Not sure about this one - but assuming the hba was trying 
  4.1344 -	// to abort for some reason
  4.1345 -	scsicmd->result = DID_ERROR << 16 | ABORT << 8;
  4.1346 -	break;
  4.1347 -    case SRB_STATUS_PARITY_ERROR:
  4.1348 -	scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
  4.1349 -	break;
  4.1350 -    case SRB_STATUS_NO_DEVICE:
  4.1351 -    case SRB_STATUS_INVALID_PATH_ID:
  4.1352 -    case SRB_STATUS_INVALID_TARGET_ID:
  4.1353 -    case SRB_STATUS_INVALID_LUN:
  4.1354 -    case SRB_STATUS_SELECTION_TIMEOUT:
  4.1355 -	scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
  4.1356 -	break;
  4.1357 -
  4.1358 -    case SRB_STATUS_COMMAND_TIMEOUT:
  4.1359 -    case SRB_STATUS_TIMEOUT:
  4.1360 -	scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
  4.1361 -	break;
  4.1362 -
  4.1363 -    case SRB_STATUS_BUSY:
  4.1364 -	scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
  4.1365 -	break;
  4.1366 -
  4.1367 -    case SRB_STATUS_BUS_RESET:
  4.1368 -	scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
  4.1369 -	break;
  4.1370 +	if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){  // Check Condition
  4.1371 +		int len;
  4.1372 +		scsicmd->result |= CHECK_CONDITION;
  4.1373 +		len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
  4.1374 +				sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
  4.1375 +		printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", le32_to_cpu(srbreply->status), len);
  4.1376 +		memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
  4.1377 +	}
  4.1378 +	/*
  4.1379 +	 * OR in the scsi status (already shifted up a bit)
  4.1380 +	 */
  4.1381 +	scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
  4.1382  
  4.1383 -    case SRB_STATUS_MESSAGE_REJECTED:
  4.1384 -	scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
  4.1385 -	break;
  4.1386 -    case SRB_STATUS_REQUEST_FLUSHED:
  4.1387 -    case SRB_STATUS_ERROR:
  4.1388 -    case SRB_STATUS_INVALID_REQUEST:
  4.1389 -    case SRB_STATUS_REQUEST_SENSE_FAILED:
  4.1390 -    case SRB_STATUS_NO_HBA:
  4.1391 -    case SRB_STATUS_UNEXPECTED_BUS_FREE:
  4.1392 -    case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
  4.1393 -    case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
  4.1394 -    case SRB_STATUS_DELAYED_RETRY:
  4.1395 -    case SRB_STATUS_BAD_FUNCTION:
  4.1396 -    case SRB_STATUS_NOT_STARTED:
  4.1397 -    case SRB_STATUS_NOT_IN_USE:
  4.1398 -    case SRB_STATUS_FORCE_ABORT:
  4.1399 -    case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
  4.1400 -    default:
  4.1401 -#ifdef AAC_DETAILED_STATUS_INFO
  4.1402 -	printk("aacraid: SRB ERROR (%s)\n", 
  4.1403 -	       aac_get_status_string(le32_to_cpu(srbreply->srb_status)));
  4.1404 -#endif
  4.1405 -	scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
  4.1406 -	break;
  4.1407 -    }
  4.1408 -    if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){  // Check Condition
  4.1409 -	int len;
  4.1410 -	len = (srbreply->sense_data_size > sizeof(scsicmd->sense_buffer))?
  4.1411 -	    sizeof(scsicmd->sense_buffer):srbreply->sense_data_size;
  4.1412 -	printk(KERN_WARNING "aac_srb_callback: check condition, "
  4.1413 -	       "status = %d len=%d\n", le32_to_cpu(srbreply->status), len);
  4.1414 -	memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
  4.1415 -    }
  4.1416 -    /*
  4.1417 -     * OR in the scsi status (already shifted up a bit)
  4.1418 -     */
  4.1419 -    scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
  4.1420 -
  4.1421 -    fib_complete(fibptr);
  4.1422 -    fib_free(fibptr);
  4.1423 -    aac_io_done(scsicmd);
  4.1424 +	fib_complete(fibptr);
  4.1425 +	fib_free(fibptr);
  4.1426 +	aac_io_done(scsicmd);
  4.1427  }
  4.1428  
  4.1429  /**
  4.1430 @@ -1402,227 +1460,230 @@ static void aac_srb_callback(void *conte
  4.1431  
  4.1432  static int aac_send_srb_fib(Scsi_Cmnd* scsicmd)
  4.1433  {
  4.1434 -    struct fib* cmd_fibcontext;
  4.1435 -    struct aac_dev* dev;
  4.1436 -    int status;
  4.1437 -    struct aac_srb *srbcmd;
  4.1438 -    u16 fibsize;
  4.1439 -    u32 flag;
  4.1440 -
  4.1441 -    if( scsicmd->target > 15 || scsicmd->lun > 7) {
  4.1442 -	scsicmd->result = DID_NO_CONNECT << 16;
  4.1443 -	__aac_io_done(scsicmd);
  4.1444 -	return 0;
  4.1445 -    }
  4.1446 +	struct fib* cmd_fibcontext;
  4.1447 +	struct aac_dev* dev;
  4.1448 +	int status;
  4.1449 +	struct aac_srb *srbcmd;
  4.1450 +	u16 fibsize;
  4.1451 +	u32 flag;
  4.1452 +	u32 timeout;
  4.1453  
  4.1454 -    dev = (struct aac_dev *)scsicmd->host->hostdata;
  4.1455 -    switch(scsicmd->sc_data_direction){
  4.1456 -    case SCSI_DATA_WRITE:
  4.1457 -	flag = SRB_DataOut;
  4.1458 -	break;
  4.1459 -    case SCSI_DATA_UNKNOWN:  
  4.1460 -	flag = SRB_DataIn | SRB_DataOut;
  4.1461 -	break;
  4.1462 -    case SCSI_DATA_READ:
  4.1463 -	flag = SRB_DataIn;
  4.1464 -	break;
  4.1465 -    case SCSI_DATA_NONE: 
  4.1466 -    default:
  4.1467 -	flag = SRB_NoDataXfer;
  4.1468 -	break;
  4.1469 -    }
  4.1470 +	if( scsicmd->target > 15 || scsicmd->lun > 7) {
  4.1471 +		scsicmd->result = DID_NO_CONNECT << 16;
  4.1472 +		__aac_io_done(scsicmd);
  4.1473 +		return 0;
  4.1474 +	}
  4.1475 +
  4.1476 +	dev = (struct aac_dev *)scsicmd->host->hostdata;
  4.1477 +	switch(scsicmd->sc_data_direction){
  4.1478 +	case SCSI_DATA_WRITE:
  4.1479 +		flag = SRB_DataOut;
  4.1480 +		break;
  4.1481 +	case SCSI_DATA_UNKNOWN:  
  4.1482 +		flag = SRB_DataIn | SRB_DataOut;
  4.1483 +		break;
  4.1484 +	case SCSI_DATA_READ:
  4.1485 +		flag = SRB_DataIn;
  4.1486 +		break;
  4.1487 +	case SCSI_DATA_NONE: 
  4.1488 +	default:
  4.1489 +		flag = SRB_NoDataXfer;
  4.1490 +		break;
  4.1491 +	}
  4.1492  
  4.1493  
  4.1494 -    /*
  4.1495 -     *	Allocate and initialize a Fib then setup a BlockWrite command
  4.1496 -     */
  4.1497 -    if (!(cmd_fibcontext = fib_alloc(dev))) {
  4.1498 -	scsicmd->result = DID_ERROR << 16;
  4.1499 -	__aac_io_done(scsicmd);
  4.1500 -	return -1;
  4.1501 -    }
  4.1502 -    fib_init(cmd_fibcontext);
  4.1503 -
  4.1504 -    srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
  4.1505 -    srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
  4.1506 -    srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scsicmd->channel));
  4.1507 -    srbcmd->target   = cpu_to_le32(scsicmd->target);
  4.1508 -    srbcmd->lun      = cpu_to_le32(scsicmd->lun);
  4.1509 -    srbcmd->flags    = cpu_to_le32(flag);
  4.1510 -    srbcmd->timeout  = cpu_to_le32(0);  // timeout not used
  4.1511 -    srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
  4.1512 -    srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
  4.1513 -	
  4.1514 -    if( dev->pae_support ==1 ) {
  4.1515 -	aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
  4.1516 -	srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
  4.1517 -
  4.1518 -	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
  4.1519 -	memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
  4.1520  	/*
  4.1521 -	 *	Build Scatter/Gather list
  4.1522 -	 */
  4.1523 -	fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) 
  4.1524 -					     * sizeof (struct sgentry64));
  4.1525 -
  4.1526 -	/*
  4.1527 -	 *	Now send the Fib to the adapter
  4.1528 +	 *	Allocate and initialize a Fib then setup a BlockWrite command
  4.1529  	 */
  4.1530 -	status = fib_send(ScsiPortCommand64, cmd_fibcontext, fibsize, 
  4.1531 -			  FsaNormal, 0, 1, (fib_callback) aac_srb_callback, 
  4.1532 -			  (void *) scsicmd);
  4.1533 -    } else {
  4.1534 -	aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
  4.1535 -	srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
  4.1536 +	if (!(cmd_fibcontext = fib_alloc(dev))) {
  4.1537 +		scsicmd->result = DID_ERROR << 16;
  4.1538 +		__aac_io_done(scsicmd);
  4.1539 +		return -1;
  4.1540 +	}
  4.1541 +	fib_init(cmd_fibcontext);
  4.1542  
  4.1543 -	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
  4.1544 -	memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
  4.1545 +	srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
  4.1546 +	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
  4.1547 +	srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scsicmd->channel));
  4.1548 +	srbcmd->target   = cpu_to_le32(scsicmd->target);
  4.1549 +	srbcmd->lun      = cpu_to_le32(scsicmd->lun);
  4.1550 +	srbcmd->flags    = cpu_to_le32(flag);
  4.1551 +	timeout = (scsicmd->timeout-jiffies)/HZ;
  4.1552 +	if(timeout == 0){
  4.1553 +		timeout = 1;
  4.1554 +	}
  4.1555 +	srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
  4.1556 +	srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
  4.1557 +	srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
  4.1558 +	
  4.1559 +	if( dev->pae_support ==1 ) {
  4.1560 +		aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
  4.1561 +		srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
  4.1562 +
  4.1563 +		memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
  4.1564 +		memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
  4.1565 +		/*
  4.1566 +		 *	Build Scatter/Gather list
  4.1567 +		 */
  4.1568 +		fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry64));
  4.1569 +
  4.1570 +		/*
  4.1571 +		 *	Now send the Fib to the adapter
  4.1572 +		 */
  4.1573 +		status = fib_send(ScsiPortCommand64, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
  4.1574 +				  (fib_callback) aac_srb_callback, (void *) scsicmd);
  4.1575 +	} else {
  4.1576 +		aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
  4.1577 +		srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
  4.1578 +
  4.1579 +		memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
  4.1580 +		memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
  4.1581 +		/*
  4.1582 +		 *	Build Scatter/Gather list
  4.1583 +		 */
  4.1584 +		fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
  4.1585 +
  4.1586 +		/*
  4.1587 +		 *	Now send the Fib to the adapter
  4.1588 +		 */
  4.1589 +		status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
  4.1590 +				  (fib_callback) aac_srb_callback, (void *) scsicmd);
  4.1591 +	}
  4.1592  	/*
  4.1593 -	 *	Build Scatter/Gather list
  4.1594 -	 */
  4.1595 -	fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) 
  4.1596 -					     * sizeof (struct sgentry));
  4.1597 -
  4.1598 -	/*
  4.1599 -	 *	Now send the Fib to the adapter
  4.1600 +	 *	Check that the command queued to the controller
  4.1601  	 */
  4.1602 -	status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, 
  4.1603 -			  FsaNormal, 0, 1, (fib_callback) aac_srb_callback, 
  4.1604 -			  (void *) scsicmd);
  4.1605 -    }
  4.1606 -    /*
  4.1607 -     *	Check that the command queued to the controller
  4.1608 -     */
  4.1609 -    if (status == -EINPROGRESS){
  4.1610 -	return 0;
  4.1611 -    }
  4.1612 +	if (status == -EINPROGRESS){
  4.1613 +		return 0;
  4.1614 +	}
  4.1615  
  4.1616 -    printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
  4.1617 -    /*
  4.1618 -     *	For some reason, the Fib didn't queue, return QUEUE_FULL
  4.1619 -     */
  4.1620 -    scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
  4.1621 -    __aac_io_done(scsicmd);
  4.1622 +	printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
  4.1623 +	/*
  4.1624 +	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
  4.1625 +	 */
  4.1626 +	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | QUEUE_FULL;
  4.1627 +	__aac_io_done(scsicmd);
  4.1628  
  4.1629 -    fib_complete(cmd_fibcontext);
  4.1630 -    fib_free(cmd_fibcontext);
  4.1631 +	fib_complete(cmd_fibcontext);
  4.1632 +	fib_free(cmd_fibcontext);
  4.1633  
  4.1634 -    return -1;
  4.1635 +	return -1;
  4.1636  }
  4.1637  
  4.1638  static unsigned long aac_build_sg(Scsi_Cmnd* scsicmd, struct sgmap* psg)
  4.1639  {
  4.1640 -    struct aac_dev *dev;
  4.1641 -    unsigned long byte_count = 0;
  4.1642 -
  4.1643 -    dev = (struct aac_dev *)scsicmd->host->hostdata;
  4.1644 -    // Get rid of old data
  4.1645 -    psg->count = cpu_to_le32(0);
  4.1646 -    psg->sg[0].addr = cpu_to_le32(NULL);
  4.1647 -    psg->sg[0].count = cpu_to_le32(0);  
  4.1648 -    if (scsicmd->use_sg) {
  4.1649 -	struct scatterlist *sg;
  4.1650 -	int i;
  4.1651 -	int sg_count;
  4.1652 -	sg = (struct scatterlist *) scsicmd->request_buffer;
  4.1653 -
  4.1654 -	sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
  4.1655 -			      scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
  4.1656 -	psg->count = cpu_to_le32(sg_count);
  4.1657 -
  4.1658 -	byte_count = 0;
  4.1659 +	struct aac_dev *dev;
  4.1660 +	unsigned long byte_count = 0;
  4.1661  
  4.1662 -	for (i = 0; i < sg_count; i++) {
  4.1663 -	    psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
  4.1664 -	    psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
  4.1665 -	    byte_count += sg_dma_len(sg);
  4.1666 -	    sg++;
  4.1667 -	}
  4.1668 -	/* hba wants the size to be exact */
  4.1669 -	if(byte_count > scsicmd->request_bufflen){
  4.1670 -	    psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
  4.1671 -	    byte_count = scsicmd->request_bufflen;
  4.1672 +	dev = (struct aac_dev *)scsicmd->host->hostdata;
  4.1673 +	// Get rid of old data
  4.1674 +	psg->count = cpu_to_le32(0);
  4.1675 +	psg->sg[0].addr = cpu_to_le32(NULL);
  4.1676 +	psg->sg[0].count = cpu_to_le32(0);  
  4.1677 +	if (scsicmd->use_sg) {
  4.1678 +		struct scatterlist *sg;
  4.1679 +		int i;
  4.1680 +		int sg_count;
  4.1681 +		sg = (struct scatterlist *) scsicmd->request_buffer;
  4.1682 +
  4.1683 +		sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
  4.1684 +			scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
  4.1685 +		psg->count = cpu_to_le32(sg_count);
  4.1686 +
  4.1687 +		byte_count = 0;
  4.1688 +
  4.1689 +		for (i = 0; i < sg_count; i++) {
  4.1690 +			psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
  4.1691 +			psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
  4.1692 +			byte_count += sg_dma_len(sg);
  4.1693 +			sg++;
  4.1694 +		}
  4.1695 +		/* hba wants the size to be exact */
  4.1696 +		if(byte_count > scsicmd->request_bufflen){
  4.1697 +			psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
  4.1698 +			byte_count = scsicmd->request_bufflen;
  4.1699 +		}
  4.1700 +		/* Check for command underflow */
  4.1701 +		if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
  4.1702 +			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
  4.1703 +					byte_count, scsicmd->underflow);
  4.1704 +		}
  4.1705  	}
  4.1706 -	/* Check for command underflow */
  4.1707 -	if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
  4.1708 -	    printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
  4.1709 -		   byte_count, scsicmd->underflow);
  4.1710 +	else if(scsicmd->request_bufflen) {
  4.1711 +		dma_addr_t addr; 
  4.1712 +		addr = pci_map_single(dev->pdev,
  4.1713 +				scsicmd->request_buffer,
  4.1714 +				scsicmd->request_bufflen,
  4.1715 +				scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
  4.1716 +		psg->count = cpu_to_le32(1);
  4.1717 +		psg->sg[0].addr = cpu_to_le32(addr);
  4.1718 +		psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
  4.1719 +		/* Cast to pointer from integer of different size */
  4.1720 +		scsicmd->SCp.ptr = (void *)addr;
  4.1721 +		byte_count = scsicmd->request_bufflen;
  4.1722  	}
  4.1723 -    }
  4.1724 -    else if(scsicmd->request_bufflen) {
  4.1725 -	dma_addr_t addr; 
  4.1726 -	addr = pci_map_single(dev->pdev,
  4.1727 -			      scsicmd->request_buffer,
  4.1728 -			      scsicmd->request_bufflen,
  4.1729 -			      scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
  4.1730 -	psg->count = cpu_to_le32(1);
  4.1731 -	psg->sg[0].addr = cpu_to_le32(addr);
  4.1732 -	psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
  4.1733 -	scsicmd->SCp.ptr = (void *)addr;
  4.1734 -	byte_count = scsicmd->request_bufflen;
  4.1735 -    }
  4.1736 -    return byte_count;
  4.1737 +	return byte_count;
  4.1738  }
  4.1739  
  4.1740  
  4.1741  static unsigned long aac_build_sg64(Scsi_Cmnd* scsicmd, struct sgmap64* psg)
  4.1742  {
  4.1743 -    struct aac_dev *dev;
  4.1744 -    unsigned long byte_count = 0;
  4.1745 -    u64 le_addr;
  4.1746 -
  4.1747 -    dev = (struct aac_dev *)scsicmd->host->hostdata;
  4.1748 -    // Get rid of old data
  4.1749 -    psg->count = cpu_to_le32(0);
  4.1750 -    psg->sg[0].addr[0] = cpu_to_le32(NULL);
  4.1751 -    psg->sg[0].addr[1] = cpu_to_le32(NULL);
  4.1752 -    psg->sg[0].count = cpu_to_le32(0);  
  4.1753 -    if (scsicmd->use_sg) {
  4.1754 -	struct scatterlist *sg;
  4.1755 -	int i;
  4.1756 -	int sg_count;
  4.1757 -	sg = (struct scatterlist *) scsicmd->request_buffer;
  4.1758 -
  4.1759 -	sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
  4.1760 -			      scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
  4.1761 -	psg->count = cpu_to_le32(sg_count);
  4.1762 -
  4.1763 -	byte_count = 0;
  4.1764 +	struct aac_dev *dev;
  4.1765 +	unsigned long byte_count = 0;
  4.1766 +	u64 le_addr;
  4.1767  
  4.1768 -	for (i = 0; i < sg_count; i++) {
  4.1769 -	    le_addr = cpu_to_le64(sg_dma_address(sg));
  4.1770 -	    psg->sg[i].addr[1] = (u32)(le_addr>>32);
  4.1771 -	    psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
  4.1772 -	    psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
  4.1773 -	    byte_count += sg_dma_len(sg);
  4.1774 -	    sg++;
  4.1775 -	}
  4.1776 -	/* hba wants the size to be exact */
  4.1777 -	if(byte_count > scsicmd->request_bufflen){
  4.1778 -	    psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
  4.1779 -	    byte_count = scsicmd->request_bufflen;
  4.1780 +	dev = (struct aac_dev *)scsicmd->host->hostdata;
  4.1781 +	// Get rid of old data
  4.1782 +	psg->count = cpu_to_le32(0);
  4.1783 +	psg->sg[0].addr[0] = cpu_to_le32(NULL);
  4.1784 +	psg->sg[0].addr[1] = cpu_to_le32(NULL);
  4.1785 +	psg->sg[0].count = cpu_to_le32(0);  
  4.1786 +	if (scsicmd->use_sg) {
  4.1787 +		struct scatterlist *sg;
  4.1788 +		int i;
  4.1789 +		int sg_count;
  4.1790 +		sg = (struct scatterlist *) scsicmd->request_buffer;
  4.1791 +
  4.1792 +		sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
  4.1793 +			scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
  4.1794 +		psg->count = cpu_to_le32(sg_count);
  4.1795 +
  4.1796 +		byte_count = 0;
  4.1797 +
  4.1798 +		for (i = 0; i < sg_count; i++) {
  4.1799 +			le_addr = cpu_to_le64(sg_dma_address(sg));
  4.1800 +			psg->sg[i].addr[1] = (u32)(le_addr>>32);
  4.1801 +			psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
  4.1802 +			psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
  4.1803 +			byte_count += sg_dma_len(sg);
  4.1804 +			sg++;
  4.1805 +		}
  4.1806 +		/* hba wants the size to be exact */
  4.1807 +		if(byte_count > scsicmd->request_bufflen){
  4.1808 +			psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
  4.1809 +			byte_count = scsicmd->request_bufflen;
  4.1810 +		}
  4.1811 +		/* Check for command underflow */
  4.1812 +		if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
  4.1813 +			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
  4.1814 +					byte_count, scsicmd->underflow);
  4.1815 +		}
  4.1816  	}
  4.1817 -	/* Check for command underflow */
  4.1818 -	if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
  4.1819 -	    printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
  4.1820 -		   byte_count, scsicmd->underflow);
  4.1821 +	else if(scsicmd->request_bufflen) {
  4.1822 +		dma_addr_t addr; 
  4.1823 +		addr = pci_map_single(dev->pdev,
  4.1824 +				scsicmd->request_buffer,
  4.1825 +				scsicmd->request_bufflen,
  4.1826 +				scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
  4.1827 +		psg->count = cpu_to_le32(1);
  4.1828 +		le_addr = cpu_to_le64(addr);
  4.1829 +		psg->sg[0].addr[1] = (u32)(le_addr>>32);
  4.1830 +		psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff);
  4.1831 +		psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
  4.1832 +		/* Cast to pointer from integer of different size */
  4.1833 +		scsicmd->SCp.ptr = (void *)addr;
  4.1834 +		byte_count = scsicmd->request_bufflen;
  4.1835  	}
  4.1836 -    }
  4.1837 -    else if(scsicmd->request_bufflen) {
  4.1838 -	dma_addr_t addr; 
  4.1839 -	addr = pci_map_single(dev->pdev,
  4.1840 -			      scsicmd->request_buffer,
  4.1841 -			      scsicmd->request_bufflen,
  4.1842 -			      scsi_to_pci_dma_dir(scsicmd->sc_data_direction));
  4.1843 -	psg->count = cpu_to_le32(1);
  4.1844 -	le_addr = cpu_to_le64(addr);
  4.1845 -	psg->sg[0].addr[1] = (u32)(le_addr>>32);
  4.1846 -	psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff);
  4.1847 -	psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
  4.1848 -	scsicmd->SCp.ptr = (void *)addr;
  4.1849 -	byte_count = scsicmd->request_bufflen;
  4.1850 -    }
  4.1851 -    return byte_count;
  4.1852 +	return byte_count;
  4.1853  }
  4.1854  
  4.1855  #ifdef AAC_DETAILED_STATUS_INFO
     5.1 --- a/xen/drivers/scsi/aacraid/aacraid.h	Wed Aug 06 20:53:30 2003 +0000
     5.2 +++ b/xen/drivers/scsi/aacraid/aacraid.h	Thu Aug 07 12:24:12 2003 +0000
     5.3 @@ -1,16 +1,13 @@
     5.4 -
     5.5 -/* #define dprintk(x) */
     5.6 -// #define dprintk(x) printk x
     5.7 -#define dprintk(x)
     5.8 -
     5.9 +//#define dprintk(x) printk x
    5.10 +#if (!defined(dprintk))
    5.11 +# define dprintk(x)
    5.12 +#endif
    5.13  
    5.14 +/* Start of Xen additions XXX */
    5.15  #include <asm/byteorder.h>
    5.16 -
    5.17 +#include <xeno/interrupt.h>
    5.18  #define TRY_TASKLET
    5.19 -#ifdef TRY_TASKLET
    5.20 -/* XXX SMH: trying to use softirqs to trigger stuff done prev by threads */
    5.21 -#include <xeno/interrupt.h>  /* for tasklet/softirq stuff */
    5.22 -#endif
    5.23 +/* End of Xen additions XXX */
    5.24  
    5.25  /*------------------------------------------------------------------------------
    5.26   *              D E F I N E S
    5.27 @@ -19,12 +16,13 @@
    5.28  #define MAXIMUM_NUM_CONTAINERS	31
    5.29  #define MAXIMUM_NUM_ADAPTERS	8
    5.30  
    5.31 -#define AAC_NUM_FIB	578
    5.32 -#define AAC_NUM_IO_FIB	512
    5.33 +#define AAC_NUM_FIB		578
    5.34 +//#define AAC_NUM_IO_FIB	512
    5.35 +#define AAC_NUM_IO_FIB		100
    5.36  
    5.37 -#define AAC_MAX_TARGET (MAXIMUM_NUM_CONTAINERS+1)
    5.38 +#define AAC_MAX_TARGET		(MAXIMUM_NUM_CONTAINERS+1)
    5.39  //#define AAC_MAX_TARGET 	(16)
    5.40 -#define AAC_MAX_LUN	(8)
    5.41 +#define AAC_MAX_LUN		(8)
    5.42  
    5.43  /*
    5.44   * These macros convert from physical channels to virtual channels
    5.45 @@ -266,27 +264,25 @@ enum aac_queue_types {
    5.46   */
    5.47  
    5.48  struct aac_fibhdr {
    5.49 -    u32 XferState;		// Current transfer state for this CCB
    5.50 -    u16 Command;		// Routing information for the destination
    5.51 -    u8 StructType;		// Type FIB
    5.52 -    u8 Flags;			// Flags for FIB
    5.53 -    u16 Size;			// Size of this FIB in bytes
    5.54 -    u16 SenderSize;		// Size of the FIB in the sender (for 
    5.55 -                                // response sizing)
    5.56 -    u32 SenderFibAddress;	// Host defined data in the FIB
    5.57 -    u32 ReceiverFibAddress;	// Logical address of this FIB for the adapter
    5.58 -    u32 SenderData;		// Place holder for the sender to store data
    5.59 -    union {
    5.60 -	struct {
    5.61 -	    u32 _ReceiverTimeStart;  // Timestamp for receipt of fib
    5.62 -	    u32 _ReceiverTimeDone;   // Timestamp for completion of fib
    5.63 -	} _s;
    5.64 -	struct list_head _FibLinks;  // Used to link Adapter Initiated 
    5.65 -	                             // Fibs on the host
    5.66 -    } _u;
    5.67 +	u32 XferState;			// Current transfer state for this CCB
    5.68 +	u16 Command;			// Routing information for the destination
    5.69 +	u8 StructType;			// Type FIB
    5.70 +	u8 Flags;			// Flags for FIB
    5.71 +	u16 Size;			// Size of this FIB in bytes
    5.72 +	u16 SenderSize;			// Size of the FIB in the sender (for response sizing)
    5.73 +	u32 SenderFibAddress;		// Host defined data in the FIB
    5.74 +	u32 ReceiverFibAddress;		// Logical address of this FIB for the adapter
    5.75 +	u32 SenderData;			// Place holder for the sender to store data
    5.76 +	union {
    5.77 +		struct {
    5.78 +		    u32 _ReceiverTimeStart; 	// Timestamp for receipt of fib
    5.79 +		    u32 _ReceiverTimeDone;	// Timestamp for completion of fib
    5.80 +		} _s;
    5.81 +//		struct aac_list_head _FibLinks;	// Used to link Adapter Initiated Fibs on the host
    5.82 +	} _u;
    5.83  };
    5.84  
    5.85 -#define FibLinks			_u._FibLinks
    5.86 +//#define FibLinks			_u._FibLinks
    5.87  
    5.88  #define FIB_DATA_SIZE_IN_BYTES (512 - sizeof(struct aac_fibhdr))
    5.89  
    5.90 @@ -451,6 +447,8 @@ struct aac_driver_ident
    5.91  	char *	vname;
    5.92  	char *	model;
    5.93  	u16	channels;
    5.94 +	int	quirks;
    5.95 +#define AAC_QUIRK_31BIT			1
    5.96  };
    5.97  
    5.98  /*
    5.99 @@ -471,8 +469,7 @@ struct aac_queue {
   5.100  #if 0
   5.101  	wait_queue_head_t	qfull;		      	/* Event to wait on if the queue is full */
   5.102  	wait_queue_head_t	cmdready;	  	/* Indicates there is a Command ready from the adapter on this queue. */
   5.103 -#endif
   5.104 -                                        		/* This is only valid for adapter to host command queues. */                      
   5.105 +#endif                                        		/* This is only valid for adapter to host command queues. */                      
   5.106  	spinlock_t	 	*lock;		     	/* Spinlock for this queue must take this lock before accessing the lock */
   5.107  	spinlock_t		lockdata;		/* Actual lock (used only on one side of the lock) */
   5.108  	unsigned long		SavedIrql;      	/* Previous IRQL when the spin lock is taken */
   5.109 @@ -646,7 +643,7 @@ struct aac_fib_context {
   5.110  #endif
   5.111  	int			wait;		// Set to true when thread is in WaitForSingleObject
   5.112  	unsigned long		count;		// total number of FIBs on FibList
   5.113 -	struct list_head	fibs;
   5.114 +	struct list_head	fib_list;	// this holds fibs which should be 32 bit addresses
   5.115  };
   5.116  
   5.117  struct fsa_scsi_hba {
   5.118 @@ -656,7 +653,7 @@ struct fsa_scsi_hba {
   5.119  	u8		ro[MAXIMUM_NUM_CONTAINERS];
   5.120  	u8		locked[MAXIMUM_NUM_CONTAINERS];
   5.121  	u8		deleted[MAXIMUM_NUM_CONTAINERS];
   5.122 -	u32		devno[MAXIMUM_NUM_CONTAINERS];
   5.123 +	s32		devno[MAXIMUM_NUM_CONTAINERS];
   5.124  };
   5.125  
   5.126  struct fib {
   5.127 @@ -667,7 +664,6 @@ struct fib {
   5.128  	 *	The Adapter that this I/O is destined for.
   5.129  	 */
   5.130  	struct aac_dev 		*dev;
   5.131 -	u64			logicaladdr;	/* 64 bit */
   5.132  #if 0
   5.133  	/*
   5.134  	 *	This is the event the sendfib routine will wait on if the
   5.135 @@ -686,9 +682,14 @@ struct fib {
   5.136  	 *	Outstanding I/O queue.
   5.137  	 */
   5.138  	struct list_head	queue;
   5.139 -
   5.140 +	/*
   5.141 +	 *	And for the internal issue/reply queues (we may be able
   5.142 +	 *	to merge these two)
   5.143 +	 */
   5.144 +	struct list_head	fiblink;
   5.145  	void 			*data;
   5.146 -	struct hw_fib		*fib;		/* Actual shared object */
   5.147 +	struct hw_fib		*hw_fib;		/* Actual shared object */
   5.148 +	dma_addr_t		hw_fib_pa;		/* physical address of hw_fib*/
   5.149  };
   5.150  
   5.151  /*
   5.152 @@ -715,6 +716,7 @@ struct aac_adapter_info
   5.153  	u32	biosrev;
   5.154  	u32	biosbuild;
   5.155  	u32	cluster;
   5.156 +	u32	clusterchannelmask; 
   5.157  	u32	serial[2];
   5.158  	u32	battery;
   5.159  	u32	options;
   5.160 @@ -739,19 +741,22 @@ struct aac_adapter_info
   5.161  /*
   5.162   * Supported Options
   5.163   */
   5.164 -#define AAC_OPT_SNAPSHOT	cpu_to_le32(1)
   5.165 -#define AAC_OPT_CLUSTERS	cpu_to_le32(1<<1)
   5.166 -#define AAC_OPT_WRITE_CACHE	cpu_to_le32(1<<2)
   5.167 -#define AAC_OPT_64BIT_DATA	cpu_to_le32(1<<3)
   5.168 -#define AAC_OPT_HOST_TIME_FIB	cpu_to_le32(1<<4)
   5.169 -#define AAC_OPT_RAID50		cpu_to_le32(1<<5)
   5.170 -#define AAC_OPT_4GB_WINDOW	cpu_to_le32(1<<6)
   5.171 -#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7)
   5.172 -#define AAC_OPT_SOFT_ERR_REPORT	cpu_to_le32(1<<8)
   5.173 -#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9)
   5.174 -#define AAC_OPT_SGMAP_HOST64	cpu_to_le32(1<<10)
   5.175 -#define AAC_OPT_ALARM		cpu_to_le32(1<<11)
   5.176 -#define AAC_OPT_NONDASD		cpu_to_le32(1<<12)
   5.177 +#define AAC_OPT_SNAPSHOT		cpu_to_le32(1)
   5.178 +#define AAC_OPT_CLUSTERS		cpu_to_le32(1<<1)
   5.179 +#define AAC_OPT_WRITE_CACHE		cpu_to_le32(1<<2)
   5.180 +#define AAC_OPT_64BIT_DATA		cpu_to_le32(1<<3)
   5.181 +#define AAC_OPT_HOST_TIME_FIB		cpu_to_le32(1<<4)
   5.182 +#define AAC_OPT_RAID50			cpu_to_le32(1<<5)
   5.183 +#define AAC_OPT_4GB_WINDOW		cpu_to_le32(1<<6)
   5.184 +#define AAC_OPT_SCSI_UPGRADEABLE	cpu_to_le32(1<<7)
   5.185 +#define AAC_OPT_SOFT_ERR_REPORT		cpu_to_le32(1<<8)
   5.186 +#define AAC_OPT_SUPPORTED_RECONDITION	cpu_to_le32(1<<9)
   5.187 +#define AAC_OPT_SGMAP_HOST64		cpu_to_le32(1<<10)
   5.188 +#define AAC_OPT_ALARM			cpu_to_le32(1<<11)
   5.189 +#define AAC_OPT_NONDASD			cpu_to_le32(1<<12)
   5.190 +#define AAC_OPT_SCSI_MANAGED    	cpu_to_le32(1<<13)
   5.191 +#define AAC_OPT_RAID_SCSI_MODE		cpu_to_le32(1<<14)
   5.192 +#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO	cpu_to_le32(1<<15)
   5.193  
   5.194  struct aac_dev
   5.195  {
   5.196 @@ -765,13 +770,12 @@ struct aac_dev
   5.197  	 */	
   5.198  	dma_addr_t		hw_fib_pa;
   5.199  	struct hw_fib		*hw_fib_va;
   5.200 -#if BITS_PER_LONG >= 64
   5.201  	ulong			fib_base_va;
   5.202 -#endif
   5.203  	/*
   5.204  	 *	Fib Headers
   5.205  	 */
   5.206 -	struct fib		fibs[AAC_NUM_FIB];
   5.207 +	struct fib              *fibs;
   5.208 +
   5.209  	struct fib		*free_fib;
   5.210  	struct fib		*timeout_fib;
   5.211  	spinlock_t		fib_lock;
   5.212 @@ -800,7 +804,9 @@ struct aac_dev
   5.213  
   5.214  	struct Scsi_Host	*scsi_host_ptr;
   5.215  	struct fsa_scsi_hba	fsa_dev;
   5.216 -	int			thread_pid;
   5.217 +#if 0
   5.218 +	pid_t			thread_pid;
   5.219 +#endif
   5.220  	int			cardtype;
   5.221  	
   5.222  	/*
   5.223 @@ -825,8 +831,15 @@ struct aac_dev
   5.224  	 */
   5.225  	u8			nondasd_support; 
   5.226  	u8			pae_support;
   5.227 +	u8			raid_scsi_mode;
   5.228  };
   5.229  
   5.230 +#define AllocateAndMapFibSpace(dev, MapFibContext) \
   5.231 +	dev->a_ops.AllocateAndMapFibSpace(dev, MapFibContext)
   5.232 +
   5.233 +#define UnmapAndFreeFibSpace(dev, MapFibContext) \
   5.234 +	dev->a_ops.UnmapAndFreeFibSpace(dev, MapFibContext)
   5.235 +
   5.236  #define aac_adapter_interrupt(dev) \
   5.237  	dev->a_ops.adapter_interrupt(dev)
   5.238  
   5.239 @@ -1163,7 +1176,9 @@ struct aac_mntent {
   5.240  	u32			altoid;		// != oid <==> snapshot or broken mirror exists
   5.241  };
   5.242  
   5.243 -#define FSCS_READONLY	0x0002	/*	possible result of broken mirror */
   5.244 +#define FSCS_NOTCLEAN	0x0001  	/* fsck is neccessary before mounting */
   5.245 +#define FSCS_READONLY	0x0002		/* possible result of broken mirror */
   5.246 +#define FSCS_HIDDEN	0x0004		/* should be ignored - set during a clear */
   5.247  
   5.248  struct aac_query_mount {
   5.249  	u32		command;
   5.250 @@ -1347,9 +1362,12 @@ extern struct aac_common aac_config;
   5.251   */
   5.252   
   5.253  #define 	AifCmdEventNotify	1	/* Notify of event */
   5.254 +#define		AifEnContainerChange	4	/* Container configuration change */
   5.255  #define		AifCmdJobProgress	2	/* Progress report */
   5.256  #define		AifCmdAPIReport		3	/* Report from other user of API */
   5.257  #define		AifCmdDriverNotify	4	/* Notify host driver of event */
   5.258 +#define		AifDenMorphComplete	200	/* A morph operation completed */
   5.259 +#define		AifDenVolumeExtendComplete 201  /* A volume expand operation completed */
   5.260  #define		AifReqJobList		100	/* Gets back complete job list */
   5.261  #define		AifReqJobsForCtr	101	/* Gets back jobs for specific container */
   5.262  #define		AifReqJobsForScsi	102	/* Gets back jobs for specific SCSI device */ 
   5.263 @@ -1374,16 +1392,6 @@ struct aac_aifcmd {
   5.264  	u8 data[1];		/* Undefined length (from kernel viewpoint) */
   5.265  };
   5.266  
   5.267 -static inline u32 fib2addr(struct hw_fib *hw)
   5.268 -{
   5.269 -	return (u32)hw;
   5.270 -}
   5.271 -
   5.272 -static inline struct hw_fib *addr2fib(u32 addr)
   5.273 -{
   5.274 -	return (struct hw_fib *)addr;
   5.275 -}
   5.276 -
   5.277  const char *aac_driverinfo(struct Scsi_Host *);
   5.278  struct fib *fib_alloc(struct aac_dev *dev);
   5.279  int fib_setup(struct aac_dev *dev);
   5.280 @@ -1397,7 +1405,7 @@ int aac_consumer_get(struct aac_dev * de
   5.281  int aac_consumer_avail(struct aac_dev * dev, struct aac_queue * q);
   5.282  void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
   5.283  int fib_complete(struct fib * context);
   5.284 -#define fib_data(fibctx) ((void *)(fibctx)->fib->data)
   5.285 +#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data)
   5.286  int aac_detach(struct aac_dev *dev);
   5.287  struct aac_dev *aac_init_adapter(struct aac_dev *dev);
   5.288  int aac_get_containers(struct aac_dev *dev);
     6.1 --- a/xen/drivers/scsi/aacraid/commctrl.c	Wed Aug 06 20:53:30 2003 +0000
     6.2 +++ b/xen/drivers/scsi/aacraid/commctrl.c	Thu Aug 07 12:24:12 2003 +0000
     6.3 @@ -28,17 +28,17 @@
     6.4   *
     6.5   */
     6.6  
     6.7 -#include <xeno/config.h>
     6.8 -/*  #include <xeno/kernel.h> */
     6.9 -#include <xeno/init.h>
    6.10 -#include <xeno/types.h>
    6.11 -#include <xeno/sched.h>
    6.12 -#include <xeno/pci.h>
    6.13 -/*  #include <xeno/spinlock.h> */
    6.14 -/*  #include <xeno/slab.h> */
    6.15 -/*  #include <xeno/completion.h> */
    6.16 -#include <xeno/blk.h>
    6.17 -/*  #include <asm/semaphore.h> */
    6.18 +#include <linux/config.h>
    6.19 +#include <linux/kernel.h>
    6.20 +#include <linux/init.h>
    6.21 +#include <linux/types.h>
    6.22 +#include <linux/sched.h>
    6.23 +#include <linux/pci.h>
    6.24 +#include <linux/spinlock.h>
    6.25 +#include <linux/slab.h>
    6.26 +/*#include <linux/completion.h>*/
    6.27 +#include <linux/blk.h>
    6.28 +/*#include <asm/semaphore.h>*/
    6.29  #include <asm/uaccess.h>
    6.30  #include "scsi.h"
    6.31  #include "hosts.h"
    6.32 @@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev
    6.33  	if(fibptr == NULL)
    6.34  		return -ENOMEM;
    6.35  		
    6.36 -	kfib = fibptr->fib;
    6.37 +	kfib = fibptr->hw_fib;
    6.38  	/*
    6.39  	 *	First copy in the header so that we can check the size field.
    6.40  	 */
    6.41 @@ -152,7 +152,7 @@ static int open_getadapter_fib(struct aa
    6.42  		 *	the list to 0.
    6.43  		 */
    6.44  		fibctx->count = 0;
    6.45 -		INIT_LIST_HEAD(&fibctx->fibs);
    6.46 +		INIT_LIST_HEAD(&fibctx->fib_list);
    6.47  		fibctx->jiffies = jiffies/HZ;
    6.48  		/*
    6.49  		 *	Now add this context onto the adapter's 
    6.50 @@ -183,7 +183,7 @@ static int next_getadapter_fib(struct aa
    6.51  {
    6.52  	struct fib_ioctl f;
    6.53  	struct aac_fib_context *fibctx, *aifcp;
    6.54 -	struct hw_fib * fib;
    6.55 +	struct fib * fib;
    6.56  	int status;
    6.57  	struct list_head * entry;
    6.58  	int found;
    6.59 @@ -213,12 +213,16 @@ static int next_getadapter_fib(struct aa
    6.60  		}
    6.61  		entry = entry->next;
    6.62  	}
    6.63 -	if (found == 0)
    6.64 +	if (found == 0) {
    6.65 +		dprintk ((KERN_INFO "Fib not found\n"));
    6.66  		return -EINVAL;
    6.67 +	}
    6.68  
    6.69  	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
    6.70 -		 (fibctx->size != sizeof(struct aac_fib_context)))
    6.71 +		 (fibctx->size != sizeof(struct aac_fib_context))) {
    6.72 +		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
    6.73  		return -EINVAL;
    6.74 +	}
    6.75  	status = 0;
    6.76  	spin_lock_irqsave(&dev->fib_lock, flags);
    6.77  	/*
    6.78 @@ -226,27 +230,28 @@ static int next_getadapter_fib(struct aa
    6.79  	 *	-EAGAIN
    6.80  	 */
    6.81  return_fib:
    6.82 -	if (!list_empty(&fibctx->fibs)) {
    6.83 +	if (!list_empty(&fibctx->fib_list)) {
    6.84  		struct list_head * entry;
    6.85  		/*
    6.86  		 *	Pull the next fib from the fibs
    6.87  		 */
    6.88 -		entry = fibctx->fibs.next;
    6.89 +		entry = fibctx->fib_list.next;
    6.90  		list_del(entry);
    6.91  		
    6.92 -		fib = list_entry(entry, struct hw_fib, header.FibLinks);
    6.93 +		fib = list_entry(entry, struct fib, fiblink);
    6.94  		fibctx->count--;
    6.95  		spin_unlock_irqrestore(&dev->fib_lock, flags);
    6.96 -		if (copy_to_user(f.fib, fib, sizeof(struct hw_fib))) {
    6.97 +		if (copy_to_user(f.fib, fib->hw_fib, sizeof(struct hw_fib))) {
    6.98 +			kfree(fib->hw_fib);
    6.99  			kfree(fib);
   6.100  			return -EFAULT;
   6.101  		}	
   6.102  		/*
   6.103  		 *	Free the space occupied by this copy of the fib.
   6.104  		 */
   6.105 +		kfree(fib->hw_fib);
   6.106  		kfree(fib);
   6.107  		status = 0;
   6.108 -		fibctx->jiffies = jiffies/HZ;
   6.109  	} else {
   6.110  		spin_unlock_irqrestore(&dev->fib_lock, flags);
   6.111  		if (f.wait) {
   6.112 @@ -255,7 +260,7 @@ return_fib:
   6.113  				status = -EINTR;
   6.114  			} else {
   6.115  #else
   6.116 -			    {
   6.117 +			{
   6.118  #endif
   6.119  				/* Lock again and retry */
   6.120  				spin_lock_irqsave(&dev->fib_lock, flags);
   6.121 @@ -265,28 +270,30 @@ return_fib:
   6.122  			status = -EAGAIN;
   6.123  		}	
   6.124  	}
   6.125 +	fibctx->jiffies = jiffies/HZ;
   6.126  	return status;
   6.127  }
   6.128  
   6.129  int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
   6.130  {
   6.131 -	struct hw_fib *fib;
   6.132 +	struct fib *fib;
   6.133  
   6.134  	/*
   6.135  	 *	First free any FIBs that have not been consumed.
   6.136  	 */
   6.137 -	while (!list_empty(&fibctx->fibs)) {
   6.138 +	while (!list_empty(&fibctx->fib_list)) {
   6.139  		struct list_head * entry;
   6.140  		/*
   6.141  		 *	Pull the next fib from the fibs
   6.142  		 */
   6.143 -		entry = fibctx->fibs.next;
   6.144 +		entry = fibctx->fib_list.next;
   6.145  		list_del(entry);
   6.146 -		fib = list_entry(entry, struct hw_fib, header.FibLinks);
   6.147 +		fib = list_entry(entry, struct fib, fiblink);
   6.148  		fibctx->count--;
   6.149  		/*
   6.150  		 *	Free the space occupied by this copy of the fib.
   6.151  		 */
   6.152 +		kfree(fib->hw_fib);
   6.153  		kfree(fib);
   6.154  	}
   6.155  	/*
     7.1 --- a/xen/drivers/scsi/aacraid/comminit.c	Wed Aug 06 20:53:30 2003 +0000
     7.2 +++ b/xen/drivers/scsi/aacraid/comminit.c	Thu Aug 07 12:24:12 2003 +0000
     7.3 @@ -29,17 +29,18 @@
     7.4   *
     7.5   */
     7.6  
     7.7 -#include <xeno/config.h>
     7.8 -/* #include <xeno/kernel.h> */
     7.9 -#include <xeno/init.h>
    7.10 -#include <xeno/types.h>
    7.11 -#include <xeno/sched.h>
    7.12 -#include <xeno/pci.h>
    7.13 -#include <xeno/spinlock.h>
    7.14 -/* #include <xeno/slab.h> */
    7.15 -#include <xeno/blk.h>
    7.16 -/* #include <xeno/completion.h> */
    7.17 -/* #include <asm/semaphore.h> */
    7.18 +#include <linux/config.h>
    7.19 +#include <linux/kernel.h>
    7.20 +#include <linux/init.h>
    7.21 +#include <linux/types.h>
    7.22 +#include <linux/sched.h>
    7.23 +#include <linux/pci.h>
    7.24 +#include <linux/spinlock.h>
    7.25 +#include <linux/slab.h>
    7.26 +#include <linux/blk.h>
    7.27 +/*#include <linux/completion.h>*/
    7.28 +#include <linux/mm.h>
    7.29 +/*#include <asm/semaphore.h>*/
    7.30  #include "scsi.h"
    7.31  #include "hosts.h"
    7.32  
    7.33 @@ -58,7 +59,6 @@ static int aac_alloc_comm(struct aac_dev
    7.34  	struct aac_init *init;
    7.35  	dma_addr_t phys;
    7.36  
    7.37 -	/* FIXME: Adaptec add 128 bytes to this value - WHY ?? */
    7.38  	size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;
    7.39  
    7.40  	base = pci_alloc_consistent(dev->pdev, size, &phys);
    7.41 @@ -74,14 +74,6 @@ static int aac_alloc_comm(struct aac_dev
    7.42  	dev->init = (struct aac_init *)(base + fibsize);
    7.43  	dev->init_pa = phys + fibsize;
    7.44  
    7.45 -	/*
    7.46 -	 *	Cache the upper bits of the virtual mapping for 64bit boxes
    7.47 -	 *	FIXME: this crap should be rewritten
    7.48 -	 */
    7.49 -#if BITS_PER_LONG >= 64 
    7.50 -	dev->fib_base_va = ((ulong)base & 0xffffffff00000000);
    7.51 -#endif
    7.52 -
    7.53  	init = dev->init;
    7.54  
    7.55  	init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
    7.56 @@ -92,16 +84,20 @@ static int aac_alloc_comm(struct aac_dev
    7.57  	 *	Adapter Fibs are the first thing allocated so that they
    7.58  	 *	start page aligned
    7.59  	 */
    7.60 -	init->AdapterFibsVirtualAddress = cpu_to_le32((u32)base);
    7.61 -	init->AdapterFibsPhysicalAddress = cpu_to_le32(phys);
    7.62 +	dev->fib_base_va = (ulong)base;
    7.63 +
    7.64 +	/* We submit the physical address for AIF tags to limit to 32 bits */
    7.65 +	init->AdapterFibsVirtualAddress = cpu_to_le32((u32)phys);
    7.66 +	init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
    7.67  	init->AdapterFibsSize = cpu_to_le32(fibsize);
    7.68  	init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
    7.69 +	init->HostPhysMemPages = cpu_to_le32(4096);		// number of 4k pages of host physical memory
    7.70  
    7.71  	/*
    7.72  	 * Increment the base address by the amount already used
    7.73  	 */
    7.74  	base = base + fibsize + sizeof(struct aac_init);
    7.75 -	phys = phys + fibsize + sizeof(struct aac_init);
    7.76 +	phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init));
    7.77  	/*
    7.78  	 *	Align the beginning of Headers to commalign
    7.79  	 */
    7.80 @@ -111,8 +107,8 @@ static int aac_alloc_comm(struct aac_dev
    7.81  	/*
    7.82  	 *	Fill in addresses of the Comm Area Headers and Queues
    7.83  	 */
    7.84 -	*commaddr = (unsigned long *)base;
    7.85 -	init->CommHeaderAddress = cpu_to_le32(phys);
    7.86 +	*commaddr = base;
    7.87 +	init->CommHeaderAddress = cpu_to_le32((u32)phys);
    7.88  	/*
    7.89  	 *	Increment the base address by the size of the CommArea
    7.90  	 */
    7.91 @@ -144,8 +140,8 @@ static void aac_queue_init(struct aac_de
    7.92  	q->lock = &q->lockdata;
    7.93  	q->headers.producer = mem;
    7.94  	q->headers.consumer = mem+1;
    7.95 -	*q->headers.producer = cpu_to_le32(qsize);
    7.96 -	*q->headers.consumer = cpu_to_le32(qsize);
    7.97 +	*(q->headers.producer) = cpu_to_le32(qsize);
    7.98 +	*(q->headers.consumer) = cpu_to_le32(qsize);
    7.99  	q->entries = qsize;
   7.100  }
   7.101  
   7.102 @@ -250,9 +246,9 @@ int aac_comm_init(struct aac_dev * dev)
   7.103  	if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
   7.104  		return -ENOMEM;
   7.105  
   7.106 -	queues = (struct aac_entry *)((unsigned char *)headers + hdrsize);
   7.107 +	queues = (struct aac_entry *)(((ulong)headers) + hdrsize);
   7.108  
   7.109 -	/* Adapter to Host normal proirity Command queue */ 
   7.110 +	/* Adapter to Host normal priority Command queue */ 
   7.111  	comm->queue[HostNormCmdQueue].base = queues;
   7.112  	aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
   7.113  	queues += HOST_NORM_CMD_ENTRIES;
   7.114 @@ -317,23 +313,25 @@ struct aac_dev *aac_init_adapter(struct 
   7.115  	/*
   7.116  	 *	Ok now init the communication subsystem
   7.117  	 */
   7.118 -	dev->queues = (struct aac_queue_block *) 
   7.119 -	    kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
   7.120 +	dev->queues = (struct aac_queue_block *) kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
   7.121  	if (dev->queues == NULL) {
   7.122  		printk(KERN_ERR "Error could not allocate comm region.\n");
   7.123  		return NULL;
   7.124  	}
   7.125  	memset(dev->queues, 0, sizeof(struct aac_queue_block));
   7.126  
   7.127 -	if (aac_comm_init(dev)<0)
   7.128 +	if (aac_comm_init(dev)<0){
   7.129 +		kfree(dev->queues);
   7.130  		return NULL;
   7.131 -
   7.132 +	}
   7.133  	/*
   7.134  	 *	Initialize the list of fibs
   7.135  	 */
   7.136 -	if(fib_setup(dev)<0)
   7.137 -	    return NULL;
   7.138 -		
   7.139 +	if(fib_setup(dev)<0){
   7.140 +		kfree(dev->queues);
   7.141 +		return NULL;
   7.142 +	}
   7.143 +
   7.144  	INIT_LIST_HEAD(&dev->fib_list);
   7.145  #if 0
   7.146  	init_completion(&dev->aif_completion);
     8.1 --- a/xen/drivers/scsi/aacraid/commsup.c	Wed Aug 06 20:53:30 2003 +0000
     8.2 +++ b/xen/drivers/scsi/aacraid/commsup.c	Thu Aug 07 12:24:12 2003 +0000
     8.3 @@ -31,21 +31,22 @@
     8.4   *
     8.5   */
     8.6  
     8.7 -#include <xeno/config.h>
     8.8 -/* #include <xeno/kernel.h> */
     8.9 -#include <xeno/init.h>
    8.10 -#include <xeno/types.h>
    8.11 -#include <xeno/sched.h>
    8.12 -#include <xeno/pci.h>
    8.13 -#include <xeno/spinlock.h>
    8.14 +#include <linux/config.h>
    8.15 +#include <linux/kernel.h>
    8.16 +#include <linux/init.h>
    8.17 +#include <linux/types.h>
    8.18 +#include <linux/sched.h>
    8.19 +#include <linux/pci.h>
    8.20 +#include <linux/spinlock.h>
    8.21 +#include <linux/slab.h>
    8.22 +/*#include <linux/completion.h>*/
    8.23 +/*#include <asm/semaphore.h>*/
    8.24 +#include <linux/blk.h>
    8.25 +#include <asm/uaccess.h>
    8.26  
    8.27 -#include <xeno/interrupt.h> /* tasklet stuff */
    8.28 +#include <xeno/interrupt.h>
    8.29 +#include <xeno/delay.h>
    8.30  
    8.31 -/*  #include <xeno/slab.h> */
    8.32 -/*  #include <xeno/completion.h> */
    8.33 -/*  #include <asm/semaphore.h> */
    8.34 -#include <xeno/blk.h>
    8.35 -#include <xeno/delay.h>
    8.36  #include "scsi.h"
    8.37  #include "hosts.h"
    8.38  
    8.39 @@ -61,11 +62,9 @@
    8.40   
    8.41  static int fib_map_alloc(struct aac_dev *dev)
    8.42  {
    8.43 -    if((dev->hw_fib_va = 
    8.44 -	pci_alloc_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, 
    8.45 -			     &dev->hw_fib_pa))==NULL)
    8.46 -	return -ENOMEM;
    8.47 -    return 0;
    8.48 +	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, &dev->hw_fib_pa))==NULL)
    8.49 +		return -ENOMEM;
    8.50 +	return 0;
    8.51  }
    8.52  
    8.53  /**
    8.54 @@ -78,8 +77,7 @@ static int fib_map_alloc(struct aac_dev 
    8.55  
    8.56  void fib_map_free(struct aac_dev *dev)
    8.57  {
    8.58 -    pci_free_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, 
    8.59 -			dev->hw_fib_va, dev->hw_fib_pa);
    8.60 +	pci_free_consistent(dev->pdev, sizeof(struct hw_fib) * AAC_NUM_FIB, dev->hw_fib_va, dev->hw_fib_pa);
    8.61  }
    8.62  
    8.63  /**
    8.64 @@ -92,45 +90,45 @@ void fib_map_free(struct aac_dev *dev)
    8.65  
    8.66  int fib_setup(struct aac_dev * dev)
    8.67  {
    8.68 -    struct fib *fibptr;
    8.69 -    struct hw_fib *fib;
    8.70 -    dma_addr_t fibpa;
    8.71 -    int i;
    8.72 -    
    8.73 -    if(fib_map_alloc(dev)<0)
    8.74 -	return -ENOMEM;
    8.75 -    
    8.76 -    fib = dev->hw_fib_va;
    8.77 -    fibpa = dev->hw_fib_pa;
    8.78 -    memset(fib, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
    8.79 -    /*
    8.80 -     *	Initialise the fibs
    8.81 -     */
    8.82 -    for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++) 
    8.83 -    {
    8.84 -	fibptr->dev = dev;
    8.85 -	fibptr->fib = fib;
    8.86 -	fibptr->data = (void *) fibptr->fib->data;
    8.87 -	fibptr->next = fibptr+1;	/* Forward chain the fibs */
    8.88 +	struct fib *fibptr;
    8.89 +	struct hw_fib *hw_fib_va;
    8.90 +	dma_addr_t hw_fib_pa;
    8.91 +	int i;
    8.92 +	
    8.93 +	if(fib_map_alloc(dev)<0)
    8.94 +		return -ENOMEM;
    8.95 +		
    8.96 +	hw_fib_va = dev->hw_fib_va;
    8.97 +	hw_fib_pa = dev->hw_fib_pa;
    8.98 +	memset(hw_fib_va, 0, sizeof(struct hw_fib) * AAC_NUM_FIB);
    8.99 +	/*
   8.100 +	 *	Initialise the fibs
   8.101 +	 */
   8.102 +	for (i = 0, fibptr = &dev->fibs[i]; i < AAC_NUM_FIB; i++, fibptr++) 
   8.103 +	{
   8.104 +		fibptr->dev = dev;
   8.105 +		fibptr->hw_fib = hw_fib_va;
   8.106 +		fibptr->data = (void *) fibptr->hw_fib->data;
   8.107 +		fibptr->next = fibptr+1;	/* Forward chain the fibs */
   8.108  #if 0
   8.109 -	init_MUTEX_LOCKED(&fibptr->event_wait);
   8.110 +		init_MUTEX_LOCKED(&fibptr->event_wait);
   8.111  #endif
   8.112 -	spin_lock_init(&fibptr->event_lock);
   8.113 -	fib->header.XferState = cpu_to_le32(0xffffffff);
   8.114 -	fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
   8.115 -	fibptr->logicaladdr = (unsigned long) fibpa;
   8.116 -	fib = (struct hw_fib *)((unsigned char *)fib + sizeof(struct hw_fib));
   8.117 -	fibpa = fibpa + sizeof(struct hw_fib);
   8.118 -    }
   8.119 -    /*
   8.120 -     *	Add the fib chain to the free list
   8.121 -     */
   8.122 -    dev->fibs[AAC_NUM_FIB-1].next = NULL;
   8.123 -    /*
   8.124 -     *	Enable this to debug out of queue space
   8.125 -     */
   8.126 -    dev->free_fib = &dev->fibs[0];
   8.127 -    return 0;
   8.128 +		spin_lock_init(&fibptr->event_lock);
   8.129 +		hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
   8.130 +		hw_fib_va->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
   8.131 +		fibptr->hw_fib_pa = hw_fib_pa;
   8.132 +		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + sizeof(struct hw_fib));
   8.133 +		hw_fib_pa = hw_fib_pa + sizeof(struct hw_fib); 
   8.134 +	}
   8.135 +	/*
   8.136 +	 *	Add the fib chain to the free list
   8.137 +	 */
   8.138 +	dev->fibs[AAC_NUM_FIB-1].next = NULL;
   8.139 +	/*
   8.140 +	 *	Enable this to debug out of queue space
   8.141 +	 */
   8.142 +	dev->free_fib = &dev->fibs[0];
   8.143 +	return 0;
   8.144  }
   8.145  
   8.146  /**
   8.147 @@ -143,29 +141,29 @@ int fib_setup(struct aac_dev * dev)
   8.148   
   8.149  struct fib * fib_alloc(struct aac_dev *dev)
   8.150  {
   8.151 -    struct fib * fibptr;
   8.152 -    unsigned long flags;
   8.153 -    
   8.154 -    spin_lock_irqsave(&dev->fib_lock, flags);
   8.155 -    fibptr = dev->free_fib;	
   8.156 -    if(!fibptr)
   8.157 -	BUG();
   8.158 -    dev->free_fib = fibptr->next;
   8.159 -    spin_unlock_irqrestore(&dev->fib_lock, flags);
   8.160 -    /*
   8.161 -     *	Set the proper node type code and node byte size
   8.162 -     */
   8.163 -    fibptr->type = FSAFS_NTC_FIB_CONTEXT;
   8.164 -    fibptr->size = sizeof(struct fib);
   8.165 -    /*
   8.166 -     *	Null out fields that depend on being zero at the start of
   8.167 -     *	each I/O
   8.168 -     */
   8.169 -    fibptr->fib->header.XferState = cpu_to_le32(0);
   8.170 -    fibptr->callback = NULL;
   8.171 -    fibptr->callback_data = NULL;
   8.172 -    
   8.173 -    return fibptr;
   8.174 +	struct fib * fibptr;
   8.175 +	unsigned long flags;
   8.176 +	
   8.177 +	spin_lock_irqsave(&dev->fib_lock, flags);
   8.178 +	fibptr = dev->free_fib;	
   8.179 +	if(!fibptr)
   8.180 +		BUG();
   8.181 +	dev->free_fib = fibptr->next;
   8.182 +	spin_unlock_irqrestore(&dev->fib_lock, flags);
   8.183 +	/*
   8.184 +	 *	Set the proper node type code and node byte size
   8.185 +	 */
   8.186 +	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
   8.187 +	fibptr->size = sizeof(struct fib);
   8.188 +	/*
   8.189 +	 *	Null out fields that depend on being zero at the start of
   8.190 +	 *	each I/O
   8.191 +	 */
   8.192 +	fibptr->hw_fib->header.XferState = cpu_to_le32(0);
   8.193 +	fibptr->callback = NULL;
   8.194 +	fibptr->callback_data = NULL;
   8.195 +
   8.196 +	return fibptr;
   8.197  }
   8.198  
   8.199  /**
   8.200 @@ -178,24 +176,23 @@ struct fib * fib_alloc(struct aac_dev *d
   8.201   
   8.202  void fib_free(struct fib * fibptr)
   8.203  {
   8.204 -    unsigned long flags;
   8.205 -    
   8.206 -    spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
   8.207 -    
   8.208 -    if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
   8.209 -	aac_config.fib_timeouts++;
   8.210 -	fibptr->next = fibptr->dev->timeout_fib;
   8.211 -	fibptr->dev->timeout_fib = fibptr;
   8.212 -    } else {
   8.213 -	if (fibptr->fib->header.XferState != 0) {
   8.214 -	    printk(KERN_WARNING "fib_free, XferState != 0, "
   8.215 -		   "fibptr = 0x%p, XferState = 0x%x\n", 
   8.216 -		   (void *)fibptr, fibptr->fib->header.XferState);
   8.217 -	}
   8.218 -	fibptr->next = fibptr->dev->free_fib;
   8.219 -	fibptr->dev->free_fib = fibptr;
   8.220 -    }	
   8.221 -    spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
   8.222 +	unsigned long flags;
   8.223 +
   8.224 +	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
   8.225 +
   8.226 +	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
   8.227 +		aac_config.fib_timeouts++;
   8.228 +		fibptr->next = fibptr->dev->timeout_fib;
   8.229 +		fibptr->dev->timeout_fib = fibptr;
   8.230 +	} else {
   8.231 +		if (fibptr->hw_fib->header.XferState != 0) {
   8.232 +			printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n", 
   8.233 +				 (void*)fibptr, fibptr->hw_fib->header.XferState);
   8.234 +		}
   8.235 +		fibptr->next = fibptr->dev->free_fib;
   8.236 +		fibptr->dev->free_fib = fibptr;
   8.237 +	}	
   8.238 +	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
   8.239  }
   8.240  
   8.241  /**
   8.242 @@ -207,15 +204,14 @@ void fib_free(struct fib * fibptr)
   8.243   
   8.244  void fib_init(struct fib *fibptr)
   8.245  {
   8.246 -    struct hw_fib *fib = fibptr->fib;
   8.247 -    
   8.248 -    fib->header.StructType = FIB_MAGIC;
   8.249 -    fib->header.Size = cpu_to_le16(sizeof(struct hw_fib));
   8.250 -    fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | 
   8.251 -					FibEmpty | FastResponseCapable);
   8.252 -    fib->header.SenderFibAddress = cpu_to_le32(0);
   8.253 -    fib->header.ReceiverFibAddress = cpu_to_le32(0);
   8.254 -    fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
   8.255 +	struct hw_fib *hw_fib = fibptr->hw_fib;
   8.256 +
   8.257 +	hw_fib->header.StructType = FIB_MAGIC;
   8.258 +	hw_fib->header.Size = cpu_to_le16(sizeof(struct hw_fib));
   8.259 +	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
   8.260 +	hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
   8.261 +	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
   8.262 +	hw_fib->header.SenderSize = cpu_to_le16(sizeof(struct hw_fib));
   8.263  }
   8.264  
   8.265  /**
   8.266 @@ -228,10 +224,10 @@ void fib_init(struct fib *fibptr)
   8.267   
   8.268  void fib_dealloc(struct fib * fibptr)
   8.269  {
   8.270 -    struct hw_fib *fib = fibptr->fib;
   8.271 -    if(fib->header.StructType != FIB_MAGIC) 
   8.272 -	BUG();
   8.273 -    fib->header.XferState = cpu_to_le32(0);        
   8.274 +	struct hw_fib *hw_fib = fibptr->hw_fib;
   8.275 +	if(hw_fib->header.StructType != FIB_MAGIC) 
   8.276 +		BUG();
   8.277 +	hw_fib->header.XferState = cpu_to_le32(0);        
   8.278  }
   8.279  
   8.280  /*
   8.281 @@ -256,48 +252,47 @@ void fib_dealloc(struct fib * fibptr)
   8.282   
   8.283  static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
   8.284  {
   8.285 -    struct aac_queue * q;
   8.286 -
   8.287 -    /*
   8.288 -     *	All of the queues wrap when they reach the end, so we check
   8.289 -     *	to see if they have reached the end and if they have we just
   8.290 -     *	set the index back to zero. This is a wrap. You could or off
   8.291 -     *	the high bits in all updates but this is a bit faster I think.
   8.292 -     */
   8.293 -
   8.294 -    q = &dev->queues->queue[qid];
   8.295 -	
   8.296 -    *index = le32_to_cpu(*(q->headers.producer));
   8.297 -    if (*index - 2 == le32_to_cpu(*(q->headers.consumer)))
   8.298 -	*nonotify = 1; 
   8.299 +	struct aac_queue * q;
   8.300  
   8.301 -    if (qid == AdapHighCmdQueue) {
   8.302 -	if (*index >= ADAP_HIGH_CMD_ENTRIES)
   8.303 -	    *index = 0;
   8.304 -    } else if (qid == AdapNormCmdQueue) {
   8.305 -	if (*index >= ADAP_NORM_CMD_ENTRIES) 
   8.306 -	    *index = 0; /* Wrap to front of the Producer Queue. */
   8.307 -    }
   8.308 -    else if (qid == AdapHighRespQueue) 
   8.309 -    {
   8.310 -	if (*index >= ADAP_HIGH_RESP_ENTRIES)
   8.311 -	    *index = 0;
   8.312 -    }
   8.313 -    else if (qid == AdapNormRespQueue) 
   8.314 -    {
   8.315 -	if (*index >= ADAP_NORM_RESP_ENTRIES) 
   8.316 -	    *index = 0; /* Wrap to front of the Producer Queue. */
   8.317 -    }
   8.318 -    else BUG();
   8.319 +	/*
   8.320 +	 *	All of the queues wrap when they reach the end, so we check
   8.321 +	 *	to see if they have reached the end and if they have we just
   8.322 +	 *	set the index back to zero. This is a wrap. You could or off
   8.323 +	 *	the high bits in all updates but this is a bit faster I think.
   8.324 +	 */
   8.325  
   8.326 -    if (*index + 1 == le32_to_cpu(*(q->headers.consumer))) { /* Queue full */
   8.327 -	printk(KERN_WARNING "Queue %d full, %ld outstanding.\n", 
   8.328 -	       qid, q->numpending);
   8.329 -	return 0;
   8.330 -    } else {
   8.331 -	*entry = q->base + *index;
   8.332 -	return 1;
   8.333 -    }
   8.334 +	q = &dev->queues->queue[qid];
   8.335 +	
   8.336 +	*index = le32_to_cpu(*(q->headers.producer));
   8.337 +	if ((*index - 2) == le32_to_cpu(*(q->headers.consumer)))
   8.338 +			*nonotify = 1; 
   8.339 +
   8.340 +	if (qid == AdapHighCmdQueue) {
   8.341 +	        if (*index >= ADAP_HIGH_CMD_ENTRIES)
   8.342 +        		*index = 0;
   8.343 +	} else if (qid == AdapNormCmdQueue) {
   8.344 +	        if (*index >= ADAP_NORM_CMD_ENTRIES) 
   8.345 +			*index = 0; /* Wrap to front of the Producer Queue. */
   8.346 +	}
   8.347 +	else if (qid == AdapHighRespQueue) 
   8.348 +	{
   8.349 +	        if (*index >= ADAP_HIGH_RESP_ENTRIES)
   8.350 +			*index = 0;
   8.351 +	}
   8.352 +	else if (qid == AdapNormRespQueue) 
   8.353 +	{
   8.354 +		if (*index >= ADAP_NORM_RESP_ENTRIES) 
   8.355 +			*index = 0; /* Wrap to front of the Producer Queue. */
   8.356 +	}
   8.357 +	else BUG();
   8.358 +
   8.359 +        if (*index + 1 == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
   8.360 +		printk(KERN_WARNING "Queue %d full, %ld outstanding.\n", qid, q->numpending);
   8.361 +		return 0;
   8.362 +	} else {
   8.363 +	        *entry = q->base + *index;
   8.364 +		return 1;
   8.365 +	}
   8.366  }   
   8.367  
   8.368  /**
   8.369 @@ -316,48 +311,49 @@ static int aac_get_entry (struct aac_dev
   8.370   *	success.
   8.371   */
   8.372  
   8.373 -static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * fib, int wait, struct fib * fibptr, unsigned long *nonotify)
   8.374 +static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
   8.375  {
   8.376 -    struct aac_entry * entry = NULL;
   8.377 -    int map = 0;
   8.378 -    struct aac_queue * q = &dev->queues->queue[qid];
   8.379 +	struct aac_entry * entry = NULL;
   8.380 +	int map = 0;
   8.381 +	struct aac_queue * q = &dev->queues->queue[qid];
   8.382  		
   8.383 -    spin_lock_irqsave(q->lock, q->SavedIrql);
   8.384 +	spin_lock_irqsave(q->lock, q->SavedIrql);
   8.385  	    
   8.386 -    if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue) 
   8.387 -    {
   8.388 -	/*  if no entries wait for some if caller wants to */
   8.389 -	while (!aac_get_entry(dev, qid, &entry, index, nonotify)) 
   8.390 +	if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue) 
   8.391  	{
   8.392 -	    printk(KERN_ERR "GetEntries failed\n");
   8.393 +		/*  if no entries wait for some if caller wants to */
   8.394 +        	while (!aac_get_entry(dev, qid, &entry, index, nonotify)) 
   8.395 +        	{
   8.396 +			printk(KERN_ERR "GetEntries failed\n");
   8.397 +		}
   8.398 +	        /*
   8.399 +	         *	Setup queue entry with a command, status and fib mapped
   8.400 +	         */
   8.401 +	        entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
   8.402 +	        map = 1;
   8.403  	}
   8.404 +	else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
   8.405 +	{
   8.406 +	        while(!aac_get_entry(dev, qid, &entry, index, nonotify)) 
   8.407 +	        {
   8.408 +			/* if no entries wait for some if caller wants to */
   8.409 +		}
   8.410 +        	/*
   8.411 +        	 *	Setup queue entry with command, status and fib mapped
   8.412 +        	 */
   8.413 +        	entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
   8.414 +        	entry->addr = hw_fib->header.SenderFibAddress;
   8.415 +     			/* Restore adapters pointer to the FIB */
   8.416 +		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter now where to find its data */
   8.417 +        	map = 0;
   8.418 +	} 
   8.419  	/*
   8.420 -	 *	Setup queue entry with a command, status and fib mapped
   8.421 +	 *	If MapFib is true than we need to map the Fib and put pointers
   8.422 +	 *	in the queue entry.
   8.423  	 */
   8.424 -	entry->size = cpu_to_le32(le16_to_cpu(fib->header.Size));
   8.425 -	map = 1;
   8.426 -    }
   8.427 -    else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
   8.428 -    {
   8.429 -	while(!aac_get_entry(dev, qid, &entry, index, nonotify)) 
   8.430 -	{
   8.431 -	    /* if no entries wait for some if caller wants to */
   8.432 -	}
   8.433 -	/*
   8.434 -	 *	Setup queue entry with command, status and fib mapped
   8.435 -	 */
   8.436 -	entry->size = cpu_to_le32(le16_to_cpu(fib->header.Size));
   8.437 -	entry->addr = cpu_to_le32(fib->header.SenderFibAddress);     		/* Restore adapters pointer to the FIB */
   8.438 -	fib->header.ReceiverFibAddress = fib->header.SenderFibAddress;		/* Let the adapter now where to find its data */
   8.439 -	map = 0;
   8.440 -    } 
   8.441 -    /*
   8.442 -     *	If MapFib is true than we need to map the Fib and put pointers
   8.443 -     *	in the queue entry.
   8.444 -     */
   8.445 -    if (map)
   8.446 -	entry->addr = cpu_to_le32((unsigned long)(fibptr->logicaladdr));
   8.447 -    return 0;
   8.448 +	if (map)
   8.449 +		entry->addr = fibptr->hw_fib_pa;
   8.450 +	return 0;
   8.451  }
   8.452  
   8.453  
   8.454 @@ -376,24 +372,24 @@ static int aac_queue_get(struct aac_dev 
   8.455   
   8.456  static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify) 
   8.457  {
   8.458 -    struct aac_queue * q = &dev->queues->queue[qid];
   8.459 -
   8.460 -    if(q == NULL)
   8.461 -	BUG();
   8.462 -    *(q->headers.producer) = cpu_to_le32(index + 1);
   8.463 -    spin_unlock_irqrestore(q->lock, q->SavedIrql);
   8.464 +	struct aac_queue * q = &dev->queues->queue[qid];
   8.465  
   8.466 -    if (qid == AdapHighCmdQueue ||
   8.467 -	qid == AdapNormCmdQueue ||
   8.468 -	qid == AdapHighRespQueue ||
   8.469 -	qid == AdapNormRespQueue)
   8.470 -    {
   8.471 -	if (!nonotify)
   8.472 -	    aac_adapter_notify(dev, qid);
   8.473 -    }
   8.474 -    else
   8.475 -	printk("Suprise insert!\n");
   8.476 -    return 0;
   8.477 +	if(q == NULL)
   8.478 +		BUG();
   8.479 +	*(q->headers.producer) = cpu_to_le32(index + 1);
   8.480 +	spin_unlock_irqrestore(q->lock, q->SavedIrql);
   8.481 +
   8.482 +	if (qid == AdapHighCmdQueue ||
   8.483 +	    qid == AdapNormCmdQueue ||
   8.484 +	    qid == AdapHighRespQueue ||
   8.485 +	    qid == AdapNormRespQueue)
   8.486 +	{
   8.487 +		if (!nonotify)
   8.488 +			aac_adapter_notify(dev, qid);
   8.489 +	}
   8.490 +	else
   8.491 +		printk("Suprise insert!\n");
   8.492 +	return 0;
   8.493  }
   8.494  
   8.495  /*
   8.496 @@ -423,149 +419,141 @@ static int aac_insert_entry(struct aac_d
   8.497   
   8.498  int fib_send(u16 command, struct fib * fibptr, unsigned long size,  int priority, int wait, int reply, fib_callback callback, void * callback_data)
   8.499  {
   8.500 -    u32 index;
   8.501 -    u32 qid;
   8.502 -    struct aac_dev * dev = fibptr->dev;
   8.503 -    unsigned long nointr = 0;
   8.504 -    struct hw_fib * fib = fibptr->fib;
   8.505 -    struct aac_queue * q;
   8.506 -    unsigned long flags = 0;
   8.507 +	u32 index;
   8.508 +	u32 qid;
   8.509 +	struct aac_dev * dev = fibptr->dev;
   8.510 +	unsigned long nointr = 0;
   8.511 +	struct hw_fib * hw_fib = fibptr->hw_fib;
   8.512 +	struct aac_queue * q;
   8.513 +	unsigned long flags = 0;
   8.514  
   8.515 -    if (!(le32_to_cpu(fib->header.XferState) & HostOwned))
   8.516 -	return -EBUSY;
   8.517 -    /*
   8.518 -     *	There are 5 cases with the wait and reponse requested flags. 
   8.519 -     *	The only invalid cases are if the caller requests to wait and
   8.520 -     *	does not request a response and if the caller does not want a
   8.521 -     *	response and the Fibis not allocated from pool. If a response
   8.522 -     *	is not requesed the Fib will just be deallocaed by the DPC
   8.523 -     *	routine when the response comes back from the adapter. No
   8.524 -     *	further processing will be done besides deleting the Fib. We 
   8.525 -     *	will have a debug mode where the adapter can notify the host
   8.526 -     *	it had a problem and the host can log that fact.
   8.527 -     */
   8.528 -    if (wait && !reply) {
   8.529 -	return -EINVAL;
   8.530 -    } else if (!wait && reply) {
   8.531 -	fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
   8.532 -	FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
   8.533 -    } else if (!wait && !reply) {
   8.534 -	fib->header.XferState |= cpu_to_le32(NoResponseExpected);
   8.535 -	FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
   8.536 -    } else if (wait && reply) {
   8.537 -	fib->header.XferState |= cpu_to_le32(ResponseExpected);
   8.538 -	FIB_COUNTER_INCREMENT(aac_config.NormalSent);
   8.539 -    } 
   8.540 -    /*
   8.541 -     *	Map the fib into 32bits by using the fib number
   8.542 -     */
   8.543 -    fib->header.SenderData = fibptr-&dev->fibs[0];	/* for callback */
   8.544 -    /*
   8.545 -     *	Set FIB state to indicate where it came from and if we want a
   8.546 -     *	response from the adapter. Also load the command from the
   8.547 -     *	caller.
   8.548 -     *
   8.549 -     *	Map the hw fib pointer as a 32bit value
   8.550 -     */
   8.551 -    fib->header.SenderFibAddress = fib2addr(fib);
   8.552 -    fib->header.Command = cpu_to_le16(command);
   8.553 -    fib->header.XferState |= cpu_to_le32(SentFromHost);
   8.554 -    fibptr->fib->header.Flags = 0; /* Zero flags field - its internal only */
   8.555 -    /*
   8.556 -     *	Set the size of the Fib we want to send to the adapter
   8.557 -     */
   8.558 -    fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
   8.559 -    if (le16_to_cpu(fib->header.Size) > le16_to_cpu(fib->header.SenderSize)) {
   8.560 -	return -EMSGSIZE;
   8.561 -    }                
   8.562 -    /*
   8.563 -     *	Get a queue entry connect the FIB to it and send an notify
   8.564 -     *	the adapter a command is ready.
   8.565 -     */
   8.566 -    if (priority == FsaHigh) {
   8.567 -	fib->header.XferState |= cpu_to_le32(HighPriority);
   8.568 -	qid = AdapHighCmdQueue;
   8.569 -    } else {
   8.570 -	fib->header.XferState |= cpu_to_le32(NormalPriority);
   8.571 -	qid = AdapNormCmdQueue;
   8.572 -    }
   8.573 -    q = &dev->queues->queue[qid];
   8.574 +	if (!(le32_to_cpu(hw_fib->header.XferState) & HostOwned))
   8.575 +		return -EBUSY;
   8.576 +	/*
   8.577 +	 *	There are 5 cases with the wait and reponse requested flags. 
   8.578 +	 *	The only invalid cases are if the caller requests to wait and
   8.579 +	 *	does not request a response and if the caller does not want a
   8.580 +	 *	response and the Fibis not allocated from pool. If a response
   8.581 +	 *	is not requesed the Fib will just be deallocaed by the DPC
   8.582 +	 *	routine when the response comes back from the adapter. No
   8.583 +	 *	further processing will be done besides deleting the Fib. We 
   8.584 +	 *	will have a debug mode where the adapter can notify the host
   8.585 +	 *	it had a problem and the host can log that fact.
   8.586 +	 */
   8.587 +	if (wait && !reply) {
   8.588 +		return -EINVAL;
   8.589 +	} else if (!wait && reply) {
   8.590 +		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
   8.591 +		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
   8.592 +	} else if (!wait && !reply) {
   8.593 +		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
   8.594 +		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
   8.595 +	} else if (wait && reply) {
   8.596 +		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
   8.597 +		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
   8.598 +	} 
   8.599 +	/*
   8.600 +	 *	Map the fib into 32bits by using the fib number
   8.601 +	 */
   8.602  
   8.603 -    if(wait)
   8.604 -	spin_lock_irqsave(&fibptr->event_lock, flags);
   8.605 +//	hw_fib->header.SenderFibAddress = ((u32)(fibptr-dev->fibs)) << 1;
   8.606 +	hw_fib->header.SenderFibAddress = cpu_to_le32((u32)(ulong)fibptr->hw_fib_pa);
   8.607 +	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
   8.608 +	/*
   8.609 +	 *	Set FIB state to indicate where it came from and if we want a
   8.610 +	 *	response from the adapter. Also load the command from the
   8.611 +	 *	caller.
   8.612 +	 *
   8.613 +	 *	Map the hw fib pointer as a 32bit value
   8.614 +	 */
   8.615 +	hw_fib->header.Command = cpu_to_le16(command);
   8.616 +	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
   8.617 +	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
   8.618 +	/*
   8.619 +	 *	Set the size of the Fib we want to send to the adapter
   8.620 +	 */
   8.621 +	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
   8.622 +	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
   8.623 +		return -EMSGSIZE;
   8.624 +	}                
   8.625 +	/*
   8.626 +	 *	Get a queue entry connect the FIB to it and send an notify
   8.627 +	 *	the adapter a command is ready.
   8.628 +	 */
   8.629 +	if (priority == FsaHigh) {
   8.630 +		hw_fib->header.XferState |= cpu_to_le32(HighPriority);
   8.631 +		qid = AdapHighCmdQueue;
   8.632 +	} else {
   8.633 +		hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
   8.634 +		qid = AdapNormCmdQueue;
   8.635 +	}
   8.636 +	q = &dev->queues->queue[qid];
   8.637  
   8.638 -    if(aac_queue_get( dev, &index, qid, fib, 1, fibptr, &nointr)<0)
   8.639 -	return -EWOULDBLOCK;
   8.640 -    dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",
   8.641 -	     index));
   8.642 -    dprintk((KERN_DEBUG "Fib contents:.\n"));
   8.643 -    dprintk((KERN_DEBUG "  Command =               %d.\n", 
   8.644 -	     fib->header.Command));
   8.645 -    dprintk((KERN_DEBUG "  XferState  =            %x.\n", 
   8.646 -	     fib->header.XferState));
   8.647 -    /*
   8.648 -     *	Fill in the Callback and CallbackContext if we are not
   8.649 -     *	going to wait.
   8.650 -     */
   8.651 -    if (!wait) {
   8.652 -	fibptr->callback = callback;
   8.653 -	fibptr->callback_data = callback_data;
   8.654 -    }
   8.655 -    FIB_COUNTER_INCREMENT(aac_config.FibsSent);
   8.656 -    list_add_tail(&fibptr->queue, &q->pendingq);
   8.657 -    q->numpending++;
   8.658 +	if(wait)
   8.659 +		spin_lock_irqsave(&fibptr->event_lock, flags);
   8.660 +	if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
   8.661 +		return -EWOULDBLOCK;
   8.662 +	dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
   8.663 +	dprintk((KERN_DEBUG "Fib contents:.\n"));
   8.664 +	dprintk((KERN_DEBUG "  Command =               %d.\n", hw_fib->header.Command));
   8.665 +	dprintk((KERN_DEBUG "  XferState  =            %x.\n", hw_fib->header.XferState));
   8.666 +	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
   8.667 +	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
   8.668 +	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
   8.669 +	/*
   8.670 +	 *	Fill in the Callback and CallbackContext if we are not
   8.671 +	 *	going to wait.
   8.672 +	 */
   8.673 +	if (!wait) {
   8.674 +		fibptr->callback = callback;
   8.675 +		fibptr->callback_data = callback_data;
   8.676 +	}
   8.677 +	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
   8.678 +	list_add_tail(&fibptr->queue, &q->pendingq);
   8.679 +	q->numpending++;
   8.680  
   8.681 -    fibptr->done = 0;
   8.682 +	fibptr->done = 0;
   8.683 +	fibptr->flags = 0;
   8.684  
   8.685 -    if(aac_insert_entry(dev, index, qid, 
   8.686 -			(nointr & aac_config.irq_mod)) < 0)
   8.687 -	return -EWOULDBLOCK;
   8.688 -    /*
   8.689 -     *	If the caller wanted us to wait for response wait now. 
   8.690 -     */
   8.691 +	if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
   8.692 +		return -EWOULDBLOCK;
   8.693 +	/*
   8.694 +	 *	If the caller wanted us to wait for response wait now. 
   8.695 +	 */
   8.696      
   8.697 -    if (wait) {
   8.698 -	spin_unlock_irqrestore(&fibptr->event_lock, flags);
   8.699 +	if (wait) {
   8.700 +		spin_unlock_irqrestore(&fibptr->event_lock, flags);
   8.701  #if 0
   8.702 -	down(&fibptr->event_wait);
   8.703 +		down(&fibptr->event_wait);
   8.704 +		if(fibptr->done == 0)
   8.705 +			BUG();
   8.706  #endif
   8.707  #ifdef TRY_TASKLET
   8.708 -        /*
   8.709 -         * XXX KAF: Well, this is pretty gross. We should probably
   8.710 -         * do_softirq() after scheduling the tasklet, as long as we
   8.711 -         * are _sure_ we hold no locks here...
   8.712 -         */
   8.713 -//	printk("about to softirq aac_command_thread...\n"); 
   8.714  	while (!fibptr->done) { 
   8.715 -            tasklet_schedule(&aac_command_tasklet);
   8.716 -	    do_softirq(); /* force execution */
   8.717 -//	    mdelay(100); 
   8.718 -	}
   8.719 -//	printk("back from softirq cmd thread and fibptr->done!\n"); 
   8.720 -#else 
   8.721 -	printk("about to bail at aac_command_thread...\n"); 
   8.722 -	while (!fibptr->done) { 
   8.723 -	    mdelay(100); 
   8.724 -	    aac_command_thread(dev); 
   8.725 +		tasklet_schedule(&aac_command_tasklet);
   8.726 +		do_softirq(); /* force execution */
   8.727  	}
   8.728 -	printk("back from command thread and fibptr->done!\n"); 
   8.729 +#else 
   8.730 +	while (!fibptr->done) { 
   8.731 +		mdelay(100); 
   8.732 +		aac_command_thread(dev); 
   8.733 +	}
   8.734  #endif
   8.735 -/*  if(fibptr->done == 0) */
   8.736 -/*  			BUG(); */
   8.737 +
   8.738  			
   8.739 -	if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
   8.740 -	    return -ETIMEDOUT;
   8.741 +		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
   8.742 +			return -ETIMEDOUT;
   8.743 +		else
   8.744 +			return 0;
   8.745 +	}
   8.746 +	/*
   8.747 +	 *	If the user does not want a response than return success otherwise
   8.748 +	 *	return pending
   8.749 +	 */
   8.750 +	if (reply)
   8.751 +		return -EINPROGRESS;
   8.752  	else
   8.753 -	    return 0;
   8.754 -    }
   8.755 -    /*
   8.756 -     *	If the user does not want a response than return success otherwise
   8.757 -     *	return pending
   8.758 -     */
   8.759 -    if (reply)
   8.760 -	return -EINPROGRESS;
   8.761 -    else
   8.762 -	return 0;
   8.763 +		return 0;
   8.764  }
   8.765  
   8.766  /** 
   8.767 @@ -574,37 +562,36 @@ int fib_send(u16 command, struct fib * f
   8.768   *	@q: Queue
   8.769   *	@entry: Return entry
   8.770   *
   8.771 - *      Will return a pointer to the entry on the top of the queue
   8.772 - * 	requested that we are a consumer of, and return the address of
   8.773 - * 	the queue entry. It does * not change the state of the queue.
   8.774 + *	Will return a pointer to the entry on the top of the queue requested that
   8.775 + * 	we are a consumer of, and return the address of the queue entry. It does
   8.776 + *	not change the state of the queue. 
   8.777   */
   8.778  
   8.779  int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
   8.780  {
   8.781 -    u32 index;
   8.782 -    int status;
   8.783 -
   8.784 -    if (*q->headers.producer == *q->headers.consumer) {
   8.785 -	status = 0;
   8.786 -    } else {
   8.787 -	/*
   8.788 -	 *	The consumer index must be wrapped if we have reached
   8.789 -	 *	the end of the queue, else we just use the entry
   8.790 -	 *	pointed to by the header index
   8.791 -	 */
   8.792 -	if (le32_to_cpu(*q->headers.consumer) >= q->entries) 
   8.793 -	    index = 0;		
   8.794 -	else
   8.795 -	    index = le32_to_cpu(*q->headers.consumer);
   8.796 -	*entry = q->base + index;
   8.797 -	status = 1;
   8.798 -    }
   8.799 -    return(status);
   8.800 +	u32 index;
   8.801 +	int status;
   8.802 +	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
   8.803 +		status = 0;
   8.804 +	} else {
   8.805 +		/*
   8.806 +		 *	The consumer index must be wrapped if we have reached
   8.807 +		 *	the end of the queue, else we just use the entry
   8.808 +		 *	pointed to by the header index
   8.809 +		 */
   8.810 +		if (le32_to_cpu(*q->headers.consumer) >= q->entries) 
   8.811 +			index = 0;		
   8.812 +		else
   8.813 +		        index = le32_to_cpu(*q->headers.consumer);
   8.814 +		*entry = q->base + index;
   8.815 +		status = 1;
   8.816 +	}
   8.817 +	return(status);
   8.818  }
   8.819  
   8.820  int aac_consumer_avail(struct aac_dev *dev, struct aac_queue * q)
   8.821  {
   8.822 -    return (*q->headers.producer != *q->headers.consumer);
   8.823 +	return (le32_to_cpu(*q->headers.producer) != le32_to_cpu(*q->headers.consumer));
   8.824  }
   8.825  
   8.826  
   8.827 @@ -620,39 +607,38 @@ int aac_consumer_avail(struct aac_dev *d
   8.828  
   8.829  void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
   8.830  {
   8.831 -    int wasfull = 0;
   8.832 -    u32 notify;
   8.833 -
   8.834 -    if (*q->headers.producer+1 == *q->headers.consumer)
   8.835 -	wasfull = 1;
   8.836 -        
   8.837 -    if (le32_to_cpu(*q->headers.consumer) >= q->entries)
   8.838 -	*q->headers.consumer = cpu_to_le32(1);
   8.839 -    else
   8.840 -	*q->headers.consumer = 
   8.841 -	    cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
   8.842 -        
   8.843 -    if (wasfull) {
   8.844 -	switch (qid) {
   8.845 +	int wasfull = 0;
   8.846 +	u32 notify;
   8.847  
   8.848 -	case HostNormCmdQueue:
   8.849 -	    notify = HostNormCmdNotFull;
   8.850 -	    break;
   8.851 -	case HostHighCmdQueue:
   8.852 -	    notify = HostHighCmdNotFull;
   8.853 -	    break;
   8.854 -	case HostNormRespQueue:
   8.855 -	    notify = HostNormRespNotFull;
   8.856 -	    break;
   8.857 -	case HostHighRespQueue:
   8.858 -	    notify = HostHighRespNotFull;
   8.859 -	    break;
   8.860 -	default:
   8.861 -	    BUG();
   8.862 -	    return;
   8.863 +	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
   8.864 +		wasfull = 1;
   8.865 +        
   8.866 +	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
   8.867 +		*q->headers.consumer = cpu_to_le32(1);
   8.868 +	else
   8.869 +		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
   8.870 +        
   8.871 +	if (wasfull) {
   8.872 +		switch (qid) {
   8.873 +
   8.874 +		case HostNormCmdQueue:
   8.875 +			notify = HostNormCmdNotFull;
   8.876 +			break;
   8.877 +		case HostHighCmdQueue:
   8.878 +			notify = HostHighCmdNotFull;
   8.879 +			break;
   8.880 +		case HostNormRespQueue:
   8.881 +			notify = HostNormRespNotFull;
   8.882 +			break;
   8.883 +		case HostHighRespQueue:
   8.884 +			notify = HostHighRespNotFull;
   8.885 +			break;
   8.886 +		default:
   8.887 +			BUG();
   8.888 +			return;
   8.889 +		}
   8.890 +		aac_adapter_notify(dev, notify);
   8.891  	}
   8.892 -	aac_adapter_notify(dev, notify);
   8.893 -    }
   8.894  }        
   8.895  
   8.896  /**
   8.897 @@ -666,70 +652,65 @@ void aac_consumer_free(struct aac_dev * 
   8.898  
   8.899  int fib_adapter_complete(struct fib * fibptr, unsigned short size)
   8.900  {
   8.901 -    struct hw_fib * fib = fibptr->fib;
   8.902 -    struct aac_dev * dev = fibptr->dev;
   8.903 -    unsigned long nointr = 0;
   8.904 +	struct hw_fib * hw_fib = fibptr->hw_fib;
   8.905 +	struct aac_dev * dev = fibptr->dev;
   8.906 +	unsigned long nointr = 0;
   8.907 +	if (le32_to_cpu(hw_fib->header.XferState) == 0)
   8.908 +        	return 0;
   8.909 +	/*
   8.910 +	 *	If we plan to do anything check the structure type first.
   8.911 +	 */ 
   8.912 +	if ( hw_fib->header.StructType != FIB_MAGIC ) {
   8.913 +        	return -EINVAL;
   8.914 +	}
   8.915 +	/*
   8.916 +	 *	This block handles the case where the adapter had sent us a
   8.917 +	 *	command and we have finished processing the command. We
   8.918 +	 *	call completeFib when we are done processing the command 
   8.919 +	 *	and want to send a response back to the adapter. This will 
   8.920 +	 *	send the completed cdb to the adapter.
   8.921 +	 */
   8.922 +	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
   8.923 +	        hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
   8.924 +	        if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
   8.925 +        		u32 index;
   8.926 +       			if (size) 
   8.927 +			{
   8.928 +				size += sizeof(struct aac_fibhdr);
   8.929 +				if (size > le16_to_cpu(hw_fib->header.SenderSize))
   8.930 +					return -EMSGSIZE;
   8.931 +				hw_fib->header.Size = cpu_to_le16(size);
   8.932 +			}
   8.933 +			if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
   8.934 +				return -EWOULDBLOCK;
   8.935 +			}
   8.936 +			if (aac_insert_entry(dev, index, AdapHighRespQueue,  (nointr & (int)aac_config.irq_mod)) != 0) {
   8.937 +			}
   8.938 +		}
   8.939 +		else if (hw_fib->header.XferState & NormalPriority) 
   8.940 +		{
   8.941 +			u32 index;
   8.942  
   8.943 -    if (le32_to_cpu(fib->header.XferState) == 0)
   8.944 +			if (size) {
   8.945 +				size += sizeof(struct aac_fibhdr);
   8.946 +				if (size > le16_to_cpu(hw_fib->header.SenderSize)) 
   8.947 +					return -EMSGSIZE;
   8.948 +				hw_fib->header.Size = cpu_to_le16(size);
   8.949 +			}
   8.950 +			if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0) 
   8.951 +				return -EWOULDBLOCK;
   8.952 +			if (aac_insert_entry(dev, index, AdapNormRespQueue, 
   8.953 +				(nointr & (int)aac_config.irq_mod)) != 0) 
   8.954 +			{
   8.955 +			}
   8.956 +		}
   8.957 +	}
   8.958 +	else 
   8.959 +	{
   8.960 +        	printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
   8.961 +        	BUG();
   8.962 +	}   
   8.963  	return 0;
   8.964 -    /*
   8.965 -     *	If we plan to do anything check the structure type first.
   8.966 -     */ 
   8.967 -    if ( fib->header.StructType != FIB_MAGIC ) {
   8.968 -	return -EINVAL;
   8.969 -    }
   8.970 -    /*
   8.971 -     *	This block handles the case where the adapter had sent us a
   8.972 -     *	command and we have finished processing the command. We
   8.973 -     *	call completeFib when we are done processing the command 
   8.974 -     *	and want to send a response back to the adapter. This will 
   8.975 -     *	send the completed cdb to the adapter.
   8.976 -     */
   8.977 -    if (fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
   8.978 -	fib->header.XferState |= cpu_to_le32(HostProcessed);
   8.979 -	if (fib->header.XferState & cpu_to_le32(HighPriority)) {
   8.980 -	    u32 index;
   8.981 -	    if (size) 
   8.982 -	    {
   8.983 -		size += sizeof(struct aac_fibhdr);
   8.984 -		if (size > le16_to_cpu(fib->header.SenderSize))
   8.985 -		    return -EMSGSIZE;
   8.986 -		fib->header.Size = cpu_to_le16(size);
   8.987 -	    }
   8.988 -	    if(aac_queue_get(dev, &index, AdapHighRespQueue, 
   8.989 -			     fib, 1, NULL, &nointr) < 0) {
   8.990 -		return -EWOULDBLOCK;
   8.991 -	    }
   8.992 -	    if (aac_insert_entry(dev, index, AdapHighRespQueue,  
   8.993 -				 (nointr & (int)aac_config.irq_mod)) != 0) {
   8.994 -	    }
   8.995 -	}
   8.996 -	else if (fib->header.XferState & NormalPriority) 
   8.997 -	{
   8.998 -	    u32 index;
   8.999 -
  8.1000 -	    if (size) {
  8.1001 -		size += sizeof(struct aac_fibhdr);
  8.1002 -		if (size > le16_to_cpu(fib->header.SenderSize)) 
  8.1003 -		    return -EMSGSIZE;
  8.1004 -		fib->header.Size = cpu_to_le16(size);
  8.1005 -	    }
  8.1006 -	    if (aac_queue_get(dev, &index, AdapNormRespQueue, 
  8.1007 -			      fib, 1, NULL, &nointr) < 0) 
  8.1008 -		return -EWOULDBLOCK;
  8.1009 -	    if (aac_insert_entry(dev, index, AdapNormRespQueue, 
  8.1010 -				 (nointr & (int)aac_config.irq_mod)) != 0) 
  8.1011 -	    {
  8.1012 -	    }
  8.1013 -	}
  8.1014 -    }
  8.1015 -    else 
  8.1016 -    {
  8.1017 -	printk(KERN_WARNING 
  8.1018 -	       "fib_adapter_complete: Unknown xferstate detected.\n");
  8.1019 -	BUG();
  8.1020 -    }   
  8.1021 -    return 0;
  8.1022  }
  8.1023  
  8.1024  /**
  8.1025 @@ -741,44 +722,44 @@ int fib_adapter_complete(struct fib * fi
  8.1026   
  8.1027  int fib_complete(struct fib * fibptr)
  8.1028  {
  8.1029 -    struct hw_fib * fib = fibptr->fib;
  8.1030 -
  8.1031 -    /*
  8.1032 -     *	Check for a fib which has already been completed
  8.1033 -     */
  8.1034 -
  8.1035 -    if (fib->header.XferState == cpu_to_le32(0))
  8.1036 -	return 0;
  8.1037 -    /*
  8.1038 -     *	If we plan to do anything check the structure type first.
  8.1039 -     */ 
  8.1040 +	struct hw_fib * hw_fib = fibptr->hw_fib;
  8.1041  
  8.1042 -    if (fib->header.StructType != FIB_MAGIC)
  8.1043 -	return -EINVAL;
  8.1044 -    /*
  8.1045 -     *	This block completes a cdb which orginated on the host and we 
  8.1046 -     *	just need to deallocate the cdb or reinit it. At this point the
  8.1047 -     *	command is complete that we had sent to the adapter and this
  8.1048 -     *	cdb could be reused.
  8.1049 -     */
  8.1050 -    if((fib->header.XferState & cpu_to_le32(SentFromHost)) &&
  8.1051 -       (fib->header.XferState & cpu_to_le32(AdapterProcessed)))
  8.1052 -    {
  8.1053 -	fib_dealloc(fibptr);
  8.1054 -    }
  8.1055 -    else if(fib->header.XferState & cpu_to_le32(SentFromHost))
  8.1056 -    {
  8.1057  	/*
  8.1058 -	 *	This handles the case when the host has aborted the I/O
  8.1059 -	 *	to the adapter because the adapter is not responding
  8.1060 +	 *	Check for a fib which has already been completed
  8.1061  	 */
  8.1062 -	fib_dealloc(fibptr);
  8.1063 -    } else if(fib->header.XferState & cpu_to_le32(HostOwned)) {
  8.1064 -	fib_dealloc(fibptr);
  8.1065 -    } else {
  8.1066 -	BUG();
  8.1067 -    }   
  8.1068 -    return 0;
  8.1069 +
  8.1070 +	if (hw_fib->header.XferState == cpu_to_le32(0))
  8.1071 +        	return 0;
  8.1072 +	/*
  8.1073 +	 *	If we plan to do anything check the structure type first.
  8.1074 +	 */ 
  8.1075 +
  8.1076 +	if (hw_fib->header.StructType != FIB_MAGIC)
  8.1077 +	        return -EINVAL;
  8.1078 +	/*
  8.1079 +	 *	This block completes a cdb which orginated on the host and we 
  8.1080 +	 *	just need to deallocate the cdb or reinit it. At this point the
  8.1081 +	 *	command is complete that we had sent to the adapter and this
  8.1082 +	 *	cdb could be reused.
  8.1083 +	 */
  8.1084 +	if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
  8.1085 +		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
  8.1086 +	{
  8.1087 +		fib_dealloc(fibptr);
  8.1088 +	}
  8.1089 +	else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
  8.1090 +	{
  8.1091 +		/*
  8.1092 +		 *	This handles the case when the host has aborted the I/O
  8.1093 +		 *	to the adapter because the adapter is not responding
  8.1094 +		 */
  8.1095 +		fib_dealloc(fibptr);
  8.1096 +	} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
  8.1097 +		fib_dealloc(fibptr);
  8.1098 +	} else {
  8.1099 +		BUG();
  8.1100 +	}   
  8.1101 +	return 0;
  8.1102  }
  8.1103  
  8.1104  /**
  8.1105 @@ -792,23 +773,23 @@ int fib_complete(struct fib * fibptr)
  8.1106  
  8.1107  void aac_printf(struct aac_dev *dev, u32 val)
  8.1108  {
  8.1109 -    int length = val & 0xffff;
  8.1110 -    int level = (val >> 16) & 0xffff;
  8.1111 -    char *cp = dev->printfbuf;
  8.1112 +	int length = val & 0xffff;
  8.1113 +	int level = (val >> 16) & 0xffff;
  8.1114 +	char *cp = dev->printfbuf;
  8.1115  	
  8.1116 -    /*
  8.1117 -     *	The size of the printfbuf is set in port.c
  8.1118 -     *	There is no variable or define for it
  8.1119 -     */
  8.1120 -    if (length > 255)
  8.1121 -	length = 255;
  8.1122 -    if (cp[length] != 0)
  8.1123 -	cp[length] = 0;
  8.1124 -    if (level == LOG_HIGH_ERROR)
  8.1125 -	printk(KERN_WARNING "aacraid:%s", cp);
  8.1126 -    else
  8.1127 -	printk(KERN_INFO "aacraid:%s", cp);
  8.1128 -    memset(cp, 0,  256);
  8.1129 +	/*
  8.1130 +	 *	The size of the printfbuf is set in port.c
  8.1131 +	 *	There is no variable or define for it
  8.1132 +	 */
  8.1133 +	if (length > 255)
  8.1134 +		length = 255;
  8.1135 +	if (cp[length] != 0)
  8.1136 +		cp[length] = 0;
  8.1137 +	if (level == LOG_HIGH_ERROR)
  8.1138 +		printk(KERN_WARNING "aacraid:%s", cp);
  8.1139 +	else
  8.1140 +		printk(KERN_INFO "aacraid:%s", cp);
  8.1141 +	memset(cp, 0,  256);
  8.1142  }
  8.1143  
  8.1144  
  8.1145 @@ -821,16 +802,117 @@ void aac_printf(struct aac_dev *dev, u32
  8.1146   *	dispatches it to the appropriate routine for handling.
  8.1147   */
  8.1148  
  8.1149 +#define CONTAINER_TO_BUS(cont)		(0)
  8.1150 +#define CONTAINER_TO_TARGET(cont)	((cont))
  8.1151 +#define CONTAINER_TO_LUN(cont)		(0)
  8.1152 +
  8.1153  static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
  8.1154  {
  8.1155 -    struct hw_fib * fib = fibptr->fib;
  8.1156 -    /*
  8.1157 -     * Set the status of this FIB to be Invalid parameter.
  8.1158 -     *
  8.1159 -     *	*(u32 *)fib->data = ST_INVAL;
  8.1160 -     */
  8.1161 -    *(u32 *)fib->data = cpu_to_le32(ST_OK);
  8.1162 -    fib_adapter_complete(fibptr, sizeof(u32));
  8.1163 +#if 0
  8.1164 +	struct hw_fib * hw_fib = fibptr->hw_fib;
  8.1165 +	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
  8.1166 +	int busy;
  8.1167 +	u32 container;
  8.1168 +	mm_segment_t fs;
  8.1169 +
  8.1170 +	/* Sniff for container changes */
  8.1171 +	dprintk ((KERN_INFO "AifCmdDriverNotify=%x\n", le32_to_cpu(*(u32 *)aifcmd->data)));
  8.1172 +	switch (le32_to_cpu(*(u32 *)aifcmd->data)) {
  8.1173 +	case AifDenMorphComplete:
  8.1174 +	case AifDenVolumeExtendComplete:
  8.1175 +	case AifEnContainerChange: /* Not really a driver notify Event */
  8.1176 +
  8.1177 +		busy = 0;
  8.1178 +		container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
  8.1179 +		dprintk ((KERN_INFO "container=%d(%d,%d,%d,%d) ",
  8.1180 +		  container,
  8.1181 +		  (dev && dev->scsi_host_ptr)
  8.1182 +		    ? dev->scsi_host_ptr->host_no
  8.1183 +		    : -1,
  8.1184 +		  CONTAINER_TO_BUS(container),
  8.1185 +		  CONTAINER_TO_TARGET(container),
  8.1186 +		  CONTAINER_TO_LUN(container)));
  8.1187 +
  8.1188 +		/*
  8.1189 +		 *	Find the Scsi_Device associated with the SCSI address,
  8.1190 +		 * and mark it as changed, invalidating the cache. This deals
  8.1191 +		 * with changes to existing device IDs.
  8.1192 +		 */
  8.1193 +
  8.1194 +		if ((dev != (struct aac_dev *)NULL)
  8.1195 +		 && (dev->scsi_host_ptr != (struct Scsi_Host *)NULL)) {
  8.1196 +			Scsi_Device * device;
  8.1197 +
  8.1198 +			for (device = dev->scsi_host_ptr->host_queue;
  8.1199 +			  device != (Scsi_Device *)NULL;
  8.1200 +			  device = device->next) {
  8.1201 +				dprintk((KERN_INFO
  8.1202 +				  "aifd: device (%d,%d,%d,%d)?\n",
  8.1203 +				  dev->scsi_host_ptr->host_no,
  8.1204 +				  device->channel,
  8.1205 +				  device->id,
  8.1206 +				  device->lun));
  8.1207 +				if ((device->channel == CONTAINER_TO_BUS(container))
  8.1208 +				 && (device->id == CONTAINER_TO_TARGET(container))
  8.1209 +				 && (device->lun == CONTAINER_TO_LUN(container))) {
  8.1210 +					busy |= (device->access_count != 0);
  8.1211 +					if (busy == 0) {
  8.1212 +						device->removable = TRUE;
  8.1213 +					}
  8.1214 +				}
  8.1215 +			}
  8.1216 +		}
  8.1217 +		dprintk (("busy=%d\n", busy));
  8.1218 +
  8.1219 +		/*
  8.1220 +		 * if (busy == 0) {
  8.1221 +		 *	scan_scsis(dev->scsi_host_ptr, 1,
  8.1222 +		 *	  CONTAINER_TO_BUS(container),
  8.1223 +		 *	  CONTAINER_TO_TARGET(container),
  8.1224 +		 *	  CONTAINER_TO_LUN(container));
  8.1225 +		 * }
  8.1226 +		 * is not exported as accessible, so we need to go around it
  8.1227 +		 * another way. So, we look for the "proc/scsi/scsi" entry in
  8.1228 +		 * the proc filesystem (using proc_scsi as a shortcut) and send
  8.1229 +		 * it a message. This deals with new devices that have
  8.1230 +		 * appeared. If the device has gone offline, scan_scsis will
  8.1231 +		 * also discover this, but we do not want the device to
  8.1232 +		 * go away. We need to check the access_count for the
  8.1233 +		 * device since we are not wanting the devices to go away.
  8.1234 +		 */
  8.1235 +		if (busy == 0 && proc_scsi != NULL) {
  8.1236 +			struct proc_dir_entry * entry;
  8.1237 +
  8.1238 +			dprintk((KERN_INFO "proc_scsi=%p ", proc_scsi));
  8.1239 +			for (entry = proc_scsi->subdir; entry != (struct proc_dir_entry *)NULL; entry = entry->next) {
  8.1240 +				dprintk(("\"%.*s\"[%d]=%x ", entry->namelen,
  8.1241 +				  entry->name, entry->namelen, entry->low_ino));
  8.1242 +				if ((entry->low_ino != 0) && (entry->namelen == 4) && (memcmp ("scsi", entry->name, 4) == 0)) {
  8.1243 +					dprintk(("%p->write_proc=%p ", entry, entry->write_proc));
  8.1244 +					if (entry->write_proc != (int (*)(struct file *, const char *, unsigned long, void *))NULL) {
  8.1245 +						char buffer[80];
  8.1246 +						int length;
  8.1247 +
  8.1248 +						sprintf (buffer,
  8.1249 +						  "scsi add-single-device %d %d %d %d\n",
  8.1250 +						  dev->scsi_host_ptr->host_no,
  8.1251 +						  CONTAINER_TO_BUS(container),
  8.1252 +						  CONTAINER_TO_TARGET(container),
  8.1253 +						  CONTAINER_TO_LUN(container));
  8.1254 +						length = strlen (buffer);
  8.1255 +						dprintk((KERN_INFO "echo %.*s > /proc/scsi/scsi\n", length-1, buffer));
  8.1256 +						fs = get_fs();
  8.1257 +						set_fs(get_ds());
  8.1258 +						length = entry->write_proc(NULL, buffer, length, NULL);
  8.1259 +						set_fs(fs);
  8.1260 +						dprintk((KERN_INFO "returns %d\n", length));
  8.1261 +					}
  8.1262 +					break;
  8.1263 +				}
  8.1264 +			}
  8.1265 +		}
  8.1266 +	}
  8.1267 +#endif
  8.1268  }
  8.1269  
  8.1270  /**
  8.1271 @@ -842,7 +924,6 @@ static void aac_handle_aif(struct aac_de
  8.1272   *	until the queue is empty. When the queue is empty it will wait for
  8.1273   *	more FIBs.
  8.1274   */
  8.1275 - 
  8.1276  #ifndef TRY_TASKLET
  8.1277  int aac_command_thread(struct aac_dev * dev)
  8.1278  {
  8.1279 @@ -850,176 +931,193 @@ int aac_command_thread(struct aac_dev * 
  8.1280  DECLARE_TASKLET_DISABLED(aac_command_tasklet, aac_command_thread, 0);
  8.1281  void aac_command_thread(unsigned long data)
  8.1282  #define return(_x) return 
  8.1283 -{   
  8.1284 -    struct aac_dev *dev = (struct aac_dev *)data; 
  8.1285 -#endif
  8.1286 -    struct hw_fib *fib, *newfib;
  8.1287 -    struct fib fibptr; /* for error logging */
  8.1288 -    struct aac_queue_block *queues = dev->queues;
  8.1289 -    struct aac_fib_context *fibctx;
  8.1290 -    unsigned long flags;
  8.1291 -#if 0
  8.1292 -    DECLARE_WAITQUEUE(wait, current);
  8.1293 +{
  8.1294 +	struct aac_dev *dev = (struct aac_dev *)data; 
  8.1295  #endif
  8.1296 -
  8.1297 -    /*
  8.1298 -     *	We can only have one thread per adapter for AIF's.
  8.1299 -     */
  8.1300 -    if (dev->aif_thread)
  8.1301 -	return(-EINVAL);
  8.1302 -
  8.1303 +	struct hw_fib *hw_fib, *hw_newfib;
  8.1304 +	struct fib *fib, *newfib;
  8.1305 +	struct aac_queue_block *queues = dev->queues;
  8.1306 +	struct aac_fib_context *fibctx;
  8.1307 +	unsigned long flags;
  8.1308  #if 0
  8.1309 -    /*
  8.1310 -     *	Set up the name that will appear in 'ps'
  8.1311 -     *	stored in  task_struct.comm[16].
  8.1312 -     */
  8.1313 -    sprintf(current->comm, "aacraid");
  8.1314 -    daemonize();
  8.1315 +	DECLARE_WAITQUEUE(wait, current);
  8.1316  #endif
  8.1317  
  8.1318 -    /*
  8.1319 -     *	Let the DPC know it has a place to send the AIF's to.
  8.1320 -     */
  8.1321 -    dev->aif_thread = 1;
  8.1322 -    memset(&fibptr, 0, sizeof(struct fib));
  8.1323 +	/*
  8.1324 +	 *	We can only have one thread per adapter for AIF's.
  8.1325 +	 */
  8.1326 +	if (dev->aif_thread)
  8.1327 +		return(-EINVAL);
  8.1328  #if 0
  8.1329 -    add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
  8.1330 -    set_current_state(TASK_INTERRUPTIBLE);
  8.1331 +	/*
  8.1332 +	 *	Set up the name that will appear in 'ps'
  8.1333 +	 *	stored in  task_struct.comm[16].
  8.1334 +	 */
  8.1335 +	sprintf(current->comm, "aacraid");
  8.1336 +	daemonize();
  8.1337  #endif
  8.1338 -//    while(1) 
  8.1339 -    {
  8.1340 -
  8.1341 -	spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
  8.1342 -	while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
  8.1343 -	    struct list_head *entry;
  8.1344 -	    struct aac_aifcmd * aifcmd;
  8.1345 +	/*
  8.1346 +	 *	Let the DPC know it has a place to send the AIF's to.
  8.1347 +	 */
  8.1348 +	dev->aif_thread = 1;
  8.1349 +#if 0
  8.1350 +	add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
  8.1351 +	set_current_state(TASK_INTERRUPTIBLE);
  8.1352 +	dprintk ((KERN_INFO "aac_command_thread start\n"));
  8.1353 +	while(1) 
  8.1354 +#endif
  8.1355 +	{
  8.1356 +		spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
  8.1357 +		while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
  8.1358 +			struct list_head *entry;
  8.1359 +			struct aac_aifcmd * aifcmd;
  8.1360  
  8.1361  #if 0
  8.1362 -	    set_current_state(TASK_RUNNING);
  8.1363 +			set_current_state(TASK_RUNNING);
  8.1364  #endif
  8.1365  
  8.1366 -		
  8.1367 -	    entry = queues->queue[HostNormCmdQueue].cmdq.next;
  8.1368 -	    list_del(entry);
  8.1369 +			entry = queues->queue[HostNormCmdQueue].cmdq.next;
  8.1370 +			list_del(entry);
  8.1371 +	
  8.1372 +			spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
  8.1373 +			fib = list_entry(entry, struct fib, fiblink);
  8.1374 +			/*
  8.1375 +			 *	We will process the FIB here or pass it to a 
  8.1376 +			 *	worker thread that is TBD. We Really can't 
  8.1377 +			 *	do anything at this point since we don't have
  8.1378 +			 *	anything defined for this thread to do.
  8.1379 +			 */
  8.1380 +			hw_fib = fib->hw_fib;
  8.1381  			
  8.1382 -	    spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock,flags);
  8.1383 -	    fib = list_entry(entry, struct hw_fib, header.FibLinks);
  8.1384 -	    /*
  8.1385 -	     *	We will process the FIB here or pass it to a 
  8.1386 -	     *	worker thread that is TBD. We Really can't 
  8.1387 -	     *	do anything at this point since we don't have
  8.1388 -	     *	anything defined for this thread to do.
  8.1389 -	     */
  8.1390 -	    memset(&fibptr, 0, sizeof(struct fib));
  8.1391 -	    fibptr.type = FSAFS_NTC_FIB_CONTEXT;
  8.1392 -	    fibptr.size = sizeof( struct fib );
  8.1393 -	    fibptr.fib = fib;
  8.1394 -	    fibptr.data = fib->data;
  8.1395 -	    fibptr.dev = dev;
  8.1396 -	    /*
  8.1397 -	     *	We only handle AifRequest fibs from the adapter.
  8.1398 -	     */
  8.1399 -	    aifcmd = (struct aac_aifcmd *) fib->data;
  8.1400 -	    if (aifcmd->command == le16_to_cpu(AifCmdDriverNotify)) {
  8.1401 -		aac_handle_aif(dev, &fibptr);
  8.1402 -	    } else {
  8.1403 -		/* The u32 here is important and intended. We are using
  8.1404 -		   32bit wrapping time to fit the adapter field */
  8.1405 -		u32 time_now, time_last;
  8.1406 -		unsigned long flagv;
  8.1407 -		
  8.1408 -		time_now = jiffies/HZ;
  8.1409 +			memset(fib, 0, sizeof(struct fib));
  8.1410 +			fib->type = FSAFS_NTC_FIB_CONTEXT;
  8.1411 +			fib->size = sizeof( struct fib );
  8.1412 +			fib->hw_fib = hw_fib;
  8.1413 +			fib->data = hw_fib->data;
  8.1414 +			fib->dev = dev;
  8.1415 +			/*
  8.1416 +			 *	We only handle AifRequest fibs from the adapter.
  8.1417 +			 */
  8.1418 +			aifcmd = (struct aac_aifcmd *) hw_fib->data;
  8.1419 +			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
  8.1420 +				/* Handle Driver Notify Events */
  8.1421 +				aac_handle_aif(dev, fib);
  8.1422 +				*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
  8.1423 +				fib_adapter_complete(fib, sizeof(u32));
  8.1424 +			} else {
  8.1425 +				struct list_head *entry;
  8.1426 +				/* The u32 here is important and intended. We are using
  8.1427 +				   32bit wrapping time to fit the adapter field */
  8.1428 +				   
  8.1429 +				u32 time_now, time_last;
  8.1430 +				unsigned long flagv;
  8.1431 +				
  8.1432 +				/* Sniff events */
  8.1433 +				if (aifcmd->command == cpu_to_le32(AifCmdEventNotify))
  8.1434 +					aac_handle_aif(dev, fib);
  8.1435  
  8.1436 -		spin_lock_irqsave(&dev->fib_lock, flagv);
  8.1437 -		entry = dev->fib_list.next;
  8.1438 +				time_now = jiffies/HZ;
  8.1439 +
  8.1440 +				spin_lock_irqsave(&dev->fib_lock, flagv);
  8.1441 +				entry = dev->fib_list.next;
  8.1442  				/*
  8.1443  				 * For each Context that is on the 
  8.1444  				 * fibctxList, make a copy of the
  8.1445  				 * fib, and then set the event to wake up the
  8.1446  				 * thread that is waiting for it.
  8.1447  				 */
  8.1448 -		while (entry != &dev->fib_list) {
  8.1449 -		    /*
  8.1450 -		     * Extract the fibctx
  8.1451 -		     */
  8.1452 -		    fibctx = list_entry(entry, struct aac_fib_context, next);
  8.1453 -		    /*
  8.1454 -		     * Check if the queue is getting
  8.1455 -		     * backlogged
  8.1456 -		     */
  8.1457 -		    if (fibctx->count > 20)
  8.1458 -		    {
  8.1459 -			time_last = fibctx->jiffies;
  8.1460 -			/*
  8.1461 -			 * Has it been > 2 minutes 
  8.1462 -			 * since the last read off
  8.1463 -			 * the queue?
  8.1464 -			 */
  8.1465 -			if ((time_now - time_last) > 120) {
  8.1466 -			    entry = entry->next;
  8.1467 -			    aac_close_fib_context(dev, fibctx);
  8.1468 -			    continue;
  8.1469 -			}
  8.1470 -		    }
  8.1471 -		    /*
  8.1472 -		     * Warning: no sleep allowed while
  8.1473 -		     * holding spinlock
  8.1474 -		     */
  8.1475 -		    newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
  8.1476 -		    if (newfib) {
  8.1477 -			/*
  8.1478 -			 * Make the copy of the FIB
  8.1479 -			 */
  8.1480 -			memcpy(newfib, fib, sizeof(struct hw_fib));
  8.1481 -			/*
  8.1482 -			 * Put the FIB onto the
  8.1483 -			 * fibctx's fibs
  8.1484 -			 */
  8.1485 -			list_add_tail(&newfib->header.FibLinks, &fibctx->fibs);
  8.1486 -			fibctx->count++;
  8.1487 +				while (entry != &dev->fib_list) {
  8.1488 +					/*
  8.1489 +					 * Extract the fibctx
  8.1490 +					 */
  8.1491 +					fibctx = list_entry(entry, struct aac_fib_context, next);
  8.1492 +					/*
  8.1493 +					 * Check if the queue is getting
  8.1494 +					 * backlogged
  8.1495 +					 */
  8.1496 +					if (fibctx->count > 20)
  8.1497 +					{
  8.1498 +						/*
  8.1499 +						 * It's *not* jiffies folks,
  8.1500 +						 * but jiffies / HZ, so do not
  8.1501 +						 * panic ...
  8.1502 +						 */
  8.1503 +						time_last = fibctx->jiffies;
  8.1504 +						/*
  8.1505 +						 * Has it been > 2 minutes 
  8.1506 +						 * since the last read off
  8.1507 +						 * the queue?
  8.1508 +						 */
  8.1509 +						if ((time_now - time_last) > 120) {
  8.1510 +							entry = entry->next;
  8.1511 +							aac_close_fib_context(dev, fibctx);
  8.1512 +							continue;
  8.1513 +						}
  8.1514 +					}
  8.1515 +					/*
  8.1516 +					 * Warning: no sleep allowed while
  8.1517 +					 * holding spinlock
  8.1518 +					 */
  8.1519 +					hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
  8.1520 +					newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
  8.1521 +					if (newfib && hw_newfib) {
  8.1522 +						/*
  8.1523 +						 * Make the copy of the FIB
  8.1524 +						 * FIXME: check if we need to fix other fields up
  8.1525 +						 */
  8.1526 +						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
  8.1527 +						memcpy(newfib, fib, sizeof(struct fib));
  8.1528 +						newfib->hw_fib = hw_newfib;
  8.1529 +						/*
  8.1530 +						 * Put the FIB onto the
  8.1531 +						 * fibctx's fibs
  8.1532 +						 */
  8.1533 +						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
  8.1534 +						fibctx->count++;
  8.1535  #if 0
  8.1536 -			/* 
  8.1537 -			 * Set the event to wake up the
  8.1538 -			 * thread that will waiting.
  8.1539 -			 */
  8.1540 -			up(&fibctx->wait_sem);
  8.1541 +						/* 
  8.1542 +						 * Set the event to wake up the
  8.1543 +						 * thread that will waiting.
  8.1544 +						 */
  8.1545 +						up(&fibctx->wait_sem);
  8.1546  #endif
  8.1547 -		    } else {
  8.1548 -			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
  8.1549 -		    }
  8.1550 -		    entry = entry->next;
  8.1551 -		}
  8.1552 +					} else {
  8.1553 +						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
  8.1554 +						if(newfib)
  8.1555 +							kfree(newfib);
  8.1556 +						if(hw_newfib)
  8.1557 +							kfree(hw_newfib);
  8.1558 +					}
  8.1559 +					entry = entry->next;
  8.1560 +				}
  8.1561  				/*
  8.1562  				 *	Set the status of this FIB
  8.1563  				 */
  8.1564 -		*(u32 *)fib->data = cpu_to_le32(ST_OK);
  8.1565 -		fib_adapter_complete(&fibptr, sizeof(u32));
  8.1566 -		spin_unlock_irqrestore(&dev->fib_lock, flagv);
  8.1567 -	    }
  8.1568 -	    spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
  8.1569 -	}
  8.1570 -	/*
  8.1571 -	 *	There are no more AIF's
  8.1572 -	 */
  8.1573 -	spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
  8.1574 +				*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
  8.1575 +				fib_adapter_complete(fib, sizeof(u32));
  8.1576 +				spin_unlock_irqrestore(&dev->fib_lock, flagv);
  8.1577 +			}
  8.1578 +			spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
  8.1579 +			kfree(fib);
  8.1580 +		}
  8.1581 +		/*
  8.1582 +		 *	There are no more AIF's
  8.1583 +		 */
  8.1584 +		spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
  8.1585  #if 0
  8.1586 -	schedule();
  8.1587 +		schedule();
  8.1588  
  8.1589 -	if(signal_pending(current))
  8.1590 -	    break;
  8.1591 -	set_current_state(TASK_INTERRUPTIBLE);
  8.1592 +		if(signal_pending(current))
  8.1593 +			break;
  8.1594 +		set_current_state(TASK_INTERRUPTIBLE);
  8.1595  #endif
  8.1596 -
  8.1597 -    }
  8.1598 -    
  8.1599 +	}
  8.1600  #if 0
  8.1601 -    remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
  8.1602 -    dev->aif_thread = 0;
  8.1603 -    complete_and_exit(&dev->aif_completion, 0);
  8.1604 +	remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
  8.1605 +	dev->aif_thread = 0;
  8.1606 +	complete_and_exit(&dev->aif_completion, 0);
  8.1607  #else
  8.1608 -    mdelay(50); 
  8.1609 -    dev->aif_thread = 0;
  8.1610 -
  8.1611 +	mdelay(50); 
  8.1612 +	dev->aif_thread = 0;
  8.1613  #endif
  8.1614 -    return(0);
  8.1615  }
     9.1 --- a/xen/drivers/scsi/aacraid/dpcsup.c	Wed Aug 06 20:53:30 2003 +0000
     9.2 +++ b/xen/drivers/scsi/aacraid/dpcsup.c	Thu Aug 07 12:24:12 2003 +0000
     9.3 @@ -29,17 +29,17 @@
     9.4   *
     9.5   */
     9.6  
     9.7 -#include <xeno/config.h>
     9.8 -/* #include <xeno/kernel.h> */
     9.9 -#include <xeno/init.h>
    9.10 -#include <xeno/types.h>
    9.11 -#include <xeno/sched.h>
    9.12 -#include <xeno/pci.h>
    9.13 -/*  #include <xeno/spinlock.h> */
    9.14 -/*  #include <xeno/slab.h> */
    9.15 -/*  #include <xeno/completion.h> */
    9.16 -#include <xeno/blk.h>
    9.17 -/*  #include <asm/semaphore.h> */
    9.18 +#include <linux/config.h>
    9.19 +#include <linux/kernel.h>
    9.20 +#include <linux/init.h>
    9.21 +#include <linux/types.h>
    9.22 +#include <linux/sched.h>
    9.23 +#include <linux/pci.h>
    9.24 +#include <linux/spinlock.h>
    9.25 +#include <linux/slab.h>
    9.26 +/*#include <linux/completion.h>*/
    9.27 +#include <linux/blk.h>
    9.28 +/*#include <asm/semaphore.h>*/
    9.29  #include "scsi.h"
    9.30  #include "hosts.h"
    9.31  
    9.32 @@ -74,12 +74,14 @@ unsigned int aac_response_normal(struct 
    9.33  	 */
    9.34  	while(aac_consumer_get(dev, q, &entry))
    9.35  	{
    9.36 -		int fast;
    9.37 +		u32 fast ;
    9.38 +		fast = (entry->addr & cpu_to_le32(0x01));
    9.39 +//		fib = &dev->fibs[(entry->addr >> 1)];
    9.40 +//		hwfib = fib->hw_fib;
    9.41 +		hwfib = bus_to_virt(le32_to_cpu(entry->addr & cpu_to_le32(~0x01)));
    9.42 +		fib = &dev->fibs[hwfib->header.SenderData];
    9.43  
    9.44 -		fast = (int) (entry->addr & 0x01);
    9.45 -		hwfib = addr2fib(entry->addr & ~0x01);
    9.46  		aac_consumer_free(dev, q, HostNormRespQueue);
    9.47 -		fib = &dev->fibs[hwfib->header.SenderData];
    9.48  		/*
    9.49  		 *	Remove this fib from the Outstanding I/O queue.
    9.50  		 *	But only if it has not already been timed out.
    9.51 @@ -173,32 +175,53 @@ unsigned int aac_command_normal(struct a
    9.52  	 *	up the waiters until there are no more QEs. We then return
    9.53  	 *	back to the system.
    9.54  	 */
    9.55 +	dprintk((KERN_INFO
    9.56 +	  "dev=%p, dev->comm_phys=%x, dev->comm_addr=%p, dev->comm_size=%u\n",
    9.57 +	  dev, (u32)dev->comm_phys, dev->comm_addr, (unsigned)dev->comm_size));
    9.58 +
    9.59  	while(aac_consumer_get(dev, q, &entry))
    9.60  	{
    9.61 -		struct hw_fib * fib;
    9.62 -		fib = addr2fib(entry->addr);
    9.63 +		struct fib fibctx;
    9.64 +		struct fib *fib = &fibctx;
    9.65 +		u32 hw_fib_pa = le32_to_cpu(entry->addr & cpu_to_le32(~0x01));
    9.66 +		struct hw_fib * hw_fib_va = ((dev->comm_phys <= hw_fib_pa)
    9.67 +		 && (hw_fib_pa < (dev->comm_phys + dev->comm_size)))
    9.68 +		  ? dev->comm_addr + (hw_fib_pa - dev->comm_phys)
    9.69 +		  : /* inconceivable */ bus_to_virt(hw_fib_pa);
    9.70 +		dprintk((KERN_INFO "hw_fib_pa=%x hw_fib_va=%p\n", hw_fib_pa, hw_fib_va));
    9.71  
    9.72 -		if (dev->aif_thread) {
    9.73 -		        list_add_tail(&fib->header.FibLinks, &q->cmdq);
    9.74 +		/*
    9.75 +		 *	Allocate a FIB at all costs. For non queued stuff
    9.76 +		 *	we can just use the stack so we are happy. We need
    9.77 +		 *	a fib object in order to manage the linked lists
    9.78 +		 */
    9.79 +		if (dev->aif_thread)
    9.80 +			if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC))==NULL)
    9.81 +				fib = &fibctx;
    9.82 +			
    9.83 +		memset(fib, 0, sizeof(struct fib));
    9.84 +		INIT_LIST_HEAD(&fib->fiblink);
    9.85 +		fib->type = FSAFS_NTC_FIB_CONTEXT;
    9.86 +		fib->size = sizeof(struct fib);
    9.87 +		fib->hw_fib = hw_fib_va;
    9.88 +		fib->data = hw_fib_va->data;
    9.89 +		fib->dev = dev;
    9.90 +		
    9.91 +		if (dev->aif_thread && fib != &fibctx)
    9.92 +		{		
    9.93 +			list_add_tail(&fib->fiblink, &q->cmdq);
    9.94  	 	        aac_consumer_free(dev, q, HostNormCmdQueue);
    9.95  #if 0
    9.96  		        wake_up_interruptible(&q->cmdready);
    9.97  #endif
    9.98  		} else {
    9.99 -			struct fib fibctx;
   9.100  	 	        aac_consumer_free(dev, q, HostNormCmdQueue);
   9.101  			spin_unlock_irqrestore(q->lock, flags);
   9.102 -			memset(&fibctx, 0, sizeof(struct fib));
   9.103 -			fibctx.type = FSAFS_NTC_FIB_CONTEXT;
   9.104 -			fibctx.size = sizeof(struct fib);
   9.105 -			fibctx.fib = fib;
   9.106 -			fibctx.data = fib->data;
   9.107 -			fibctx.dev = dev;
   9.108  			/*
   9.109  			 *	Set the status of this FIB
   9.110  			 */
   9.111 -			*(u32 *)fib->data = cpu_to_le32(ST_OK);
   9.112 -			fib_adapter_complete(&fibctx, sizeof(u32));
   9.113 +			*(u32 *)hw_fib_va->data = cpu_to_le32(ST_OK);
   9.114 +			fib_adapter_complete(fib, sizeof(u32));
   9.115  			spin_lock_irqsave(q->lock, flags);
   9.116  		}		
   9.117  	}
    10.1 --- a/xen/drivers/scsi/aacraid/linit.c	Wed Aug 06 20:53:30 2003 +0000
    10.2 +++ b/xen/drivers/scsi/aacraid/linit.c	Thu Aug 07 12:24:12 2003 +0000
    10.3 @@ -35,21 +35,21 @@
    10.4   *	
    10.5   */
    10.6  
    10.7 -#define AAC_DRIVER_VERSION		"0.9.9ac6-TEST"
    10.8 -#define AAC_DRIVER_BUILD_DATE		__DATE__
    10.9 +#define AAC_DRIVER_VERSION		"1.1.2"
   10.10 +#define AAC_DRIVER_BUILD_DATE		__DATE__ " " __TIME__
   10.11  
   10.12 -#include <xeno/module.h>
   10.13 -#include <xeno/config.h>
   10.14 -#include <xeno/kernel.h>
   10.15 -#include <xeno/init.h>
   10.16 -#include <xeno/types.h>
   10.17 -#include <xeno/sched.h>
   10.18 -#include <xeno/pci.h>
   10.19 -#include <xeno/spinlock.h>
   10.20 -/*  #include <xeno/slab.h> */
   10.21 -/*  #include <xeno/completion.h> */
   10.22 -/*  #include <asm/semaphore.h> */
   10.23 -#include <xeno/blk.h>
   10.24 +#include <linux/module.h>
   10.25 +#include <linux/config.h>
   10.26 +#include <linux/kernel.h>
   10.27 +#include <linux/init.h>
   10.28 +#include <linux/types.h>
   10.29 +#include <linux/sched.h>
   10.30 +#include <linux/pci.h>
   10.31 +#include <linux/spinlock.h>
   10.32 +#include <linux/slab.h>
   10.33 +/*#include <linux/completion.h>*/
   10.34 +/*#include <asm/semaphore.h>*/
   10.35 +#include <linux/blk.h>
   10.36  #include "scsi.h"
   10.37  #include "hosts.h"
   10.38  
   10.39 @@ -59,12 +59,14 @@
   10.40  #define AAC_DRIVERNAME	"aacraid"
   10.41  
   10.42  MODULE_AUTHOR("Red Hat Inc and Adaptec");
   10.43 -MODULE_DESCRIPTION("Supports Dell PERC2, 2/Si, 3/Si, 3/Di, PERC 320/DC, Adaptec 2120S, 2200S, 5400S, and HP NetRAID-4M devices. http://domsch.com/xeno/ or http://linux.adaptec.com");
   10.44 +MODULE_DESCRIPTION("Supports Dell PERC2, 2/Si, 3/Si, 3/Di, Adaptec Advanced Raid Products, and HP NetRAID-4M devices. http://domsch.com/linux/ or http://linux.adaptec.com");
   10.45  MODULE_LICENSE("GPL");
   10.46 -MODULE_PARM(nondasd, "i");
   10.47 -MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
   10.48 +MODULE_PARM(paemode, "i");
   10.49 +MODULE_PARM_DESC(paemode, "Control whether dma addressing is using PAE. 0=off, 1=on");
   10.50  
   10.51 -static int nondasd=-1;
   10.52 +#if 0
   10.53 +static int paemode = -1;
   10.54 +#endif
   10.55  
   10.56  struct aac_dev *aac_devices[MAXIMUM_NUM_ADAPTERS];
   10.57  
   10.58 @@ -81,25 +83,41 @@ static int aac_cfg_major = -1;
   10.59   */
   10.60   
   10.61  static struct aac_driver_ident aac_drivers[] = {
   10.62 -	{ 0x1028, 0x0001, 0x1028, 0x0001, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 2/Si */
   10.63 -	{ 0x1028, 0x0002, 0x1028, 0x0002, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
   10.64 -	{ 0x1028, 0x0003, 0x1028, 0x0003, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Si */
   10.65 -	{ 0x1028, 0x0004, 0x1028, 0x00d0, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Si */
   10.66 -	{ 0x1028, 0x0002, 0x1028, 0x00d1, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
   10.67 -	{ 0x1028, 0x0002, 0x1028, 0x00d9, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
   10.68 -	{ 0x1028, 0x000a, 0x1028, 0x0106, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
   10.69 -	{ 0x1028, 0x000a, 0x1028, 0x011b, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
   10.70 -	{ 0x1028, 0x000a, 0x1028, 0x0121, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* PERC 3/Di */
   10.71 -	{ 0x9005, 0x0283, 0x9005, 0x0283, aac_rx_init, "aacraid",  "ADAPTEC ", "catapult        ", 2 }, /* catapult*/
   10.72 -	{ 0x9005, 0x0284, 0x9005, 0x0284, aac_rx_init, "aacraid",  "ADAPTEC ", "tomcat          ", 2 }, /* tomcat*/
   10.73 -	{ 0x9005, 0x0285, 0x9005, 0x0286, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2120S   ", 1 }, /* Adaptec 2120S (Crusader)*/
   10.74 -	{ 0x9005, 0x0285, 0x9005, 0x0285, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2 }, /* Adaptec 2200S (Vulcan)*/
   10.75 -	{ 0x9005, 0x0285, 0x9005, 0x0287, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2 }, /* Adaptec 2200S (Vulcan-2m)*/
   10.76 -	{ 0x9005, 0x0285, 0x1028, 0x0287, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, /* Dell PERC 320/DC */
   10.77 -	{ 0x1011, 0x0046, 0x9005, 0x0365, aac_sa_init, "aacraid",  "ADAPTEC ", "Adaptec 5400S   ", 4 }, /* Adaptec 5400S (Mustang)*/
   10.78 -	{ 0x1011, 0x0046, 0x9005, 0x0364, aac_sa_init, "aacraid",  "ADAPTEC ", "AAC-364         ", 4 }, /* Adaptec 5400S (Mustang)*/
   10.79 -	{ 0x1011, 0x0046, 0x9005, 0x1364, aac_sa_init, "percraid", "DELL    ", "PERCRAID        ", 4 }, /* Dell PERC2 "Quad Channel" */
   10.80 -	{ 0x1011, 0x0046, 0x103c, 0x10c2, aac_sa_init, "hpnraid",  "HP      ", "NetRAID-4M      ", 4 }  /* HP NetRAID-4M */
   10.81 +	{ 0x1028, 0x0001, 0x1028, 0x0001, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, 		/* PERC 2/Si (Iguana/PERC2Si) */
   10.82 +	{ 0x1028, 0x0002, 0x1028, 0x0002, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, 		/* PERC 3/Di (Opal/PERC3Di) */
   10.83 +	{ 0x1028, 0x0003, 0x1028, 0x0003, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, 		/* PERC 3/Si (SlimFast/PERC3Si */
   10.84 +	{ 0x1028, 0x0004, 0x1028, 0x00d0, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, 		/* PERC 3/Di (Iguana FlipChip/PERC3DiF */
   10.85 +	{ 0x1028, 0x0002, 0x1028, 0x00d1, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, 		/* PERC 3/Di (Viper/PERC3DiV) */
   10.86 +	{ 0x1028, 0x0002, 0x1028, 0x00d9, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, 		/* PERC 3/Di (Lexus/PERC3DiL) */
   10.87 +	{ 0x1028, 0x000a, 0x1028, 0x0106, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, 		/* PERC 3/Di (Jaguar/PERC3DiJ) */
   10.88 +	{ 0x1028, 0x000a, 0x1028, 0x011b, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, 		/* PERC 3/Di (Dagger/PERC3DiD) */
   10.89 +	{ 0x1028, 0x000a, 0x1028, 0x0121, aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2 }, 		/* PERC 3/Di (Boxster/PERC3DiB) */
   10.90 +	{ 0x9005, 0x0283, 0x9005, 0x0283, aac_rx_init, "aacraid",  "ADAPTEC ", "catapult        ", 2 }, 		/* catapult */
   10.91 +	{ 0x9005, 0x0284, 0x9005, 0x0284, aac_rx_init, "aacraid",  "ADAPTEC ", "tomcat          ", 2 }, 		/* tomcat */
   10.92 +	{ 0x9005, 0x0285, 0x9005, 0x0286, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2120S   ", 1, AAC_QUIRK_31BIT },/* Adaptec 2120S (Crusader) */
   10.93 +	{ 0x9005, 0x0285, 0x9005, 0x0285, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT },/* Adaptec 2200S (Vulcan) */
   10.94 +	{ 0x9005, 0x0285, 0x9005, 0x0287, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT },/* Adaptec 2200S (Vulcan-2m) */
   10.95 +	{ 0x9005, 0x0285, 0x17aa, 0x0286, aac_rx_init, "aacraid",  "Legend  ", "Legend S220     ", 1 }, 		/* Legend S220 (Legend Crusader) */
   10.96 +	{ 0x9005, 0x0285, 0x17aa, 0x0287, aac_rx_init, "aacraid",  "Legend  ", "Legend S230     ", 2 }, 		/* Legend S230 (Legend Vulcan) */
   10.97 +
   10.98 +	{ 0x9005, 0x0285, 0x9005, 0x0288, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3230S   ", 2 }, 		/* Adaptec 3230S (Harrier) */
   10.99 +	{ 0x9005, 0x0285, 0x9005, 0x0289, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3240S   ", 2 }, 		/* Adaptec 3240S (Tornado) */
  10.100 +	{ 0x9005, 0x0285, 0x9005, 0x028a, aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020S PCI-X ", 2 }, 		/* ASR-2020S PCI-X ZCR (Skyhawk) */
  10.101 +	{ 0x9005, 0x0285, 0x9005, 0x028b, aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020S PCI-X ", 2 }, 		/* ASR-2020S SO-DIMM PCI-X ZCR (Terminator) */
  10.102 +	{ 0x9005, 0x0285, 0x9005, 0x0290, aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2410SA SATA ", 2 }, 		/* AAR-2410SA PCI SATA 4ch (Jaguar II) */
  10.103 +	{ 0x9005, 0x0285, 0x1028, 0x0291, aac_rx_init, "aacraid",  "DELL    ", "CERC SATA RAID 2 ", 2 },		/* CERC SATA RAID 2 PCI SATA 8ch (DellCorsair) */
  10.104 +	{ 0x9005, 0x0285, 0x9005, 0x0292, aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2810SA SATA ", 2 }, 		/* AAR-2810SA PCI SATA 8ch (Corsair-8) */
  10.105 +	{ 0x9005, 0x0285, 0x9005, 0x0293, aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-21610SA SATA ", 2 },		/* AAR-21610SA PCI SATA 16ch (Corsair-16) */
  10.106 +	{ 0x9005, 0x0285, 0x9005, 0x0294, aac_rx_init, "aacraid",  "ADAPTEC ", "SO-DIMM SATA ZCR ", 2 },		/* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
  10.107 +	/* ServeRAID */
  10.108 +/*	{ 0x9005, 0x0250, 0x1014, 0x0279, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec         ", 2 }, */ /*  (Marco) */
  10.109 +/*	{ 0x9005, 0x0250, 0x1014, 0x028c, aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec         ", 2 }, */ /* (Sebring)*/
  10.110 +
  10.111 +	{ 0x9005, 0x0285, 0x1028, 0x0287, aac_rx_init, "percraid", "DELL    ", "PERC 320/DC     ", 2 },			/* Perc 320/DC*/
  10.112 +	{ 0x1011, 0x0046, 0x9005, 0x0365, aac_sa_init, "aacraid",  "ADAPTEC ", "Adaptec 5400S   ", 4 }, 		/* Adaptec 5400S (Mustang)*/
  10.113 +	{ 0x1011, 0x0046, 0x9005, 0x0364, aac_sa_init, "aacraid",  "ADAPTEC ", "AAC-364         ", 4 },			/* Adaptec 5400S (Mustang)*/
  10.114 +	{ 0x1011, 0x0046, 0x9005, 0x1364, aac_sa_init, "percraid", "DELL    ", "PERCRAID        ", 4 },			/* Dell PERC2 "Quad Channel" */
  10.115 +	{ 0x1011, 0x0046, 0x103c, 0x10c2, aac_sa_init, "hpnraid",  "HP      ", "NetRAID         ", 4 }			/* HP NetRAID-4M */
  10.116  };
  10.117  
  10.118  #define NUM_AACTYPES	(sizeof(aac_drivers) / sizeof(struct aac_driver_ident))
  10.119 @@ -111,7 +129,7 @@ static int aac_cfg_open(struct inode * i
  10.120  static int aac_cfg_release(struct inode * inode,struct file * file);
  10.121  
  10.122  static struct file_operations aac_cfg_fops = {
  10.123 -/*	owner: THIS_MODULE, */
  10.124 +	owner: THIS_MODULE,
  10.125  	ioctl: aac_cfg_ioctl,
  10.126  	open: aac_cfg_open,
  10.127  	release: aac_cfg_release
  10.128 @@ -151,194 +169,156 @@ static void aac_queuedepth(struct Scsi_H
  10.129   *	scsi_malloc/scsi_free must not be called.
  10.130   *
  10.131   */
  10.132 + 
  10.133  static int aac_detect(Scsi_Host_Template *template)
  10.134  {
  10.135 -    int index;
  10.136 -    int container;
  10.137 -    u16 vendor_id, device_id;
  10.138 -    struct Scsi_Host *host_ptr;
  10.139 -    struct pci_dev *dev = NULL;
  10.140 -    struct aac_dev *aac;
  10.141 -    struct fsa_scsi_hba *fsa_dev_ptr;
  10.142 -    char *name = NULL;
  10.143 +	int index;
  10.144 +	int container;
  10.145 +	u16 vendor_id, device_id;
  10.146 +	struct Scsi_Host *host_ptr;
  10.147 +	struct pci_dev *dev = NULL;
  10.148 +	struct aac_dev *aac;
  10.149 +	struct fsa_scsi_hba *fsa_dev_ptr;
  10.150 +	char *name = NULL;
  10.151  	
  10.152 -    printk(KERN_INFO "Red Hat/Adaptec aacraid driver, %s\n", 
  10.153 -	   AAC_DRIVER_BUILD_DATE);
  10.154 -
  10.155 -
  10.156 -    /* 
  10.157 -    ** XXX SMH: we need to take interrupts during detect, but the SCSI 
  10.158 -    ** layer is holding this lock with interrupts disabled. I don't 
  10.159 -    ** know how this works on vanilla linux (we 'down' on a semaphone 
  10.160 -    ** at one point during the process -- how do we wake?) 
  10.161 -    */
  10.162 -    spin_unlock_irq(&io_request_lock);
  10.163 -
  10.164 -
  10.165 -    /* setting up the proc directory structure */
  10.166 -    template->proc_name = "aacraid";
  10.167 -
  10.168 -    for( index = 0; index != num_aacdrivers; index++ )
  10.169 -    {
  10.170 -	device_id = aac_drivers[index].device;
  10.171 -	vendor_id = aac_drivers[index].vendor;
  10.172 -	name = aac_drivers[index].name;
  10.173 -	dprintk((KERN_DEBUG "Checking %s %x/%x/%x/%x.\n", 
  10.174 -		 name, vendor_id, device_id,
  10.175 -		 aac_drivers[index].subsystem_vendor,
  10.176 -		 aac_drivers[index].subsystem_device));
  10.177 -
  10.178 -	dev = NULL;
  10.179 -	while((dev = pci_find_device(vendor_id, device_id, dev))) {
  10.180 -	    if (pci_enable_device(dev))
  10.181 -		continue;
  10.182 -	    pci_set_master(dev);
  10.183 -	    pci_set_dma_mask(dev, 0xFFFFFFFFULL);
  10.184 -
  10.185 -	    if((dev->subsystem_vendor != aac_drivers[index].subsystem_vendor) || 
  10.186 -	       (dev->subsystem_device != aac_drivers[index].subsystem_device))
  10.187 -		continue;
  10.188 +	printk(KERN_INFO "Red Hat/Adaptec aacraid driver (%s %s)\n", AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE);
  10.189  
  10.190 -	    dprintk((KERN_DEBUG "%s device detected.\n", name));
  10.191 -	    dprintk((KERN_DEBUG "%x/%x/%x/%x.\n", vendor_id, device_id, 
  10.192 -		     aac_drivers[index].subsystem_vendor, 
  10.193 -		     aac_drivers[index].subsystem_device));
  10.194 -	    /* Increment the host adapter count */
  10.195 -	    aac_count++;
  10.196 -	    /*
  10.197 -	     * scsi_register() allocates memory for a Scsi_Hosts
  10.198 -	     * structure and links it into the linked list of host
  10.199 -	     * adapters. This linked list contains the data for all
  10.200 -	     * possible <supported> scsi hosts.  This is similar to
  10.201 -	     * the Scsi_Host_Template, except that we have one entry
  10.202 -	     * for each actual physical host adapter on the system,
  10.203 -	     * stored as a linked list. If there are two AAC boards,
  10.204 -	     * then we will need to make two Scsi_Host entries, but
  10.205 -	     * there will be only one Scsi_Host_Template entry. The
  10.206 -	     * second argument to scsi_register() specifies the size
  10.207 -	     * of the extra memory we want to hold any device specific
  10.208 -	     * information.  */
  10.209 -	    host_ptr = scsi_register( template, sizeof(struct aac_dev) );
  10.210 -	    /* 
  10.211 -	     * These three parameters can be used to allow for wide SCSI 
  10.212 -	     * and for host adapters that support multiple buses.
  10.213 -	     */
  10.214 -	    host_ptr->max_id = 17;
  10.215 -	    host_ptr->max_lun = 8;
  10.216 -	    host_ptr->max_channel = 1;
  10.217 -	    host_ptr->irq = dev->irq;		/* Adapter IRQ number */
  10.218 -	    /* host_ptr->base = ( char * )(dev->resource[0].start & ~0xff); */
  10.219 -	    host_ptr->base = dev->resource[0].start;
  10.220 -	    scsi_set_pci_device(host_ptr, dev);
  10.221 -	    dprintk((KERN_DEBUG "Device base address = 0x%lx [0x%lx].\n", 
  10.222 -		     host_ptr->base, dev->resource[0].start));
  10.223 -	    dprintk((KERN_DEBUG "Device irq = 0x%x.\n", dev->irq));
  10.224 -	    /*
  10.225 -	     * The unique_id field is a unique identifier that must
  10.226 -	     * be assigned so that we have some way of identifying
  10.227 -	     * each host adapter properly and uniquely. For hosts 
  10.228 -	     * that do not support more than one card in the
  10.229 -	     * system, this does not need to be set. It is
  10.230 -	     * initialized to zero in scsi_register(). This is the 
  10.231 -	     * value returned as aac->id.
  10.232 -	     */
  10.233 -	    host_ptr->unique_id = aac_count - 1;
  10.234 -	    /*
  10.235 -	     *	This function is called after the device list has
  10.236 -	     *	been built to find the tagged queueing depth 
  10.237 -	     *	supported for each device.
  10.238 -	     */
  10.239 -	    host_ptr->select_queue_depths = aac_queuedepth;
  10.240 -	    aac = (struct aac_dev *)host_ptr->hostdata;
  10.241 -	    /* attach a pointer back to Scsi_Host */
  10.242 -	    aac->scsi_host_ptr = host_ptr;	
  10.243 -	    aac->pdev = dev;
  10.244 -	    aac->cardtype =  index;
  10.245 -	    aac->name = aac->scsi_host_ptr->hostt->name;
  10.246 -	    aac->id = aac->scsi_host_ptr->unique_id;
  10.247 -	    /* Initialize the ordinal number of the device to -1 */
  10.248 -	    fsa_dev_ptr = &(aac->fsa_dev);
  10.249 -	    for( container=0; container < MAXIMUM_NUM_CONTAINERS; container++)
  10.250 -		fsa_dev_ptr->devno[container] = -1;
  10.251 -
  10.252 -	    dprintk((KERN_DEBUG "Initializing Hardware...\n"));
  10.253 +	/* setting up the proc directory structure */
  10.254 +	template->proc_name = "aacraid";
  10.255 +	spin_unlock_irq(&io_request_lock);
  10.256  
  10.257 -	    if((*aac_drivers[index].init)(aac , host_ptr->unique_id) != 0)
  10.258 -	    {
  10.259 -		/* device initialization failed */
  10.260 -		printk(KERN_WARNING 
  10.261 -		       "aacraid: device initialization failed.\n");
  10.262 -		scsi_unregister(host_ptr);
  10.263 -		aac_count--;
  10.264 -		continue;
  10.265 -	    } 
  10.266 -	    dprintk((KERN_DEBUG "%s:%d device initialization successful.\n", 
  10.267 -		     name, host_ptr->unique_id));
  10.268 -	    aac_get_adapter_info(aac);
  10.269 -
  10.270 -	    dprintk((KERN_DEBUG "%s got adapter info.\n", name));
  10.271 +	for( index = 0; index != num_aacdrivers; index++ )
  10.272 +	{
  10.273 +		device_id = aac_drivers[index].device;
  10.274 +		vendor_id = aac_drivers[index].vendor;
  10.275 +		name = aac_drivers[index].name;
  10.276 +		dprintk((KERN_DEBUG "Checking %s %x/%x/%x/%x.\n", 
  10.277 +			name, vendor_id, device_id,
  10.278 +			aac_drivers[index].subsystem_vendor,
  10.279 +			aac_drivers[index].subsystem_device));
  10.280  
  10.281 -	    if(nondasd != -1) 
  10.282 -	    {
  10.283 -		/* someone told us how to set this on the cmdline */
  10.284 -		aac->nondasd_support = (nondasd!=0);
  10.285 -	    }
  10.286 -	    if(aac->nondasd_support != 0){
  10.287 -		printk(KERN_INFO "%s%d: Non-DASD support enabled\n", 
  10.288 -		       aac->name, aac->id);
  10.289 -	    }
  10.290 -	    dprintk((KERN_DEBUG "%s:%d options flag %04x.\n", name, 
  10.291 -		     host_ptr->unique_id, aac->adapter_info.options));
  10.292 -	    if(aac->nondasd_support == 1)
  10.293 -	    {
  10.294 -		/*
  10.295 -		 * max channel will be the physical
  10.296 -		 * channels plus 1 virtual channel all
  10.297 -		 * containers are on the virtual
  10.298 -		 * channel 0 physical channels are
  10.299 -		 * address by their actual physical
  10.300 -		 * number+1 */
  10.301 -		host_ptr->max_channel = aac_drivers[index].channels+1;
  10.302 -	    } else {
  10.303 -		host_ptr->max_channel = 1;
  10.304 -	    }
  10.305 -	    dprintk((KERN_DEBUG "Device has %d logical channels\n", 
  10.306 -		     host_ptr->max_channel));
  10.307 -	    aac_get_containers(aac);
  10.308 -	    aac_devices[aac_count-1] = aac;
  10.309 +		dev = NULL;
  10.310 +		while((dev = pci_find_device(vendor_id, device_id, dev))) {
  10.311 +			if (pci_enable_device(dev))
  10.312 +				continue;
  10.313 +			pci_set_master(dev);
  10.314 +			
  10.315 +			if(aac_drivers[index].quirks & AAC_QUIRK_31BIT)
  10.316 +				pci_set_dma_mask(dev, 0x7FFFFFFFULL);
  10.317 +			else
  10.318 +				pci_set_dma_mask(dev, 0xFFFFFFFFULL);
  10.319  
  10.320 -	    /*
  10.321 -	     * dmb - we may need to move these 3 parms somewhere else once
  10.322 -	     * we get a fib that can report the actual numbers
  10.323 -	     */
  10.324 -	    host_ptr->max_id = AAC_MAX_TARGET;
  10.325 -	    host_ptr->max_lun = AAC_MAX_LUN;
  10.326 -			
  10.327 -	    /*
  10.328 -	     *  If we are PAE capable then our future DMA mappings
  10.329 -	     *  (for read/write commands) are 64bit clean and don't 
  10.330 -	     *  need bouncing. This assumes we do no other 32bit only
  10.331 -	     *  allocations (eg fib table expands) after this point.
  10.332 -	     */
  10.333 -			 
  10.334 -	    if(aac->pae_support)
  10.335 -		pci_set_dma_mask(dev, 0xFFFFFFFFFFFFFFFFUL);
  10.336 +			if((dev->subsystem_vendor != aac_drivers[index].subsystem_vendor) || 
  10.337 +			   (dev->subsystem_device != aac_drivers[index].subsystem_device))
  10.338 +					continue;
  10.339 +
  10.340 +			dprintk((KERN_DEBUG "%s device detected.\n", name));
  10.341 +			dprintk((KERN_DEBUG "%x/%x/%x/%x.\n", vendor_id, device_id, 
  10.342 +				aac_drivers[index].subsystem_vendor, aac_drivers[index].subsystem_device));
  10.343 +			/*
  10.344 +			 * scsi_register() allocates memory for a Scsi_Hosts structure and
  10.345 +			 * links it into the linked list of host adapters. This linked list
  10.346 +			 * contains the data for all possible <supported> scsi hosts.
  10.347 +			 * This is similar to the Scsi_Host_Template, except that we have
  10.348 +			 * one entry for each actual physical host adapter on the system,
  10.349 +			 * stored as a linked list. If there are two AAC boards, then we
  10.350 +			 * will need to make two Scsi_Host entries, but there will be only
  10.351 +			 * one Scsi_Host_Template entry. The second argument to scsi_register()
  10.352 +			 * specifies the size of the extra memory we want to hold any device 
  10.353 +			 * specific information.
  10.354 +			 */
  10.355 +			host_ptr = scsi_register( template, sizeof(struct aac_dev) );
  10.356 +			if(host_ptr == NULL)
  10.357 +				continue;
  10.358 +			/* Increment the host adapter count */
  10.359 +			aac_count++;
  10.360 +			/* 
  10.361 +			 * These three parameters can be used to allow for wide SCSI 
  10.362 +			 * and for host adapters that support multiple buses.
  10.363 +			 */
  10.364 +			host_ptr->irq = dev->irq;		/* Adapter IRQ number */
  10.365 +			/* host_ptr->base = ( char * )(dev->resource[0].start & ~0xff); */
  10.366 +			host_ptr->base = dev->resource[0].start;
  10.367 +			scsi_set_pci_device(host_ptr, dev);
  10.368 +			dprintk((KERN_DEBUG "Device base address = 0x%lx [0x%lx].\n", host_ptr->base, dev->resource[0].start));
  10.369 +			dprintk((KERN_DEBUG "Device irq = 0x%x.\n", dev->irq));
  10.370 +			/*
  10.371 +			 * The unique_id field is a unique identifier that must
  10.372 +			 * be assigned so that we have some way of identifying
  10.373 +			 * each host adapter properly and uniquely. For hosts 
  10.374 +			 * that do not support more than one card in the
  10.375 +			 * system, this does not need to be set. It is
  10.376 +			 * initialized to zero in scsi_register(). This is the 
  10.377 +			 * value returned as aac->id.
  10.378 +			 */
  10.379 +			host_ptr->unique_id = aac_count - 1;
  10.380 +			/*
  10.381 +			 *	This function is called after the device list has
  10.382 +			 *	been built to find the tagged queueing depth 
  10.383 +			 *	supported for each device.
  10.384 +			 */
  10.385 +			host_ptr->select_queue_depths = aac_queuedepth;
  10.386 +			aac = (struct aac_dev *)host_ptr->hostdata;
  10.387 +			/* attach a pointer back to Scsi_Host */
  10.388 +			aac->scsi_host_ptr = host_ptr;	
  10.389 +			aac->pdev = dev;
  10.390 +			aac->name = aac->scsi_host_ptr->hostt->name;
  10.391 +			aac->id = aac->scsi_host_ptr->unique_id;
  10.392 +			aac->cardtype =  index;
  10.393 +
  10.394 +			aac->fibs = (struct fib*) kmalloc(sizeof(struct fib)*AAC_NUM_FIB, GFP_KERNEL);
  10.395 +			spin_lock_init(&aac->fib_lock);
  10.396 +
  10.397 +			/* Initialize the ordinal number of the device to -1 */
  10.398 +			fsa_dev_ptr = &(aac->fsa_dev);
  10.399 +			for( container = 0; container < MAXIMUM_NUM_CONTAINERS; container++ )
  10.400 +				fsa_dev_ptr->devno[container] = -1;
  10.401 +
  10.402 +			dprintk((KERN_DEBUG "Initializing Hardware...\n"));
  10.403 +			if((*aac_drivers[index].init)(aac , host_ptr->unique_id) != 0)
  10.404 +			{
  10.405 +				/* device initialization failed */
  10.406 +				printk(KERN_WARNING "aacraid: device initialization failed.\n");
  10.407 +				scsi_unregister(host_ptr);
  10.408 +				aac_count--;
  10.409 +				continue;
  10.410 +			} 
  10.411 +			dprintk((KERN_DEBUG "%s:%d device initialization successful.\n", name, host_ptr->unique_id));
  10.412 +			aac_get_adapter_info(aac);
  10.413 +			if(aac->nondasd_support == 1)
  10.414 +			{
  10.415 +				/*
  10.416 +				 * max channel will be the physical channels plus 1 virtual channel 
  10.417 +				 * all containers are on the virtual channel 0
  10.418 +				 * physical channels are address by their actual physical number+1
  10.419 +				 */
  10.420 +				host_ptr->max_channel = aac_drivers[index].channels+1;
  10.421 +			} else {
  10.422 +				host_ptr->max_channel = 1;
  10.423 + 			}
  10.424 +			dprintk((KERN_DEBUG "Device has %d logical channels\n", host_ptr->max_channel));
  10.425 +			aac_get_containers(aac);
  10.426 +			aac_devices[aac_count-1] = aac;
  10.427 +
  10.428 +			/*
  10.429 +			 * dmb - we may need to move the setting of these parms somewhere else once
  10.430 +			 * we get a fib that can report the actual numbers
  10.431 +			 */
  10.432 +			host_ptr->max_id = AAC_MAX_TARGET;
  10.433 +			host_ptr->max_lun = AAC_MAX_LUN;
  10.434 +		}
  10.435  	}
  10.436 -    }
  10.437 -
  10.438 -    /* XXX SMH: restore lock and IPL for SCSI layer */
  10.439 -    spin_lock_irq(&io_request_lock);
  10.440 -
  10.441  
  10.442  #if 0
  10.443 -    if( aac_count ){
  10.444 -	if((aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops))<0)
  10.445 -	    printk(KERN_WARNING "aacraid: unable to register 'aac' device.\n");
  10.446 -    }
  10.447 +	if( aac_count ){
  10.448 +		if((aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops))<0)
  10.449 +			printk(KERN_WARNING "aacraid: unable to register \"aac\" device.\n");
  10.450 +	}
  10.451  #endif
  10.452 +	spin_lock_irq(&io_request_lock);
  10.453  
  10.454 -    template->present = aac_count; /* # of cards of this type found */
  10.455 -    return aac_count;
  10.456 +	template->present = aac_count; /* # of cards of this type found */
  10.457 +	return aac_count;
  10.458  }
  10.459  
  10.460  /**
  10.461 @@ -353,38 +333,37 @@ static int aac_detect(Scsi_Host_Template
  10.462  
  10.463  static int aac_release(struct Scsi_Host *host_ptr)
  10.464  {
  10.465 -    struct aac_dev *dev;
  10.466 -    dprintk((KERN_DEBUG "aac_release.\n"));
  10.467 -    dev = (struct aac_dev *)host_ptr->hostdata;
  10.468 -    
  10.469 +	struct aac_dev *dev;
  10.470 +	dprintk((KERN_DEBUG "aac_release.\n"));
  10.471 +	dev = (struct aac_dev *)host_ptr->hostdata;
  10.472  #if 0
  10.473 -    /*
  10.474 -     *	kill any threads we started
  10.475 -     */
  10.476 -    kill_proc(dev->thread_pid, SIGKILL, 0);
  10.477 -    wait_for_completion(&dev->aif_completion);
  10.478 +	/*
  10.479 +	 *	kill any threads we started
  10.480 +	 */
  10.481 +	kill_proc(dev->thread_pid, SIGKILL, 0);
  10.482 +	wait_for_completion(&dev->aif_completion);
  10.483  #endif
  10.484 -    /*
  10.485 -     *	Call the comm layer to detach from this adapter
  10.486 -     */
  10.487 -    aac_detach(dev);
  10.488 -    /* Check free orderings... */
  10.489 -    /* remove interrupt binding */
  10.490 -    free_irq(host_ptr->irq, dev);
  10.491 -    iounmap((void * )dev->regs.sa);
  10.492 -    /* unregister adapter */
  10.493 -    scsi_unregister(host_ptr);
  10.494 -    /*
  10.495 -     *	FIXME: This assumes no hot plugging is going on...
  10.496 -     */
  10.497 -    if( aac_cfg_major >= 0 )
  10.498 -    {
  10.499 +	/*
  10.500 +	 *	Call the comm layer to detach from this adapter
  10.501 +	 */
  10.502 +	aac_detach(dev);
  10.503 +	/* Check free orderings... */
  10.504 +	/* remove interrupt binding */
  10.505 +	free_irq(host_ptr->irq, dev);
  10.506 +	iounmap((void * )dev->regs.sa);
  10.507 +	/* unregister adapter */
  10.508 +	scsi_unregister(host_ptr);
  10.509 +	/*
  10.510 +	 *	FIXME: This assumes no hot plugging is going on...
  10.511 +	 */
  10.512 +	if( aac_cfg_major >= 0 )
  10.513 +	{
  10.514  #if 0
  10.515 -	unregister_chrdev(aac_cfg_major, "aac");
  10.516 +		unregister_chrdev(aac_cfg_major, "aac");
  10.517  #endif
  10.518 -	aac_cfg_major = -1;
  10.519 -    }
  10.520 -    return 0;
  10.521 +		aac_cfg_major = -1;
  10.522 +	}
  10.523 +	return 0;
  10.524  }
  10.525  
  10.526  /**
  10.527 @@ -397,16 +376,16 @@ static int aac_release(struct Scsi_Host 
  10.528  
  10.529  static int aac_queuecommand(Scsi_Cmnd *scsi_cmnd_ptr, void (*complete)(Scsi_Cmnd *))
  10.530  {
  10.531 -    int ret;
  10.532 -    
  10.533 -    scsi_cmnd_ptr->scsi_done = complete;
  10.534 -    /*
  10.535 -     *	aac_scsi_cmd() handles command processing, setting the 
  10.536 -     *	result code and calling completion routine. 
  10.537 -     */
  10.538 -    if((ret = aac_scsi_cmd(scsi_cmnd_ptr)) != 0)
  10.539 -	dprintk((KERN_DEBUG "aac_scsi_cmd failed.\n"));
  10.540 -    return ret;
  10.541 +	int ret;
  10.542 +
  10.543 +	scsi_cmnd_ptr->scsi_done = complete;
  10.544 +	/*
  10.545 +	 *	aac_scsi_cmd() handles command processing, setting the 
  10.546 +	 *	result code and calling completion routine. 
  10.547 +	 */
  10.548 +	if((ret = aac_scsi_cmd(scsi_cmnd_ptr)) != 0)
  10.549 +		dprintk((KERN_DEBUG "aac_scsi_cmd failed.\n"));
  10.550 +	return ret;
  10.551  } 
  10.552  
  10.553  /**
  10.554 @@ -418,8 +397,8 @@ static int aac_queuecommand(Scsi_Cmnd *s
  10.555  
  10.556  const char *aac_driverinfo(struct Scsi_Host *host_ptr)
  10.557  {
  10.558 -    struct aac_dev *dev = (struct aac_dev *)host_ptr->hostdata;
  10.559 -    return aac_drivers[dev->cardtype].name;
  10.560 +	struct aac_dev *dev = (struct aac_dev *)host_ptr->hostdata;
  10.561 +	return aac_drivers[dev->cardtype].name;
  10.562  }
  10.563  
  10.564  /**
  10.565 @@ -456,111 +435,110 @@ struct aac_driver_ident* aac_get_driver_
  10.566   
  10.567  static int aac_biosparm(Scsi_Disk *disk, kdev_t dev, int *geom)
  10.568  {
  10.569 -    struct diskparm *param = (struct diskparm *)geom;
  10.570 -    /*struct buffer_head * buf;*/
  10.571 -    
  10.572 -    dprintk((KERN_DEBUG "aac_biosparm.\n"));
  10.573 +	struct diskparm *param = (struct diskparm *)geom;
  10.574 +#if 0
  10.575 +	struct buffer_head * buf;
  10.576 +#endif
  10.577  
  10.578 -    /*
  10.579 -     *	Assuming extended translation is enabled - #REVISIT#
  10.580 -     */
  10.581 -    if( disk->capacity >= 2 * 1024 * 1024 ) /* 1 GB in 512 byte sectors */
  10.582 -    {
  10.583 -	if( disk->capacity >= 4 * 1024 * 1024 ) /* 2 GB in 512 byte sectors */
  10.584 +	dprintk((KERN_DEBUG "aac_biosparm.\n"));
  10.585 +
  10.586 +	/*
  10.587 +	 *	Assuming extended translation is enabled - #REVISIT#
  10.588 +	 */
  10.589 +	if( disk->capacity >= 2 * 1024 * 1024 ) /* 1 GB in 512 byte sectors */
  10.590  	{
  10.591 -	    param->heads = 255;
  10.592 -	    param->sectors = 63;
  10.593 +		if( disk->capacity >= 4 * 1024 * 1024 ) /* 2 GB in 512 byte sectors */
  10.594 +		{
  10.595 +			param->heads = 255;
  10.596 +			param->sectors = 63;
  10.597 +		}
  10.598 +		else
  10.599 +		{
  10.600 +			param->heads = 128;
  10.601 +			param->sectors = 32;
  10.602 +		}
  10.603  	}
  10.604  	else
  10.605  	{
  10.606 -	    param->heads = 128;
  10.607 -	    param->sectors = 32;
  10.608 -	}
  10.609 -    }
  10.610 -    else
  10.611 -    {
  10.612 -	param->heads = 64;
  10.613 -	param->sectors = 32;
  10.614 -    }
  10.615 -    
  10.616 -    param->cylinders = disk->capacity/(param->heads * param->sectors);
  10.617 -	
  10.618 -#if 0
  10.619 -    /*
  10.620 -     *	Read the first 1024 bytes from the disk device
  10.621 -     */
  10.622 -    
  10.623 -    buf = bread(MKDEV(MAJOR(dev), MINOR(dev)&~0xf), 0, block_size(dev));
  10.624 -    if(buf == NULL)
  10.625 -	return 0;
  10.626 -    /* 
  10.627 -     *	If the boot sector partition table is valid, search for a partition 
  10.628 -     *	table entry whose end_head matches one of the standard geometry 
  10.629 -     *	translations ( 64/32, 128/32, 255/63 ).
  10.630 -     */
  10.631 -
  10.632 -	 
  10.633 -    if(*(unsigned short *)(buf->b_data + 0x1fe) == cpu_to_le16(0xaa55))
  10.634 -    {
  10.635 -	struct partition *first = (struct partition * )(buf->b_data + 0x1be);
  10.636 -	struct partition *entry = first;
  10.637 -	int saved_cylinders = param->cylinders;
  10.638 -	int num;
  10.639 -	unsigned char end_head, end_sec;
  10.640 -	
  10.641 -	for(num = 0; num < 4; num++)
  10.642 -	{
  10.643 -	    end_head = entry->end_head;
  10.644 -	    end_sec = entry->end_sector & 0x3f;
  10.645 -	    
  10.646 -	    if(end_head == 63)
  10.647 -	    {
  10.648  		param->heads = 64;
  10.649  		param->sectors = 32;
  10.650 -		break;
  10.651 -	    }
  10.652 -	    else if(end_head == 127)
  10.653 -	    {
  10.654 -		param->heads = 128;
  10.655 -		param->sectors = 32;
  10.656 -		break;
  10.657 -	    }
  10.658 -	    else if(end_head == 254) 
  10.659 -	    {
  10.660 -		param->heads = 255;
  10.661 -		param->sectors = 63;
  10.662 -		break;
  10.663 -	    }
  10.664 -	    entry++;
  10.665 -	}
  10.666 -	
  10.667 -	if(num == 4)
  10.668 -	{
  10.669 -	    end_head = first->end_head;
  10.670 -	    end_sec = first->end_sector & 0x3f;
  10.671  	}
  10.672 -	
  10.673 -	param->cylinders = disk->capacity / (param->heads * param->sectors);
  10.674 -	
  10.675 -	if(num < 4 && end_sec == param->sectors)
  10.676 +
  10.677 +	param->cylinders = disk->capacity/(param->heads * param->sectors);
  10.678 +
  10.679 +#if 0
  10.680 +	/*
  10.681 +	 *	Read the first 1024 bytes from the disk device
  10.682 +	 */
  10.683 +
  10.684 +	buf = bread(MKDEV(MAJOR(dev), MINOR(dev)&~0xf), 0, block_size(dev));
  10.685 +	if(buf == NULL)
  10.686 +		return 0;
  10.687 +	/* 
  10.688 +	 *	If the boot sector partition table is valid, search for a partition 
  10.689 +	 *	table entry whose end_head matches one of the standard geometry 
  10.690 +	 *	translations ( 64/32, 128/32, 255/63 ).
  10.691 +	 */
  10.692 +	 
  10.693 +	if(*(unsigned short *)(buf->b_data + 0x1fe) == cpu_to_le16(0xaa55))
  10.694  	{
  10.695 -	    if(param->cylinders != saved_cylinders)
  10.696 -		dprintk((KERN_DEBUG "Adopting geometry: heads=%d, "
  10.697 -			 "sectors=%d from partition table %d.\n",
  10.698 -			 param->heads, param->sectors, num));
  10.699 +		struct partition *first = (struct partition * )(buf->b_data + 0x1be);
  10.700 +		struct partition *entry = first;
  10.701 +		int saved_cylinders = param->cylinders;
  10.702 +		int num;
  10.703 +		unsigned char end_head, end_sec;
  10.704 +
  10.705 +		for(num = 0; num < 4; num++)
  10.706 +		{
  10.707 +			end_head = entry->end_head;
  10.708 +			end_sec = entry->end_sector & 0x3f;
  10.709 +
  10.710 +			if(end_head == 63)
  10.711 +			{
  10.712 +				param->heads = 64;
  10.713 +				param->sectors = 32;
  10.714 +				break;
  10.715 +			}
  10.716 +			else if(end_head == 127)
  10.717 +			{
  10.718 +				param->heads = 128;
  10.719 +				param->sectors = 32;
  10.720 +				break;
  10.721 +			}
  10.722 +			else if(end_head == 254) 
  10.723 +			{
  10.724 +				param->heads = 255;
  10.725 +				param->sectors = 63;
  10.726 +				break;
  10.727 +			}
  10.728 +			entry++;
  10.729 +		}
  10.730 +
  10.731 +		if(num == 4)
  10.732 +		{
  10.733 +			end_head = first->end_head;
  10.734 +			end_sec = first->end_sector & 0x3f;
  10.735 +		}
  10.736 +
  10.737 +		param->cylinders = disk->capacity / (param->heads * param->sectors);
  10.738 +
  10.739 +		if(num < 4 && end_sec == param->sectors)
  10.740 +		{
  10.741 +			if(param->cylinders != saved_cylinders)
  10.742 +				dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
  10.743 +					param->heads, param->sectors, num));
  10.744 +		}
  10.745 +		else if(end_head > 0 || end_sec > 0)
  10.746 +		{
  10.747 +			dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
  10.748 +				end_head + 1, end_sec, num));
  10.749 +			dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
  10.750 +					param->heads, param->sectors));
  10.751 +		}
  10.752  	}
  10.753 -	else if(end_head > 0 || end_sec > 0)
  10.754 -	{
  10.755 -	    dprintk((KERN_DEBUG "Strange geometry: heads=%d, "
  10.756 -		     "sectors=%d in partition table %d.\n",
  10.757 -		     end_head + 1, end_sec, num));
  10.758 -	    dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
  10.759 -		     param->heads, param->sectors));
  10.760 -	}
  10.761 -    }
  10.762 -    brelse(buf);
  10.763 +	brelse(buf);
  10.764  #endif
  10.765 -    return 0;
  10.766 +	return 0;
  10.767  }
  10.768  
  10.769  /**
  10.770 @@ -575,20 +553,20 @@ static int aac_biosparm(Scsi_Disk *disk,
  10.771  
  10.772  static void aac_queuedepth(struct Scsi_Host * host, Scsi_Device * dev )
  10.773  {
  10.774 -    Scsi_Device * dptr;
  10.775 -    
  10.776 -    dprintk((KERN_DEBUG "aac_queuedepth.\n"));
  10.777 -    dprintk((KERN_DEBUG "Device #   Q Depth   Online\n"));
  10.778 -    dprintk((KERN_DEBUG "---------------------------\n"));
  10.779 -    for(dptr = dev; dptr != NULL; dptr = dptr->next)
  10.780 -    {
  10.781 -	if(dptr->host == host)
  10.782 +	Scsi_Device * dptr;
  10.783 +
  10.784 +	dprintk((KERN_DEBUG "aac_queuedepth.\n"));
  10.785 +	dprintk((KERN_DEBUG "Device #   Q Depth   Online\n"));
  10.786 +	dprintk((KERN_DEBUG "---------------------------\n"));
  10.787 +	for(dptr = dev; dptr != NULL; dptr = dptr->next)
  10.788  	{
  10.789 -	    dptr->queue_depth = 10;		
  10.790 -	    dprintk((KERN_DEBUG "  %2d         %d        %d\n", 
  10.791 -		     dptr->id, dptr->queue_depth, dptr->online));
  10.792 +		if(dptr->host == host)
  10.793 +		{
  10.794 +			dptr->queue_depth = 10;		
  10.795 +			dprintk((KERN_DEBUG "  %2d         %d        %d\n", 
  10.796 +				dptr->id, dptr->queue_depth, dptr->online));
  10.797 +		}
  10.798  	}
  10.799 -    }
  10.800  }
  10.801  
  10.802  
  10.803 @@ -603,7 +581,7 @@ static void aac_queuedepth(struct Scsi_H
  10.804   
  10.805  static int aac_eh_abort(Scsi_Cmnd *cmd)
  10.806  {
  10.807 -    return FAILED;
  10.808 +	return FAILED;
  10.809  }
  10.810  
  10.811  /**
  10.812 @@ -617,7 +595,7 @@ static int aac_eh_abort(Scsi_Cmnd *cmd)
  10.813  
  10.814  static int aac_eh_device_reset(Scsi_Cmnd *cmd)
  10.815  {
  10.816 -    return FAILED;
  10.817 +	return FAILED;
  10.818  }
  10.819  
  10.820  /**
  10.821 @@ -631,7 +609,7 @@ static int aac_eh_device_reset(Scsi_Cmnd
  10.822  
  10.823  static int aac_eh_bus_reset(Scsi_Cmnd* cmd)
  10.824  {
  10.825 -    return FAILED;
  10.826 +	return FAILED;
  10.827  }
  10.828  
  10.829  /**
  10.830 @@ -645,8 +623,8 @@ static int aac_eh_bus_reset(Scsi_Cmnd* c
  10.831  
  10.832  static int aac_eh_reset(Scsi_Cmnd* cmd)
  10.833  {
  10.834 -    printk(KERN_ERR "aacraid: Host adapter reset request. SCSI hang ?\n");
  10.835 -    return FAILED;
  10.836 +	printk(KERN_ERR "aacraid: Host adapter reset request. SCSI hang ?\n");
  10.837 +	return FAILED;
  10.838  }
  10.839  
  10.840  /**
  10.841 @@ -661,10 +639,10 @@ static int aac_eh_reset(Scsi_Cmnd* cmd)
  10.842   
  10.843  static int aac_ioctl(Scsi_Device * scsi_dev_ptr, int cmd, void * arg)
  10.844  {
  10.845 -    struct aac_dev *dev;
  10.846 -    dprintk((KERN_DEBUG "aac_ioctl.\n"));
  10.847 -    dev = (struct aac_dev *)scsi_dev_ptr->host->hostdata;
  10.848 -    return aac_do_ioctl(dev, cmd, arg);
  10.849 +	struct aac_dev *dev;
  10.850 +	dprintk((KERN_DEBUG "aac_ioctl.\n"));
  10.851 +	dev = (struct aac_dev *)scsi_dev_ptr->host->hostdata;
  10.852 +	return aac_do_ioctl(dev, cmd, arg);
  10.853  }
  10.854  
  10.855  /**
  10.856 @@ -681,10 +659,10 @@ static int aac_ioctl(Scsi_Device * scsi_
  10.857  
  10.858  static int aac_cfg_open(struct inode * inode, struct file * file )
  10.859  {
  10.860 -    unsigned minor_number = MINOR(inode->i_rdev);
  10.861 -    if(minor_number >= aac_count)
  10.862 -	return -ENODEV;
  10.863 -    return 0;
  10.864 +	unsigned minor_number = MINOR(inode->i_rdev);
  10.865 +	if(minor_number >= aac_count)
  10.866 +		return -ENODEV;
  10.867 +	return 0;
  10.868  }
  10.869  
  10.870  /**
  10.871 @@ -698,7 +676,7 @@ static int aac_cfg_open(struct inode * i
  10.872   
  10.873  static int aac_cfg_release(struct inode * inode, struct file * file )
  10.874  {
  10.875 -    return 0;
  10.876 +	return 0;
  10.877  }
  10.878  
  10.879  /**
  10.880 @@ -717,8 +695,8 @@ static int aac_cfg_release(struct inode 
  10.881   
  10.882  static int aac_cfg_ioctl(struct inode * inode,  struct file * file, unsigned int cmd, unsigned long arg )
  10.883  {
  10.884 -    struct aac_dev *dev = aac_devices[MINOR(inode->i_rdev)];
  10.885 -    return aac_do_ioctl(dev, cmd, (void *)arg);
  10.886 +	struct aac_dev *dev = aac_devices[MINOR(inode->i_rdev)];
  10.887 +	return aac_do_ioctl(dev, cmd, (void *)arg);
  10.888  }
  10.889  
  10.890  /*
  10.891 @@ -728,9 +706,13 @@ static int aac_cfg_ioctl(struct inode * 
  10.892   */
  10.893   
  10.894  static Scsi_Host_Template driver_template = {
  10.895 -/*	module:			THIS_MODULE, */
  10.896 +#if 0
  10.897 +	module:			THIS_MODULE,
  10.898 +#endif
  10.899  	name:           	"AAC",
  10.900 -/*	proc_info:      	aac_procinfo, */
  10.901 +#if 0
  10.902 +	proc_info:      	aac_procinfo,
  10.903 +#endif
  10.904  	detect:         	aac_detect,
  10.905  	release:        	aac_release,
  10.906  	info:           	aac_driverinfo,
  10.907 @@ -780,11 +762,13 @@ static Scsi_Host_Template driver_templat
  10.908  static int aac_procinfo(char *proc_buffer, char **start_ptr,off_t offset,
  10.909  			int bytes_available, int host_no, int write)
  10.910  {
  10.911 -    if(write || offset > 0)
  10.912 -	return 0;
  10.913 -    *start_ptr = proc_buffer;
  10.914 -    return sprintf(proc_buffer, "%s  %d\n", 
  10.915 -		   "Raid Controller, scsi hba number", host_no);
  10.916 +	if(write || offset > 0)
  10.917 +		return 0;
  10.918 +	*start_ptr = proc_buffer;
  10.919 +	return sprintf(proc_buffer,
  10.920 +	  "Adaptec Raid Controller %s %s, scsi hba number %d\n",
  10.921 +	  AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE,
  10.922 +	  host_no);
  10.923  }
  10.924  #endif
  10.925  
    11.1 --- a/xen/drivers/scsi/aacraid/rx.c	Wed Aug 06 20:53:30 2003 +0000
    11.2 +++ b/xen/drivers/scsi/aacraid/rx.c	Thu Aug 07 12:24:12 2003 +0000
    11.3 @@ -28,18 +28,18 @@
    11.4   *
    11.5   */
    11.6  
    11.7 -#include <xeno/config.h>
    11.8 -#include <xeno/kernel.h>
    11.9 -#include <xeno/init.h>
   11.10 -#include <xeno/types.h>
   11.11 -#include <xeno/sched.h>
   11.12 -#include <xeno/pci.h>
   11.13 -/*  #include <xeno/spinlock.h> */
   11.14 -/*  #include <xeno/slab.h> */
   11.15 -#include <xeno/blk.h>
   11.16 -#include <xeno/delay.h>
   11.17 -/*  #include <xeno/completion.h> */
   11.18 -/*  #include <asm/semaphore.h> */
   11.19 +#include <linux/config.h>
   11.20 +#include <linux/kernel.h>
   11.21 +#include <linux/init.h>
   11.22 +#include <linux/types.h>
   11.23 +#include <linux/sched.h>
   11.24 +#include <linux/pci.h>
   11.25 +#include <linux/spinlock.h>
   11.26 +#include <linux/slab.h>
   11.27 +#include <linux/blk.h>
   11.28 +#include <linux/delay.h>
   11.29 +/*#include <linux/completion.h>*/
   11.30 +/*#include <asm/semaphore.h>*/
   11.31  #include "scsi.h"
   11.32  #include "hosts.h"
   11.33  
   11.34 @@ -47,42 +47,41 @@
   11.35  
   11.36  static void aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
   11.37  {
   11.38 -    struct aac_dev *dev = dev_id;
   11.39 -    unsigned long bellbits;
   11.40 -    u8 intstat, mask;
   11.41 -    intstat = rx_readb(dev, MUnit.OISR);
   11.42 -    /*
   11.43 -     *	Read mask and invert because drawbridge is reversed.
   11.44 -     *	This allows us to only service interrupts that have 
   11.45 -     *	been enabled.
   11.46 -     */
   11.47 -    mask = ~(rx_readb(dev, MUnit.OIMR));
   11.48 -    /* Check to see if this is our interrupt.  If it isn't just return */
   11.49 -    
   11.50 -    if (intstat & mask) 
   11.51 -    {
   11.52 -	bellbits = rx_readl(dev, OutboundDoorbellReg);
   11.53 -	if (bellbits & DoorBellPrintfReady) {
   11.54 -	    aac_printf(dev, le32_to_cpu(rx_readl (dev, IndexRegs.Mailbox[5])));
   11.55 -	    rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
   11.56 -	    rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
   11.57 +	struct aac_dev *dev = dev_id;
   11.58 +	unsigned long bellbits;
   11.59 +	u8 intstat, mask;
   11.60 +	intstat = rx_readb(dev, MUnit.OISR);
   11.61 +	/*
   11.62 +	 *	Read mask and invert because drawbridge is reversed.
   11.63 +	 *	This allows us to only service interrupts that have 
   11.64 +	 *	been enabled.
   11.65 +	 */
   11.66 +	mask = ~(rx_readb(dev, MUnit.OIMR));
   11.67 +	/* Check to see if this is our interrupt.  If it isn't just return */
   11.68 +	if (intstat & mask) 
   11.69 +	{
   11.70 +		bellbits = rx_readl(dev, OutboundDoorbellReg);
   11.71 +		if (bellbits & DoorBellPrintfReady) {
   11.72 +			aac_printf(dev, le32_to_cpu(rx_readl (dev, IndexRegs.Mailbox[5])));
   11.73 +			rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
   11.74 +			rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
   11.75 +		}
   11.76 +		else if (bellbits & DoorBellAdapterNormCmdReady) {
   11.77 +			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
   11.78 +			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
   11.79 +		}
   11.80 +		else if (bellbits & DoorBellAdapterNormRespReady) {
   11.81 +			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
   11.82 +			rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
   11.83 +		}
   11.84 +		else if (bellbits & DoorBellAdapterNormCmdNotFull) {
   11.85 +			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
   11.86 +		}
   11.87 +		else if (bellbits & DoorBellAdapterNormRespNotFull) {
   11.88 +			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
   11.89 +			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
   11.90 +		}
   11.91  	}
   11.92 -	else if (bellbits & DoorBellAdapterNormCmdReady) {
   11.93 -	    aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
   11.94 -	    rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
   11.95 -	}
   11.96 -	else if (bellbits & DoorBellAdapterNormRespReady) {
   11.97 -	    aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
   11.98 -	    rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
   11.99 -	}
  11.100 -	else if (bellbits & DoorBellAdapterNormCmdNotFull) {
  11.101 -	    rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
  11.102 -	}
  11.103 -	else if (bellbits & DoorBellAdapterNormRespNotFull) {
  11.104 -	    rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
  11.105 -	    rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
  11.106 -	}
  11.107 -    }
  11.108  }
  11.109  
  11.110  /**
  11.111 @@ -95,24 +94,24 @@ static void aac_rx_intr(int irq, void *d
  11.112   
  11.113  static void aac_rx_enable_interrupt(struct aac_dev * dev, u32 event)
  11.114  {
  11.115 -    switch (event) {
  11.116 -	
  11.117 -    case HostNormCmdQue:
  11.118 -	dev->irq_mask &= ~(OUTBOUNDDOORBELL_1);
  11.119 -	break;
  11.120 -	
  11.121 -    case HostNormRespQue:
  11.122 -	dev->irq_mask &= ~(OUTBOUNDDOORBELL_2);
  11.123 -	break;
  11.124 -	
  11.125 -    case AdapNormCmdNotFull:
  11.126 -	dev->irq_mask &= ~(OUTBOUNDDOORBELL_3);
  11.127 -	break;
  11.128 -	
  11.129 -    case AdapNormRespNotFull:
  11.130 -	dev->irq_mask &= ~(OUTBOUNDDOORBELL_4);
  11.131 -	break;
  11.132 -    }
  11.133 +	switch (event) {
  11.134 +
  11.135 +	case HostNormCmdQue:
  11.136 +		dev->irq_mask &= ~(OUTBOUNDDOORBELL_1);
  11.137 +		break;
  11.138 +
  11.139 +	case HostNormRespQue:
  11.140 +		dev->irq_mask &= ~(OUTBOUNDDOORBELL_2);
  11.141 +		break;
  11.142 +
  11.143 +	case AdapNormCmdNotFull:
  11.144 +		dev->irq_mask &= ~(OUTBOUNDDOORBELL_3);
  11.145 +		break;
  11.146 +
  11.147 +	case AdapNormRespNotFull:
  11.148 +		dev->irq_mask &= ~(OUTBOUNDDOORBELL_4);
  11.149 +		break;
  11.150 +	}
  11.151  }
  11.152  
  11.153  /**
  11.154 @@ -125,24 +124,24 @@ static void aac_rx_enable_interrupt(stru
  11.155  
  11.156  static void aac_rx_disable_interrupt(struct aac_dev *dev, u32 event)
  11.157  {
  11.158 -    switch (event) {
  11.159 -	
  11.160 -    case HostNormCmdQue:
  11.161 -	dev->irq_mask |= (OUTBOUNDDOORBELL_1);
  11.162 -	break;
  11.163 -	
  11.164 -    case HostNormRespQue:
  11.165 -	dev->irq_mask |= (OUTBOUNDDOORBELL_2);
  11.166 -	break;
  11.167 -	
  11.168 -    case AdapNormCmdNotFull:
  11.169 -	dev->irq_mask |= (OUTBOUNDDOORBELL_3);
  11.170 -	break;
  11.171 -	
  11.172 -    case AdapNormRespNotFull:
  11.173 -	dev->irq_mask |= (OUTBOUNDDOORBELL_4);
  11.174 -	break;
  11.175 -    }
  11.176 +	switch (event) {
  11.177 +
  11.178 +	case HostNormCmdQue:
  11.179 +		dev->irq_mask |= (OUTBOUNDDOORBELL_1);
  11.180 +		break;
  11.181 +
  11.182 +	case HostNormRespQue:
  11.183 +		dev->irq_mask |= (OUTBOUNDDOORBELL_2);
  11.184 +		break;
  11.185 +
  11.186 +	case AdapNormCmdNotFull:
  11.187 +		dev->irq_mask |= (OUTBOUNDDOORBELL_3);
  11.188 +		break;
  11.189 +
  11.190 +	case AdapNormRespNotFull:
  11.191 +		dev->irq_mask |= (OUTBOUNDDOORBELL_4);
  11.192 +		break;
  11.193 +	}
  11.194  }
  11.195  
  11.196  /**
  11.197 @@ -158,91 +157,89 @@ static void aac_rx_disable_interrupt(str
  11.198  
  11.199  static int rx_sync_cmd(struct aac_dev *dev, u32 command, u32 p1, u32 *status)
  11.200  {
  11.201 -    unsigned long start;
  11.202 -    int ok;
  11.203 -    /*
  11.204 -     *	Write the command into Mailbox 0
  11.205 -     */
  11.206 -    rx_writel(dev, InboundMailbox0, cpu_to_le32(command));
  11.207 -    /*
  11.208 -     *	Write the parameters into Mailboxes 1 - 4
  11.209 -     */
  11.210 -    rx_writel(dev, InboundMailbox1, cpu_to_le32(p1));
  11.211 -    rx_writel(dev, InboundMailbox2, 0);
  11.212 -    rx_writel(dev, InboundMailbox3, 0);
  11.213 -    rx_writel(dev, InboundMailbox4, 0);
  11.214 -    /*
  11.215 -     *	Clear the synch command doorbell to start on a clean slate.
  11.216 -     */
  11.217 -    rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
  11.218 -    /*
  11.219 -     *	Disable doorbell interrupts
  11.220 -     */
  11.221 -    rx_writeb(dev, MUnit.OIMR, rx_readb(dev, MUnit.OIMR) | 0x04);
  11.222 -    /*
  11.223 -     *	Force the completion of the mask register write before issuing
  11.224 -     *	the interrupt.
  11.225 -     */
  11.226 -    rx_readb (dev, MUnit.OIMR);
  11.227 -    /*
  11.228 -     *	Signal that there is a new synch command
  11.229 -     */
  11.230 -    rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
  11.231 -    
  11.232 -    ok = 0;
  11.233 -    start = jiffies;
  11.234 -    
  11.235 -    /*
  11.236 -     *	Wait up to 30 seconds
  11.237 -     */
  11.238 -    while (time_before(jiffies, start+30*HZ)) 
  11.239 -    {
  11.240 -	/* Delay 5 microseconds to let Mon960 get info. */
  11.241 -	udelay(5);	
  11.242 +	unsigned long start;
  11.243 +	int ok;
  11.244  	/*
  11.245 -	 *	Mon960 will set doorbell0 bit when its completed the command.
  11.246 +	 *	Write the command into Mailbox 0
  11.247  	 */
  11.248 -	if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
  11.249 -	    /*
  11.250 -	     *	Clear the doorbell.
  11.251 -	     */
  11.252 -	    rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
  11.253 -	    ok = 1;
  11.254 -	    break;
  11.255 -	}
  11.256 -#if 0
  11.257 +	rx_writel(dev, InboundMailbox0, cpu_to_le32(command));
  11.258  	/*
  11.259 -	 *	Yield the processor in case we are slow 
  11.260 +	 *	Write the parameters into Mailboxes 1 - 4
  11.261  	 */
  11.262 -	set_current_state(TASK_UNINTERRUPTIBLE);
  11.263 -	schedule_timeout(1);
  11.264 -#else 
  11.265 -	/* XXX SMH: not in xen we don't */
  11.266 -	mdelay(50); 
  11.267 +	rx_writel(dev, InboundMailbox1, cpu_to_le32(p1));
  11.268 +	rx_writel(dev, InboundMailbox2, 0);
  11.269 +	rx_writel(dev, InboundMailbox3, 0);
  11.270 +	rx_writel(dev, InboundMailbox4, 0);
  11.271 +	/*
  11.272 +	 *	Clear the synch command doorbell to start on a clean slate.
  11.273 +	 */
  11.274 +	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
  11.275 +	/*
  11.276 +	 *	Disable doorbell interrupts
  11.277 +	 */
  11.278 +	rx_writeb(dev, MUnit.OIMR, rx_readb(dev, MUnit.OIMR) | 0x04);
  11.279 +	/*
  11.280 +	 *	Force the completion of the mask register write before issuing
  11.281 +	 *	the interrupt.
  11.282 +	 */
  11.283 +	rx_readb (dev, MUnit.OIMR);
  11.284 +	/*
  11.285 +	 *	Signal that there is a new synch command
  11.286 +	 */
  11.287 +	rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
  11.288 +
  11.289 +	ok = 0;
  11.290 +	start = jiffies;
  11.291 +
  11.292 +	/*
  11.293 +	 *	Wait up to 30 seconds
  11.294 +	 */
  11.295 +	while (time_before(jiffies, start+30*HZ)) 
  11.296 +	{
  11.297 +		udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
  11.298 +		/*
  11.299 +		 *	Mon960 will set doorbell0 bit when it has completed the command.
  11.300 +		 */
  11.301 +		if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
  11.302 +			/*
  11.303 +			 *	Clear the doorbell.
  11.304 +			 */
  11.305 +			rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
  11.306 +			ok = 1;
  11.307 +			break;
  11.308 +		}
  11.309 +#if 0
  11.310 +		/*
  11.311 +		 *	Yield the processor in case we are slow 
  11.312 +		 */
  11.313 +		set_current_state(TASK_UNINTERRUPTIBLE);
  11.314 +		schedule_timeout(1);
  11.315 +#else
  11.316 +		/* XXX SMH: not in xen we don't */
  11.317 +		mdelay(50); 
  11.318  #endif
  11.319 -	
  11.320 -    }
  11.321 -    if (ok != 1) {
  11.322 +	}
  11.323 +	if (ok != 1) {
  11.324 +		/*
  11.325 +		 *	Restore interrupt mask even though we timed out
  11.326 +		 */
  11.327 +		rx_writeb(dev, MUnit.OIMR, rx_readl(dev, MUnit.OIMR) & 0xfb);
  11.328 +		return -ETIMEDOUT;
  11.329 +	}
  11.330  	/*
  11.331 -	 *	Restore interrupt mask even though we timed out
  11.332 +	 *	Pull the synch status from Mailbox 0.
  11.333 +	 */
  11.334 +	*status = le32_to_cpu(rx_readl(dev, IndexRegs.Mailbox[0]));
  11.335 +	/*
  11.336 +	 *	Clear the synch command doorbell.
  11.337 +	 */
  11.338 +	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
  11.339 +	/*
  11.340 +	 *	Restore interrupt mask
  11.341  	 */
  11.342  	rx_writeb(dev, MUnit.OIMR, rx_readl(dev, MUnit.OIMR) & 0xfb);
  11.343 -	return -ETIMEDOUT;
  11.344 -    }
  11.345 -    /*
  11.346 -     *	Pull the synch status from Mailbox 0.
  11.347 -     */
  11.348 -    *status = le32_to_cpu(rx_readl(dev, IndexRegs.Mailbox[0]));
  11.349 -    /*
  11.350 -     *	Clear the synch command doorbell.
  11.351 -     */
  11.352 -    rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
  11.353 -    /*
  11.354 -     *	Restore interrupt mask
  11.355 -     */
  11.356 -    rx_writeb(dev, MUnit.OIMR, rx_readl(dev, MUnit.OIMR) & 0xfb);
  11.357 -    return 0;
  11.358 -    
  11.359 +	return 0;
  11.360 +
  11.361  }
  11.362  
  11.363  /**
  11.364 @@ -254,8 +251,8 @@ static int rx_sync_cmd(struct aac_dev *d
  11.365  
  11.366  static void aac_rx_interrupt_adapter(struct aac_dev *dev)
  11.367  {
  11.368 -    u32 ret;
  11.369 -    rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
  11.370 +	u32 ret;
  11.371 +	rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, &ret);
  11.372  }
  11.373  
  11.374  /**
  11.375 @@ -269,33 +266,33 @@ static void aac_rx_interrupt_adapter(str
  11.376  
  11.377  static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
  11.378  {
  11.379 -    switch (event) {
  11.380 -	
  11.381 -    case AdapNormCmdQue:
  11.382 -	rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
  11.383 -	break;
  11.384 -    case HostNormRespNotFull:
  11.385 -	rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
  11.386 -	break;
  11.387 -    case AdapNormRespQue:
  11.388 -	rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
  11.389 -	break;
  11.390 -    case HostNormCmdNotFull:
  11.391 -	rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
  11.392 -	break;
  11.393 -    case HostShutdown:
  11.394 +	switch (event) {
  11.395 +
  11.396 +	case AdapNormCmdQue:
  11.397 +		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
  11.398 +		break;
  11.399 +	case HostNormRespNotFull:
  11.400 +		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
  11.401 +		break;
  11.402 +	case AdapNormRespQue:
  11.403 +		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
  11.404 +		break;
  11.405 +	case HostNormCmdNotFull:
  11.406 +		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
  11.407 +		break;
  11.408 +	case HostShutdown:
  11.409  //		rx_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, &ret);
  11.410 -	break;
  11.411 -    case FastIo:
  11.412 -	rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
  11.413 -	break;
  11.414 -    case AdapPrintfDone:
  11.415 -	rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
  11.416 -	break;
  11.417 -    default:
  11.418 -	BUG();
  11.419 -	break;
  11.420 -    }
  11.421 +		break;
  11.422 +	case FastIo:
  11.423 +		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
  11.424 +		break;
  11.425 +	case AdapPrintfDone:
  11.426 +		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
  11.427 +		break;
  11.428 +	default:
  11.429 +		BUG();
  11.430 +		break;
  11.431 +	}
  11.432  }
  11.433  
  11.434  /**
  11.435 @@ -307,31 +304,27 @@ static void aac_rx_notify_adapter(struct
  11.436  
  11.437  static void aac_rx_start_adapter(struct aac_dev *dev)
  11.438  {
  11.439 -    u32 status;
  11.440 -    struct aac_init *init;
  11.441 -    
  11.442 -    init = dev->init;
  11.443 +	u32 status;
  11.444 +	struct aac_init *init;
  11.445  
  11.446 -    init->HostElapsedSeconds = cpu_to_le32(jiffies/HZ);
  11.447 -    /*
  11.448 -     *	Tell the adapter we are back and up and running so it will scan
  11.449 -     *	its command queues and enable our interrupts
  11.450 -     */
  11.451 -    dev->irq_mask = (DoorBellPrintfReady | OUTBOUNDDOORBELL_1 | 
  11.452 -		     OUTBOUNDDOORBELL_2 | OUTBOUNDDOORBELL_3 | 
  11.453 -		     OUTBOUNDDOORBELL_4);
  11.454 -    /*
  11.455 -     *	First clear out all interrupts.  Then enable the one's that we
  11.456 -     *	can handle.
  11.457 -     */
  11.458 -    rx_writeb(dev, MUnit.OIMR, 0xff);
  11.459 -    rx_writel(dev, MUnit.ODR, 0xffffffff);
  11.460 +	init = dev->init;
  11.461 +	init->HostElapsedSeconds = cpu_to_le32(jiffies/HZ);
  11.462 +	/*
  11.463 +	 *	Tell the adapter we are back and up and running so it will scan
  11.464 +	 *	its command queues and enable our interrupts
  11.465 +	 */
  11.466 +	dev->irq_mask = (DoorBellPrintfReady | OUTBOUNDDOORBELL_1 | OUTBOUNDDOORBELL_2 | OUTBOUNDDOORBELL_3 | OUTBOUNDDOORBELL_4);
  11.467 +	/*
  11.468 +	 *	First clear out all interrupts.  Then enable the one's that we
  11.469 +	 *	can handle.
  11.470 +	 */
  11.471 +	rx_writeb(dev, MUnit.OIMR, 0xff);
  11.472 +	rx_writel(dev, MUnit.ODR, 0xffffffff);
  11.473  //	rx_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
  11.474 -    rx_writeb(dev, MUnit.OIMR, 0xfb);
  11.475 -    
  11.476 -    // We can only use a 32 bit address here
  11.477 -    rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, 
  11.478 -		(u32)(ulong)dev->init_pa, &status);
  11.479 +	rx_writeb(dev, MUnit.OIMR, 0xfb);
  11.480 +
  11.481 +	// We can only use a 32 bit address here
  11.482 +	rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, &status);
  11.483  }
  11.484  
  11.485  /**
  11.486 @@ -346,112 +339,91 @@ static void aac_rx_start_adapter(struct 
  11.487  
  11.488  int aac_rx_init(struct aac_dev *dev, unsigned long num)
  11.489  {
  11.490 -    unsigned long start;
  11.491 -    unsigned long status;
  11.492 -    int instance;
  11.493 -    const char * name;
  11.494 -    
  11.495 -    dev->devnum = num;
  11.496 -    instance = dev->id;
  11.497 -    name     = dev->name;
  11.498 -    
  11.499 -    dprintk((KERN_ERR "aac_rx_init called, num %ld, scsi host ptr = %p\n", 
  11.500 -	     num, (void *)(dev->scsi_host_ptr))); 
  11.501 -    
  11.502 -    dprintk((KERN_ERR "scsi_host_ptr->base is %p\n", 
  11.503 -	     (void *)dev->scsi_host_ptr->base)); 
  11.504 -    /*
  11.505 -     *	Map in the registers from the adapter.
  11.506 -     */
  11.507 -    if((dev->regs.rx = (struct rx_registers *)
  11.508 -	ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
  11.509 -    {	
  11.510 -	printk(KERN_WARNING "aacraid: unable to map i960.\n" );
  11.511 -	return -1;
  11.512 -    }
  11.513 -    
  11.514 -//	dprintk((KERN_ERR "aac_rx_init: AAA\n")); 
  11.515 -    /*
  11.516 -     *	Check to see if the board failed any self tests.
  11.517 -     */
  11.518 -    if (rx_readl(dev, IndexRegs.Mailbox[7]) & SELF_TEST_FAILED) {
  11.519 -	printk(KERN_ERR "%s%d: adapter self-test failed.\n", 
  11.520 -	       dev->name, instance);
  11.521 -	return -1;
  11.522 -    }
  11.523 -    
  11.524 -    
  11.525 -//	dprintk((KERN_ERR "aac_rx_init: BBB\n")); 
  11.526 -    /*
  11.527 -     *	Check to see if the board panic'd while booting.
  11.528 -     */
  11.529 -    if (rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_PANIC) {
  11.530 -	printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", 
  11.531 -	       dev->name, instance);
  11.532 -	return -1;
  11.533 -    }
  11.534 -    start = jiffies;
  11.535 -    
  11.536 -//	dprintk((KERN_ERR "aac_rx_init: DDD\n")); 
  11.537 -    /*
  11.538 -     *	Wait for the adapter to be up and running. Wait up to 3 minutes
  11.539 -     */
  11.540 -    while (!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING)) 
  11.541 -    {
  11.542 -	if(time_after(jiffies, start+180*HZ))
  11.543 +	unsigned long start;
  11.544 +	unsigned long status;
  11.545 +	int instance;
  11.546 +	const char * name;
  11.547 +
  11.548 +	dev->devnum = num;
  11.549 +	instance = dev->id;
  11.550 +	name     = dev->name;
  11.551 +
  11.552 +	/*
  11.553 +	 *	Map in the registers from the adapter.
  11.554 +	 */
  11.555 +	if((dev->regs.rx = (struct rx_registers *)ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
  11.556 +	{	
  11.557 +		printk(KERN_WARNING "aacraid: unable to map i960.\n" );
  11.558 +		return -1;
  11.559 +	}
  11.560 +	/*
  11.561 +	 *	Check to see if the board failed any self tests.
  11.562 +	 */
  11.563 +	if (rx_readl(dev, IndexRegs.Mailbox[7]) & SELF_TEST_FAILED) {
  11.564 +		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
  11.565 +		return -1;
  11.566 +	}
  11.567 +	/*
  11.568 +	 *	Check to see if the board panic'd while booting.
  11.569 +	 */
  11.570 +	if (rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_PANIC) {
  11.571 +		printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", dev->name, instance);
  11.572 +		return -1;
  11.573 +	}
  11.574 +	start = jiffies;
  11.575 +	/*
  11.576 +	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
  11.577 +	 */
  11.578 +	while (!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING)) 
  11.579  	{
  11.580 -	    status = rx_readl(dev, IndexRegs.Mailbox[7]) >> 16;
  11.581 -	    printk(KERN_ERR "%s%d: adapter kernel failed to start,"
  11.582 -		   "init status = %ld.\n", dev->name, 
  11.583 -		   instance, status);
  11.584 -	    return -1;
  11.585 -	}
  11.586 -// dprintk((KERN_ERR "aac_rx_init: XXX\n")); 
  11.587 -	
  11.588 -#if 0 
  11.589 -	set_current_state(TASK_UNINTERRUPTIBLE);
  11.590 -	schedule_timeout(1);
  11.591 +		if(time_after(jiffies, start+180*HZ))
  11.592 +		{
  11.593 +			status = rx_readl(dev, IndexRegs.Mailbox[7]) >> 16;
  11.594 +			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %ld.\n", dev->name, instance, status);
  11.595 +			return -1;
  11.596 +		}
  11.597 +#if 0
  11.598 +		set_current_state(TASK_UNINTERRUPTIBLE);
  11.599 +		schedule_timeout(1);
  11.600  #else
  11.601 -	/* XXX SMH: no sleeping for us (we're the xen idle task) */
  11.602 -	mdelay(50); 
  11.603 +		/* XXX SMH: no sleeping for us (we're the xen idle task) */
  11.604 +		mdelay(50); 
  11.605  #endif
  11.606 -	
  11.607 -    }
  11.608 -    
  11.609 -//	dprintk((KERN_ERR "aac_rx_init: ZZZ!\n")); 
  11.610 -    if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, 
  11.611 -		    SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev) < 0) 
  11.612 -    {
  11.613 -	printk(KERN_ERR "%s%d: Interrupt unavailable.\n", 
  11.614 -	       name, instance);
  11.615 -	return -1;
  11.616 -    }
  11.617 -    /*
  11.618 -     *	Fill in the function dispatch table.
  11.619 -     */
  11.620 -    dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
  11.621 -    dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt;
  11.622 -    dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
  11.623 -    dev->a_ops.adapter_notify = aac_rx_notify_adapter;
  11.624 -    dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
  11.625 -    
  11.626 -    if (aac_init_adapter(dev) == NULL)
  11.627 -	return -1;
  11.628 +	}
  11.629 +	if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0) 
  11.630 +	{
  11.631 +		printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
  11.632 +		return -1;
  11.633 +	}
  11.634 +	/*
  11.635 +	 *	Fill in the function dispatch table.
  11.636 +	 */
  11.637 +	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
  11.638 +	dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt;
  11.639 +	dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
  11.640 +	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
  11.641 +	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
  11.642 +
  11.643 +	if (aac_init_adapter(dev) == NULL)
  11.644 +		return -1;
  11.645  #ifdef TRY_TASKLET
  11.646 -    aac_command_tasklet.data = (unsigned long)dev;
  11.647 -    tasklet_enable(&aac_command_tasklet);
  11.648 +	aac_command_tasklet.data = (unsigned long)dev;
  11.649 +	tasklet_enable(&aac_command_tasklet);
  11.650  #else
  11.651 -    /*
  11.652 -     *	Start any kernel threads needed
  11.653 -     */
  11.654 -    dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, 
  11.655 -				    dev, 0);
  11.656 -#endif 
  11.657 -
  11.658 -    /*
  11.659 -     *	Tell the adapter that all is configured, and it can start
  11.660 -     *	accepting requests
  11.661 -     */
  11.662 -    aac_rx_start_adapter(dev);
  11.663 -    return 0;
  11.664 +	/*
  11.665 +	 *	Start any kernel threads needed
  11.666 +	 */
  11.667 +	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
  11.668 +	if(dev->thread_pid < 0)
  11.669 +	{
  11.670 +		printk(KERN_ERR "aacraid: Unable to create rx thread.\n");
  11.671 +		return -1;
  11.672 +	}	
  11.673 +#endif
  11.674 +	/*
  11.675 +	 *	Tell the adapter that all is configured, and it can start
  11.676 +	 *	accepting requests
  11.677 +	 */
  11.678 +	aac_rx_start_adapter(dev);
  11.679 +	return 0;
  11.680  }
    12.1 --- a/xen/drivers/scsi/aacraid/sa.c	Wed Aug 06 20:53:30 2003 +0000
    12.2 +++ b/xen/drivers/scsi/aacraid/sa.c	Thu Aug 07 12:24:12 2003 +0000
    12.3 @@ -28,18 +28,18 @@
    12.4   *
    12.5   */
    12.6  
    12.7 -#include <xeno/config.h>
    12.8 -#include <xeno/kernel.h>
    12.9 -#include <xeno/init.h>
   12.10 -#include <xeno/types.h>
   12.11 -#include <xeno/sched.h>
   12.12 -/*  #include <xeno/pci.h> */
   12.13 -/*  #include <xeno/spinlock.h> */
   12.14 -/*  #include <xeno/slab.h> */
   12.15 -#include <xeno/blk.h>
   12.16 -#include <xeno/delay.h>
   12.17 -/*  #include <xeno/completion.h> */
   12.18 -/*  #include <asm/semaphore.h> */
   12.19 +#include <linux/config.h>
   12.20 +#include <linux/kernel.h>
   12.21 +#include <linux/init.h>
   12.22 +#include <linux/types.h>
   12.23 +#include <linux/sched.h>
   12.24 +#include <linux/pci.h>
   12.25 +#include <linux/spinlock.h>
   12.26 +#include <linux/slab.h>
   12.27 +#include <linux/blk.h>
   12.28 +#include <linux/delay.h>
   12.29 +/*#include <linux/completion.h>*/
   12.30 +/*#include <asm/semaphore.h>*/
   12.31  #include "scsi.h"
   12.32  #include "hosts.h"
   12.33  
   12.34 @@ -235,9 +235,9 @@ static int sa_sync_cmd(struct aac_dev *d
   12.35  #if 0
   12.36  		set_current_state(TASK_UNINTERRUPTIBLE);
   12.37  		schedule_timeout(1);
   12.38 +#else
   12.39 +		mdelay(100);
   12.40  #endif
   12.41 -		mdelay(100); 
   12.42 -
   12.43  	}
   12.44  
   12.45  	if (ok != 1)
   12.46 @@ -353,7 +353,7 @@ int aac_sa_init(struct aac_dev *dev, uns
   12.47  	 *	Wait for the adapter to be up and running. Wait up to 3 minutes.
   12.48  	 */
   12.49  	while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
   12.50 -		if (time_after(start+180*HZ, jiffies)) {
   12.51 +		if (time_after(jiffies, start+180*HZ)) {
   12.52  			status = sa_readl(dev, Mailbox7) >> 16;
   12.53  			printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %d.\n", name, instance, le32_to_cpu(status));
   12.54  			return -1;
   12.55 @@ -361,8 +361,9 @@ int aac_sa_init(struct aac_dev *dev, uns
   12.56  #if 0
   12.57  		set_current_state(TASK_UNINTERRUPTIBLE);
   12.58  		schedule_timeout(1);
   12.59 +#else
   12.60 +		mdelay(100);
   12.61  #endif
   12.62 -		mdelay(100); 
   12.63  	}
   12.64  
   12.65  	dprintk(("ATIRQ\n"));
   12.66 @@ -392,8 +393,11 @@ int aac_sa_init(struct aac_dev *dev, uns
   12.67  	 *	Start any kernel threads needed
   12.68  	 */
   12.69  	dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
   12.70 +	if (dev->thread_pid < 0) {
   12.71 +	     printk(KERN_ERR "aacraid: Unable to create command thread.\n");
   12.72 +	     return -1;
   12.73 +	}
   12.74  #endif
   12.75 -
   12.76  	/*
   12.77  	 *	Tell the adapter that all is configure, and it can start 
   12.78  	 *	accepting requests