ia64/xen-unstable

changeset 263:9888f92572ba

bitkeeper revision 1.109 (3e677d37B9PCxHLX7a1Iufrz4eSUqA)

Manual merge of SMH + ACH worlds.
author kaf24@labyrinth.cl.cam.ac.uk
date Thu Mar 06 16:54:15 2003 +0000 (2003-03-06)
parents 8d7cbb78020d 7233489302e6
children 2e679e814ec4
files .rootkeys xen/Rules.mk xen/drivers/block/genhd.c xen/drivers/block/grok.c xen/drivers/block/xen_block.c xen/drivers/ide/ide.c xen/drivers/scsi/aacraid/aachba.c xen/drivers/scsi/aacraid/aacraid.h xen/drivers/scsi/aacraid/comminit.c xen/drivers/scsi/aacraid/commsup.c xen/drivers/scsi/aacraid/linit.c xen/drivers/scsi/aacraid/rx.c xen/drivers/scsi/scsi.c xen/drivers/scsi/scsi.h xen/drivers/scsi/scsi_error.c xen/drivers/scsi/scsi_lib.c xen/drivers/scsi/scsi_merge.c xen/drivers/scsi/sd.c xen/include/xeno/config.h xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/Makefile xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_ide.c xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_scsi.c xenolinux-2.4.21-pre4-sparse/include/linux/major.h xenolinux-2.4.21-pre4-sparse/init/do_mounts.c
line diff
     1.1 --- a/.rootkeys	Thu Mar 06 15:52:22 2003 +0000
     1.2 +++ b/.rootkeys	Thu Mar 06 16:54:15 2003 +0000
     1.3 @@ -251,6 +251,7 @@ 3ddb79beWzgPS8ozf2BL2g3ZkiWhhQ xen/drive
     1.4  3ddb79be04dyXzyXqDbMRS_1funwXQ xen/drivers/block/blkpg.c
     1.5  3ddb79beME_0abStePF6fU8XLuQnWw xen/drivers/block/elevator.c
     1.6  3ddb79beNQVrdGyoI4njXhgAjD6a4A xen/drivers/block/genhd.c
     1.7 +3e677183FxihZVsJDCnvV2S0-FEZyA xen/drivers/block/grok.c
     1.8  3ddb79beyWwLRP_BiM2t1JKgr_plEw xen/drivers/block/ll_rw_blk.c
     1.9  3e4a8cb7RhubVgsPwO7cK0pgAN8WCQ xen/drivers/block/xen_block.c
    1.10  3e5d129asHNyZOjBKTkqs-9AFzxemA xen/drivers/block/xen_segment.c
    1.11 @@ -477,6 +478,8 @@ 3e6377fbMjXWAQd0XN0FWv4fDEo6fg xenolinux
    1.12  3e5a4e65iHEuC5sjFhj42XALYbLVRw xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/Makefile
    1.13  3e5a4e65pP5spJErBW69pJxSSdK9RA xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c
    1.14  3e5a4e65GtI9JZRAjuRdXaxt_4ohyQ xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block_test.c
    1.15 +3e677190SjkzJIvFifRVeYpIZOCtYA xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_ide.c
    1.16 +3e677193nOKKTLJzcAu4SYdbZaia8g xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_scsi.c
    1.17  3e676eb5RXnHzSHgA1BvM0B1aIm4qg xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_segment.c
    1.18  3e5d129aDldt6geU2-2SzBae34sQzg xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_segment_proc.c
    1.19  3e5a4e65G3e2s0ghPMgiJ-gBTUJ0uQ xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/console/Makefile
     2.1 --- a/xen/Rules.mk	Thu Mar 06 15:52:22 2003 +0000
     2.2 +++ b/xen/Rules.mk	Thu Mar 06 16:54:15 2003 +0000
     2.3 @@ -20,7 +20,7 @@ ALL_OBJS += $(BASEDIR)/drivers/pci/drive
     2.4  ALL_OBJS += $(BASEDIR)/drivers/net/driver.o
     2.5  ALL_OBJS += $(BASEDIR)/drivers/block/driver.o
     2.6  ALL_OBJS += $(BASEDIR)/drivers/ide/driver.o
     2.7 -#ALL_OBJS += $(BASEDIR)/drivers/scsi/driver.o
     2.8 +ALL_OBJS += $(BASEDIR)/drivers/scsi/driver.o
     2.9  ALL_OBJS += $(BASEDIR)/arch/$(ARCH)/arch.o
    2.10  
    2.11  HOSTCC     = gcc
     3.1 --- a/xen/drivers/block/genhd.c	Thu Mar 06 15:52:22 2003 +0000
     3.2 +++ b/xen/drivers/block/genhd.c	Thu Mar 06 16:54:15 2003 +0000
     3.3 @@ -193,6 +193,67 @@ out:
     3.4  }
     3.5  #endif
     3.6  
     3.7 +/* XXX SMH: stuff from fs/partitions dumped here temporarily */
     3.8 +
     3.9 +
    3.10 +/*
    3.11 + * This function will re-read the partition tables for a given device,
    3.12 + * and set things back up again.  There are some important caveats,
    3.13 + * however.  You must ensure that no one is using the device, and no one
    3.14 + * can start using the device while this function is being executed.
    3.15 + *
    3.16 + * Much of the cleanup from the old partition tables should have already been
    3.17 + * done
    3.18 + */
    3.19 +void register_disk(struct gendisk *gdev, kdev_t dev, unsigned minors,
    3.20 +    struct block_device_operations *ops, long size)
    3.21 +{
    3.22 +    if (!gdev)
    3.23 +        return;
    3.24 +    grok_partitions(gdev, MINOR(dev)>>gdev->minor_shift, minors, size);
    3.25 +}
    3.26 +
    3.27 +void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size)
    3.28 +{
    3.29 +	int i;
    3.30 +	int first_minor	= drive << dev->minor_shift;
    3.31 +	int end_minor	= first_minor + dev->max_p;
    3.32 +
    3.33 +	if(!dev->sizes)
    3.34 +		blk_size[dev->major] = NULL;
    3.35 +
    3.36 +	dev->part[first_minor].nr_sects = size;
    3.37 +#ifdef DEVFS_MUST_DIE
    3.38 +	/* No such device or no minors to use for partitions */
    3.39 +	if ( !size && dev->flags && (dev->flags[drive] & GENHD_FL_REMOVABLE) )
    3.40 +		devfs_register_partitions (dev, first_minor, 0);
    3.41 +#endif
    3.42 +
    3.43 +	if (!size || minors == 1)
    3.44 +		return;
    3.45 +
    3.46 +	if (dev->sizes) {
    3.47 +		dev->sizes[first_minor] = size >> (BLOCK_SIZE_BITS - 9);
    3.48 +		for (i = first_minor + 1; i < end_minor; i++)
    3.49 +			dev->sizes[i] = 0;
    3.50 +	}
    3.51 +	blk_size[dev->major] = dev->sizes;
    3.52 +#if 0
    3.53 +	/* XXX SMH: don't actually check partition details yet */
    3.54 +	check_partition(dev, MKDEV(dev->major, first_minor), 1 + first_minor);
    3.55 +#endif
    3.56 +
    3.57 + 	/*
    3.58 + 	 * We need to set the sizes array before we will be able to access
    3.59 + 	 * any of the partitions on this device.
    3.60 + 	 */
    3.61 +	if (dev->sizes != NULL) {	/* optional safeguard in ll_rw_blk.c */
    3.62 +		for (i = first_minor; i < end_minor; i++)
    3.63 +			dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
    3.64 +	}
    3.65 +}
    3.66 +
    3.67 +
    3.68  
    3.69  extern int blk_dev_init(void);
    3.70  extern int net_dev_init(void);
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/drivers/block/grok.c	Thu Mar 06 16:54:15 2003 +0000
     4.3 @@ -0,0 +1,39 @@
     4.4 +void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size)
     4.5 +{
     4.6 +	int i;
     4.7 +	int first_minor	= drive << dev->minor_shift;
     4.8 +	int end_minor	= first_minor + dev->max_p;
     4.9 +
    4.10 +	if(!dev->sizes)
    4.11 +		blk_size[dev->major] = NULL;
    4.12 +
    4.13 +	dev->part[first_minor].nr_sects = size;
    4.14 +#ifdef DEVFS_MUST_DIE
    4.15 +	/* No such device or no minors to use for partitions */
    4.16 +	if ( !size && dev->flags && (dev->flags[drive] & GENHD_FL_REMOVABLE) )
    4.17 +		devfs_register_partitions (dev, first_minor, 0);
    4.18 +#endif
    4.19 +
    4.20 +	if (!size || minors == 1)
    4.21 +		return;
    4.22 +
    4.23 +	if (dev->sizes) {
    4.24 +		dev->sizes[first_minor] = size >> (BLOCK_SIZE_BITS - 9);
    4.25 +		for (i = first_minor + 1; i < end_minor; i++)
    4.26 +			dev->sizes[i] = 0;
    4.27 +	}
    4.28 +	blk_size[dev->major] = dev->sizes;
    4.29 +#if 0
    4.30 +	/* XXX SMH: don't actually check partition details yet */
    4.31 +	check_partition(dev, MKDEV(dev->major, first_minor), 1 + first_minor);
    4.32 +#endif
    4.33 +
    4.34 + 	/*
    4.35 + 	 * We need to set the sizes array before we will be able to access
    4.36 + 	 * any of the partitions on this device.
    4.37 + 	 */
    4.38 +	if (dev->sizes != NULL) {	/* optional safeguard in ll_rw_blk.c */
    4.39 +		for (i = first_minor; i < end_minor; i++)
    4.40 +			dev->sizes[i] = dev->part[i].nr_sects >> (BLOCK_SIZE_BITS - 9);
    4.41 +	}
    4.42 +}
     5.1 --- a/xen/drivers/block/xen_block.c	Thu Mar 06 15:52:22 2003 +0000
     5.2 +++ b/xen/drivers/block/xen_block.c	Thu Mar 06 16:54:15 2003 +0000
     5.3 @@ -240,23 +240,23 @@ static void dispatch_debug_block_io(stru
     5.4  
     5.5  static void dispatch_create_segment(struct task_struct *p, int index)
     5.6  {
     5.7 -  blk_ring_t *blk_ring = p->blk_ring_base;
     5.8 -  xv_disk_t *xvd;
     5.9 -  int result;
    5.10 +    blk_ring_t *blk_ring = p->blk_ring_base;
    5.11 +    xv_disk_t *xvd;
    5.12 +    int result;
    5.13  
    5.14 -  if (p->domain != 0)
    5.15 -  {
    5.16 -    printk (KERN_ALERT "dispatch_create_segment called by dom%d\n", p->domain);
    5.17 -    make_response(p, blk_ring->ring[index].req.id, XEN_BLOCK_SEG_CREATE, 1); 
    5.18 +    if (p->domain != 0)
    5.19 +    {
    5.20 +        DPRINTK("dispatch_create_segment called by dom%d\n", p->domain);
    5.21 +        make_response(p, blk_ring->ring[index].req.id, XEN_BLOCK_SEG_CREATE, 1); 
    5.22 +        return;
    5.23 +    }
    5.24 +
    5.25 +    xvd = phys_to_virt((unsigned long)blk_ring->ring[index].req.buffer);    
    5.26 +    result = xen_segment_create(xvd);
    5.27 +
    5.28 +    make_response(p, blk_ring->ring[index].req.id, 
    5.29 +                  XEN_BLOCK_SEG_CREATE, result); 
    5.30      return;
    5.31 -  }
    5.32 -
    5.33 -  xvd = phys_to_virt((unsigned long)blk_ring->ring[index].req.buffer);    
    5.34 -  result = xen_segment_create(xvd);
    5.35 -
    5.36 -  make_response(p, blk_ring->ring[index].req.id, 
    5.37 -		XEN_BLOCK_SEG_CREATE, result); 
    5.38 -  return;
    5.39  }
    5.40  
    5.41  static void dispatch_delete_segment(struct task_struct *p, int index)
    5.42 @@ -268,12 +268,16 @@ static void dispatch_probe_blk(struct ta
    5.43  {
    5.44      extern void ide_probe_devices(xen_disk_info_t *xdi, int *count, 
    5.45  				  drive_t xdrives[]);
    5.46 +    extern void scsi_probe_devices(xen_disk_info_t *xdi, int *count,
    5.47 +                                   drive_t xdrives[]);
    5.48 +
    5.49      blk_ring_t *blk_ring = p->blk_ring_base;
    5.50      xen_disk_info_t *xdi;
    5.51  
    5.52      xdi = phys_to_virt((unsigned long)blk_ring->ring[index].req.buffer);    
    5.53 +
    5.54      ide_probe_devices(xdi, &num_xdrives, xdrives);
    5.55 -    /* scsi_probe_devices(xdi, &num_xdrives, xdrives); */          /* future */
    5.56 +    scsi_probe_devices(xdi, &num_xdrives, xdrives);
    5.57  
    5.58      make_response(p, blk_ring->ring[index].req.id, XEN_BLOCK_PROBE_BLK, 0);
    5.59  }
    5.60 @@ -371,39 +375,38 @@ static void dispatch_rw_block_io(struct 
    5.61      xen_device = blk_ring->ring[index].req.device;
    5.62      if (IS_XHD_MAJOR(xen_device))
    5.63      {
    5.64 -      if (xen_device == XHDA_MAJOR)    	 phys_device = MKDEV(IDE0_MAJOR, 0);
    5.65 -      else if (xen_device == XHDB_MAJOR) phys_device = MKDEV(IDE1_MAJOR, 0);
    5.66 -      else if (xen_device == XHDC_MAJOR) phys_device = MKDEV(IDE2_MAJOR, 0);
    5.67 -      else if (xen_device == XHDD_MAJOR) phys_device = MKDEV(IDE3_MAJOR, 0);
    5.68 -      else
    5.69 -      {
    5.70 -	printk (KERN_ALERT "dispatch_rw_block_io: unknown device %d\n",
    5.71 -		xen_device);
    5.72 -	BUG();
    5.73 -      }
    5.74 +        if (xen_device == XHDA_MAJOR)    	 phys_device = MKDEV(IDE0_MAJOR, 0);
    5.75 +        else if (xen_device == XHDB_MAJOR) phys_device = MKDEV(IDE1_MAJOR, 0);
    5.76 +        else if (xen_device == XHDC_MAJOR) phys_device = MKDEV(IDE2_MAJOR, 0);
    5.77 +        else if (xen_device == XHDD_MAJOR) phys_device = MKDEV(IDE3_MAJOR, 0);
    5.78 +        else
    5.79 +        {
    5.80 +            printk(KERN_ALERT "dispatch_rw_block_io: unknown device %d\n",
    5.81 +                   xen_device);
    5.82 +            BUG();
    5.83 +        }
    5.84  
    5.85 -      block_number = blk_ring->ring[index].req.block_number;
    5.86 -      sector_number = blk_ring->ring[index].req.sector_number;
    5.87 +        block_number = blk_ring->ring[index].req.block_number;
    5.88 +        sector_number = blk_ring->ring[index].req.sector_number;
    5.89      }
    5.90      else if (IS_VHD_MAJOR(xen_device))
    5.91      {
    5.92 -      int s;
    5.93 -      if (s = xen_segment_map_request(&phys_device, &block_number, 
    5.94 -				      &sector_number,
    5.95 -				      p, operation, xen_device,
    5.96 -				      blk_ring->ring[index].req.block_number,
    5.97 -				      blk_ring->ring[index].req.sector_number))
    5.98 -      {
    5.99 -	printk ("dispatch_rw_block_io: xen_segment_map_request status: %d\n",
   5.100 -		s);
   5.101 -	goto bad_descriptor;
   5.102 -      }
   5.103 +        int s;
   5.104 +        if (s = xen_segment_map_request(&phys_device, &block_number, 
   5.105 +                                        &sector_number,
   5.106 +                                        p, operation, xen_device,
   5.107 +                                        blk_ring->ring[index].req.block_number,
   5.108 +                                        blk_ring->ring[index].req.sector_number))
   5.109 +        {
   5.110 +            DPRINTK("dispatch_rw_block_io: xen_seg_map_request status: %d\n", s);
   5.111 +            goto bad_descriptor;
   5.112 +        }
   5.113      }
   5.114      else
   5.115      {
   5.116 -      printk (KERN_ALERT "dispatch_rw_block_io: unknown device %d\n",
   5.117 -	      xen_device);
   5.118 -      BUG();
   5.119 +        printk (KERN_ALERT "dispatch_rw_block_io: unknown device %d\n",
   5.120 +                xen_device);
   5.121 +        BUG();
   5.122      }
   5.123      
   5.124      bh->b_blocknr       = block_number;
   5.125 @@ -433,9 +436,8 @@ static void dispatch_rw_block_io(struct 
   5.126      return;
   5.127  
   5.128   bad_descriptor:
   5.129 -    printk (KERN_ALERT "dispatch rw blockio bad descriptor\n");
   5.130 +    DPRINTK("dispatch rw blockio bad descriptor\n");
   5.131      make_response(p, blk_ring->ring[index].req.id, XEN_BLOCK_READ, 1);
   5.132 -    return;
   5.133  } 
   5.134  
   5.135  
   5.136 @@ -468,23 +470,23 @@ static void make_response(struct task_st
   5.137  
   5.138  static void dump_blockq(u_char key, void *dev_id, struct pt_regs *regs) 
   5.139  {
   5.140 -  struct task_struct *p;
   5.141 -  blk_ring_t *blk_ring ;
   5.142 +    struct task_struct *p;
   5.143 +    blk_ring_t *blk_ring ;
   5.144  
   5.145 -  printk("Dumping block queue stats: nr_pending = %d\n",
   5.146 -	 atomic_read(&nr_pending));
   5.147 +    printk("Dumping block queue stats: nr_pending = %d\n",
   5.148 +           atomic_read(&nr_pending));
   5.149  
   5.150 -  p = current->next_task;
   5.151 -  do
   5.152 -  {
   5.153 -    printk (KERN_ALERT "Domain: %d\n", p->domain);
   5.154 -    blk_ring = p->blk_ring_base;
   5.155 +    p = current->next_task;
   5.156 +    do
   5.157 +    {
   5.158 +        printk (KERN_ALERT "Domain: %d\n", p->domain);
   5.159 +        blk_ring = p->blk_ring_base;
   5.160  
   5.161 -    printk("  req_prod:%d, resp_prod:%d, req_cons:%d\n",
   5.162 -	   blk_ring->req_prod, blk_ring->resp_prod, p->blk_req_cons);
   5.163 +        printk("  req_prod:%d, resp_prod:%d, req_cons:%d\n",
   5.164 +               blk_ring->req_prod, blk_ring->resp_prod, p->blk_req_cons);
   5.165  
   5.166 -    p = p->next_task;
   5.167 -  } while (p != current);
   5.168 +        p = p->next_task;
   5.169 +    } while (p != current);
   5.170  }
   5.171  
   5.172  /* Start-of-day initialisation for a new domain. */
     6.1 --- a/xen/drivers/ide/ide.c	Thu Mar 06 15:52:22 2003 +0000
     6.2 +++ b/xen/drivers/ide/ide.c	Thu Mar 06 16:54:15 2003 +0000
     6.3 @@ -183,12 +183,6 @@ int unregister_blkdev(unsigned int major
     6.4  int invalidate_device(kdev_t dev, int do_sync) { return 0; }
     6.5  /* fs/buffer.c... */
     6.6  void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) { }
     6.7 -/* fs/partitions/check.c... */
     6.8 -void grok_partitions(struct gendisk *dev, int drive, 
     6.9 -                     unsigned minors, long size) { }
    6.10 -void register_disk(struct gendisk *dev, kdev_t first, 
    6.11 -                   unsigned minors, struct block_device_operations *ops, 
    6.12 -                   long size) { }
    6.13  /* fs/devices.c... */
    6.14  const char * kdevname(kdev_t dev) { return NULL; }
    6.15  /* End of XXXXXX region */
     7.1 --- a/xen/drivers/scsi/aacraid/aachba.c	Thu Mar 06 15:52:22 2003 +0000
     7.2 +++ b/xen/drivers/scsi/aacraid/aachba.c	Thu Mar 06 16:54:15 2003 +0000
     7.3 @@ -253,7 +253,6 @@ int aac_get_containers(struct aac_dev *d
     7.4  	dinfo->count = cpu_to_le32(index);
     7.5  	dinfo->type = cpu_to_le32(FT_FILESYS);
     7.6  
     7.7 -	printk("aac_get_container: getting info for container %d\n", index); 
     7.8  	status = fib_send(ContainerCommand,
     7.9  			  fibptr,
    7.10  			  sizeof (struct aac_query_mount),
     8.1 --- a/xen/drivers/scsi/aacraid/aacraid.h	Thu Mar 06 15:52:22 2003 +0000
     8.2 +++ b/xen/drivers/scsi/aacraid/aacraid.h	Thu Mar 06 16:54:15 2003 +0000
     8.3 @@ -1410,7 +1410,7 @@ unsigned int aac_response_normal(struct 
     8.4  unsigned int aac_command_normal(struct aac_queue * q);
     8.5  #ifdef TRY_TASKLET
     8.6  extern struct tasklet_struct aac_command_tasklet;
     8.7 -int aac_command_thread(unsigned long data);
     8.8 +void aac_command_thread(unsigned long data);
     8.9  #else
    8.10  int aac_command_thread(struct aac_dev * dev);
    8.11  #endif
     9.1 --- a/xen/drivers/scsi/aacraid/comminit.c	Thu Mar 06 15:52:22 2003 +0000
     9.2 +++ b/xen/drivers/scsi/aacraid/comminit.c	Thu Mar 06 16:54:15 2003 +0000
     9.3 @@ -325,10 +325,9 @@ struct aac_dev *aac_init_adapter(struct 
     9.4  	}
     9.5  	memset(dev->queues, 0, sizeof(struct aac_queue_block));
     9.6  
     9.7 -	printk("aac_init_adapater, dev is %p\n", dev); 
     9.8  	if (aac_comm_init(dev)<0)
     9.9  		return NULL;
    9.10 -	printk("aac_init_adapater, dev->init is %p\n", dev->init); 
    9.11 +
    9.12  	/*
    9.13  	 *	Initialize the list of fibs
    9.14  	 */
    10.1 --- a/xen/drivers/scsi/aacraid/commsup.c	Thu Mar 06 15:52:22 2003 +0000
    10.2 +++ b/xen/drivers/scsi/aacraid/commsup.c	Thu Mar 06 16:54:15 2003 +0000
    10.3 @@ -535,12 +535,13 @@ int fib_send(u16 command, struct fib * f
    10.4           * do_softirq() after scheduling the tasklet, as long as we
    10.5           * are _sure_ we hold no locks here...
    10.6           */
    10.7 -	printk("about to softirq aac_command_thread...\n"); 
    10.8 +//	printk("about to softirq aac_command_thread...\n"); 
    10.9  	while (!fibptr->done) { 
   10.10              tasklet_schedule(&aac_command_tasklet);
   10.11 -	    mdelay(100); 
   10.12 +	    do_softirq(); /* force execution */
   10.13 +//	    mdelay(100); 
   10.14  	}
   10.15 -	printk("back from softirq cmd thread and fibptr->done!\n"); 
   10.16 +//	printk("back from softirq cmd thread and fibptr->done!\n"); 
   10.17  #else 
   10.18  	printk("about to bail at aac_command_thread...\n"); 
   10.19  	while (!fibptr->done) { 
   10.20 @@ -843,11 +844,12 @@ static void aac_handle_aif(struct aac_de
   10.21   */
   10.22   
   10.23  #ifndef TRY_TASKLET
   10.24 -DECLARE_TASKLET_DISABLED(aac_command_tasklet, aac_command_thread, 0);
   10.25  int aac_command_thread(struct aac_dev * dev)
   10.26  {
   10.27  #else
   10.28 -int aac_command_thread(unsigned long data)
   10.29 +DECLARE_TASKLET_DISABLED(aac_command_tasklet, aac_command_thread, 0);
   10.30 +void aac_command_thread(unsigned long data)
   10.31 +#define return(_x) return 
   10.32  {   
   10.33      struct aac_dev *dev = (struct aac_dev *)data; 
   10.34  #endif
   10.35 @@ -863,9 +865,8 @@ int aac_command_thread(unsigned long dat
   10.36      /*
   10.37       *	We can only have one thread per adapter for AIF's.
   10.38       */
   10.39 -    printk("aac_command_'thread': entered.\n"); 
   10.40      if (dev->aif_thread)
   10.41 -	return -EINVAL;
   10.42 +	return(-EINVAL);
   10.43  
   10.44  #if 0
   10.45      /*
   10.46 @@ -888,9 +889,7 @@ int aac_command_thread(unsigned long dat
   10.47  //    while(1) 
   10.48      {
   10.49  
   10.50 -	printk("aac_command_thread: in 'loop'\n"); 
   10.51  	spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
   10.52 -	printk("flags = %x\n", flags); 
   10.53  	while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
   10.54  	    struct list_head *entry;
   10.55  	    struct aac_aifcmd * aifcmd;
   10.56 @@ -905,7 +904,6 @@ int aac_command_thread(unsigned long dat
   10.57  			
   10.58  	    spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock,flags);
   10.59  	    fib = list_entry(entry, struct hw_fib, header.FibLinks);
   10.60 -	    printk("aac_command_thread: got fib \n"); 
   10.61  	    /*
   10.62  	     *	We will process the FIB here or pass it to a 
   10.63  	     *	worker thread that is TBD. We Really can't 
   10.64 @@ -923,7 +921,6 @@ int aac_command_thread(unsigned long dat
   10.65  	     */
   10.66  	    aifcmd = (struct aac_aifcmd *) fib->data;
   10.67  	    if (aifcmd->command == le16_to_cpu(AifCmdDriverNotify)) {
   10.68 -		printk("aac_command_thread: handling aif... :-( \n"); 
   10.69  		aac_handle_aif(dev, &fibptr);
   10.70  	    } else {
   10.71  		/* The u32 here is important and intended. We are using
   10.72 @@ -1024,5 +1021,5 @@ int aac_command_thread(unsigned long dat
   10.73      dev->aif_thread = 0;
   10.74  
   10.75  #endif
   10.76 -    return 0;
   10.77 +    return(0);
   10.78  }
    11.1 --- a/xen/drivers/scsi/aacraid/linit.c	Thu Mar 06 15:52:22 2003 +0000
    11.2 +++ b/xen/drivers/scsi/aacraid/linit.c	Thu Mar 06 16:54:15 2003 +0000
    11.3 @@ -338,7 +338,6 @@ static int aac_detect(Scsi_Host_Template
    11.4  #endif
    11.5  
    11.6      template->present = aac_count; /* # of cards of this type found */
    11.7 -    printk(KERN_DEBUG "aac_detect: returning %d\n", aac_count); 
    11.8      return aac_count;
    11.9  }
   11.10  
   11.11 @@ -461,7 +460,7 @@ static int aac_biosparm(Scsi_Disk *disk,
   11.12      struct buffer_head * buf;
   11.13      
   11.14      dprintk((KERN_DEBUG "aac_biosparm.\n"));
   11.15 -    
   11.16 +
   11.17      /*
   11.18       *	Assuming extended translation is enabled - #REVISIT#
   11.19       */
   11.20 @@ -499,7 +498,6 @@ static int aac_biosparm(Scsi_Disk *disk,
   11.21       *	table entry whose end_head matches one of the standard geometry 
   11.22       *	translations ( 64/32, 128/32, 255/63 ).
   11.23       */
   11.24 -#endif
   11.25  
   11.26  	 
   11.27      if(*(unsigned short *)(buf->b_data + 0x1fe) == cpu_to_le16(0xaa55))
   11.28 @@ -560,7 +558,6 @@ static int aac_biosparm(Scsi_Disk *disk,
   11.29  		     param->heads, param->sectors));
   11.30  	}
   11.31      }
   11.32 -#if 0
   11.33      brelse(buf);
   11.34  #endif
   11.35      return 0;
    12.1 --- a/xen/drivers/scsi/aacraid/rx.c	Thu Mar 06 15:52:22 2003 +0000
    12.2 +++ b/xen/drivers/scsi/aacraid/rx.c	Thu Mar 06 16:54:15 2003 +0000
    12.3 @@ -311,7 +311,7 @@ static void aac_rx_start_adapter(struct 
    12.4      struct aac_init *init;
    12.5      
    12.6      init = dev->init;
    12.7 -    printk("aac_rx_start: dev is %p, init is %p\n", dev, init); 
    12.8 +
    12.9      init->HostElapsedSeconds = cpu_to_le32(jiffies/HZ);
   12.10      /*
   12.11       *	Tell the adapter we are back and up and running so it will scan
    13.1 --- a/xen/drivers/scsi/scsi.c	Thu Mar 06 15:52:22 2003 +0000
    13.2 +++ b/xen/drivers/scsi/scsi.c	Thu Mar 06 16:54:15 2003 +0000
    13.3 @@ -55,6 +55,10 @@
    13.4  /*#include <xeno/smp_lock.h>*/
    13.5  /*#include <xeno/completion.h>*/
    13.6  
    13.7 +/* for xeno scsi_probe() stuff... maybe punt somewhere else? */
    13.8 +#include <hypervisor-ifs/block.h>
    13.9 +#include <xeno/blkdev.h>
   13.10 +
   13.11  #define __KERNEL_SYSCALLS__
   13.12  
   13.13  /*#include <xeno/unistd.h>*/
   13.14 @@ -240,7 +244,14 @@ static void scsi_wait_done(Scsi_Cmnd * S
   13.15      if (req->waiting != NULL) {
   13.16          complete(req->waiting);
   13.17      }
   13.18 +#else 
   13.19 +    /* XXX SMH: just use a flag to signal completion; caller spins */
   13.20 +    if (*(int *)(req->waiting) != 0) {
   13.21 +//        printk("scsi_wait_done: flipping wait status on req %p\n", req); 
   13.22 +        *(int *)(req->waiting) = 0; 
   13.23 +    }
   13.24  #endif
   13.25 +
   13.26  }
   13.27  
   13.28  /*
   13.29 @@ -317,15 +328,8 @@ void scsi_release_request(Scsi_Request *
   13.30  {
   13.31      if( req->sr_command != NULL )
   13.32      {
   13.33 -#ifdef SMH_DEBUG
   13.34 -        printk("scsi_release_request: req->sr_command = %p\n", 
   13.35 -                   req->sr_command); 
   13.36 -#endif
   13.37          scsi_release_command(req->sr_command);
   13.38          req->sr_command = NULL;
   13.39 -#ifdef SMHHACK 
   13.40 -        req->freeaddr = 0x1234; 
   13.41 -#endif
   13.42      }
   13.43      
   13.44      kfree(req);
   13.45 @@ -361,225 +365,227 @@ void scsi_release_request(Scsi_Request *
   13.46  Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait, 
   13.47                                  int interruptable)
   13.48  {
   13.49 - 	struct Scsi_Host *host;
   13.50 -  	Scsi_Cmnd *SCpnt = NULL;
   13.51 -	Scsi_Device *SDpnt;
   13.52 -	unsigned long flags;
   13.53 -  
   13.54 -  	if (!device)
   13.55 -  		panic("No device passed to scsi_allocate_device().\n");
   13.56 -  
   13.57 -  	host = device->host;
   13.58 -  
   13.59 -	spin_lock_irqsave(&device_request_lock, flags);
   13.60 - 
   13.61 -	while (1 == 1) {
   13.62 -		SCpnt = NULL;
   13.63 -		if (!device->device_blocked) {
   13.64 -			if (device->single_lun) {
   13.65 -				/*
   13.66 -				 * FIXME(eric) - this is not at all optimal.  Given that
   13.67 -				 * single lun devices are rare and usually slow
   13.68 -				 * (i.e. CD changers), this is good enough for now, but
   13.69 -				 * we may want to come back and optimize this later.
   13.70 -				 *
   13.71 -				 * Scan through all of the devices attached to this
   13.72 -				 * host, and see if any are active or not.  If so,
   13.73 -				 * we need to defer this command.
   13.74 -				 *
   13.75 -				 * We really need a busy counter per device.  This would
   13.76 -				 * allow us to more easily figure out whether we should
   13.77 -				 * do anything here or not.
   13.78 -				 */
   13.79 -				for (SDpnt = host->host_queue;
   13.80 -				     SDpnt;
   13.81 -				     SDpnt = SDpnt->next) {
   13.82 -					/*
   13.83 -					 * Only look for other devices on the same bus
   13.84 -					 * with the same target ID.
   13.85 -					 */
   13.86 -					if (SDpnt->channel != device->channel
   13.87 -					    || SDpnt->id != device->id
   13.88 -					    || SDpnt == device) {
   13.89 - 						continue;
   13.90 -					}
   13.91 -                                        if( atomic_read(&SDpnt->device_active) != 0)
   13.92 -                                        {
   13.93 -                                                break;
   13.94 -                                        }
   13.95 -				}
   13.96 -				if (SDpnt) {
   13.97 -					/*
   13.98 -					 * Some other device in this cluster is busy.
   13.99 -					 * If asked to wait, we need to wait, otherwise
  13.100 -					 * return NULL.
  13.101 -					 */
  13.102 -					SCpnt = NULL;
  13.103 -					goto busy;
  13.104 -				}
  13.105 -			}
  13.106 -			/*
  13.107 -			 * Now we can check for a free command block for this device.
  13.108 -			 */
  13.109 -			for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
  13.110 -				if (SCpnt->request.rq_status == RQ_INACTIVE)
  13.111 -					break;
  13.112 -			}
  13.113 -		}
  13.114 -		/*
  13.115 -		 * If we couldn't find a free command block, and we have been
  13.116 -		 * asked to wait, then do so.
  13.117 -		 */
  13.118 -		if (SCpnt) {
  13.119 -			break;
  13.120 -		}
  13.121 -      busy:
  13.122 -		/*
  13.123 -		 * If we have been asked to wait for a free block, then
  13.124 -		 * wait here.
  13.125 -		 */
  13.126 -		if (wait) {
  13.127 -                    printk("XXX smh: scsi cannot wait for free cmd block.\n"); 
  13.128 -                    BUG(); 
  13.129 +    struct Scsi_Host *host;
  13.130 +    Scsi_Cmnd *SCpnt = NULL;
  13.131 +    Scsi_Device *SDpnt;
  13.132 +    unsigned long flags;
  13.133 +    
  13.134 +    if (!device)
  13.135 +        panic("No device passed to scsi_allocate_device().\n");
  13.136 +    
  13.137 +    host = device->host;
  13.138 +    
  13.139 +    spin_lock_irqsave(&device_request_lock, flags);
  13.140 +    
  13.141 +    while (1 == 1) {
  13.142 +        SCpnt = NULL;
  13.143 +        if (!device->device_blocked) {
  13.144 +            if (device->single_lun) {
  13.145 +                /*
  13.146 +                 * FIXME(eric) - this is not at all optimal.  Given that
  13.147 +                 * single lun devices are rare and usually slow
  13.148 +                 * (i.e. CD changers), this is good enough for now, but
  13.149 +                 * we may want to come back and optimize this later.
  13.150 +                 *
  13.151 +                 * Scan through all of the devices attached to this
  13.152 +                 * host, and see if any are active or not.  If so,
  13.153 +                 * we need to defer this command.
  13.154 +                 *
  13.155 +                 * We really need a busy counter per device.  This would
  13.156 +                 * allow us to more easily figure out whether we should
  13.157 +                 * do anything here or not.
  13.158 +                 */
  13.159 +                for (SDpnt = host->host_queue;
  13.160 +                     SDpnt;
  13.161 +                     SDpnt = SDpnt->next) {
  13.162 +                    /*
  13.163 +                     * Only look for other devices on the same bus
  13.164 +                     * with the same target ID.
  13.165 +                     */
  13.166 +                    if (SDpnt->channel != device->channel
  13.167 +                        || SDpnt->id != device->id
  13.168 +                        || SDpnt == device) {
  13.169 +                        continue;
  13.170 +                    }
  13.171 +                    if( atomic_read(&SDpnt->device_active) != 0)
  13.172 +                    {
  13.173 +                        break;
  13.174 +                    }
  13.175 +                }
  13.176 +                if (SDpnt) {
  13.177 +                    /*
  13.178 +                     * Some other device in this cluster is busy.
  13.179 +                     * If asked to wait, we need to wait, otherwise
  13.180 +                     * return NULL.
  13.181 +                     */
  13.182 +                    SCpnt = NULL;
  13.183 +                    goto busy;
  13.184 +                }
  13.185 +            }
  13.186 +            /*
  13.187 +             * Now we can check for a free command block for this device.
  13.188 +             */
  13.189 +            for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
  13.190 +                if (SCpnt->request.rq_status == RQ_INACTIVE)
  13.191 +                    break;
  13.192 +            }
  13.193 +        }
  13.194 +        /*
  13.195 +         * If we couldn't find a free command block, and we have been
  13.196 +         * asked to wait, then do so.
  13.197 +         */
  13.198 +        if (SCpnt) {
  13.199 +            break;
  13.200 +        }
  13.201 +    busy:
  13.202 +        /*
  13.203 +         * If we have been asked to wait for a free block, then
  13.204 +         * wait here.
  13.205 +         */
  13.206 +        if (wait) {
  13.207 +            printk("XXX smh: scsi cannot wait for free cmd block.\n"); 
  13.208 +            BUG(); 
  13.209  #if 0 
  13.210 -                        DECLARE_WAITQUEUE(wait, current);
  13.211 -
  13.212 -                        /*
  13.213 -                         * We need to wait for a free commandblock.  We need to
  13.214 -                         * insert ourselves into the list before we release the
  13.215 -                         * lock.  This way if a block were released the same
  13.216 -                         * microsecond that we released the lock, the call
  13.217 -                         * to schedule() wouldn't block (well, it might switch,
  13.218 -                         * but the current task will still be schedulable.
  13.219 -                         */
  13.220 -                        add_wait_queue(&device->scpnt_wait, &wait);
  13.221 -                        if( interruptable ) {
  13.222 -                                set_current_state(TASK_INTERRUPTIBLE);
  13.223 -                        } else {
  13.224 -                                set_current_state(TASK_UNINTERRUPTIBLE);
  13.225 -                        }
  13.226 -
  13.227 -                        spin_unlock_irqrestore(&device_request_lock, flags);
  13.228 -
  13.229 -			/*
  13.230 -			 * This should block until a device command block
  13.231 -			 * becomes available.
  13.232 -			 */
  13.233 -                        schedule();
  13.234 -
  13.235 -			spin_lock_irqsave(&device_request_lock, flags);
  13.236 -
  13.237 -                        remove_wait_queue(&device->scpnt_wait, &wait);
  13.238 -                        /*
  13.239 -                         * FIXME - Isn't this redundant??  Someone
  13.240 -                         * else will have forced the state back to running.
  13.241 -                         */
  13.242 -                        set_current_state(TASK_RUNNING);
  13.243 -                        /*
  13.244 -                         * In the event that a signal has arrived that we need
  13.245 -                         * to consider, then simply return NULL.  Everyone
  13.246 -                         * that calls us should be prepared for this
  13.247 -                         * possibility, and pass the appropriate code back
  13.248 -                         * to the user.
  13.249 -                         */
  13.250 -                        if( interruptable ) {
  13.251 -                                if (signal_pending(current)) {
  13.252 -                                        spin_unlock_irqrestore(&device_request_lock, flags);
  13.253 -                                        return NULL;
  13.254 -                                }
  13.255 -                        }
  13.256 +            DECLARE_WAITQUEUE(wait, current);
  13.257 +            
  13.258 +            /*
  13.259 +             * We need to wait for a free commandblock.  We need to
  13.260 +             * insert ourselves into the list before we release the
  13.261 +             * lock.  This way if a block were released the same
  13.262 +             * microsecond that we released the lock, the call
  13.263 +             * to schedule() wouldn't block (well, it might switch,
  13.264 +             * but the current task will still be schedulable.
  13.265 +             */
  13.266 +            add_wait_queue(&device->scpnt_wait, &wait);
  13.267 +            if( interruptable ) {
  13.268 +                set_current_state(TASK_INTERRUPTIBLE);
  13.269 +            } else {
  13.270 +                set_current_state(TASK_UNINTERRUPTIBLE);
  13.271 +            }
  13.272 +            
  13.273 +            spin_unlock_irqrestore(&device_request_lock, flags);
  13.274 +            
  13.275 +            /*
  13.276 +             * This should block until a device command block
  13.277 +             * becomes available.
  13.278 +             */
  13.279 +            schedule();
  13.280 +            
  13.281 +            spin_lock_irqsave(&device_request_lock, flags);
  13.282 +            
  13.283 +            remove_wait_queue(&device->scpnt_wait, &wait);
  13.284 +            /*
  13.285 +             * FIXME - Isn't this redundant??  Someone
  13.286 +             * else will have forced the state back to running.
  13.287 +             */
  13.288 +            set_current_state(TASK_RUNNING);
  13.289 +            /*
  13.290 +             * In the event that a signal has arrived that we need
  13.291 +             * to consider, then simply return NULL.  Everyone
  13.292 +             * that calls us should be prepared for this
  13.293 +             * possibility, and pass the appropriate code back
  13.294 +             * to the user.
  13.295 +             */
  13.296 +            if( interruptable ) {
  13.297 +                if (signal_pending(current)) {
  13.298 +                    spin_unlock_irqrestore(&device_request_lock, flags);
  13.299 +                    return NULL;
  13.300 +                }
  13.301 +            }
  13.302  #endif
  13.303 -		} else {
  13.304 -                        spin_unlock_irqrestore(&device_request_lock, flags);
  13.305 -			return NULL;
  13.306 -		}
  13.307 -	}
  13.308 -
  13.309 -	SCpnt->request.rq_status = RQ_SCSI_BUSY;
  13.310 -	SCpnt->request.waiting = NULL;	/* And no one is waiting for this
  13.311 +        } else {
  13.312 +            spin_unlock_irqrestore(&device_request_lock, flags);
  13.313 +            return NULL;
  13.314 +        }
  13.315 +    }
  13.316 +    
  13.317 +    SCpnt->request.rq_status = RQ_SCSI_BUSY;
  13.318 +    SCpnt->request.waiting = NULL;	/* And no one is waiting for this
  13.319  					 * to complete */
  13.320 -	atomic_inc(&SCpnt->host->host_active);
  13.321 -	atomic_inc(&SCpnt->device->device_active);
  13.322 -
  13.323 -	SCpnt->buffer  = NULL;
  13.324 -	SCpnt->bufflen = 0;
  13.325 -	SCpnt->request_buffer = NULL;
  13.326 -	SCpnt->request_bufflen = 0;
  13.327 -
  13.328 -	SCpnt->use_sg = 0;	/* Reset the scatter-gather flag */
  13.329 -	SCpnt->old_use_sg = 0;
  13.330 -	SCpnt->transfersize = 0;	/* No default transfer size */
  13.331 -	SCpnt->cmd_len = 0;
  13.332 -
  13.333 -	SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
  13.334 -	SCpnt->sc_request = NULL;
  13.335 -	SCpnt->sc_magic = SCSI_CMND_MAGIC;
  13.336 -
  13.337 -        SCpnt->result = 0;
  13.338 -	SCpnt->underflow = 0;	/* Do not flag underflow conditions */
  13.339 -	SCpnt->old_underflow = 0;
  13.340 -	SCpnt->resid = 0;
  13.341 -	SCpnt->state = SCSI_STATE_INITIALIZING;
  13.342 -	SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
  13.343 -
  13.344 -	spin_unlock_irqrestore(&device_request_lock, flags);
  13.345 -
  13.346 -	SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
  13.347 -				   SCpnt->target,
  13.348 -				atomic_read(&SCpnt->host->host_active)));
  13.349 -
  13.350 -	return SCpnt;
  13.351 +    atomic_inc(&SCpnt->host->host_active);
  13.352 +    atomic_inc(&SCpnt->device->device_active);
  13.353 +    
  13.354 +    SCpnt->buffer  = NULL;
  13.355 +    SCpnt->bufflen = 0;
  13.356 +    SCpnt->request_buffer = NULL;
  13.357 +    SCpnt->request_bufflen = 0;
  13.358 +    
  13.359 +    SCpnt->use_sg = 0;	/* Reset the scatter-gather flag */
  13.360 +    SCpnt->old_use_sg = 0;
  13.361 +    SCpnt->transfersize = 0;	/* No default transfer size */
  13.362 +    SCpnt->cmd_len = 0;
  13.363 +    
  13.364 +    SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
  13.365 +    SCpnt->sc_request = NULL;
  13.366 +    SCpnt->sc_magic = SCSI_CMND_MAGIC;
  13.367 +    
  13.368 +    SCpnt->result = 0;
  13.369 +    SCpnt->underflow = 0;	/* Do not flag underflow conditions */
  13.370 +    SCpnt->old_underflow = 0;
  13.371 +    SCpnt->resid = 0;
  13.372 +    SCpnt->state = SCSI_STATE_INITIALIZING;
  13.373 +    SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
  13.374 +    
  13.375 +    spin_unlock_irqrestore(&device_request_lock, flags);
  13.376 +    
  13.377 +    SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
  13.378 +                               SCpnt->target,
  13.379 +                               atomic_read(&SCpnt->host->host_active)));
  13.380 +    
  13.381 +    return SCpnt;
  13.382  }
  13.383  
  13.384  inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
  13.385  {
  13.386 -	unsigned long flags;
  13.387 -        Scsi_Device * SDpnt;
  13.388 -
  13.389 -	spin_lock_irqsave(&device_request_lock, flags);
  13.390 -
  13.391 -        SDpnt = SCpnt->device;
  13.392 -
  13.393 -	SCpnt->request.rq_status = RQ_INACTIVE;
  13.394 -	SCpnt->state = SCSI_STATE_UNUSED;
  13.395 -	SCpnt->owner = SCSI_OWNER_NOBODY;
  13.396 -	atomic_dec(&SCpnt->host->host_active);
  13.397 -	atomic_dec(&SDpnt->device_active);
  13.398 -
  13.399 -	SCSI_LOG_MLQUEUE(5, printk("Deactivating command for device %d (active=%d, failed=%d)\n",
  13.400 -				   SCpnt->target,
  13.401 -				   atomic_read(&SCpnt->host->host_active),
  13.402 -				   SCpnt->host->host_failed));
  13.403 -	if (SCpnt->host->host_failed != 0) {
  13.404 -		SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
  13.405 -						SCpnt->host->in_recovery,
  13.406 -						SCpnt->host->eh_active));
  13.407 -	}
  13.408 -	/*
  13.409 -	 * If the host is having troubles, then look to see if this was the last
  13.410 -	 * command that might have failed.  If so, wake up the error handler.
  13.411 -	 */
  13.412 -	if (SCpnt->host->in_recovery
  13.413 -	    && !SCpnt->host->eh_active
  13.414 -	    && SCpnt->host->host_busy == SCpnt->host->host_failed) {
  13.415 -		SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
  13.416 -			     atomic_read(&SCpnt->host->eh_wait->count)));
  13.417 +    unsigned long flags;
  13.418 +    Scsi_Device * SDpnt;
  13.419 +    
  13.420 +    spin_lock_irqsave(&device_request_lock, flags);
  13.421 +    
  13.422 +    SDpnt = SCpnt->device;
  13.423 +    
  13.424 +    SCpnt->request.rq_status = RQ_INACTIVE;
  13.425 +    SCpnt->state = SCSI_STATE_UNUSED;
  13.426 +    SCpnt->owner = SCSI_OWNER_NOBODY;
  13.427 +    atomic_dec(&SCpnt->host->host_active);
  13.428 +    atomic_dec(&SDpnt->device_active);
  13.429 +    
  13.430 +    SCSI_LOG_MLQUEUE(5, printk(
  13.431 +        "Deactivating command for device %d (active=%d, failed=%d)\n",
  13.432 +        SCpnt->target,
  13.433 +        atomic_read(&SCpnt->host->host_active),
  13.434 +        SCpnt->host->host_failed));
  13.435 +    if (SCpnt->host->host_failed != 0) {
  13.436 +        SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
  13.437 +                                          SCpnt->host->in_recovery,
  13.438 +                                          SCpnt->host->eh_active));
  13.439 +    }
  13.440 +    /*
  13.441 +     * If the host is having troubles, then look to see if this was the last
  13.442 +     * command that might have failed.  If so, wake up the error handler.
  13.443 +     */
  13.444 +    if (SCpnt->host->in_recovery
  13.445 +        && !SCpnt->host->eh_active
  13.446 +        && SCpnt->host->host_busy == SCpnt->host->host_failed) {
  13.447  #if 0
  13.448 -		up(SCpnt->host->eh_wait);
  13.449 +        SCSI_LOG_ERROR_RECOVERY(5, printk(
  13.450 +            "Waking error handler thread (%d)\n",
  13.451 +            atomic_read(&SCpnt->host->eh_wait->count)));
  13.452 +        up(SCpnt->host->eh_wait);
  13.453  #endif
  13.454 -	}
  13.455 -
  13.456 -	spin_unlock_irqrestore(&device_request_lock, flags);
  13.457 -
  13.458 +    }
  13.459 +    
  13.460 +    spin_unlock_irqrestore(&device_request_lock, flags);
  13.461 +    
  13.462  #if 0
  13.463 -        /*
  13.464 -         * Wake up anyone waiting for this device.  Do this after we
  13.465 -         * have released the lock, as they will need it as soon as
  13.466 -         * they wake up.  
  13.467 -         */
  13.468 -	wake_up(&SDpnt->scpnt_wait);
  13.469 +    /*
  13.470 +     * Wake up anyone waiting for this device.  Do this after we
  13.471 +     * have released the lock, as they will need it as soon as
  13.472 +     * they wake up.  
  13.473 +     */
  13.474 +    wake_up(&SDpnt->scpnt_wait);
  13.475  #endif
  13.476 -
  13.477 +    
  13.478  }
  13.479  
  13.480  /*
  13.481 @@ -611,21 +617,21 @@ inline void __scsi_release_command(Scsi_
  13.482   */
  13.483  void scsi_release_command(Scsi_Cmnd * SCpnt)
  13.484  {
  13.485 -        request_queue_t *q;
  13.486 -        Scsi_Device * SDpnt;
  13.487 -
  13.488 -        SDpnt = SCpnt->device;
  13.489 -
  13.490 -        __scsi_release_command(SCpnt);
  13.491 -
  13.492 -        /*
  13.493 -         * Finally, hit the queue request function to make sure that
  13.494 -         * the device is actually busy if there are requests present.
  13.495 -         * This won't block - if the device cannot take any more, life
  13.496 -         * will go on.  
  13.497 -         */
  13.498 -        q = &SDpnt->request_queue;
  13.499 -        scsi_queue_next_request(q, NULL);                
  13.500 +    request_queue_t *q;
  13.501 +    Scsi_Device * SDpnt;
  13.502 +    
  13.503 +    SDpnt = SCpnt->device;
  13.504 +    
  13.505 +    __scsi_release_command(SCpnt);
  13.506 +    
  13.507 +    /*
  13.508 +     * Finally, hit the queue request function to make sure that
  13.509 +     * the device is actually busy if there are requests present.
  13.510 +     * This won't block - if the device cannot take any more, life
  13.511 +     * will go on.  
  13.512 +     */
  13.513 +    q = &SDpnt->request_queue;
  13.514 +    scsi_queue_next_request(q, NULL);                
  13.515  }
  13.516  
  13.517  /*
  13.518 @@ -640,153 +646,158 @@ void scsi_release_command(Scsi_Cmnd * SC
  13.519  int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
  13.520  {
  13.521  #ifdef DEBUG_DELAY
  13.522 -	unsigned long clock;
  13.523 +    unsigned long clock;
  13.524  #endif
  13.525 -	struct Scsi_Host *host;
  13.526 -	int rtn = 0;
  13.527 -	unsigned long flags = 0;
  13.528 -	unsigned long timeout;
  13.529 -
  13.530 -	ASSERT_LOCK(&io_request_lock, 0);
  13.531 -
  13.532 +    struct Scsi_Host *host;
  13.533 +    int rtn = 0;
  13.534 +    unsigned long flags = 0;
  13.535 +    unsigned long timeout;
  13.536 +    
  13.537 +    ASSERT_LOCK(&io_request_lock, 0);
  13.538 +    
  13.539  #if DEBUG
  13.540 -	unsigned long *ret = 0;
  13.541 +    unsigned long *ret = 0;
  13.542  #ifdef __mips__
  13.543 -	__asm__ __volatile__("move\t%0,$31":"=r"(ret));
  13.544 +    __asm__ __volatile__("move\t%0,$31":"=r"(ret));
  13.545  #else
  13.546 -	ret = __builtin_return_address(0);
  13.547 +    ret = __builtin_return_address(0);
  13.548  #endif
  13.549  #endif
  13.550 -
  13.551 -	host = SCpnt->host;
  13.552 -
  13.553 -	/* Assign a unique nonzero serial_number. */
  13.554 -	if (++serial_number == 0)
  13.555 -		serial_number = 1;
  13.556 -	SCpnt->serial_number = serial_number;
  13.557 -	SCpnt->pid = scsi_pid++;
  13.558 -
  13.559 -	/*
  13.560 -	 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
  13.561 -	 * we can avoid the drive not being ready.
  13.562 -	 */
  13.563 -	timeout = host->last_reset + MIN_RESET_DELAY;
  13.564 -
  13.565 -	if (host->resetting && time_before(jiffies, timeout)) {
  13.566 -		int ticks_remaining = timeout - jiffies;
  13.567 -		/*
  13.568 -		 * NOTE: This may be executed from within an interrupt
  13.569 -		 * handler!  This is bad, but for now, it'll do.  The irq
  13.570 -		 * level of the interrupt handler has been masked out by the
  13.571 -		 * platform dependent interrupt handling code already, so the
  13.572 -		 * sti() here will not cause another call to the SCSI host's
  13.573 -		 * interrupt handler (assuming there is one irq-level per
  13.574 -		 * host).
  13.575 -		 */
  13.576 -		while (--ticks_remaining >= 0)
  13.577 -			mdelay(1 + 999 / HZ);
  13.578 -		host->resetting = 0;
  13.579 -	}
  13.580 -	if (host->hostt->use_new_eh_code) {
  13.581 -		scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
  13.582 -	} else {
  13.583 +    
  13.584 +    host = SCpnt->host;
  13.585 +    
  13.586 +    /* Assign a unique nonzero serial_number. */
  13.587 +    if (++serial_number == 0)
  13.588 +        serial_number = 1;
  13.589 +    SCpnt->serial_number = serial_number;
  13.590 +    SCpnt->pid = scsi_pid++;
  13.591 +    
  13.592 +    /*
  13.593 +     * We will wait MIN_RESET_DELAY clock ticks after the last reset so
  13.594 +     * we can avoid the drive not being ready.
  13.595 +     */
  13.596 +    timeout = host->last_reset + MIN_RESET_DELAY;
  13.597 +    
  13.598 +    if (host->resetting && time_before(jiffies, timeout)) {
  13.599 +        int ticks_remaining = timeout - jiffies;
  13.600 +        /*
  13.601 +         * NOTE: This may be executed from within an interrupt
  13.602 +         * handler!  This is bad, but for now, it'll do.  The irq
  13.603 +         * level of the interrupt handler has been masked out by the
  13.604 +         * platform dependent interrupt handling code already, so the
  13.605 +         * sti() here will not cause another call to the SCSI host's
  13.606 +         * interrupt handler (assuming there is one irq-level per
  13.607 +         * host).
  13.608 +         */
  13.609 +        while (--ticks_remaining >= 0)
  13.610 +            mdelay(1 + 999 / HZ);
  13.611 +        host->resetting = 0;
  13.612 +    }
  13.613 +    if (host->hostt->use_new_eh_code) {
  13.614 +        scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
  13.615 +    } else {
  13.616  #if 0
  13.617 -		scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
  13.618 -			       scsi_old_times_out);
  13.619 +        scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
  13.620 +                       scsi_old_times_out);
  13.621  #endif
  13.622 -	}
  13.623 -
  13.624 -	/*
  13.625 -	 * We will use a queued command if possible, otherwise we will emulate the
  13.626 -	 * queuing and calling of completion function ourselves.
  13.627 -	 */
  13.628 -	SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
  13.629 -	       "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
  13.630 -	SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
  13.631 -			    SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
  13.632 -
  13.633 -	SCpnt->state = SCSI_STATE_QUEUED;
  13.634 -	SCpnt->owner = SCSI_OWNER_LOWLEVEL;
  13.635 -	if (host->can_queue) {
  13.636 -		SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
  13.637 -					   host->hostt->queuecommand));
  13.638 -		/*
  13.639 -		 * Use the old error handling code if we haven't converted the driver
  13.640 -		 * to use the new one yet.  Note - only the new queuecommand variant
  13.641 -		 * passes a meaningful return value.
  13.642 -		 */
  13.643 -		if (host->hostt->use_new_eh_code) {
  13.644 -			/*
  13.645 -			 * Before we queue this command, check if the command
  13.646 -			 * length exceeds what the host adapter can handle.
  13.647 -			 */
  13.648 -			if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
  13.649 -				spin_lock_irqsave(&io_request_lock, flags);
  13.650 -				rtn = host->hostt->queuecommand(SCpnt, scsi_done);
  13.651 -				spin_unlock_irqrestore(&io_request_lock, flags);
  13.652 -				if (rtn != 0) {
  13.653 -					scsi_delete_timer(SCpnt);
  13.654 -					scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
  13.655 -					SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n"));                                
  13.656 -				}
  13.657 -			} else {
  13.658 -				SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n"));
  13.659 -				SCpnt->result = (DID_ABORT << 16);
  13.660 -				spin_lock_irqsave(&io_request_lock, flags);
  13.661 -				scsi_done(SCpnt);
  13.662 -				spin_unlock_irqrestore(&io_request_lock, flags);
  13.663 -				rtn = 1;
  13.664 -			}
  13.665 -		} else {
  13.666 -			/*
  13.667 -			 * Before we queue this command, check if the command
  13.668 -			 * length exceeds what the host adapter can handle.
  13.669 -			 */
  13.670 +    }
  13.671 +    
  13.672 +    /*
  13.673 +     * We will use a queued command if possible, otherwise we will emulate the
  13.674 +     * queuing and calling of completion function ourselves.
  13.675 +     */
  13.676 +    SCSI_LOG_MLQUEUE(3, printk(
  13.677 +        "scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
  13.678 +        "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
  13.679 +        SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
  13.680 +        SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
  13.681 +    
  13.682 +    SCpnt->state = SCSI_STATE_QUEUED;
  13.683 +    SCpnt->owner = SCSI_OWNER_LOWLEVEL;
  13.684 +    if (host->can_queue) {
  13.685 +        SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
  13.686 +                                   host->hostt->queuecommand));
  13.687 +        /*
  13.688 +         * Use the old error handling code if we haven't converted the driver
  13.689 +         * to use the new one yet.  Note - only the new queuecommand variant
  13.690 +         * passes a meaningful return value.
  13.691 +         */
  13.692 +        if (host->hostt->use_new_eh_code) {
  13.693 +            /*
  13.694 +             * Before we queue this command, check if the command
  13.695 +             * length exceeds what the host adapter can handle.
  13.696 +             */
  13.697 +            if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
  13.698 +                spin_lock_irqsave(&io_request_lock, flags);
  13.699 +                rtn = host->hostt->queuecommand(SCpnt, scsi_done);
  13.700 +                spin_unlock_irqrestore(&io_request_lock, flags);
  13.701 +                if (rtn != 0) {
  13.702 +                    scsi_delete_timer(SCpnt);
  13.703 +                    scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
  13.704 +                    SCSI_LOG_MLQUEUE(3, printk(
  13.705 +                        "queuecommand : request rejected\n")); 
  13.706 +                }
  13.707 +            } else {
  13.708 +                SCSI_LOG_MLQUEUE(3, printk(
  13.709 +                    "queuecommand : command too long.\n"));
  13.710 +                SCpnt->result = (DID_ABORT << 16);
  13.711 +                spin_lock_irqsave(&io_request_lock, flags);
  13.712 +                scsi_done(SCpnt);
  13.713 +                spin_unlock_irqrestore(&io_request_lock, flags);
  13.714 +                rtn = 1;
  13.715 +            }
  13.716 +        } else {
  13.717 +            /*
  13.718 +             * Before we queue this command, check if the command
  13.719 +             * length exceeds what the host adapter can handle.
  13.720 +             */
  13.721  #if 0
  13.722 -                    if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
  13.723 -				spin_lock_irqsave(&io_request_lock, flags);
  13.724 -				host->hostt->queuecommand(SCpnt, scsi_old_done);
  13.725 -				spin_unlock_irqrestore(&io_request_lock, flags);
  13.726 -			} else {
  13.727 -				SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n"));
  13.728 -				SCpnt->result = (DID_ABORT << 16);
  13.729 -				spin_lock_irqsave(&io_request_lock, flags);
  13.730 -				scsi_old_done(SCpnt);
  13.731 -				spin_unlock_irqrestore(&io_request_lock, flags);
  13.732 -				rtn = 1;
  13.733 -			}
  13.734 -#endif
  13.735 -
  13.736 -		}
  13.737 -	} else {
  13.738 -		int temp;
  13.739 -
  13.740 -		SCSI_LOG_MLQUEUE(3, printk("command() :  routine at %p\n", host->hostt->command));
  13.741 +            if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
  13.742 +                spin_lock_irqsave(&io_request_lock, flags);
  13.743 +                host->hostt->queuecommand(SCpnt, scsi_old_done);
  13.744 +                spin_unlock_irqrestore(&io_request_lock, flags);
  13.745 +            } else {
  13.746 +                SCSI_LOG_MLQUEUE(3, printk(
  13.747 +                    "queuecommand : command too long.\n"));
  13.748 +                SCpnt->result = (DID_ABORT << 16);
  13.749                  spin_lock_irqsave(&io_request_lock, flags);
  13.750 -		temp = host->hostt->command(SCpnt);
  13.751 -		SCpnt->result = temp;
  13.752 -#ifdef DEBUG_DELAY
  13.753 +                scsi_old_done(SCpnt);
  13.754                  spin_unlock_irqrestore(&io_request_lock, flags);
  13.755 -		clock = jiffies + 4 * HZ;
  13.756 -		while (time_before(jiffies, clock)) {
  13.757 -			barrier();
  13.758 -			cpu_relax();
  13.759 -		}
  13.760 -		printk("done(host = %d, result = %04x) : routine at %p\n",
  13.761 -		       host->host_no, temp, host->hostt->command);
  13.762 -                spin_lock_irqsave(&io_request_lock, flags);
  13.763 +                rtn = 1;
  13.764 +            }
  13.765  #endif
  13.766 -		if (host->hostt->use_new_eh_code) {
  13.767 -			scsi_done(SCpnt);
  13.768 -		} else {
  13.769 -#if 0
  13.770 -			scsi_old_done(SCpnt);
  13.771 +            
  13.772 +        }
  13.773 +    } else {
  13.774 +        int temp;
  13.775 +        
  13.776 +        SCSI_LOG_MLQUEUE(3, printk(
  13.777 +            "command() :  routine at %p\n", host->hostt->command));
  13.778 +        spin_lock_irqsave(&io_request_lock, flags);
  13.779 +        temp = host->hostt->command(SCpnt);
  13.780 +        SCpnt->result = temp;
  13.781 +#ifdef DEBUG_DELAY
  13.782 +        spin_unlock_irqrestore(&io_request_lock, flags);
  13.783 +        clock = jiffies + 4 * HZ;
  13.784 +        while (time_before(jiffies, clock)) {
  13.785 +            barrier();
  13.786 +            cpu_relax();
  13.787 +        }
  13.788 +        printk("done(host = %d, result = %04x) : routine at %p\n",
  13.789 +               host->host_no, temp, host->hostt->command);
  13.790 +        spin_lock_irqsave(&io_request_lock, flags);
  13.791  #endif
  13.792 -		}
  13.793 -                spin_unlock_irqrestore(&io_request_lock, flags);
  13.794 -	}
  13.795 -	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
  13.796 -	return rtn;
  13.797 +        if (host->hostt->use_new_eh_code) {
  13.798 +            scsi_done(SCpnt);
  13.799 +        } else {
  13.800 +#if 0
  13.801 +            scsi_old_done(SCpnt);
  13.802 +#endif
  13.803 +        }
  13.804 +        spin_unlock_irqrestore(&io_request_lock, flags);
  13.805 +    }
  13.806 +    SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
  13.807 +    return rtn;
  13.808  }
  13.809  
  13.810  #ifdef DEVFS_MUST_DIE
  13.811 @@ -806,6 +817,8 @@ void scsi_wait_req (Scsi_Request * SRpnt
  13.812  {
  13.813  #if 0
  13.814      DECLARE_COMPLETION(wait);
  13.815 +#else 
  13.816 +    int wait = 1; 
  13.817  #endif
  13.818  
  13.819  
  13.820 @@ -813,6 +826,8 @@ void scsi_wait_req (Scsi_Request * SRpnt
  13.821      
  13.822  #if 0
  13.823      SRpnt->sr_request.waiting = &wait;
  13.824 +#else 
  13.825 +    SRpnt->sr_request.waiting = (void *)&wait; 
  13.826  #endif
  13.827  
  13.828  
  13.829 @@ -824,28 +839,31 @@ void scsi_wait_req (Scsi_Request * SRpnt
  13.830  
  13.831  #if 0
  13.832      wait_for_completion(&wait);
  13.833 -#endif
  13.834 +    SRpnt->sr_request.waiting = NULL;
  13.835 +#else 
  13.836  
  13.837      /* XXX SMH: in 'standard' driver we think everythings ok here since
  13.838         we've waited on &wait -- hence we deallocate the command structure
  13.839         if it hasn't been done already. This is not the correct behaviour 
  13.840         in xen ... hmm .. how to fix? */
  13.841 -    mdelay(500); 
  13.842 +    int usecs = 0; 
  13.843 +//    printk("scsi_wait_req: about to poll-wait, request is at %p\n", 
  13.844 +//           SRpnt->sr_request); 
  13.845 +    while(*(int *)(SRpnt->sr_request.waiting)) {
  13.846 +        udelay(500); 
  13.847 +        usecs += 500; 
  13.848 +        if(usecs > 1000000) {
  13.849 +            printk("scsi_wait_req: giving up after 1 seconds!\n"); 
  13.850 +            *(int *)(SRpnt->sr_request.waiting) = 0; 
  13.851 +        } 
  13.852 +    }
  13.853 +#endif
  13.854  
  13.855  
  13.856 -    SRpnt->sr_request.waiting = NULL;
  13.857 -
  13.858      if( SRpnt->sr_command != NULL )
  13.859      {
  13.860 -#ifdef SMH_DEBUG
  13.861 -        printk("scsi_wait_req: releasing SRpnt->sr_command = %p\n", 
  13.862 -               SRpnt->sr_command); 
  13.863 -#endif
  13.864          scsi_release_command(SRpnt->sr_command);
  13.865          SRpnt->sr_command = NULL;
  13.866 -#ifdef SMHHACK 
  13.867 -        SRpnt->freeaddr = 0x99991234; 
  13.868 -#endif
  13.869      }
  13.870      
  13.871  }
  13.872 @@ -880,87 +898,77 @@ void scsi_do_req(Scsi_Request * SRpnt, c
  13.873  	      void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
  13.874  		 int timeout, int retries)
  13.875  {
  13.876 -	Scsi_Device * SDpnt = SRpnt->sr_device;
  13.877 -	struct Scsi_Host *host = SDpnt->host;
  13.878 -
  13.879 -	ASSERT_LOCK(&io_request_lock, 0);
  13.880 -
  13.881 -	SCSI_LOG_MLQUEUE(4,
  13.882 -			 {
  13.883 -			 int i;
  13.884 -			 int target = SDpnt->id;
  13.885 -			 int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
  13.886 -			 printk("scsi_do_req (host = %d, channel = %d target = %d, "
  13.887 -		    "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
  13.888 -				"retries = %d)\n"
  13.889 -				"command : ", host->host_no, SDpnt->channel, target, buffer,
  13.890 -				bufflen, done, timeout, retries);
  13.891 -			 for (i	 = 0; i < size; ++i)
  13.892 -			 	printk("%02x  ", ((unsigned char *) cmnd)[i]);
  13.893 -			 	printk("\n");
  13.894 -			 });
  13.895 -
  13.896 -	if (!host) {
  13.897 -		panic("Invalid or not present host.\n");
  13.898 -	}
  13.899 -
  13.900 -	/*
  13.901 -	 * If the upper level driver is reusing these things, then
  13.902 -	 * we should release the low-level block now.  Another one will
  13.903 -	 * be allocated later when this request is getting queued.
  13.904 -	 */
  13.905 -	if( SRpnt->sr_command != NULL )
  13.906 -	{ 
  13.907 -#ifdef SMH_DEBUG
  13.908 -           printk("scsi_do_req: releasing SRpnt->sr_command = %p\n", 
  13.909 -                   SRpnt->sr_command); 
  13.910 -#endif
  13.911 -		scsi_release_command(SRpnt->sr_command);
  13.912 -		SRpnt->sr_command = NULL;
  13.913 -#ifdef SMHHACK
  13.914 -                SRpnt->freeaddr = 0xabbadead;
  13.915 -#endif
  13.916 -	}
  13.917 -
  13.918 -	/*
  13.919 -	 * We must prevent reentrancy to the lowlevel host driver.
  13.920 -	 * This prevents it - we enter a loop until the host we want
  13.921 -	 * to talk to is not busy.  Race conditions are prevented, as
  13.922 -	 * interrupts are disabled in between the time we check for
  13.923 -	 * the host being not busy, and the time we mark it busy
  13.924 -	 * ourselves.  */
  13.925 -
  13.926 -
  13.927 -	/*
  13.928 -	 * Our own function scsi_done (which marks the host as not
  13.929 -	 * busy, disables the timeout counter, etc) will be called by
  13.930 -	 * us or by the scsi_hosts[host].queuecommand() function needs
  13.931 -	 * to also call the completion function for the high level
  13.932 -	 * driver.  */
  13.933 -
  13.934 -	memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd, 
  13.935 -	       sizeof(SRpnt->sr_cmnd));
  13.936 -#ifdef SMHHACK
  13.937 -        SRpnt->freeaddr = 0x1111; 
  13.938 -#endif
  13.939 -
  13.940 -	SRpnt->sr_bufflen = bufflen;
  13.941 -	SRpnt->sr_buffer = buffer;
  13.942 -	SRpnt->sr_allowed = retries;
  13.943 -	SRpnt->sr_done = done;
  13.944 -	SRpnt->sr_timeout_per_command = timeout;
  13.945 -
  13.946 -	if (SRpnt->sr_cmd_len == 0)
  13.947 -		SRpnt->sr_cmd_len = COMMAND_SIZE(SRpnt->sr_cmnd[0]);
  13.948 -
  13.949 -	/*
  13.950 -	 * At this point, we merely set up the command, stick it in the normal
  13.951 -	 * request queue, and return.  Eventually that request will come to the
  13.952 -	 * top of the list, and will be dispatched.
  13.953 -	 */
  13.954 -	scsi_insert_special_req(SRpnt, 0);
  13.955 -
  13.956 -	SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_req()\n"));
  13.957 +    Scsi_Device * SDpnt = SRpnt->sr_device;
  13.958 +    struct Scsi_Host *host = SDpnt->host;
  13.959 +    
  13.960 +    ASSERT_LOCK(&io_request_lock, 0);
  13.961 +    
  13.962 +    SCSI_LOG_MLQUEUE(4,
  13.963 +    {
  13.964 +        int i;
  13.965 +        int target = SDpnt->id;
  13.966 +        int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
  13.967 +        printk("scsi_do_req (host = %d, channel = %d target = %d, "
  13.968 +               "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
  13.969 +               "retries = %d)\n"
  13.970 +               "command : ", host->host_no, SDpnt->channel, target, buffer,
  13.971 +               bufflen, done, timeout, retries);
  13.972 +        for (i	 = 0; i < size; ++i)
  13.973 +            printk("%02x  ", ((unsigned char *) cmnd)[i]);
  13.974 +        printk("\n");
  13.975 +    });
  13.976 +    
  13.977 +    if (!host) {
  13.978 +        panic("Invalid or not present host.\n");
  13.979 +    }
  13.980 +    
  13.981 +    /*
  13.982 +     * If the upper level driver is reusing these things, then
  13.983 +     * we should release the low-level block now.  Another one will
  13.984 +     * be allocated later when this request is getting queued.
  13.985 +     */
  13.986 +    if( SRpnt->sr_command != NULL )
  13.987 +    { 
  13.988 +        scsi_release_command(SRpnt->sr_command);
  13.989 +        SRpnt->sr_command = NULL;
  13.990 +    }
  13.991 +    
  13.992 +    /*
  13.993 +     * We must prevent reentrancy to the lowlevel host driver.
  13.994 +     * This prevents it - we enter a loop until the host we want
  13.995 +     * to talk to is not busy.  Race conditions are prevented, as
  13.996 +     * interrupts are disabled in between the time we check for
  13.997 +     * the host being not busy, and the time we mark it busy
  13.998 +     * ourselves.  */
  13.999 +    
 13.1000 +    
 13.1001 +    /*
 13.1002 +     * Our own function scsi_done (which marks the host as not
 13.1003 +     * busy, disables the timeout counter, etc) will be called by
 13.1004 +     * us or by the scsi_hosts[host].queuecommand() function needs
 13.1005 +     * to also call the completion function for the high level
 13.1006 +     * driver.  */
 13.1007 +    
 13.1008 +    memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd, 
 13.1009 +           sizeof(SRpnt->sr_cmnd));
 13.1010 +    
 13.1011 +    SRpnt->sr_bufflen = bufflen;
 13.1012 +    SRpnt->sr_buffer = buffer;
 13.1013 +    SRpnt->sr_allowed = retries;
 13.1014 +    SRpnt->sr_done = done;
 13.1015 +    SRpnt->sr_timeout_per_command = timeout;
 13.1016 +    
 13.1017 +    if (SRpnt->sr_cmd_len == 0)
 13.1018 +        SRpnt->sr_cmd_len = COMMAND_SIZE(SRpnt->sr_cmnd[0]);
 13.1019 +    
 13.1020 +    /*
 13.1021 +     * At this point, we merely set up the command, stick it in the normal
 13.1022 +     * request queue, and return.  Eventually that request will come to the
 13.1023 +     * top of the list, and will be dispatched.
 13.1024 +     */
 13.1025 +    scsi_insert_special_req(SRpnt, 0);
 13.1026 +    
 13.1027 +    SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_req()\n"));
 13.1028  }
 13.1029   
 13.1030  /*
 13.1031 @@ -995,10 +1003,6 @@ void scsi_init_cmd_from_req(Scsi_Cmnd * 
 13.1032  
 13.1033  	SCpnt->owner = SCSI_OWNER_MIDLEVEL;
 13.1034  	SRpnt->sr_command = SCpnt;
 13.1035 -#ifdef SMH_DEBUG
 13.1036 -        printk("scsi_init_cmd_from_req: SRpnt = %p, SRpnt->sr_command = %p\n", 
 13.1037 -               SRpnt, SRpnt->sr_command); 
 13.1038 -#endif        
 13.1039  
 13.1040  	if (!host) {
 13.1041  		panic("Invalid or not present host.\n");
 13.1042 @@ -1381,10 +1385,10 @@ void scsi_bottom_half_handler(void)
 13.1043                       * see if this was the last command that might
 13.1044                       * have failed.  If so, wake up the error handler.  */
 13.1045                      if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
 13.1046 +#if 0
 13.1047                          SCSI_LOG_ERROR_RECOVERY(5, printk(
 13.1048                              "Waking error handler thread (%d)\n",
 13.1049                              atomic_read(&SCpnt->host->eh_wait->count)));
 13.1050 -#if 0
 13.1051                          up(SCpnt->host->eh_wait);
 13.1052  #endif
 13.1053                      }
 13.1054 @@ -2696,8 +2700,6 @@ int __init scsi_setup(char *str)
 13.1055  __setup("scsihosts=", scsi_setup);
 13.1056  #endif
 13.1057  
 13.1058 -static spinlock_t slock2 = SPIN_LOCK_UNLOCKED; 
 13.1059 -
 13.1060  static int __init init_scsi(void)
 13.1061  {
 13.1062  #ifdef CONFIG_PROC_FS
 13.1063 @@ -2706,14 +2708,6 @@ static int __init init_scsi(void)
 13.1064  
 13.1065  	printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");
 13.1066  
 13.1067 -        {
 13.1068 -            unsigned long flags; 
 13.1069 -            
 13.1070 -            spin_lock_irqsave(&slock2, flags); 
 13.1071 -            spin_unlock_irqrestore(&slock2, flags); 
 13.1072 -            printk("SCSI start of day -- flags = %lx\n", flags); 
 13.1073 -        }
 13.1074 -
 13.1075          if( scsi_init_minimal_dma_pool() != 0 )
 13.1076          {
 13.1077                  return 1;
 13.1078 @@ -2749,17 +2743,10 @@ static int __init init_scsi(void)
 13.1079  	 */
 13.1080  	init_bh(SCSI_BH, scsi_bottom_half_handler);
 13.1081  
 13.1082 -        {
 13.1083 -            unsigned long flags; 
 13.1084 -            
 13.1085 -            spin_lock_irqsave(&slock2, flags); 
 13.1086 -            spin_unlock_irqrestore(&slock2, flags); 
 13.1087 -            printk("SCSI end of day -- flags = %lx\n", flags); 
 13.1088 -        }
 13.1089 +	return 0;
 13.1090 +}
 13.1091  
 13.1092  
 13.1093 -	return 0;
 13.1094 -}
 13.1095  
 13.1096  static void __exit exit_scsi(void)
 13.1097  {
 13.1098 @@ -2973,6 +2960,7 @@ scsi_reset_provider(Scsi_Device *dev, in
 13.1099  		rtn = scsi_old_reset(SCpnt, flag);
 13.1100  		spin_unlock_irqrestore(&io_request_lock, flags);
 13.1101  #endif
 13.1102 +                rtn= 0; 
 13.1103  	}
 13.1104  
 13.1105  	scsi_delete_timer(SCpnt);
    14.1 --- a/xen/drivers/scsi/scsi.h	Thu Mar 06 15:52:22 2003 +0000
    14.2 +++ b/xen/drivers/scsi/scsi.h	Thu Mar 06 16:54:15 2003 +0000
    14.3 @@ -633,18 +633,18 @@ struct scsi_device {
    14.4   * with low level drivers that support multiple outstanding commands.
    14.5   */
    14.6  typedef struct scsi_pointer {
    14.7 -	char *ptr;		/* data pointer */
    14.8 -	int this_residual;	/* left in this buffer */
    14.9 -	struct scatterlist *buffer;	/* which buffer */
   14.10 -	int buffers_residual;	/* how many buffers left */
   14.11 -
   14.12 -        dma_addr_t dma_handle;
   14.13 -
   14.14 -	volatile int Status;
   14.15 -	volatile int Message;
   14.16 -	volatile int have_data_in;
   14.17 -	volatile int sent_command;
   14.18 -	volatile int phase;
   14.19 +    char *ptr;		/* data pointer */
   14.20 +    int this_residual;	/* left in this buffer */
   14.21 +    struct scatterlist *buffer;	/* which buffer */
   14.22 +    int buffers_residual;	/* how many buffers left */
   14.23 +    
   14.24 +    dma_addr_t dma_handle;
   14.25 +    
   14.26 +    volatile int Status;
   14.27 +    volatile int Message;
   14.28 +    volatile int have_data_in;
   14.29 +    volatile int sent_command;
   14.30 +    volatile int phase;
   14.31  } Scsi_Pointer;
   14.32  
   14.33  /*
   14.34 @@ -685,146 +685,149 @@ struct scsi_request {
   14.35  };
   14.36  
   14.37  /*
   14.38 - * FIXME(eric) - one of the great regrets that I have is that I failed to define
   14.39 - * these structure elements as something like sc_foo instead of foo.  This would
   14.40 - * make it so much easier to grep through sources and so forth.  I propose that
   14.41 - * all new elements that get added to these structures follow this convention.
   14.42 - * As time goes on and as people have the stomach for it, it should be possible to 
   14.43 - * go back and retrofit at least some of the elements here with with the prefix.
   14.44 - */
   14.45 + * FIXME(eric) - one of the great regrets that I have is that I failed
   14.46 + * to define these structure elements as something like sc_foo instead
   14.47 + * of foo.  This would make it so much easier to grep through sources
   14.48 + * and so forth.  I propose that all new elements that get added to
   14.49 + * these structures follow this convention.  As time goes on and as
   14.50 + * people have the stomach for it, it should be possible to go back
   14.51 + * and retrofit at least some of the elements here with with the
   14.52 + * prefix.  
   14.53 +*/
   14.54 +
   14.55  struct scsi_cmnd {
   14.56 -	int     sc_magic;
   14.57 +    int     sc_magic;
   14.58  /* private: */
   14.59 -	/*
   14.60 -	 * This information is private to the scsi mid-layer.  Wrapping it in a
   14.61 -	 * struct private is a way of marking it in a sort of C++ type of way.
   14.62 -	 */
   14.63 -	struct Scsi_Host *host;
   14.64 -	unsigned short state;
   14.65 -	unsigned short owner;
   14.66 -	Scsi_Device *device;
   14.67 -	Scsi_Request *sc_request;
   14.68 -	struct scsi_cmnd *next;
   14.69 -	struct scsi_cmnd *reset_chain;
   14.70 -
   14.71 -	int eh_state;		/* Used for state tracking in error handlr */
   14.72 -	void (*done) (struct scsi_cmnd *);	/* Mid-level done function */
   14.73 -	/*
   14.74 -	   A SCSI Command is assigned a nonzero serial_number when internal_cmnd
   14.75 -	   passes it to the driver's queue command function.  The serial_number
   14.76 -	   is cleared when scsi_done is entered indicating that the command has
   14.77 -	   been completed.  If a timeout occurs, the serial number at the moment
   14.78 -	   of timeout is copied into serial_number_at_timeout.  By subsequently
   14.79 -	   comparing the serial_number and serial_number_at_timeout fields
   14.80 -	   during abort or reset processing, we can detect whether the command
   14.81 -	   has already completed.  This also detects cases where the command has
   14.82 -	   completed and the SCSI Command structure has already being reused
   14.83 -	   for another command, so that we can avoid incorrectly aborting or
   14.84 -	   resetting the new command.
   14.85 -	 */
   14.86 -
   14.87 -	unsigned long serial_number;
   14.88 -	unsigned long serial_number_at_timeout;
   14.89 +    /*
   14.90 +     * This information is private to the scsi mid-layer.  Wrapping it in a
   14.91 +     * struct private is a way of marking it in a sort of C++ type of way.
   14.92 +     */
   14.93 +    struct Scsi_Host *host;
   14.94 +    unsigned short state;
   14.95 +    unsigned short owner;
   14.96 +    Scsi_Device *device;
   14.97 +    Scsi_Request *sc_request;
   14.98 +    struct scsi_cmnd *next;
   14.99 +    struct scsi_cmnd *reset_chain;
  14.100 +    
  14.101 +    int eh_state;		/* Used for state tracking in error handlr */
  14.102 +    void (*done) (struct scsi_cmnd *);	/* Mid-level done function */
  14.103 +    /*
  14.104 +      A SCSI Command is assigned a nonzero serial_number when internal_cmnd
  14.105 +      passes it to the driver's queue command function.  The serial_number
  14.106 +      is cleared when scsi_done is entered indicating that the command has
  14.107 +      been completed.  If a timeout occurs, the serial number at the moment
  14.108 +      of timeout is copied into serial_number_at_timeout.  By subsequently
  14.109 +      comparing the serial_number and serial_number_at_timeout fields
  14.110 +      during abort or reset processing, we can detect whether the command
  14.111 +      has already completed.  This also detects cases where the command has
  14.112 +      completed and the SCSI Command structure has already being reused
  14.113 +      for another command, so that we can avoid incorrectly aborting or
  14.114 +      resetting the new command.
  14.115 +    */
  14.116 +    
  14.117 +    unsigned long serial_number;
  14.118 +    unsigned long serial_number_at_timeout;
  14.119  
  14.120 -	int retries;
  14.121 -	int allowed;
  14.122 -	int timeout_per_command;
  14.123 -	int timeout_total;
  14.124 -	int timeout;
  14.125 -
  14.126 -	/*
  14.127 -	 * We handle the timeout differently if it happens when a reset, 
  14.128 -	 * abort, etc are in process. 
  14.129 -	 */
  14.130 -	unsigned volatile char internal_timeout;
  14.131 -	struct scsi_cmnd *bh_next;	/* To enumerate the commands waiting 
  14.132 -					   to be processed. */
  14.133 -
  14.134 +    int retries;
  14.135 +    int allowed;
  14.136 +    int timeout_per_command;
  14.137 +    int timeout_total;
  14.138 +    int timeout;
  14.139 +    
  14.140 +    /*
  14.141 +     * We handle the timeout differently if it happens when a reset, 
  14.142 +     * abort, etc are in process. 
  14.143 +     */
  14.144 +    unsigned volatile char internal_timeout;
  14.145 +    struct scsi_cmnd *bh_next;	/* To enumerate the commands waiting 
  14.146 +                                   to be processed. */
  14.147 +    
  14.148  /* public: */
  14.149 -
  14.150 -	unsigned int target;
  14.151 -	unsigned int lun;
  14.152 -	unsigned int channel;
  14.153 -	unsigned char cmd_len;
  14.154 -	unsigned char old_cmd_len;
  14.155 -	unsigned char sc_data_direction;
  14.156 -	unsigned char sc_old_data_direction;
  14.157 -
  14.158 -	/* These elements define the operation we are about to perform */
  14.159 -	unsigned char cmnd[MAX_COMMAND_SIZE];
  14.160 -	unsigned request_bufflen;	/* Actual request size */
  14.161 -
  14.162 -	struct timer_list eh_timeout;	/* Used to time out the command. */
  14.163 -	void *request_buffer;		/* Actual requested buffer */
  14.164 -        void **bounce_buffers;		/* Array of bounce buffers when using scatter-gather */
  14.165 -
  14.166 -	/* These elements define the operation we ultimately want to perform */
  14.167 -	unsigned char data_cmnd[MAX_COMMAND_SIZE];
  14.168 -	unsigned short old_use_sg;	/* We save  use_sg here when requesting
  14.169 -					 * sense info */
  14.170 -	unsigned short use_sg;	/* Number of pieces of scatter-gather */
  14.171 -	unsigned short sglist_len;	/* size of malloc'd scatter-gather list */
  14.172 -	unsigned short abort_reason;	/* If the mid-level code requests an
  14.173 +    
  14.174 +    unsigned int target;
  14.175 +    unsigned int lun;
  14.176 +    unsigned int channel;
  14.177 +    unsigned char cmd_len;
  14.178 +    unsigned char old_cmd_len;
  14.179 +    unsigned char sc_data_direction;
  14.180 +    unsigned char sc_old_data_direction;
  14.181 +    
  14.182 +    /* These elements define the operation we are about to perform */
  14.183 +    unsigned char cmnd[MAX_COMMAND_SIZE];
  14.184 +    unsigned request_bufflen;	/* Actual request size */
  14.185 +    
  14.186 +    struct timer_list eh_timeout; /* Used to time out the command. */
  14.187 +    void *request_buffer;  /* Actual requested buffer */
  14.188 +    void **bounce_buffers; /* Array of bounce buffers when 
  14.189 +                              using scatter-gather */
  14.190 +    
  14.191 +    /* These elements define the operation we ultimately want to perform */
  14.192 +    unsigned char data_cmnd[MAX_COMMAND_SIZE];
  14.193 +    unsigned short old_use_sg;	/* We save  use_sg here when requesting
  14.194 +                                 * sense info */
  14.195 +    unsigned short use_sg;	/* Number of pieces of scatter-gather */
  14.196 +    unsigned short sglist_len;	/* size of malloc'd scatter-gather list */
  14.197 +    unsigned short abort_reason;	/* If the mid-level code requests an
  14.198  					 * abort, this is the reason. */
  14.199 -	unsigned bufflen;	/* Size of data buffer */
  14.200 -	void *buffer;		/* Data buffer */
  14.201 -
  14.202 -	unsigned underflow;	/* Return error if less than
  14.203 -				   this amount is transferred */
  14.204 -	unsigned old_underflow;	/* save underflow here when reusing the
  14.205 +    unsigned bufflen;	/* Size of data buffer */
  14.206 +    void *buffer;		/* Data buffer */
  14.207 +    
  14.208 +    unsigned underflow;	/* Return error if less than
  14.209 +                           this amount is transferred */
  14.210 +    unsigned old_underflow;	/* save underflow here when reusing the
  14.211  				 * command for error handling */
  14.212 -
  14.213 -	unsigned transfersize;	/* How much we are guaranteed to
  14.214 +    
  14.215 +    unsigned transfersize;	/* How much we are guaranteed to
  14.216  				   transfer with each SCSI transfer
  14.217  				   (ie, between disconnect / 
  14.218  				   reconnects.   Probably == sector
  14.219  				   size */
  14.220 -
  14.221 -	int resid;		/* Number of bytes requested to be
  14.222 -				   transferred less actual number
  14.223 -				   transferred (0 if not supported) */
  14.224 -
  14.225 -	struct request request;	/* A copy of the command we are
  14.226 +    
  14.227 +    int resid;		/* Number of bytes requested to be
  14.228 +                           transferred less actual number
  14.229 +                           transferred (0 if not supported) */
  14.230 +    
  14.231 +    struct request request;	/* A copy of the command we are
  14.232  				   working on */
  14.233 -
  14.234 -	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];		/* obtained by REQUEST SENSE
  14.235 -						 * when CHECK CONDITION is
  14.236 -						 * received on original command 
  14.237 -						 * (auto-sense) */
  14.238 -
  14.239 -	unsigned flags;
  14.240 -
  14.241 -	/*
  14.242 -	 * Used to indicate that a command which has timed out also
  14.243 -	 * completed normally.  Typically the completion function will
  14.244 -	 * do nothing but set this flag in this instance because the
  14.245 -	 * timeout handler is already running.
  14.246 -	 */
  14.247 -	unsigned done_late:1;
  14.248 +    
  14.249 +    unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];		
  14.250 +    /* obtained by REQUEST SENSE when CHECK CONDITION is
  14.251 +     * received on original command (auto-sense) */
  14.252  
  14.253 -	/* Low-level done function - can be used by low-level driver to point
  14.254 -	 *        to completion function.  Not used by mid/upper level code. */
  14.255 -	void (*scsi_done) (struct scsi_cmnd *);
  14.256 -
  14.257 -	/*
  14.258 -	 * The following fields can be written to by the host specific code. 
  14.259 -	 * Everything else should be left alone. 
  14.260 -	 */
  14.261 -
  14.262 -	Scsi_Pointer SCp;	/* Scratchpad used by some host adapters */
  14.263 +    unsigned flags;
  14.264 +    
  14.265 +    /*
  14.266 +     * Used to indicate that a command which has timed out also
  14.267 +     * completed normally.  Typically the completion function will
  14.268 +     * do nothing but set this flag in this instance because the
  14.269 +     * timeout handler is already running.
  14.270 +     */
  14.271 +    unsigned done_late:1;
  14.272 +    
  14.273 +    /* Low-level done function - can be used by low-level driver to point
  14.274 +     *        to completion function.  Not used by mid/upper level code. */
  14.275 +    void (*scsi_done) (struct scsi_cmnd *);
  14.276 +    
  14.277 +    /*
  14.278 +     * The following fields can be written to by the host specific code. 
  14.279 +     * Everything else should be left alone. 
  14.280 +     */
  14.281 +    
  14.282 +    Scsi_Pointer SCp;	/* Scratchpad used by some host adapters */
  14.283 +    
  14.284 +    unsigned char *host_scribble;	
  14.285  
  14.286 -	unsigned char *host_scribble;	/* The host adapter is allowed to
  14.287 -					   * call scsi_malloc and get some memory
  14.288 -					   * and hang it here.     The host adapter
  14.289 -					   * is also expected to call scsi_free
  14.290 -					   * to release this memory.  (The memory
  14.291 -					   * obtained by scsi_malloc is guaranteed
  14.292 -					   * to be at an address < 16Mb). */
  14.293 +    /* The host adapter is allowed to call scsi_malloc and get some
  14.294 +     * memory and hang it here.  The host adapter is also expected to
  14.295 +     * call scsi_free to release this memory.  (The memory obtained
  14.296 +     * by scsi_malloc is guaranteed to be at an address < 16Mb). */
  14.297  
  14.298 -	int result;		/* Status code from lower level driver */
  14.299 -
  14.300 -	unsigned char tag;	/* SCSI-II queued command tag */
  14.301 -	unsigned long pid;	/* Process ID, starts at 0 */
  14.302 +    
  14.303 +    int result;		/* Status code from lower level driver */
  14.304 +    
  14.305 +    unsigned char tag;	/* SCSI-II queued command tag */
  14.306 +    unsigned long pid;	/* Process ID, starts at 0 */
  14.307  };
  14.308  
  14.309  /*
    15.1 --- a/xen/drivers/scsi/scsi_error.c	Thu Mar 06 15:52:22 2003 +0000
    15.2 +++ b/xen/drivers/scsi/scsi_error.c	Thu Mar 06 16:54:15 2003 +0000
    15.3 @@ -1914,9 +1914,9 @@ void scsi_error_handler(void *data)
    15.4  	/*
    15.5  	 * Wake up the thread that created us.
    15.6  	 */
    15.7 +#if 0
    15.8  	SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent %d\n", host->eh_notify->count.counter));
    15.9  
   15.10 -#if 0
   15.11  	up(host->eh_notify);
   15.12  #endif
   15.13  
    16.1 --- a/xen/drivers/scsi/scsi_lib.c	Thu Mar 06 15:52:22 2003 +0000
    16.2 +++ b/xen/drivers/scsi/scsi_lib.c	Thu Mar 06 16:54:15 2003 +0000
    16.3 @@ -73,7 +73,7 @@ static void __scsi_insert_special(reques
    16.4      unsigned long flags;
    16.5      
    16.6      ASSERT_LOCK(&io_request_lock, 0);
    16.7 -    
    16.8 +
    16.9      rq->cmd = SPECIAL;
   16.10      rq->special = data;
   16.11      rq->q = NULL;
   16.12 @@ -362,92 +362,98 @@ static Scsi_Cmnd *__scsi_end_request(Scs
   16.13  				     int requeue,
   16.14  				     int frequeue)
   16.15  {
   16.16 -	struct request *req;
   16.17 -	struct buffer_head *bh;
   16.18 -        Scsi_Device * SDpnt;
   16.19 -	int nsect;
   16.20 -
   16.21 -	ASSERT_LOCK(&io_request_lock, 0);
   16.22 -
   16.23 -	req = &SCpnt->request;
   16.24 -	req->errors = 0;
   16.25 -	if (!uptodate) {
   16.26 -		printk(" I/O error: dev %s, sector %lu\n",
   16.27 -		       kdevname(req->rq_dev), req->sector);
   16.28 +    struct request *req;
   16.29 +    struct buffer_head *bh;
   16.30 +    Scsi_Device * SDpnt;
   16.31 +    int nsect;
   16.32 +    
   16.33 +    ASSERT_LOCK(&io_request_lock, 0);
   16.34 +    
   16.35 +    req = &SCpnt->request;
   16.36 +    req->errors = 0;
   16.37 +    if (!uptodate) {
   16.38 +	printk(" I/O error: dev %s, sector %lu\n",
   16.39 +	       kdevname(req->rq_dev), req->sector);
   16.40 +    }
   16.41 +    do {
   16.42 +	if ((bh = req->bh) != NULL) {
   16.43 +	    nsect = bh->b_size >> 9;
   16.44 +	    blk_finished_io(nsect);
   16.45 +	    req->bh = bh->b_reqnext;
   16.46 +	    bh->b_reqnext = NULL;
   16.47 +	    sectors -= nsect;
   16.48 +	    bh->b_end_io(bh, uptodate);
   16.49 +	    if ((bh = req->bh) != NULL) {
   16.50 +		req->hard_sector += nsect;
   16.51 +		req->hard_nr_sectors -= nsect;
   16.52 +		req->sector += nsect;
   16.53 +		req->nr_sectors -= nsect;
   16.54 +		
   16.55 +		req->current_nr_sectors = bh->b_size >> 9;
   16.56 +		if (req->nr_sectors < req->current_nr_sectors) {
   16.57 +		    req->nr_sectors = req->current_nr_sectors;
   16.58 +		    printk("scsi_end_request: buffer-list destroyed\n");
   16.59 +		}
   16.60 +	    }
   16.61  	}
   16.62 -	do {
   16.63 -		if ((bh = req->bh) != NULL) {
   16.64 -			nsect = bh->b_size >> 9;
   16.65 -			blk_finished_io(nsect);
   16.66 -			req->bh = bh->b_reqnext;
   16.67 -			bh->b_reqnext = NULL;
   16.68 -			sectors -= nsect;
   16.69 -			bh->b_end_io(bh, uptodate);
   16.70 -			if ((bh = req->bh) != NULL) {
   16.71 -				req->hard_sector += nsect;
   16.72 -				req->hard_nr_sectors -= nsect;
   16.73 -				req->sector += nsect;
   16.74 -				req->nr_sectors -= nsect;
   16.75 -
   16.76 -				req->current_nr_sectors = bh->b_size >> 9;
   16.77 -				if (req->nr_sectors < req->current_nr_sectors) {
   16.78 -					req->nr_sectors = req->current_nr_sectors;
   16.79 -					printk("scsi_end_request: buffer-list destroyed\n");
   16.80 -				}
   16.81 -			}
   16.82 -		}
   16.83 -	} while (sectors && bh);
   16.84 -
   16.85 +    } while (sectors && bh);
   16.86 +    
   16.87 +    /*
   16.88 +     * If there are blocks left over at the end, set up the command
   16.89 +     * to queue the remainder of them.
   16.90 +     */
   16.91 +    if (req->bh) {
   16.92 +	request_queue_t *q;
   16.93 +	
   16.94 +	if( !requeue )
   16.95 +	{
   16.96 +	    return SCpnt;
   16.97 +	}
   16.98 +	
   16.99 +	q = &SCpnt->device->request_queue;
  16.100 +	
  16.101 +	req->buffer = bh->b_data;
  16.102  	/*
  16.103 -	 * If there are blocks left over at the end, set up the command
  16.104 -	 * to queue the remainder of them.
  16.105 +	 * Bleah.  Leftovers again.  Stick the leftovers in
  16.106 +	 * the front of the queue, and goose the queue again.
  16.107  	 */
  16.108 -	if (req->bh) {
  16.109 -                request_queue_t *q;
  16.110 -
  16.111 -		if( !requeue )
  16.112 -		{
  16.113 -			return SCpnt;
  16.114 -		}
  16.115 -
  16.116 -                q = &SCpnt->device->request_queue;
  16.117 -
  16.118 -		req->buffer = bh->b_data;
  16.119 -		/*
  16.120 -		 * Bleah.  Leftovers again.  Stick the leftovers in
  16.121 -		 * the front of the queue, and goose the queue again.
  16.122 -		 */
  16.123 -		scsi_queue_next_request(q, SCpnt);
  16.124 -		return SCpnt;
  16.125 -	}
  16.126 +	scsi_queue_next_request(q, SCpnt);
  16.127 +	return SCpnt;
  16.128 +    }
  16.129  #if 0
  16.130 -	/*
  16.131 -	 * This request is done.  If there is someone blocked waiting for this
  16.132 -	 * request, wake them up.  Typically used to wake up processes trying
  16.133 -	 * to swap a page into memory.
  16.134 -	 */
  16.135 -	if (req->waiting != NULL) {
  16.136 -		complete(req->waiting);
  16.137 -	}
  16.138 +    /*
  16.139 +     * This request is done.  If there is someone blocked waiting for this
  16.140 +     * request, wake them up.  Typically used to wake up processes trying
  16.141 +     * to swap a page into memory.
  16.142 +     */
  16.143 +    if (req->waiting != NULL) {
  16.144 +	complete(req->waiting);
  16.145 +    }
  16.146 +#else 
  16.147 +    /* XXX SMH: we're done -- flip the flag for the spinner :-| */
  16.148 +    if(req->waiting && (*(int *)(req->waiting) != NULL)) {
  16.149 +		printk("__scsi_end_request: flipping wait status on req %p\n", req); 
  16.150 +		*(int *)(req->waiting) = NULL; 
  16.151 +    } // else printk("__scsi_end_request: no-one to notify!!\n"); 
  16.152  #endif
  16.153 -	req_finished_io(req);
  16.154 -	add_blkdev_randomness(MAJOR(req->rq_dev));
  16.155 -
  16.156 -        SDpnt = SCpnt->device;
  16.157 -
  16.158 -	/*
  16.159 -	 * This will goose the queue request function at the end, so we don't
  16.160 -	 * need to worry about launching another command.
  16.161 -	 */
  16.162 -	__scsi_release_command(SCpnt);
  16.163 -
  16.164 -	if( frequeue ) {
  16.165 -		request_queue_t *q;
  16.166 -
  16.167 -		q = &SDpnt->request_queue;
  16.168 -		scsi_queue_next_request(q, NULL);                
  16.169 -	}
  16.170 -	return NULL;
  16.171 +    req_finished_io(req);
  16.172 +    add_blkdev_randomness(MAJOR(req->rq_dev));
  16.173 +    
  16.174 +    SDpnt = SCpnt->device;
  16.175 +    
  16.176 +    /*
  16.177 +     * This will goose the queue request function at the end, so we don't
  16.178 +     * need to worry about launching another command.
  16.179 +     */
  16.180 +    __scsi_release_command(SCpnt);
  16.181 +    
  16.182 +    if( frequeue ) {
  16.183 +	request_queue_t *q;
  16.184 +	
  16.185 +	q = &SDpnt->request_queue;
  16.186 +	scsi_queue_next_request(q, NULL);                
  16.187 +    }
  16.188 +    return NULL;
  16.189  }
  16.190  
  16.191  /*
  16.192 @@ -554,6 +560,7 @@ void scsi_io_completion(Scsi_Cmnd * SCpn
  16.193  	int this_count = SCpnt->bufflen >> 9;
  16.194  	request_queue_t *q = &SCpnt->device->request_queue;
  16.195  
  16.196 +	// printk("scsi_io_completion entered.\n"); 
  16.197  	/*
  16.198  	 * We must do one of several things here:
  16.199  	 *
  16.200 @@ -1053,6 +1060,7 @@ void scsi_request_fn(request_queue_t * q
  16.201  			 * get those allocated here.  
  16.202  			 */
  16.203  			if (!SDpnt->scsi_init_io_fn(SCpnt)) {
  16.204 +				printk("scsi_request_fn: scsi_init_io_fn failed :-(\n"); 
  16.205  				SCpnt = __scsi_end_request(SCpnt, 0, 
  16.206  							   SCpnt->request.nr_sectors, 0, 0);
  16.207  				if( SCpnt != NULL )
  16.208 @@ -1068,6 +1076,7 @@ void scsi_request_fn(request_queue_t * q
  16.209  			 * Initialize the actual SCSI command for this request.
  16.210  			 */
  16.211  			if (!STpnt->init_command(SCpnt)) {
  16.212 +				printk("scsi_request_fn: init_command failed :-(\n"); 
  16.213  				scsi_release_buffers(SCpnt);
  16.214  				SCpnt = __scsi_end_request(SCpnt, 0, 
  16.215  							   SCpnt->request.nr_sectors, 0, 0);
    17.1 --- a/xen/drivers/scsi/scsi_merge.c	Thu Mar 06 15:52:22 2003 +0000
    17.2 +++ b/xen/drivers/scsi/scsi_merge.c	Thu Mar 06 16:54:15 2003 +0000
    17.3 @@ -802,6 +802,7 @@ MERGEREQFCT(scsi_merge_requests_fn_dc, 1
    17.4  	int		     this_count;
    17.5  	void		   ** bbpnt;
    17.6  
    17.7 +	// printk("scsi_merge.c: __init_io entered\n"); 
    17.8  	/*
    17.9  	 * FIXME(eric) - don't inline this - it doesn't depend on the
   17.10  	 * integer flags.   Come to think of it, I don't think this is even
    18.1 --- a/xen/drivers/scsi/sd.c	Thu Mar 06 15:52:22 2003 +0000
    18.2 +++ b/xen/drivers/scsi/sd.c	Thu Mar 06 16:54:15 2003 +0000
    18.3 @@ -61,6 +61,8 @@
    18.4  
    18.5  #include <xeno/genhd.h>
    18.6  
    18.7 +#include <asm/domain_page.h>    /* SMH: for [un_]map_domain_mem() */
    18.8 +
    18.9  /*
   18.10   *  static const char RCSid[] = "$Header:";
   18.11   */
   18.12 @@ -324,6 +326,7 @@ static int sd_init_command(Scsi_Cmnd * S
   18.13  	!dpnt->device ||
   18.14  	!dpnt->device->online ||
   18.15  	block + SCpnt->request.nr_sectors > ppnt->nr_sects) {
   18.16 +
   18.17  	SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", 
   18.18  				   SCpnt->request.nr_sectors));
   18.19  	SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
   18.20 @@ -1132,8 +1135,8 @@ static int sd_init()
   18.21  	sd_registered++;
   18.22      }
   18.23      /* We do not support attaching loadable devices yet. */
   18.24 -    if (rscsi_disks)
   18.25 -	return 0;
   18.26 +    if (rscsi_disks) 
   18.27 +	return 0; 
   18.28  
   18.29      rscsi_disks = kmalloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
   18.30      if (!rscsi_disks)
   18.31 @@ -1296,9 +1299,73 @@ static void sd_finish()
   18.32      }
   18.33  #endif
   18.34  
   18.35 +#if 0
   18.36 +	/* XXX SMH: turn on some logging */
   18.37 +	scsi_logging_level = ~0;
   18.38 +	SCSI_SET_LOGGING(SCSI_LOG_HLQUEUE_SHIFT, SCSI_LOG_HLQUEUE_BITS, 1); 
   18.39 +#endif
   18.40 +
   18.41      return;
   18.42  }
   18.43  
   18.44 +
   18.45 +/* 
   18.46 +** XXX SMH: gross 'probe' function to allow xeno world to grope us; 
   18.47 +** this should really not be in the disk-specific code as it should
   18.48 +** report tapes, CDs, etc. But for now this looks like the easiest 
   18.49 +** place to hook it in :-( 
   18.50 +*/
   18.51 +void scsi_probe_devices(xen_disk_info_t *xdi)
   18.52 +{
   18.53 +    Scsi_Disk *sd; 
   18.54 +    int i, base, diskinfo[4];
   18.55 +    xen_disk_info_t *xen_xdi = 
   18.56 +	(xen_disk_info_t *)map_domain_mem(virt_to_phys(xdi));
   18.57 +
   18.58 +    /* We've already had IDE probe => we need to append our info */
   18.59 +    base = xen_xdi->count; 
   18.60 +
   18.61 +    for (sd = rscsi_disks, i = 0; i < sd_template.dev_max; i++, sd++) {
   18.62 +
   18.63 +        if (sd->device !=NULL) { 
   18.64 +
   18.65 +	    xen_xdi->disks[xen_xdi->count].type = XEN_DISK_SCSI; 
   18.66 +	    xen_xdi->disks[xen_xdi->count].capacity = sd->capacity; 
   18.67 +	    xen_xdi->count++; 
   18.68 +
   18.69 +	    /* default bios params to most commonly used values */
   18.70 +	    diskinfo[0] = 0x40;
   18.71 +	    diskinfo[1] = 0x20;
   18.72 +	    diskinfo[2] = (sd->capacity) >> 11;
   18.73 +	    
   18.74 +	    /* override with calculated, extended default,
   18.75 +	       or driver values */
   18.76 +	    /* XXX SMH: gross in-line literal major number. XXX FIXME. */
   18.77 +	    if(sd->device->host->hostt->bios_param != NULL)
   18.78 +		sd->device->host->hostt->bios_param(
   18.79 +		    sd, MKDEV(SCSI_DISK0_MAJOR, 0), &diskinfo[0]);
   18.80 +	    else scsicam_bios_param(sd, MKDEV(SCSI_DISK0_MAJOR, 0), 
   18.81 +				    &diskinfo[0]);
   18.82 +
   18.83 +	    
   18.84 +	    printk (KERN_ALERT "SCSI-XENO %d\n", xen_xdi->count - base);
   18.85 +	    printk (KERN_ALERT "  capacity 0x%x\n", sd->capacity);
   18.86 +	    printk (KERN_ALERT "  head     0x%x\n", diskinfo[0]);
   18.87 +	    printk (KERN_ALERT "  sector   0x%x\n", diskinfo[1]);
   18.88 +	    printk (KERN_ALERT "  cylinder 0x%x\n", diskinfo[2]);
   18.89 +
   18.90 +
   18.91 +	}
   18.92 +    }
   18.93 +
   18.94 +    unmap_domain_mem(xen_xdi);
   18.95 +
   18.96 +    return; 
   18.97 +}	
   18.98 +
   18.99 +
  18.100 +
  18.101 +
  18.102  static int sd_detect(Scsi_Device * SDp)
  18.103  {
  18.104      if (SDp->type != TYPE_DISK && SDp->type != TYPE_MOD)
  18.105 @@ -1463,6 +1530,8 @@ static void sd_detach(Scsi_Device * SDp)
  18.106  
  18.107  static int __init init_sd(void)
  18.108  {
  18.109 +    extern int scsi_register_module(int, void *);
  18.110 +
  18.111      sd_template.module = THIS_MODULE;
  18.112      return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
  18.113  }
    19.1 --- a/xen/include/xeno/config.h	Thu Mar 06 15:52:22 2003 +0000
    19.2 +++ b/xen/include/xeno/config.h	Thu Mar 06 16:54:15 2003 +0000
    19.3 @@ -32,6 +32,7 @@
    19.4  #define CONFIG_BLK_DEV_PIIX 1
    19.5  
    19.6  #define CONFIG_SCSI 1
    19.7 +#define CONFIG_SCSI_LOGGING 1
    19.8  #define CONFIG_BLK_DEV_SD 1
    19.9  #define CONFIG_SD_EXTRA_DEVS 40
   19.10  #define CONFIG_SCSI_MULTI_LUN 1
    20.1 --- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/Makefile	Thu Mar 06 15:52:22 2003 +0000
    20.2 +++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/Makefile	Thu Mar 06 16:54:15 2003 +0000
    20.3 @@ -1,3 +1,4 @@
    20.4  O_TARGET := blk.o
    20.5 -obj-y := xl_block.o xl_segment.o xl_segment_proc.o xl_block_test.o
    20.6 +obj-y := xl_block.o xl_ide.o xl_scsi.o xl_segment.o 
    20.7 +obj-y += xl_segment_proc.o xl_block_test.o
    20.8  include $(TOPDIR)/Rules.make
    21.1 --- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c	Thu Mar 06 15:52:22 2003 +0000
    21.2 +++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c	Thu Mar 06 16:54:15 2003 +0000
    21.3 @@ -1,7 +1,7 @@
    21.4  /******************************************************************************
    21.5   * xl_block.c
    21.6   * 
    21.7 - * Xenolinux virtual block-device driver (xhd).
    21.8 + * Xenolinux virtual block-device driver.
    21.9   * 
   21.10   */
   21.11  
   21.12 @@ -25,22 +25,23 @@
   21.13  #include <asm/atomic.h>
   21.14  #include <asm/uaccess.h>
   21.15  
   21.16 -#define MAJOR_NR XLBLK_MAJOR   /* force defns in blk.h, must precede include */
   21.17 -static int xlblk_major = XLBLK_MAJOR;
   21.18  #include <linux/blk.h>
   21.19  
   21.20  /* Copied from linux/ide.h */
   21.21  typedef unsigned char	byte; 
   21.22  
   21.23 -#define XLBLK_MAX 32 /* Maximum minor devices we support */
   21.24 -#define XLBLK_MAJOR_NAME "xhd"
   21.25 -#define IDE_PARTN_BITS 6                           /* from ide.h::PARTN_BITS */
   21.26 -#define IDE_PARTN_MASK ((1<<IDE_PARTN_BITS)-1)     /* from ide.h::PARTN_MASK */
   21.27 -static int xlblk_blk_size[XLBLK_MAX];
   21.28 -static int xlblk_blksize_size[XLBLK_MAX];
   21.29 -static int xlblk_read_ahead; 
   21.30 -static int xlblk_hardsect_size[XLBLK_MAX];
   21.31 -static int xlblk_max_sectors[XLBLK_MAX];
   21.32 +extern int  xlide_init(int xidx, int idx); 
   21.33 +extern int  xlide_hwsect(int minor); 
   21.34 +extern void xlide_cleanup(void); 
   21.35 +extern int  xlscsi_init(int xidx, int idx);
   21.36 +extern int  xlscsi_hwsect(int minor); 
   21.37 +extern void xlscsi_cleanup(void); 
   21.38 +
   21.39 +static int nide = 0;    // number of IDE devices we have 
   21.40 +static int nscsi = 0;   // number of SCSI devices we have 
   21.41 +
   21.42 +
   21.43 +#define XLBLK_MAX 32 /* XXX SMH: this the max of XLIDE_MAX and XLSCSI_MAX */
   21.44  
   21.45  #define XLBLK_RESPONSE_IRQ _EVENT_BLK_RESP
   21.46  
   21.47 @@ -56,6 +57,7 @@ static int xlblk_max_sectors[XLBLK_MAX];
   21.48  
   21.49  static blk_ring_t *blk_ring;
   21.50  static unsigned int resp_cons; /* Response consumer for comms ring. */
   21.51 +
   21.52  static xen_disk_info_t xlblk_disk_info;
   21.53  atomic_t xlblk_control_count;
   21.54  
   21.55 @@ -69,54 +71,139 @@ int hypervisor_request(void *          i
   21.56                         kdev_t          device,
   21.57  		       struct gendisk *gd);
   21.58  
   21.59 -
   21.60  /* ------------------------------------------------------------------------
   21.61   */
   21.62  
   21.63 -static int xenolinux_block_open(struct inode *inode, struct file *filep)
   21.64 +/* Convert from a XenoLinux (major,minor) to the Xen-level 'physical' device */
   21.65 +static kdev_t xldev_to_physdev(kdev_t xldev) 
   21.66 +{
   21.67 +    int xlmajor = MAJOR(xldev); 
   21.68 +    int major, minor; 
   21.69 +
   21.70 +    switch(xlmajor) { 
   21.71 +    case XLIDE_MAJOR: 
   21.72 +	major = IDE0_MAJOR; 
   21.73 +	minor = 0; /* we do minor offsetting manually by addition */
   21.74 +	break; 
   21.75 +	
   21.76 +    case XLSCSI_MAJOR: 
   21.77 +	major = SCSI_DISK0_MAJOR; 
   21.78 +	minor = 0; /* we do minor offsetting manually by addition */
   21.79 +	break; 
   21.80 +
   21.81 +    default: 
   21.82 +	panic("xldev_to_physdev: unhandled major %d\n", xlmajor); 
   21.83 +	break; 
   21.84 +    } 
   21.85 +
   21.86 +    return MKDEV(major, minor); 
   21.87 +}
   21.88 +
   21.89 +
   21.90 +/*
   21.91 +** Locate the gendisk structure associated with a particular xenolinux disk; 
   21.92 +** this requires a scan of the xen_disk_info[] array currently which kind of
   21.93 +** sucks. However we can clean this whole area up later (i.e. post SOSP). 
   21.94 +*/
   21.95 +struct gendisk *xldev_to_gendisk(kdev_t xldev, int *t) 
   21.96 +{
   21.97 +    int i, j, posn, type; 
   21.98 +
   21.99 +    switch(MAJOR(xldev)) { 
  21.100 +	
  21.101 +    case XLIDE_MAJOR: 
  21.102 +	type = 1; 
  21.103 +	posn = 1; 
  21.104 +	break; 
  21.105 +	
  21.106 +    case XLSCSI_MAJOR: 
  21.107 +	type = 2; 
  21.108 +	posn = 1; 
  21.109 +	break; 
  21.110 +
  21.111 +    default: 
  21.112 +	panic("xldev_to_gendisk: unhandled major %d\n", MAJOR(xldev)); 
  21.113 +	break; 
  21.114 +    } 
  21.115 +
  21.116 +
  21.117 +    for ( i = j = 0; i < xen_disk_info.count; i++ ) {
  21.118 +	if(xen_disk_info.disks[i].type == type)
  21.119 +	    if(++j == posn)
  21.120 +		break; 
  21.121 +    }
  21.122 +
  21.123 +    if(t) 
  21.124 +	*t = type; 
  21.125 +
  21.126 +    return (xen_disk_info.disks[i].gendisk); 
  21.127 +}
  21.128 +
  21.129 +int xenolinux_block_open(struct inode *inode, struct file *filep)
  21.130  {
  21.131      DPRINTK("xenolinux_block_open\n"); 
  21.132      return 0;
  21.133  }
  21.134  
  21.135 -static int xenolinux_block_release(struct inode *inode, struct file *filep)
  21.136 +int xenolinux_block_release(struct inode *inode, struct file *filep)
  21.137  {
  21.138      DPRINTK("xenolinux_block_release\n");
  21.139      return 0;
  21.140  }
  21.141  
  21.142 -static int xenolinux_block_ioctl(struct inode *inode, struct file *filep,
  21.143 +
  21.144 +
  21.145 +int xenolinux_block_ioctl(struct inode *inode, struct file *filep,
  21.146  			  unsigned command, unsigned long argument)
  21.147  {
  21.148 -    int minor_dev;
  21.149 +    int minor_dev, type;
  21.150      struct hd_geometry *geo = (struct hd_geometry *)argument;
  21.151 -
  21.152 +    struct gendisk *gd;     
  21.153 +    struct hd_struct *part; 
  21.154 +    
  21.155      DPRINTK("xenolinux_block_ioctl\n"); 
  21.156  
  21.157      /* check permissions */
  21.158      if (!capable(CAP_SYS_ADMIN)) return -EPERM;
  21.159      if (!inode)                  return -EINVAL;
  21.160 +
  21.161      minor_dev = MINOR(inode->i_rdev);
  21.162      if (minor_dev >= XLBLK_MAX)  return -ENODEV;
  21.163      
  21.164      DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, minor: 0x%x\n",
  21.165                    command, (long) argument, minor_dev); 
  21.166    
  21.167 +    gd = xldev_to_gendisk(inode->i_rdev, &type); 
  21.168 +    part = &gd->part[minor_dev]; 
  21.169 +
  21.170      switch (command)
  21.171      {
  21.172 -    case BLKGETSIZE:                                             /* get size */
  21.173 -        DPRINTK_IOCTL("   BLKGETSIZE: %x %lx\n", BLKGETSIZE, 
  21.174 -                      (long) xlblk_disk_info.disks[0].capacity); 
  21.175 -	return put_user(xlblk_disk_info.disks[0].capacity, 
  21.176 -			(unsigned long *) argument);
  21.177 +    case BLKGETSIZE:
  21.178 +        DPRINTK_IOCTL("   BLKGETSIZE: %x %lx\n", BLKGETSIZE, part->nr_sects); 
  21.179 +	return put_user(part->nr_sects, (unsigned long *) argument);
  21.180  
  21.181      case BLKRRPART:                               /* re-read partition table */
  21.182          DPRINTK_IOCTL("   BLKRRPART: %x\n", BLKRRPART); 
  21.183  	break;
  21.184  
  21.185 -    case BLKBSZGET:                                        /* get block size */
  21.186 -        DPRINTK_IOCTL("   BLKBSZGET: %x\n", BLKBSZGET);
  21.187 -	break;
  21.188 +    case BLKSSZGET:
  21.189 +	switch(type) {
  21.190 +	case 1: 
  21.191 +	    DPRINTK_IOCTL("   BLKSSZGET: %x 0x%x\n", BLKSSZGET, 
  21.192 +			  xlide_hwsect(minor_dev));
  21.193 +	    return xlide_hwsect(minor_dev); 
  21.194 +	    break; 
  21.195 +	case 2: 
  21.196 +	    DPRINTK_IOCTL("   BLKSSZGET: %x 0x%x\n", BLKSSZGET,
  21.197 +			  xlscsi_hwsect(minor_dev));
  21.198 +	    return xlscsi_hwsect(minor_dev); 
  21.199 +	    break; 
  21.200 +
  21.201 +	default: 
  21.202 +	    printk("BLKSSZGET ioctl() on bogus type %d disk!\n", type); 
  21.203 +	    return 0; 
  21.204 +
  21.205 +	}
  21.206  
  21.207      case BLKBSZSET:                                        /* set block size */
  21.208          DPRINTK_IOCTL("   BLKBSZSET: %x\n", BLKBSZSET);
  21.209 @@ -163,13 +250,13 @@ static int xenolinux_block_ioctl(struct 
  21.210      return 0;
  21.211  }
  21.212  
  21.213 -static int xenolinux_block_check(kdev_t dev)
  21.214 +int xenolinux_block_check(kdev_t dev)
  21.215  {
  21.216      DPRINTK("xenolinux_block_check\n");
  21.217      return 0;
  21.218  }
  21.219  
  21.220 -static int xenolinux_block_revalidate(kdev_t dev)
  21.221 +int xenolinux_block_revalidate(kdev_t dev)
  21.222  {
  21.223      DPRINTK("xenolinux_block_revalidate\n"); 
  21.224      return 0;
  21.225 @@ -224,30 +311,13 @@ int hypervisor_request(void *          i
  21.226  
  21.227      case XEN_BLOCK_READ:
  21.228      case XEN_BLOCK_WRITE:
  21.229 -        /* only accept requests for xhd and vhd devices */
  21.230 -	if (!IS_XHD_MAJOR(MAJOR(device)) && !IS_VHD_MAJOR(MAJOR(device)))
  21.231 -	    panic("error: xl_block::hypervisor_request: "
  21.232 -                  "unknown device [0x%x]\n", device);
  21.233 -	phys_device = MAJOR(device);
  21.234 -
  21.235 -	/* Compute real buffer location on disk.
  21.236 -	 * note: gd will be null when we read the partition table.
  21.237 -	 */
  21.238 +        phys_device =  xldev_to_physdev(device); 
  21.239 +	if (!IS_XHD_MAJOR(MAJOR(device)))
  21.240 +            phys_device = MAJOR(device);
  21.241 +	/* Compute real buffer location on disk */
  21.242  	sector_number = block_number;
  21.243 -	if ( gd != NULL )
  21.244 -	{
  21.245 -	  sector_number += gd->part[MINOR(device)&IDE_PARTN_MASK].start_sect;
  21.246 -	}
  21.247 -
  21.248 -	/*
  21.249 -	if (IS_VHD_MAJOR(MAJOR(device)))
  21.250 -	{
  21.251 -	  printk (KERN_ALERT "%lx + %lx = %lx (%x)\n",
  21.252 -		  block_number,
  21.253 -		  gd->part[MINOR(device)&IDE_PARTN_MASK].start_sect,
  21.254 -		  sector_number, device);
  21.255 -	}
  21.256 -	*/
  21.257 +	gd = xldev_to_gendisk(device, NULL); 
  21.258 +	sector_number += gd->part[MINOR(device)].start_sect;
  21.259          break;
  21.260  
  21.261      default:
  21.262 @@ -274,7 +344,7 @@ int hypervisor_request(void *          i
  21.263   * do_xlblk_request
  21.264   *  read a block; request is in a request queue
  21.265   */
  21.266 -static void do_xlblk_request (request_queue_t *rq)
  21.267 +void do_xlblk_request (request_queue_t *rq)
  21.268  {
  21.269      struct request *req;
  21.270      struct buffer_head *bh;
  21.271 @@ -282,9 +352,10 @@ static void do_xlblk_request (request_qu
  21.272      
  21.273      DPRINTK("xlblk.c::do_xlblk_request for '%s'\n", DEVICE_NAME); 
  21.274  
  21.275 -    while ( !rq->plugged && !QUEUE_EMPTY )
  21.276 +    while ( !rq->plugged && !list_empty(&rq->queue_head))
  21.277      {
  21.278 -	if ( (req = CURRENT) == NULL ) goto out;
  21.279 +	if ( (req = blkdev_entry_next_request(&rq->queue_head)) == NULL ) 
  21.280 +	    goto out;
  21.281  		
  21.282          DPRINTK("do_xlblk_request %p: cmd %i, sec %lx, (%li/%li) bh:%p\n",
  21.283                  req, req->cmd, req->sector,
  21.284 @@ -351,7 +422,7 @@ static struct block_device_operations xe
  21.285  
  21.286  static void xlblk_response_int(int irq, void *dev_id, struct pt_regs *ptregs)
  21.287  {
  21.288 -    int i;
  21.289 +    int i; 
  21.290      unsigned long flags; 
  21.291      struct buffer_head *bh;
  21.292      
  21.293 @@ -383,10 +454,18 @@ static void xlblk_response_int(int irq, 
  21.294      resp_cons = i;
  21.295  
  21.296      /* KAF: We can push work down at this point. We have the lock. */
  21.297 -    /* aho: okay, so this is a bit of a hack.  we'll kick every queue... */
  21.298 -    do_xlblk_request(BLK_DEFAULT_QUEUE(XLBLK_MAJOR));
  21.299 -    do_xlseg_requestX(BLK_DEFAULT_QUEUE(XLSEG_MAJOR));
  21.300 -    
  21.301 +    for (i = 0; i < xen_disk_info.count; i++) {
  21.302 +	/*
  21.303 +	** XXX SMH: this is pretty broken ... 
  21.304 +	**     a) should really only kick devs w/ outstanding work 
  21.305 +	**     b) should cover /all/ devs, not just first IDE & SCSI
  21.306 +	** KAF will fix this I'm sure. 
  21.307 +	*/
  21.308 +	do_xlblk_request(BLK_DEFAULT_QUEUE(IDE0_MAJOR));
  21.309 +	do_xlblk_request(BLK_DEFAULT_QUEUE(SCSI_DISK0_MAJOR));
  21.310 +        do_xlseg_requestX(BLK_DEFAULT_QUEUE(XLSEG_MAJOR));
  21.311 +    }
  21.312 +
  21.313      spin_unlock_irqrestore(&io_request_lock, flags);
  21.314  }
  21.315  
  21.316 @@ -419,143 +498,54 @@ int __init xlblk_init(void)
  21.317          BUG();
  21.318      HYPERVISOR_block_io_op();
  21.319      while ( blk_ring->resp_prod != 1 ) barrier();
  21.320 -    printk (KERN_ALERT "xhd block device probe:\n");
  21.321      for ( i = 0; i < xlblk_disk_info.count; i++ )
  21.322      { 
  21.323 +	/* 
  21.324 +	** SMH: initialize all the disks we found; this is complicated a 
  21.325 +	** bit by the fact that we have both IDE and SCSI disks underneath 
  21.326 +	*/
  21.327  	printk (KERN_ALERT "  %2d: type: %d, capacity: %ld\n",
  21.328  		i, xlblk_disk_info.disks[i].type, 
  21.329  		xlblk_disk_info.disks[i].capacity);
  21.330 -    }
  21.331 -    
  21.332 -    SET_MODULE_OWNER(&xenolinux_block_fops);
  21.333 -    result = register_blkdev(xlblk_major, "block", &xenolinux_block_fops);
  21.334 -    if (result < 0) {
  21.335 -	printk (KERN_ALERT "xenolinux block: can't get major %d\n",
  21.336 -		xlblk_major);
  21.337 -	return result;
  21.338 +	
  21.339 +	switch(xen_disk_info.disks[i].type) { 
  21.340 +	case 1: 
  21.341 +	    xlide_init(i, nide++); 
  21.342 +	    break; 
  21.343 +	case 2: 
  21.344 +	    xlscsi_init(i, nscsi++); 
  21.345 +	    break; 
  21.346 +	default: 
  21.347 +	    printk("Unknown Xen disk type %d\n", xen_disk_info.disks[i].type);
  21.348 +	    break; 
  21.349 +	}
  21.350      }
  21.351  
  21.352 -    /* initialize global arrays in drivers/block/ll_rw_block.c */
  21.353 -    for (i = 0; i < XLBLK_MAX; i++) {
  21.354 -	xlblk_blk_size[i]      = xlblk_disk_info.disks[0].capacity;
  21.355 -	xlblk_blksize_size[i]  = 512;
  21.356 -	xlblk_hardsect_size[i] = 512;
  21.357 -	xlblk_max_sectors[i]   = 128;
  21.358 -    }
  21.359 -    xlblk_read_ahead  = 8; 
  21.360 -
  21.361 -    blk_size[xlblk_major]      = xlblk_blk_size;
  21.362 -    blksize_size[xlblk_major]  = xlblk_blksize_size;
  21.363 -    hardsect_size[xlblk_major] = xlblk_hardsect_size;
  21.364 -    read_ahead[xlblk_major]    = xlblk_read_ahead; 
  21.365 -    max_sectors[xlblk_major]   = xlblk_max_sectors;
  21.366 -
  21.367 -    blk_init_queue(BLK_DEFAULT_QUEUE(xlblk_major), do_xlblk_request);
  21.368 -
  21.369 -    /*
  21.370 -     * Turn off barking 'headactive' mode. We dequeue buffer heads as
  21.371 -     * soon as we pass them down to Xen.
  21.372 -     */
  21.373 -    blk_queue_headactive(BLK_DEFAULT_QUEUE(xlblk_major), 0);
  21.374 -
  21.375 -    xlblk_ide_register_disk(0, xlblk_disk_info.disks[0].capacity);
  21.376 -
  21.377 -    printk(KERN_ALERT 
  21.378 -	   "XenoLinux Virtual Block Device Driver installed [device: %d]\n",
  21.379 -	   xlblk_major);
  21.380      return 0;
  21.381  
  21.382   fail:
  21.383      return error;
  21.384  }
  21.385  
  21.386 -void xlblk_ide_register_disk(int idx, unsigned long capacity)
  21.387 -{
  21.388 -    int units;
  21.389 -    int minors;
  21.390 -    struct gendisk *gd;
  21.391 -
  21.392 -    /* plagarized from ide-probe.c::init_gendisk */
  21.393 -    
  21.394 -    units = 2; /* from ide.h::MAX_DRIVES */
  21.395 -
  21.396 -    minors    = units * (1<<IDE_PARTN_BITS);
  21.397 -    gd        = kmalloc (sizeof(struct gendisk), GFP_KERNEL);
  21.398 -    gd->sizes = kmalloc (minors * sizeof(int), GFP_KERNEL);
  21.399 -    gd->part  = kmalloc (minors * sizeof(struct hd_struct), GFP_KERNEL);
  21.400 -    memset(gd->part, 0, minors * sizeof(struct hd_struct));
  21.401 -    
  21.402 -    gd->major        = xlblk_major;  
  21.403 -    gd->major_name   = XLBLK_MAJOR_NAME;
  21.404 -    gd->minor_shift  = IDE_PARTN_BITS; 
  21.405 -    gd->max_p	     = 1<<IDE_PARTN_BITS;
  21.406 -    gd->nr_real	     = units;           
  21.407 -    gd->real_devices = NULL;          
  21.408 -    gd->next	     = NULL;            
  21.409 -    gd->fops         = &xenolinux_block_fops;
  21.410 -    gd->de_arr       = kmalloc (sizeof *gd->de_arr * units, GFP_KERNEL);
  21.411 -    gd->flags	     = kmalloc (sizeof *gd->flags * units, GFP_KERNEL);
  21.412 -
  21.413 -    if (gd->de_arr)  
  21.414 -	memset (gd->de_arr, 0, sizeof *gd->de_arr * units);
  21.415 -
  21.416 -    if (gd->flags) 
  21.417 -	memset (gd->flags, 0, sizeof *gd->flags * units);
  21.418 -
  21.419 -    add_gendisk(gd);
  21.420 -
  21.421 -    xlblk_disk_info.disks[idx].gendisk = gd;
  21.422 -
  21.423 -    register_disk(gd, MKDEV(xlblk_major, 0), 1<<IDE_PARTN_BITS,
  21.424 -		  &xenolinux_block_fops, capacity);
  21.425 -
  21.426 -    {
  21.427 -      int loop = 0;
  21.428 -      printk (KERN_ALERT "Partition Table: (capacity: %lx)\n", capacity);
  21.429 -      for (loop = 0; loop < minors; loop++)
  21.430 -      {
  21.431 -	if (gd->part[loop].start_sect && gd->part[loop].nr_sects)
  21.432 -	{
  21.433 -	  printk (KERN_ALERT 
  21.434 -		  "  %2d: 0x%6lx %8ld    0x%6lx %7ld\n", loop,
  21.435 -		  gd->part[loop].start_sect, gd->part[loop].start_sect,
  21.436 -		  gd->part[loop].nr_sects, gd->part[loop].nr_sects);
  21.437 -	}
  21.438 -      }
  21.439 -    }
  21.440 -
  21.441 -    return;
  21.442 -}
  21.443 -
  21.444 -
  21.445 -
  21.446  static void __exit xlblk_cleanup(void)
  21.447  {
  21.448 -    /* CHANGE FOR MULTIQUEUE */
  21.449 -    blk_cleanup_queue(BLK_DEFAULT_QUEUE(xlblk_major));
  21.450 -
  21.451 -    /* clean up global arrays */
  21.452 -    read_ahead[xlblk_major] = 0;
  21.453 -
  21.454 -    if (blk_size[xlblk_major]) 
  21.455 -	kfree(blk_size[xlblk_major]);
  21.456 -    blk_size[xlblk_major] = NULL;
  21.457 +    int i; 
  21.458  
  21.459 -    if (blksize_size[xlblk_major]) 
  21.460 -	kfree(blksize_size[xlblk_major]);
  21.461 -    blksize_size[xlblk_major] = NULL;
  21.462 +    for ( i = 0; i < xen_disk_info.count; i++ )
  21.463 +    { 
  21.464 +	switch(xen_disk_info.disks[i].type) { 
  21.465 +	case 1: 
  21.466 +	    xlide_cleanup(); 
  21.467 +	    break; 
  21.468 +	case 2: 
  21.469 +	    xlscsi_cleanup(); 
  21.470 +	    break; 
  21.471 +	default: 
  21.472 +	    printk("Unknown Xen disk type %d\n", xen_disk_info.disks[i].type);
  21.473 +	    break; 
  21.474 +	}
  21.475  
  21.476 -    if (hardsect_size[xlblk_major]) 
  21.477 -	kfree(hardsect_size[xlblk_major]);
  21.478 -    hardsect_size[xlblk_major] = NULL;
  21.479 -    
  21.480 -    /* XXX: free each gendisk */
  21.481 -    if (unregister_blkdev(xlblk_major, "block"))
  21.482 -	printk(KERN_ALERT
  21.483 -	       "XenoLinux Virtual Block Device Driver uninstalled w/ errs\n");
  21.484 -    else
  21.485 -	printk(KERN_ALERT 
  21.486 -	       "XenoLinux Virtual Block Device Driver uninstalled\n");
  21.487 +    }
  21.488  
  21.489      return;
  21.490  }
    22.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.2 +++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_ide.c	Thu Mar 06 16:54:15 2003 +0000
    22.3 @@ -0,0 +1,200 @@
    22.4 +/******************************************************************************
    22.5 + * xl_ide.c
    22.6 + * 
    22.7 + * Xenolinux virtual IDE block-device driver.
    22.8 + * 
    22.9 + */
   22.10 +
   22.11 +#include <linux/config.h>
   22.12 +#include <linux/module.h>
   22.13 +
   22.14 +#include <linux/kernel.h>
   22.15 +#include <linux/sched.h>
   22.16 +#include <linux/slab.h>
   22.17 +#include <linux/string.h>
   22.18 +#include <linux/errno.h>
   22.19 +
   22.20 +#include <linux/fs.h>
   22.21 +#include <linux/hdreg.h>
   22.22 +#include <linux/blkdev.h>
   22.23 +#include <linux/major.h>
   22.24 +
   22.25 +#define MAJOR_NR XLIDE_MAJOR   /* force defns in blk.h, must precede include */
   22.26 +static int xlide_major = XLIDE_MAJOR;
   22.27 +#include <linux/blk.h>
   22.28 +
   22.29 +void xlide_ide_register_disk(int, unsigned long);
   22.30 +
   22.31 +#define XLIDE_MAX 32 /* Maximum minor devices we support */
   22.32 +#define XLIDE_MAJOR_NAME "xhd"
   22.33 +#define IDE_PARTN_BITS 6                           /* from ide.h::PARTN_BITS */
   22.34 +#define IDE_PARTN_MASK ((1<<IDE_PARTN_BITS)-1)     /* from ide.h::PARTN_MASK */
   22.35 +static int xlide_blk_size[XLIDE_MAX];
   22.36 +static int xlide_blksize_size[XLIDE_MAX];
   22.37 +static int xlide_read_ahead; 
   22.38 +static int xlide_hardsect_size[XLIDE_MAX];
   22.39 +static int xlide_max_sectors[XLIDE_MAX];
   22.40 +
   22.41 +extern xen_disk_info_t xen_disk_info;
   22.42 +
   22.43 +
   22.44 +extern int xenolinux_block_open(struct inode *inode, struct file *filep);
   22.45 +extern int xenolinux_block_release(struct inode *inode, struct file *filep);
   22.46 +extern int xenolinux_block_ioctl(struct inode *inode, struct file *filep,
   22.47 +				 unsigned command, unsigned long argument);
   22.48 +extern int xenolinux_block_check(kdev_t dev);
   22.49 +extern int xenolinux_block_revalidate(kdev_t dev);
   22.50 +
   22.51 +
   22.52 +extern void do_xlblk_request (request_queue_t *rq); 
   22.53 +
   22.54 +
   22.55 +static struct block_device_operations xlide_block_fops = 
   22.56 +{
   22.57 +    open:               xenolinux_block_open,
   22.58 +    release:            xenolinux_block_release,
   22.59 +    ioctl:              xenolinux_block_ioctl,
   22.60 +    check_media_change: xenolinux_block_check,
   22.61 +    revalidate:         xenolinux_block_revalidate,
   22.62 +};
   22.63 +
   22.64 +
   22.65 +/* tiny inteface fn */
   22.66 +int xlide_hwsect(int minor) 
   22.67 +{
   22.68 +    return xlide_hardsect_size[minor]; 
   22.69 +} 
   22.70 +
   22.71 +
   22.72 +void xlide_register_disk(int xidx, int idx)
   22.73 +{
   22.74 +    int units;
   22.75 +    int minors;
   22.76 +    struct gendisk *gd;
   22.77 +
   22.78 +    /* plagarized from ide-probe.c::init_gendisk */
   22.79 +    units = 2; /* from ide.h::MAX_DRIVES */
   22.80 +
   22.81 +    minors    = units * (1<<IDE_PARTN_BITS);
   22.82 +    gd        = kmalloc (sizeof(struct gendisk), GFP_KERNEL);
   22.83 +    gd->sizes = kmalloc (minors * sizeof(int), GFP_KERNEL);
   22.84 +    gd->part  = kmalloc (minors * sizeof(struct hd_struct), GFP_KERNEL);
   22.85 +    memset(gd->part, 0, minors * sizeof(struct hd_struct));
   22.86 +    
   22.87 +    gd->major        = xlide_major;         /* XXX should be idx-specific */
   22.88 +    gd->major_name   = XLIDE_MAJOR_NAME;    /* XXX should be idx-specific */
   22.89 +    gd->minor_shift  = IDE_PARTN_BITS; 
   22.90 +    gd->max_p	     = 1<<IDE_PARTN_BITS;
   22.91 +    gd->nr_real	     = units;           
   22.92 +    gd->real_devices = NULL;          
   22.93 +    gd->next	     = NULL;            
   22.94 +    gd->fops         = &xlide_block_fops;
   22.95 +    gd->de_arr       = kmalloc (sizeof *gd->de_arr * units, GFP_KERNEL);
   22.96 +    gd->flags	     = kmalloc (sizeof *gd->flags * units, GFP_KERNEL);
   22.97 +
   22.98 +    if (gd->de_arr)  
   22.99 +	memset (gd->de_arr, 0, sizeof *gd->de_arr * units);
  22.100 +
  22.101 +    if (gd->flags) 
  22.102 +	memset (gd->flags, 0, sizeof *gd->flags * units);
  22.103 +
  22.104 +    add_gendisk(gd);
  22.105 +
  22.106 +    xen_disk_info.disks[xidx].gendisk = gd;
  22.107 +
  22.108 +    /* XXX major should be idx-specific */
  22.109 +    register_disk(gd, MKDEV(xlide_major, 0), 1<<IDE_PARTN_BITS, 
  22.110 +		  &xlide_block_fops, xen_disk_info.disks[xidx].capacity);
  22.111 +
  22.112 +    return;
  22.113 +}
  22.114 +
  22.115 +
  22.116 +
  22.117 +/*
  22.118 +** Initialize a XenoLinux IDE disk; the 'xidx' is the index into the 
  22.119 +** xen_disk_info array so we can grab interesting values; the 'idx' is 
  22.120 +** a count of the number of XLSCSI disks we've seen so far, starting at 0
  22.121 +** XXX SMH: this is all so ugly because the xen_disk_info() structure and 
  22.122 +** array doesn't really give us what we want. Ho hum. To be tidied someday. 
  22.123 +*/
  22.124 +int xlide_init(int xidx, int idx) 
  22.125 +{
  22.126 +    int i, major, result;
  22.127 +
  22.128 +    SET_MODULE_OWNER(&xlide_block_fops);
  22.129 +
  22.130 +    major  = xlide_major + idx;  /* XXX assume we have a linear major space */
  22.131 +
  22.132 +    /* XXX SMH: name below should vary with major */
  22.133 +    result = register_blkdev(major, XLIDE_MAJOR_NAME, &xlide_block_fops);
  22.134 +    if (result < 0) {
  22.135 +	printk (KERN_ALERT "XL IDE: can't get major %d\n",
  22.136 +		major);
  22.137 +	return result;
  22.138 +    }
  22.139 +
  22.140 +    /* initialize global arrays in drivers/block/ll_rw_block.c */
  22.141 +    for (i = 0; i < XLIDE_MAX; i++) {
  22.142 +	xlide_blk_size[i]      = xen_disk_info.disks[0].capacity;
  22.143 +	xlide_blksize_size[i]  = 512;
  22.144 +	xlide_hardsect_size[i] = 512;
  22.145 +	xlide_max_sectors[i]   = 128;
  22.146 +    }
  22.147 +    xlide_read_ahead  = 8; 
  22.148 +
  22.149 +    blk_size[major]      = xlide_blk_size;
  22.150 +    blksize_size[major]  = xlide_blksize_size;
  22.151 +    hardsect_size[major] = xlide_hardsect_size;
  22.152 +    read_ahead[major]    = xlide_read_ahead; 
  22.153 +    max_sectors[major]   = xlide_max_sectors;
  22.154 +
  22.155 +    blk_init_queue(BLK_DEFAULT_QUEUE(major), do_xlblk_request);
  22.156 +
  22.157 +    /*
  22.158 +     * Turn off barking 'headactive' mode. We dequeue buffer heads as
  22.159 +     * soon as we pass them down to Xen.
  22.160 +     */
  22.161 +    blk_queue_headactive(BLK_DEFAULT_QUEUE(major), 0);
  22.162 +
  22.163 +    xlide_register_disk(xidx, idx); 
  22.164 +
  22.165 +    printk(KERN_ALERT 
  22.166 +	   "XenoLinux Virtual IDE Device Driver installed [device: %d]\n",
  22.167 +	   major);
  22.168 +
  22.169 +    return 0;
  22.170 +}
  22.171 +
  22.172 +
  22.173 +void xlide_cleanup(void)
  22.174 +{
  22.175 +    /* CHANGE FOR MULTIQUEUE */
  22.176 +    blk_cleanup_queue(BLK_DEFAULT_QUEUE(xlide_major));
  22.177 +
  22.178 +    /* clean up global arrays */
  22.179 +    read_ahead[xlide_major] = 0;
  22.180 +
  22.181 +    if (blk_size[xlide_major]) 
  22.182 +	kfree(blk_size[xlide_major]);
  22.183 +    blk_size[xlide_major] = NULL;
  22.184 +
  22.185 +    if (blksize_size[xlide_major]) 
  22.186 +	kfree(blksize_size[xlide_major]);
  22.187 +    blksize_size[xlide_major] = NULL;
  22.188 +
  22.189 +    if (hardsect_size[xlide_major]) 
  22.190 +	kfree(hardsect_size[xlide_major]);
  22.191 +    hardsect_size[xlide_major] = NULL;
  22.192 +    
  22.193 +    /* XXX: free each gendisk */
  22.194 +    if (unregister_blkdev(xlide_major, XLIDE_MAJOR_NAME))
  22.195 +	printk(KERN_ALERT
  22.196 +	       "XenoLinux Virtual IDE Device Driver uninstalled w/ errs\n");
  22.197 +    else
  22.198 +	printk(KERN_ALERT 
  22.199 +	       "XenoLinux Virtual IDE Device Driver uninstalled\n");
  22.200 +
  22.201 +    return;
  22.202 +}
  22.203 +
    23.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.2 +++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_scsi.c	Thu Mar 06 16:54:15 2003 +0000
    23.3 @@ -0,0 +1,211 @@
    23.4 +/******************************************************************************
    23.5 + * xl_scsi.c
    23.6 + * 
    23.7 + * Xenolinux virtual SCSI block-device driver.
    23.8 + * 
    23.9 + */
   23.10 +
   23.11 +#include <linux/config.h>
   23.12 +#include <linux/module.h>
   23.13 +
   23.14 +#include <linux/kernel.h>
   23.15 +#include <linux/sched.h>
   23.16 +#include <linux/slab.h>
   23.17 +#include <linux/string.h>
   23.18 +#include <linux/errno.h>
   23.19 +
   23.20 +#include <linux/fs.h>
   23.21 +#include <linux/hdreg.h>
   23.22 +#include <linux/blkdev.h>
   23.23 +#include <linux/major.h>
   23.24 +
   23.25 +#define MAJOR_NR XLSCSI_MAJOR   /* force defns in blk.h, must precede include */
   23.26 +static int xlscsi_major = XLSCSI_MAJOR;
   23.27 +#include <linux/blk.h>
   23.28 +
   23.29 +/* Copied from linux/ide.h */
   23.30 +typedef unsigned char	byte; 
   23.31 +
   23.32 +void xlscsi_ide_register_disk(int, unsigned long);
   23.33 +
   23.34 +#define SCSI_DISKS_PER_MAJOR 16    /* max number of devices per scsi major */
   23.35 +#define XLSCSI_MAX 32              /* maximum minor devices we support */
   23.36 +#define XLSCSI_MAJOR_NAME "xsd"
   23.37 +
   23.38 +static int xlscsi_blk_size[XLSCSI_MAX];
   23.39 +static int xlscsi_blksize_size[XLSCSI_MAX];
   23.40 +static int xlscsi_read_ahead; 
   23.41 +static int xlscsi_hardsect_size[XLSCSI_MAX];
   23.42 +static int xlscsi_max_sectors[XLSCSI_MAX];
   23.43 +
   23.44 +#if 0
   23.45 +#define DPRINTK(_f, _a...) printk ( KERN_ALERT _f , ## _a )
   23.46 +#define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
   23.47 +#else
   23.48 +#define DPRINTK(_f, _a...) ((void)0)
   23.49 +#define DPRINTK_IOCTL(_f, _a...) ((void)0)
   23.50 +#endif
   23.51 +
   23.52 +extern xen_disk_info_t xen_disk_info;
   23.53 +
   23.54 +extern int xenolinux_block_open(struct inode *inode, struct file *filep);
   23.55 +extern int xenolinux_block_release(struct inode *inode, struct file *filep);
   23.56 +extern int xenolinux_block_ioctl(struct inode *inode, struct file *filep,
   23.57 +				 unsigned command, unsigned long argument);
   23.58 +extern int xenolinux_block_check(kdev_t dev);
   23.59 +extern int xenolinux_block_revalidate(kdev_t dev);
   23.60 +
   23.61 +
   23.62 +extern void do_xlblk_request (request_queue_t *rq); 
   23.63 +
   23.64 +static struct block_device_operations xlscsi_block_fops = 
   23.65 +{
   23.66 +    open:               xenolinux_block_open,
   23.67 +    release:            xenolinux_block_release,
   23.68 +    ioctl:              xenolinux_block_ioctl,
   23.69 +    check_media_change: xenolinux_block_check,
   23.70 +    revalidate:         xenolinux_block_revalidate,
   23.71 +};
   23.72 +
   23.73 +
   23.74 +/* tiny interface fn */
   23.75 +int xlscsi_hwsect(int minor) 
   23.76 +{
   23.77 +    return xlscsi_hardsect_size[minor]; 
   23.78 +} 
   23.79 +
   23.80 +
   23.81 +void xlscsi_register_disk(int xidx, int idx)
   23.82 +{
   23.83 +    int minors;
   23.84 +    struct gendisk *gd;
   23.85 +    unsigned long capacity; 
   23.86 +
   23.87 +    minors    = XLSCSI_MAX; 
   23.88 +    gd        = kmalloc (sizeof(struct gendisk), GFP_KERNEL);
   23.89 +    gd->sizes = kmalloc (minors * sizeof(int), GFP_KERNEL);
   23.90 +    gd->part  = kmalloc (minors * sizeof(struct hd_struct), GFP_KERNEL);
   23.91 +    memset(gd->part, 0, minors * sizeof(struct hd_struct));
   23.92 +    
   23.93 +    if(idx > 0) 
   23.94 +	printk("xlscsi_register_disk: need fix to handle "
   23.95 +	       "multiple SCSI majors!\n"); 
   23.96 +    
   23.97 +    gd->major        = xlscsi_major;       /* XXX should be idx-specific */
   23.98 +    gd->major_name   = XLSCSI_MAJOR_NAME;  /* XXX should be idx-specific */
   23.99 +    gd->minor_shift  = 4; 
  23.100 +    gd->max_p	     = 1<<4; 
  23.101 +    gd->nr_real	     = SCSI_DISKS_PER_MAJOR; 
  23.102 +    gd->real_devices = NULL;          
  23.103 +    gd->next	     = NULL;            
  23.104 +    gd->fops         = &xlscsi_block_fops;
  23.105 +    gd->de_arr       = kmalloc (sizeof *gd->de_arr * SCSI_DISKS_PER_MAJOR, 
  23.106 +				GFP_KERNEL);
  23.107 +    gd->flags	     = kmalloc (sizeof *gd->flags * SCSI_DISKS_PER_MAJOR, 
  23.108 +				GFP_KERNEL);
  23.109 +
  23.110 +    if (gd->de_arr)  
  23.111 +	memset (gd->de_arr, 0, sizeof *gd->de_arr * SCSI_DISKS_PER_MAJOR);
  23.112 +
  23.113 +    if (gd->flags) 
  23.114 +	memset (gd->flags, 0, sizeof *gd->flags * SCSI_DISKS_PER_MAJOR);
  23.115 +
  23.116 +    add_gendisk(gd);
  23.117 +
  23.118 +    xen_disk_info.disks[xidx].gendisk = gd;
  23.119 +
  23.120 +    /* XXX major below should be idx-specific */
  23.121 +    register_disk(gd, MKDEV(xlscsi_major, 0), 1<<4, &xlscsi_block_fops, 
  23.122 +		  xen_disk_info.disks[xidx].capacity);
  23.123 +
  23.124 +    return;
  23.125 +}
  23.126 +
  23.127 +
  23.128 +/*
  23.129 +** Initialize a XenoLinux SCSI disk; the 'xidx' is the index into the 
  23.130 +** xen_disk_info array so we can grab interesting values; the 'idx' is 
  23.131 +** a count of the number of XLSCSI disks we've seen so far, starting at 0
  23.132 +** XXX SMH: this is all so ugly because the xen_disk_info() structure and 
  23.133 +** array doesn't really give us what we want. Ho hum. To be tidied someday. 
  23.134 +*/
  23.135 +int xlscsi_init(int xidx, int idx)
  23.136 +{
  23.137 +    int i, major, result;
  23.138 +
  23.139 +    SET_MODULE_OWNER(&xlscsi_block_fops);
  23.140 +
  23.141 +    major  = xlscsi_major + idx;   /* XXX assume we have linear major space */
  23.142 +    
  23.143 +    /* XXX SMH: 'name' below should vary for different major values */
  23.144 +    result = register_blkdev(major, XLSCSI_MAJOR_NAME, &xlscsi_block_fops);
  23.145 +
  23.146 +    if (result < 0) {
  23.147 +	printk (KERN_ALERT "XL SCSI: can't get major %d\n", major);
  23.148 +	return result;
  23.149 +    }
  23.150 +
  23.151 +    /* initialize global arrays in drivers/block/ll_rw_blk.c */
  23.152 +    for (i = 0; i < XLSCSI_MAX; i++) {
  23.153 +	xlscsi_blk_size[i]      = xen_disk_info.disks[xidx].capacity;
  23.154 +	xlscsi_blksize_size[i]  = 512;
  23.155 +	xlscsi_hardsect_size[i] = 512;
  23.156 +	xlscsi_max_sectors[i]   = 128;
  23.157 +    }
  23.158 +    xlscsi_read_ahead  = 8; 
  23.159 +
  23.160 +    blk_size[major]      = xlscsi_blk_size;
  23.161 +    blksize_size[major]  = xlscsi_blksize_size;
  23.162 +    hardsect_size[major] = xlscsi_hardsect_size;
  23.163 +    read_ahead[major]    = xlscsi_read_ahead; 
  23.164 +    max_sectors[major]   = xlscsi_max_sectors;
  23.165 +
  23.166 +    blk_init_queue(BLK_DEFAULT_QUEUE(major), do_xlblk_request);
  23.167 +
  23.168 +    /*
  23.169 +     * Turn off barking 'headactive' mode. We dequeue buffer heads as
  23.170 +     * soon as we pass them down to Xen.
  23.171 +     */
  23.172 +    blk_queue_headactive(BLK_DEFAULT_QUEUE(major), 0);
  23.173 +    
  23.174 +    xlscsi_register_disk(xidx, idx);
  23.175 +
  23.176 +    printk(KERN_ALERT 
  23.177 +	   "XenoLinux Virtual SCSI Device Driver installed [device: %d]\n",
  23.178 +	   major);
  23.179 +    return 0;
  23.180 +}
  23.181 +
  23.182 +
  23.183 +
  23.184 +void xlscsi_cleanup(void)
  23.185 +{
  23.186 +    /* CHANGE FOR MULTIQUEUE */
  23.187 +    blk_cleanup_queue(BLK_DEFAULT_QUEUE(xlscsi_major));
  23.188 +
  23.189 +    /* clean up global arrays */
  23.190 +    read_ahead[xlscsi_major] = 0;
  23.191 +
  23.192 +    if (blk_size[xlscsi_major]) 
  23.193 +	kfree(blk_size[xlscsi_major]);
  23.194 +    blk_size[xlscsi_major] = NULL;
  23.195 +
  23.196 +    if (blksize_size[xlscsi_major]) 
  23.197 +	kfree(blksize_size[xlscsi_major]);
  23.198 +    blksize_size[xlscsi_major] = NULL;
  23.199 +
  23.200 +    if (hardsect_size[xlscsi_major]) 
  23.201 +	kfree(hardsect_size[xlscsi_major]);
  23.202 +    hardsect_size[xlscsi_major] = NULL;
  23.203 +    
  23.204 +    /* XXX: free each gendisk */
  23.205 +    if (unregister_blkdev(xlscsi_major, XLSCSI_MAJOR_NAME))
  23.206 +	printk(KERN_ALERT
  23.207 +	       "XenoLinux Virtual SCSI Device Driver uninstalled w/ errs\n");
  23.208 +    else
  23.209 +	printk(KERN_ALERT 
  23.210 +	       "XenoLinux Virtual SCSI Device Driver uninstalled\n");
  23.211 +
  23.212 +    return;
  23.213 +}
  23.214 +
    24.1 --- a/xenolinux-2.4.21-pre4-sparse/include/linux/major.h	Thu Mar 06 15:52:22 2003 +0000
    24.2 +++ b/xenolinux-2.4.21-pre4-sparse/include/linux/major.h	Thu Mar 06 16:54:15 2003 +0000
    24.3 @@ -145,8 +145,8 @@
    24.4  
    24.5  #define	UMEM_MAJOR	116	/* http://www.umem.com/ Battery Backed RAM */
    24.6  
    24.7 -/* note: 123, 124, 125, 126 and 234, 235, 236, 237 are defined in xeno_major */
    24.8 -#include <asm/hypervisor-ifs/xeno-major.h>
    24.9 +#define XLIDE_MAJOR	123	/* XenoLinux IDE Device */
   24.10 +#define XLSCSI_MAJOR	133	/* XenoLinux SCSI Device */
   24.11  
   24.12  #define RTF_MAJOR	150
   24.13  #define RAW_MAJOR	162
    25.1 --- a/xenolinux-2.4.21-pre4-sparse/init/do_mounts.c	Thu Mar 06 15:52:22 2003 +0000
    25.2 +++ b/xenolinux-2.4.21-pre4-sparse/init/do_mounts.c	Thu Mar 06 16:54:15 2003 +0000
    25.3 @@ -232,13 +232,7 @@ static struct dev_name_struct {
    25.4  	{ "ataraid/d15p",0x72F0 },
    25.5  #if defined(CONFIG_XENOLINUX_BLOCK)
    25.6          { "xhda",    0x7B00 },
    25.7 -        { "xhdb",    0x7C00 },
    25.8 -        { "xhdc",    0x7D00 },
    25.9 -        { "xhdd",    0x7E00 },
   25.10 -        { "vhda",    0xEA00 },
   25.11 -        { "vhdb",    0xEB00 },
   25.12 -        { "vhdc",    0xEC00 },
   25.13 -        { "vhdd",    0xED00 },
   25.14 +        { "xsda",    0x8500 },
   25.15  #endif
   25.16  	{ "nftla", 0x5d00 },
   25.17  	{ "nftlb", 0x5d10 },