ia64/xen-unstable

view xen/drivers/scsi/scsi.c @ 945:db2e1ea917df

bitkeeper revision 1.596.1.3 (3fb3b41eWUoRU0H8A0jEX5roXjxKkA)

Many files:
Greatly simplified Xen softirqs. They are now only executed in outermost Xen activation; they are never called within an irq context.
author kaf24@scramble.cl.cam.ac.uk
date Thu Nov 13 16:41:02 2003 +0000 (2003-11-13)
parents d8950052b29f
children 7a554cbf0f58
line source
1 /*
2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4 *
5 * generic mid-level SCSI driver
6 * Initial versions: Drew Eckhardt
7 * Subsequent revisions: Eric Youngdale
8 *
9 * <drew@colorado.edu>
10 *
11 * Bug correction thanks go to :
12 * Rik Faith <faith@cs.unc.edu>
13 * Tommy Thorn <tthorn>
14 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
15 *
16 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
17 * add scatter-gather, multiple outstanding request, and other
18 * enhancements.
19 *
20 * Native multichannel, wide scsi, /proc/scsi and hot plugging
21 * support added by Michael Neuffer <mike@i-connect.net>
22 *
23 * Added request_module("scsi_hostadapter") for kerneld:
24 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modules.conf)
25 * Bjorn Ekwall <bj0rn@blox.se>
26 * (changed to kmod)
27 *
28 * Major improvements to the timeout, abort, and reset processing,
29 * as well as performance modifications for large queue depths by
30 * Leonard N. Zubkoff <lnz@dandelion.com>
31 *
32 * Converted cli() code to spinlocks, Ingo Molnar
33 *
34 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
35 *
36 * out_of_space hacks, D. Gilbert (dpg) 990608
37 */
39 #define REVISION "Revision: 1.00"
40 #define VERSION "Id: scsi.c 1.00 2000/09/26"
42 #include <xeno/config.h>
43 #include <xeno/module.h>
45 #include <xeno/sched.h>
46 #include <xeno/timer.h>
47 #include <xeno/lib.h>
48 #include <xeno/slab.h>
49 #include <xeno/ioport.h>
50 /*#include <xeno/stat.h>*/
51 #include <xeno/blk.h>
52 #include <xeno/interrupt.h>
53 #include <xeno/delay.h>
54 #include <xeno/init.h>
55 /*#include <xeno/smp_lock.h>*/
56 /*#include <xeno/completion.h>*/
58 /* for xeno scsi_probe() stuff... maybe punt somewhere else? */
59 #include <hypervisor-ifs/block.h>
60 #include <xeno/blkdev.h>
62 #define __KERNEL_SYSCALLS__
64 /*#include <xeno/unistd.h>*/
65 #include <xeno/spinlock.h>
67 #include <asm/system.h>
68 #include <asm/irq.h>
69 #include <asm/dma.h>
70 #include <asm/uaccess.h>
72 #include "scsi.h"
73 #include "hosts.h"
74 #include "constants.h"
76 #ifdef CONFIG_KMOD
77 #include <xeno/kmod.h>
78 #endif
80 #undef USE_STATIC_SCSI_MEMORY
82 struct proc_dir_entry *proc_scsi;
84 #ifdef CONFIG_PROC_FS
85 static int scsi_proc_info(char *buffer, char **start, off_t offset, int length);
86 static void scsi_dump_status(int level);
87 #endif
89 /*
90 static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
91 */
93 /*
94 * Definitions and constants.
95 */
97 #define MIN_RESET_DELAY (2*HZ)
99 /* Do not call reset on error if we just did a reset within 15 sec. */
100 #define MIN_RESET_PERIOD (15*HZ)
102 /*
103 * Macro to determine the size of SCSI command. This macro takes vendor
104 * unique commands into account. SCSI commands in groups 6 and 7 are
105 * vendor unique and we will depend upon the command length being
106 * supplied correctly in cmd_len.
107 */
108 #define CDB_SIZE(SCpnt) ((((SCpnt->cmnd[0] >> 5) & 7) < 6) ? \
109 COMMAND_SIZE(SCpnt->cmnd[0]) : SCpnt->cmd_len)
111 /*
112 * Data declarations.
113 */
/* Monotonically increasing id stamped into each dispatched command
 * (see scsi_dispatch_cmd: SCpnt->pid = scsi_pid++). */
114 unsigned long scsi_pid;
/* NOTE(review): no reader or writer of last_cmnd is visible in this
 * chunk -- presumably referenced by code elsewhere; confirm before removal. */
115 Scsi_Cmnd *last_cmnd;
116 /* Command group 3 is reserved and should never be used. */
117 const unsigned char scsi_command_size[8] =
118 {
119 6, 10, 10, 12,
120 16, 12, 10, 10
121 };
/* Source of the unique, nonzero serial numbers handed out by
 * scsi_dispatch_cmd(); skips zero on wraparound. */
122 static unsigned long serial_number;
/* Head/tail of the queue of commands awaiting bottom-half processing,
 * guarded by scsi_bhqueue_lock (declared further down this file). */
123 static Scsi_Cmnd *scsi_bh_queue_head;
124 static Scsi_Cmnd *scsi_bh_queue_tail;
126 /*
127 * Note - the initial logging level can be set here to log events at boot time.
128 * After the system is up, you may enable logging via the /proc interface.
129 */
130 unsigned int scsi_logging_level;
/* Human-readable names indexed by SCSI peripheral device type code. */
132 const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
133 {
134 "Direct-Access ",
135 "Sequential-Access",
136 "Printer ",
137 "Processor ",
138 "WORM ",
139 "CD-ROM ",
140 "Scanner ",
141 "Optical Device ",
142 "Medium Changer ",
143 "Communications ",
144 "Unknown ",
145 "Unknown ",
146 "Unknown ",
147 "Enclosure ",
148 };
150 /*
151 * Function prototypes.
152 */
153 extern void scsi_times_out(Scsi_Cmnd * SCpnt);
154 void scsi_build_commandblocks(Scsi_Device * SDpnt);
156 /*
157 * These are the interface to the old error handling code. It should go away
158 * someday soon.
159 */
160 extern void scsi_old_done(Scsi_Cmnd * SCpnt);
161 extern void scsi_old_times_out(Scsi_Cmnd * SCpnt);
162 extern int scsi_old_reset(Scsi_Cmnd *SCpnt, unsigned int flag);
164 /*
165 * Private interface into the new error handling code.
166 */
167 extern int scsi_new_reset(Scsi_Cmnd *SCpnt, unsigned int flag);
169 /*
170 * Function: scsi_initialize_queue()
171 *
172 * Purpose: Selects queue handler function for a device.
173 *
174 * Arguments: SDpnt - device for which we need a handler function.
175 *
176 * Returns: Nothing
177 *
178 * Lock status: No locking assumed or required.
179 *
180 * Notes: Most devices will end up using scsi_request_fn for the
181 * handler function (at least as things are done now).
182 * The "block" feature basically ensures that only one of
183 * the blocked hosts is active at one time, mainly to work around
184 * buggy DMA chipsets where the memory gets starved.
185 * For this case, we have a special handler function, which
186 * does some checks and ultimately calls scsi_request_fn.
187 *
188 * The single_lun feature is a similar special case.
189 *
190 * We handle these things by stacking the handlers. The
191 * special case handlers simply check a few conditions,
192 * and return if they are not supposed to do anything.
193 * In the event that things are OK, then they call the next
194 * handler in the list - ultimately they call scsi_request_fn
195 * to do the dirty deed.
196 */
197 void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt) {
198 blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
199 blk_queue_headactive(&SDpnt->request_queue, 0);
200 SDpnt->request_queue.queuedata = (void *) SDpnt;
201 }
203 #ifdef MODULE
204 MODULE_PARM(scsi_logging_level, "i");
205 MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero");
207 #else
208 static int __init scsi_logging_setup(char *str)
209 {
210 #if 0
211 int tmp;
213 if (get_option(&str, &tmp) == 1) {
214 scsi_logging_level = (tmp ? ~0 : 0);
215 return 1;
216 } else {
217 printk(KERN_INFO "scsi_logging_setup : usage scsi_logging_level=n "
218 "(n should be 0 or non-zero)\n");
219 return 0;
220 }
221 #else
222 return 0;
223 #endif
225 }
226 __setup("scsi_logging=", scsi_logging_setup);
228 #endif
230 /*
231 * Issue a command and wait for it to complete
232 */
234 static void scsi_wait_done(Scsi_Cmnd * SCpnt)
235 {
236 struct request *req;
238 req = &SCpnt->request;
239 req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
241 #if 0
242 if (req->waiting != NULL) {
243 complete(req->waiting);
244 }
245 #else
246 /* XXX SMH: just use a flag to signal completion; caller spins */
247 if (*(int *)(req->waiting) != 0) {
248 // printk("scsi_wait_done: flipping wait status on req %p\n", req);
249 *(int *)(req->waiting) = 0;
250 }
251 #endif
253 }
255 /*
256 * This lock protects the freelist for all devices on the system.
257 * We could make this finer grained by having a single lock per
258 * device if it is ever found that there is excessive contention
259 * on this lock.
260 */
261 static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED;
263 /*
264 * Used to protect insertion into and removal from the queue of
265 * commands to be processed by the bottom half handler.
266 */
267 static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED;
269 /*
270 * Function: scsi_allocate_request
271 *
272 * Purpose: Allocate a request descriptor.
273 *
274 * Arguments: device - device for which we want a request
275 *
276 * Lock status: No locks assumed to be held. This function is SMP-safe.
277 *
278 * Returns: Pointer to request block.
279 *
280 * Notes: With the new queueing code, it becomes important
281 * to track the difference between a command and a
282 * request. A request is a pending item in the queue that
283 * has not yet reached the top of the queue.
284 */
286 Scsi_Request *scsi_allocate_request(Scsi_Device * device)
287 {
288 Scsi_Request *SRpnt = NULL;
290 if (!device)
291 panic("No device passed to scsi_allocate_request().\n");
293 SRpnt = (Scsi_Request *) kmalloc(sizeof(Scsi_Request), GFP_ATOMIC);
294 if( SRpnt == NULL )
295 {
296 return NULL;
297 }
299 memset(SRpnt, 0, sizeof(Scsi_Request));
300 SRpnt->sr_device = device;
301 SRpnt->sr_host = device->host;
302 SRpnt->sr_magic = SCSI_REQ_MAGIC;
303 SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
305 return SRpnt;
306 }
308 /*
309 * Function: scsi_release_request
310 *
311 * Purpose: Release a request descriptor.
312 *
313 * Arguments: device - device for which we want a request
314 *
315 * Lock status: No locks assumed to be held. This function is SMP-safe.
316 *
317 * Returns: Pointer to request block.
318 *
319 * Notes: With the new queueing code, it becomes important
320 * to track the difference between a command and a
321 * request. A request is a pending item in the queue that
322 * has not yet reached the top of the queue. We still need
323 * to free a request when we are done with it, of course.
324 */
325 void scsi_release_request(Scsi_Request * req)
326 {
327 if( req->sr_command != NULL )
328 {
329 scsi_release_command(req->sr_command);
330 req->sr_command = NULL;
331 }
333 kfree(req);
334 }
336 /*
337 * Function: scsi_allocate_device
338 *
339 * Purpose: Allocate a command descriptor.
340 *
341 * Arguments: device - device for which we want a command descriptor
342 * wait - 1 if we should wait in the event that none
343 * are available.
344 * interruptible - 1 if we should unblock and return NULL
345 * in the event that we must wait, and a signal
346 * arrives.
347 *
348 * Lock status: No locks assumed to be held. This function is SMP-safe.
349 *
350 * Returns: Pointer to command descriptor.
351 *
352 * Notes: Prior to the new queue code, this function was not SMP-safe.
353 *
354 * If the wait flag is true, and we are waiting for a free
355 * command block, this function will interrupt and return
356 * NULL in the event that a signal arrives that needs to
357 * be handled.
358 *
359 * This function is deprecated, and drivers should be
360 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
361 */
363 Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait,
364 int interruptable)
365 {
366 struct Scsi_Host *host;
367 Scsi_Cmnd *SCpnt = NULL;
368 Scsi_Device *SDpnt;
369 unsigned long flags;
371 if (!device)
372 panic("No device passed to scsi_allocate_device().\n");
374 host = device->host;
376 spin_lock_irqsave(&device_request_lock, flags);
378 while (1 == 1) {
379 SCpnt = NULL;
380 if (!device->device_blocked) {
381 if (device->single_lun) {
382 /*
383 * FIXME(eric) - this is not at all optimal. Given that
384 * single lun devices are rare and usually slow
385 * (i.e. CD changers), this is good enough for now, but
386 * we may want to come back and optimize this later.
387 *
388 * Scan through all of the devices attached to this
389 * host, and see if any are active or not. If so,
390 * we need to defer this command.
391 *
392 * We really need a busy counter per device. This would
393 * allow us to more easily figure out whether we should
394 * do anything here or not.
395 */
396 for (SDpnt = host->host_queue;
397 SDpnt;
398 SDpnt = SDpnt->next) {
399 /*
400 * Only look for other devices on the same bus
401 * with the same target ID.
402 */
403 if (SDpnt->channel != device->channel
404 || SDpnt->id != device->id
405 || SDpnt == device) {
406 continue;
407 }
408 if( atomic_read(&SDpnt->device_active) != 0)
409 {
410 break;
411 }
412 }
413 if (SDpnt) {
414 /*
415 * Some other device in this cluster is busy.
416 * If asked to wait, we need to wait, otherwise
417 * return NULL.
418 */
419 SCpnt = NULL;
420 goto busy;
421 }
422 }
423 /*
424 * Now we can check for a free command block for this device.
425 */
426 for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
427 if (SCpnt->request.rq_status == RQ_INACTIVE)
428 break;
429 }
430 }
431 /*
432 * If we couldn't find a free command block, and we have been
433 * asked to wait, then do so.
434 */
435 if (SCpnt) {
436 break;
437 }
438 busy:
439 /*
440 * If we have been asked to wait for a free block, then
441 * wait here.
442 */
443 if (wait) {
444 printk("XXX smh: scsi cannot wait for free cmd block.\n");
445 BUG();
446 #if 0
447 DECLARE_WAITQUEUE(wait, current);
449 /*
450 * We need to wait for a free commandblock. We need to
451 * insert ourselves into the list before we release the
452 * lock. This way if a block were released the same
453 * microsecond that we released the lock, the call
454 * to schedule() wouldn't block (well, it might switch,
455 * but the current task will still be schedulable.
456 */
457 add_wait_queue(&device->scpnt_wait, &wait);
458 if( interruptable ) {
459 set_current_state(TASK_INTERRUPTIBLE);
460 } else {
461 set_current_state(TASK_UNINTERRUPTIBLE);
462 }
464 spin_unlock_irqrestore(&device_request_lock, flags);
466 /*
467 * This should block until a device command block
468 * becomes available.
469 */
470 schedule();
472 spin_lock_irqsave(&device_request_lock, flags);
474 remove_wait_queue(&device->scpnt_wait, &wait);
475 /*
476 * FIXME - Isn't this redundant?? Someone
477 * else will have forced the state back to running.
478 */
479 set_current_state(TASK_RUNNING);
480 /*
481 * In the event that a signal has arrived that we need
482 * to consider, then simply return NULL. Everyone
483 * that calls us should be prepared for this
484 * possibility, and pass the appropriate code back
485 * to the user.
486 */
487 if( interruptable ) {
488 if (signal_pending(current)) {
489 spin_unlock_irqrestore(&device_request_lock, flags);
490 return NULL;
491 }
492 }
493 #endif
494 } else {
495 spin_unlock_irqrestore(&device_request_lock, flags);
496 return NULL;
497 }
498 }
500 SCpnt->request.rq_status = RQ_SCSI_BUSY;
501 SCpnt->request.waiting = NULL; /* And no one is waiting for this
502 * to complete */
503 atomic_inc(&SCpnt->host->host_active);
504 atomic_inc(&SCpnt->device->device_active);
506 SCpnt->buffer = NULL;
507 SCpnt->bufflen = 0;
508 SCpnt->request_buffer = NULL;
509 SCpnt->request_bufflen = 0;
511 SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
512 SCpnt->old_use_sg = 0;
513 SCpnt->transfersize = 0; /* No default transfer size */
514 SCpnt->cmd_len = 0;
516 SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
517 SCpnt->sc_request = NULL;
518 SCpnt->sc_magic = SCSI_CMND_MAGIC;
520 SCpnt->result = 0;
521 SCpnt->underflow = 0; /* Do not flag underflow conditions */
522 SCpnt->old_underflow = 0;
523 SCpnt->resid = 0;
524 SCpnt->state = SCSI_STATE_INITIALIZING;
525 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
527 spin_unlock_irqrestore(&device_request_lock, flags);
529 SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
530 SCpnt->target,
531 atomic_read(&SCpnt->host->host_active)));
533 return SCpnt;
534 }
536 inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
537 {
538 unsigned long flags;
539 Scsi_Device * SDpnt;
541 spin_lock_irqsave(&device_request_lock, flags);
543 SDpnt = SCpnt->device;
545 SCpnt->request.rq_status = RQ_INACTIVE;
546 SCpnt->state = SCSI_STATE_UNUSED;
547 SCpnt->owner = SCSI_OWNER_NOBODY;
548 atomic_dec(&SCpnt->host->host_active);
549 atomic_dec(&SDpnt->device_active);
551 SCSI_LOG_MLQUEUE(5, printk(
552 "Deactivating command for device %d (active=%d, failed=%d)\n",
553 SCpnt->target,
554 atomic_read(&SCpnt->host->host_active),
555 SCpnt->host->host_failed));
556 if (SCpnt->host->host_failed != 0) {
557 SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
558 SCpnt->host->in_recovery,
559 SCpnt->host->eh_active));
560 }
561 /*
562 * If the host is having troubles, then look to see if this was the last
563 * command that might have failed. If so, wake up the error handler.
564 */
565 if (SCpnt->host->in_recovery
566 && !SCpnt->host->eh_active
567 && SCpnt->host->host_busy == SCpnt->host->host_failed) {
568 #if 0
569 SCSI_LOG_ERROR_RECOVERY(5, printk(
570 "Waking error handler thread (%d)\n",
571 atomic_read(&SCpnt->host->eh_wait->count)));
572 up(SCpnt->host->eh_wait);
573 #endif
574 }
576 spin_unlock_irqrestore(&device_request_lock, flags);
578 #if 0
579 /*
580 * Wake up anyone waiting for this device. Do this after we
581 * have released the lock, as they will need it as soon as
582 * they wake up.
583 */
584 wake_up(&SDpnt->scpnt_wait);
585 #endif
587 }
589 /*
590 * Function: scsi_release_command
591 *
592 * Purpose: Release a command block.
593 *
594 * Arguments: SCpnt - command block we are releasing.
595 *
596 * Notes: The command block can no longer be used by the caller once
597 * this funciton is called. This is in effect the inverse
598 * of scsi_allocate_device. Note that we also must perform
599 * a couple of additional tasks. We must first wake up any
600 * processes that might have blocked waiting for a command
601 * block, and secondly we must hit the queue handler function
602 * to make sure that the device is busy. Note - there is an
603 * option to not do this - there were instances where we could
604 * recurse too deeply and blow the stack if this happened
605 * when we were indirectly called from the request function
606 * itself.
607 *
608 * The idea is that a lot of the mid-level internals gunk
609 * gets hidden in this function. Upper level drivers don't
610 * have any chickens to wave in the air to get things to
611 * work reliably.
612 *
613 * This function is deprecated, and drivers should be
614 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
615 */
616 void scsi_release_command(Scsi_Cmnd * SCpnt)
617 {
618 request_queue_t *q;
619 Scsi_Device * SDpnt;
621 SDpnt = SCpnt->device;
623 __scsi_release_command(SCpnt);
625 /*
626 * Finally, hit the queue request function to make sure that
627 * the device is actually busy if there are requests present.
628 * This won't block - if the device cannot take any more, life
629 * will go on.
630 */
631 q = &SDpnt->request_queue;
632 scsi_queue_next_request(q, NULL);
633 }
635 /*
636 * Function: scsi_dispatch_command
637 *
638 * Purpose: Dispatch a command to the low-level driver.
639 *
640 * Arguments: SCpnt - command block we are dispatching.
641 *
642 * Notes:
643 */
/*
 * scsi_dispatch_cmd - hand a fully-built command to the low-level driver.
 *
 * Assigns a unique nonzero serial number and a pid, busy-waits out any
 * remaining post-reset settle time (mdelay loop: this may run in interrupt
 * context, so it cannot sleep), arms the command timer with the new- or
 * old-style timeout handler, then either calls the host's queuecommand()
 * hook under io_request_lock or, for hosts without queueing
 * (host->can_queue == 0), runs the command synchronously via
 * host->hostt->command() and completes it directly.
 *
 * Returns the queuecommand() status: non-zero means the command was
 * rejected (and requeued) or completed immediately; 0 otherwise.
 */
644 int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
645 {
646 #ifdef DEBUG_DELAY
647 unsigned long clock;
648 #endif
649 struct Scsi_Host *host;
650 int rtn = 0;
651 unsigned long flags = 0;
652 unsigned long timeout;
654 ASSERT_LOCK(&io_request_lock, 0);
656 #if DEBUG
657 unsigned long *ret = 0;
658 #ifdef __mips__
659 __asm__ __volatile__("move\t%0,$31":"=r"(ret));
660 #else
661 ret = __builtin_return_address(0);
662 #endif
663 #endif
665 host = SCpnt->host;
667 /* Assign a unique nonzero serial_number. */
668 if (++serial_number == 0)
669 serial_number = 1;
670 SCpnt->serial_number = serial_number;
671 SCpnt->pid = scsi_pid++;
673 /*
674 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
675 * we can avoid the drive not being ready.
676 */
677 timeout = host->last_reset + MIN_RESET_DELAY;
679 if (host->resetting && time_before(jiffies, timeout)) {
680 int ticks_remaining = timeout - jiffies;
681 /*
682 * NOTE: This may be executed from within an interrupt
683 * handler! This is bad, but for now, it'll do. The irq
684 * level of the interrupt handler has been masked out by the
685 * platform dependent interrupt handling code already, so the
686 * sti() here will not cause another call to the SCSI host's
687 * interrupt handler (assuming there is one irq-level per
688 * host).
689 */
690 while (--ticks_remaining >= 0)
691 mdelay(1 + 999 / HZ);
692 host->resetting = 0;
693 }
/* Arm the per-command timer; handler choice tracks the host's EH style. */
694 if (host->hostt->use_new_eh_code) {
695 scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
696 } else {
697 scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
698 scsi_old_times_out);
699 }
701 /*
702 * We will use a queued command if possible, otherwise we will emulate the
703 * queuing and calling of completion function ourselves.
704 */
705 SCSI_LOG_MLQUEUE(3, printk(
706 "scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
707 "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
708 SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
709 SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
711 SCpnt->state = SCSI_STATE_QUEUED;
712 SCpnt->owner = SCSI_OWNER_LOWLEVEL;
713 if (host->can_queue) {
714 SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
715 host->hostt->queuecommand));
716 /*
717 * Use the old error handling code if we haven't converted the driver
718 * to use the new one yet. Note - only the new queuecommand variant
719 * passes a meaningful return value.
720 */
721 if (host->hostt->use_new_eh_code) {
722 /*
723 * Before we queue this command, check if the command
724 * length exceeds what the host adapter can handle.
725 */
726 if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
727 spin_lock_irqsave(&io_request_lock, flags);
728 rtn = host->hostt->queuecommand(SCpnt, scsi_done);
729 spin_unlock_irqrestore(&io_request_lock, flags);
/* Host rejected the command: cancel the timer and requeue it as busy. */
730 if (rtn != 0) {
731 scsi_delete_timer(SCpnt);
732 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
733 SCSI_LOG_MLQUEUE(3, printk(
734 "queuecommand : request rejected\n"));
735 }
736 } else {
737 SCSI_LOG_MLQUEUE(3, printk(
738 "queuecommand : command too long.\n"));
739 SCpnt->result = (DID_ABORT << 16);
740 spin_lock_irqsave(&io_request_lock, flags);
741 scsi_done(SCpnt);
742 spin_unlock_irqrestore(&io_request_lock, flags);
743 rtn = 1;
744 }
745 } else {
746 /*
747 * Before we queue this command, check if the command
748 * length exceeds what the host adapter can handle.
749 */
750 if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) {
751 spin_lock_irqsave(&io_request_lock, flags);
752 host->hostt->queuecommand(SCpnt, scsi_old_done);
753 spin_unlock_irqrestore(&io_request_lock, flags);
754 } else {
755 SCSI_LOG_MLQUEUE(3, printk(
756 "queuecommand : command too long.\n"));
757 SCpnt->result = (DID_ABORT << 16);
758 spin_lock_irqsave(&io_request_lock, flags);
759 scsi_old_done(SCpnt);
760 spin_unlock_irqrestore(&io_request_lock, flags);
761 rtn = 1;
762 }
764 }
765 } else {
/* Non-queueing host: run the command synchronously and complete it here. */
766 int temp;
768 SCSI_LOG_MLQUEUE(3, printk(
769 "command() : routine at %p\n", host->hostt->command));
770 spin_lock_irqsave(&io_request_lock, flags);
771 temp = host->hostt->command(SCpnt);
772 SCpnt->result = temp;
773 #ifdef DEBUG_DELAY
774 spin_unlock_irqrestore(&io_request_lock, flags);
775 clock = jiffies + 4 * HZ;
776 while (time_before(jiffies, clock)) {
777 barrier();
778 cpu_relax();
779 }
780 printk("done(host = %d, result = %04x) : routine at %p\n",
781 host->host_no, temp, host->hostt->command);
782 spin_lock_irqsave(&io_request_lock, flags);
783 #endif
784 if (host->hostt->use_new_eh_code) {
785 scsi_done(SCpnt);
786 } else {
787 scsi_old_done(SCpnt);
788 }
789 spin_unlock_irqrestore(&io_request_lock, flags);
790 }
791 SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
792 return rtn;
793 }
795 #ifdef DEVFS_MUST_DIE
796 devfs_handle_t scsi_devfs_handle;
797 #endif
799 /*
800 * scsi_do_cmd sends all the commands out to the low-level driver. It
801 * handles the specifics required for each low level driver - ie queued
802 * or non queued. It also prevents conflicts when different high level
803 * drivers go for the same host at the same time.
804 */
/*
 * scsi_wait_req - issue a request and busy-wait for its completion.
 *
 * This Xen port has no completion/waitqueue support, so instead of a
 * DECLARE_COMPLETION the request's ->waiting field is pointed at an int
 * flag on this stack frame.  scsi_wait_done() clears the flag when the
 * command finishes; we spin here running softirqs between polls so the
 * completion path can make progress, printing a diagnostic roughly once
 * per second of waiting.  Any command block still attached to the
 * request afterwards is released.
 */
806 void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
807 void *buffer, unsigned bufflen,
808 int timeout, int retries)
809 {
810 #if 0
811 DECLARE_COMPLETION(wait);
812 #else
/* wait: completion flag cleared by scsi_wait_done(); usecs: poll-time
 * accumulator for the once-a-second diagnostic below. */
813 int wait = 1;
814 int usecs = 0;
815 #endif
818 request_queue_t *q = &SRpnt->sr_device->request_queue;
820 #if 0
821 SRpnt->sr_request.waiting = &wait;
822 #else
823 SRpnt->sr_request.waiting = (void *)&wait;
824 #endif
827 SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
828 scsi_do_req (SRpnt, (void *) cmnd,
829 buffer, bufflen, scsi_wait_done, timeout, retries);
830 generic_unplug_device(q);
833 #if 0
834 wait_for_completion(&wait);
835 SRpnt->sr_request.waiting = NULL;
836 #else
838 /* XXX SMH: in 'standard' driver we think everythings ok here since
839 we've waited on &wait -- hence we deallocate the command structure
840 if it hasn't been done already. This is not the correct behaviour
841 in xen ... hmm .. how to fix? */
842 while(wait) {
843 do_softirq(); /* XXX KAF: this is safe, and necessary!! */
844 udelay(500);
845 usecs += 500;
846 if(usecs > 1000000) {
847 printk("scsi_wait_req: still waiting...!\n");
848 usecs = 0;
849 }
850 }
851 #endif
/* Command has completed (flag cleared); free the attached command block
 * if completion did not already do so. */
854 if( SRpnt->sr_command != NULL )
855 {
856 scsi_release_command(SRpnt->sr_command);
857 SRpnt->sr_command = NULL;
858 }
860 }
862 /*
863 * Function: scsi_do_req
864 *
865 * Purpose: Queue a SCSI request
866 *
867 * Arguments: SRpnt - command descriptor.
868 * cmnd - actual SCSI command to be performed.
869 * buffer - data buffer.
870 * bufflen - size of data buffer.
871 * done - completion function to be run.
872 * timeout - how long to let it run before timeout.
873 * retries - number of retries we allow.
874 *
875 * Lock status: With the new queueing code, this is SMP-safe, and no locks
876 * need be held upon entry. The old queueing code the lock was
877 * assumed to be held upon entry.
878 *
879 * Returns: Nothing.
880 *
881 * Notes: Prior to the new queue code, this function was not SMP-safe.
882 * Also, this function is now only used for queueing requests
883 * for things like ioctls and character device requests - this
884 * is because we essentially just inject a request into the
885 * queue for the device. Normal block device handling manipulates
886 * the queue directly.
887 */
/*
 * (Contract documented in the block comment above.)  Copies the CDB and
 * buffer parameters into the request, derives sr_cmd_len from the opcode
 * when the caller left it zero, and injects the request into the device
 * queue via scsi_insert_special_req(); `done` runs on completion.
 */
888 void scsi_do_req(Scsi_Request * SRpnt, const void *cmnd,
889 void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
890 int timeout, int retries)
891 {
892 Scsi_Device * SDpnt = SRpnt->sr_device;
893 struct Scsi_Host *host = SDpnt->host;
895 ASSERT_LOCK(&io_request_lock, 0);
897 SCSI_LOG_MLQUEUE(4,
898 {
899 int i;
900 int target = SDpnt->id;
901 int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
902 printk("scsi_do_req (host = %d, channel = %d target = %d, "
903 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
904 "retries = %d)\n"
905 "command : ", host->host_no, SDpnt->channel, target, buffer,
906 bufflen, done, timeout, retries);
907 for (i = 0; i < size; ++i)
908 printk("%02x ", ((unsigned char *) cmnd)[i]);
909 printk("\n");
910 });
912 if (!host) {
913 panic("Invalid or not present host.\n");
914 }
916 /*
917 * If the upper level driver is reusing these things, then
918 * we should release the low-level block now. Another one will
919 * be allocated later when this request is getting queued.
920 */
921 if( SRpnt->sr_command != NULL )
922 {
923 scsi_release_command(SRpnt->sr_command);
924 SRpnt->sr_command = NULL;
925 }
927 /*
928 * We must prevent reentrancy to the lowlevel host driver.
929 * This prevents it - we enter a loop until the host we want
930 * to talk to is not busy. Race conditions are prevented, as
931 * interrupts are disabled in between the time we check for
932 * the host being not busy, and the time we mark it busy
933 * ourselves. */
936 /*
937 * Our own function scsi_done (which marks the host as not
938 * busy, disables the timeout counter, etc) will be called by
939 * us or by the scsi_hosts[host].queuecommand() function needs
940 * to also call the completion function for the high level
941 * driver. */
943 memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd,
944 sizeof(SRpnt->sr_cmnd));
946 SRpnt->sr_bufflen = bufflen;
947 SRpnt->sr_buffer = buffer;
948 SRpnt->sr_allowed = retries;
949 SRpnt->sr_done = done;
950 SRpnt->sr_timeout_per_command = timeout;
/* CDB length defaults to the standard size for this opcode's group. */
952 if (SRpnt->sr_cmd_len == 0)
953 SRpnt->sr_cmd_len = COMMAND_SIZE(SRpnt->sr_cmnd[0]);
955 /*
956 * At this point, we merely set up the command, stick it in the normal
957 * request queue, and return. Eventually that request will come to the
958 * top of the list, and will be dispatched.
959 */
960 scsi_insert_special_req(SRpnt, 0);
962 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_req()\n"));
963 }
965 /*
966 * Function: scsi_init_cmd_from_req
967 *
968 * Purpose: Queue a SCSI command
969 * Purpose: Initialize a Scsi_Cmnd from a Scsi_Request
970 *
971 * Arguments: SCpnt - command descriptor.
972 * SRpnt - Request from the queue.
973 *
974 * Lock status: None needed.
975 *
976 * Returns: Nothing.
977 *
978 * Notes: Mainly transfer data from the request structure to the
979 * command structure. The request structure is allocated
980 * using the normal memory allocator, and requests can pile
981 * up to more or less any depth. The command structure represents
982 * a consumable resource, as these are allocated into a pool
983 * when the SCSI subsystem initializes. The preallocation is
984 * required so that in low-memory situations a disk I/O request
985 * won't cause the memory manager to try and write out a page.
986 * The request structure is generally used by ioctls and character
987 * devices.
988 */
989 void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
990 {
991 struct Scsi_Host *host = SCpnt->host;
993 ASSERT_LOCK(&io_request_lock, 0);
995 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
996 SRpnt->sr_command = SCpnt;
998 if (!host) {
999 panic("Invalid or not present host.\n");
1002 SCpnt->cmd_len = SRpnt->sr_cmd_len;
1003 SCpnt->use_sg = SRpnt->sr_use_sg;
1005 memcpy((void *) &SCpnt->request, (const void *) &SRpnt->sr_request,
1006 sizeof(SRpnt->sr_request));
1007 memcpy((void *) SCpnt->data_cmnd, (const void *) SRpnt->sr_cmnd,
1008 sizeof(SCpnt->data_cmnd));
1009 SCpnt->reset_chain = NULL;
1010 SCpnt->serial_number = 0;
1011 SCpnt->serial_number_at_timeout = 0;
1012 SCpnt->bufflen = SRpnt->sr_bufflen;
1013 SCpnt->buffer = SRpnt->sr_buffer;
1014 SCpnt->flags = 0;
1015 SCpnt->retries = 0;
1016 SCpnt->allowed = SRpnt->sr_allowed;
1017 SCpnt->done = SRpnt->sr_done;
1018 SCpnt->timeout_per_command = SRpnt->sr_timeout_per_command;
1020 SCpnt->sc_data_direction = SRpnt->sr_data_direction;
1022 SCpnt->sglist_len = SRpnt->sr_sglist_len;
1023 SCpnt->underflow = SRpnt->sr_underflow;
1025 SCpnt->sc_request = SRpnt;
1027 memcpy((void *) SCpnt->cmnd, (const void *) SRpnt->sr_cmnd,
1028 sizeof(SCpnt->cmnd));
1029 /* Zero the sense buffer. Some host adapters automatically request
1030 * sense on error. 0 is not a valid sense code.
1031 */
1032 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1033 SCpnt->request_buffer = SRpnt->sr_buffer;
1034 SCpnt->request_bufflen = SRpnt->sr_bufflen;
1035 SCpnt->old_use_sg = SCpnt->use_sg;
1036 if (SCpnt->cmd_len == 0)
1037 SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
1038 SCpnt->old_cmd_len = SCpnt->cmd_len;
1039 SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
1040 SCpnt->old_underflow = SCpnt->underflow;
1042 /* Start the timer ticking. */
1044 SCpnt->internal_timeout = NORMAL_TIMEOUT;
1045 SCpnt->abort_reason = 0;
1046 SCpnt->result = 0;
1048 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
1051 /*
1052 * Function: scsi_do_cmd
1054 * Purpose: Queue a SCSI command
1056 * Arguments: SCpnt - command descriptor.
1057 * cmnd - actual SCSI command to be performed.
1058 * buffer - data buffer.
1059 * bufflen - size of data buffer.
1060 * done - completion function to be run.
1061 * timeout - how long to let it run before timeout.
1062 * retries - number of retries we allow.
1064 * Lock status: With the new queueing code, this is SMP-safe, and no locks
1065 * need be held upon entry. Under the old queueing code the lock
1066 * was assumed to be held upon entry.
1068 * Returns: Nothing.
1070 * Notes: Prior to the new queue code, this function was not SMP-safe.
1071 * Also, this function is now only used for queueing requests
1072 * for things like ioctls and character device requests - this
1073 * is because we essentially just inject a request into the
1074 * queue for the device. Normal block device handling manipulates
1075 * the queue directly.
1076 */
1077 void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
1078 void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
1079 int timeout, int retries)
1081 struct Scsi_Host *host = SCpnt->host;
/* Caller must NOT hold io_request_lock (asserted unlocked here). */
1083 ASSERT_LOCK(&io_request_lock, 0);
/* Tag the command with a fresh mid-level pid and take ownership. */
1085 SCpnt->pid = scsi_pid++;
1086 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
1088 SCSI_LOG_MLQUEUE(4,
1090 int i;
1091 int target = SCpnt->target;
1092 int size = COMMAND_SIZE(((const unsigned char *)cmnd)[0]);
1093 printk("scsi_do_cmd (host = %d, channel = %d target = %d, "
1094 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
1095 "retries = %d)\n"
1096 "command : ", host->host_no, SCpnt->channel, target, buffer,
1097 bufflen, done, timeout, retries);
1098 for (i = 0; i < size; ++i)
1099 printk("%02x ", ((unsigned char *) cmnd)[i]);
1100 printk("\n");
1101 });
1103 if (!host) {
1104 panic("Invalid or not present host.\n");
1106 /*
1107 * We must prevent reentrancy to the lowlevel host driver. This prevents
1108 * it - we enter a loop until the host we want to talk to is not busy.
1109 * Race conditions are prevented, as interrupts are disabled in between the
1110 * time we check for the host being not busy, and the time we mark it busy
1111 * ourselves.
1112 */
1115 /*
1116 * Our own function scsi_done (which marks the host as not busy and
1117 * disables the timeout counter) will be called either by us directly
1118 * or by the low-level queuecommand() path; it in turn invokes the
1119 * completion function supplied by the high level driver.
1120 */
/* NOTE(review): both memcpy calls below copy sizeof() the destination
 * field; this assumes 'cmnd' points at a buffer at least that large -
 * callers appear to pass full command blocks, but confirm at call sites. */
1122 memcpy((void *) SCpnt->data_cmnd, (const void *) cmnd,
1123 sizeof(SCpnt->data_cmnd));
1124 SCpnt->reset_chain = NULL;
1125 SCpnt->serial_number = 0;
1126 SCpnt->serial_number_at_timeout = 0;
1127 SCpnt->bufflen = bufflen;
1128 SCpnt->buffer = buffer;
1129 SCpnt->flags = 0;
/* retries = attempts used so far (starts at 0); allowed = retry budget. */
1130 SCpnt->retries = 0;
1131 SCpnt->allowed = retries;
1132 SCpnt->done = done;
1133 SCpnt->timeout_per_command = timeout;
1135 memcpy((void *) SCpnt->cmnd, (const void *) cmnd,
1136 sizeof(SCpnt->cmnd));
1137 /* Zero the sense buffer. Some host adapters automatically request
1138 * sense on error. 0 is not a valid sense code.
1139 */
1140 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1141 SCpnt->request_buffer = buffer;
1142 SCpnt->request_bufflen = bufflen;
/* Snapshot the "old_*" fields so scsi_retry_command() can restore them. */
1143 SCpnt->old_use_sg = SCpnt->use_sg;
1144 if (SCpnt->cmd_len == 0)
1145 SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
1146 SCpnt->old_cmd_len = SCpnt->cmd_len;
1147 SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
1148 SCpnt->old_underflow = SCpnt->underflow;
1150 /* Start the timer ticking. */
1152 SCpnt->internal_timeout = NORMAL_TIMEOUT;
1153 SCpnt->abort_reason = 0;
1154 SCpnt->result = 0;
1156 /*
1157 * At this point, we merely set up the command, stick it in the normal
1158 * request queue, and return. Eventually that request will come to the
1159 * top of the list, and will be dispatched.
1160 */
1161 scsi_insert_special_cmd(SCpnt, 0);
1163 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
1166 /*
1167 * This function is the mid-level interrupt routine, which decides how
1168 * to handle error conditions. Each invocation of this function must
1169 * do one and *only* one of the following:
1171 * 1) Insert command in BH queue.
1172 * 2) Activate error handler for host.
1174 * FIXME(eric) - I am concerned about stack overflow (still). An
1175 * interrupt could come while we are processing the bottom queue,
1176 * which would cause another command to be stuffed onto the bottom
1177 * queue, and it would in turn be processed as that interrupt handler
1178 * is returning. Given a sufficiently steady rate of returning
1179 * commands, this could cause the stack to overflow. I am not sure
1180 * what is the most appropriate solution here - we should probably
1181 * keep a depth count, and not process any commands while we still
1182 * have a bottom handler active higher in the stack.
1184 * There is currently code in the bottom half handler to monitor
1185 * recursion in the bottom handler and report if it ever happens. If
1186 * this becomes a problem, it won't be hard to engineer something to
1187 * deal with it so that only the outer layer ever does any real
1188 * processing.
1189 */
1190 void scsi_done(Scsi_Cmnd * SCpnt)
1192 unsigned long flags;
1193 int tstatus;
1195 /*
1196 * We don't have to worry about this one timing out any more.
1197 */
1198 tstatus = scsi_delete_timer(SCpnt);
1200 /*
1201 * If we are unable to remove the timer, it means that the command
1202 * has already timed out. In this case, we have no choice but to
1203 * let the timeout function run, as we have no idea where in fact
1204 * that function could really be. It might be on another processor,
1205 * etc, etc.
1206 */
1207 if (!tstatus) {
/* Timer already fired: the timeout path owns this command now.
 * Record that completion arrived late and do NOT queue it. */
1208 SCpnt->done_late = 1;
1209 return;
1211 /* Set the serial numbers back to zero */
1212 SCpnt->serial_number = 0;
1214 /*
1215 * First, see whether this command already timed out. If so, we ignore
1216 * the response. We treat it as if the command never finished.
1218 * Since serial_number is now 0, the error handler cound detect this
1219 * situation and avoid to call the low level driver abort routine.
1220 * (DB)
1222 * FIXME(eric) - I believe that this test is now redundant, due to
1223 * the test of the return status of del_timer().
1224 */
1225 if (SCpnt->state == SCSI_STATE_TIMEOUT) {
1226 SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt));
1227 return;
/* From here on: hand the command to the bottom-half queue atomically. */
1229 spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1231 SCpnt->serial_number_at_timeout = 0;
1232 SCpnt->state = SCSI_STATE_BHQUEUE;
1233 SCpnt->owner = SCSI_OWNER_BH_HANDLER;
1234 SCpnt->bh_next = NULL;
1236 /*
1237 * Next, put this command in the BH queue.
1239 * We need a spinlock here, or compare and exchange if we can reorder incoming
1240 * Scsi_Cmnds, as it happens pretty often scsi_done is called multiple times
1241 * before bh is serviced. -jj
1243 * We already have the io_request_lock here, since we are called from the
1244 * interrupt handler or the error handler. (DB)
1246 * This may be true at the moment, but I would like to wean all of the low
1247 * level drivers away from using io_request_lock. Technically they should
1248 * all use their own locking. I am adding a small spinlock to protect
1249 * this datastructure to make it safe for that day. (ERY)
1250 */
/* Append to the singly-linked tail queue (FIFO order preserved). */
1251 if (!scsi_bh_queue_head) {
1252 scsi_bh_queue_head = SCpnt;
1253 scsi_bh_queue_tail = SCpnt;
1254 } else {
1255 scsi_bh_queue_tail->bh_next = SCpnt;
1256 scsi_bh_queue_tail = SCpnt;
1259 spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1260 /*
1261 * Mark the bottom half handler to be run.
1262 */
1263 mark_bh(SCSI_BH);
1266 /*
1267 * Procedure: scsi_bottom_half_handler
1269 * Purpose: Called after we have finished processing interrupts, it
1270 * performs post-interrupt handling for commands that may
1271 * have completed.
1273 * Notes: This is called with all interrupts enabled. This should reduce
1274 * interrupt latency, stack depth, and reentrancy of the low-level
1275 * drivers.
1277 * The io_request_lock is required in all the routine. There was a subtle
1278 * race condition when scsi_done is called after a command has already
1279 * timed out but before the time out is processed by the error handler.
1280 * (DB)
1282 * I believe I have corrected this. We simply monitor the return status of
1283 * del_timer() - if this comes back as 0, it means that the timer has fired
1284 * and that a timeout is in progress. I have modified scsi_done() such
1285 * that in this instance the command is never inserted in the bottom
1286 * half queue. Thus the only time we hold the lock here is when
1287 * we wish to atomically remove the contents of the queue.
1288 */
1289 void scsi_bottom_half_handler(void)
1291 Scsi_Cmnd *SCpnt;
1292 Scsi_Cmnd *SCnext;
1293 unsigned long flags;
/* Repeat until the queue drains: new completions may be appended by
 * scsi_done() while we are processing the batch we just detached. */
1296 while (1 == 1) {
/* Atomically steal the entire pending list; we then process it
 * without holding the lock. */
1297 spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1298 SCpnt = scsi_bh_queue_head;
1299 scsi_bh_queue_head = NULL;
1300 spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1302 if (SCpnt == NULL) {
1303 return;
1305 SCnext = SCpnt->bh_next;
1307 for (; SCpnt; SCpnt = SCnext) {
1308 SCnext = SCpnt->bh_next;
1310 switch (scsi_decide_disposition(SCpnt)) {
1311 case SUCCESS:
1312 /*
1313 * Add to BH queue.
1314 */
1315 SCSI_LOG_MLCOMPLETE(3,
1316 printk("Command finished %d %d 0x%x\n",
1317 SCpnt->host->host_busy,
1318 SCpnt->host->host_failed,
1319 SCpnt->result));
1321 scsi_finish_command(SCpnt);
1322 break;
1323 case NEEDS_RETRY:
1324 /*
1325 * We only come in here if we want to retry a command.
1326 * The test to see whether the command should be
1327 * retried should be keeping track of the number of
1328 * tries, so we don't end up looping, of course. */
1329 SCSI_LOG_MLCOMPLETE(3,
1330 printk("Command needs retry %d %d 0x%x\n",
1331 SCpnt->host->host_busy,
1332 SCpnt->host->host_failed,
1333 SCpnt->result));
1335 scsi_retry_command(SCpnt);
1336 break;
1337 case ADD_TO_MLQUEUE:
1338 /*
1339 * This typically happens for a QUEUE_FULL message -
1340 * typically only when the queue depth is only
1341 * approximate for a given device. Adding a command
1342 * to the queue for the device will prevent further commands
1343 * from being sent to the device, so we shouldn't end up
1344 * with tons of things being sent down that shouldn't be.
1345 */
1346 SCSI_LOG_MLCOMPLETE(3, printk(
1347 "Cmnd rejected as device queue full, put on ml queue %p\n",
1348 SCpnt));
1349 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_DEVICE_BUSY);
1350 break;
1351 default:
1352 /*
1353 * Here we have a fatal error of some sort. Turn it over to
1354 * the error handler.
1355 */
1356 SCSI_LOG_MLCOMPLETE(3, printk(
1357 "Command failed %p %x active=%d busy=%d failed=%d\n",
1358 SCpnt, SCpnt->result,
1359 atomic_read(&SCpnt->host->host_active),
1360 SCpnt->host->host_busy,
1361 SCpnt->host->host_failed));
1363 /*
1364 * Dump the sense information too.
1365 */
1366 if ((status_byte(SCpnt->result) & CHECK_CONDITION) != 0) {
1367 SCSI_LOG_MLCOMPLETE(3, print_sense("bh", SCpnt));
1369 if (SCpnt->host->eh_wait != NULL) {
1370 SCpnt->host->host_failed++;
1371 SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
1372 SCpnt->state = SCSI_STATE_FAILED;
1373 SCpnt->host->in_recovery = 1;
1374 /*
1375 * If the host is having troubles, then look to
1376 * see if this was the last command that might
1377 * have failed. If so, wake up the error handler. */
1378 if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
1379 #if 0
1380 SCSI_LOG_ERROR_RECOVERY(5, printk(
1381 "Waking error handler thread (%d)\n",
1382 atomic_read(&SCpnt->host->eh_wait->count)));
1383 up(SCpnt->host->eh_wait);
1384 #endif
/* NOTE(review): the up() that wakes the error-handler thread is
 * compiled out (#if 0) in this port. Failed commands are still
 * accounted (host_failed, in_recovery) but nothing visible here
 * wakes the handler - confirm this is intended for Xen, where
 * the recovery thread may not exist. */
1386 } else {
1387 /*
1388 * We only get here if the error recovery thread has died.
1389 */
1390 printk("scsi_bh: error finish\n");
1391 scsi_finish_command(SCpnt);
1394 } /* for(; SCpnt...) */
1396 } /* while(1==1) */
1400 /*
1401 * Function: scsi_retry_command
1403 * Purpose: Send a command back to the low level to be retried.
1405 * Notes: This command is always executed in the context of the
1406 * bottom half handler, or the error handler thread. Low
1407 * level drivers should not become re-entrant as a result of
1408 * this.
1409 */
1410 int scsi_retry_command(Scsi_Cmnd * SCpnt)
/* Restore the command block from the pristine copy and the saved
 * "old_*" fields, since the previous attempt may have modified the
 * working fields (cmnd, use_sg, cmd_len, data direction, underflow). */
1412 memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd,
1413 sizeof(SCpnt->data_cmnd));
1414 SCpnt->request_buffer = SCpnt->buffer;
1415 SCpnt->request_bufflen = SCpnt->bufflen;
1416 SCpnt->use_sg = SCpnt->old_use_sg;
1417 SCpnt->cmd_len = SCpnt->old_cmd_len;
1418 SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
1419 SCpnt->underflow = SCpnt->old_underflow;
1421 /*
1422 * Zero the sense information from the last time we tried
1423 * this command.
1424 */
1425 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
/* Re-dispatch to the low-level driver; returns its status. */
1427 return scsi_dispatch_cmd(SCpnt);
1430 /*
1431 * Function: scsi_finish_command
1433 * Purpose: Pass command off to upper layer for finishing of I/O
1434 * request, waking processes that are waiting on results,
1435 * etc.
1436 */
1437 void scsi_finish_command(Scsi_Cmnd * SCpnt)
1439 struct Scsi_Host *host;
1440 Scsi_Device *device;
1441 Scsi_Request * SRpnt;
1442 unsigned long flags;
/* Must be entered without io_request_lock held; we take it briefly
 * below for the busy-count updates. */
1444 ASSERT_LOCK(&io_request_lock, 0);
1446 host = SCpnt->host;
1447 device = SCpnt->device;
1449 /*
1450 * We need to protect the decrement, as otherwise a race condition
1451 * would exist. Fiddling with SCpnt isn't a problem as the
1452 * design only allows a single SCpnt to be active in only
1453 * one execution context, but the device and host structures are
1454 * shared.
1455 */
1456 spin_lock_irqsave(&io_request_lock, flags);
1457 host->host_busy--; /* Indicate that we are free */
1458 device->device_busy--; /* Decrement device usage counter. */
1459 spin_unlock_irqrestore(&io_request_lock, flags);
1461 /*
1462 * Clear the flags which say that the device/host is no longer
1463 * capable of accepting new commands. These are set in scsi_queue.c
1464 * for both the queue full condition on a device, and for a
1465 * host full condition on the host.
1466 */
1467 host->host_blocked = FALSE;
1468 device->device_blocked = FALSE;
1470 /*
1471 * If we have valid sense information, then some kind of recovery
1472 * must have taken place. Make a note of this.
1473 */
1474 if (scsi_sense_valid(SCpnt)) {
1475 SCpnt->result |= (DRIVER_SENSE << 24);
1477 SCSI_LOG_MLCOMPLETE(3, printk(
1478 "Notifying upper driver of completion for device %d %x\n",
1479 SCpnt->device->id, SCpnt->result));
1481 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
1482 SCpnt->state = SCSI_STATE_FINISHED;
1484 /* We can get here with use_sg=0, causing a panic in the
1485 upper level (DB) */
1486 SCpnt->use_sg = SCpnt->old_use_sg;
1488 /*
1489 * If there is an associated request structure, copy the data over
1490 * before we call the * completion function.
1491 */
1492 SRpnt = SCpnt->sc_request;
1494 if( SRpnt != NULL ) {
/* A Scsi_Request that no longer points back at a command is a
 * bookkeeping bug; dump diagnostics and stop hard. */
1495 if(!SRpnt->sr_command) {
1496 printk("scsi_finish_command: SRpnt=%p, SRpnt->sr_command=%p\n",
1497 SRpnt, SRpnt->sr_command);
1498 printk("SRpnt->freeaddr = %p\n", SRpnt->freeaddr);
1499 BUG();
1501 SRpnt->sr_result = SRpnt->sr_command->result;
/* Only propagate sense data on failure (result != 0). */
1502 if( SRpnt->sr_result != 0 ) {
1503 memcpy(SRpnt->sr_sense_buffer,
1504 SRpnt->sr_command->sense_buffer,
1505 sizeof(SRpnt->sr_sense_buffer));
/* Finally invoke the completion callback set in scsi_do_cmd(). */
1509 SCpnt->done(SCpnt);
1512 static int scsi_register_host(Scsi_Host_Template *);
1513 static int scsi_unregister_host(Scsi_Host_Template *);
1515 /*
1516 * Function: scsi_release_commandblocks()
1518 * Purpose: Release command blocks associated with a device.
1520 * Arguments: SDpnt - device
1522 * Returns: Nothing
1524 * Lock status: No locking assumed or required.
1526 * Notes: Frees every Scsi_Cmnd on the device queue and resets the
1527 * queue-depth bookkeeping, all under device_request_lock.
1527 */
1528 void scsi_release_commandblocks(Scsi_Device * SDpnt)
1530 Scsi_Cmnd *SCpnt, *SCnext;
1531 unsigned long flags;
1533 spin_lock_irqsave(&device_request_lock, flags);
/* Walk the list, advancing the head before each kfree so the list
 * stays consistent even if we are interrupted mid-teardown. */
1534 for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCnext) {
1535 SDpnt->device_queue = SCnext = SCpnt->next;
1536 kfree((char *) SCpnt);
1538 SDpnt->has_cmdblocks = 0;
1539 SDpnt->queue_depth = 0;
1540 spin_unlock_irqrestore(&device_request_lock, flags);
1543 /*
1544 * Function: scsi_build_commandblocks()
1546 * Purpose: Allocate command blocks associated with a device.
1548 * Arguments: SDpnt - device
1550 * Returns: Nothing
1552 * Lock status: No locking assumed or required.
1554 * Notes: Allocates up to SDpnt->queue_depth Scsi_Cmnd structures and
1555 * chains them onto SDpnt->device_queue. On partial allocation
1555 * failure the queue depth is shrunk to what was obtained.
1555 */
1556 void scsi_build_commandblocks(Scsi_Device * SDpnt)
1558 unsigned long flags;
1559 struct Scsi_Host *host = SDpnt->host;
1560 int j;
1561 Scsi_Cmnd *SCpnt;
1563 spin_lock_irqsave(&device_request_lock, flags);
/* Derive a queue depth: per-LUN default from the host, with a floor
 * of 1 so the device always has at least one command block. */
1565 if (SDpnt->queue_depth == 0)
1567 SDpnt->queue_depth = host->cmd_per_lun;
1568 if (SDpnt->queue_depth == 0)
1569 SDpnt->queue_depth = 1; /* live to fight another day */
1571 SDpnt->device_queue = NULL;
1573 for (j = 0; j < SDpnt->queue_depth; j++) {
/* GFP_ATOMIC: we hold a spinlock with interrupts off, so we must
 * not sleep; GFP_DMA for ISA adapters with 24-bit addressing. */
1574 SCpnt = (Scsi_Cmnd *)
1575 kmalloc(sizeof(Scsi_Cmnd),
1576 GFP_ATOMIC |
1577 (host->unchecked_isa_dma ? GFP_DMA : 0));
1578 if (NULL == SCpnt)
1579 break; /* If not, the next line will oops ... */
1580 memset(SCpnt, 0, sizeof(Scsi_Cmnd));
1581 SCpnt->host = host;
1582 SCpnt->device = SDpnt;
1583 SCpnt->target = SDpnt->id;
1584 SCpnt->lun = SDpnt->lun;
1585 SCpnt->channel = SDpnt->channel;
1586 SCpnt->request.rq_status = RQ_INACTIVE;
1587 SCpnt->use_sg = 0;
1588 SCpnt->old_use_sg = 0;
1589 SCpnt->old_cmd_len = 0;
1590 SCpnt->underflow = 0;
1591 SCpnt->old_underflow = 0;
1592 SCpnt->transfersize = 0;
1593 SCpnt->resid = 0;
1594 SCpnt->serial_number = 0;
1595 SCpnt->serial_number_at_timeout = 0;
1596 SCpnt->host_scribble = NULL;
/* Push onto the head of the device's free-command list. */
1597 SCpnt->next = SDpnt->device_queue;
1598 SDpnt->device_queue = SCpnt;
1599 SCpnt->state = SCSI_STATE_UNUSED;
1600 SCpnt->owner = SCSI_OWNER_NOBODY;
1602 if (j < SDpnt->queue_depth) { /* low on space (D.Gilbert 990424) */
1603 printk(KERN_WARNING "scsi_build_commandblocks: want=%d, space for=%d blocks\n",
1604 SDpnt->queue_depth, j);
1605 SDpnt->queue_depth = j;
1606 SDpnt->has_cmdblocks = (0 != j);
1607 } else {
1608 SDpnt->has_cmdblocks = 1;
1610 spin_unlock_irqrestore(&device_request_lock, flags);
/*
 * Function: scsi_host_no_insert()
 *
 * Purpose: Record a host-adapter name -> host number binding parsed at
 * boot, appending a Scsi_Host_Name entry to scsi_host_no_list
 * and raising max_scsi_hosts accordingly.
 *
 * Arguments: str - adapter name (copied), n - host number to reserve.
 */
1613 void __init scsi_host_no_insert(char *str, int n)
1615 Scsi_Host_Name *shn, *shn2;
1616 int len;
1618 len = strlen(str);
/* Only proceed if the name is non-empty and both allocations succeed. */
1619 if (len && (shn = (Scsi_Host_Name *) kmalloc(sizeof(Scsi_Host_Name), GFP_ATOMIC))) {
1620 if ((shn->name = kmalloc(len+1, GFP_ATOMIC))) {
/* strncpy does not terminate at exactly len bytes; the explicit
 * NUL below makes the copy safe. */
1621 strncpy(shn->name, str, len);
1622 shn->name[len] = 0;
1623 shn->host_no = n;
1624 shn->host_registered = 0;
1625 shn->loaded_as_module = 1; /* numbers shouldn't be freed in any case */
1626 shn->next = NULL;
/* Append at the tail to preserve registration order. */
1627 if (scsi_host_no_list) {
1628 for (shn2 = scsi_host_no_list;shn2->next;shn2 = shn2->next)
1630 shn2->next = shn;
1632 else
1633 scsi_host_no_list = shn;
1634 max_scsi_hosts = n+1;
1636 else
/* Name allocation failed: release the list node to avoid a leak. */
1637 kfree((char *) shn);
1641 #ifdef CONFIG_PROC_FS
/*
 * /proc/scsi/scsi read handler: formats one line per attached device
 * via proc_print_scsidevice(). Implements the classic procfs
 * offset/length windowing protocol: accumulate output, track the file
 * position in 'pos', discard text before 'offset', and stop once the
 * requested window is filled. Returns the number of bytes available
 * from *start.
 */
1642 static int scsi_proc_info(char *buffer, char **start, off_t offset, int length)
1644 Scsi_Device *scd;
1645 struct Scsi_Host *HBA_ptr;
1646 int size, len = 0;
1647 off_t begin = 0;
1648 off_t pos = 0;
1650 /*
1651 * First, see if there are any attached devices or not.
1652 */
1653 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1654 if (HBA_ptr->host_queue != NULL) {
1655 break;
/* HBA_ptr is non-NULL here iff some host has at least one device. */
1658 size = sprintf(buffer + len, "Attached devices: %s\n", (HBA_ptr) ? "" : "none");
1659 len += size;
1660 pos = begin + len;
1661 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1662 #if 0
1663 size += sprintf(buffer + len, "scsi%2d: %s\n", (int) HBA_ptr->host_no,
1664 HBA_ptr->hostt->procname);
1665 len += size;
1666 pos = begin + len;
1667 #endif
1668 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1669 proc_print_scsidevice(scd, buffer, &size, len);
1670 len += size;
1671 pos = begin + len;
/* Everything so far is before the requested window: forget it. */
1673 if (pos < offset) {
1674 len = 0;
1675 begin = pos;
1677 if (pos > offset + length)
1678 goto stop_output;
1682 stop_output:
1683 *start = buffer + (offset - begin); /* Start of wanted data */
1684 len -= (offset - begin); /* Start slop */
1685 if (len > length)
1686 len = length; /* Ending slop */
1687 return (len);
/*
 * /proc/scsi/scsi write handler. Accepts textual commands of the form:
 * "scsi dump #N" - dump command status at detail level N
 * "scsi log <token> #N" - set a logging level (CONFIG_SCSI_LOGGING)
 * "scsi add-single-device H C I L" - scan one Host/Channel/Id/Lun
 * "scsi remove-single-device H C I L" - detach one device
 * Copies the user buffer into a private page, NUL-terminates it, and
 * dispatches on the command word. Returns 'length' on success or a
 * negative errno.
 */
1690 static int proc_scsi_gen_write(struct file * file, const char * buf,
1691 unsigned long length, void *data)
1693 struct Scsi_Device_Template *SDTpnt;
1694 Scsi_Device *scd;
1695 struct Scsi_Host *HBA_ptr;
1696 char *p;
1697 int host, channel, id, lun;
1698 char * buffer;
1699 int err;
1701 if (!buf || length>PAGE_SIZE)
1702 return -EINVAL;
1704 if (!(buffer = (char *) __get_free_page(GFP_KERNEL)))
1705 return -ENOMEM;
1706 if(copy_from_user(buffer, buf, length))
1708 err =-EFAULT;
1709 goto out;
1712 err = -EINVAL;
/* Ensure the command string is NUL-terminated within the page. */
1714 if (length < PAGE_SIZE)
1715 buffer[length] = '\0';
1716 else if (buffer[PAGE_SIZE-1])
1717 goto out;
1719 if (length < 11 || strncmp("scsi", buffer, 4))
1720 goto out;
1722 /*
1723 * Usage: echo "scsi dump #N" > /proc/scsi/scsi
1724 * to dump status of all scsi commands. The number is used to specify the level
1725 * of detail in the dump.
1726 */
1727 if (!strncmp("dump", buffer + 5, 4)) {
1728 unsigned int level;
1730 p = buffer + 10;
1732 if (*p == '\0')
1733 goto out;
1735 level = simple_strtoul(p, NULL, 0);
1736 scsi_dump_status(level);
1738 /*
1739 * Usage: echo "scsi log token #N" > /proc/scsi/scsi
1740 * where token is one of [error,scan,mlqueue,mlcomplete,llqueue,
1741 * llcomplete,hlqueue,hlcomplete]
1742 */
1743 #ifdef CONFIG_SCSI_LOGGING /* { */
1745 if (!strncmp("log", buffer + 5, 3)) {
1746 char *token;
1747 unsigned int level;
1749 p = buffer + 9;
1750 token = p;
/* Advance p past the token to the whitespace-separated level. */
1751 while (*p != ' ' && *p != '\t' && *p != '\0') {
1752 p++;
/* No level given: only "all" / "none" are valid bare tokens. */
1755 if (*p == '\0') {
1756 if (strncmp(token, "all", 3) == 0) {
1757 /*
1758 * Turn on absolutely everything.
1759 */
1760 scsi_logging_level = ~0;
1761 } else if (strncmp(token, "none", 4) == 0) {
1762 /*
1763 * Turn off absolutely everything.
1764 */
1765 scsi_logging_level = 0;
1766 } else {
1767 goto out;
1769 } else {
1770 *p++ = '\0';
1772 level = simple_strtoul(p, NULL, 0);
1774 /*
1775 * Now figure out what to do with it.
1776 */
1777 if (strcmp(token, "error") == 0) {
1778 SCSI_SET_ERROR_RECOVERY_LOGGING(level);
1779 } else if (strcmp(token, "timeout") == 0) {
1780 SCSI_SET_TIMEOUT_LOGGING(level);
1781 } else if (strcmp(token, "scan") == 0) {
1782 SCSI_SET_SCAN_BUS_LOGGING(level);
1783 } else if (strcmp(token, "mlqueue") == 0) {
1784 SCSI_SET_MLQUEUE_LOGGING(level);
1785 } else if (strcmp(token, "mlcomplete") == 0) {
1786 SCSI_SET_MLCOMPLETE_LOGGING(level);
1787 } else if (strcmp(token, "llqueue") == 0) {
1788 SCSI_SET_LLQUEUE_LOGGING(level);
1789 } else if (strcmp(token, "llcomplete") == 0) {
1790 SCSI_SET_LLCOMPLETE_LOGGING(level);
1791 } else if (strcmp(token, "hlqueue") == 0) {
1792 SCSI_SET_HLQUEUE_LOGGING(level);
1793 } else if (strcmp(token, "hlcomplete") == 0) {
1794 SCSI_SET_HLCOMPLETE_LOGGING(level);
1795 } else if (strcmp(token, "ioctl") == 0) {
1796 SCSI_SET_IOCTL_LOGGING(level);
1797 } else {
1798 goto out;
1802 printk(KERN_INFO "scsi logging level set to 0x%8.8x\n", scsi_logging_level);
1804 #endif /* CONFIG_SCSI_LOGGING */ /* } */
1806 /*
1807 * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
1808 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1809 * Consider this feature BETA.
1810 * CAUTION: This is not for hotplugging your peripherals. As
1811 * SCSI was not designed for this you could damage your
1812 * hardware !
1813 * However perhaps it is legal to switch on an
1814 * already connected device. It is perhaps not
1815 * guaranteed this device doesn't corrupt an ongoing data transfer.
1816 */
1817 if (!strncmp("add-single-device", buffer + 5, 17)) {
1818 p = buffer + 23;
1820 host = simple_strtoul(p, &p, 0);
1821 channel = simple_strtoul(p + 1, &p, 0);
1822 id = simple_strtoul(p + 1, &p, 0);
1823 lun = simple_strtoul(p + 1, &p, 0);
1825 printk(KERN_INFO "scsi singledevice %d %d %d %d\n", host, channel,
1826 id, lun);
1828 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1829 if (HBA_ptr->host_no == host) {
1830 break;
1833 err = -ENXIO;
1834 if (!HBA_ptr)
1835 goto out;
/* Refuse to re-add a device that is already attached. */
1837 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1838 if ((scd->channel == channel
1839 && scd->id == id
1840 && scd->lun == lun)) {
1841 break;
1845 err = -ENOSYS;
1846 if (scd)
1847 goto out; /* We do not yet support unplugging */
1849 scan_scsis(HBA_ptr, 1, channel, id, lun);
1851 /* FIXME (DB) This assumes that the queue_depth routines can be used
1852 in this context as well, while they were all designed to be
1853 called only once after the detect routine. (DB) */
1854 /* queue_depth routine moved to inside scan_scsis(,1,,,) so
1855 it is called before build_commandblocks() */
1857 err = length;
1858 goto out;
1860 /*
1861 * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
1862 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1864 * Consider this feature pre-BETA.
1866 * CAUTION: This is not for hotplugging your peripherals. As
1867 * SCSI was not designed for this you could damage your
1868 * hardware and thoroughly confuse the SCSI subsystem.
1870 */
1871 else if (!strncmp("remove-single-device", buffer + 5, 20)) {
1872 p = buffer + 26;
1874 host = simple_strtoul(p, &p, 0);
1875 channel = simple_strtoul(p + 1, &p, 0);
1876 id = simple_strtoul(p + 1, &p, 0);
1877 lun = simple_strtoul(p + 1, &p, 0);
1880 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1881 if (HBA_ptr->host_no == host) {
1882 break;
1885 err = -ENODEV;
1886 if (!HBA_ptr)
1887 goto out;
1889 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1890 if ((scd->channel == channel
1891 && scd->id == id
1892 && scd->lun == lun)) {
1893 break;
1897 if (scd == NULL)
1898 goto out; /* there is no such device attached */
/* A device with open references cannot be removed. */
1900 err = -EBUSY;
1901 if (scd->access_count)
1902 goto out;
/* Give every upper-level driver template a chance to detach. */
1904 SDTpnt = scsi_devicelist;
1905 while (SDTpnt != NULL) {
1906 if (SDTpnt->detach)
1907 (*SDTpnt->detach) (scd);
1908 SDTpnt = SDTpnt->next;
1911 if (scd->attached == 0) {
1912 /*
1913 * Nobody is using this device any more.
1914 * Free all of the command structures.
1915 */
1916 if (HBA_ptr->hostt->revoke)
1917 HBA_ptr->hostt->revoke(scd);
1918 #ifdef DEVFS_MUST_DIE
1919 devfs_unregister (scd->de);
1920 #endif
1921 scsi_release_commandblocks(scd);
1923 /* Now we can remove the device structure */
1924 if (scd->next != NULL)
1925 scd->next->prev = scd->prev;
1927 if (scd->prev != NULL)
1928 scd->prev->next = scd->next;
1930 if (HBA_ptr->host_queue == scd) {
1931 HBA_ptr->host_queue = scd->next;
1933 blk_cleanup_queue(&scd->request_queue);
1934 kfree((char *) scd);
1935 } else {
1936 goto out;
1938 err = 0;
1940 out:
1942 free_page((unsigned long) buffer);
1943 return err;
1945 #endif
1947 /*
1948 * This entry point should be called by a driver if it is trying
1949 * to add a low level scsi driver to the system.
1950 *
1951 * Runs the template's detect() routine, links the template into the
1951 * scsi_hosts list, scans the buses for devices, attaches upper-level
1951 * drivers, and allocates per-device command blocks. Returns 0 on
1951 * success, 1 on any failure (including out-of-memory, in which case
1951 * the registration is unwound via scsi_unregister_host()).
1950 */
1951 static int scsi_register_host(Scsi_Host_Template * tpnt)
1953 int pcount;
1954 struct Scsi_Host *shpnt;
1955 Scsi_Device *SDpnt;
1956 struct Scsi_Device_Template *sdtpnt;
1957 const char *name;
1958 unsigned long flags;
1959 int out_of_space = 0;
1961 if (tpnt->next || !tpnt->detect)
1962 return 1; /* Must be already loaded, or
1963 * no detect routine available
1964 */
1966 /* If max_sectors isn't set, default to max */
1967 if (!tpnt->max_sectors)
1968 tpnt->max_sectors = MAX_SECTORS;
/* Remember how many hosts existed before detect(); if the count is
 * unchanged afterwards we may need to register one ourselves. */
1970 pcount = next_scsi_host;
1972 MOD_INC_USE_COUNT;
1974 /* The detect routine must carefully spinunlock/spinlock if
1975 it enables interrupts, since all interrupt handlers do
1976 spinlock as well.
1977 All lame drivers are going to fail due to the following
1978 spinlock. For the time beeing let's use it only for drivers
1979 using the new scsi code. NOTE: the detect routine could
1980 redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) */
1982 if (tpnt->use_new_eh_code) {
1983 spin_lock_irqsave(&io_request_lock, flags);
1984 tpnt->present = tpnt->detect(tpnt);
1985 spin_unlock_irqrestore(&io_request_lock, flags);
1986 } else
1987 tpnt->present = tpnt->detect(tpnt);
1989 if (tpnt->present) {
1990 if (pcount == next_scsi_host) {
/* detect() claims more than one adapter but registered none:
 * the low-level driver is broken - back out. */
1991 if (tpnt->present > 1) {
1992 printk(KERN_ERR "scsi: Failure to register low-level "
1993 "scsi driver");
1994 scsi_unregister_host(tpnt);
1995 return 1;
1997 /*
1998 * The low-level driver failed to register a driver.
1999 * We can do this now.
2000 */
2001 if(scsi_register(tpnt, 0)==NULL)
2003 printk(KERN_ERR "scsi: register failed.\n");
2004 scsi_unregister_host(tpnt);
2005 return 1;
2008 tpnt->next = scsi_hosts; /* Add to the linked list */
2009 scsi_hosts = tpnt;
2011 /* Add the new driver to /proc/scsi */
2012 #ifdef CONFIG_PROC_FS
2013 build_proc_dir_entries(tpnt);
2014 #endif
2017 #if 0
2018 /*
2019 * Add the kernel threads for each host adapter that will
2020 * handle error correction.
2021 */
/* NOTE(review): error-handler thread creation is compiled out in
 * this port (kernel_thread/semaphores unavailable?) - consistent
 * with the #if 0'd wakeup in scsi_bottom_half_handler(). Confirm
 * error recovery is intentionally disabled here. */
2022 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2023 if (shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code) {
2024 DECLARE_MUTEX_LOCKED(sem);
2026 shpnt->eh_notify = &sem;
2027 kernel_thread((int (*)(void *)) scsi_error_handler,
2028 (void *) shpnt, 0);
2030 /*
2031 * Now wait for the kernel error thread to initialize itself
2032 * as it might be needed when we scan the bus.
2033 */
2034 down(&sem);
2035 shpnt->eh_notify = NULL;
2038 #endif
2040 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2041 if (shpnt->hostt == tpnt) {
2042 if (tpnt->info) {
2043 name = tpnt->info(shpnt);
2044 } else {
2045 name = tpnt->name;
2047 printk(KERN_INFO "scsi%d : %s\n", /* And print a little message */
2048 shpnt->host_no, name);
2052 /* The next step is to call scan_scsis here. This generates the
2053 * Scsi_Devices entries
2054 */
2055 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2056 if (shpnt->hostt == tpnt) {
2057 scan_scsis(shpnt, 0, 0, 0, 0);
2058 if (shpnt->select_queue_depths != NULL) {
2059 (shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
/* Initialize upper-level driver templates that noticed devices. */
2064 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
2065 if (sdtpnt->init && sdtpnt->dev_noticed)
2066 (*sdtpnt->init) ();
2069 /*
2070 * Next we create the Scsi_Cmnd structures for this host
2071 */
2072 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2073 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
2074 if (SDpnt->host->hostt == tpnt) {
2075 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2076 if (sdtpnt->attach)
2077 (*sdtpnt->attach) (SDpnt);
2078 if (SDpnt->attached) {
2079 scsi_build_commandblocks(SDpnt);
2080 if (0 == SDpnt->has_cmdblocks)
2081 out_of_space = 1;
2086 /*
2087 * Now that we have all of the devices, resize the DMA pool,
2088 * as required. */
2089 if (!out_of_space)
2090 scsi_resize_dma_pool();
2093 /* This does any final handling that is required. */
2094 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
2095 if (sdtpnt->finish && sdtpnt->nr_dev) {
2096 (*sdtpnt->finish) ();
2100 #if defined(USE_STATIC_SCSI_MEMORY)
2101 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2102 (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2103 (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2104 (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2105 #endif
2107 if (out_of_space) {
2108 scsi_unregister_host(tpnt); /* easiest way to clean up?? */
2109 return 1;
2110 } else
2111 return 0;
2115 /*
2116 * Similarly, this entry point should be called by a loadable module if it
2117 * is trying to remove a low level scsi driver from the system.
2118 */
/*
 * Returns 0 on success, or -1 (via err_out) when the adapter cannot be
 * removed: its module is still in use, a command is still active, or a
 * high-level driver is still attached to one of its devices.
 *
 * NOTE(review): this extract appears to be missing lines (e.g. the opening
 * brace and several closing braces, judging by the gaps in the original
 * line numbering); the tokens below are preserved exactly as found.
 */
2119 static int scsi_unregister_host(Scsi_Host_Template * tpnt)
2121 int online_status;
2122 int pcount0, pcount;
2123 Scsi_Cmnd *SCpnt;
2124 Scsi_Device *SDpnt;
2125 Scsi_Device *SDpnt1;
2126 struct Scsi_Device_Template *sdtpnt;
2127 struct Scsi_Host *sh1;
2128 struct Scsi_Host *shpnt;
2129 char name[10]; /* host_no>=10^9? I don't think so. */
2131 #if 0
2132 /* get the big kernel lock, so we don't race with open() */
2133 lock_kernel();
2134 #endif
2136 /*
2137 * First verify that this host adapter is completely free with no pending
2138 * commands
2139 */
2140 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2141 for (SDpnt = shpnt->host_queue; SDpnt;
2142 SDpnt = SDpnt->next) {
2143 if (SDpnt->host->hostt == tpnt
2144 && SDpnt->host->hostt->module
2145 && GET_USE_COUNT(SDpnt->host->hostt->module))
2146 goto err_out;
2147 /*
2148 * FIXME(eric) - We need to find a way to notify the
2149 * low level driver that we are shutting down - via the
2150 * special device entry that still needs to get added.
2152 * Is detach interface below good enough for this?
2153 */
2157 /*
2158 * FIXME(eric) put a spinlock on this. We force all of the devices offline
2159 * to help prevent race conditions where other hosts/processors could try and
2160 * get in and queue a command.
2161 */
2162 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2163 for (SDpnt = shpnt->host_queue; SDpnt;
2164 SDpnt = SDpnt->next) {
2165 if (SDpnt->host->hostt == tpnt)
2166 SDpnt->online = FALSE;
/*
 * Claim every command block on this template's devices. If any command is
 * found busy (rq_status != RQ_INACTIVE) the partial claims are rolled back
 * (RQ_SCSI_DISCONNECTING -> RQ_INACTIVE) before bailing out.
 */
2171 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2172 if (shpnt->hostt != tpnt) {
2173 continue;
2175 for (SDpnt = shpnt->host_queue; SDpnt;
2176 SDpnt = SDpnt->next) {
2177 /*
2178 * Loop over all of the commands associated with the device. If any of
2179 * them are busy, then set the state back to inactive and bail.
2180 */
2181 for (SCpnt = SDpnt->device_queue; SCpnt;
2182 SCpnt = SCpnt->next) {
2183 online_status = SDpnt->online;
2184 SDpnt->online = FALSE;
2185 if (SCpnt->request.rq_status != RQ_INACTIVE) {
2186 printk(KERN_ERR "SCSI device not inactive - rq_status=%d, target=%d, pid=%ld, state=%d, owner=%d.\n",
2187 SCpnt->request.rq_status, SCpnt->target, SCpnt->pid,
2188 SCpnt->state, SCpnt->owner);
2189 for (SDpnt1 = shpnt->host_queue; SDpnt1;
2190 SDpnt1 = SDpnt1->next) {
2191 for (SCpnt = SDpnt1->device_queue; SCpnt;
2192 SCpnt = SCpnt->next)
2193 if (SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING)
2194 SCpnt->request.rq_status = RQ_INACTIVE;
2196 SDpnt->online = online_status;
2197 printk(KERN_ERR "Device busy???\n");
2198 goto err_out;
2200 /*
2201 * No, this device is really free. Mark it as such, and
2202 * continue on.
2203 */
2204 SCpnt->state = SCSI_STATE_DISCONNECTING;
2205 SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING; /* Mark as busy */
2209 /* Next we detach the high level drivers from the Scsi_Device structures */
2211 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2212 if (shpnt->hostt != tpnt) {
2213 continue;
2215 for (SDpnt = shpnt->host_queue; SDpnt;
2216 SDpnt = SDpnt->next) {
2217 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2218 if (sdtpnt->detach)
2219 (*sdtpnt->detach) (SDpnt);
2221 /* If something still attached, punt */
2222 if (SDpnt->attached) {
2223 printk(KERN_ERR "Attached usage count = %d\n", SDpnt->attached);
2224 goto err_out;
2226 #ifdef DEVFS_MUST_DIE
2227 devfs_unregister (SDpnt->de);
2228 #endif
2232 #if 0
2233 /*
2234 * Next, kill the kernel error recovery thread for this host.
2235 */
2236 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2237 if (shpnt->hostt == tpnt
2238 && shpnt->hostt->use_new_eh_code
2239 && shpnt->ehandler != NULL) {
2240 DECLARE_MUTEX_LOCKED(sem);
2242 shpnt->eh_notify = &sem;
2243 send_sig(SIGHUP, shpnt->ehandler, 1);
2244 down(&sem);
2245 shpnt->eh_notify = NULL;
2248 #endif
2250 /* Next we free up the Scsi_Cmnd structures for this host */
/*
 * Note the inner loop re-reads shpnt->host_queue each iteration because
 * the current head is unlinked and kfree'd inside the loop body.
 */
2252 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2253 if (shpnt->hostt != tpnt) {
2254 continue;
2256 for (SDpnt = shpnt->host_queue; SDpnt;
2257 SDpnt = shpnt->host_queue) {
2258 scsi_release_commandblocks(SDpnt);
2260 blk_cleanup_queue(&SDpnt->request_queue);
2261 /* Next free up the Scsi_Device structures for this host */
2262 shpnt->host_queue = SDpnt->next;
2263 kfree((char *) SDpnt);
2268 /* Next we go through and remove the instances of the individual hosts
2269 * that were detected */
2271 pcount0 = next_scsi_host;
2272 for (shpnt = scsi_hostlist; shpnt; shpnt = sh1) {
2273 sh1 = shpnt->next;
2274 if (shpnt->hostt != tpnt)
2275 continue;
2276 pcount = next_scsi_host;
2277 /* Remove the /proc/scsi directory entry */
2278 sprintf(name,"%d",shpnt->host_no);
2279 #ifdef CONFIG_PROC_FS
2280 remove_proc_entry(name, tpnt->proc_dir);
2281 #endif
2282 if (tpnt->release)
2283 (*tpnt->release) (shpnt);
2284 else {
2285 /* This is the default case for the release function.
2286 * It should do the right thing for most correctly
2287 * written host adapters.
2288 */
2289 if (shpnt->irq)
2290 free_irq(shpnt->irq, NULL);
2292 #if 0
2293 if (shpnt->dma_channel != 0xff)
2294 free_dma(shpnt->dma_channel);
2295 #endif
2296 if (shpnt->io_port && shpnt->n_io_port)
2297 release_region(shpnt->io_port, shpnt->n_io_port);
/* If the driver's release hook did not call scsi_unregister itself
 * (next_scsi_host unchanged), do it on its behalf. */
2299 if (pcount == next_scsi_host)
2300 scsi_unregister(shpnt);
2301 tpnt->present--;
2304 /*
2305 * If there are absolutely no more hosts left, it is safe
2306 * to completely nuke the DMA pool. The resize operation will
2307 * do the right thing and free everything.
2308 */
2309 if (!scsi_hosts)
2310 scsi_resize_dma_pool();
2312 if (pcount0 != next_scsi_host)
2313 printk(KERN_INFO "scsi : %d host%s left.\n", next_scsi_host,
2314 (next_scsi_host == 1) ? "" : "s");
2316 #if defined(USE_STATIC_SCSI_MEMORY)
2317 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2318 (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2319 (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2320 (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2321 #endif
2323 /*
2324 * Remove it from the linked list and /proc if all
2325 * hosts were successfully removed (ie preset == 0)
2326 */
2327 if (!tpnt->present) {
2328 Scsi_Host_Template **SHTp = &scsi_hosts;
2329 Scsi_Host_Template *SHT;
2331 while ((SHT = *SHTp) != NULL) {
2332 if (SHT == tpnt) {
2333 *SHTp = SHT->next;
2334 #ifdef CONFIG_PROC_FS
2335 remove_proc_entry(tpnt->proc_name, proc_scsi);
2336 #endif
2337 break;
2339 SHTp = &SHT->next;
2342 MOD_DEC_USE_COUNT;
2344 #if 0
2345 unlock_kernel();
2346 #endif
2347 return 0;
2349 err_out:
2351 #if 0
2352 unlock_kernel();
2353 #endif
2354 return -1;
/* Forward declaration: scsi_register_device_module() below uses this in
 * its out-of-space cleanup path before the definition appears. */
2357 static int scsi_unregister_device(struct Scsi_Device_Template *tpnt);
2359 /*
2360 * This entry point should be called by a loadable module if it is trying
2361 * add a high level scsi driver to the system.
2362 */
/*
 * Registers an upper-level device template (disk, tape, cdrom, ...),
 * runs its detect/init/attach hooks over every known device, and
 * allocates command blocks for freshly attached devices.
 * Returns 0 on success, 1 on failure (already registered, init failed,
 * or out of command-block memory).
 */
2363 static int scsi_register_device_module(struct Scsi_Device_Template *tpnt)
2365 Scsi_Device *SDpnt;
2366 struct Scsi_Host *shpnt;
2367 int out_of_space = 0;
/* A non-NULL next link suggests the template is already on a list —
 * presumably scsi_devicelist; refuse to register twice. TODO confirm. */
2369 if (tpnt->next)
2370 return 1;
2372 scsi_register_device(tpnt);
2373 /*
2374 * First scan the devices that we know about, and see if we notice them.
2375 */
2377 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2378 for (SDpnt = shpnt->host_queue; SDpnt;
2379 SDpnt = SDpnt->next) {
2380 if (tpnt->detect)
2381 SDpnt->detected = (*tpnt->detect) (SDpnt);
2385 /*
2386 * If any of the devices would match this driver, then perform the
2387 * init function.
2388 */
2389 if (tpnt->init && tpnt->dev_noticed) {
/* init failed: clear every detected flag and roll back registration. */
2390 if ((*tpnt->init) ()) {
2391 for (shpnt = scsi_hostlist; shpnt;
2392 shpnt = shpnt->next) {
2393 for (SDpnt = shpnt->host_queue; SDpnt;
2394 SDpnt = SDpnt->next) {
2395 SDpnt->detected = 0;
2398 scsi_deregister_device(tpnt);
2399 return 1;
2403 /*
2404 * Now actually connect the devices to the new driver.
2405 */
2406 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2407 for (SDpnt = shpnt->host_queue; SDpnt;
2408 SDpnt = SDpnt->next) {
2409 SDpnt->attached += SDpnt->detected;
2410 SDpnt->detected = 0;
2411 if (tpnt->attach)
2412 (*tpnt->attach) (SDpnt);
2413 /*
2414 * If this driver attached to the device, and don't have any
2415 * command blocks for this device, allocate some.
2416 */
2417 if (SDpnt->attached && SDpnt->has_cmdblocks == 0) {
2418 SDpnt->online = TRUE;
2419 scsi_build_commandblocks(SDpnt);
/* has_cmdblocks still zero after the build => allocation failed. */
2420 if (0 == SDpnt->has_cmdblocks)
2421 out_of_space = 1;
2426 /*
2427 * This does any final handling that is required.
2428 */
2429 if (tpnt->finish && tpnt->nr_dev)
2430 (*tpnt->finish) ();
2431 if (!out_of_space)
2432 scsi_resize_dma_pool();
2433 MOD_INC_USE_COUNT;
2435 if (out_of_space) {
2436 scsi_unregister_device(tpnt); /* easiest way to clean up?? */
2437 return 1;
2438 } else
2439 return 0;
/*
 * Remove an upper-level device template from the system: detach it from
 * every device, release command blocks on devices that end up with no
 * users, and unlink the template. Returns 0 on success, -1 if the
 * template's module is still in use.
 */
2442 static int scsi_unregister_device(struct Scsi_Device_Template *tpnt)
2444 Scsi_Device *SDpnt;
2445 struct Scsi_Host *shpnt;
2447 #if 0
2448 lock_kernel();
2449 #endif
2450 /*
2451 * If we are busy, this is not going to fly.
2452 */
2453 if (GET_USE_COUNT(tpnt->module) != 0)
2454 goto error_out;
2456 /*
2457 * Next, detach the devices from the driver.
2458 */
2460 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2461 for (SDpnt = shpnt->host_queue; SDpnt;
2462 SDpnt = SDpnt->next) {
2463 if (tpnt->detach)
2464 (*tpnt->detach) (SDpnt);
2465 if (SDpnt->attached == 0) {
2466 SDpnt->online = FALSE;
2468 /*
2469 * Nobody is using this device any more. Free all of the
2470 * command structures.
2471 */
2472 scsi_release_commandblocks(SDpnt);
2476 /*
2477 * Extract the template from the linked list.
2478 */
2479 scsi_deregister_device(tpnt);
2481 MOD_DEC_USE_COUNT;
2482 #if 0
2483 unlock_kernel();
2484 #endif
2486 /*
2487 * Final cleanup for the driver is done in the driver sources in the
2488 * cleanup function.
2489 */
2490 return 0;
2491 error_out:
2492 #if 0
2493 unlock_kernel();
2494 #endif
2495 return -1;
2499 /* This function should be called by drivers which needs to register
2500 * with the midlevel scsi system. As of 2.4.0-test9pre3 this is our
2501 * main device/hosts register function /mathiasen
2502 */
2503 int scsi_register_module(int module_type, void *ptr)
2505 switch (module_type) {
2506 case MODULE_SCSI_HA:
2507 return scsi_register_host((Scsi_Host_Template *) ptr);
2509 /* Load upper level device handler of some kind */
2510 case MODULE_SCSI_DEV:
2511 #ifdef CONFIG_KMOD
2512 if (scsi_hosts == NULL)
2513 request_module("scsi_hostadapter");
2514 #endif
2515 return scsi_register_device_module((struct Scsi_Device_Template *) ptr);
2516 /* The rest of these are not yet implemented */
2518 /* Load constants.o */
2519 case MODULE_SCSI_CONST:
2521 /* Load specialized ioctl handler for some device. Intended for
2522 * cdroms that have non-SCSI2 audio command sets. */
2523 case MODULE_SCSI_IOCTL:
2525 default:
2526 return 1;
2530 /* Reverse the actions taken above
2531 */
2532 int scsi_unregister_module(int module_type, void *ptr)
2534 int retval = 0;
2536 switch (module_type) {
2537 case MODULE_SCSI_HA:
2538 retval = scsi_unregister_host((Scsi_Host_Template *) ptr);
2539 break;
2540 case MODULE_SCSI_DEV:
2541 retval = scsi_unregister_device((struct Scsi_Device_Template *)ptr);
2542 break;
2543 /* The rest of these are not yet implemented. */
2544 case MODULE_SCSI_CONST:
2545 case MODULE_SCSI_IOCTL:
2546 break;
2547 default:;
2549 return retval;
2552 #ifdef CONFIG_PROC_FS
2553 /*
2554 * Function: scsi_dump_status
2556 * Purpose: Brain dump of scsi system, used for problem solving.
2558 * Arguments: level - used to indicate level of detail.
2560 * Notes: The level isn't used at all yet, but we need to find some way
2561 * of sensibly logging varying degrees of information. A quick one-line
2562 * display of each command, plus the status would be most useful.
2564 * This does depend upon CONFIG_SCSI_LOGGING - I do want some way of turning
2565 * it all off if the user wants a lean and mean kernel. It would probably
2566 * also be useful to allow the user to specify one single host to be dumped.
2567 * A second argument to the function would be useful for that purpose.
2569 * FIXME - some formatting of the output into tables would be very handy.
2570 */
/* Compiles to an empty function unless CONFIG_SCSI_LOGGING is set. */
2571 static void scsi_dump_status(int level)
2573 #ifdef CONFIG_SCSI_LOGGING /* { */
2574 int i;
2575 struct Scsi_Host *shpnt;
2576 Scsi_Cmnd *SCpnt;
2577 Scsi_Device *SDpnt;
2578 printk(KERN_INFO "Dump of scsi host parameters:\n");
2579 i = 0;
2580 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2581 printk(KERN_INFO " %d %d %d : %d %d\n",
2582 shpnt->host_failed,
2583 shpnt->host_busy,
2584 atomic_read(&shpnt->host_active),
2585 shpnt->host_blocked,
2586 shpnt->host_self_blocked);
2589 printk(KERN_INFO "\n\n");
2590 printk(KERN_INFO "Dump of scsi command parameters:\n");
2591 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2592 printk(KERN_INFO "h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result\n");
2593 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
2594 for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCpnt->next) {
2595 /* (0) h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result %d %x */
2596 printk(KERN_INFO "(%3d) %2d:%1d:%2d:%2d (%6s %4ld %4ld %4ld %4x %1d) (%1d %1d 0x%2x) (%4d %4d %4d) 0x%2.2x 0x%2.2x 0x%8.8x\n",
2597 i++,
2599 SCpnt->host->host_no,
2600 SCpnt->channel,
2601 SCpnt->target,
2602 SCpnt->lun,
2604 kdevname(SCpnt->request.rq_dev),
2605 SCpnt->request.sector,
2606 SCpnt->request.nr_sectors,
2607 SCpnt->request.current_nr_sectors,
2608 SCpnt->request.rq_status,
2609 SCpnt->use_sg,
2611 SCpnt->retries,
2612 SCpnt->allowed,
2613 SCpnt->flags,
2615 SCpnt->timeout_per_command,
2616 SCpnt->timeout,
2617 SCpnt->internal_timeout,
2619 SCpnt->cmnd[0],
2620 SCpnt->sense_buffer[2],
2621 SCpnt->result);
/* Finally walk every block device request queue and print pending
 * requests. Note: i is reused here as the blkdev index. */
2626 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2627 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
2628 /* Now dump the request lists for each block device */
2629 printk(KERN_INFO "Dump of pending block device requests\n");
2630 for (i = 0; i < MAX_BLKDEV; i++) {
2631 struct list_head * queue_head;
2633 queue_head = &blk_dev[i].request_queue.queue_head;
2634 if (!list_empty(queue_head)) {
2635 struct request *req;
2636 struct list_head * entry;
2638 printk(KERN_INFO "%d: ", i);
2639 entry = queue_head->next;
2640 do {
2641 req = blkdev_entry_to_request(entry);
2642 printk("(%s %d %ld %ld %ld) ",
2643 kdevname(req->rq_dev),
2644 req->cmd,
2645 req->sector,
2646 req->nr_sectors,
2647 req->current_nr_sectors);
2648 } while ((entry = entry->next) != queue_head);
2649 printk("\n");
2654 #endif /* CONFIG_SCSI_LOGGING */ /* } */
2656 #endif /* CONFIG_PROC_FS */
2658 static int __init scsi_host_no_init (char *str)
2660 static int next_no = 0;
2661 char *temp;
2663 while (str) {
2664 temp = str;
2665 while (*temp && (*temp != ':') && (*temp != ','))
2666 temp++;
2667 if (!*temp)
2668 temp = NULL;
2669 else
2670 *temp++ = 0;
2671 scsi_host_no_insert(str, next_no);
2672 str = temp;
2673 next_no++;
2675 return 1;
/* Raw value of the "scsihosts" module/boot parameter (a string of host
 * driver names); parsed by scsi_host_no_init() during init_scsi(). */
2678 static char *scsihosts;
2680 MODULE_PARM(scsihosts, "s");
2681 MODULE_DESCRIPTION("SCSI core");
2682 MODULE_LICENSE("GPL");
2684 #ifndef MODULE
2685 int __init scsi_setup(char *str)
2687 scsihosts = str;
2688 return 1;
2691 __setup("scsihosts=", scsi_setup);
2692 #endif
/*
 * Subsystem initialisation: create the minimal DMA pool, publish the
 * /proc/scsi entries, parse the user-supplied host ordering, and install
 * the SCSI bottom-half completion handler.
 * Returns 0 on success, 1 or -ENOMEM on failure.
 */
2694 static int __init init_scsi(void)
2696 #ifdef CONFIG_PROC_FS
2697 struct proc_dir_entry *generic;
2698 #endif
2700 printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");
2702 if( scsi_init_minimal_dma_pool() != 0 )
2704 return 1;
2707 #ifdef CONFIG_PROC_FS
2708 /*
2709 * This makes /proc/scsi and /proc/scsi/scsi visible.
2710 */
2711 proc_scsi = proc_mkdir("scsi", 0);
2712 if (!proc_scsi) {
2713 printk (KERN_ERR "cannot init /proc/scsi\n");
2714 return -ENOMEM;
2716 generic = create_proc_info_entry ("scsi/scsi", 0, 0, scsi_proc_info);
2717 if (!generic) {
2718 printk (KERN_ERR "cannot init /proc/scsi/scsi\n");
2719 remove_proc_entry("scsi", 0);
2720 return -ENOMEM;
2722 generic->write_proc = proc_scsi_gen_write;
2723 #endif
2725 #ifdef DEVFS_MUST_DIE
2726 scsi_devfs_handle = devfs_mk_dir (NULL, "scsi", NULL);
2727 #endif
/* scsihosts is the raw "scsihosts=" parameter; NULL is tolerated by
 * scsi_host_no_init(). */
2728 if (scsihosts)
2729 printk(KERN_INFO "scsi: host order: %s\n", scsihosts);
2730 scsi_host_no_init (scsihosts);
2731 /*
2732 * This is where the processing takes place for most everything
2733 * when commands are completed.
2734 */
2735 init_bh(SCSI_BH, scsi_bottom_half_handler);
2737 return 0;
/*
 * Subsystem teardown: remove the bottom half, free the host-number name
 * list, take down the /proc entries and release the DMA pool.
 */
2742 static void __exit exit_scsi(void)
2744 Scsi_Host_Name *shn, *shn2 = NULL;
2746 remove_bh(SCSI_BH);
2748 #ifdef DEVFS_MUST_DIE
2749 devfs_unregister (scsi_devfs_handle);
2750 #endif
/*
 * Free each node's name immediately, but free the node itself one step
 * behind (via shn2) so the list's next pointer is still valid while
 * iterating; the final node is released after the loop.
 */
2751 for (shn = scsi_host_no_list;shn;shn = shn->next) {
2752 if (shn->name)
2753 kfree(shn->name);
2754 if (shn2)
2755 kfree (shn2);
2756 shn2 = shn;
2758 if (shn2)
2759 kfree (shn2);
2761 #ifdef CONFIG_PROC_FS
2762 /* No, we're not here anymore. Don't show the /proc/scsi files. */
2763 remove_proc_entry ("scsi/scsi", 0);
2764 remove_proc_entry ("scsi", 0);
2765 #endif
2767 /*
2768 * Free up the DMA pool.
2769 */
2770 scsi_resize_dma_pool();
/* Module entry/exit hooks for the SCSI core. */
2774 module_init(init_scsi);
2775 module_exit(exit_scsi);
2777 /*
2778 * Function: scsi_get_host_dev()
2780 * Purpose: Create a Scsi_Device that points to the host adapter itself.
2782 * Arguments: SHpnt - Host that needs a Scsi_Device
2784 * Lock status: None assumed.
2786 * Returns: The Scsi_Device or NULL
2788 * Notes:
2789 */
2790 Scsi_Device * scsi_get_host_dev(struct Scsi_Host * SHpnt)
2792 Scsi_Device * SDpnt;
2794 /*
2795 * Attach a single Scsi_Device to the Scsi_Host - this should
2796 * be made to look like a "pseudo-device" that points to the
2797 * HA itself. For the moment, we include it at the head of
2798 * the host_queue itself - I don't think we want to show this
2799 * to the HA in select_queue_depths(), as this would probably confuse
2800 * matters.
2801 * Note - this device is not accessible from any high-level
2802 * drivers (including generics), which is probably not
2803 * optimal. We can add hooks later to attach
2804 */
/* GFP_ATOMIC: non-sleeping allocation — presumably callable from atomic
 * context (TODO confirm against callers); NULL on failure. */
2805 SDpnt = (Scsi_Device *) kmalloc(sizeof(Scsi_Device),
2806 GFP_ATOMIC);
2807 if(SDpnt == NULL)
2808 return NULL;
2810 memset(SDpnt, 0, sizeof(Scsi_Device));
/* The pseudo-device carries the host's own SCSI id and a type of -1
 * (no device class), with a queue depth of a single command. */
2812 SDpnt->host = SHpnt;
2813 SDpnt->id = SHpnt->this_id;
2814 SDpnt->type = -1;
2815 SDpnt->queue_depth = 1;
2817 scsi_build_commandblocks(SDpnt);
2819 scsi_initialize_queue(SDpnt, SHpnt);
2821 SDpnt->online = TRUE;
2823 #if 0
2824 /*
2825 * Initialize the object that we will use to wait for command blocks.
2826 */
2827 init_waitqueue_head(&SDpnt->scpnt_wait);
2828 #endif
2829 return SDpnt;
2832 /*
2833 * Function: scsi_free_host_dev()
2835 * Purpose: Free a Scsi_Device created by scsi_get_host_dev().
2837 * Arguments: SDpnt - the pseudo-device to free
2839 * Lock status: None assumed.
2841 * Returns: Nothing
2843 * Notes: Panics if handed a device whose id is not the host's own
2844 * this_id, i.e. anything other than the host pseudo-device.
2845 */
2845 void scsi_free_host_dev(Scsi_Device * SDpnt)
2847 if( (unsigned char) SDpnt->id != (unsigned char) SDpnt->host->this_id )
2849 panic("Attempt to delete wrong device\n");
2852 blk_cleanup_queue(&SDpnt->request_queue);
2854 /*
2855 * We only have a single SCpnt attached to this device. Free
2856 * it now.
2857 */
2858 scsi_release_commandblocks(SDpnt);
2859 kfree(SDpnt);
2862 /*
2863 * Function: scsi_reset_provider_done_command
2865 * Purpose: Dummy done routine.
2867 * Notes: Some low level drivers will call scsi_done and end up here,
2868 * others won't bother.
2869 * We don't want the bogus command used for the bus/device
2870 * reset to find its way into the mid-layer so we intercept
2871 * it here.
2872 */
/* NOTE(review): the function body is not visible in this extract; per the
 * note above it is expected to be an intentional no-op. */
2873 static void
2874 scsi_reset_provider_done_command(Scsi_Cmnd *SCpnt)
2878 /*
2879 * Function: scsi_reset_provider
2881 * Purpose: Send requested reset to a bus or device at any phase.
2883 * Arguments: device - device to send reset to
2884 * flag - reset type (see scsi.h)
2886 * Returns: SUCCESS/FAILURE.
2888 * Notes: This is used by the SCSI Generic driver to provide
2889 * Bus/Device reset capability.
2890 */
2891 int
2892 scsi_reset_provider(Scsi_Device *dev, int flag)
/* A throwaway Scsi_Cmnd is built on the stack; it is never queued through
 * the normal command paths, and its completion routine is the dummy
 * scsi_reset_provider_done_command so it cannot re-enter the mid-layer. */
2894 Scsi_Cmnd SC, *SCpnt = &SC;
2895 int rtn;
2897 memset(&SCpnt->eh_timeout, 0, sizeof(SCpnt->eh_timeout));
2898 SCpnt->host = dev->host;
2899 SCpnt->device = dev;
2900 SCpnt->target = dev->id;
2901 SCpnt->lun = dev->lun;
2902 SCpnt->channel = dev->channel;
2903 SCpnt->request.rq_status = RQ_SCSI_BUSY;
2904 SCpnt->request.waiting = NULL;
2905 SCpnt->use_sg = 0;
2906 SCpnt->old_use_sg = 0;
2907 SCpnt->old_cmd_len = 0;
2908 SCpnt->underflow = 0;
2909 SCpnt->transfersize = 0;
2910 SCpnt->resid = 0;
2911 SCpnt->serial_number = 0;
2912 SCpnt->serial_number_at_timeout = 0;
2913 SCpnt->host_scribble = NULL;
2914 SCpnt->next = NULL;
2915 SCpnt->state = SCSI_STATE_INITIALIZING;
2916 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
2918 memset(&SCpnt->cmnd, '\0', sizeof(SCpnt->cmnd));
2920 SCpnt->scsi_done = scsi_reset_provider_done_command;
2921 SCpnt->done = NULL;
2922 SCpnt->reset_chain = NULL;
2924 SCpnt->buffer = NULL;
2925 SCpnt->bufflen = 0;
2926 SCpnt->request_buffer = NULL;
2927 SCpnt->request_bufflen = 0;
2929 SCpnt->internal_timeout = NORMAL_TIMEOUT;
2930 SCpnt->abort_reason = DID_ABORT;
2932 SCpnt->cmd_len = 0;
2934 SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
2935 SCpnt->sc_request = NULL;
2936 SCpnt->sc_magic = SCSI_CMND_MAGIC;
2938 /*
2939 * Sometimes the command can get back into the timer chain,
2940 * so use the pid as an identifier.
2941 */
2942 SCpnt->pid = 0;
/* New-style error handling issues the reset directly; the old path must
 * run under io_request_lock. */
2944 if (dev->host->hostt->use_new_eh_code) {
2945 rtn = scsi_new_reset(SCpnt, flag);
2946 } else {
2947 unsigned long flags;
2949 spin_lock_irqsave(&io_request_lock, flags);
2950 rtn = scsi_old_reset(SCpnt, flag);
2951 spin_unlock_irqrestore(&io_request_lock, flags);
/* NOTE(review): the old-EH result is discarded and 0 is returned
 * unconditionally — looks deliberate, but worth confirming. */
2952 rtn= 0;
2955 scsi_delete_timer(SCpnt);
2956 return rtn;
2959 /*
2960 * Overrides for Emacs so that we follow Linus's tabbing style.
2961 * Emacs will notice this stuff at the end of the file and automatically
2962 * adjust the settings for this buffer only. This must remain at the end
2963 * of the file.
2964 * ---------------------------------------------------------------------------
2965 * Local variables:
2966 * c-indent-level: 4
2967 * c-brace-imaginary-offset: 0
2968 * c-brace-offset: -4
2969 * c-argdecl-indent: 4
2970 * c-label-offset: -4
2971 * c-continued-statement-offset: 4
2972 * c-continued-brace-offset: 0
2973 * indent-tabs-mode: nil
2974 * tab-width: 8
2975 * End:
2976 */