ia64/linux-2.6.18-xen.hg

view drivers/block/ub.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive less pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
1 /*
2 * The low performance USB storage driver (ub).
3 *
4 * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
5 * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
6 *
7 * This work is a part of Linux kernel, is derived from it,
8 * and is not licensed separately. See file COPYING for details.
9 *
10 * TODO (sorted by decreasing priority)
11 * -- set readonly flag for CDs, set removable flag for CF readers
12 * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
13 * -- verify the 13 conditions and do bulk resets
14 * -- highmem
15 * -- move top_sense and work_bcs into separate allocations (if they survive)
16 * for cache purists and esoteric architectures.
17 * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
18 * -- prune comments, they are too voluminous
19 * -- Resolve XXX's
20 * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
21 */
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/usb.h>
25 #include <linux/usb_usual.h>
26 #include <linux/blkdev.h>
27 #include <linux/timer.h>
28 #include <scsi/scsi.h>
30 #define DRV_NAME "ub"
32 #define UB_MAJOR 180
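/* Block major 180 is the number reserved for USB storage disks. */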
34 /*
35 * The command state machine is the key model for understanding of this driver.
36 *
37 * The general rule is that all transitions are done towards the bottom
38 * of the diagram, thus preventing any loops.
39 *
40 * An exception to that is how the STAT state is handled. A counter allows it
41 * to be re-entered along the path marked with [C].
42 *
43 * +--------+
44 * ! INIT !
45 * +--------+
46 * !
47 * ub_scsi_cmd_start fails ->--------------------------------------\
48 * ! !
49 * V !
50 * +--------+ !
51 * ! CMD ! !
52 * +--------+ !
53 * ! +--------+ !
54 * was -EPIPE -->-------------------------------->! CLEAR ! !
55 * ! +--------+ !
56 * ! ! !
57 * was error -->------------------------------------- ! --------->\
58 * ! ! !
59 * /--<-- cmd->dir == NONE ? ! !
60 * ! ! ! !
61 * ! V ! !
62 * ! +--------+ ! !
63 * ! ! DATA ! ! !
64 * ! +--------+ ! !
65 * ! ! +---------+ ! !
66 * ! was -EPIPE -->--------------->! CLR2STS ! ! !
67 * ! ! +---------+ ! !
68 * ! ! ! ! !
69 * ! ! was error -->---- ! --------->\
70 * ! was error -->--------------------- ! ------------- ! --------->\
71 * ! ! ! ! !
72 * ! V ! ! !
73 * \--->+--------+ ! ! !
74 * ! STAT !<--------------------------/ ! !
75 * /--->+--------+ ! !
76 * ! ! ! !
77 * [C] was -EPIPE -->-----------\ ! !
78 * ! ! ! ! !
79 * +<---- len == 0 ! ! !
80 * ! ! ! ! !
81 * ! was error -->--------------------------------------!---------->\
82 * ! ! ! ! !
83 * +<---- bad CSW ! ! !
84 * +<---- bad tag ! ! !
85 * ! ! V ! !
86 * ! ! +--------+ ! !
87 * ! ! ! CLRRS ! ! !
88 * ! ! +--------+ ! !
89 * ! ! ! ! !
90 * \------- ! --------------------[C]--------\ ! !
91 * ! ! ! !
92 * cmd->error---\ +--------+ ! !
93 * ! +--------------->! SENSE !<----------/ !
94 * STAT_FAIL----/ +--------+ !
95 * ! ! V
96 * ! V +--------+
97 * \--------------------------------\--------------------->! DONE !
98 * +--------+
99 */
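/*
 * A rough mapping to Bulk-Only Transport phases (for orientation): CMD
 * sends the CBW, DATA runs the bulk-in/bulk-out transfer, STAT reads the
 * CSW, and the CLEAR/CLR2STS/CLRRS states recover from an endpoint stall
 * seen in the corresponding phase before the machine moves on.
 */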
101 /*
102 * This many LUNs per USB device.
103 * Every one of them takes a host, see UB_MAX_HOSTS.
104 */
105 #define UB_MAX_LUNS 9
107 /*
108 */
110 #define UB_PARTS_PER_LUN 8
112 #define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */
114 #define UB_SENSE_SIZE 18
116 /*
117 */
119 /* command block wrapper */
120 struct bulk_cb_wrap {
121 __le32 Signature; /* contains 'USBC' */
122 u32 Tag; /* unique per command id */
123 __le32 DataTransferLength; /* size of data */
124 u8 Flags; /* direction in bit 0 */
125 u8 Lun; /* LUN */
126 u8 Length; /* length of the CDB */
127 u8 CDB[UB_MAX_CDB_SIZE]; /* max command */
128 };
130 #define US_BULK_CB_WRAP_LEN 31
131 #define US_BULK_CB_SIGN 0x43425355 /* spells out 'USBC' */
132 #define US_BULK_FLAG_IN 1
133 #define US_BULK_FLAG_OUT 0
135 /* command status wrapper */
136 struct bulk_cs_wrap {
137 __le32 Signature; /* should = 'USBS' */
138 u32 Tag; /* same as original command */
139 __le32 Residue; /* amount not transferred */
140 u8 Status; /* see below */
141 };
143 #define US_BULK_CS_WRAP_LEN 13
144 #define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */
145 #define US_BULK_STAT_OK 0
146 #define US_BULK_STAT_FAIL 1
147 #define US_BULK_STAT_PHASE 2
149 /* bulk-only class specific requests */
150 #define US_BULK_RESET_REQUEST 0xff
151 #define US_BULK_GET_MAX_LUN 0xfe
153 /*
154 */
155 struct ub_dev;
157 #define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */
158 #define UB_MAX_SECTORS 64
160 /*
161 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
162 * even if a webcam hogs the bus, but some devices need time to spin up.
163 */
164 #define UB_URB_TIMEOUT (HZ*2)
165 #define UB_DATA_TIMEOUT (HZ*5) /* ZIP does spin-ups in the data phase */
166 #define UB_STAT_TIMEOUT (HZ*5) /* Same spinups and eject for a dataless cmd. */
167 #define UB_CTRL_TIMEOUT (HZ/2) /* 500ms ought to be enough to clear a stall */
169 /*
170 * An instance of a SCSI command in transit.
171 */
172 #define UB_DIR_NONE 0
173 #define UB_DIR_READ 1
174 #define UB_DIR_ILLEGAL2 2
175 #define UB_DIR_WRITE 3
177 #define UB_DIR_CHAR(c) (((c)==UB_DIR_WRITE)? 'w': \
178 (((c)==UB_DIR_READ)? 'r': 'n'))
180 enum ub_scsi_cmd_state {
181 UB_CMDST_INIT, /* Initial state */
182 UB_CMDST_CMD, /* Command submitted */
183 UB_CMDST_DATA, /* Data phase */
184 UB_CMDST_CLR2STS, /* Clearing before requesting status */
185 UB_CMDST_STAT, /* Status phase */
186 UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */
187 UB_CMDST_CLRRS, /* Clearing before retrying status */
188 UB_CMDST_SENSE, /* Sending Request Sense */
189 UB_CMDST_DONE /* Final state */
190 };
192 struct ub_scsi_cmd {
193 unsigned char cdb[UB_MAX_CDB_SIZE];
194 unsigned char cdb_len;
196 unsigned char dir; /* 0 - none, 1 - read, 3 - write. */
197 enum ub_scsi_cmd_state state;
198 unsigned int tag;
199 struct ub_scsi_cmd *next;
201 int error; /* Return code - valid upon done */
202 unsigned int act_len; /* Return size */
203 unsigned char key, asc, ascq; /* May be valid if error==-EIO */
205 int stat_count; /* Retries getting status. */
207 unsigned int len; /* Requested length */
208 unsigned int current_sg;
209 unsigned int nsg; /* sgv[nsg] */
210 struct scatterlist sgv[UB_MAX_REQ_SG];
212 struct ub_lun *lun;
213 void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
214 void *back;
215 };
217 struct ub_request {
218 struct request *rq;
219 unsigned int current_try;
220 unsigned int nsg; /* sgv[nsg] */
221 struct scatterlist sgv[UB_MAX_REQ_SG];
222 };
224 /*
225 */
226 struct ub_capacity {
227 unsigned long nsec; /* Linux size - 512 byte sectors */
228 unsigned int bsize; /* Linux hardsect_size */
229 unsigned int bshift; /* Shift between 512 and hard sects */
230 };
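/*
 * Illustration (example values only): a unit that reports 2048-byte
 * sectors ends up with bsize = 2048 and bshift = 2, so the 512-byte
 * sector numbers used by the block layer convert as
 *	block = rq->sector >> bshift;		(512-byte units -> device blocks)
 *	nsec  = device_blocks << bshift;	(capacity back in 512-byte units)
 */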
232 /*
233 * This is a direct take-off from linux/include/completion.h
234 * The difference is that I do not wait on this thing, just poll.
235 * When I want to wait (ub_probe), I just use the stock completion.
236 *
237 * Note that INIT_COMPLETION takes no lock. It is correct. But why
238 * in the bloody hell that thing takes struct instead of pointer to struct
239 * is quite beyond me. I just copied it from the stock completion.
240 */
241 struct ub_completion {
242 unsigned int done;
243 spinlock_t lock;
244 };
246 static inline void ub_init_completion(struct ub_completion *x)
247 {
248 x->done = 0;
249 spin_lock_init(&x->lock);
250 }
252 #define UB_INIT_COMPLETION(x) ((x).done = 0)
254 static void ub_complete(struct ub_completion *x)
255 {
256 unsigned long flags;
258 spin_lock_irqsave(&x->lock, flags);
259 x->done++;
260 spin_unlock_irqrestore(&x->lock, flags);
261 }
263 static int ub_is_completed(struct ub_completion *x)
264 {
265 unsigned long flags;
266 int ret;
268 spin_lock_irqsave(&x->lock, flags);
269 ret = x->done;
270 spin_unlock_irqrestore(&x->lock, flags);
271 return ret;
272 }
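/*
 * How this is used below (sketch): ub_urb_complete() calls ub_complete()
 * and schedules the tasklet, and ub_scsi_dispatch() then polls
 *	if (!ub_is_completed(&sc->work_done))
 *		break;			(URB still in flight)
 *	del_timer(&sc->work_timer);
 *	ub_scsi_urb_compl(sc, cmd);
 * so the work URB is only examined after its completion was recorded.
 */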
274 /*
275 */
276 struct ub_scsi_cmd_queue {
277 int qlen, qmax;
278 struct ub_scsi_cmd *head, *tail;
279 };
281 /*
282 * The block device instance (one per LUN).
283 */
284 struct ub_lun {
285 struct ub_dev *udev;
286 struct list_head link;
287 struct gendisk *disk;
288 int id; /* Host index */
289 int num; /* LUN number */
290 char name[16];
292 int changed; /* Media was changed */
293 int removable;
294 int readonly;
296 struct ub_request urq;
298 /* Use Ingo's mempool if or when we have more than one command. */
299 /*
300 * Currently we never need more than one command for the whole device.
301 * However, giving every LUN a command is a cheap and automatic way
302 * to enforce fairness between them.
303 */
304 int cmda[1];
305 struct ub_scsi_cmd cmdv[1];
307 struct ub_capacity capacity;
308 };
310 /*
311 * The USB device instance.
312 */
313 struct ub_dev {
314 spinlock_t *lock;
315 atomic_t poison; /* The USB device is disconnected */
316 int openc; /* protected by ub_lock! */
317 /* kref is too implicit for our taste */
318 int reset; /* Reset is running */
319 unsigned int tagcnt;
320 char name[12];
321 struct usb_device *dev;
322 struct usb_interface *intf;
324 struct list_head luns;
326 unsigned int send_bulk_pipe; /* cached pipe values */
327 unsigned int recv_bulk_pipe;
328 unsigned int send_ctrl_pipe;
329 unsigned int recv_ctrl_pipe;
331 struct tasklet_struct tasklet;
333 struct ub_scsi_cmd_queue cmd_queue;
334 struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
335 unsigned char top_sense[UB_SENSE_SIZE];
337 struct ub_completion work_done;
338 struct urb work_urb;
339 struct timer_list work_timer;
340 int last_pipe; /* What might need clearing */
341 __le32 signature; /* Learned signature */
342 struct bulk_cb_wrap work_bcb;
343 struct bulk_cs_wrap work_bcs;
344 struct usb_ctrlrequest work_cr;
346 struct work_struct reset_work;
347 wait_queue_head_t reset_wait;
349 int sg_stat[6];
350 };
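/*
 * Note: only one work_urb is ever in flight per device. work_done records
 * its completion, work_timer bounds it, and the command queue below
 * serializes everything else behind it.
 */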
352 /*
353 */
354 static void ub_cleanup(struct ub_dev *sc);
355 static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
356 static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
357 struct ub_scsi_cmd *cmd, struct ub_request *urq);
358 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
359 struct ub_scsi_cmd *cmd, struct ub_request *urq);
360 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
361 static void ub_end_rq(struct request *rq, int uptodate);
362 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
363 struct ub_request *urq, struct ub_scsi_cmd *cmd);
364 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
365 static void ub_urb_complete(struct urb *urb, struct pt_regs *pt);
366 static void ub_scsi_action(unsigned long _dev);
367 static void ub_scsi_dispatch(struct ub_dev *sc);
368 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
369 static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
370 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
371 static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
372 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
373 static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
374 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
375 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
376 int stalled_pipe);
377 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
378 static void ub_reset_enter(struct ub_dev *sc, int try);
379 static void ub_reset_task(void *arg);
380 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
381 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
382 struct ub_capacity *ret);
383 static int ub_sync_reset(struct ub_dev *sc);
384 static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
385 static int ub_probe_lun(struct ub_dev *sc, int lnum);
387 /*
388 */
389 #ifdef CONFIG_USB_LIBUSUAL
391 #define ub_usb_ids storage_usb_ids
392 #else
394 static struct usb_device_id ub_usb_ids[] = {
395 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
396 { }
397 };
399 MODULE_DEVICE_TABLE(usb, ub_usb_ids);
400 #endif /* CONFIG_USB_LIBUSUAL */
402 /*
403 * Find me a way to identify "next free minor" for add_disk(),
404 * and the array disappears the next day. However, the number of
405 * hosts has something to do with the naming and /proc/partitions.
406 * This has to be thought out in detail before changing.
407 * If UB_MAX_HOSTS was 1000, we'd use a bitmap. Or a better data structure.
408 */
409 #define UB_MAX_HOSTS 26
410 static char ub_hostv[UB_MAX_HOSTS];
412 #define UB_QLOCK_NUM 5
413 static spinlock_t ub_qlockv[UB_QLOCK_NUM];
414 static int ub_qlock_next = 0;
416 static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */
418 /*
419 * The id allocator.
420 *
421 * This also stores the host for indexing by minor, which is somewhat dirty.
422 */
423 static int ub_id_get(void)
424 {
425 unsigned long flags;
426 int i;
428 spin_lock_irqsave(&ub_lock, flags);
429 for (i = 0; i < UB_MAX_HOSTS; i++) {
430 if (ub_hostv[i] == 0) {
431 ub_hostv[i] = 1;
432 spin_unlock_irqrestore(&ub_lock, flags);
433 return i;
434 }
435 }
436 spin_unlock_irqrestore(&ub_lock, flags);
437 return -1;
438 }
440 static void ub_id_put(int id)
441 {
442 unsigned long flags;
444 if (id < 0 || id >= UB_MAX_HOSTS) {
445 printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
446 return;
447 }
449 spin_lock_irqsave(&ub_lock, flags);
450 if (ub_hostv[id] == 0) {
451 spin_unlock_irqrestore(&ub_lock, flags);
452 printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
453 return;
454 }
455 ub_hostv[id] = 0;
456 spin_unlock_irqrestore(&ub_lock, flags);
457 }
459 /*
460 * This is necessitated by the fact that blk_cleanup_queue does not
461 * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
462 * Since our blk_init_queue() passes a spinlock common with ub_dev,
463 * we have life time issues when ub_cleanup frees ub_dev.
464 */
465 static spinlock_t *ub_next_lock(void)
466 {
467 unsigned long flags;
468 spinlock_t *ret;
470 spin_lock_irqsave(&ub_lock, flags);
471 ret = &ub_qlockv[ub_qlock_next];
472 ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
473 spin_unlock_irqrestore(&ub_lock, flags);
474 return ret;
475 }
477 /*
478 * Downcount for deallocation. This rides on two assumptions:
479 * - once something is poisoned, its refcount cannot grow
480 * - opens cannot happen at this time (del_gendisk was done)
481 * If the above is true, we can drop the lock, which we need for
482 * blk_cleanup_queue(): the silly thing may attempt to sleep.
483 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
484 */
485 static void ub_put(struct ub_dev *sc)
486 {
487 unsigned long flags;
489 spin_lock_irqsave(&ub_lock, flags);
490 --sc->openc;
491 if (sc->openc == 0 && atomic_read(&sc->poison)) {
492 spin_unlock_irqrestore(&ub_lock, flags);
493 ub_cleanup(sc);
494 } else {
495 spin_unlock_irqrestore(&ub_lock, flags);
496 }
497 }
499 /*
500 * Final cleanup and deallocation.
501 */
502 static void ub_cleanup(struct ub_dev *sc)
503 {
504 struct list_head *p;
505 struct ub_lun *lun;
506 request_queue_t *q;
508 while (!list_empty(&sc->luns)) {
509 p = sc->luns.next;
510 lun = list_entry(p, struct ub_lun, link);
511 list_del(p);
513 /* I don't think queue can be NULL. But... Stolen from sx8.c */
514 if ((q = lun->disk->queue) != NULL)
515 blk_cleanup_queue(q);
516 /*
517 * If we zero disk->private_data BEFORE put_disk, we have
518 * to check for NULL all over the place in open, release,
519 * check_media and revalidate, because the block level
520 * semaphore is well inside the put_disk.
521 * But we cannot zero after the call, because *disk is gone.
522 * The sd.c is blatantly racy in this area.
523 */
524 /* disk->private_data = NULL; */
525 put_disk(lun->disk);
526 lun->disk = NULL;
528 ub_id_put(lun->id);
529 kfree(lun);
530 }
532 usb_set_intfdata(sc->intf, NULL);
533 usb_put_intf(sc->intf);
534 usb_put_dev(sc->dev);
535 kfree(sc);
536 }
538 /*
539 * The "command allocator".
540 */
541 static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
542 {
543 struct ub_scsi_cmd *ret;
545 if (lun->cmda[0])
546 return NULL;
547 ret = &lun->cmdv[0];
548 lun->cmda[0] = 1;
549 return ret;
550 }
552 static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
553 {
554 if (cmd != &lun->cmdv[0]) {
555 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
556 lun->name, cmd);
557 return;
558 }
559 if (!lun->cmda[0]) {
560 printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
561 return;
562 }
563 lun->cmda[0] = 0;
564 }
566 /*
567 * The command queue.
568 */
569 static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
570 {
571 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
573 if (t->qlen++ == 0) {
574 t->head = cmd;
575 t->tail = cmd;
576 } else {
577 t->tail->next = cmd;
578 t->tail = cmd;
579 }
581 if (t->qlen > t->qmax)
582 t->qmax = t->qlen;
583 }
585 static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
586 {
587 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
589 if (t->qlen++ == 0) {
590 t->head = cmd;
591 t->tail = cmd;
592 } else {
593 cmd->next = t->head;
594 t->head = cmd;
595 }
597 if (t->qlen > t->qmax)
598 t->qmax = t->qlen;
599 }
601 static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
602 {
603 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
604 struct ub_scsi_cmd *cmd;
606 if (t->qlen == 0)
607 return NULL;
608 if (--t->qlen == 0)
609 t->tail = NULL;
610 cmd = t->head;
611 t->head = cmd->next;
612 cmd->next = NULL;
613 return cmd;
614 }
616 #define ub_cmdq_peek(sc) ((sc)->cmd_queue.head)
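/*
 * Usage sketch: ub_submit_scsi() queues new commands at the tail with
 * ub_cmdq_add(), ub_state_sense() pushes the auto REQUEST SENSE at the
 * head with ub_cmdq_insert() so it runs ahead of whatever else is queued,
 * and ub_scsi_dispatch() consumes with ub_cmdq_peek()/ub_cmdq_pop().
 */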
618 /*
619 * The request function is our main entry point
620 */
622 static void ub_request_fn(request_queue_t *q)
623 {
624 struct ub_lun *lun = q->queuedata;
625 struct request *rq;
627 while ((rq = elv_next_request(q)) != NULL) {
628 if (ub_request_fn_1(lun, rq) != 0) {
629 blk_stop_queue(q);
630 break;
631 }
632 }
633 }
635 static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
636 {
637 struct ub_dev *sc = lun->udev;
638 struct ub_scsi_cmd *cmd;
639 struct ub_request *urq;
640 int n_elem;
642 if (atomic_read(&sc->poison) || lun->changed) {
643 blkdev_dequeue_request(rq);
644 ub_end_rq(rq, 0);
645 return 0;
646 }
648 if (lun->urq.rq != NULL)
649 return -1;
650 if ((cmd = ub_get_cmd(lun)) == NULL)
651 return -1;
652 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
654 blkdev_dequeue_request(rq);
656 urq = &lun->urq;
657 memset(urq, 0, sizeof(struct ub_request));
658 urq->rq = rq;
660 /*
661 * get scatterlist from block layer
662 */
663 n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
664 if (n_elem < 0) {
665 /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
666 printk(KERN_INFO "%s: failed request map (%d)\n",
667 lun->name, n_elem);
668 goto drop;
669 }
670 if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */
671 printk(KERN_WARNING "%s: request with %d segments\n",
672 lun->name, n_elem);
673 goto drop;
674 }
675 urq->nsg = n_elem;
676 sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
678 if (blk_pc_request(rq)) {
679 ub_cmd_build_packet(sc, lun, cmd, urq);
680 } else {
681 ub_cmd_build_block(sc, lun, cmd, urq);
682 }
683 cmd->state = UB_CMDST_INIT;
684 cmd->lun = lun;
685 cmd->done = ub_rw_cmd_done;
686 cmd->back = urq;
688 cmd->tag = sc->tagcnt++;
689 if (ub_submit_scsi(sc, cmd) != 0)
690 goto drop;
692 return 0;
694 drop:
695 ub_put_cmd(lun, cmd);
696 ub_end_rq(rq, 0);
697 return 0;
698 }
700 static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
701 struct ub_scsi_cmd *cmd, struct ub_request *urq)
702 {
703 struct request *rq = urq->rq;
704 unsigned int block, nblks;
706 if (rq_data_dir(rq) == WRITE)
707 cmd->dir = UB_DIR_WRITE;
708 else
709 cmd->dir = UB_DIR_READ;
711 cmd->nsg = urq->nsg;
712 memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
714 /*
715 * build the command
716 *
717 * The call to blk_queue_hardsect_size() guarantees that request
718 * is aligned, but it is given in terms of 512 byte units, always.
719 */
720 block = rq->sector >> lun->capacity.bshift;
721 nblks = rq->nr_sectors >> lun->capacity.bshift;
723 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
724 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
725 cmd->cdb[2] = block >> 24;
726 cmd->cdb[3] = block >> 16;
727 cmd->cdb[4] = block >> 8;
728 cmd->cdb[5] = block;
729 cmd->cdb[7] = nblks >> 8;
730 cmd->cdb[8] = nblks;
731 cmd->cdb_len = 10;
733 cmd->len = rq->nr_sectors * 512;
734 }
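/*
 * Example of the CDB built above (illustrative numbers): a read of 8
 * blocks starting at block 0x00123456 becomes
 *	cdb[] = { 0x28, 0x00, 0x00, 0x12, 0x34, 0x56, 0x00, 0x00, 0x08, 0x00 }
 * i.e. READ(10) with a 4-byte big-endian LBA and a 2-byte big-endian
 * transfer length.
 */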
736 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
737 struct ub_scsi_cmd *cmd, struct ub_request *urq)
738 {
739 struct request *rq = urq->rq;
741 if (rq->data_len == 0) {
742 cmd->dir = UB_DIR_NONE;
743 } else {
744 if (rq_data_dir(rq) == WRITE)
745 cmd->dir = UB_DIR_WRITE;
746 else
747 cmd->dir = UB_DIR_READ;
748 }
750 cmd->nsg = urq->nsg;
751 memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
753 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
754 cmd->cdb_len = rq->cmd_len;
756 cmd->len = rq->data_len;
757 }
759 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
760 {
761 struct ub_lun *lun = cmd->lun;
762 struct ub_request *urq = cmd->back;
763 struct request *rq;
764 int uptodate;
766 rq = urq->rq;
768 if (cmd->error == 0) {
769 uptodate = 1;
771 if (blk_pc_request(rq)) {
772 if (cmd->act_len >= rq->data_len)
773 rq->data_len = 0;
774 else
775 rq->data_len -= cmd->act_len;
776 }
777 } else {
778 uptodate = 0;
780 if (blk_pc_request(rq)) {
781 /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
782 memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
783 rq->sense_len = UB_SENSE_SIZE;
784 if (sc->top_sense[0] != 0)
785 rq->errors = SAM_STAT_CHECK_CONDITION;
786 else
787 rq->errors = DID_ERROR << 16;
788 } else {
789 if (cmd->error == -EIO) {
790 if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
791 return;
792 }
793 }
794 }
796 urq->rq = NULL;
798 ub_put_cmd(lun, cmd);
799 ub_end_rq(rq, uptodate);
800 blk_start_queue(lun->disk->queue);
801 }
803 static void ub_end_rq(struct request *rq, int uptodate)
804 {
805 end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
806 end_that_request_last(rq, uptodate);
807 }
809 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
810 struct ub_request *urq, struct ub_scsi_cmd *cmd)
811 {
813 if (atomic_read(&sc->poison))
814 return -ENXIO;
816 ub_reset_enter(sc, urq->current_try);
818 if (urq->current_try >= 3)
819 return -EIO;
820 urq->current_try++;
822 /* Remove this if anyone complains of flooding. */
823 printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
824 "[sense %x %02x %02x] retry %d\n",
825 sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
826 cmd->key, cmd->asc, cmd->ascq, urq->current_try);
828 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
829 ub_cmd_build_block(sc, lun, cmd, urq);
831 cmd->state = UB_CMDST_INIT;
832 cmd->lun = lun;
833 cmd->done = ub_rw_cmd_done;
834 cmd->back = urq;
836 cmd->tag = sc->tagcnt++;
838 #if 0 /* Wasteful */
839 return ub_submit_scsi(sc, cmd);
840 #else
841 ub_cmdq_add(sc, cmd);
842 return 0;
843 #endif
844 }
846 /*
847 * Submit a regular SCSI operation (not an auto-sense).
848 *
849 * The Iron Law of Good Submit Routine is:
850 * Zero return - callback is done, Nonzero return - callback is not done.
851 * No exceptions.
852 *
853 * Host is assumed locked.
854 */
855 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
856 {
858 if (cmd->state != UB_CMDST_INIT ||
859 (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
860 return -EINVAL;
861 }
863 ub_cmdq_add(sc, cmd);
864 /*
865 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
866 * safer to jump to a tasklet, in case upper layers do something silly.
867 */
868 tasklet_schedule(&sc->tasklet);
869 return 0;
870 }
872 /*
873 * Submit the first URB for the queued command.
874 * This function does not deal with queueing in any way.
875 */
876 static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
877 {
878 struct bulk_cb_wrap *bcb;
879 int rc;
881 bcb = &sc->work_bcb;
883 /*
884 * ``If the allocation length is eighteen or greater, and a device
885 * server returns less than eighteen bytes of data, the application
886 * client should assume that the bytes not transferred would have been
887 * zeroes had the device server returned those bytes.''
888 *
889 * We zero sense for all commands so that when a packet request
890 * fails it does not return a stale sense.
891 */
892 memset(&sc->top_sense, 0, UB_SENSE_SIZE);
894 /* set up the command wrapper */
895 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
896 bcb->Tag = cmd->tag; /* Endianness is not important */
897 bcb->DataTransferLength = cpu_to_le32(cmd->len);
898 bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
899 bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
900 bcb->Length = cmd->cdb_len;
902 /* copy the command payload */
903 memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
905 UB_INIT_COMPLETION(sc->work_done);
907 sc->last_pipe = sc->send_bulk_pipe;
908 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
909 bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
911 /* Fill what we shouldn't be filling, because usb-storage did so. */
912 sc->work_urb.actual_length = 0;
913 sc->work_urb.error_count = 0;
914 sc->work_urb.status = 0;
916 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
917 /* XXX Clear stalls */
918 ub_complete(&sc->work_done);
919 return rc;
920 }
922 sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
923 add_timer(&sc->work_timer);
925 cmd->state = UB_CMDST_CMD;
926 return 0;
927 }
929 /*
930 * Timeout handler.
931 */
932 static void ub_urb_timeout(unsigned long arg)
933 {
934 struct ub_dev *sc = (struct ub_dev *) arg;
935 unsigned long flags;
937 spin_lock_irqsave(sc->lock, flags);
938 if (!ub_is_completed(&sc->work_done))
939 usb_unlink_urb(&sc->work_urb);
940 spin_unlock_irqrestore(sc->lock, flags);
941 }
943 /*
944 * Completion routine for the work URB.
945 *
946 * This can be called directly from usb_submit_urb (while we have
947 * the sc->lock taken) and from an interrupt (while we do NOT have
948 * the sc->lock taken). Therefore, bounce this off to a tasklet.
949 */
950 static void ub_urb_complete(struct urb *urb, struct pt_regs *pt)
951 {
952 struct ub_dev *sc = urb->context;
954 ub_complete(&sc->work_done);
955 tasklet_schedule(&sc->tasklet);
956 }
958 static void ub_scsi_action(unsigned long _dev)
959 {
960 struct ub_dev *sc = (struct ub_dev *) _dev;
961 unsigned long flags;
963 spin_lock_irqsave(sc->lock, flags);
964 ub_scsi_dispatch(sc);
965 spin_unlock_irqrestore(sc->lock, flags);
966 }
968 static void ub_scsi_dispatch(struct ub_dev *sc)
969 {
970 struct ub_scsi_cmd *cmd;
971 int rc;
973 while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
974 if (cmd->state == UB_CMDST_DONE) {
975 ub_cmdq_pop(sc);
976 (*cmd->done)(sc, cmd);
977 } else if (cmd->state == UB_CMDST_INIT) {
978 if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
979 break;
980 cmd->error = rc;
981 cmd->state = UB_CMDST_DONE;
982 } else {
983 if (!ub_is_completed(&sc->work_done))
984 break;
985 del_timer(&sc->work_timer);
986 ub_scsi_urb_compl(sc, cmd);
987 }
988 }
989 }
991 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
992 {
993 struct urb *urb = &sc->work_urb;
994 struct bulk_cs_wrap *bcs;
995 int len;
996 int rc;
998 if (atomic_read(&sc->poison)) {
999 ub_state_done(sc, cmd, -ENODEV);
1000 return;
1003 if (cmd->state == UB_CMDST_CLEAR) {
1004 if (urb->status == -EPIPE) {
1005 /*
1006 * STALL while clearing STALL.
1007 * The control pipe clears itself - nothing to do.
1008 */
1009 printk(KERN_NOTICE "%s: stall on control pipe\n",
1010 sc->name);
1011 goto Bad_End;
1014 /*
1015 * We ignore the result for the halt clear.
1016 */
1018 /* reset the endpoint toggle */
1019 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1020 usb_pipeout(sc->last_pipe), 0);
1022 ub_state_sense(sc, cmd);
1024 } else if (cmd->state == UB_CMDST_CLR2STS) {
1025 if (urb->status == -EPIPE) {
1026 printk(KERN_NOTICE "%s: stall on control pipe\n",
1027 sc->name);
1028 goto Bad_End;
1031 /*
1032 * We ignore the result for the halt clear.
1033 */
1035 /* reset the endpoint toggle */
1036 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1037 usb_pipeout(sc->last_pipe), 0);
1039 ub_state_stat(sc, cmd);
1041 } else if (cmd->state == UB_CMDST_CLRRS) {
1042 if (urb->status == -EPIPE) {
1043 printk(KERN_NOTICE "%s: stall on control pipe\n",
1044 sc->name);
1045 goto Bad_End;
1048 /*
1049 * We ignore the result for the halt clear.
1050 */
1052 /* reset the endpoint toggle */
1053 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1054 usb_pipeout(sc->last_pipe), 0);
1056 ub_state_stat_counted(sc, cmd);
1058 } else if (cmd->state == UB_CMDST_CMD) {
1059 switch (urb->status) {
1060 case 0:
1061 break;
1062 case -EOVERFLOW:
1063 goto Bad_End;
1064 case -EPIPE:
1065 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1066 if (rc != 0) {
1067 printk(KERN_NOTICE "%s: "
1068 "unable to submit clear (%d)\n",
1069 sc->name, rc);
1070 /*
1071 * This is typically ENOMEM or some other such shit.
1072 * Retrying is pointless. Just do Bad End on it...
1073 */
1074 ub_state_done(sc, cmd, rc);
1075 return;
1077 cmd->state = UB_CMDST_CLEAR;
1078 return;
1079 case -ESHUTDOWN: /* unplug */
1080 case -EILSEQ: /* unplug timeout on uhci */
1081 ub_state_done(sc, cmd, -ENODEV);
1082 return;
1083 default:
1084 goto Bad_End;
1086 if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
1087 goto Bad_End;
1090 if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
1091 ub_state_stat(sc, cmd);
1092 return;
1095 // udelay(125); // usb-storage has this
1096 ub_data_start(sc, cmd);
1098 } else if (cmd->state == UB_CMDST_DATA) {
1099 if (urb->status == -EPIPE) {
1100 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1101 if (rc != 0) {
1102 printk(KERN_NOTICE "%s: "
1103 "unable to submit clear (%d)\n",
1104 sc->name, rc);
1105 ub_state_done(sc, cmd, rc);
1106 return;
1108 cmd->state = UB_CMDST_CLR2STS;
1109 return;
1111 if (urb->status == -EOVERFLOW) {
1112 /*
1113 * A babble? Failure, but we must transfer CSW now.
1114 */
1115 cmd->error = -EOVERFLOW; /* A cheap trick... */
1116 ub_state_stat(sc, cmd);
1117 return;
1120 if (cmd->dir == UB_DIR_WRITE) {
1121 /*
1122 * Do not continue writes in case of a failure.
1123 * Doing so would cause sectors to be mixed up,
1124 * which is worse than sectors lost.
1126 * We must try to read the CSW, or many devices
1127 * get confused.
1128 */
1129 len = urb->actual_length;
1130 if (urb->status != 0 ||
1131 len != cmd->sgv[cmd->current_sg].length) {
1132 cmd->act_len += len;
1134 cmd->error = -EIO;
1135 ub_state_stat(sc, cmd);
1136 return;
1139 } else {
1140 /*
1141 * If an error occurs on read, we record it, and
1142 * continue to fetch data in order to avoid a bubble.
1144 * As a small shortcut, we stop if we detect that
1145 * a CSW is mixed into the data.
1146 */
1147 if (urb->status != 0)
1148 cmd->error = -EIO;
1150 len = urb->actual_length;
1151 if (urb->status != 0 ||
1152 len != cmd->sgv[cmd->current_sg].length) {
1153 if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
1154 goto Bad_End;
1158 cmd->act_len += urb->actual_length;
1160 if (++cmd->current_sg < cmd->nsg) {
1161 ub_data_start(sc, cmd);
1162 return;
1164 ub_state_stat(sc, cmd);
1166 } else if (cmd->state == UB_CMDST_STAT) {
1167 if (urb->status == -EPIPE) {
1168 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1169 if (rc != 0) {
1170 printk(KERN_NOTICE "%s: "
1171 "unable to submit clear (%d)\n",
1172 sc->name, rc);
1173 ub_state_done(sc, cmd, rc);
1174 return;
1177 /*
1178 * Having a stall when getting CSW is an error, so
1179 * make sure upper levels are not oblivious to it.
1180 */
1181 cmd->error = -EIO; /* A cheap trick... */
1183 cmd->state = UB_CMDST_CLRRS;
1184 return;
1187 /* Catch everything, including -EOVERFLOW and other nasties. */
1188 if (urb->status != 0)
1189 goto Bad_End;
1191 if (urb->actual_length == 0) {
1192 ub_state_stat_counted(sc, cmd);
1193 return;
1196 /*
1197 * Check the returned Bulk protocol status.
1198 * The status block has to be validated first.
1199 */
1201 bcs = &sc->work_bcs;
1203 if (sc->signature == cpu_to_le32(0)) {
1204 /*
1205 * This is the first reply, so do not perform the check.
1206 * Instead, remember the signature the device uses
1207 * for future checks. But do not allow a nul.
1208 */
1209 sc->signature = bcs->Signature;
1210 if (sc->signature == cpu_to_le32(0)) {
1211 ub_state_stat_counted(sc, cmd);
1212 return;
1214 } else {
1215 if (bcs->Signature != sc->signature) {
1216 ub_state_stat_counted(sc, cmd);
1217 return;
1221 if (bcs->Tag != cmd->tag) {
1222 /*
1223 * This usually happens when we disagree with the
1224 * device's microcode about something. For instance,
1225 * a few of them throw this after timeouts. They buffer
1226 * commands and reply to commands we timed out before.
1227 * Without flushing these replies we loop forever.
1228 */
1229 ub_state_stat_counted(sc, cmd);
1230 return;
1233 len = le32_to_cpu(bcs->Residue);
1234 if (len != cmd->len - cmd->act_len) {
1235 /*
1236 * It is all right to transfer less, the caller has
1237 * to check. But it's not all right if the device
1238 * counts disagree with our counts.
1239 */
1240 goto Bad_End;
1243 switch (bcs->Status) {
1244 case US_BULK_STAT_OK:
1245 break;
1246 case US_BULK_STAT_FAIL:
1247 ub_state_sense(sc, cmd);
1248 return;
1249 case US_BULK_STAT_PHASE:
1250 goto Bad_End;
1251 default:
1252 printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
1253 sc->name, bcs->Status);
1254 ub_state_done(sc, cmd, -EINVAL);
1255 return;
1258 /* Not zeroing error to preserve a babble indicator */
1259 if (cmd->error != 0) {
1260 ub_state_sense(sc, cmd);
1261 return;
1263 cmd->state = UB_CMDST_DONE;
1264 ub_cmdq_pop(sc);
1265 (*cmd->done)(sc, cmd);
1267 } else if (cmd->state == UB_CMDST_SENSE) {
1268 ub_state_done(sc, cmd, -EIO);
1270 } else {
1271 printk(KERN_WARNING "%s: "
1272 "wrong command state %d\n",
1273 sc->name, cmd->state);
1274 ub_state_done(sc, cmd, -EINVAL);
1275 return;
1277 return;
1279 Bad_End: /* Little Excel is dead */
1280 ub_state_done(sc, cmd, -EIO);
1283 /*
1284 * Factorization helper for the command state machine:
1285 * Initiate a data segment transfer.
1286 */
1287 static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1289 struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
1290 int pipe;
1291 int rc;
1293 UB_INIT_COMPLETION(sc->work_done);
1295 if (cmd->dir == UB_DIR_READ)
1296 pipe = sc->recv_bulk_pipe;
1297 else
1298 pipe = sc->send_bulk_pipe;
1299 sc->last_pipe = pipe;
1300 usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
1301 page_address(sg->page) + sg->offset, sg->length,
1302 ub_urb_complete, sc);
1303 sc->work_urb.actual_length = 0;
1304 sc->work_urb.error_count = 0;
1305 sc->work_urb.status = 0;
1307 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1308 /* XXX Clear stalls */
1309 ub_complete(&sc->work_done);
1310 ub_state_done(sc, cmd, rc);
1311 return;
1314 sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
1315 add_timer(&sc->work_timer);
1317 cmd->state = UB_CMDST_DATA;
1320 /*
1321 * Factorization helper for the command state machine:
1322 * Finish the command.
1323 */
1324 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
1327 cmd->error = rc;
1328 cmd->state = UB_CMDST_DONE;
1329 ub_cmdq_pop(sc);
1330 (*cmd->done)(sc, cmd);
1333 /*
1334 * Factorization helper for the command state machine:
1335 * Submit a CSW read.
1336 */
1337 static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1339 int rc;
1341 UB_INIT_COMPLETION(sc->work_done);
1343 sc->last_pipe = sc->recv_bulk_pipe;
1344 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
1345 &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
1346 sc->work_urb.actual_length = 0;
1347 sc->work_urb.error_count = 0;
1348 sc->work_urb.status = 0;
1350 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1351 /* XXX Clear stalls */
1352 ub_complete(&sc->work_done);
1353 ub_state_done(sc, cmd, rc);
1354 return -1;
1357 sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
1358 add_timer(&sc->work_timer);
1359 return 0;
1362 /*
1363 * Factorization helper for the command state machine:
1364 * Submit a CSW read and go to STAT state.
1365 */
1366 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1369 if (__ub_state_stat(sc, cmd) != 0)
1370 return;
1372 cmd->stat_count = 0;
1373 cmd->state = UB_CMDST_STAT;
1376 /*
1377 * Factorization helper for the command state machine:
1378 * Submit a CSW read and go to STAT state with counter (along [C] path).
1379 */
1380 static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1383 if (++cmd->stat_count >= 4) {
1384 ub_state_sense(sc, cmd);
1385 return;
1388 if (__ub_state_stat(sc, cmd) != 0)
1389 return;
1391 cmd->state = UB_CMDST_STAT;
1394 /*
1395 * Factorization helper for the command state machine:
1396 * Submit a REQUEST SENSE and go to SENSE state.
1397 */
1398 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1400 struct ub_scsi_cmd *scmd;
1401 struct scatterlist *sg;
1402 int rc;
1404 if (cmd->cdb[0] == REQUEST_SENSE) {
1405 rc = -EPIPE;
1406 goto error;
1409 scmd = &sc->top_rqs_cmd;
1410 memset(scmd, 0, sizeof(struct ub_scsi_cmd));
1411 scmd->cdb[0] = REQUEST_SENSE;
1412 scmd->cdb[4] = UB_SENSE_SIZE;
1413 scmd->cdb_len = 6;
1414 scmd->dir = UB_DIR_READ;
1415 scmd->state = UB_CMDST_INIT;
1416 scmd->nsg = 1;
1417 sg = &scmd->sgv[0];
1418 sg->page = virt_to_page(sc->top_sense);
1419 sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1);
1420 sg->length = UB_SENSE_SIZE;
1421 scmd->len = UB_SENSE_SIZE;
1422 scmd->lun = cmd->lun;
1423 scmd->done = ub_top_sense_done;
1424 scmd->back = cmd;
1426 scmd->tag = sc->tagcnt++;
1428 cmd->state = UB_CMDST_SENSE;
1430 ub_cmdq_insert(sc, scmd);
1431 return;
1433 error:
1434 ub_state_done(sc, cmd, rc);
1437 /*
1438 * A helper for the command's state machine:
1439 * Submit a stall clear.
1440 */
1441 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
1442 int stalled_pipe)
1444 int endp;
1445 struct usb_ctrlrequest *cr;
1446 int rc;
1448 endp = usb_pipeendpoint(stalled_pipe);
1449 if (usb_pipein (stalled_pipe))
1450 endp |= USB_DIR_IN;
1452 cr = &sc->work_cr;
1453 cr->bRequestType = USB_RECIP_ENDPOINT;
1454 cr->bRequest = USB_REQ_CLEAR_FEATURE;
1455 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
1456 cr->wIndex = cpu_to_le16(endp);
1457 cr->wLength = cpu_to_le16(0);
1459 UB_INIT_COMPLETION(sc->work_done);
1461 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1462 (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
1463 sc->work_urb.actual_length = 0;
1464 sc->work_urb.error_count = 0;
1465 sc->work_urb.status = 0;
1467 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1468 ub_complete(&sc->work_done);
1469 return rc;
1472 sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
1473 add_timer(&sc->work_timer);
1474 return 0;
1477 /*
1478 */
1479 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1481 unsigned char *sense = sc->top_sense;
1482 struct ub_scsi_cmd *cmd;
1484 /*
1485 * Find the command which triggered the unit attention or a check,
1486 * save the sense into it, and advance its state machine.
1487 */
1488 if ((cmd = ub_cmdq_peek(sc)) == NULL) {
1489 printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
1490 return;
1492 if (cmd != scmd->back) {
1493 printk(KERN_WARNING "%s: "
1494 "sense done for wrong command 0x%x\n",
1495 sc->name, cmd->tag);
1496 return;
1498 if (cmd->state != UB_CMDST_SENSE) {
1499 printk(KERN_WARNING "%s: "
1500 "sense done with bad cmd state %d\n",
1501 sc->name, cmd->state);
1502 return;
1505 /*
1506 * Ignoring scmd->act_len, because the buffer was pre-zeroed.
1507 */
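	/* Fixed-format sense data: the low nibble of byte 2 is the sense
	 * key, bytes 12 and 13 carry the ASC/ASCQ pair. */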
1508 cmd->key = sense[2] & 0x0F;
1509 cmd->asc = sense[12];
1510 cmd->ascq = sense[13];
1512 ub_scsi_urb_compl(sc, cmd);
1515 /*
1516 * Reset management
1517 * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing.
1518 * XXX Make usb_sync_reset asynchronous.
1519 */
1521 static void ub_reset_enter(struct ub_dev *sc, int try)
1524 if (sc->reset) {
1525 /* This happens often on multi-LUN devices. */
1526 return;
1528 sc->reset = try + 1;
1530 #if 0 /* Not needed because the disconnect waits for us. */
1531 unsigned long flags;
1532 spin_lock_irqsave(&ub_lock, flags);
1533 sc->openc++;
1534 spin_unlock_irqrestore(&ub_lock, flags);
1535 #endif
1537 #if 0 /* We let them stop themselves. */
1538 struct list_head *p;
1539 struct ub_lun *lun;
1540 list_for_each(p, &sc->luns) {
1541 lun = list_entry(p, struct ub_lun, link);
1542 blk_stop_queue(lun->disk->queue);
1544 #endif
1546 schedule_work(&sc->reset_work);
1549 static void ub_reset_task(void *arg)
1551 struct ub_dev *sc = arg;
1552 unsigned long flags;
1553 struct list_head *p;
1554 struct ub_lun *lun;
1555 int lkr, rc;
1557 if (!sc->reset) {
1558 printk(KERN_WARNING "%s: Running reset unrequested\n",
1559 sc->name);
1560 return;
1563 if (atomic_read(&sc->poison)) {
1565 } else if ((sc->reset & 1) == 0) {
1566 ub_sync_reset(sc);
1567 msleep(700); /* usb-storage sleeps 6s (!) */
1568 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
1569 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
1570 } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
1572 } else {
1573 if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) {
1574 printk(KERN_NOTICE
1575 "%s: usb_lock_device_for_reset failed (%d)\n",
1576 sc->name, lkr);
1577 } else {
1578 rc = usb_reset_device(sc->dev);
1579 if (rc < 0) {
1580 printk(KERN_NOTICE "%s: "
1581 "usb_lock_device_for_reset failed (%d)\n",
1582 sc->name, rc);
1585 if (lkr)
1586 usb_unlock_device(sc->dev);
1590 /*
1591 * In theory, no commands can be running while reset is active,
1592 * so nobody can ask for another reset, and so we do not need any
1593 * queues of resets or anything. We do need a spinlock though,
1594 * to interact with block layer.
1595 */
1596 spin_lock_irqsave(sc->lock, flags);
1597 sc->reset = 0;
1598 tasklet_schedule(&sc->tasklet);
1599 list_for_each(p, &sc->luns) {
1600 lun = list_entry(p, struct ub_lun, link);
1601 blk_start_queue(lun->disk->queue);
1603 wake_up(&sc->reset_wait);
1604 spin_unlock_irqrestore(sc->lock, flags);
1607 /*
1608 * This is called from a process context.
1609 */
1610 static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
1613 lun->readonly = 0; /* XXX Query this from the device */
1615 lun->capacity.nsec = 0;
1616 lun->capacity.bsize = 512;
1617 lun->capacity.bshift = 0;
1619 if (ub_sync_tur(sc, lun) != 0)
1620 return; /* Not ready */
1621 lun->changed = 0;
1623 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1624 /*
1625 * The retry here means something is wrong, either with the
1626 * device, with the transport, or with our code.
1627 * We keep this because sd.c has retries for capacity.
1628 */
1629 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1630 lun->capacity.nsec = 0;
1631 lun->capacity.bsize = 512;
1632 lun->capacity.bshift = 0;
1637 /*
1638 * The open function.
1639 * This is mostly needed to keep refcounting, but also to support
1640 * media checks on removable media drives.
1641 */
1642 static int ub_bd_open(struct inode *inode, struct file *filp)
1644 struct gendisk *disk = inode->i_bdev->bd_disk;
1645 struct ub_lun *lun = disk->private_data;
1646 struct ub_dev *sc = lun->udev;
1647 unsigned long flags;
1648 int rc;
1650 spin_lock_irqsave(&ub_lock, flags);
1651 if (atomic_read(&sc->poison)) {
1652 spin_unlock_irqrestore(&ub_lock, flags);
1653 return -ENXIO;
1655 sc->openc++;
1656 spin_unlock_irqrestore(&ub_lock, flags);
1658 if (lun->removable || lun->readonly)
1659 check_disk_change(inode->i_bdev);
1661 /*
1662 * The sd.c considers ->media_present and ->changed not equivalent,
1663 * under some pretty murky conditions (a failure of READ CAPACITY).
1664 * We may need it one day.
1665 */
1666 if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) {
1667 rc = -ENOMEDIUM;
1668 goto err_open;
1671 if (lun->readonly && (filp->f_mode & FMODE_WRITE)) {
1672 rc = -EROFS;
1673 goto err_open;
1676 return 0;
1678 err_open:
1679 ub_put(sc);
1680 return rc;
1683 /*
1684 */
1685 static int ub_bd_release(struct inode *inode, struct file *filp)
1687 struct gendisk *disk = inode->i_bdev->bd_disk;
1688 struct ub_lun *lun = disk->private_data;
1689 struct ub_dev *sc = lun->udev;
1691 ub_put(sc);
1692 return 0;
1695 /*
1696 * The ioctl interface.
1697 */
1698 static int ub_bd_ioctl(struct inode *inode, struct file *filp,
1699 unsigned int cmd, unsigned long arg)
1701 struct gendisk *disk = inode->i_bdev->bd_disk;
1702 void __user *usermem = (void __user *) arg;
1704 return scsi_cmd_ioctl(filp, disk, cmd, usermem);
1707 /*
1708 * This is called once a new disk was seen by the block layer or by ub_probe().
1709 * The main objective here is to discover the features of the media such as
1710 * the capacity, read-only status, etc. USB storage generally does not
1711 * need to be spun up, but if we needed it, this would be the place.
1713 * This call can sleep.
1715 * The return code is not used.
1716 */
1717 static int ub_bd_revalidate(struct gendisk *disk)
1719 struct ub_lun *lun = disk->private_data;
1721 ub_revalidate(lun->udev, lun);
1723 /* XXX Support sector size switching like in sr.c */
1724 blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
1725 set_capacity(disk, lun->capacity.nsec);
1726 // set_disk_ro(sdkp->disk, lun->readonly);
1728 return 0;
1731 /*
1732 * The check is called by the block layer to verify if the media
1733 * is still available. It is supposed to be harmless, lightweight and
1734 * non-intrusive in case the media was not changed.
1736 * This call can sleep.
1738 * The return code is bool!
1739 */
1740 static int ub_bd_media_changed(struct gendisk *disk)
1742 struct ub_lun *lun = disk->private_data;
1744 if (!lun->removable)
1745 return 0;
1747 /*
1748 * We always clear checks after every command, so this is not
1749 * as dangerous as it looks. If the TEST_UNIT_READY fails here,
1750 * the device is actually not ready with operator or software
1751 * intervention required. One dangerous item might be a drive which
1752 * spins itself down, and come the time to write dirty pages, this
1753 * will fail, and then the block layer discards the data. Since we never
1754 * spin drives up, such devices simply cannot be used with ub anyway.
1755 */
1756 if (ub_sync_tur(lun->udev, lun) != 0) {
1757 lun->changed = 1;
1758 return 1;
1761 return lun->changed;
1764 static struct block_device_operations ub_bd_fops = {
1765 .owner = THIS_MODULE,
1766 .open = ub_bd_open,
1767 .release = ub_bd_release,
1768 .ioctl = ub_bd_ioctl,
1769 .media_changed = ub_bd_media_changed,
1770 .revalidate_disk = ub_bd_revalidate,
1771 };
1773 /*
1774 * Common ->done routine for commands executed synchronously.
1775 */
1776 static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1778 struct completion *cop = cmd->back;
1779 complete(cop);
1782 /*
1783 * Test if the device has a check condition on it, synchronously.
1784 */
1785 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
1787 struct ub_scsi_cmd *cmd;
1788 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
1789 unsigned long flags;
1790 struct completion compl;
1791 int rc;
1793 init_completion(&compl);
1795 rc = -ENOMEM;
1796 if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1797 goto err_alloc;
1799 cmd->cdb[0] = TEST_UNIT_READY;
1800 cmd->cdb_len = 6;
1801 cmd->dir = UB_DIR_NONE;
1802 cmd->state = UB_CMDST_INIT;
1803 cmd->lun = lun; /* This may be NULL, but that's ok */
1804 cmd->done = ub_probe_done;
1805 cmd->back = &compl;
1807 spin_lock_irqsave(sc->lock, flags);
1808 cmd->tag = sc->tagcnt++;
1810 rc = ub_submit_scsi(sc, cmd);
1811 spin_unlock_irqrestore(sc->lock, flags);
1813 if (rc != 0)
1814 goto err_submit;
1816 wait_for_completion(&compl);
1818 rc = cmd->error;
1820 if (rc == -EIO && cmd->key != 0) /* Retries for benh's key */
1821 rc = cmd->key;
1823 err_submit:
1824 kfree(cmd);
1825 err_alloc:
1826 return rc;
1829 /*
1830 * Read the SCSI capacity synchronously (for probing).
1831 */
1832 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
1833 struct ub_capacity *ret)
1835 struct ub_scsi_cmd *cmd;
1836 struct scatterlist *sg;
1837 char *p;
1838 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
1839 unsigned long flags;
1840 unsigned int bsize, shift;
1841 unsigned long nsec;
1842 struct completion compl;
1843 int rc;
1845 init_completion(&compl);
1847 rc = -ENOMEM;
1848 if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1849 goto err_alloc;
1850 p = (char *)cmd + sizeof(struct ub_scsi_cmd);
1852 cmd->cdb[0] = 0x25;
1853 cmd->cdb_len = 10;
1854 cmd->dir = UB_DIR_READ;
1855 cmd->state = UB_CMDST_INIT;
1856 cmd->nsg = 1;
1857 sg = &cmd->sgv[0];
1858 sg->page = virt_to_page(p);
1859 sg->offset = (unsigned long)p & (PAGE_SIZE-1);
1860 sg->length = 8;
1861 cmd->len = 8;
1862 cmd->lun = lun;
1863 cmd->done = ub_probe_done;
1864 cmd->back = &compl;
1866 spin_lock_irqsave(sc->lock, flags);
1867 cmd->tag = sc->tagcnt++;
1869 rc = ub_submit_scsi(sc, cmd);
1870 spin_unlock_irqrestore(sc->lock, flags);
1872 if (rc != 0)
1873 goto err_submit;
1875 wait_for_completion(&compl);
1877 if (cmd->error != 0) {
1878 rc = -EIO;
1879 goto err_read;
1881 if (cmd->act_len != 8) {
1882 rc = -EIO;
1883 goto err_read;
1886 /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
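/* The 8-byte READ CAPACITY(10) response is two big-endian 32-bit fields:
 * the last LBA (hence the "+ 1" below) and the block size in bytes. */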
1887 nsec = be32_to_cpu(*(__be32 *)p) + 1;
1888 bsize = be32_to_cpu(*(__be32 *)(p + 4));
1889 switch (bsize) {
1890 case 512: shift = 0; break;
1891 case 1024: shift = 1; break;
1892 case 2048: shift = 2; break;
1893 case 4096: shift = 3; break;
1894 default:
1895 rc = -EDOM;
1896 goto err_inv_bsize;
1899 ret->bsize = bsize;
1900 ret->bshift = shift;
1901 ret->nsec = nsec << shift;
1902 rc = 0;
1904 err_inv_bsize:
1905 err_read:
1906 err_submit:
1907 kfree(cmd);
1908 err_alloc:
1909 return rc;
1912 /*
1913 */
1914 static void ub_probe_urb_complete(struct urb *urb, struct pt_regs *pt)
1916 struct completion *cop = urb->context;
1917 complete(cop);
1920 static void ub_probe_timeout(unsigned long arg)
1922 struct completion *cop = (struct completion *) arg;
1923 complete(cop);
1926 /*
1927 * Reset with a Bulk reset.
1928 */
1929 static int ub_sync_reset(struct ub_dev *sc)
1931 int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1932 struct usb_ctrlrequest *cr;
1933 struct completion compl;
1934 struct timer_list timer;
1935 int rc;
1937 init_completion(&compl);
1939 cr = &sc->work_cr;
1940 cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
1941 cr->bRequest = US_BULK_RESET_REQUEST;
1942 cr->wValue = cpu_to_le16(0);
1943 cr->wIndex = cpu_to_le16(ifnum);
1944 cr->wLength = cpu_to_le16(0);
1946 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1947 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
1948 sc->work_urb.actual_length = 0;
1949 sc->work_urb.error_count = 0;
1950 sc->work_urb.status = 0;
1952 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
1953 printk(KERN_WARNING
1954 "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
1955 return rc;
1958 init_timer(&timer);
1959 timer.function = ub_probe_timeout;
1960 timer.data = (unsigned long) &compl;
1961 timer.expires = jiffies + UB_CTRL_TIMEOUT;
1962 add_timer(&timer);
1964 wait_for_completion(&compl);
1966 del_timer_sync(&timer);
1967 usb_kill_urb(&sc->work_urb);
1969 return sc->work_urb.status;
1972 /*
1973 * Get number of LUNs by the way of Bulk GetMaxLUN command.
1974 */
1975 static int ub_sync_getmaxlun(struct ub_dev *sc)
1977 int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
1978 unsigned char *p;
1979 enum { ALLOC_SIZE = 1 };
1980 struct usb_ctrlrequest *cr;
1981 struct completion compl;
1982 struct timer_list timer;
1983 int nluns;
1984 int rc;
1986 init_completion(&compl);
1988 rc = -ENOMEM;
1989 if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1990 goto err_alloc;
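/* 55 acts as a sentinel: if the reply never writes the byte, the check
 * further down notices the untouched value and treats it as no answer. */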
1991 *p = 55;
1993 cr = &sc->work_cr;
1994 cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
1995 cr->bRequest = US_BULK_GET_MAX_LUN;
1996 cr->wValue = cpu_to_le16(0);
1997 cr->wIndex = cpu_to_le16(ifnum);
1998 cr->wLength = cpu_to_le16(1);
2000 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
2001 (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
2002 sc->work_urb.actual_length = 0;
2003 sc->work_urb.error_count = 0;
2004 sc->work_urb.status = 0;
2006 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
2007 goto err_submit;
2009 init_timer(&timer);
2010 timer.function = ub_probe_timeout;
2011 timer.data = (unsigned long) &compl;
2012 timer.expires = jiffies + UB_CTRL_TIMEOUT;
2013 add_timer(&timer);
2015 wait_for_completion(&compl);
2017 del_timer_sync(&timer);
2018 usb_kill_urb(&sc->work_urb);
2020 if ((rc = sc->work_urb.status) < 0)
2021 goto err_io;
2023 if (sc->work_urb.actual_length != 1) {
2024 nluns = 0;
2025 } else {
2026 if ((nluns = *p) == 55) {
2027 nluns = 0;
2028 } else {
2029 /* GetMaxLUN returns the maximum LUN number */
2030 nluns += 1;
2031 if (nluns > UB_MAX_LUNS)
2032 nluns = UB_MAX_LUNS;
2036 kfree(p);
2037 return nluns;
2039 err_io:
2040 err_submit:
2041 kfree(p);
2042 err_alloc:
2043 return rc;
2046 /*
2047 * Clear initial stalls.
2048 */
2049 static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
2051 int endp;
2052 struct usb_ctrlrequest *cr;
2053 struct completion compl;
2054 struct timer_list timer;
2055 int rc;
2057 init_completion(&compl);
2059 endp = usb_pipeendpoint(stalled_pipe);
2060 if (usb_pipein (stalled_pipe))
2061 endp |= USB_DIR_IN;
2063 cr = &sc->work_cr;
2064 cr->bRequestType = USB_RECIP_ENDPOINT;
2065 cr->bRequest = USB_REQ_CLEAR_FEATURE;
2066 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
2067 cr->wIndex = cpu_to_le16(endp);
2068 cr->wLength = cpu_to_le16(0);
2070 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2071 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2072 sc->work_urb.actual_length = 0;
2073 sc->work_urb.error_count = 0;
2074 sc->work_urb.status = 0;
2076 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2077 printk(KERN_WARNING
2078 "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
2079 return rc;
2082 init_timer(&timer);
2083 timer.function = ub_probe_timeout;
2084 timer.data = (unsigned long) &compl;
2085 timer.expires = jiffies + UB_CTRL_TIMEOUT;
2086 add_timer(&timer);
2088 wait_for_completion(&compl);
2090 del_timer_sync(&timer);
2091 usb_kill_urb(&sc->work_urb);
2093 /* reset the endpoint toggle */
2094 usb_settoggle(sc->dev, endp, usb_pipeout(sc->last_pipe), 0);
2096 return 0;
2099 /*
2100 * Get the pipe settings.
2101 */
2102 static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2103 struct usb_interface *intf)
2105 struct usb_host_interface *altsetting = intf->cur_altsetting;
2106 struct usb_endpoint_descriptor *ep_in = NULL;
2107 struct usb_endpoint_descriptor *ep_out = NULL;
2108 struct usb_endpoint_descriptor *ep;
2109 int i;
2111 /*
2112 * Find the endpoints we need.
2113 * We are expecting a minimum of 2 endpoints - in and out (bulk).
2114 * We will ignore any others.
2115 */
2116 for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
2117 ep = &altsetting->endpoint[i].desc;
2119 /* Is it a BULK endpoint? */
2120 if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
2121 == USB_ENDPOINT_XFER_BULK) {
2122 /* BULK in or out? */
2123 if (ep->bEndpointAddress & USB_DIR_IN)
2124 ep_in = ep;
2125 else
2126 ep_out = ep;
2127 }
2128 }
2130 if (ep_in == NULL || ep_out == NULL) {
2131 printk(KERN_NOTICE "%s: failed endpoint check\n",
2132 sc->name);
2133 return -ENODEV;
2134 }
2136 /* Calculate and store the pipe values */
2137 sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
2138 sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
2139 sc->send_bulk_pipe = usb_sndbulkpipe(dev,
2140 ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2141 sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
2142 ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2144 return 0;
2145 }
2147 /*
2148 * Probing is done in process context, which allows us to cheat
2149 * and not build a state machine for the discovery.
2150 */
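/*
 * A rough map of the sequence below: allocate and wire up the ub_dev,
 * resolve the bulk pipes, clear any pending SCSI check condition with a
 * few TEST_UNIT_READY commands, query the LUN count with GetMaxLUN, and
 * finally register a gendisk per LUN via ub_probe_lun().
 */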
2151 static int ub_probe(struct usb_interface *intf,
2152 const struct usb_device_id *dev_id)
2153 {
2154 struct ub_dev *sc;
2155 int nluns;
2156 int rc;
2157 int i;
2159 if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
2160 return -ENXIO;
2162 rc = -ENOMEM;
2163 if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
2164 goto err_core;
2165 sc->lock = ub_next_lock();
2166 INIT_LIST_HEAD(&sc->luns);
2167 usb_init_urb(&sc->work_urb);
2168 tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
2169 atomic_set(&sc->poison, 0);
2170 INIT_WORK(&sc->reset_work, ub_reset_task, sc);
2171 init_waitqueue_head(&sc->reset_wait);
2173 init_timer(&sc->work_timer);
2174 sc->work_timer.data = (unsigned long) sc;
2175 sc->work_timer.function = ub_urb_timeout;
2177 ub_init_completion(&sc->work_done);
2178 sc->work_done.done = 1; /* A little yuk, but oh well... */
2180 sc->dev = interface_to_usbdev(intf);
2181 sc->intf = intf;
2182 // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
2183 usb_set_intfdata(intf, sc);
2184 usb_get_dev(sc->dev);
2185 /*
2186 * Since we give the interface struct to the block level through
2187 * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
2188 * oopses on close after a disconnect (kernels 2.6.16 and up).
2189 */
2190 usb_get_intf(sc->intf);
2192 snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
2193 sc->dev->bus->busnum, sc->dev->devnum);
2195 /* XXX Verify that we can handle the device (from descriptors) */
2197 if (ub_get_pipes(sc, sc->dev, intf) != 0)
2198 goto err_dev_desc;
2200 /*
2201 * At this point, all USB initialization is done, do upper layer.
2202 * We really hate halfway initialized structures, so from the
2203 * invariants perspective, this ub_dev is fully constructed at
2204 * this point.
2205 */
2207 /*
2208 * This is needed to clear toggles. It is a problem only if we do
2209 * `rmmod ub && modprobe ub` without disconnects, but we like that.
2210 */
2211 #if 0 /* iPod Mini fails if we do this (big white iPod works) */
2212 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2213 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2214 #endif
2216 /*
2217 * The way this is used by the startup code is a little specific.
2218 * A SCSI check causes a USB stall. Our common case code sees it
2219 * and clears the check, after which the device is ready for use.
2220 * But if a check was not present, any command other than
2221 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
2222 *
2223 * If we neglect to clear the SCSI check, the first real command fails
2224 * (which is the capacity readout). We clear that and retry, but why
2225 * cause spurious retries for no reason?
2226 *
2227 * Revalidation may start with its own TEST_UNIT_READY, but that one
2228 * has to succeed, so we clear checks with an additional one here.
2229 * In any case it's not our business how revalidation is implemented.
2230 */
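/*
 * 0x6 below is presumably the UNIT ATTENTION sense key reported back by
 * ub_sync_tur(); the loop keeps retrying only while that is what comes
 * back.
 */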
2231 for (i = 0; i < 3; i++) { /* Retries for the schwag key from KS'04 */
2232 if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
2233 if (rc != 0x6) break;
2234 msleep(10);
2235 }
2237 nluns = 1;
2238 for (i = 0; i < 3; i++) {
2239 if ((rc = ub_sync_getmaxlun(sc)) < 0)
2240 break;
2241 if (rc != 0) {
2242 nluns = rc;
2243 break;
2244 }
2245 msleep(100);
2246 }
2248 for (i = 0; i < nluns; i++) {
2249 ub_probe_lun(sc, i);
2250 }
2251 return 0;
2253 err_dev_desc:
2254 usb_set_intfdata(intf, NULL);
2255 usb_put_intf(sc->intf);
2256 usb_put_dev(sc->dev);
2257 kfree(sc);
2258 err_core:
2259 return rc;
2260 }
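/*
 * ub_probe_lun() builds the block-layer side of one LUN: allocate the
 * ub_lun, take a drive letter from ub_id_get(), set up a gendisk and a
 * request queue on the device's sc->lock, and publish it all with
 * add_disk().
 */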
2262 static int ub_probe_lun(struct ub_dev *sc, int lnum)
2263 {
2264 struct ub_lun *lun;
2265 request_queue_t *q;
2266 struct gendisk *disk;
2267 int rc;
2269 rc = -ENOMEM;
2270 if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
2271 goto err_alloc;
2272 lun->num = lnum;
2274 rc = -ENOSR;
2275 if ((lun->id = ub_id_get()) == -1)
2276 goto err_id;
2278 lun->udev = sc;
2280 snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
2281 lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
2283 lun->removable = 1; /* XXX Query this from the device */
2284 lun->changed = 1; /* ub_revalidate clears only */
2285 ub_revalidate(sc, lun);
2287 rc = -ENOMEM;
2288 if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
2289 goto err_diskalloc;
2291 sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
2292 disk->major = UB_MAJOR;
2293 disk->first_minor = lun->id * UB_PARTS_PER_LUN;
2294 disk->fops = &ub_bd_fops;
2295 disk->private_data = lun;
2296 disk->driverfs_dev = &sc->intf->dev;
2298 rc = -ENOMEM;
2299 if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
2300 goto err_blkqinit;
2302 disk->queue = q;
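/*
 * These limits presumably mirror what the command layer can map: at most
 * UB_MAX_REQ_SG scatter-gather segments and UB_MAX_SECTORS sectors per
 * request, highmem pages bounced, and the hardware sector size taken
 * from the capacity data filled in by ub_revalidate() above.
 */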
2304 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
2305 blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
2306 blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
2307 blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
2308 blk_queue_max_sectors(q, UB_MAX_SECTORS);
2309 blk_queue_hardsect_size(q, lun->capacity.bsize);
2311 lun->disk = disk;
2312 q->queuedata = lun;
2313 list_add(&lun->link, &sc->luns);
2315 set_capacity(disk, lun->capacity.nsec);
2316 if (lun->removable)
2317 disk->flags |= GENHD_FL_REMOVABLE;
2319 add_disk(disk);
2321 return 0;
2323 err_blkqinit:
2324 put_disk(disk);
2325 err_diskalloc:
2326 ub_id_put(lun->id);
2327 err_id:
2328 kfree(lun);
2329 err_alloc:
2330 return rc;
2331 }
2333 static void ub_disconnect(struct usb_interface *intf)
2334 {
2335 struct ub_dev *sc = usb_get_intfdata(intf);
2336 struct list_head *p;
2337 struct ub_lun *lun;
2338 unsigned long flags;
2340 /*
2341 * Prevent ub_bd_release from pulling the rug from under us.
2342 * XXX This is starting to look like a kref.
2343 * XXX Why not take this ref at probe time?
2344 */
2345 spin_lock_irqsave(&ub_lock, flags);
2346 sc->openc++;
2347 spin_unlock_irqrestore(&ub_lock, flags);
2349 /*
2350 * Fence stall clearings, operations triggered by unlinkings and so on.
2351 * We do not attempt to unlink any URBs, because we do not trust the
2352 * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
2353 */
2354 atomic_set(&sc->poison, 1);
2356 /*
2357 * Wait for reset to end, if any.
2358 */
2359 wait_event(sc->reset_wait, !sc->reset);
2361 /*
2362 * Blow away queued commands.
2364 * Actually, this never works, because before we get here
2365 * the HCD terminates outstanding URB(s). It causes our
2366 * SCSI command queue to advance, commands fail to submit,
2367 * and the whole queue drains. So, we just use this code to
2368 * print warnings.
2369 */
2370 spin_lock_irqsave(sc->lock, flags);
2371 {
2372 struct ub_scsi_cmd *cmd;
2373 int cnt = 0;
2374 while ((cmd = ub_cmdq_peek(sc)) != NULL) {
2375 cmd->error = -ENOTCONN;
2376 cmd->state = UB_CMDST_DONE;
2377 ub_cmdq_pop(sc);
2378 (*cmd->done)(sc, cmd);
2379 cnt++;
2380 }
2381 if (cnt != 0) {
2382 printk(KERN_WARNING "%s: "
2383 "%d was queued after shutdown\n", sc->name, cnt);
2386 spin_unlock_irqrestore(sc->lock, flags);
2388 /*
2389 * Unregister the upper layer.
2390 */
2391 list_for_each (p, &sc->luns) {
2392 lun = list_entry(p, struct ub_lun, link);
2393 del_gendisk(lun->disk);
2394 /*
2395 * I wish I could do:
2396 * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
2397 * As it is, we rely on our internal poisoning and let
2398 * the upper levels spin furiously, failing all the I/O.
2399 */
2400 }
2402 /*
2403 * Testing for -EINPROGRESS is always a bug, so we are bending
2404 * the rules a little.
2405 */
2406 spin_lock_irqsave(sc->lock, flags);
2407 if (sc->work_urb.status == -EINPROGRESS) { /* janitors: ignore */
2408 printk(KERN_WARNING "%s: "
2409 "URB is active after disconnect\n", sc->name);
2411 spin_unlock_irqrestore(sc->lock, flags);
2413 /*
2414 * There is virtually no chance that another CPU is still in the timeout
2415 * this long after ub_urb_complete should have called del_timer, but that
2416 * holds only if the HCD did not forget to deliver a callback on unlink.
2417 */
2418 del_timer_sync(&sc->work_timer);
2420 /*
2421 * At this point there must be no commands coming from anyone
2422 * and no URBs left in transit.
2423 */
2425 ub_put(sc);
2426 }
2428 static struct usb_driver ub_driver = {
2429 .name = "ub",
2430 .probe = ub_probe,
2431 .disconnect = ub_disconnect,
2432 .id_table = ub_usb_ids,
2433 };
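/*
 * usb_usual_check_type() in ub_probe() and usb_usual_set_present() below
 * presumably let libusual arbitrate USB_US_TYPE_UB devices between this
 * driver and usb-storage, so only one of the two binds to a given device.
 */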
2435 static int __init ub_init(void)
2436 {
2437 int rc;
2438 int i;
2440 for (i = 0; i < UB_QLOCK_NUM; i++)
2441 spin_lock_init(&ub_qlockv[i]);
2443 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2444 goto err_regblkdev;
2446 if ((rc = usb_register(&ub_driver)) != 0)
2447 goto err_register;
2449 usb_usual_set_present(USB_US_TYPE_UB);
2450 return 0;
2452 err_register:
2453 unregister_blkdev(UB_MAJOR, DRV_NAME);
2454 err_regblkdev:
2455 return rc;
2456 }
2458 static void __exit ub_exit(void)
2459 {
2460 usb_deregister(&ub_driver);
2462 unregister_blkdev(UB_MAJOR, DRV_NAME);
2463 usb_usual_clear_present(USB_US_TYPE_UB);
2464 }
2466 module_init(ub_init);
2467 module_exit(ub_exit);
2469 MODULE_LICENSE("GPL");