ia64/linux-2.6.18-xen.hg

view drivers/xen/usbback/usbback.c @ 854:950b9eb27661

usbback: fix urb interval value for interrupt urbs.

Signed-off-by: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Apr 06 13:51:20 2009 +0100 (2009-04-06)
parents 4c7eb2e71e9d
/*
 * usbback.c
 *
 * Xen USB backend driver
 *
 * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
 * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * or, by your choice,
 *
 * When distributed separately from the Linux kernel or incorporated into
 * other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <linux/mm.h>
#include <xen/balloon.h>
#include "usbback.h"

#if 0
#include "../../usb/core/hub.h"
#endif

int usbif_reqs = USBIF_BACK_MAX_PENDING_REQS;
module_param_named(reqs, usbif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of usbback requests to allocate");
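
/*
 * Each in-flight ring request is tracked by a pending_req_t. The seg
 * array records the offset and length of every granted page fragment
 * so that completion can copy IN data back to the frontend's pages.
 */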
struct pending_req_segment {
        uint16_t offset;
        uint16_t length;
};

typedef struct {
        usbif_t *usbif;

        uint16_t id; /* request id */

        struct usbstub *stub;
        struct list_head urb_list;

        /* urb */
        struct urb *urb;
        void *buffer;
        dma_addr_t transfer_dma;
        struct usb_ctrlrequest *setup;
        dma_addr_t setup_dma;

        /* request segments */
        uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
        uint16_t nr_extra_segs; /* number of iso_frame_desc segments (ISO) */
        struct pending_req_segment *seg;

        struct list_head free_list;
} pending_req_t;

static pending_req_t *pending_reqs;
static struct list_head pending_free;
static DEFINE_SPINLOCK(pending_free_lock);
static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);

#define USBBACK_INVALID_HANDLE (~0)

static struct page **pending_pages;
static grant_handle_t *pending_grant_handles;

static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
        return (req - pending_reqs) * USBIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
        unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
        return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
        (pending_grant_handles[vaddr_pagenr(_req, _seg)])
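
/*
 * alloc_req()/free_req() manage the fixed pool of pending_req_t entries.
 * free_req() wakes the scheduler thread when the pool goes from empty
 * to non-empty, matching the wait in usbbk_schedule().
 */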
static pending_req_t *alloc_req(void)
{
        pending_req_t *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pending_free_lock, flags);
        if (!list_empty(&pending_free)) {
                req = list_entry(pending_free.next, pending_req_t, free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&pending_free_lock, flags);
        return req;
}

static void free_req(pending_req_t *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&pending_free_lock, flags);
        was_empty = list_empty(&pending_free);
        list_add(&req->free_list, &pending_free);
        spin_unlock_irqrestore(&pending_free_lock, flags);
        if (was_empty)
                wake_up(&pending_free_wq);
}

static inline void add_req_to_submitting_list(struct usbstub *stub, pending_req_t *pending_req)
{
        unsigned long flags;

        spin_lock_irqsave(&stub->submitting_lock, flags);
        list_add_tail(&pending_req->urb_list, &stub->submitting_list);
        spin_unlock_irqrestore(&stub->submitting_lock, flags);
}

static inline void remove_req_from_submitting_list(struct usbstub *stub, pending_req_t *pending_req)
{
        unsigned long flags;

        spin_lock_irqsave(&stub->submitting_lock, flags);
        list_del_init(&pending_req->urb_list);
        spin_unlock_irqrestore(&stub->submitting_lock, flags);
}

void usbbk_unlink_urbs(struct usbstub *stub)
{
        pending_req_t *req, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&stub->submitting_lock, flags);
        list_for_each_entry_safe(req, tmp, &stub->submitting_list, urb_list) {
                usb_unlink_urb(req->urb);
        }
        spin_unlock_irqrestore(&stub->submitting_lock, flags);
}
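
/*
 * Unmap all grant mappings still held by a request and free its
 * segment array. Segments whose mapping failed earlier are skipped
 * via USBBACK_INVALID_HANDLE.
 */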
static void fast_flush_area(pending_req_t *pending_req)
{
        struct gnttab_unmap_grant_ref unmap[USBIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, nr_segs, invcount = 0;
        grant_handle_t handle;
        int ret;

        nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;

        if (nr_segs) {
                for (i = 0; i < nr_segs; i++) {
                        handle = pending_handle(pending_req, i);
                        if (handle == USBBACK_INVALID_HANDLE)
                                continue;
                        gnttab_set_unmap_op(&unmap[invcount], vaddr(pending_req, i),
                                        GNTMAP_host_map, handle);
                        pending_handle(pending_req, i) = USBBACK_INVALID_HANDLE;
                        invcount++;
                }

                ret = HYPERVISOR_grant_table_op(
                        GNTTABOP_unmap_grant_ref, unmap, invcount);
                BUG_ON(ret);

                kfree(pending_req->seg);
        }

        return;
}

static void copy_buff_to_pages(void *buff, pending_req_t *pending_req,
                int start, int nr_pages)
{
        unsigned long copied = 0;
        int i;

        for (i = start; i < start + nr_pages; i++) {
                memcpy((void *) vaddr(pending_req, i) + pending_req->seg[i].offset,
                        buff + copied,
                        pending_req->seg[i].length);
                copied += pending_req->seg[i].length;
        }
}

static void copy_pages_to_buff(void *buff, pending_req_t *pending_req,
                int start, int nr_pages)
{
        unsigned long copied = 0;
        int i;

        for (i = start; i < start + nr_pages; i++) {
                memcpy(buff + copied,
                        (void *) vaddr(pending_req, i) + pending_req->seg[i].offset,
                        pending_req->seg[i].length);
                copied += pending_req->seg[i].length;
        }
}
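
/*
 * Allocate the urb and its DMA-coherent buffers. Isochronous pipes
 * need one iso_frame_desc per packet; control pipes additionally get
 * a setup packet buffer.
 */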
static int usbbk_alloc_urb(usbif_request_t *req, pending_req_t *pending_req)
{
        int ret;

        if (usb_pipeisoc(req->pipe))
                pending_req->urb = usb_alloc_urb(req->u.isoc.number_of_packets, GFP_KERNEL);
        else
                pending_req->urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!pending_req->urb) {
                printk(KERN_ERR "usbback: can't alloc urb\n");
                ret = -ENOMEM;
                goto fail;
        }

        if (req->buffer_length) {
                pending_req->buffer = usb_buffer_alloc(pending_req->stub->udev,
                                req->buffer_length, GFP_KERNEL,
                                &pending_req->transfer_dma);
                if (!pending_req->buffer) {
                        printk(KERN_ERR "usbback: can't alloc urb buffer\n");
                        ret = -ENOMEM;
                        goto fail_free_urb;
                }
        }

        if (usb_pipecontrol(req->pipe)) {
                pending_req->setup = usb_buffer_alloc(pending_req->stub->udev,
                                sizeof(struct usb_ctrlrequest), GFP_KERNEL,
                                &pending_req->setup_dma);
                if (!pending_req->setup) {
                        printk(KERN_ERR "usbback: can't alloc usb_ctrlrequest\n");
                        ret = -ENOMEM;
                        goto fail_free_buffer;
                }
        }

        return 0;

fail_free_buffer:
        if (req->buffer_length)
                usb_buffer_free(pending_req->stub->udev, req->buffer_length,
                                pending_req->buffer, pending_req->transfer_dma);
fail_free_urb:
        usb_free_urb(pending_req->urb);
fail:
        return ret;
}

static void usbbk_free_urb(struct urb *urb)
{
        if (usb_pipecontrol(urb->pipe))
                usb_buffer_free(urb->dev, sizeof(struct usb_ctrlrequest),
                                urb->setup_packet, urb->setup_dma);
        if (urb->transfer_buffer_length)
                usb_buffer_free(urb->dev, urb->transfer_buffer_length,
                                urb->transfer_buffer, urb->transfer_dma);
        barrier();
        usb_free_urb(urb);
}

static void usbbk_notify_work(usbif_t *usbif)
{
        usbif->waiting_reqs = 1;
        wake_up(&usbif->wq);
}

irqreturn_t usbbk_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
        usbbk_notify_work(dev_id);
        return IRQ_HANDLED;
}
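
/*
 * Put a response on the shared ring and notify the frontend through
 * its event channel if the ring macros indicate it is needed.
 */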
static void usbbk_do_response(pending_req_t *pending_req, int32_t status,
                int32_t actual_length, int32_t error_count, uint16_t start_frame)
{
        usbif_t *usbif = pending_req->usbif;
        usbif_response_t *ring_res;
        unsigned long flags;
        int notify;

        spin_lock_irqsave(&usbif->ring_lock, flags);
        ring_res = RING_GET_RESPONSE(&usbif->ring, usbif->ring.rsp_prod_pvt);
        ring_res->id = pending_req->id;
        ring_res->status = status;
        ring_res->actual_length = actual_length;
        ring_res->error_count = error_count;
        ring_res->start_frame = start_frame;
        usbif->ring.rsp_prod_pvt++;
        barrier();
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&usbif->ring, notify);
        spin_unlock_irqrestore(&usbif->ring_lock, flags);

        if (notify)
                notify_remote_via_irq(usbif->irq);
}
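
/*
 * Urb completion handler: copy IN data (and isochronous frame
 * descriptors) back into the frontend's granted pages, unmap the
 * grants, answer the ring request and drop all references.
 */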
static void usbbk_urb_complete(struct urb *urb, struct pt_regs *regs)
{
        pending_req_t *pending_req = (pending_req_t *)urb->context;

        if (usb_pipein(urb->pipe) && urb->status == 0 && urb->actual_length > 0)
                copy_buff_to_pages(pending_req->buffer, pending_req,
                                0, pending_req->nr_buffer_segs);

        if (usb_pipeisoc(urb->pipe))
                copy_buff_to_pages(&urb->iso_frame_desc[0], pending_req,
                                pending_req->nr_buffer_segs, pending_req->nr_extra_segs);

        barrier();

        fast_flush_area(pending_req);

        usbbk_do_response(pending_req, urb->status, urb->actual_length,
                        urb->error_count, urb->start_frame);

        remove_req_from_submitting_list(pending_req->stub, pending_req);

        barrier();
        usbbk_free_urb(urb);
        usbif_put(pending_req->usbif);
        free_req(pending_req);
}
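
/*
 * Map the frontend's granted pages into backend virtual address
 * space. OUT transfer buffers are mapped read-only; any bad grant or
 * out-of-range segment unwinds via fast_flush_area().
 */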
static int usbbk_gnttab_map(usbif_t *usbif,
                usbif_request_t *req, pending_req_t *pending_req)
{
        int i, ret;
        unsigned int nr_segs;
        uint32_t flags;
        struct gnttab_map_grant_ref map[USBIF_MAX_SEGMENTS_PER_REQUEST];

        nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;

        if (nr_segs > USBIF_MAX_SEGMENTS_PER_REQUEST) {
                printk(KERN_ERR "Bad number of segments in request\n");
                ret = -EINVAL;
                goto fail;
        }

        if (nr_segs) {
                pending_req->seg = kmalloc(sizeof(struct pending_req_segment)
                                * nr_segs, GFP_KERNEL);
                if (!pending_req->seg) {
                        ret = -ENOMEM;
                        goto fail;
                }

                if (pending_req->nr_buffer_segs) {
                        flags = GNTMAP_host_map;
                        if (usb_pipeout(req->pipe))
                                flags |= GNTMAP_readonly;
                        for (i = 0; i < pending_req->nr_buffer_segs; i++)
                                gnttab_set_map_op(&map[i], vaddr(
                                                pending_req, i), flags,
                                                req->seg[i].gref,
                                                usbif->domid);
                }

                if (pending_req->nr_extra_segs) {
                        flags = GNTMAP_host_map;
                        for (i = req->nr_buffer_segs; i < nr_segs; i++)
                                gnttab_set_map_op(&map[i], vaddr(
                                                pending_req, i), flags,
                                                req->seg[i].gref,
                                                usbif->domid);
                }

                ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
                                map, nr_segs);
                BUG_ON(ret);

                for (i = 0; i < nr_segs; i++) {
                        if (unlikely(map[i].status != 0)) {
                                printk(KERN_ERR "usbback: invalid buffer -- could not remap it\n");
                                map[i].handle = USBBACK_INVALID_HANDLE;
                                ret |= 1;
                        }

                        pending_handle(pending_req, i) = map[i].handle;

                        if (ret)
                                continue;

                        set_phys_to_machine(__pa(vaddr(
                                pending_req, i)) >> PAGE_SHIFT,
                                FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));

                        pending_req->seg[i].offset = req->seg[i].offset;
                        pending_req->seg[i].length = req->seg[i].length;

                        barrier();

                        if (pending_req->seg[i].offset >= PAGE_SIZE ||
                                        pending_req->seg[i].length > PAGE_SIZE ||
                                        pending_req->seg[i].offset + pending_req->seg[i].length > PAGE_SIZE)
                                ret |= 1;
                }

                if (ret)
                        goto fail_flush;
        }

        return 0;

fail_flush:
        fast_flush_area(pending_req);
        ret = -ENOMEM;

fail:
        return ret;
}
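
/*
 * Fill in the urb according to the pipe type carried in the ring
 * request. The frontend passes transfer_flags through unchanged, and
 * DMA mapping is suppressed since the buffers were allocated with
 * usb_buffer_alloc().
 */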
static void usbbk_init_urb(usbif_request_t *req, pending_req_t *pending_req)
{
        unsigned int pipe;
        struct usb_device *udev = pending_req->stub->udev;
        struct urb *urb = pending_req->urb;

        switch (usb_pipetype(req->pipe)) {
        case PIPE_ISOCHRONOUS:
                if (usb_pipein(req->pipe))
                        pipe = usb_rcvisocpipe(udev, usb_pipeendpoint(req->pipe));
                else
                        pipe = usb_sndisocpipe(udev, usb_pipeendpoint(req->pipe));

                urb->dev = udev;
                urb->pipe = pipe;
                urb->transfer_flags = req->transfer_flags;
                urb->transfer_flags |= URB_ISO_ASAP;
                urb->transfer_buffer = pending_req->buffer;
                urb->transfer_buffer_length = req->buffer_length;
                urb->complete = usbbk_urb_complete;
                urb->context = pending_req;
                urb->interval = req->u.isoc.interval;
                urb->start_frame = req->u.isoc.start_frame;
                urb->number_of_packets = req->u.isoc.number_of_packets;

                break;
        case PIPE_INTERRUPT:
                if (usb_pipein(req->pipe))
                        pipe = usb_rcvintpipe(udev, usb_pipeendpoint(req->pipe));
                else
                        pipe = usb_sndintpipe(udev, usb_pipeendpoint(req->pipe));

                usb_fill_int_urb(urb, udev, pipe,
                                pending_req->buffer, req->buffer_length,
                                usbbk_urb_complete,
                                pending_req, req->u.intr.interval);
                /*
                 * High-speed interrupt endpoints use a logarithmic
                 * encoding of the endpoint interval, and
                 * usb_fill_int_urb() initializes an interrupt urb
                 * with the encoded interval value.
                 *
                 * req->u.intr.interval was already encoded in the
                 * frontend, so the usb_fill_int_urb() call above
                 * leaves urb->interval encoded twice.
                 *
                 * So, simply overwrite urb->interval with the
                 * original value.
                 */
                urb->interval = req->u.intr.interval;
                urb->transfer_flags = req->transfer_flags;

                break;
        case PIPE_CONTROL:
                if (usb_pipein(req->pipe))
                        pipe = usb_rcvctrlpipe(udev, 0);
                else
                        pipe = usb_sndctrlpipe(udev, 0);

                usb_fill_control_urb(urb, udev, pipe,
                                (unsigned char *) pending_req->setup,
                                pending_req->buffer, req->buffer_length,
                                usbbk_urb_complete, pending_req);
                memcpy(pending_req->setup, req->u.ctrl, 8);
                urb->setup_dma = pending_req->setup_dma;
                urb->transfer_flags = req->transfer_flags;
                urb->transfer_flags |= URB_NO_SETUP_DMA_MAP;

                break;
        case PIPE_BULK:
                if (usb_pipein(req->pipe))
                        pipe = usb_rcvbulkpipe(udev, usb_pipeendpoint(req->pipe));
                else
                        pipe = usb_sndbulkpipe(udev, usb_pipeendpoint(req->pipe));

                usb_fill_bulk_urb(urb, udev, pipe,
                                pending_req->buffer, req->buffer_length,
                                usbbk_urb_complete, pending_req);
                urb->transfer_flags = req->transfer_flags;

                break;
        default:
                break;
        }

        if (req->buffer_length) {
                urb->transfer_dma = pending_req->transfer_dma;
                urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
        }
}
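
/*
 * usb_set_interface() and usb_clear_halt() sleep and take the device
 * lock, so these requests are handed off to a work item rather than
 * being issued directly from the request-dispatch path; the work
 * handler sends the ring response once the call completes.
 */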
struct set_interface_request {
        pending_req_t *pending_req;
        int interface;
        int alternate;
        struct work_struct work;
};

static void usbbk_set_interface_work(void *data)
{
        struct set_interface_request *req = (struct set_interface_request *) data;
        pending_req_t *pending_req = req->pending_req;
        struct usb_device *udev = req->pending_req->stub->udev;
        int ret;

        usb_lock_device(udev);
        ret = usb_set_interface(udev, req->interface, req->alternate);
        usb_unlock_device(udev);
        usb_put_dev(udev);

        usbbk_do_response(pending_req, ret, 0, 0, 0);
        usbif_put(pending_req->usbif);
        free_req(pending_req);
        kfree(req);
}

static int usbbk_set_interface(pending_req_t *pending_req, int interface, int alternate)
{
        struct set_interface_request *req;
        struct usb_device *udev = pending_req->stub->udev;

        req = kmalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;
        req->pending_req = pending_req;
        req->interface = interface;
        req->alternate = alternate;
        INIT_WORK(&req->work, usbbk_set_interface_work, req);
        usb_get_dev(udev);
        schedule_work(&req->work);
        return 0;
}

struct clear_halt_request {
        pending_req_t *pending_req;
        int pipe;
        struct work_struct work;
};

static void usbbk_clear_halt_work(void *data)
{
        struct clear_halt_request *req = (struct clear_halt_request *) data;
        pending_req_t *pending_req = req->pending_req;
        struct usb_device *udev = req->pending_req->stub->udev;
        int ret;

        usb_lock_device(udev);
        ret = usb_clear_halt(req->pending_req->stub->udev, req->pipe);
        usb_unlock_device(udev);
        usb_put_dev(udev);

        usbbk_do_response(pending_req, ret, 0, 0, 0);
        usbif_put(pending_req->usbif);
        free_req(pending_req);
        kfree(req);
}

static int usbbk_clear_halt(pending_req_t *pending_req, int pipe)
{
        struct clear_halt_request *req;
        struct usb_device *udev = pending_req->stub->udev;

        req = kmalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;
        req->pending_req = pending_req;
        req->pipe = pipe;
        INIT_WORK(&req->work, usbbk_clear_halt_work, req);

        usb_get_dev(udev);
        schedule_work(&req->work);
        return 0;
}
#if 0
struct port_reset_request {
        pending_req_t *pending_req;
        struct work_struct work;
};

static void usbbk_port_reset_work(void *data)
{
        struct port_reset_request *req = (struct port_reset_request *) data;
        pending_req_t *pending_req = req->pending_req;
        struct usb_device *udev = pending_req->stub->udev;
        int ret, ret_lock;

        ret = ret_lock = usb_lock_device_for_reset(udev, NULL);
        if (ret_lock >= 0) {
                ret = usb_reset_device(udev);
                if (ret_lock)
                        usb_unlock_device(udev);
        }
        usb_put_dev(udev);

        usbbk_do_response(pending_req, ret, 0, 0, 0);
        usbif_put(pending_req->usbif);
        free_req(pending_req);
        kfree(req);
}

static int usbbk_port_reset(pending_req_t *pending_req)
{
        struct port_reset_request *req;
        struct usb_device *udev = pending_req->stub->udev;

        req = kmalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->pending_req = pending_req;
        INIT_WORK(&req->work, usbbk_port_reset_work, req);

        usb_get_dev(udev);
        schedule_work(&req->work);
        return 0;
}
#endif
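
/*
 * addr_table[] maps the device address assigned by SET_ADDRESS back
 * to its usbstub; passing a new address of 0 removes the entry.
 */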
static void usbbk_set_address(usbif_t *usbif, struct usbstub *stub, int cur_addr, int new_addr)
{
        unsigned long flags;

        spin_lock_irqsave(&usbif->addr_lock, flags);
        if (cur_addr)
                usbif->addr_table[cur_addr] = NULL;
        if (new_addr)
                usbif->addr_table[new_addr] = stub;
        stub->addr = new_addr;
        spin_unlock_irqrestore(&usbif->addr_lock, flags);
}

struct usbstub *find_attached_device(usbif_t *usbif, int portnum)
{
        struct usbstub *stub;
        int found = 0;
        unsigned long flags;

        spin_lock_irqsave(&usbif->plug_lock, flags);
        list_for_each_entry(stub, &usbif->plugged_devices, plugged_list) {
                if (stub->id->portnum == portnum) {
                        found = 1;
                        break;
                }
        }
        spin_unlock_irqrestore(&usbif->plug_lock, flags);

        if (found)
                return stub;

        return NULL;
}
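
/*
 * Returns non-zero if the control request was consumed here (answered
 * immediately or deferred to a work item); zero means the caller
 * should submit it as a normal urb.
 */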
static int check_and_submit_special_ctrlreq(usbif_t *usbif, usbif_request_t *req, pending_req_t *pending_req)
{
        int devnum;
        struct usbstub *stub = NULL;
        struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *) req->u.ctrl;
        int ret;
        int done = 0;

        devnum = usb_pipedevice(req->pipe);

        /*
         * When a device is first connected or reset, it has no address.
         * In this initial state, the following requests are sent to
         * device address 0:
         *
         * 1. GET_DESCRIPTOR (with descriptor type DEVICE) is sent, so
         *    the OS learns what device is connected.
         *
         * 2. SET_ADDRESS is sent, after which the device has its address.
         *
         * In the next step, SET_CONFIGURATION is sent to the addressed
         * device, and the device is finally ready to use.
         */
        if (unlikely(devnum == 0)) {
                stub = find_attached_device(usbif, usbif_pipeportnum(req->pipe));
                if (unlikely(!stub)) {
                        ret = -ENODEV;
                        goto fail_response;
                }

                switch (ctrl->bRequest) {
                case USB_REQ_GET_DESCRIPTOR:
                        /*
                         * GET_DESCRIPTOR request to device #0.
                         * Fall through to a normal urb transfer.
                         */
                        pending_req->stub = stub;
                        return 0;
                        break;
                case USB_REQ_SET_ADDRESS:
                        /*
                         * SET_ADDRESS request to device #0.
                         * Add the attached device to addr_table.
                         */
                        {
                                __u16 addr = le16_to_cpu(ctrl->wValue);
                                usbbk_set_address(usbif, stub, 0, addr);
                        }
                        ret = 0;
                        goto fail_response;
                        break;
                default:
                        ret = -EINVAL;
                        goto fail_response;
                }
        } else {
                if (unlikely(!usbif->addr_table[devnum])) {
                        ret = -ENODEV;
                        goto fail_response;
                }
                pending_req->stub = usbif->addr_table[devnum];
        }

        /*
         * Check for special requests.
         */
        switch (ctrl->bRequest) {
        case USB_REQ_SET_ADDRESS:
                /*
                 * SET_ADDRESS request to an addressed device.
                 * Change its address or remove it from addr_table.
                 */
                {
                        __u16 addr = le16_to_cpu(ctrl->wValue);
                        usbbk_set_address(usbif, pending_req->stub, devnum, addr);
                }
                ret = 0;
                goto fail_response;
                break;
#if 0
        case USB_REQ_SET_CONFIGURATION:
                /*
                 * linux 2.6.27 or later versions only!
                 */
                if (ctrl->bRequestType == USB_RECIP_DEVICE) {
                        __u16 config = le16_to_cpu(ctrl->wValue);
                        usb_driver_set_configuration(pending_req->stub->udev, config);
                        done = 1;
                }
                break;
#endif
        case USB_REQ_SET_INTERFACE:
                if (ctrl->bRequestType == USB_RECIP_INTERFACE) {
                        __u16 alt = le16_to_cpu(ctrl->wValue);
                        __u16 intf = le16_to_cpu(ctrl->wIndex);
                        usbbk_set_interface(pending_req, intf, alt);
                        done = 1;
                }
                break;
        case USB_REQ_CLEAR_FEATURE:
                if (ctrl->bRequestType == USB_RECIP_ENDPOINT
                                && ctrl->wValue == USB_ENDPOINT_HALT) {
                        int pipe;
                        int ep = le16_to_cpu(ctrl->wIndex) & 0x0f;
                        int dir = le16_to_cpu(ctrl->wIndex) & USB_DIR_IN;
                        if (dir)
                                pipe = usb_rcvctrlpipe(pending_req->stub->udev, ep);
                        else
                                pipe = usb_sndctrlpipe(pending_req->stub->udev, ep);
                        usbbk_clear_halt(pending_req, pipe);
                        done = 1;
                }
                break;
#if 0 /* not tested yet */
        case USB_REQ_SET_FEATURE:
                if (ctrl->bRequestType == USB_RT_PORT) {
                        __u16 feat = le16_to_cpu(ctrl->wValue);
                        if (feat == USB_PORT_FEAT_RESET) {
                                usbbk_port_reset(pending_req);
                                done = 1;
                        }
                }
                break;
#endif
        default:
                break;
        }

        return done;

fail_response:
        usbbk_do_response(pending_req, ret, 0, 0, 0);
        usbif_put(usbif);
        free_req(pending_req);
        return 1;
}
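
/*
 * Take one ring request: route control requests through the special
 * handler above, map the data pages, and submit the urb. Any failure
 * is reported back to the frontend as a response.
 */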
static void dispatch_request_to_pending_reqs(usbif_t *usbif,
                usbif_request_t *req,
                pending_req_t *pending_req)
{
        int ret;

        pending_req->id = req->id;
        pending_req->usbif = usbif;

        barrier();

        /*
         * TODO:
         * receive unlink requests and cancel the urbs in the backend.
         */
#if 0
        if (unlikely(usb_pipeunlink(req->pipe))) {

        }
#endif

        usbif_get(usbif);

        if (usb_pipecontrol(req->pipe)) {
                if (check_and_submit_special_ctrlreq(usbif, req, pending_req))
                        return;
        } else {
                int devnum = usb_pipedevice(req->pipe);
                if (unlikely(!usbif->addr_table[devnum])) {
                        ret = -ENODEV;
                        goto fail_response;
                }
                pending_req->stub = usbif->addr_table[devnum];
        }

        barrier();

        ret = usbbk_alloc_urb(req, pending_req);
        if (ret) {
                ret = -ESHUTDOWN;
                goto fail_response;
        }

        add_req_to_submitting_list(pending_req->stub, pending_req);

        barrier();

        usbbk_init_urb(req, pending_req);

        barrier();

        pending_req->nr_buffer_segs = req->nr_buffer_segs;
        if (usb_pipeisoc(req->pipe))
                pending_req->nr_extra_segs = req->u.isoc.nr_frame_desc_segs;
        else
                pending_req->nr_extra_segs = 0;

        barrier();

        ret = usbbk_gnttab_map(usbif, req, pending_req);
        if (ret) {
                printk(KERN_ERR "usbback: invalid buffer\n");
                ret = -ESHUTDOWN;
                goto fail_free_urb;
        }

        barrier();

        if (usb_pipeout(req->pipe) && req->buffer_length)
                copy_pages_to_buff(pending_req->buffer,
                                pending_req,
                                0,
                                pending_req->nr_buffer_segs);
        if (usb_pipeisoc(req->pipe)) {
                copy_pages_to_buff(&pending_req->urb->iso_frame_desc[0],
                                pending_req,
                                pending_req->nr_buffer_segs,
                                pending_req->nr_extra_segs);
        }

        barrier();

        ret = usb_submit_urb(pending_req->urb, GFP_KERNEL);
        if (ret) {
                printk(KERN_ERR "usbback: failed submitting urb, error %d\n", ret);
                ret = -ESHUTDOWN;
                goto fail_flush_area;
        }
        return;

fail_flush_area:
        fast_flush_area(pending_req);
fail_free_urb:
        remove_req_from_submitting_list(pending_req->stub, pending_req);
        barrier();
        usbbk_free_urb(pending_req->urb);
fail_response:
        usbbk_do_response(pending_req, ret, 0, 0, 0);
        usbif_put(usbif);
        free_req(pending_req);
}
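
/*
 * Consume requests from the shared ring until it is empty or the
 * pending_req pool runs dry; returns non-zero if more work remains.
 */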
static int usbbk_start_submit_urb(usbif_t *usbif)
{
        usbif_back_ring_t *usb_ring = &usbif->ring;
        usbif_request_t *ring_req;
        pending_req_t *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = usb_ring->req_cons;
        rp = usb_ring->sring->req_prod;
        rmb();

        while (rc != rp) {
                if (RING_REQUEST_CONS_OVERFLOW(usb_ring, rc)) {
                        printk(KERN_WARNING "RING_REQUEST_CONS_OVERFLOW\n");
                        break;
                }

                pending_req = alloc_req();
                if (NULL == pending_req) {
                        more_to_do = 1;
                        break;
                }

                ring_req = RING_GET_REQUEST(usb_ring, rc);
                usb_ring->req_cons = ++rc;

                dispatch_request_to_pending_reqs(usbif, ring_req,
                                pending_req);
        }

        RING_FINAL_CHECK_FOR_REQUESTS(&usbif->ring, more_to_do);

        cond_resched();

        return more_to_do;
}
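
/*
 * Per-usbif kernel thread: sleeps until the frontend posts requests
 * and a pending_req is available, then drains the ring.
 */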
int usbbk_schedule(void *arg)
{
        usbif_t *usbif = (usbif_t *)arg;

        usbif_get(usbif);

        while (!kthread_should_stop()) {
                wait_event_interruptible(
                        usbif->wq,
                        usbif->waiting_reqs || kthread_should_stop());
                wait_event_interruptible(
                        pending_free_wq,
                        !list_empty(&pending_free) || kthread_should_stop());
                usbif->waiting_reqs = 0;
                smp_mb();

                if (usbbk_start_submit_urb(usbif))
                        usbif->waiting_reqs = 1;
        }

        usbif->xenusbd = NULL;
        usbif_put(usbif);

        return 0;
}
/*
 * attach the grabbed device to usbif.
 */
void usbbk_plug_device(usbif_t *usbif, struct usbstub *stub)
{
        unsigned long flags;

        spin_lock_irqsave(&usbif->plug_lock, flags);
        list_add(&stub->plugged_list, &usbif->plugged_devices);
        spin_unlock_irqrestore(&usbif->plug_lock, flags);
        stub->plugged = 1;
        stub->usbif = usbif;
}

/*
 * detach the grabbed device from usbif.
 */
void usbbk_unplug_device(usbif_t *usbif, struct usbstub *stub)
{
        unsigned long flags;

        if (stub->addr)
                usbbk_set_address(usbif, stub, stub->addr, 0);
        spin_lock_irqsave(&usbif->plug_lock, flags);
        list_del(&stub->plugged_list);
        spin_unlock_irqrestore(&usbif->plug_lock, flags);
        stub->plugged = 0;
        stub->usbif = NULL;
}

void detach_device_without_lock(usbif_t *usbif, struct usbstub *stub)
{
        if (stub->addr)
                usbbk_set_address(usbif, stub, stub->addr, 0);
        list_del(&stub->plugged_list);
        stub->plugged = 0;
        stub->usbif = NULL;
}
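
/*
 * Module init: allocate the pending_req pool and one page per possible
 * segment (usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST), then register
 * with xenbus.
 */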
static int __init usbback_init(void)
{
        int i, mmap_pages;

        if (!is_running_on_xen())
                return -ENODEV;

        if (usbstub_init())
                return -ENODEV;

        mmap_pages = usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST;
        pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
                        usbif_reqs, GFP_KERNEL);
        pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
                        mmap_pages, GFP_KERNEL);
        pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);

        if (!pending_reqs || !pending_grant_handles || !pending_pages)
                goto out_of_memory;

        for (i = 0; i < mmap_pages; i++)
                pending_grant_handles[i] = USBBACK_INVALID_HANDLE;

        memset(pending_reqs, 0, sizeof(pending_reqs[0]) * usbif_reqs);
        INIT_LIST_HEAD(&pending_free);

        for (i = 0; i < usbif_reqs; i++) {
                list_add_tail(&pending_reqs[i].free_list, &pending_free);
        }

        usbback_xenbus_init();

        return 0;

out_of_memory:
        kfree(pending_reqs);
        kfree(pending_grant_handles);
        free_empty_pages_and_pagevec(pending_pages, mmap_pages);
        printk(KERN_ERR "%s: out of memory\n", __FUNCTION__);
        return -ENOMEM;
}

static void __exit usbback_exit(void)
{
        usbback_xenbus_exit();
        usbstub_exit();
        kfree(pending_reqs);
        kfree(pending_grant_handles);
        free_empty_pages_and_pagevec(pending_pages,
                        usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST);
}
module_init(usbback_init);
module_exit(usbback_exit);

MODULE_AUTHOR("");
MODULE_DESCRIPTION("Xen USB backend driver (usbback)");
MODULE_LICENSE("Dual BSD/GPL");