ia64/linux-2.6.18-xen.hg

view drivers/xen/usbback/usbback.c @ 845:4c7eb2e71e9d

pvusb: Fix license headers.

Signed-off-by: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 31 11:11:23 2009 +0100 (2009-03-31)
parents f799db0570f2
children 950b9eb27661
line source
1 /*
2 * usbback.c
3 *
4 * Xen USB backend driver
5 *
6 * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
7 * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 *
22 * or, by your choice,
23 *
24 * When distributed separately from the Linux kernel or incorporated into
25 * other software packages, subject to the following license:
26 *
27 * Permission is hereby granted, free of charge, to any person obtaining a copy
28 * of this software and associated documentation files (the "Software"), to
29 * deal in the Software without restriction, including without limitation the
30 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
31 * sell copies of the Software, and to permit persons to whom the Software is
32 * furnished to do so, subject to the following conditions:
33 *
34 * The above copyright notice and this permission notice shall be included in
35 * all copies or substantial portions of the Software.
36 *
37 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
38 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
39 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
40 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
41 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
42 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
43 * DEALINGS IN THE SOFTWARE.
44 */
46 #include <linux/mm.h>
47 #include <xen/balloon.h>
48 #include "usbback.h"
50 #if 0
51 #include "../../usb/core/hub.h"
52 #endif
54 int usbif_reqs = USBIF_BACK_MAX_PENDING_REQS;
55 module_param_named(reqs, usbif_reqs, int, 0);
56 MODULE_PARM_DESC(reqs, "Number of usbback requests to allocate");
58 struct pending_req_segment {
59 uint16_t offset;
60 uint16_t length;
61 };
63 typedef struct {
64 usbif_t *usbif;
66 uint16_t id; /* request id */
68 struct usbstub *stub;
69 struct list_head urb_list;
71 /* urb */
72 struct urb *urb;
73 void *buffer;
74 dma_addr_t transfer_dma;
75 struct usb_ctrlrequest *setup;
76 dma_addr_t setup_dma;
78 /* request segments */
79 uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
80 uint16_t nr_extra_segs; /* number of iso_frame_desc segments (ISO) */
81 struct pending_req_segment *seg;
83 struct list_head free_list;
84 } pending_req_t;
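/*
 * All in-flight requests are tracked by a fixed pool of pending_req_t
 * entries (pending_reqs), sized by the "reqs" module parameter.  Free
 * entries sit on pending_free, protected by pending_free_lock;
 * alloc_req()/free_req() take and return entries, and free_req() wakes
 * pending_free_wq when the list goes from empty to non-empty.
 */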
86 static pending_req_t *pending_reqs;
87 static struct list_head pending_free;
88 static DEFINE_SPINLOCK(pending_free_lock);
89 static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
91 #define USBBACK_INVALID_HANDLE (~0)
93 static struct page **pending_pages;
94 static grant_handle_t *pending_grant_handles;
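/*
 * Each pending request owns a fixed window of
 * USBIF_MAX_SEGMENTS_PER_REQUEST preallocated pages.  vaddr_pagenr()
 * flattens a (request, segment) pair into an index shared by
 * pending_pages and pending_grant_handles; vaddr() returns the kernel
 * virtual address of that page and pending_handle() its grant handle.
 */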
96 static inline int vaddr_pagenr(pending_req_t *req, int seg)
97 {
98 return (req - pending_reqs) * USBIF_MAX_SEGMENTS_PER_REQUEST + seg;
99 }
101 static inline unsigned long vaddr(pending_req_t *req, int seg)
102 {
103 unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
104 return (unsigned long)pfn_to_kaddr(pfn);
105 }
107 #define pending_handle(_req, _seg) \
108 (pending_grant_handles[vaddr_pagenr(_req, _seg)])
110 static pending_req_t* alloc_req(void)
111 {
112 pending_req_t *req = NULL;
113 unsigned long flags;
115 spin_lock_irqsave(&pending_free_lock, flags);
116 if (!list_empty(&pending_free)) {
117 req = list_entry(pending_free.next, pending_req_t, free_list);
118 list_del(&req->free_list);
119 }
120 spin_unlock_irqrestore(&pending_free_lock, flags);
121 return req;
122 }
124 static void free_req(pending_req_t *req)
125 {
126 unsigned long flags;
127 int was_empty;
129 spin_lock_irqsave(&pending_free_lock, flags);
130 was_empty = list_empty(&pending_free);
131 list_add(&req->free_list, &pending_free);
132 spin_unlock_irqrestore(&pending_free_lock, flags);
133 if (was_empty)
134 wake_up(&pending_free_wq);
135 }
137 static inline void add_req_to_submitting_list(struct usbstub *stub, pending_req_t *pending_req)
138 {
139 unsigned long flags;
141 spin_lock_irqsave(&stub->submitting_lock, flags);
142 list_add_tail(&pending_req->urb_list, &stub->submitting_list);
143 spin_unlock_irqrestore(&stub->submitting_lock, flags);
144 }
146 static inline void remove_req_from_submitting_list(struct usbstub *stub, pending_req_t *pending_req)
147 {
148 unsigned long flags;
150 spin_lock_irqsave(&stub->submitting_lock, flags);
151 list_del_init(&pending_req->urb_list);
152 spin_unlock_irqrestore(&stub->submitting_lock, flags);
153 }
155 void usbbk_unlink_urbs(struct usbstub *stub)
156 {
157 pending_req_t *req, *tmp;
158 unsigned long flags;
160 spin_lock_irqsave(&stub->submitting_lock, flags);
161 list_for_each_entry_safe(req, tmp, &stub->submitting_list, urb_list) {
162 usb_unlink_urb(req->urb);
163 }
164 spin_unlock_irqrestore(&stub->submitting_lock, flags);
165 }
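/*
 * Unmap every grant-mapped segment page of a finished request and free
 * its segment descriptor array.
 */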
167 static void fast_flush_area(pending_req_t *pending_req)
168 {
169 struct gnttab_unmap_grant_ref unmap[USBIF_MAX_SEGMENTS_PER_REQUEST];
170 unsigned int i, nr_segs, invcount = 0;
171 grant_handle_t handle;
172 int ret;
174 nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;
176 if (nr_segs) {
177 for (i = 0; i < nr_segs; i++) {
178 handle = pending_handle(pending_req, i);
179 if (handle == USBBACK_INVALID_HANDLE)
180 continue;
181 gnttab_set_unmap_op(&unmap[invcount], vaddr(pending_req, i),
182 GNTMAP_host_map, handle);
183 pending_handle(pending_req, i) = USBBACK_INVALID_HANDLE;
184 invcount++;
185 }
187 ret = HYPERVISOR_grant_table_op(
188 GNTTABOP_unmap_grant_ref, unmap, invcount);
189 BUG_ON(ret);
191 kfree(pending_req->seg);
192 }
194 return;
195 }
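/*
 * URB data lives in a contiguous bounce buffer; these helpers copy
 * between that buffer and the scattered, grant-mapped frontend pages
 * described by pending_req->seg[].
 */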
197 static void copy_buff_to_pages(void *buff, pending_req_t *pending_req,
198 int start, int nr_pages)
199 {
200 unsigned long copied = 0;
201 int i;
203 for (i = start; i < start + nr_pages; i++) {
204 memcpy((void *) vaddr(pending_req, i) + pending_req->seg[i].offset,
205 buff + copied,
206 pending_req->seg[i].length);
207 copied += pending_req->seg[i].length;
208 }
209 }
211 static void copy_pages_to_buff(void *buff, pending_req_t *pending_req,
212 int start, int nr_pages)
213 {
214 unsigned long copied = 0;
215 int i;
217 for (i = start; i < start + nr_pages; i++) {
218 memcpy(buff + copied,
219 (void *) vaddr(pending_req, i) + pending_req->seg[i].offset,
220 pending_req->seg[i].length);
221 copied += pending_req->seg[i].length;
222 }
223 }
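/*
 * Allocate the URB for a request, plus DMA-consistent bounce buffers
 * for the transfer data and, for control transfers, the setup packet.
 */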
225 static int usbbk_alloc_urb(usbif_request_t *req, pending_req_t *pending_req)
226 {
227 int ret;
229 if (usb_pipeisoc(req->pipe))
230 pending_req->urb = usb_alloc_urb(req->u.isoc.number_of_packets, GFP_KERNEL);
231 else
232 pending_req->urb = usb_alloc_urb(0, GFP_KERNEL);
233 if (!pending_req->urb) {
234 printk(KERN_ERR "usbback: can't alloc urb\n");
235 ret = -ENOMEM;
236 goto fail;
237 }
239 if (req->buffer_length) {
240 pending_req->buffer = usb_buffer_alloc(pending_req->stub->udev,
241 req->buffer_length, GFP_KERNEL,
242 &pending_req->transfer_dma);
243 if (!pending_req->buffer) {
244 printk(KERN_ERR "usbback: can't alloc urb buffer\n");
245 ret = -ENOMEM;
246 goto fail_free_urb;
247 }
248 }
250 if (usb_pipecontrol(req->pipe)) {
251 pending_req->setup = usb_buffer_alloc(pending_req->stub->udev,
252 sizeof(struct usb_ctrlrequest), GFP_KERNEL,
253 &pending_req->setup_dma);
254 if (!pending_req->setup) {
255 printk(KERN_ERR "usbback: can't alloc usb_ctrlrequest\n");
256 ret = -ENOMEM;
257 goto fail_free_buffer;
258 }
259 }
261 return 0;
263 fail_free_buffer:
264 if (req->buffer_length)
265 usb_buffer_free(pending_req->stub->udev, req->buffer_length,
266 pending_req->buffer, pending_req->transfer_dma);
267 fail_free_urb:
268 usb_free_urb(pending_req->urb);
269 fail:
270 return ret;
271 }
273 static void usbbk_free_urb(struct urb *urb)
274 {
275 if (usb_pipecontrol(urb->pipe))
276 usb_buffer_free(urb->dev, sizeof(struct usb_ctrlrequest),
277 urb->setup_packet, urb->setup_dma);
278 if (urb->transfer_buffer_length)
279 usb_buffer_free(urb->dev, urb->transfer_buffer_length,
280 urb->transfer_buffer, urb->transfer_dma);
281 barrier();
282 usb_free_urb(urb);
283 }
285 static void usbbk_notify_work(usbif_t *usbif)
286 {
287 usbif->waiting_reqs = 1;
288 wake_up(&usbif->wq);
289 }
291 irqreturn_t usbbk_be_int(int irq, void *dev_id, struct pt_regs *regs)
292 {
293 usbbk_notify_work(dev_id);
294 return IRQ_HANDLED;
295 }
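/*
 * Write one response onto the shared ring and, if the frontend needs a
 * kick, notify it over the event channel.
 */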
297 static void usbbk_do_response(pending_req_t *pending_req, int32_t status,
298 int32_t actual_length, int32_t error_count, uint16_t start_frame)
299 {
300 usbif_t *usbif = pending_req->usbif;
301 usbif_response_t *ring_res;
302 unsigned long flags;
303 int notify;
305 spin_lock_irqsave(&usbif->ring_lock, flags);
306 ring_res = RING_GET_RESPONSE(&usbif->ring, usbif->ring.rsp_prod_pvt);
307 ring_res->id = pending_req->id;
308 ring_res->status = status;
309 ring_res->actual_length = actual_length;
310 ring_res->error_count = error_count;
311 ring_res->start_frame = start_frame;
312 usbif->ring.rsp_prod_pvt++;
313 barrier();
314 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&usbif->ring, notify);
315 spin_unlock_irqrestore(&usbif->ring_lock, flags);
317 if (notify)
318 notify_remote_via_irq(usbif->irq);
319 }
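/*
 * URB completion: copy IN data (and isochronous frame descriptors) back
 * to the frontend pages, unmap the grants, push the response, and
 * release the URB and the pending request.
 */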
321 static void usbbk_urb_complete(struct urb *urb, struct pt_regs *regs)
322 {
323 pending_req_t *pending_req = (pending_req_t *)urb->context;
325 if (usb_pipein(urb->pipe) && urb->status == 0 && urb->actual_length > 0)
326 copy_buff_to_pages(pending_req->buffer, pending_req,
327 0, pending_req->nr_buffer_segs);
329 if (usb_pipeisoc(urb->pipe))
330 copy_buff_to_pages(&urb->iso_frame_desc[0], pending_req,
331 pending_req->nr_buffer_segs, pending_req->nr_extra_segs);
333 barrier();
335 fast_flush_area(pending_req);
337 usbbk_do_response(pending_req, urb->status, urb->actual_length,
338 urb->error_count, urb->start_frame);
340 remove_req_from_submitting_list(pending_req->stub, pending_req);
342 barrier();
343 usbbk_free_urb(urb);
344 usbif_put(pending_req->usbif);
345 free_req(pending_req);
346 }
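/*
 * Grant-map the frontend's segment pages into this request's
 * preallocated page slots, record the grant handles, and validate that
 * every segment's offset/length stays within one page.  On failure the
 * already-mapped pages are torn down via fast_flush_area().
 */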
348 static int usbbk_gnttab_map(usbif_t *usbif,
349 usbif_request_t *req, pending_req_t *pending_req)
350 {
351 int i, ret;
352 unsigned int nr_segs;
353 uint32_t flags;
354 struct gnttab_map_grant_ref map[USBIF_MAX_SEGMENTS_PER_REQUEST];
356 nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;
358 if (nr_segs > USBIF_MAX_SEGMENTS_PER_REQUEST) {
359 printk(KERN_ERR "Bad number of segments in request\n");
360 ret = -EINVAL;
361 goto fail;
362 }
364 if (nr_segs) {
365 pending_req->seg = kmalloc(sizeof(struct pending_req_segment)
366 * nr_segs, GFP_KERNEL);
367 if (!pending_req->seg) {
368 ret = -ENOMEM;
369 goto fail;
370 }
372 if (pending_req->nr_buffer_segs) {
373 flags = GNTMAP_host_map;
374 if (usb_pipeout(req->pipe))
375 flags |= GNTMAP_readonly;
376 for (i = 0; i < pending_req->nr_buffer_segs; i++)
377 gnttab_set_map_op(&map[i], vaddr(
378 pending_req, i), flags,
379 req->seg[i].gref,
380 usbif->domid);
381 }
383 if (pending_req->nr_extra_segs) {
384 flags = GNTMAP_host_map;
385 for (i = req->nr_buffer_segs; i < nr_segs; i++)
386 gnttab_set_map_op(&map[i], vaddr(
387 pending_req, i), flags,
388 req->seg[i].gref,
389 usbif->domid);
390 }
392 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
393 map, nr_segs);
394 BUG_ON(ret);
396 for (i = 0; i < nr_segs; i++) {
397 if (unlikely(map[i].status != 0)) {
398 printk(KERN_ERR "usbback: invalid buffer -- could not remap it\n");
399 map[i].handle = USBBACK_INVALID_HANDLE;
400 ret |= 1;
401 }
403 pending_handle(pending_req, i) = map[i].handle;
405 if (ret)
406 continue;
408 set_phys_to_machine(__pa(vaddr(
409 pending_req, i)) >> PAGE_SHIFT,
410 FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
412 pending_req->seg[i].offset = req->seg[i].offset;
413 pending_req->seg[i].length = req->seg[i].length;
415 barrier();
417 if (pending_req->seg[i].offset >= PAGE_SIZE ||
418 pending_req->seg[i].length > PAGE_SIZE ||
419 pending_req->seg[i].offset + pending_req->seg[i].length > PAGE_SIZE)
420 ret |= 1;
421 }
423 if (ret)
424 goto fail_flush;
425 }
427 return 0;
429 fail_flush:
430 fast_flush_area(pending_req);
431 ret = -ENOMEM;
433 fail:
434 return ret;
435 }
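/*
 * Fill in the URB for the requested pipe type and point it at the
 * preallocated DMA buffers.
 */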
437 static void usbbk_init_urb(usbif_request_t *req, pending_req_t *pending_req)
438 {
439 unsigned int pipe;
440 struct usb_device *udev = pending_req->stub->udev;
441 struct urb *urb = pending_req->urb;
443 switch (usb_pipetype(req->pipe)) {
444 case PIPE_ISOCHRONOUS:
445 if (usb_pipein(req->pipe))
446 pipe = usb_rcvisocpipe(udev, usb_pipeendpoint(req->pipe));
447 else
448 pipe = usb_sndisocpipe(udev, usb_pipeendpoint(req->pipe));
450 urb->dev = udev;
451 urb->pipe = pipe;
452 urb->transfer_flags = req->transfer_flags;
453 urb->transfer_flags |= URB_ISO_ASAP;
454 urb->transfer_buffer = pending_req->buffer;
455 urb->transfer_buffer_length = req->buffer_length;
456 urb->complete = usbbk_urb_complete;
457 urb->context = pending_req;
458 urb->interval = req->u.isoc.interval;
459 urb->start_frame = req->u.isoc.start_frame;
460 urb->number_of_packets = req->u.isoc.number_of_packets;
462 break;
463 case PIPE_INTERRUPT:
464 if (usb_pipein(req->pipe))
465 pipe = usb_rcvintpipe(udev, usb_pipeendpoint(req->pipe));
466 else
467 pipe = usb_sndintpipe(udev, usb_pipeendpoint(req->pipe));
469 usb_fill_int_urb(urb, udev, pipe,
470 pending_req->buffer, req->buffer_length,
471 usbbk_urb_complete,
472 pending_req, req->u.intr.interval);
473 urb->transfer_flags = req->transfer_flags;
475 break;
476 case PIPE_CONTROL:
477 if (usb_pipein(req->pipe))
478 pipe = usb_rcvctrlpipe(udev, 0);
479 else
480 pipe = usb_sndctrlpipe(udev, 0);
482 usb_fill_control_urb(urb, udev, pipe,
483 (unsigned char *) pending_req->setup,
484 pending_req->buffer, req->buffer_length,
485 usbbk_urb_complete, pending_req);
486 memcpy(pending_req->setup, req->u.ctrl, 8);
487 urb->setup_dma = pending_req->setup_dma;
488 urb->transfer_flags = req->transfer_flags;
489 urb->transfer_flags |= URB_NO_SETUP_DMA_MAP;
491 break;
492 case PIPE_BULK:
493 if (usb_pipein(req->pipe))
494 pipe = usb_rcvbulkpipe(udev, usb_pipeendpoint(req->pipe));
495 else
496 pipe = usb_sndbulkpipe(udev, usb_pipeendpoint(req->pipe));
498 usb_fill_bulk_urb(urb, udev, pipe,
499 pending_req->buffer, req->buffer_length,
500 usbbk_urb_complete, pending_req);
501 urb->transfer_flags = req->transfer_flags;
503 break;
504 default:
505 break;
506 }
508 if (req->buffer_length) {
509 urb->transfer_dma = pending_req->transfer_dma;
510 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
511 }
512 }
514 struct set_interface_request {
515 pending_req_t *pending_req;
516 int interface;
517 int alternate;
518 struct work_struct work;
519 };
521 static void usbbk_set_interface_work(void *data)
522 {
523 struct set_interface_request *req = (struct set_interface_request *) data;
524 pending_req_t *pending_req = req->pending_req;
525 struct usb_device *udev = req->pending_req->stub->udev;
527 int ret;
529 usb_lock_device(udev);
530 ret = usb_set_interface(udev, req->interface, req->alternate);
531 usb_unlock_device(udev);
532 usb_put_dev(udev);
534 usbbk_do_response(pending_req, ret, 0, 0, 0);
535 usbif_put(pending_req->usbif);
536 free_req(pending_req);
537 kfree(req);
538 }
540 static int usbbk_set_interface(pending_req_t *pending_req, int interface, int alternate)
541 {
542 struct set_interface_request *req;
543 struct usb_device *udev = pending_req->stub->udev;
545 req = kmalloc(sizeof(*req), GFP_KERNEL);
546 if (!req)
547 return -ENOMEM;
548 req->pending_req = pending_req;
549 req->interface = interface;
550 req->alternate = alternate;
551 INIT_WORK(&req->work, usbbk_set_interface_work, req);
552 usb_get_dev(udev);
553 schedule_work(&req->work);
554 return 0;
555 }
557 struct clear_halt_request {
558 pending_req_t *pending_req;
559 int pipe;
560 struct work_struct work;
561 };
563 static void usbbk_clear_halt_work(void *data)
564 {
565 struct clear_halt_request *req = (struct clear_halt_request *) data;
566 pending_req_t *pending_req = req->pending_req;
567 struct usb_device *udev = req->pending_req->stub->udev;
568 int ret;
570 usb_lock_device(udev);
571 ret = usb_clear_halt(req->pending_req->stub->udev, req->pipe);
572 usb_unlock_device(udev);
573 usb_put_dev(udev);
575 usbbk_do_response(pending_req, ret, 0, 0, 0);
576 usbif_put(pending_req->usbif);
577 free_req(pending_req);
578 kfree(req);
579 }
581 static int usbbk_clear_halt(pending_req_t *pending_req, int pipe)
582 {
583 struct clear_halt_request *req;
584 struct usb_device *udev = pending_req->stub->udev;
586 req = kmalloc(sizeof(*req), GFP_KERNEL);
587 if (!req)
588 return -ENOMEM;
589 req->pending_req = pending_req;
590 req->pipe = pipe;
591 INIT_WORK(&req->work, usbbk_clear_halt_work, req);
593 usb_get_dev(udev);
594 schedule_work(&req->work);
595 return 0;
596 }
598 #if 0
599 struct port_reset_request {
600 pending_req_t *pending_req;
601 struct work_struct work;
602 };
604 static void usbbk_port_reset_work(void *data)
605 {
606 struct port_reset_request *req = (struct port_reset_request *) data;
607 pending_req_t *pending_req = req->pending_req;
608 struct usb_device *udev = pending_req->stub->udev;
609 int ret, ret_lock;
611 ret = ret_lock = usb_lock_device_for_reset(udev, NULL);
612 if (ret_lock >= 0) {
613 ret = usb_reset_device(udev);
614 if (ret_lock)
615 usb_unlock_device(udev);
616 }
617 usb_put_dev(udev);
619 usbbk_do_response(pending_req, ret, 0, 0, 0);
620 usbif_put(pending_req->usbif);
621 free_req(pending_req);
622 kfree(req);
623 }
625 static int usbbk_port_reset(pending_req_t *pending_req)
626 {
627 struct port_reset_request *req;
628 struct usb_device *udev = pending_req->stub->udev;
630 req = kmalloc(sizeof(*req), GFP_KERNEL);
631 if (!req)
632 return -ENOMEM;
634 req->pending_req = pending_req;
635 INIT_WORK(&req->work, usbbk_port_reset_work, req);
637 usb_get_dev(udev);
638 schedule_work(&req->work);
639 return 0;
640 }
641 #endif
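/*
 * The backend mirrors the guest-visible USB device addresses in
 * usbif->addr_table.  usbbk_set_address() moves a stub from its current
 * slot to the newly assigned one; a new address of 0 just clears the
 * old entry.
 */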
643 static void usbbk_set_address(usbif_t *usbif, struct usbstub *stub, int cur_addr, int new_addr)
644 {
645 unsigned long flags;
647 spin_lock_irqsave(&usbif->addr_lock, flags);
648 if (cur_addr)
649 usbif->addr_table[cur_addr] = NULL;
650 if (new_addr)
651 usbif->addr_table[new_addr] = stub;
652 stub->addr = new_addr;
653 spin_unlock_irqrestore(&usbif->addr_lock, flags);
654 }
656 struct usbstub *find_attached_device(usbif_t *usbif, int portnum)
657 {
658 struct usbstub *stub;
659 int found = 0;
660 unsigned long flags;
662 spin_lock_irqsave(&usbif->plug_lock, flags);
663 list_for_each_entry(stub, &usbif->plugged_devices, plugged_list) {
664 if (stub->id->portnum == portnum) {
665 found = 1;
666 break;
667 }
668 }
669 spin_unlock_irqrestore(&usbif->plug_lock, flags);
671 if (found)
672 return stub;
674 return NULL;
675 }
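/*
 * Control transfers need special handling: requests sent to the default
 * address (device 0) are routed to the not-yet-addressed device on that
 * port, and requests that change device state (SET_ADDRESS,
 * SET_INTERFACE, CLEAR_FEATURE(ENDPOINT_HALT)) are carried out in the
 * backend rather than forwarded verbatim.  Returns non-zero when the
 * request has been fully handled (or failed) here, 0 when it should
 * continue down the normal URB path.
 */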
677 static int check_and_submit_special_ctrlreq(usbif_t *usbif, usbif_request_t *req, pending_req_t *pending_req)
678 {
679 int devnum;
680 struct usbstub *stub = NULL;
681 struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *) req->u.ctrl;
682 int ret;
683 int done = 0;
685 devnum = usb_pipedevice(req->pipe);
687 /*
688 * When a device is first connected or reset, it has no address yet.
689 * In this initial state, the following requests are sent to device address 0:
690 *
691 * 1. GET_DESCRIPTOR (with descriptor type "DEVICE") is sent,
692 * so the OS learns what kind of device is connected.
693 *
694 * 2. SET_ADDRESS is sent, after which the device has its own address.
695 *
696 * In the next step, SET_CONFIGURATION is sent to the addressed device,
697 * and the device is finally ready to use.
698 */
699 if (unlikely(devnum == 0)) {
700 stub = find_attached_device(usbif, usbif_pipeportnum(req->pipe));
701 if (unlikely(!stub)) {
702 ret = -ENODEV;
703 goto fail_response;
704 }
706 switch (ctrl->bRequest) {
707 case USB_REQ_GET_DESCRIPTOR:
708 /*
709 * GET_DESCRIPTOR request to device #0.
710 * Fall through to the normal URB transfer path.
711 */
712 pending_req->stub = stub;
713 return 0;
714 break;
715 case USB_REQ_SET_ADDRESS:
716 /*
717 * SET_ADDRESS request to device #0.
718 * Add the attached device to addr_table.
719 */
720 {
721 __u16 addr = le16_to_cpu(ctrl->wValue);
722 usbbk_set_address(usbif, stub, 0, addr);
723 }
724 ret = 0;
725 goto fail_response;
726 break;
727 default:
728 ret = -EINVAL;
729 goto fail_response;
730 }
731 } else {
732 if (unlikely(!usbif->addr_table[devnum])) {
733 ret = -ENODEV;
734 goto fail_response;
735 }
736 pending_req->stub = usbif->addr_table[devnum];
737 }
739 /*
740 * Check special request
741 */
742 switch (ctrl->bRequest) {
743 case USB_REQ_SET_ADDRESS:
744 /*
745 * SET_ADDRESS request to addressed device.
746 * Change its address, or remove it from addr_table.
747 */
748 {
749 __u16 addr = le16_to_cpu(ctrl->wValue);
750 usbbk_set_address(usbif, pending_req->stub, devnum, addr);
751 }
752 ret = 0;
753 goto fail_response;
754 break;
755 #if 0
756 case USB_REQ_SET_CONFIGURATION:
757 /*
758 * linux 2.6.27 or later version only!
759 */
760 if (ctrl->bRequestType == USB_RECIP_DEVICE) {
761 __u16 config = le16_to_cpu(ctrl->wValue);
762 usb_driver_set_configuration(pending_req->stub->udev, config);
763 done = 1;
764 }
765 break;
766 #endif
767 case USB_REQ_SET_INTERFACE:
768 if (ctrl->bRequestType == USB_RECIP_INTERFACE) {
769 __u16 alt = le16_to_cpu(ctrl->wValue);
770 __u16 intf = le16_to_cpu(ctrl->wIndex);
771 usbbk_set_interface(pending_req, intf, alt);
772 done = 1;
773 }
774 break;
775 case USB_REQ_CLEAR_FEATURE:
776 if (ctrl->bRequestType == USB_RECIP_ENDPOINT
777 && ctrl->wValue == USB_ENDPOINT_HALT) {
778 int pipe;
779 int ep = le16_to_cpu(ctrl->wIndex) & 0x0f;
780 int dir = le16_to_cpu(ctrl->wIndex)
781 & USB_DIR_IN;
782 if (dir)
783 pipe = usb_rcvctrlpipe(pending_req->stub->udev, ep);
784 else
785 pipe = usb_sndctrlpipe(pending_req->stub->udev, ep);
786 usbbk_clear_halt(pending_req, pipe);
787 done = 1;
788 }
789 break;
790 #if 0 /* not tested yet */
791 case USB_REQ_SET_FEATURE:
792 if (ctrl->bRequestType == USB_RT_PORT) {
793 __u16 feat = le16_to_cpu(ctrl->wValue);
794 if (feat == USB_PORT_FEAT_RESET) {
795 usbbk_port_reset(pending_req);
796 done = 1;
797 }
798 }
799 break;
800 #endif
801 default:
802 break;
803 }
805 return done;
807 fail_response:
808 usbbk_do_response(pending_req, ret, 0, 0, 0);
809 usbif_put(usbif);
810 free_req(pending_req);
811 return 1;
812 }
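/*
 * Submission path for one ring request: intercept special control
 * requests, look up the target stub, allocate and initialise the URB,
 * grant-map the data segments, copy OUT data into the bounce buffer and
 * submit.  Any failure unwinds the partial state and pushes an error
 * response.
 */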
814 static void dispatch_request_to_pending_reqs(usbif_t *usbif,
815 usbif_request_t *req,
816 pending_req_t *pending_req)
817 {
818 int ret;
820 pending_req->id = req->id;
821 pending_req->usbif = usbif;
823 barrier();
825 /*
826 * TODO:
827 * receive unlink request and cancel the urb in backend
828 */
829 #if 0
830 if (unlikely(usb_pipeunlink(req->pipe))) {
832 }
833 #endif
835 usbif_get(usbif);
837 if (usb_pipecontrol(req->pipe)) {
838 if (check_and_submit_special_ctrlreq(usbif, req, pending_req))
839 return;
840 } else {
841 int devnum = usb_pipedevice(req->pipe);
842 if (unlikely(!usbif->addr_table[devnum])) {
843 ret = -ENODEV;
844 goto fail_response;
845 }
846 pending_req->stub = usbif->addr_table[devnum];
847 }
849 barrier();
851 ret = usbbk_alloc_urb(req, pending_req);
852 if (ret) {
853 ret = -ESHUTDOWN;
854 goto fail_response;
855 }
857 add_req_to_submitting_list(pending_req->stub, pending_req);
859 barrier();
861 usbbk_init_urb(req, pending_req);
863 barrier();
865 pending_req->nr_buffer_segs = req->nr_buffer_segs;
866 if (usb_pipeisoc(req->pipe))
867 pending_req->nr_extra_segs = req->u.isoc.nr_frame_desc_segs;
868 else
869 pending_req->nr_extra_segs = 0;
871 barrier();
873 ret = usbbk_gnttab_map(usbif, req, pending_req);
874 if (ret) {
875 printk(KERN_ERR "usbback: invalid buffer\n");
876 ret = -ESHUTDOWN;
877 goto fail_free_urb;
878 }
880 barrier();
882 if (usb_pipeout(req->pipe) && req->buffer_length)
883 copy_pages_to_buff(pending_req->buffer,
884 pending_req,
885 0,
886 pending_req->nr_buffer_segs);
887 if (usb_pipeisoc(req->pipe)) {
888 copy_pages_to_buff(&pending_req->urb->iso_frame_desc[0],
889 pending_req,
890 pending_req->nr_buffer_segs,
891 pending_req->nr_extra_segs);
892 }
894 barrier();
896 ret = usb_submit_urb(pending_req->urb, GFP_KERNEL);
897 if (ret) {
898 printk(KERN_ERR "usbback: failed submitting urb, error %d\n", ret);
899 ret = -ESHUTDOWN;
900 goto fail_flush_area;
901 }
902 return;
904 fail_flush_area:
905 fast_flush_area(pending_req);
906 fail_free_urb:
907 remove_req_from_submitting_list(pending_req->stub, pending_req);
908 barrier();
909 usbbk_free_urb(pending_req->urb);
910 fail_response:
911 usbbk_do_response(pending_req, ret, 0, 0, 0);
912 usbif_put(usbif);
913 free_req(pending_req);
914 }
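/*
 * Consume requests from the shared ring until it is empty or the
 * pending_req pool runs dry; returns non-zero if more work remains.
 */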
916 static int usbbk_start_submit_urb(usbif_t *usbif)
917 {
918 usbif_back_ring_t *usb_ring = &usbif->ring;
919 usbif_request_t *ring_req;
920 pending_req_t *pending_req;
921 RING_IDX rc, rp;
922 int more_to_do = 0;
924 rc = usb_ring->req_cons;
925 rp = usb_ring->sring->req_prod;
926 rmb();
928 while (rc != rp) {
929 if (RING_REQUEST_CONS_OVERFLOW(usb_ring, rc)) {
930 printk(KERN_WARNING "RING_REQUEST_CONS_OVERFLOW\n");
931 break;
932 }
934 pending_req = alloc_req();
935 if (NULL == pending_req) {
936 more_to_do = 1;
937 break;
938 }
940 ring_req = RING_GET_REQUEST(usb_ring, rc);
941 usb_ring->req_cons = ++rc;
943 dispatch_request_to_pending_reqs(usbif, ring_req,
944 pending_req);
945 }
947 RING_FINAL_CHECK_FOR_REQUESTS(&usbif->ring, more_to_do);
949 cond_resched();
951 return more_to_do;
952 }
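/*
 * Request-processing kernel thread for a usbif (cleared from
 * usbif->xenusbd on exit): sleeps until the frontend signals work and a
 * free pending_req is available, then drains the ring via
 * usbbk_start_submit_urb().
 */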
954 int usbbk_schedule(void *arg)
955 {
956 usbif_t *usbif = (usbif_t *)arg;
958 usbif_get(usbif);
960 while(!kthread_should_stop()) {
961 wait_event_interruptible(
962 usbif->wq,
963 usbif->waiting_reqs || kthread_should_stop());
964 wait_event_interruptible(
965 pending_free_wq,
966 !list_empty(&pending_free) || kthread_should_stop());
967 usbif->waiting_reqs = 0;
968 smp_mb();
970 if (usbbk_start_submit_urb(usbif))
971 usbif->waiting_reqs = 1;
972 }
974 usbif->xenusbd = NULL;
975 usbif_put(usbif);
977 return 0;
978 }
980 /*
981 * attach the grabbed device to usbif.
982 */
983 void usbbk_plug_device(usbif_t *usbif, struct usbstub *stub)
984 {
985 unsigned long flags;
987 spin_lock_irqsave(&usbif->plug_lock, flags);
988 list_add(&stub->plugged_list, &usbif->plugged_devices);
989 spin_unlock_irqrestore(&usbif->plug_lock, flags);
990 stub->plugged = 1;
991 stub->usbif = usbif;
992 }
994 /*
995 * detach the grabbed device from usbif.
996 */
997 void usbbk_unplug_device(usbif_t *usbif, struct usbstub *stub)
998 {
999 unsigned long flags;
1001 if (stub->addr)
1002 usbbk_set_address(usbif, stub, stub->addr, 0);
1003 spin_lock_irqsave(&usbif->plug_lock, flags);
1004 list_del(&stub->plugged_list);
1005 spin_unlock_irqrestore(&usbif->plug_lock, flags);
1006 stub->plugged = 0;
1007 stub->usbif = NULL;
1008 }
1010 void detach_device_without_lock(usbif_t *usbif, struct usbstub *stub)
1011 {
1012 if (stub->addr)
1013 usbbk_set_address(usbif, stub, stub->addr, 0);
1014 list_del(&stub->plugged_list);
1015 stub->plugged = 0;
1016 stub->usbif = NULL;
1017 }
1019 static int __init usbback_init(void)
1020 {
1021 int i, mmap_pages;
1023 if (!is_running_on_xen())
1024 return -ENODEV;
1026 if (usbstub_init())
1027 return -ENODEV;
1029 mmap_pages = usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST;
1030 pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
1031 usbif_reqs, GFP_KERNEL);
1032 pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
1033 mmap_pages, GFP_KERNEL);
1034 pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);
1036 if (!pending_reqs || !pending_grant_handles || !pending_pages)
1037 goto out_of_memory;
1039 for (i = 0; i < mmap_pages; i++)
1040 pending_grant_handles[i] = USBBACK_INVALID_HANDLE;
1042 memset(pending_reqs, 0, sizeof(pending_reqs[0]) * usbif_reqs);
1043 INIT_LIST_HEAD(&pending_free);
1045 for (i = 0; i < usbif_reqs; i++) {
1046 list_add_tail(&pending_reqs[i].free_list, &pending_free);
1047 }
1049 usbback_xenbus_init();
1051 return 0;
1053 out_of_memory:
1054 kfree(pending_reqs);
1055 kfree(pending_grant_handles);
1056 free_empty_pages_and_pagevec(pending_pages, mmap_pages);
1057 printk("%s: out of memory\n", __FUNCTION__);
1058 return -ENOMEM;
1059 }
1061 static void __exit usbback_exit(void)
1062 {
1063 usbback_xenbus_exit();
1064 usbstub_exit();
1065 kfree(pending_reqs);
1066 kfree(pending_grant_handles);
1067 free_empty_pages_and_pagevec(pending_pages, usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST);
1068 }
1070 module_init(usbback_init);
1071 module_exit(usbback_exit);
1073 MODULE_AUTHOR("");
1074 MODULE_DESCRIPTION("Xen USB backend driver (usbback)");
1075 MODULE_LICENSE("Dual BSD/GPL");