linux-2.6-xen-sparse/drivers/xen/usbback/usbback.c @ 6552:a9873d384da4 (ia64/xen-unstable)

Merge.

author   adsharma@los-vmm.sc.intel.com
date     Thu Aug 25 12:24:48 2005 -0700 (2005-08-25)
parents  112d44270733 fa0754a9f64f
children dfaf788ab18c
/******************************************************************************
 * arch/xen/drivers/usbif/backend/main.c
 *
 * Backend for the Xen virtual USB driver - provides an abstraction of a
 * USB host controller to the corresponding frontend driver.
 *
 * by Mark Williamson
 * Copyright (c) 2004 Intel Research Cambridge
 * Copyright (c) 2004, 2005 Mark Williamson
 *
 * Based on arch/xen/drivers/blkif/backend/main.c
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 */
#include "common.h"

#include <linux/list.h>
#include <linux/usb.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/tqueue.h>
/*
 * This is rather arbitrary.
 */
#define MAX_PENDING_REQS 4
#define BATCH_PER_DOMAIN 1
static unsigned long mmap_vstart;
/* Needs to be sufficiently large that we can map the (large) buffers
 * the USB mass storage driver wants. */
#define MMAP_PAGES_PER_REQUEST \
    (128)
#define MMAP_PAGES \
    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)

#define MMAP_VADDR(_req,_seg)                        \
    (mmap_vstart +                                   \
     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
     ((_seg) * PAGE_SIZE))
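
/* Example: each pending request gets its own contiguous window of
 * MMAP_PAGES_PER_REQUEST pages, so MMAP_VADDR(2, 3) resolves to
 * mmap_vstart + (2 * 128 + 3) * PAGE_SIZE. */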
static spinlock_t owned_ports_lock;
LIST_HEAD(owned_ports);

/* A list of these structures is used to track ownership of physical USB
 * ports. */
typedef struct
{
    usbif_priv_t *usbif_priv;
    char path[16];
    int guest_port;
    int enabled;
    struct list_head list;
    unsigned long guest_address; /* The USB device address that has been
                                  * assigned by the guest. */
    int dev_present;             /* Is there a device present? */
    struct usb_device *dev;
    unsigned long ifaces;        /* What interfaces are present on this device? */
} owned_port_t;
/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it.  When the request completes, the specified
 * domain has a response queued for it, with the saved 'id' passed back.
 */
typedef struct {
    usbif_priv_t *usbif_priv;
    unsigned long id;
    int nr_pages;
    unsigned short operation;
    int status;
} pending_req_t;
/*
 * We can't allocate pending_req's in order, since they may complete out of
 * order. We therefore maintain an allocation ring. This ring also indicates
 * when enough work has been passed down -- at that point the allocation ring
 * will be empty.
 */
static pending_req_t pending_reqs[MAX_PENDING_REQS];
static unsigned char pending_ring[MAX_PENDING_REQS];
static spinlock_t pend_prod_lock;

/* NB. We use a different index type to differentiate from shared usb rings. */
typedef unsigned int PEND_RING_IDX;
#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
static PEND_RING_IDX pending_prod, pending_cons;
#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
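
/* pending_ring holds the indices of free slots in pending_reqs[]:
 * free_pending() produces at pending_prod, dispatch_usb_io() consumes at
 * pending_cons, so (pending_prod - pending_cons) slots are free and
 * NR_PENDING_REQS counts requests currently in flight.  MASK_PEND_IDX
 * relies on MAX_PENDING_REQS being a power of two. */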
static int do_usb_io_op(usbif_priv_t *usbif, int max_to_do);
static void make_response(usbif_priv_t *usbif, unsigned long id,
                          unsigned short op, int st, int inband,
                          unsigned long actual_length);
static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long port);
static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req);
static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid);
static owned_port_t *usbif_find_port(char *);
/******************************************************************
 * PRIVATE DEBUG FUNCTIONS
 */

#undef DEBUG
#ifdef DEBUG
static void dump_port(owned_port_t *p)
{
    printk(KERN_DEBUG "owned_port_t @ %p\n"
           "  usbif_priv @ %p\n"
           "  path: %s\n"
           "  guest_port: %d\n"
           "  guest_address: %ld\n"
           "  dev_present: %d\n"
           "  dev @ %p\n"
           "  ifaces: 0x%lx\n",
           p, p->usbif_priv, p->path, p->guest_port, p->guest_address,
           p->dev_present, p->dev, p->ifaces);
}
static void dump_request(usbif_request_t *req)
{
    printk(KERN_DEBUG "id = 0x%lx\n"
           "devnum %d\n"
           "endpoint 0x%x\n"
           "direction %d\n"
           "speed %d\n"
           "pipe_type 0x%x\n"
           "transfer_buffer 0x%lx\n"
           "length 0x%lx\n"
           "transfer_flags 0x%lx\n"
           "setup = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n"
           "iso_schedule = 0x%lx\n"
           "num_iso %ld\n",
           req->id, req->devnum, req->endpoint, req->direction, req->speed,
           req->pipe_type, req->transfer_buffer, req->length,
           req->transfer_flags, req->setup[0], req->setup[1], req->setup[2],
           req->setup[3], req->setup[4], req->setup[5], req->setup[6],
           req->setup[7], req->iso_schedule, req->num_iso);
}
static void dump_urb(struct urb *urb)
{
    printk(KERN_DEBUG "dumping urb @ %p\n", urb);

#define DUMP_URB_FIELD(name, format) \
    printk(KERN_DEBUG "  " # name " " format "\n", urb-> name)

    DUMP_URB_FIELD(pipe, "0x%x");
    DUMP_URB_FIELD(status, "%d");
    DUMP_URB_FIELD(transfer_flags, "0x%x");
    DUMP_URB_FIELD(transfer_buffer, "%p");
    DUMP_URB_FIELD(transfer_buffer_length, "%d");
    DUMP_URB_FIELD(actual_length, "%d");
}
static void dump_response(usbif_response_t *resp)
{
    printk(KERN_DEBUG "usbback: Sending response:\n"
           "  id = 0x%x\n"
           "  op = %d\n"
           "  status = %d\n"
           "  data = %d\n"
           "  length = %d\n",
           resp->id, resp->op, resp->status, resp->data, resp->length);
}

#else /* DEBUG */

#define dump_port(blah)     ((void)0)
#define dump_request(blah)  ((void)0)
#define dump_urb(blah)      ((void)0)
#define dump_response(blah) ((void)0)

#endif /* DEBUG */
/******************************************************************
 * MEMORY MANAGEMENT
 */
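
/* Tear down the foreign mappings for request 'idx' by zapping each PTE in
 * a single batched multicall; the UVMF_TLB_FLUSH|UVMF_ALL flags on the
 * final entry fold the global TLB flush into the same hypercall. */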
static void fast_flush_area(int idx, int nr_pages)
{
    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
    int i;

    for ( i = 0; i < nr_pages; i++ )
    {
        MULTI_update_va_mapping(mcl+i, MMAP_VADDR(idx, i),
                                __pte(0), 0);
    }

    mcl[nr_pages-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
    if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
        BUG();
}
/******************************************************************
 * USB INTERFACE SCHEDULER LIST MAINTENANCE
 */

static struct list_head usbio_schedule_list;
static spinlock_t usbio_schedule_list_lock;

static int __on_usbif_list(usbif_priv_t *up)
{
    return up->usbif_list.next != NULL;
}

void remove_from_usbif_list(usbif_priv_t *up)
{
    unsigned long flags;
    if ( !__on_usbif_list(up) ) return;
    spin_lock_irqsave(&usbio_schedule_list_lock, flags);
    if ( __on_usbif_list(up) )
    {
        list_del(&up->usbif_list);
        up->usbif_list.next = NULL;
        usbif_put(up);
    }
    spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
}
static void add_to_usbif_list_tail(usbif_priv_t *up)
{
    unsigned long flags;
    if ( __on_usbif_list(up) ) return;
    spin_lock_irqsave(&usbio_schedule_list_lock, flags);
    if ( !__on_usbif_list(up) && (up->status == CONNECTED) )
    {
        list_add_tail(&up->usbif_list, &usbio_schedule_list);
        usbif_get(up);
    }
    spin_unlock_irqrestore(&usbio_schedule_list_lock, flags);
}

void free_pending(int pending_idx)
{
    unsigned long flags;

    /* Free the pending request. */
    spin_lock_irqsave(&pend_prod_lock, flags);
    pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
    spin_unlock_irqrestore(&pend_prod_lock, flags);
}
/******************************************************************
 * COMPLETION CALLBACK -- Called as urb->complete()
 */

static void maybe_trigger_usbio_schedule(void);
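
/* URB completion handler: copy back the ISO schedule results (if any),
 * unmap the guest's pages, queue a response and recycle the pending_req
 * slot, then re-check the rings now that a slot has been freed. */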
static void __end_usb_io_op(struct urb *purb)
{
    pending_req_t *pending_req;
    int pending_idx;

    pending_req = purb->context;

    pending_idx = pending_req - pending_reqs;

    ASSERT(purb->actual_length <= purb->transfer_buffer_length);
    ASSERT(purb->actual_length <= pending_req->nr_pages * PAGE_SIZE);

    /* An error fails the entire request. */
    if ( purb->status )
    {
        printk(KERN_WARNING "URB @ %p failed. Status %d\n", purb, purb->status);
    }

    if ( usb_pipetype(purb->pipe) == 0 ) /* 0 == PIPE_ISOCHRONOUS */
    {
        int i;
        usbif_iso_t *sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, pending_req->nr_pages - 1);

        /* If we're dealing with an iso pipe, we need to copy back the schedule. */
        for ( i = 0; i < purb->number_of_packets; i++ )
        {
            sched[i].length = purb->iso_frame_desc[i].actual_length;
            ASSERT(sched[i].buffer_offset ==
                   purb->iso_frame_desc[i].offset);
            sched[i].status = purb->iso_frame_desc[i].status;
        }
    }

    fast_flush_area(pending_req - pending_reqs, pending_req->nr_pages);

    kfree(purb->setup_packet);

    make_response(pending_req->usbif_priv, pending_req->id,
                  pending_req->operation, pending_req->status, 0, purb->actual_length);
    usbif_put(pending_req->usbif_priv);

    usb_free_urb(purb);

    free_pending(pending_idx);

    rmb();

    /* Check for anything still waiting in the rings, having freed a request... */
    maybe_trigger_usbio_schedule();
}
/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static DECLARE_WAIT_QUEUE_HEAD(usbio_schedule_wait);
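
/* Scheduler kernel thread: sleep until some interface has queued work and
 * a pending_req slot is free, then round-robin the interfaces on the
 * schedule list, passing down at most BATCH_PER_DOMAIN requests each. */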
static int usbio_schedule(void *arg)
{
    DECLARE_WAITQUEUE(wq, current);

    usbif_priv_t *up;
    struct list_head *ent;

    daemonize();

    for ( ; ; )
    {
        /* Wait for work to do. */
        add_wait_queue(&usbio_schedule_wait, &wq);
        set_current_state(TASK_INTERRUPTIBLE);
        if ( (NR_PENDING_REQS == MAX_PENDING_REQS) ||
             list_empty(&usbio_schedule_list) )
            schedule();
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&usbio_schedule_wait, &wq);

        /* Queue up a batch of requests. */
        while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
                !list_empty(&usbio_schedule_list) )
        {
            ent = usbio_schedule_list.next;
            up = list_entry(ent, usbif_priv_t, usbif_list);
            usbif_get(up);
            remove_from_usbif_list(up);
            if ( do_usb_io_op(up, BATCH_PER_DOMAIN) )
                add_to_usbif_list_tail(up);
            usbif_put(up);
        }
    }
}
static void maybe_trigger_usbio_schedule(void)
{
    /*
     * Needed so that two processes, which together make the following
     * predicate true, don't both read stale values and evaluate the
     * predicate incorrectly.  Incredibly unlikely to stall the scheduler on
     * x86, but...
     */
    smp_mb();

    if ( !list_empty(&usbio_schedule_list) )
        wake_up(&usbio_schedule_wait);
}
/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */
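
/* Interrupt handler for the frontend's event channel: queue the interface
 * for servicing and poke the scheduler thread. */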
irqreturn_t usbif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
    usbif_priv_t *up = dev_id;

    smp_mb();

    add_to_usbif_list_tail(up);

    /* Will in fact /always/ trigger an io schedule in this case. */
    maybe_trigger_usbio_schedule();

    return IRQ_HANDLED;
}
/******************************************************************
 * DOWNWARD CALLS -- These interface with the usb-device layer proper.
 */
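
/* Consume up to max_to_do requests from this interface's shared ring and
 * dispatch each one.  Returns nonzero if requests remain on the ring, so
 * the caller knows to reschedule the interface. */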
static int do_usb_io_op(usbif_priv_t *up, int max_to_do)
{
    usbif_back_ring_t *usb_ring = &up->usb_ring;
    usbif_request_t *req;
    RING_IDX i, rp;
    int more_to_do = 0;

    rp = usb_ring->sring->req_prod;
    rmb(); /* Ensure we see queued requests up to 'rp'. */

    /* Take items off the comms ring, taking care not to overflow. */
    for ( i = usb_ring->req_cons;
          (i != rp) && !RING_REQUEST_CONS_OVERFLOW(usb_ring, i);
          i++ )
    {
        if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
        {
            more_to_do = 1;
            break;
        }

        req = RING_GET_REQUEST(usb_ring, i);

        switch ( req->operation )
        {
        case USBIF_OP_PROBE:
            dispatch_usb_probe(up, req->id, req->port);
            break;

        case USBIF_OP_IO:
            /* Assemble an appropriate URB. */
            dispatch_usb_io(up, req);
            break;

        case USBIF_OP_RESET:
            dispatch_usb_reset(up, req->port);
            break;

        default:
            DPRINTK("error: unknown USB io operation [%d]\n",
                    req->operation);
            make_response(up, req->id, req->operation, -EINVAL, 0, 0);
            break;
        }
    }

    usb_ring->req_cons = i;

    return more_to_do;
}
static owned_port_t *find_guest_port(usbif_priv_t *up, int port)
{
    unsigned long flags;
    struct list_head *l;

    spin_lock_irqsave(&owned_ports_lock, flags);
    list_for_each(l, &owned_ports)
    {
        owned_port_t *p = list_entry(l, owned_port_t, list);
        if ( p->usbif_priv == up && p->guest_port == port )
        {
            spin_unlock_irqrestore(&owned_ports_lock, flags);
            return p;
        }
    }
    spin_unlock_irqrestore(&owned_ports_lock, flags);

    return NULL;
}
static void dispatch_usb_reset(usbif_priv_t *up, unsigned long portid)
{
    owned_port_t *port = find_guest_port(up, portid);
    int ret = 0;

    /* Allowing the guest to actually reset the device causes more problems
     * than it's worth.  We just fake it out in software but we will do a real
     * reset when the interface is destroyed. */

    if ( port == NULL )
    {
        /* Guard against a reset request for a port we don't own. */
        printk(KERN_INFO "dispatch_usb_reset(): invalid port reset request "
               "(port %ld)\n", portid);
        make_response(up, 0, USBIF_OP_RESET, -EINVAL, 0, 0);
        return;
    }

    dump_port(port);

    port->guest_address = 0;
    /* If there's an attached device then the port is now enabled. */
    if ( port->dev_present )
        port->enabled = 1;
    else
        port->enabled = 0;

    make_response(up, 0, USBIF_OP_RESET, ret, 0, 0);
}
static void dispatch_usb_probe(usbif_priv_t *up, unsigned long id, unsigned long portid)
{
    owned_port_t *port = find_guest_port(up, portid);
    int ret;

    if ( port != NULL )
        ret = port->dev_present;
    else
    {
        ret = -EINVAL;
        printk(KERN_INFO "dispatch_usb_probe(): invalid port probe request "
               "(port %ld)\n", portid);
    }

    /* Probe result is sent back in-band.  Probes don't have an associated id
     * right now... */
    make_response(up, id, USBIF_OP_PROBE, ret, portid, 0);
}
/**
 * check_iso_schedule - safety check the isochronous schedule for an URB
 * @purb : the URB in question
 */
static int check_iso_schedule(struct urb *purb)
{
    int i;
    unsigned long total_length = 0;

    for ( i = 0; i < purb->number_of_packets; i++ )
    {
        struct usb_iso_packet_descriptor *desc = &purb->iso_frame_desc[i];

        if ( desc->offset >= purb->transfer_buffer_length
             || (desc->offset + desc->length) > purb->transfer_buffer_length )
            return -EINVAL;

        total_length += desc->length;

        if ( total_length > purb->transfer_buffer_length )
            return -EINVAL;
    }

    return 0;
}
owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req);
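
/* Build and submit a URB for a guest I/O request: emulate the control
 * transfers that affect device state (SET_ADDRESS / SET_CONFIGURATION /
 * SET_INTERFACE), map the guest's transfer buffer and optional ISO
 * schedule page into our address space, then hand the URB to the USB
 * core; __end_usb_io_op() completes the request asynchronously. */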
static void dispatch_usb_io(usbif_priv_t *up, usbif_request_t *req)
{
    unsigned long buffer_mach;
    int i = 0, offset = 0,
        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
    pending_req_t *pending_req;
    unsigned long remap_prot;
    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
    struct urb *purb = NULL;
    owned_port_t *port;
    unsigned char *setup;

    dump_request(req);

    if ( NR_PENDING_REQS == MAX_PENDING_REQS )
    {
        printk(KERN_WARNING "usbback: Max requests already queued. "
               "Giving up!\n");
        return;
    }

    port = find_port_for_request(up, req);

    if ( port == NULL )
    {
        printk(KERN_WARNING "No such device! (%d)\n", req->devnum);
        dump_request(req);

        make_response(up, req->id, req->operation, -ENODEV, 0, 0);
        return;
    }
    else if ( !port->dev_present )
    {
        /* In normal operation, we'll only get here if a device is unplugged
         * and the frontend hasn't noticed yet. */
        make_response(up, req->id, req->operation, -ENODEV, 0, 0);
        return;
    }

    setup = kmalloc(8, GFP_KERNEL);

    if ( setup == NULL )
        goto no_mem;

    /* Copy request out for safety. */
    memcpy(setup, req->setup, 8);
    if ( setup[0] == 0x0 && setup[1] == 0x5 )
    {
        /* To virtualise the USB address space, we need to intercept
         * set_address messages and emulate.  From the USB specification:
         * bmRequestType = 0x0;
         * bRequest = SET_ADDRESS (i.e. 0x5)
         * wValue = device address
         * wIndex = 0
         * wLength = 0
         * data = None
         */
        /* wValue is little-endian in the setup packet; convert to host byte
         * order before recording the guest-assigned address. */
        port->guest_address = le16_to_cpu(*(u16 *)(setup + 2));
        /* Make a successful response.  That was easy! */

        make_response(up, req->id, req->operation, 0, 0, 0);

        kfree(setup);
        return;
    }
    else if ( setup[0] == 0x0 && setup[1] == 0x9 )
    {
        /* The host kernel needs to know what device configuration is in use
         * because various error checks get confused otherwise.  We just do
         * configuration settings here, under controlled conditions.
         */

        /* Ignore configuration setting and hope that the host kernel
           did it right. */
        /* usb_set_configuration(port->dev, setup[2]); */

        make_response(up, req->id, req->operation, 0, 0, 0);

        kfree(setup);
        return;
    }
    else if ( setup[0] == 0x1 && setup[1] == 0xB )
    {
        /* The host kernel needs to know what device interface is in use
         * because various error checks get confused otherwise.  We just do
         * interface settings here, under controlled conditions.
         */
        usb_set_interface(port->dev, (setup[4] | setup[5] << 8),
                          (setup[2] | setup[3] << 8) );

        make_response(up, req->id, req->operation, 0, 0, 0);

        kfree(setup);
        return;
    }
    if ( (req->transfer_buffer - (req->transfer_buffer & PAGE_MASK)
          + req->length)
         > MMAP_PAGES_PER_REQUEST * PAGE_SIZE )
    {
        printk(KERN_WARNING "usbback: request of %lu bytes too large\n",
               req->length);
        make_response(up, req->id, req->operation, -EINVAL, 0, 0);
        kfree(setup);
        return;
    }

    buffer_mach = req->transfer_buffer;

    if ( buffer_mach == 0 )
        goto no_remap;

    ASSERT((req->length >> PAGE_SHIFT) <= MMAP_PAGES_PER_REQUEST);
    ASSERT(buffer_mach);

    /* Always map writeable for now. */
    remap_prot = _KERNPG_TABLE;
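
    /* Map the guest's buffer page-by-page into this request's window of
     * backend virtual address space, batching the foreign PTE updates into
     * a single multicall.  The phys-to-machine entries are tagged
     * FOREIGN_FRAME so the frames are recognised as belonging to the other
     * domain. */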
    for ( i = 0, offset = 0; offset < req->length;
          i++, offset += PAGE_SIZE )
    {
        MULTI_update_va_mapping_otherdomain(
            mcl+i, MMAP_VADDR(pending_idx, i),
            pfn_pte_ma((buffer_mach + offset) >> PAGE_SHIFT, remap_prot),
            0, up->domid);

        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
            FOREIGN_FRAME((buffer_mach + offset) >> PAGE_SHIFT);

        ASSERT(virt_to_mfn(MMAP_VADDR(pending_idx, i))
               == ((buffer_mach >> PAGE_SHIFT) + i));
    }

    if ( req->pipe_type == 0 && req->num_iso > 0 ) /* Maybe schedule ISO... */
    {
        /* Map in ISO schedule, if necessary. */
        MULTI_update_va_mapping_otherdomain(
            mcl+i, MMAP_VADDR(pending_idx, i),
            pfn_pte_ma(req->iso_schedule >> PAGE_SHIFT, remap_prot),
            0, up->domid);

        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
            FOREIGN_FRAME(req->iso_schedule >> PAGE_SHIFT);

        i++;
    }

    if ( unlikely(HYPERVISOR_multicall(mcl, i) != 0) )
        BUG();

    {
        int j;
        for ( j = 0; j < i; j++ )
        {
            if ( unlikely(mcl[j].result != 0) )
            {
                printk(KERN_WARNING
                       "invalid buffer %d -- could not remap it\n", j);
                fast_flush_area(pending_idx, i);
                goto bad_descriptor;
            }
        }
    }
 no_remap:

    ASSERT(i <= MMAP_PAGES_PER_REQUEST);
    ASSERT(i * PAGE_SIZE >= req->length);

    /* We have to do this because some things might complete out of order. */
    pending_req = &pending_reqs[pending_idx];
    pending_req->usbif_priv = up;
    pending_req->id = req->id;
    pending_req->operation = req->operation;
    pending_req->nr_pages = i;

    pending_cons++;

    usbif_get(up);
    /* Fill out an actual request for the USB layer. */
    purb = usb_alloc_urb(req->num_iso);

    if ( purb == NULL )
    {
        usbif_put(up);
        free_pending(pending_idx);
        goto no_mem;
    }

    purb->dev = port->dev;
    purb->context = pending_req;
    purb->transfer_buffer =
        (void *)(MMAP_VADDR(pending_idx, 0) + (buffer_mach & ~PAGE_MASK));
    if ( buffer_mach == 0 )
        purb->transfer_buffer = NULL;
    purb->complete = __end_usb_io_op;
    purb->transfer_buffer_length = req->length;
    purb->transfer_flags = req->transfer_flags;
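
    /* Reassemble the Linux 'pipe' bitfield from the request's decomposed
     * fields: direction in bit 7, device number in bits 8-14, endpoint in
     * bits 15-18, (low) speed in bit 26 and pipe type in bits 30-31.  Note
     * that we use the host-side device number, not the guest-assigned one. */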
    purb->pipe = 0;
    purb->pipe |= req->direction << 7;
    purb->pipe |= port->dev->devnum << 8;
    purb->pipe |= req->speed << 26;
    purb->pipe |= req->pipe_type << 30;
    purb->pipe |= req->endpoint << 15;

    purb->number_of_packets = req->num_iso;

    if ( purb->number_of_packets * sizeof(usbif_iso_t) > PAGE_SIZE )
        goto urb_error;

    /* Make sure there's always some kind of timeout: convert the guest's
     * millisecond value to jiffies, or fall back to 1000 jiffies. */
    purb->timeout = ( req->timeout > 0 ) ? (req->timeout * HZ) / 1000
                    : 1000;
    purb->setup_packet = setup;

    if ( req->pipe_type == 0 ) /* ISO */
    {
        int j;
        usbif_iso_t *iso_sched = (usbif_iso_t *)MMAP_VADDR(pending_idx, i - 1);

        /* If we're dealing with an iso pipe, we need to copy in a schedule. */
        for ( j = 0; j < purb->number_of_packets; j++ )
        {
            purb->iso_frame_desc[j].length = iso_sched[j].length;
            purb->iso_frame_desc[j].offset = iso_sched[j].buffer_offset;
            iso_sched[j].status = 0;
        }
    }

    if ( check_iso_schedule(purb) != 0 )
        goto urb_error;

    if ( usb_submit_urb(purb) != 0 )
        goto urb_error;

    return;
 urb_error:
    dump_urb(purb);
    usbif_put(up);
    free_pending(pending_idx);

 bad_descriptor:
    kfree(setup);
    if ( purb != NULL )
        usb_free_urb(purb);
    make_response(up, req->id, req->operation, -EINVAL, 0, 0);
    return;

 no_mem:
    if ( setup != NULL )
        kfree(setup);
    make_response(up, req->id, req->operation, -ENOMEM, 0, 0);
    return;
}
/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */
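
/* Queue a response on the shared ring and kick the frontend's event
 * channel.  The wmb() guarantees the response fields are visible to the
 * frontend before the producer index moves past them. */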
static void make_response(usbif_priv_t *up, unsigned long id,
                          unsigned short op, int st, int inband,
                          unsigned long length)
{
    usbif_response_t *resp;
    unsigned long flags;
    usbif_back_ring_t *usb_ring = &up->usb_ring;

    /* Place on the response ring for the relevant domain. */
    spin_lock_irqsave(&up->usb_ring_lock, flags);
    resp = RING_GET_RESPONSE(usb_ring, usb_ring->rsp_prod_pvt);
    resp->id = id;
    resp->operation = op;
    resp->status = st;
    resp->data = inband;
    resp->length = length;
    wmb(); /* Ensure other side can see the response fields. */

    dump_response(resp);

    usb_ring->rsp_prod_pvt++;
    RING_PUSH_RESPONSES(usb_ring);
    spin_unlock_irqrestore(&up->usb_ring_lock, flags);

    /* Kick the relevant domain. */
    notify_via_evtchn(up->evtchn);
}
/**
 * usbif_claim_port - claim devices on a port on behalf of guest
 *
 * Once completed, this will ensure that any device attached to that
 * port is claimed by this driver for use by the guest.
 */
int usbif_claim_port(usbif_be_claim_port_t *msg)
{
    owned_port_t *o_p;

    /* Sanity... */
    if ( usbif_find_port(msg->path) != NULL )
    {
        printk(KERN_WARNING "usbback: Attempted to claim USB port "
               "we already own!\n");
        return -EINVAL;
    }

    /* No need for a slab cache - this should be infrequent. */
    o_p = kmalloc(sizeof(owned_port_t), GFP_KERNEL);

    if ( o_p == NULL )
        return -ENOMEM;

    o_p->enabled = 0;
    o_p->usbif_priv = usbif_find(msg->domid);
    o_p->guest_port = msg->usbif_port;
    o_p->dev_present = 0;
    o_p->guest_address = 0; /* Default address. */

    strcpy(o_p->path, msg->path);

    spin_lock_irq(&owned_ports_lock);

    list_add(&o_p->list, &owned_ports);

    spin_unlock_irq(&owned_ports_lock);

    printk(KERN_INFO "usbback: Claimed USB port (%s) for %d.%d\n", o_p->path,
           msg->domid, msg->usbif_port);

    /* Force a reprobe for unclaimed devices. */
    usb_scan_devices();

    return 0;
}
owned_port_t *find_port_for_request(usbif_priv_t *up, usbif_request_t *req)
{
    unsigned long flags;
    struct list_head *port;

    /* I'm assuming this is not called from IRQ context - correct?  I think
     * it's probably only called in response to control messages or plug events
     * in the USB hub kernel thread, so should be OK. */
    spin_lock_irqsave(&owned_ports_lock, flags);
    list_for_each(port, &owned_ports)
    {
        owned_port_t *p = list_entry(port, owned_port_t, list);
        if ( p->usbif_priv == up && p->guest_address == req->devnum && p->enabled )
        {
            dump_port(p);

            spin_unlock_irqrestore(&owned_ports_lock, flags);
            return p;
        }
    }
    spin_unlock_irqrestore(&owned_ports_lock, flags);

    return NULL;
}
owned_port_t *__usbif_find_port(char *path)
{
    struct list_head *port;

    list_for_each(port, &owned_ports)
    {
        owned_port_t *p = list_entry(port, owned_port_t, list);
        if ( !strcmp(path, p->path) )
        {
            return p;
        }
    }

    return NULL;
}

owned_port_t *usbif_find_port(char *path)
{
    owned_port_t *ret;
    unsigned long flags;

    spin_lock_irqsave(&owned_ports_lock, flags);
    ret = __usbif_find_port(path);
    spin_unlock_irqrestore(&owned_ports_lock, flags);

    return ret;
}
static void *probe(struct usb_device *dev, unsigned iface,
                   const struct usb_device_id *id)
{
    owned_port_t *p;

    /* We don't care what the device is - if we own the port, we want it.  We
     * don't deal with device-specifics in this driver, so we don't care what
     * the device actually is ;-) */
    if ( (p = usbif_find_port(dev->devpath)) != NULL )
    {
        printk(KERN_INFO "usbback: claimed device attached to owned port\n");

        p->dev_present = 1;
        p->dev = dev;
        set_bit(iface, &p->ifaces);

        return p->usbif_priv;
    }
    else
        printk(KERN_INFO "usbback: hotplug for non-owned port (%s), ignoring\n",
               dev->devpath);

    return NULL;
}
static void disconnect(struct usb_device *dev, void *usbif)
{
    /* Note the device is removed so we can tell the guest when it probes. */
    owned_port_t *port = usbif_find_port(dev->devpath);
    if ( port == NULL ) /* Defensive: we should only be bound to owned ports. */
        return;
    port->dev_present = 0;
    port->dev = NULL;
    port->ifaces = 0;
}
struct usb_driver driver =
{
    .owner      = THIS_MODULE,
    .name       = "Xen USB Backend",
    .probe      = probe,
    .disconnect = disconnect,
    .id_table   = NULL,
};
/* __usbif_release_port - internal mechanics for releasing a port */
void __usbif_release_port(owned_port_t *p)
{
    int i;

    for ( i = 0; p->ifaces != 0; i++ )
        if ( p->ifaces & 1 << i )
        {
            usb_driver_release_interface(&driver, usb_ifnum_to_if(p->dev, i));
            clear_bit(i, &p->ifaces);
        }
    list_del(&p->list);

    /* Reset the real device.  We don't simulate disconnect / probe for other
     * drivers in this kernel because we assume the device is completely under
     * the control of ourselves (i.e. the guest!).  This should ensure that the
     * device is in a sane state for the next customer ;-) */

    /* MAW NB: we're not resetting the real device here.  This looks perfectly
     * valid to me but it causes memory corruption.  We seem to get away with not
     * resetting for now, although it'd be nice to have this tracked down. */
/*     if ( p->dev != NULL) */
/*         usb_reset_device(p->dev); */

    kfree(p);
}
/**
 * usbif_release_port - stop claiming devices on a port on behalf of guest
 */
void usbif_release_port(usbif_be_release_port_t *msg)
{
    owned_port_t *p;

    spin_lock_irq(&owned_ports_lock);
    p = __usbif_find_port(msg->path);
    if ( p != NULL ) /* Guard against releasing a port we never claimed. */
        __usbif_release_port(p);
    spin_unlock_irq(&owned_ports_lock);
}
void usbif_release_ports(usbif_priv_t *up)
{
    struct list_head *port, *tmp;
    unsigned long flags;

    spin_lock_irqsave(&owned_ports_lock, flags);
    list_for_each_safe(port, tmp, &owned_ports)
    {
        owned_port_t *p = list_entry(port, owned_port_t, list);
        if ( p->usbif_priv == up )
            __usbif_release_port(p);
    }
    spin_unlock_irqrestore(&owned_ports_lock, flags);
}
static int __init usbif_init(void)
{
    int i;
    struct page *page;

    if ( !(xen_start_info.flags & SIF_INITDOMAIN) &&
         !(xen_start_info.flags & SIF_USB_BE_DOMAIN) )
        return 0;

    page = balloon_alloc_empty_page_range(MMAP_PAGES);
    BUG_ON(page == NULL);
    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));

    pending_cons = 0;
    pending_prod = MAX_PENDING_REQS;
    memset(pending_reqs, 0, sizeof(pending_reqs));
    for ( i = 0; i < MAX_PENDING_REQS; i++ )
        pending_ring[i] = i;

    spin_lock_init(&pend_prod_lock);

    spin_lock_init(&owned_ports_lock);
    INIT_LIST_HEAD(&owned_ports);

    spin_lock_init(&usbio_schedule_list_lock);
    INIT_LIST_HEAD(&usbio_schedule_list);

    if ( kernel_thread(usbio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
        BUG();

    usbif_interface_init();

    usbif_ctrlif_init();

    usb_register(&driver);

    printk(KERN_INFO "Xen USB Backend Initialised\n");

    return 0;
}

__initcall(usbif_init);