ia64/linux-2.6.18-xen.hg

diff drivers/xen/usbback/usbback.c @ 829:f799db0570f2

PVUSB: backend driver

Signed-off-by: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Mar 18 11:43:24 2009 +0000 (2009-03-18)
parents
children 4c7eb2e71e9d
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/drivers/xen/usbback/usbback.c	Wed Mar 18 11:43:24 2009 +0000
     1.3 @@ -0,0 +1,1075 @@
     1.4 +/*
     1.5 + * usbback.c
     1.6 + *
     1.7 + * Xen USB backend driver
     1.8 + *
     1.9 + * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
    1.10 + * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
    1.11 + *
    1.12 + * This program is free software; you can redistribute it and/or modify
    1.13 + * it under the terms of the GNU General Public License as published by
    1.14 + * the Free Software Foundation; either version 2 of the License, or
    1.15 + * (at your option) any later version.
    1.16 + *
    1.17 + * This program is distributed in the hope that it will be useful,
    1.18 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
    1.19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    1.20 + * GNU General Public License for more details.
    1.21 + *
    1.22 + * You should have received a copy of the GNU General Public License
    1.23 + * along with this program; if not, see <http://www.gnu.org/licenses/>.
    1.24 + *
    1.25 + * or,
    1.26 + *
    1.27 + * When distributed separately from the Linux kernel or incorporated into
    1.28 + * other software packages, subject to the following license:
    1.29 + *
    1.30 + * Permission is hereby granted, free of charge, to any person obtaining a copy
    1.31 + * of this software and associated documentation files (the "Software"), to
    1.32 + * deal in the Software without restriction, including without limitation the
    1.33 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    1.34 + * sell copies of the Software, and to permit persons to whom the Software is
    1.35 + * furnished to do so, subject to the following conditions:
    1.36 + *
    1.37 + * The above copyright notice and this permission notice shall be included in
    1.38 + * all copies or substantial portions of the Software.
    1.39 + *
    1.40 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    1.41 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    1.42 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    1.43 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    1.44 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    1.45 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    1.46 + * DEALINGS IN THE SOFTWARE.
    1.47 + */
    1.48 +
    1.49 +#include <linux/mm.h>
    1.50 +#include <xen/balloon.h>
    1.51 +#include "usbback.h"
    1.52 +
    1.53 +#if 0
    1.54 +#include "../../usb/core/hub.h"
    1.55 +#endif
    1.56 +
    1.57 +int usbif_reqs = USBIF_BACK_MAX_PENDING_REQS;
    1.58 +module_param_named(reqs, usbif_reqs, int, 0);
    1.59 +MODULE_PARM_DESC(reqs, "Number of usbback requests to allocate");
    1.60 +
    1.61 +struct pending_req_segment {
    1.62 +	uint16_t offset;
    1.63 +	uint16_t length;
    1.64 +};
    1.65 +
    1.66 +typedef struct {
    1.67 +	usbif_t *usbif;
    1.68 +
    1.69 +	uint16_t id; /* request id */
    1.70 +
    1.71 +	struct usbstub *stub;
    1.72 +	struct list_head urb_list;
    1.73 +
    1.74 +	/* urb */
    1.75 +	struct urb *urb;
    1.76 +	void *buffer;
    1.77 +	dma_addr_t transfer_dma;
    1.78 +	struct usb_ctrlrequest *setup;
    1.79 +	dma_addr_t setup_dma;
    1.80 +
    1.81 +	/* request segments */
    1.82 +	uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
    1.83 +	uint16_t nr_extra_segs; /* number of iso_frame_desc segments (ISO) */
    1.84 +	struct pending_req_segment *seg;
    1.85 +
    1.86 +	struct list_head free_list;
    1.87 +} pending_req_t;
    1.88 +
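          +/*
          + * Pool of pre-allocated pending requests: free entries sit on
          + * pending_free under pending_free_lock, and usbbk_schedule() sleeps
          + * on pending_free_wq until an entry is returned by free_req().
          + */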
    1.89 +static pending_req_t *pending_reqs;
    1.90 +static struct list_head pending_free;
    1.91 +static DEFINE_SPINLOCK(pending_free_lock);
    1.92 +static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
    1.93 +
    1.94 +#define USBBACK_INVALID_HANDLE (~0)
    1.95 +
    1.96 +static struct page **pending_pages;
    1.97 +static grant_handle_t *pending_grant_handles;
    1.98 +
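          +/*
          + * Every pending request owns USBIF_MAX_SEGMENTS_PER_REQUEST slots in
          + * pending_pages and pending_grant_handles; vaddr_pagenr() converts a
          + * (request, segment) pair into an index into those arrays.
          + */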
    1.99 +static inline int vaddr_pagenr(pending_req_t *req, int seg)
   1.100 +{
   1.101 +	return (req - pending_reqs) * USBIF_MAX_SEGMENTS_PER_REQUEST + seg;
   1.102 +}
   1.103 +
   1.104 +static inline unsigned long vaddr(pending_req_t *req, int seg)
   1.105 +{
   1.106 +	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
   1.107 +	return (unsigned long)pfn_to_kaddr(pfn);
   1.108 +}
   1.109 +
   1.110 +#define pending_handle(_req, _seg) \
   1.111 +	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
   1.112 +
   1.113 +static pending_req_t* alloc_req(void)
   1.114 +{
   1.115 +	pending_req_t *req = NULL;
   1.116 +	unsigned long flags;
   1.117 +
   1.118 +	spin_lock_irqsave(&pending_free_lock, flags);
   1.119 +	if (!list_empty(&pending_free)) {
   1.120 +		req = list_entry(pending_free.next, pending_req_t, free_list);
   1.121 +		list_del(&req->free_list);
   1.122 +	}
   1.123 +	spin_unlock_irqrestore(&pending_free_lock, flags);
   1.124 +	return req;
   1.125 +}
   1.126 +
   1.127 +static void free_req(pending_req_t *req)
   1.128 +{
   1.129 +	unsigned long flags;
   1.130 +	int was_empty;
   1.131 +
   1.132 +	spin_lock_irqsave(&pending_free_lock, flags);
   1.133 +	was_empty = list_empty(&pending_free);
   1.134 +	list_add(&req->free_list, &pending_free);
   1.135 +	spin_unlock_irqrestore(&pending_free_lock, flags);
   1.136 +	if (was_empty)
   1.137 +		wake_up(&pending_free_wq);
   1.138 +}
   1.139 +
   1.140 +static inline void add_req_to_submitting_list(struct usbstub *stub, pending_req_t *pending_req)
   1.141 +{
   1.142 +	unsigned long flags;
   1.143 +
   1.144 +	spin_lock_irqsave(&stub->submitting_lock, flags);
   1.145 +	list_add_tail(&pending_req->urb_list, &stub->submitting_list);
   1.146 +	spin_unlock_irqrestore(&stub->submitting_lock, flags);
   1.147 +}
   1.148 +
   1.149 +static inline void remove_req_from_submitting_list(struct usbstub *stub, pending_req_t *pending_req)
   1.150 +{
   1.151 +	unsigned long flags;
   1.152 +
   1.153 +	spin_lock_irqsave(&stub->submitting_lock, flags);
   1.154 +	list_del_init(&pending_req->urb_list);
   1.155 +	spin_unlock_irqrestore(&stub->submitting_lock, flags);
   1.156 +}
   1.157 +
   1.158 +void usbbk_unlink_urbs(struct usbstub *stub)
   1.159 +{
   1.160 +	pending_req_t *req, *tmp;
   1.161 +	unsigned long flags;
   1.162 +
   1.163 +	spin_lock_irqsave(&stub->submitting_lock, flags);
   1.164 +	list_for_each_entry_safe(req, tmp, &stub->submitting_list, urb_list) {
   1.165 +		usb_unlink_urb(req->urb);
   1.166 +	}
   1.167 +	spin_unlock_irqrestore(&stub->submitting_lock, flags);
   1.168 +}
   1.169 +
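          +/*
          + * Unmap every grant still mapped for this request and free the
          + * segment array allocated by usbbk_gnttab_map().
          + */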
   1.170 +static void fast_flush_area(pending_req_t *pending_req)
   1.171 +{
   1.172 +	struct gnttab_unmap_grant_ref unmap[USBIF_MAX_SEGMENTS_PER_REQUEST];
   1.173 +	unsigned int i, nr_segs, invcount = 0;
   1.174 +	grant_handle_t handle;
   1.175 +	int ret;
   1.176 +
   1.177 +	nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;
   1.178 +
   1.179 +	if (nr_segs) {
   1.180 +		for (i = 0; i < nr_segs; i++) {
   1.181 +			handle = pending_handle(pending_req, i);
   1.182 +			if (handle == USBBACK_INVALID_HANDLE)
   1.183 +				continue;
   1.184 +			gnttab_set_unmap_op(&unmap[invcount], vaddr(pending_req, i),
   1.185 +					    GNTMAP_host_map, handle);
   1.186 +			pending_handle(pending_req, i) = USBBACK_INVALID_HANDLE;
   1.187 +			invcount++;
   1.188 +		}
   1.189 +
   1.190 +		ret = HYPERVISOR_grant_table_op(
   1.191 +			GNTTABOP_unmap_grant_ref, unmap, invcount);
   1.192 +		BUG_ON(ret);
   1.193 +
   1.194 +		kfree(pending_req->seg);
   1.195 +	}
   1.196 +
   1.197 +	return;
   1.198 +}
   1.199 +
   1.200 +static void copy_buff_to_pages(void *buff, pending_req_t *pending_req,
   1.201 +		int start, int nr_pages)
   1.202 +{
   1.203 +	unsigned long copied = 0;
   1.204 +	int i;
   1.205 +
   1.206 +	for (i = start; i < start + nr_pages; i++) {
   1.207 +		memcpy((void *) vaddr(pending_req, i) + pending_req->seg[i].offset,
   1.208 +			buff + copied,
   1.209 +			pending_req->seg[i].length);
   1.210 +		copied += pending_req->seg[i].length;
   1.211 +	}
   1.212 +}
   1.213 +
   1.214 +static void copy_pages_to_buff(void *buff, pending_req_t *pending_req,
   1.215 +		int start, int nr_pages)
   1.216 +{
   1.217 +	unsigned long copied = 0;
   1.218 +	int i;
   1.219 +
   1.220 +	for (i = start; i < start + nr_pages; i++) {
   1.221 +		memcpy(buff + copied,
   1.222 +			(void *) vaddr(pending_req, i) + pending_req->seg[i].offset,
   1.223 +			pending_req->seg[i].length);
   1.224 +		copied += pending_req->seg[i].length;
   1.225 +	}
   1.226 +}
   1.227 +
   1.228 +static int usbbk_alloc_urb(usbif_request_t *req, pending_req_t *pending_req)
   1.229 +{
   1.230 +	int ret;
   1.231 +
   1.232 +	if (usb_pipeisoc(req->pipe))
   1.233 +		pending_req->urb = usb_alloc_urb(req->u.isoc.number_of_packets, GFP_KERNEL);
   1.234 +	else
   1.235 +		pending_req->urb = usb_alloc_urb(0, GFP_KERNEL);
   1.236 +	if (!pending_req->urb) {
   1.237 +		printk(KERN_ERR "usbback: can't alloc urb\n");
   1.238 +		ret = -ENOMEM;
   1.239 +		goto fail;
   1.240 +	}
   1.241 +
   1.242 +	if (req->buffer_length) {
   1.243 +		pending_req->buffer = usb_buffer_alloc(pending_req->stub->udev,
   1.244 +				req->buffer_length, GFP_KERNEL,
   1.245 +				&pending_req->transfer_dma);
   1.246 +		if (!pending_req->buffer) {
   1.247 +			printk(KERN_ERR "usbback: can't alloc urb buffer\n");
   1.248 +			ret = -ENOMEM;
   1.249 +			goto fail_free_urb;
   1.250 +		}
   1.251 +	}
   1.252 +
   1.253 +	if (usb_pipecontrol(req->pipe)) {
   1.254 +		pending_req->setup = usb_buffer_alloc(pending_req->stub->udev,
   1.255 +				sizeof(struct usb_ctrlrequest), GFP_KERNEL,
   1.256 +				&pending_req->setup_dma);
   1.257 +		if (!pending_req->setup) {
   1.258 +			printk(KERN_ERR "usbback: can't alloc usb_ctrlrequest\n");
   1.259 +			ret = -ENOMEM;
   1.260 +			goto fail_free_buffer;
   1.261 +		}
   1.262 +	}
   1.263 +
   1.264 +	return 0;
   1.265 +
   1.266 +fail_free_buffer:
   1.267 +	if (req->buffer_length)
   1.268 +		usb_buffer_free(pending_req->stub->udev, req->buffer_length,
   1.269 +				pending_req->buffer, pending_req->transfer_dma);
   1.270 +fail_free_urb:
   1.271 +	usb_free_urb(pending_req->urb);
   1.272 +fail:
   1.273 +	return ret;
   1.274 +}
   1.275 +
   1.276 +static void usbbk_free_urb(struct urb *urb)
   1.277 +{
   1.278 +	if (usb_pipecontrol(urb->pipe))
   1.279 +		usb_buffer_free(urb->dev, sizeof(struct usb_ctrlrequest),
   1.280 +				urb->setup_packet, urb->setup_dma);
   1.281 +	if (urb->transfer_buffer_length)
   1.282 +		usb_buffer_free(urb->dev, urb->transfer_buffer_length,
   1.283 +				urb->transfer_buffer, urb->transfer_dma);
   1.284 +	barrier();
   1.285 +	usb_free_urb(urb);
   1.286 +}
   1.287 +
   1.288 +static void usbbk_notify_work(usbif_t *usbif)
   1.289 +{
   1.290 +	usbif->waiting_reqs = 1;
   1.291 +	wake_up(&usbif->wq);
   1.292 +}
   1.293 +
   1.294 +irqreturn_t usbbk_be_int(int irq, void *dev_id, struct pt_regs *regs)
   1.295 +{
   1.296 +	usbbk_notify_work(dev_id);
   1.297 +	return IRQ_HANDLED;
   1.298 +}
   1.299 +
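          +/*
          + * Put a response on the shared ring and kick the frontend's event
          + * channel if a notification is required.
          + */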
   1.300 +static void usbbk_do_response(pending_req_t *pending_req, int32_t status,
   1.301 +					int32_t actual_length, int32_t error_count, uint16_t start_frame)
   1.302 +{
   1.303 +	usbif_t *usbif = pending_req->usbif;
   1.304 +	usbif_response_t *ring_res;
   1.305 +	unsigned long flags;
   1.306 +	int notify;
   1.307 +
   1.308 +	spin_lock_irqsave(&usbif->ring_lock, flags);
   1.309 +	ring_res = RING_GET_RESPONSE(&usbif->ring, usbif->ring.rsp_prod_pvt);
   1.310 +	ring_res->id = pending_req->id;
   1.311 +	ring_res->status = status;
   1.312 +	ring_res->actual_length = actual_length;
   1.313 +	ring_res->error_count = error_count;
   1.314 +	ring_res->start_frame = start_frame;
   1.315 +	usbif->ring.rsp_prod_pvt++;
   1.316 +	barrier();
   1.317 +	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&usbif->ring, notify);
   1.318 +	spin_unlock_irqrestore(&usbif->ring_lock, flags);
   1.319 +
   1.320 +	if (notify)
   1.321 +		notify_remote_via_irq(usbif->irq);
   1.322 +}
   1.323 +
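          +/*
          + * URB completion handler: copy IN data (and isochronous frame
          + * descriptors) back into the frontend's pages, unmap the grants and
          + * push the response.
          + */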
   1.324 +static void usbbk_urb_complete(struct urb *urb, struct pt_regs *regs)
   1.325 +{
   1.326 +	pending_req_t *pending_req = (pending_req_t *)urb->context;
   1.327 +
   1.328 +	if (usb_pipein(urb->pipe) && urb->status == 0 && urb->actual_length > 0)
   1.329 +		copy_buff_to_pages(pending_req->buffer, pending_req,
   1.330 +					0, pending_req->nr_buffer_segs);
   1.331 +
   1.332 +	if (usb_pipeisoc(urb->pipe))
   1.333 +		copy_buff_to_pages(&urb->iso_frame_desc[0], pending_req,
   1.334 +					pending_req->nr_buffer_segs, pending_req->nr_extra_segs);
   1.335 +
   1.336 +	barrier();
   1.337 +
   1.338 +	fast_flush_area(pending_req);
   1.339 +
   1.340 +	usbbk_do_response(pending_req, urb->status, urb->actual_length,
   1.341 +					urb->error_count, urb->start_frame);
   1.342 +
   1.343 +	remove_req_from_submitting_list(pending_req->stub, pending_req);
   1.344 +
   1.345 +	barrier();
   1.346 +	usbbk_free_urb(urb);
   1.347 +	usbif_put(pending_req->usbif);
   1.348 +	free_req(pending_req);
   1.349 +}
   1.350 +
   1.351 +static int usbbk_gnttab_map(usbif_t *usbif,
   1.352 +			usbif_request_t *req, pending_req_t *pending_req)
   1.353 +{
   1.354 +	int i, ret;
   1.355 +	unsigned int nr_segs;
   1.356 +	uint32_t flags;
   1.357 +	struct gnttab_map_grant_ref map[USBIF_MAX_SEGMENTS_PER_REQUEST];
   1.358 +
   1.359 +	nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;
   1.360 +
   1.361 +	if (nr_segs > USBIF_MAX_SEGMENTS_PER_REQUEST) {
   1.362 +		printk(KERN_ERR "Bad number of segments in request\n");
   1.363 +		ret = -EINVAL;
   1.364 +		goto fail;
   1.365 +	}
   1.366 +
   1.367 +	if (nr_segs) {
   1.368 +		pending_req->seg = kmalloc(sizeof(struct pending_req_segment)
   1.369 +				* nr_segs, GFP_KERNEL);
   1.370 +		if (!pending_req->seg) {
   1.371 +			ret = -ENOMEM;
   1.372 +			goto fail;
   1.373 +		}
   1.374 +
   1.375 +		if (pending_req->nr_buffer_segs) {
   1.376 +			flags = GNTMAP_host_map;
   1.377 +			if (usb_pipeout(req->pipe))
   1.378 +				flags |= GNTMAP_readonly;
   1.379 +			for (i = 0; i < pending_req->nr_buffer_segs; i++)
   1.380 +				gnttab_set_map_op(&map[i], vaddr(
   1.381 +						pending_req, i), flags,
   1.382 +						req->seg[i].gref,
   1.383 +						usbif->domid);
   1.384 +		}
   1.385 +
   1.386 +		if (pending_req->nr_extra_segs) {
   1.387 +			flags = GNTMAP_host_map;
   1.388 +			for (i = req->nr_buffer_segs; i < nr_segs; i++)
   1.389 +				gnttab_set_map_op(&map[i], vaddr(
   1.390 +						pending_req, i), flags,
   1.391 +						req->seg[i].gref,
   1.392 +						usbif->domid);
   1.393 +		}
   1.394 +
   1.395 +		ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
   1.396 +					map, nr_segs);
   1.397 +		BUG_ON(ret);
   1.398 +
   1.399 +		for (i = 0; i < nr_segs; i++) {
   1.400 +			if (unlikely(map[i].status != 0)) {
   1.401 +				printk(KERN_ERR "usbback: invalid buffer -- could not remap it\n");
   1.402 +				map[i].handle = USBBACK_INVALID_HANDLE;
   1.403 +				ret |= 1;
   1.404 +			}
   1.405 +
   1.406 +			pending_handle(pending_req, i) = map[i].handle;
   1.407 +
   1.408 +			if (ret)
   1.409 +				continue;
   1.410 +
   1.411 +			set_phys_to_machine(__pa(vaddr(
   1.412 +				pending_req, i)) >> PAGE_SHIFT,
   1.413 +				FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
   1.414 +
   1.415 +			pending_req->seg[i].offset = req->seg[i].offset;
   1.416 +			pending_req->seg[i].length = req->seg[i].length;
   1.417 +
   1.418 +			barrier();
   1.419 +
   1.420 +			if (pending_req->seg[i].offset >= PAGE_SIZE ||
   1.421 +					pending_req->seg[i].length > PAGE_SIZE ||
   1.422 +					pending_req->seg[i].offset + pending_req->seg[i].length > PAGE_SIZE)
   1.423 +					ret |= 1;
   1.424 +		}
   1.425 +
   1.426 +		if (ret)
   1.427 +			goto fail_flush;
   1.428 +	}
   1.429 +
   1.430 +	return 0;
   1.431 +
   1.432 +fail_flush:
   1.433 +	fast_flush_area(pending_req);
   1.434 +	ret = -ENOMEM;
   1.435 +
   1.436 +fail:
   1.437 +	return ret;
   1.438 +}
   1.439 +
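          +/*
          + * Initialize the URB for the pipe type (isochronous, interrupt,
          + * control or bulk) described by the ring request.
          + */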
   1.440 +static void usbbk_init_urb(usbif_request_t *req, pending_req_t *pending_req)
   1.441 +{
   1.442 +	unsigned int pipe;
   1.443 +	struct usb_device *udev = pending_req->stub->udev;
   1.444 +	struct urb *urb = pending_req->urb;
   1.445 +
   1.446 +	switch (usb_pipetype(req->pipe)) {
   1.447 +	case PIPE_ISOCHRONOUS:
   1.448 +		if (usb_pipein(req->pipe))
   1.449 +			pipe = usb_rcvisocpipe(udev, usb_pipeendpoint(req->pipe));
   1.450 +		else
   1.451 +			pipe = usb_sndisocpipe(udev, usb_pipeendpoint(req->pipe));
   1.452 +
   1.453 +		urb->dev = udev;
   1.454 +		urb->pipe = pipe;
   1.455 +		urb->transfer_flags = req->transfer_flags;
   1.456 +		urb->transfer_flags |= URB_ISO_ASAP;
   1.457 +		urb->transfer_buffer = pending_req->buffer;
   1.458 +		urb->transfer_buffer_length = req->buffer_length;
   1.459 +		urb->complete = usbbk_urb_complete;
   1.460 +		urb->context = pending_req;
   1.461 +		urb->interval = req->u.isoc.interval;
   1.462 +		urb->start_frame = req->u.isoc.start_frame;
   1.463 +		urb->number_of_packets = req->u.isoc.number_of_packets;
   1.464 +
   1.465 +		break;
   1.466 +	case PIPE_INTERRUPT:
   1.467 +		if (usb_pipein(req->pipe))
   1.468 +			pipe = usb_rcvintpipe(udev, usb_pipeendpoint(req->pipe));
   1.469 +		else
   1.470 +			pipe = usb_sndintpipe(udev, usb_pipeendpoint(req->pipe));
   1.471 +
   1.472 +		usb_fill_int_urb(urb, udev, pipe,
   1.473 +				pending_req->buffer, req->buffer_length,
   1.474 +				usbbk_urb_complete,
   1.475 +				pending_req, req->u.intr.interval);
   1.476 +		urb->transfer_flags = req->transfer_flags;
   1.477 +
   1.478 +		break;
   1.479 +	case PIPE_CONTROL:
   1.480 +		if (usb_pipein(req->pipe))
   1.481 +			pipe = usb_rcvctrlpipe(udev, 0);
   1.482 +		else
   1.483 +			pipe = usb_sndctrlpipe(udev, 0);
   1.484 +
   1.485 +		usb_fill_control_urb(urb, udev, pipe,
   1.486 +				(unsigned char *) pending_req->setup,
   1.487 +				pending_req->buffer, req->buffer_length,
   1.488 +				usbbk_urb_complete, pending_req);
   1.489 +		memcpy(pending_req->setup, req->u.ctrl, 8);
   1.490 +		urb->setup_dma = pending_req->setup_dma;
   1.491 +		urb->transfer_flags = req->transfer_flags;
   1.492 +		urb->transfer_flags |= URB_NO_SETUP_DMA_MAP;
   1.493 +
   1.494 +		break;
   1.495 +	case PIPE_BULK:
   1.496 +		if (usb_pipein(req->pipe))
   1.497 +			pipe = usb_rcvbulkpipe(udev, usb_pipeendpoint(req->pipe));
   1.498 +		else
   1.499 +			pipe = usb_sndbulkpipe(udev, usb_pipeendpoint(req->pipe));
   1.500 +
   1.501 +		usb_fill_bulk_urb(urb, udev, pipe,
   1.502 +				pending_req->buffer, req->buffer_length,
   1.503 +				usbbk_urb_complete, pending_req);
   1.504 +		urb->transfer_flags = req->transfer_flags;
   1.505 +
   1.506 +		break;
   1.507 +	default:
   1.508 +		break;
   1.509 +	}
   1.510 +
   1.511 +	if (req->buffer_length) {
   1.512 +		urb->transfer_dma = pending_req->transfer_dma;
   1.513 +		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
   1.514 +	}
   1.515 +}
   1.516 +
   1.517 +struct set_interface_request {
   1.518 +	pending_req_t *pending_req;
   1.519 +	int interface;
   1.520 +	int alternate;
   1.521 +	struct work_struct work;
   1.522 +};
   1.523 +
   1.524 +static void usbbk_set_interface_work(void *data)
   1.525 +{
   1.526 +	struct set_interface_request *req = (struct set_interface_request *) data;
   1.527 +	pending_req_t *pending_req = req->pending_req;
   1.528 +	struct usb_device *udev = req->pending_req->stub->udev;
   1.529 +
   1.530 +	int ret;
   1.531 +
   1.532 +	usb_lock_device(udev);
   1.533 +	ret = usb_set_interface(udev, req->interface, req->alternate);
   1.534 +	usb_unlock_device(udev);
   1.535 +	usb_put_dev(udev);
   1.536 +
   1.537 +	usbbk_do_response(pending_req, ret, 0, 0, 0);
   1.538 +	usbif_put(pending_req->usbif);
   1.539 +	free_req(pending_req);
   1.540 +	kfree(req);
   1.541 +}
   1.542 +
   1.543 +static int usbbk_set_interface(pending_req_t *pending_req, int interface, int alternate)
   1.544 +{
   1.545 +	struct set_interface_request *req;
   1.546 +	struct usb_device *udev = pending_req->stub->udev;
   1.547 +
   1.548 +	req = kmalloc(sizeof(*req), GFP_KERNEL);
   1.549 +	if (!req)
   1.550 +		return -ENOMEM;
   1.551 +	req->pending_req = pending_req;
   1.552 +	req->interface = interface;
   1.553 +	req->alternate = alternate;
   1.554 +	INIT_WORK(&req->work, usbbk_set_interface_work, req);
   1.555 +	usb_get_dev(udev);
   1.556 +	schedule_work(&req->work);
   1.557 +	return 0;
   1.558 +}
   1.559 +
   1.560 +struct clear_halt_request {
   1.561 +	pending_req_t *pending_req;
   1.562 +	int pipe;
   1.563 +	struct work_struct work;
   1.564 +};
   1.565 +
   1.566 +static void usbbk_clear_halt_work(void *data)
   1.567 +{
   1.568 +	struct clear_halt_request *req = (struct clear_halt_request *) data;
   1.569 +	pending_req_t *pending_req = req->pending_req;
   1.570 +	struct usb_device *udev = req->pending_req->stub->udev;
   1.571 +	int ret;
   1.572 +
   1.573 +	usb_lock_device(udev);
   1.574 +	ret = usb_clear_halt(req->pending_req->stub->udev, req->pipe);
   1.575 +	usb_unlock_device(udev);
   1.576 +	usb_put_dev(udev);
   1.577 +
   1.578 +	usbbk_do_response(pending_req, ret, 0, 0, 0);
   1.579 +	usbif_put(pending_req->usbif);
   1.580 +	free_req(pending_req);
   1.581 +	kfree(req);
   1.582 +}
   1.583 +
   1.584 +static int usbbk_clear_halt(pending_req_t *pending_req, int pipe)
   1.585 +{
   1.586 +	struct clear_halt_request *req;
   1.587 +	struct usb_device *udev = pending_req->stub->udev;
   1.588 +
   1.589 +	req = kmalloc(sizeof(*req), GFP_KERNEL);
   1.590 +	if (!req)
   1.591 +		return -ENOMEM;
   1.592 +	req->pending_req = pending_req;
   1.593 +	req->pipe = pipe;
   1.594 +	INIT_WORK(&req->work, usbbk_clear_halt_work, req);
   1.595 +
   1.596 +	usb_get_dev(udev);
   1.597 +	schedule_work(&req->work);
   1.598 +	return 0;
   1.599 +}
   1.600 +
   1.601 +#if 0
   1.602 +struct port_reset_request {
   1.603 +	pending_req_t *pending_req;
   1.604 +	struct work_struct work;
   1.605 +};
   1.606 +
   1.607 +static void usbbk_port_reset_work(void *data)
   1.608 +{
   1.609 +	struct port_reset_request *req = (struct port_reset_request *) data;
   1.610 +	pending_req_t *pending_req = req->pending_req;
   1.611 +	struct usb_device *udev = pending_req->stub->udev;
   1.612 +	int ret, ret_lock;
   1.613 +
   1.614 +	ret = ret_lock = usb_lock_device_for_reset(udev, NULL);
   1.615 +	if (ret_lock >= 0) {
   1.616 +		ret = usb_reset_device(udev);
   1.617 +		if (ret_lock)
   1.618 +			usb_unlock_device(udev);
   1.619 +	}
   1.620 +	usb_put_dev(udev);
   1.621 +
   1.622 +	usbbk_do_response(pending_req, ret, 0, 0, 0);
   1.623 +	usbif_put(pending_req->usbif);
   1.624 +	free_req(pending_req);
   1.625 +	kfree(req);
   1.626 +}
   1.627 +
   1.628 +static int usbbk_port_reset(pending_req_t *pending_req)
   1.629 +{
   1.630 +	struct port_reset_request *req;
   1.631 +	struct usb_device *udev = pending_req->stub->udev;
   1.632 +
   1.633 +	req = kmalloc(sizeof(*req), GFP_KERNEL);
   1.634 +	if (!req)
   1.635 +		return -ENOMEM;
   1.636 +
   1.637 +	req->pending_req = pending_req;
   1.638 +	INIT_WORK(&req->work, usbbk_port_reset_work, req);
   1.639 +
   1.640 +	usb_get_dev(udev);
   1.641 +	schedule_work(&req->work);
   1.642 +	return 0;
   1.643 +}
   1.644 +#endif
   1.645 +
   1.646 +static void usbbk_set_address(usbif_t *usbif, struct usbstub *stub, int cur_addr, int new_addr)
   1.647 +{
   1.648 +	unsigned long flags;
   1.649 +
   1.650 +	spin_lock_irqsave(&usbif->addr_lock, flags);
   1.651 +	if (cur_addr)
   1.652 +		usbif->addr_table[cur_addr] = NULL;
   1.653 +	if (new_addr)
   1.654 +		usbif->addr_table[new_addr] = stub;
   1.655 +	stub->addr = new_addr;
   1.656 +	spin_unlock_irqrestore(&usbif->addr_lock, flags);
   1.657 +}
   1.658 +
   1.659 +struct usbstub *find_attached_device(usbif_t *usbif, int portnum)
   1.660 +{
   1.661 +	struct usbstub *stub;
   1.662 +	int found = 0;
   1.663 +	unsigned long flags;
   1.664 +
   1.665 +	spin_lock_irqsave(&usbif->plug_lock, flags);
   1.666 +	list_for_each_entry(stub, &usbif->plugged_devices, plugged_list) {
   1.667 +		if (stub->id->portnum == portnum) {
   1.668 +			found = 1;
   1.669 +			break;
   1.670 +		}
   1.671 +	}
   1.672 +	spin_unlock_irqrestore(&usbif->plug_lock, flags);
   1.673 +
   1.674 +	if (found)
   1.675 +		return stub;
   1.676 +
   1.677 +	return NULL;
   1.678 +}
   1.679 +
   1.680 +static int check_and_submit_special_ctrlreq(usbif_t *usbif, usbif_request_t *req, pending_req_t *pending_req)
   1.681 +{
   1.682 +	int devnum;
   1.683 +	struct usbstub *stub = NULL;
   1.684 +	struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *) req->u.ctrl;
   1.685 +	int ret;
   1.686 +	int done = 0;
   1.687 +
   1.688 +	devnum = usb_pipedevice(req->pipe);
   1.689 +
    1.690 +	/*
    1.691 +	 * When a device is first connected or reset, it has no USB address.
    1.692 +	 * In this state, the following requests go to the default address (#0):
    1.693 +	 *
    1.694 +	 *  1. GET_DESCRIPTOR (with descriptor type "DEVICE") is sent,
    1.695 +	 *     so the OS learns what device is connected.
    1.696 +	 *
    1.697 +	 *  2. SET_ADDRESS is sent, after which the device has its address.
    1.698 +	 *
    1.699 +	 * Next, SET_CONFIGURATION is sent to the addressed device,
    1.700 +	 * which is then finally ready for use.
    1.701 +	 */
   1.702 +	if (unlikely(devnum == 0)) {
   1.703 +		stub = find_attached_device(usbif, usbif_pipeportnum(req->pipe));
   1.704 +		if (unlikely(!stub)) {
   1.705 +			ret = -ENODEV;
   1.706 +			goto fail_response;
   1.707 +		}
   1.708 +
   1.709 +		switch (ctrl->bRequest) {
   1.710 +		case USB_REQ_GET_DESCRIPTOR:
   1.711 +			/*
    1.712 +			 * A GET_DESCRIPTOR request to device #0:
    1.713 +			 * fall through to the normal URB transfer path.
   1.714 +			 */
   1.715 +			pending_req->stub = stub;
   1.716 +			return 0;
   1.717 +			break;
   1.718 +		case USB_REQ_SET_ADDRESS:
   1.719 +			/*
    1.720 +			 * A SET_ADDRESS request to device #0:
    1.721 +			 * add the attached device to addr_table.
   1.722 +			 */
   1.723 +			{
   1.724 +				__u16 addr = le16_to_cpu(ctrl->wValue);
   1.725 +				usbbk_set_address(usbif, stub, 0, addr);
   1.726 +			}
   1.727 +			ret = 0;
   1.728 +			goto fail_response;
   1.729 +			break;
   1.730 +		default:
   1.731 +			ret = -EINVAL;
   1.732 +			goto fail_response;
   1.733 +		}
   1.734 +	} else {
   1.735 +		if (unlikely(!usbif->addr_table[devnum])) {
   1.736 +			ret = -ENODEV;
   1.737 +			goto fail_response;
   1.738 +		}
   1.739 +		pending_req->stub = usbif->addr_table[devnum];
   1.740 +	}
   1.741 +
   1.742 +	/*
   1.743 +	 * Check special request
   1.744 +	 */
   1.745 +	switch (ctrl->bRequest) {
   1.746 +	case USB_REQ_SET_ADDRESS:
   1.747 +		/*
    1.748 +		 * A SET_ADDRESS request to an already addressed device:
    1.749 +		 * change the stored address or drop it from addr_table.
   1.750 +		 */
   1.751 +		{
   1.752 +			__u16 addr = le16_to_cpu(ctrl->wValue);
    1.753 +			usbbk_set_address(usbif, pending_req->stub, devnum, addr);
   1.754 +		}
   1.755 +		ret = 0;
   1.756 +		goto fail_response;
   1.757 +		break;
   1.758 +#if 0
   1.759 +	case USB_REQ_SET_CONFIGURATION:
   1.760 +		/*
   1.761 +		 * linux 2.6.27 or later version only!
   1.762 +		 */
    1.763 +		if (ctrl->bRequestType == USB_RECIP_DEVICE) {
   1.764 +			__u16 config = le16_to_cpu(ctrl->wValue);
   1.765 +			usb_driver_set_configuration(pending_req->stub->udev, config);
   1.766 +			done = 1;
   1.767 +		}
   1.768 +		break;
   1.769 +#endif
   1.770 +	case USB_REQ_SET_INTERFACE:
   1.771 +		if (ctrl->bRequestType == USB_RECIP_INTERFACE) {
   1.772 +			__u16 alt = le16_to_cpu(ctrl->wValue);
   1.773 +			__u16 intf = le16_to_cpu(ctrl->wIndex);
   1.774 +			usbbk_set_interface(pending_req, intf, alt);
   1.775 +			done = 1;
   1.776 +		}
   1.777 +		break;
   1.778 +	case USB_REQ_CLEAR_FEATURE:
   1.779 +		if (ctrl->bRequestType == USB_RECIP_ENDPOINT
   1.780 +			&& ctrl->wValue == USB_ENDPOINT_HALT) {
   1.781 +			int pipe;
   1.782 +			int ep = le16_to_cpu(ctrl->wIndex) & 0x0f;
   1.783 +			int dir = le16_to_cpu(ctrl->wIndex)
   1.784 +					& USB_DIR_IN;
   1.785 +			if (dir)
   1.786 +				pipe = usb_rcvctrlpipe(pending_req->stub->udev, ep);
   1.787 +			else
   1.788 +				pipe = usb_sndctrlpipe(pending_req->stub->udev, ep);
   1.789 +			usbbk_clear_halt(pending_req, pipe);
   1.790 +			done = 1;
   1.791 +		}
   1.792 +		break;
   1.793 +#if 0 /* not tested yet */
   1.794 +	case USB_REQ_SET_FEATURE:
   1.795 +		if (ctrl->bRequestType == USB_RT_PORT) {
   1.796 +			__u16 feat = le16_to_cpu(ctrl->wValue);
   1.797 +			if (feat == USB_PORT_FEAT_RESET) {
   1.798 +				usbbk_port_reset(pending_req);
   1.799 +				done = 1;
   1.800 +			}
   1.801 +		}
   1.802 +		break;
   1.803 +#endif
   1.804 +	default:
   1.805 +		break;
   1.806 +	}
   1.807 +
   1.808 +	return done;
   1.809 +
   1.810 +fail_response:
   1.811 +	usbbk_do_response(pending_req, ret, 0, 0, 0);
   1.812 +	usbif_put(usbif);
   1.813 +	free_req(pending_req);
   1.814 +	return 1;
   1.815 +}
   1.816 +
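          +/*
          + * Turn one ring request into a URB: resolve the target device,
          + * allocate and map the buffers, copy any OUT data from the
          + * frontend's pages and submit the URB to the USB core.
          + */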
   1.817 +static void dispatch_request_to_pending_reqs(usbif_t *usbif,
   1.818 +		usbif_request_t *req,
   1.819 +		pending_req_t *pending_req)
   1.820 +{
   1.821 +	int ret;
   1.822 +
   1.823 +	pending_req->id = req->id;
   1.824 +	pending_req->usbif = usbif;
   1.825 +
   1.826 +	barrier();
   1.827 +
   1.828 +	/*
   1.829 +	 * TODO:
    1.830 +	 * receive unlink requests and cancel the corresponding URBs in the backend
   1.831 +	 */
   1.832 +#if 0
   1.833 +	if (unlikely(usb_pipeunlink(req->pipe))) {
   1.834 +
   1.835 +	}
   1.836 +#endif
   1.837 +
   1.838 +	usbif_get(usbif);
   1.839 +
   1.840 +	if (usb_pipecontrol(req->pipe)) {
   1.841 +		if (check_and_submit_special_ctrlreq(usbif, req, pending_req))
   1.842 +			return;
   1.843 +	} else {
   1.844 +		int devnum = usb_pipedevice(req->pipe);
   1.845 +		if (unlikely(!usbif->addr_table[devnum])) {
   1.846 +			ret = -ENODEV;
   1.847 +			goto fail_response;
   1.848 +		}
   1.849 +		pending_req->stub = usbif->addr_table[devnum];
   1.850 +	}
   1.851 +
   1.852 +	barrier();
   1.853 +
   1.854 +	ret = usbbk_alloc_urb(req, pending_req);
   1.855 +	if (ret) {
   1.856 +		ret = -ESHUTDOWN;
   1.857 +		goto fail_response;
   1.858 +	}
   1.859 +
   1.860 +	add_req_to_submitting_list(pending_req->stub, pending_req);
   1.861 +
   1.862 +	barrier();
   1.863 +
   1.864 +	usbbk_init_urb(req, pending_req);
   1.865 +
   1.866 +	barrier();
   1.867 +
   1.868 +	pending_req->nr_buffer_segs = req->nr_buffer_segs;
   1.869 +	if (usb_pipeisoc(req->pipe))
   1.870 +		pending_req->nr_extra_segs = req->u.isoc.nr_frame_desc_segs;
   1.871 +	else
   1.872 +		pending_req->nr_extra_segs = 0;
   1.873 +
   1.874 +	barrier();
   1.875 +
   1.876 +	ret = usbbk_gnttab_map(usbif, req, pending_req);
   1.877 +	if (ret) {
   1.878 +		printk(KERN_ERR "usbback: invalid buffer\n");
   1.879 +		ret = -ESHUTDOWN;
   1.880 +		goto fail_free_urb;
   1.881 +	}
   1.882 +
   1.883 +	barrier();
   1.884 +
   1.885 +	if (usb_pipeout(req->pipe) && req->buffer_length)
   1.886 +		copy_pages_to_buff(pending_req->buffer,
   1.887 +					pending_req,
   1.888 +					0,
   1.889 +					pending_req->nr_buffer_segs);
   1.890 +	if (usb_pipeisoc(req->pipe)) {
   1.891 +		copy_pages_to_buff(&pending_req->urb->iso_frame_desc[0],
   1.892 +			pending_req,
   1.893 +			pending_req->nr_buffer_segs,
   1.894 +			pending_req->nr_extra_segs);
   1.895 +	}
   1.896 +
   1.897 +	barrier();
   1.898 +
   1.899 +	ret = usb_submit_urb(pending_req->urb, GFP_KERNEL);
   1.900 +	if (ret) {
   1.901 +		printk(KERN_ERR "usbback: failed submitting urb, error %d\n", ret);
   1.902 +		ret = -ESHUTDOWN;
   1.903 +		goto fail_flush_area;
   1.904 +	}
   1.905 +	return;
   1.906 +
   1.907 +fail_flush_area:
   1.908 +	fast_flush_area(pending_req);
   1.909 +fail_free_urb:
   1.910 +	remove_req_from_submitting_list(pending_req->stub, pending_req);
   1.911 +	barrier();
   1.912 +	usbbk_free_urb(pending_req->urb);
   1.913 +fail_response:
   1.914 +	usbbk_do_response(pending_req, ret, 0, 0, 0);
   1.915 +	usbif_put(usbif);
   1.916 +	free_req(pending_req);
   1.917 +}
   1.918 +
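          +/*
          + * Consume requests from the shared ring until it is empty or no
          + * pending_req entries are left; returns non-zero if more work
          + * remains to be done.
          + */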
   1.919 +static int usbbk_start_submit_urb(usbif_t *usbif)
   1.920 +{
   1.921 +	usbif_back_ring_t *usb_ring = &usbif->ring;
   1.922 +	usbif_request_t *ring_req;
   1.923 +	pending_req_t *pending_req;
   1.924 +	RING_IDX rc, rp;
   1.925 +	int more_to_do = 0;
   1.926 +
   1.927 +	rc = usb_ring->req_cons;
   1.928 +	rp = usb_ring->sring->req_prod;
   1.929 +	rmb();
   1.930 +
   1.931 +	while (rc != rp) {
   1.932 +		if (RING_REQUEST_CONS_OVERFLOW(usb_ring, rc)) {
   1.933 +			printk(KERN_WARNING "RING_REQUEST_CONS_OVERFLOW\n");
   1.934 +			break;
   1.935 +		}
   1.936 +
   1.937 +		pending_req = alloc_req();
   1.938 +		if (NULL == pending_req) {
   1.939 +			more_to_do = 1;
   1.940 +			break;
   1.941 +		}
   1.942 +
   1.943 +		ring_req = RING_GET_REQUEST(usb_ring, rc);
   1.944 +		usb_ring->req_cons = ++rc;
   1.945 +
   1.946 +		dispatch_request_to_pending_reqs(usbif, ring_req,
   1.947 +							pending_req);
   1.948 +	}
   1.949 +
   1.950 +	RING_FINAL_CHECK_FOR_REQUESTS(&usbif->ring, more_to_do);
   1.951 +
   1.952 +	cond_resched();
   1.953 +
   1.954 +	return more_to_do;
   1.955 +}
   1.956 +
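          +/*
          + * Per-usbif kernel thread: wait until the frontend posts requests
          + * and a free pending_req is available, then drain the ring.
          + */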
   1.957 +int usbbk_schedule(void *arg)
   1.958 +{
    1.959 +	usbif_t *usbif = (usbif_t *)arg;
    1.960 +
    1.961 +	usbif_get(usbif);
    1.962 +
    1.963 +	while (!kthread_should_stop()) {
    1.964 +		wait_event_interruptible(
    1.965 +				usbif->wq,
    1.966 +				usbif->waiting_reqs || kthread_should_stop());
    1.967 +		wait_event_interruptible(
    1.968 +				pending_free_wq,
    1.969 +				!list_empty(&pending_free) || kthread_should_stop());
    1.970 +		usbif->waiting_reqs = 0;
    1.971 +		smp_mb();
    1.972 +
    1.973 +		if (usbbk_start_submit_urb(usbif))
    1.974 +			usbif->waiting_reqs = 1;
    1.975 +	}
    1.976 +
    1.977 +	usbif->xenusbd = NULL;
    1.978 +	usbif_put(usbif);
    1.979 +
    1.980 +	return 0;
    1.981 +}
   1.982 +
   1.983 +/*
   1.984 + * attach the grabbed device to usbif.
   1.985 + */
   1.986 +void usbbk_plug_device(usbif_t *usbif, struct usbstub *stub)
   1.987 +{
   1.988 +	unsigned long flags;
   1.989 +
   1.990 +	spin_lock_irqsave(&usbif->plug_lock, flags);
   1.991 +	list_add(&stub->plugged_list, &usbif->plugged_devices);
   1.992 +	spin_unlock_irqrestore(&usbif->plug_lock, flags);
   1.993 +	stub->plugged = 1;
   1.994 +	stub->usbif = usbif;
   1.995 +}
   1.996 +
   1.997 +/*
   1.998 + * detach the grabbed device from usbif.
   1.999 + */
  1.1000 +void usbbk_unplug_device(usbif_t *usbif, struct usbstub *stub)
  1.1001 +{
  1.1002 +	unsigned long flags;
  1.1003 +
  1.1004 +	if (stub->addr)
  1.1005 +		usbbk_set_address(usbif, stub, stub->addr, 0);
  1.1006 +	spin_lock_irqsave(&usbif->plug_lock, flags);
  1.1007 +	list_del(&stub->plugged_list);
  1.1008 +	spin_unlock_irqrestore(&usbif->plug_lock, flags);
  1.1009 +	stub->plugged = 0;
  1.1010 +	stub->usbif = NULL;
  1.1011 +}
  1.1012 +
  1.1013 +void detach_device_without_lock(usbif_t *usbif, struct usbstub *stub)
  1.1014 +{
  1.1015 +	if (stub->addr)
  1.1016 +		usbbk_set_address(usbif, stub, stub->addr, 0);
  1.1017 +	list_del(&stub->plugged_list);
  1.1018 +	stub->plugged = 0;
  1.1019 +	stub->usbif = NULL;
  1.1020 +}
  1.1021 +
  1.1022 +static int __init usbback_init(void)
  1.1023 +{
  1.1024 +	int i, mmap_pages;
  1.1025 +
  1.1026 +	if (!is_running_on_xen())
  1.1027 +		return -ENODEV;
  1.1028 +
  1.1029 +	if (usbstub_init())
  1.1030 +		return -ENODEV;
  1.1031 +
  1.1032 +	mmap_pages = usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST;
  1.1033 +	pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
  1.1034 +			usbif_reqs, GFP_KERNEL);
  1.1035 +	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
  1.1036 +			mmap_pages, GFP_KERNEL);
  1.1037 +	pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);
  1.1038 +
  1.1039 +	if (!pending_reqs || !pending_grant_handles || !pending_pages)
  1.1040 +		goto out_of_memory;
  1.1041 +
  1.1042 +	for (i = 0; i < mmap_pages; i++)
  1.1043 +		pending_grant_handles[i] = USBBACK_INVALID_HANDLE;
  1.1044 +
   1.1045 +	memset(pending_reqs, 0, usbif_reqs * sizeof(pending_reqs[0]));
  1.1046 +	INIT_LIST_HEAD(&pending_free);
  1.1047 +
  1.1048 +	for (i = 0; i < usbif_reqs; i++) {
  1.1049 +		list_add_tail(&pending_reqs[i].free_list, &pending_free);
  1.1050 +	}
  1.1051 +
  1.1052 +	usbback_xenbus_init();
  1.1053 +
  1.1054 +	return 0;
  1.1055 +
  1.1056 + out_of_memory:
  1.1057 +	 kfree(pending_reqs);
  1.1058 +	 kfree(pending_grant_handles);
  1.1059 +	 free_empty_pages_and_pagevec(pending_pages, mmap_pages);
   1.1060 +	 printk(KERN_ERR "%s: out of memory\n", __FUNCTION__);
  1.1061 +	 return -ENOMEM;
  1.1062 +}
  1.1063 +
  1.1064 +static void __exit usbback_exit(void)
  1.1065 +{
  1.1066 +	usbback_xenbus_exit();
  1.1067 +	usbstub_exit();
  1.1068 +	kfree(pending_reqs);
  1.1069 +	kfree(pending_grant_handles);
  1.1070 +	free_empty_pages_and_pagevec(pending_pages, usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST);
  1.1071 +}
  1.1072 +
  1.1073 +module_init(usbback_init);
  1.1074 +module_exit(usbback_exit);
  1.1075 +
   1.1076 +MODULE_AUTHOR("Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>");
  1.1077 +MODULE_DESCRIPTION("Xen USB backend driver (usbback)");
  1.1078 +MODULE_LICENSE("Dual BSD/GPL");