ia64/linux-2.6.18-xen.hg: drivers/xen/usbfront/usbfront-q.c @ 845:4c7eb2e71e9d

pvusb: Fix license headers.

Author:        Keir Fraser <keir.fraser@citrix.com>
Date:          Tue Mar 31 11:11:23 2009 +0100
Signed-off-by: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>

/*
 * usbfront-q.c
 *
 * Xen USB Virtual Host Controller - RING operations.
 *
 * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
 * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * or, by your choice,
 *
 * When distributed separately from the Linux kernel or incorporated into
 * other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
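
/* Slab cache for per-URB private data (struct urb_priv). */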
struct kmem_cache *xenhcd_urbp_cachep;
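
/*
 * Allocate a urb_priv for @urb from the slab cache, link the two
 * together through urb->hcpriv, and mark the request id as unused (~0).
 */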
static struct urb_priv *alloc_urb_priv(struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_zalloc(xenhcd_urbp_cachep, GFP_ATOMIC);
	if (!urbp)
		return NULL;

	urbp->urb = urb;
	urb->hcpriv = urbp;
	urbp->req_id = ~0;
	INIT_LIST_HEAD(&urbp->list);

	return urbp;
}
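
/* Unhook the urb_priv from its URB and return it to the slab cache. */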
static void free_urb_priv(struct urb_priv *urbp)
{
	urbp->urb->hcpriv = NULL;
	kmem_cache_free(xenhcd_urbp_cachep, urbp);
}
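
/*
 * Pop an unused request id from the freelist that is threaded through
 * the shadow ring entries (shadow_free points at the current head).
 */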
static inline int get_id_from_freelist(struct usbfront_info *info)
{
	unsigned long free;
	free = info->shadow_free;
	BUG_ON(free > USB_RING_SIZE);
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = (unsigned int)0x0fff; /* debug */
	return free;
}
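
/* Push a request id back onto the head of the shadow freelist. */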
static inline void add_id_to_freelist(
	struct usbfront_info *info, unsigned long id)
{
	info->shadow[id].req.id = info->shadow_free;
	info->shadow[id].urb = NULL;
	info->shadow_free = id;
}
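
/* Number of pages spanned by the buffer [addr, addr + length). */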
static inline int count_pages(void *addr, int length)
{
	unsigned long start = (unsigned long) addr >> PAGE_SHIFT;
	unsigned long end = (unsigned long) (addr + length + PAGE_SIZE - 1) >> PAGE_SHIFT;
	return end - start;
}
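
/*
 * Grant the backend access to each page of the buffer and fill in one
 * request segment (gref, offset, length) per page.
 */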
static inline void xenhcd_gnttab_map(struct usbfront_info *info,
		void *addr, int length, grant_ref_t *gref_head,
		struct usbif_request_segment *seg, int nr_pages, int flags)
{
	grant_ref_t ref;
	struct page *page;
	unsigned long buffer_pfn;
	unsigned int offset;
	unsigned int len;
	unsigned int bytes;
	int i;

	page = virt_to_page(addr);
	buffer_pfn = page_to_phys(page) >> PAGE_SHIFT;
	offset = offset_in_page(addr);
	len = length;

	for (i = 0; i < nr_pages; i++) {
		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;

		ref = gnttab_claim_grant_reference(gref_head);
		BUG_ON(ref == -ENOSPC);
		gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id, buffer_pfn, flags);
		seg[i].gref = ref;
		seg[i].offset = (uint16_t)offset;
		seg[i].length = (uint16_t)bytes;

		buffer_pfn++;
		len -= bytes;
		offset = 0;
	}
}
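
/*
 * Translate an URB into a ring request: grant the transfer buffer (and,
 * for isochronous transfers, the frame descriptors) to the backend and
 * copy the pipe, transfer flags and type-specific fields into @req.
 */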
static int map_urb_for_request(struct usbfront_info *info, struct urb *urb,
		usbif_request_t *req)
{
	grant_ref_t gref_head;
	int nr_buff_pages = 0;
	int nr_isodesc_pages = 0;
	int ret = 0;

	if (urb->transfer_buffer_length) {
		nr_buff_pages = count_pages(urb->transfer_buffer, urb->transfer_buffer_length);

		if (usb_pipeisoc(urb->pipe))
			nr_isodesc_pages = count_pages(&urb->iso_frame_desc[0],
				sizeof(struct usb_iso_packet_descriptor) * urb->number_of_packets);

		if (nr_buff_pages + nr_isodesc_pages > USBIF_MAX_SEGMENTS_PER_REQUEST)
			return -E2BIG;

		ret = gnttab_alloc_grant_references(USBIF_MAX_SEGMENTS_PER_REQUEST, &gref_head);
		if (ret) {
			printk(KERN_ERR "usbfront: gnttab_alloc_grant_references() error\n");
			return -ENOMEM;
		}

		xenhcd_gnttab_map(info, urb->transfer_buffer,
				urb->transfer_buffer_length,
				&gref_head, &req->seg[0], nr_buff_pages,
				usb_pipein(urb->pipe) ? 0 : GTF_readonly);

		if (!usb_pipeisoc(urb->pipe))
			gnttab_free_grant_references(gref_head);
	}

	req->pipe = usbif_setportnum_pipe(urb->pipe, urb->dev->portnum);
	req->transfer_flags = urb->transfer_flags;
	req->buffer_length = urb->transfer_buffer_length;
	req->nr_buffer_segs = nr_buff_pages;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		req->u.isoc.interval = urb->interval;
		req->u.isoc.start_frame = urb->start_frame;
		req->u.isoc.number_of_packets = urb->number_of_packets;
		req->u.isoc.nr_frame_desc_segs = nr_isodesc_pages;
		/*
		 * urb->number_of_packets must be > 0
		 */
		if (unlikely(urb->number_of_packets <= 0))
			BUG();
		xenhcd_gnttab_map(info, &urb->iso_frame_desc[0],
			sizeof(struct usb_iso_packet_descriptor) * urb->number_of_packets,
			&gref_head, &req->seg[nr_buff_pages], nr_isodesc_pages, 0);
		gnttab_free_grant_references(gref_head);
		break;
	case PIPE_INTERRUPT:
		req->u.intr.interval = urb->interval;
		break;
	case PIPE_CONTROL:
		if (urb->setup_packet)
			memcpy(req->u.ctrl, urb->setup_packet, 8);
		break;
	case PIPE_BULK:
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
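
/* Revoke the grants that were set up for a completed request. */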
static void xenhcd_gnttab_done(struct usb_shadow *shadow)
{
	int nr_segs = 0;
	int i;

	nr_segs = shadow->req.nr_buffer_segs;

	if (usb_pipeisoc(shadow->req.pipe))
		nr_segs += shadow->req.u.isoc.nr_frame_desc_segs;

	for (i = 0; i < nr_segs; i++)
		gnttab_end_foreign_access(shadow->req.seg[i].gref, 0UL);
}
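
/*
 * Hand a completed or unlinked URB back to the USB core.  info->lock is
 * dropped around usb_hcd_giveback_urb() and re-taken afterwards.
 */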
static void xenhcd_giveback_urb(struct usbfront_info *info, struct urb *urb)
__releases(info->lock)
__acquires(info->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	list_del_init(&urbp->list);
	free_urb_priv(urbp);
	switch (urb->status) {
	case -ECONNRESET:
	case -ENOENT:
		COUNT(info->stats.unlink);
		break;
	default:
		COUNT(info->stats.complete);
	}
	spin_unlock(&info->lock);
	usb_hcd_giveback_urb(info_to_hcd(info), urb, NULL);
	spin_lock(&info->lock);
}
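
/*
 * Place one request on the ring for @urbp, record it in the shadow
 * ring, and notify the backend if required.
 */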
static inline int xenhcd_do_request(struct usbfront_info *info, struct urb_priv *urbp)
{
	usbif_request_t *ring_req;
	struct urb *urb = urbp->urb;
	uint16_t id;
	int notify;
	int ret = 0;

	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	ring_req->id = id;

	ret = map_urb_for_request(info, urb, ring_req);
	if (ret < 0) {
		add_id_to_freelist(info, id);
		return ret;
	}

	info->ring.req_prod_pvt++;
	info->shadow[id].urb = urb;
	info->shadow[id].req = *ring_req;
	urbp->req_id = id;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
	if (notify)
		notify_remote_via_irq(info->irq);

	return ret;
}
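
/*
 * Move URBs from pending_urbs onto the ring until the list is empty or
 * the ring fills up.
 */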
static void xenhcd_kick_pending_urbs(struct usbfront_info *info)
{
	struct urb_priv *urbp;
	int ret;

	while (!list_empty(&info->pending_urbs)) {
		if (RING_FULL(&info->ring)) {
			COUNT(info->stats.ring_full);
			timer_action(info, TIMER_RING_WATCHDOG);
			goto done;
		}

		urbp = list_entry(info->pending_urbs.next, struct urb_priv, list);
		ret = xenhcd_do_request(info, urbp);
		if (ret == 0)
			list_move_tail(&urbp->list, &info->inprogress_urbs);
		else
			xenhcd_giveback_urb(info, urbp->urb);
	}
	timer_action_done(info, TIMER_SCAN_PENDING_URBS);

done:
	return;
}
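
/* Give back every URB queued on unlinked_urbs. */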
static void xenhcd_giveback_unlinked_urbs(struct usbfront_info *info)
{
	struct urb_priv *urbp, *tmp;

	list_for_each_entry_safe(urbp, tmp, &info->unlinked_urbs, list) {
		xenhcd_giveback_urb(info, urbp->urb);
	}
}
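
/*
 * Queue a new URB: put it straight on the ring if there is room and no
 * backlog, otherwise park it on pending_urbs.
 */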
static int xenhcd_submit_urb(struct usbfront_info *info, struct urb_priv *urbp)
{
	int ret = 0;

	if (RING_FULL(&info->ring)) {
		list_add_tail(&urbp->list, &info->pending_urbs);
		COUNT(info->stats.ring_full);
		timer_action(info, TIMER_RING_WATCHDOG);
		goto done;
	}

	if (!list_empty(&info->pending_urbs)) {
		list_add_tail(&urbp->list, &info->pending_urbs);
		timer_action(info, TIMER_SCAN_PENDING_URBS);
		goto done;
	}

	ret = xenhcd_do_request(info, urbp);
	if (ret == 0)
		list_add_tail(&urbp->list, &info->inprogress_urbs);

done:
	return ret;
}
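
/*
 * Mark an URB as unlinked.  If it has not been put on the ring yet
 * (req_id == ~0) it is moved to unlinked_urbs for later give-back.
 */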
static int xenhcd_unlink_urb(struct usbfront_info *info, struct urb_priv *urbp)
{
	if (urbp->unlinked)
		return -EBUSY;
	urbp->unlinked = 1;

	/* if the urb is in pending_urbs */
	if (urbp->req_id == ~0) {
		list_move_tail(&urbp->list, &info->unlinked_urbs);
		timer_action(info, TIMER_SCAN_PENDING_URBS);
	}

	/* TODO: send cancel request to backend */

	return 0;
}
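
/*
 * Consume responses from the ring, tear down their grants, and give the
 * corresponding URBs back.  Returns nonzero if more responses may follow.
 */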
static int xenhcd_end_submit_urb(struct usbfront_info *info)
{
	usbif_response_t *ring_res;
	struct urb *urb;
	struct urb_priv *urbp;

	RING_IDX i, rp;
	uint16_t id;
	int more_to_do = 0;
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);
	rp = info->ring.sring->rsp_prod;
	rmb(); /* ensure we see queued responses up to "rp" */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		ring_res = RING_GET_RESPONSE(&info->ring, i);
		id = ring_res->id;
		xenhcd_gnttab_done(&info->shadow[id]);
		urb = info->shadow[id].urb;
		barrier();
		add_id_to_freelist(info, id);

		urbp = (struct urb_priv *)urb->hcpriv;
		if (likely(!urbp->unlinked)) {
			urb->status = ring_res->status;
			urb->actual_length = ring_res->actual_length;
			urb->error_count = ring_res->error_count;
			urb->start_frame = ring_res->start_frame;
		}
		barrier();
		xenhcd_giveback_urb(info, urb);
	}
	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt)
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
	else
		info->ring.sring->rsp_event = i + 1;

	spin_unlock_irqrestore(&info->lock, flags);

	cond_resched();

	return more_to_do;
}
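
/* Kernel thread that processes ring responses whenever work is signalled. */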
int xenhcd_schedule(void *arg)
{
	struct usbfront_info *info = (struct usbfront_info *) arg;

	while (!kthread_should_stop()) {
		wait_event_interruptible(
			info->wq,
			info->waiting_resp || kthread_should_stop());
		info->waiting_resp = 0;
		smp_mb();

		if (xenhcd_end_submit_urb(info))
			info->waiting_resp = 1;
	}

	return 0;
}
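
/* Flag that responses are waiting and wake the response-handling thread. */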
static void xenhcd_notify_work(struct usbfront_info *info)
{
	info->waiting_resp = 1;
	wake_up(&info->wq);
}
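
/* Interrupt handler: the backend signalled the event channel. */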
irqreturn_t xenhcd_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
	xenhcd_notify_work((struct usbfront_info *) dev_id);
	return IRQ_HANDLED;
}