ia64/linux-2.6.18-xen.hg

drivers/xen/scsifront/scsifront.c @ 601:e0c15322ff6d

pvSCSI: More fixes (including locking cleanups)

Signed-off-by: Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
Signed-off-by: Jun Kamada <kama@jp.fujitsu.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Mon Jul 14 10:15:44 2008 +0100
Parents:  db4f08203b8a
Children: bd4b58143713

/*
 * Xen SCSI frontend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/version.h>
#include "common.h"
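
/*
 * Shadow free-list helpers. Each in-flight request is tracked in
 * info->shadow[]; request IDs are recycled through a singly linked
 * free list protected by shadow_lock.
 */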
static int get_id_from_freelist(struct vscsifrnt_info *info)
{
        unsigned long flags;
        uint32_t free;

        spin_lock_irqsave(&info->shadow_lock, flags);

        free = info->shadow_free;
        BUG_ON(free > VSCSIIF_MAX_REQS);
        info->shadow_free = info->shadow[free].next_free;
        info->shadow[free].next_free = 0x0fff;

        info->shadow[free].wait_reset = 0;

        spin_unlock_irqrestore(&info->shadow_lock, flags);

        return free;
}

static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id)
{
        unsigned long flags;

        spin_lock_irqsave(&info->shadow_lock, flags);

        info->shadow[id].next_free = info->shadow_free;
        info->shadow[id].req_scsi_cmnd = 0;
        info->shadow_free = id;

        spin_unlock_irqrestore(&info->shadow_lock, flags);
}
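
/*
 * Reserve the next free slot on the shared ring and stamp it with a
 * request ID; the backend echoes the ID back in its response so the
 * matching shadow entry can be located.
 */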
struct vscsiif_request *scsifront_pre_request(struct vscsifrnt_info *info)
{
        struct vscsiif_front_ring *ring = &(info->ring);
        vscsiif_request_t *ring_req;
        uint32_t id;

        ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);

        ring->req_prod_pvt++;

        id = get_id_from_freelist(info);        /* id is echoed back in the response */
        ring_req->rqid = (uint16_t)id;

        return ring_req;
}
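
/*
 * scsifront_notify_work() wakes the response-handling kthread;
 * scsifront_do_request() pushes queued requests to the backend and
 * raises the event-channel IRQ only when the ring macro says a
 * notification is needed.
 */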
static void scsifront_notify_work(struct vscsifrnt_info *info)
{
        info->waiting_resp = 1;
        wake_up(&info->wq);
}

static void scsifront_do_request(struct vscsifrnt_info *info)
{
        struct vscsiif_front_ring *ring = &(info->ring);
        unsigned int irq = info->irq;
        int notify;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
        if (notify)
                notify_remote_via_irq(irq);
}
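
/* Event-channel interrupt: defer all response processing to the kthread. */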
irqreturn_t scsifront_intr(int irq, void *dev_id, struct pt_regs *ptregs)
{
        scsifront_notify_work((struct vscsifrnt_info *)dev_id);
        return IRQ_HANDLED;
}
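
/*
 * Revoke the grant references of a completed request. A grant still
 * in use by the backend at this point indicates a protocol violation,
 * hence the BUG().
 */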
static void scsifront_gnttab_done(struct vscsifrnt_shadow *s, uint32_t id)
{
        int i;

        if (s->sc_data_direction == DMA_NONE)
                return;

        if (s->nr_segments) {
                for (i = 0; i < s->nr_segments; i++) {
                        if (unlikely(gnttab_query_foreign_access(
                                s->gref[i]) != 0)) {
                                printk(KERN_ALERT "scsifront: "
                                       "grant still in use by backend.\n");
                                BUG();
                        }
                        gnttab_end_foreign_access(s->gref[i], 0UL);
                }
        }

        return;
}
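
/*
 * Complete a normal CDB request: release grants, recycle the request
 * ID, copy back status and sense data, and call the midlayer's done()
 * callback.
 */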
static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
                                   vscsiif_response_t *ring_res)
{
        struct scsi_cmnd *sc;
        uint32_t id;
        uint8_t sense_len;

        id = ring_res->rqid;
        sc = (struct scsi_cmnd *)info->shadow[id].req_scsi_cmnd;

        if (sc == NULL)
                BUG();

        scsifront_gnttab_done(&info->shadow[id], id);
        add_id_to_freelist(info, id);

        sc->result = ring_res->rslt;
        sc->resid = 0;

        if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE)
                sense_len = VSCSIIF_SENSE_BUFFERSIZE;
        else
                sense_len = ring_res->sense_len;

        if (sense_len)
                memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len);

        sc->scsi_done(sc);

        return;
}
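
/*
 * Complete a synchronous (reset) request by recording the result and
 * waking the waiter in scsifront_dev_reset_handler().
 */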
static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
                                    vscsiif_response_t *ring_res)
{
        uint16_t id = ring_res->rqid;
        unsigned long flags;

        spin_lock_irqsave(&info->shadow_lock, flags);
        info->shadow[id].wait_reset = 1;
        info->shadow[id].rslt_reset = ring_res->rslt;
        spin_unlock_irqrestore(&info->shadow_lock, flags);

        wake_up(&(info->shadow[id].wq_reset));
}
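
/*
 * Drain all pending responses from the ring under io_lock. Returns
 * nonzero when more responses may still arrive, so the caller should
 * run again.
 */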
int scsifront_cmd_done(struct vscsifrnt_info *info)
{
        vscsiif_response_t *ring_res;

        RING_IDX i, rp;
        int more_to_do = 0;
        unsigned long flags;

        spin_lock_irqsave(&info->io_lock, flags);

        rp = info->ring.sring->rsp_prod;
        rmb();
        for (i = info->ring.rsp_cons; i != rp; i++) {

                ring_res = RING_GET_RESPONSE(&info->ring, i);

                if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB)
                        scsifront_cdb_cmd_done(info, ring_res);
                else
                        scsifront_sync_cmd_done(info, ring_res);
        }

        info->ring.rsp_cons = i;

        if (i != info->ring.req_prod_pvt) {
                RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
        } else {
                info->ring.sring->rsp_event = i + 1;
        }

        spin_unlock_irqrestore(&info->io_lock, flags);

        /* Yield point for this unbounded loop. */
        cond_resched();

        return more_to_do;
}
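
/*
 * Per-device kthread: sleep until scsifront_intr() signals work, then
 * process responses, re-arming itself while more remain outstanding.
 */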
int scsifront_schedule(void *data)
{
        struct vscsifrnt_info *info = (struct vscsifrnt_info *)data;

        while (!kthread_should_stop()) {
                wait_event_interruptible(
                        info->wq,
                        info->waiting_resp || kthread_should_stop());

                info->waiting_resp = 0;
                smp_mb();

                if (scsifront_cmd_done(info))
                        info->waiting_resp = 1;
        }

        return 0;
}
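
/*
 * Grant the backend access to the data buffer of a command, one grant
 * reference per page, filling in the ring request's segment array.
 * Returns the number of segments mapped, or a negative errno.
 */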
static int map_data_for_request(struct vscsifrnt_info *info,
                struct scsi_cmnd *sc, vscsiif_request_t *ring_req, uint32_t id)
{
        grant_ref_t gref_head;
        struct page *page;
        int err, i, ref, ref_cnt = 0;
        int write = (sc->sc_data_direction == DMA_TO_DEVICE);
        int nr_pages, off, len, bytes;
        unsigned long buffer_pfn;
        unsigned int data_len = 0;

        if (sc->sc_data_direction == DMA_NONE)
                return 0;

        err = gnttab_alloc_grant_references(VSCSIIF_SG_TABLESIZE, &gref_head);
        if (err) {
                printk(KERN_ERR "scsifront: gnttab_alloc_grant_references() error\n");
                return -ENOMEM;
        }

        if (sc->use_sg) {
                /* modeled on scsi_lib.c:scsi_req_map_sg() */
                struct scatterlist *sg = (struct scatterlist *)sc->request_buffer;
                nr_pages = (sc->request_bufflen + sg[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

                if (nr_pages > VSCSIIF_SG_TABLESIZE) {
                        printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n");
                        ref_cnt = (-E2BIG);
                        goto big_to_sg;
                }

                for (i = 0; i < sc->use_sg; i++) {
                        page = sg[i].page;
                        off = sg[i].offset;
                        len = sg[i].length;
                        data_len += len;

                        buffer_pfn = page_to_phys(page) >> PAGE_SHIFT;

                        while (len > 0) {
                                bytes = min_t(unsigned int, len, PAGE_SIZE - off);

                                ref = gnttab_claim_grant_reference(&gref_head);
                                BUG_ON(ref == -ENOSPC);

                                gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id,
                                        buffer_pfn, write);

                                info->shadow[id].gref[ref_cnt] = ref;
                                ring_req->seg[ref_cnt].gref = ref;
                                ring_req->seg[ref_cnt].offset = (uint16_t)off;
                                ring_req->seg[ref_cnt].length = (uint16_t)bytes;

                                buffer_pfn++;
                                len -= bytes;
                                off = 0;
                                ref_cnt++;
                        }
                }
        } else if (sc->request_bufflen) {
                unsigned long end = ((unsigned long)sc->request_buffer
                        + sc->request_bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = (unsigned long)sc->request_buffer >> PAGE_SHIFT;

                page = virt_to_page(sc->request_buffer);
                nr_pages = end - start;
                len = sc->request_bufflen;

                if (nr_pages > VSCSIIF_SG_TABLESIZE) {
                        ref_cnt = (-E2BIG);
                        goto big_to_sg;
                }

                buffer_pfn = page_to_phys(page) >> PAGE_SHIFT;

                off = offset_in_page((unsigned long)sc->request_buffer);
                for (i = 0; i < nr_pages; i++) {
                        bytes = PAGE_SIZE - off;

                        if (bytes > len)
                                bytes = len;

                        ref = gnttab_claim_grant_reference(&gref_head);
                        BUG_ON(ref == -ENOSPC);

                        gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id,
                                buffer_pfn, write);

                        info->shadow[id].gref[i] = ref;
                        ring_req->seg[i].gref = ref;
                        ring_req->seg[i].offset = (uint16_t)off;
                        ring_req->seg[i].length = (uint16_t)bytes;

                        buffer_pfn++;
                        len -= bytes;
                        off = 0;
                        ref_cnt++;
                }
        }

big_to_sg:

        gnttab_free_grant_references(gref_head);

        return ref_cnt;
}
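
/*
 * Queue a SCSI command to the backend. Returns SCSI_MLQUEUE_HOST_BUSY
 * when the ring is full or grant references are exhausted, so the
 * midlayer will retry the command later.
 */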
static int scsifront_queuecommand(struct scsi_cmnd *sc,
                                  void (*done)(struct scsi_cmnd *))
{
        struct vscsifrnt_info *info =
                (struct vscsifrnt_info *)sc->device->host->hostdata;
        vscsiif_request_t *ring_req;
        int ref_cnt;
        uint16_t rqid;

        if (RING_FULL(&info->ring)) {
                goto out_host_busy;
        }

        sc->scsi_done = done;
        sc->result = 0;

        ring_req = scsifront_pre_request(info);
        rqid = ring_req->rqid;
        ring_req->act = VSCSIIF_ACT_SCSI_CDB;

        ring_req->id = sc->device->id;
        ring_req->lun = sc->device->lun;
        ring_req->channel = sc->device->channel;
        ring_req->cmd_len = sc->cmd_len;

        BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);

        if (sc->cmd_len)
                memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
        else
                memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);

        ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
        ring_req->timeout_per_command = (sc->timeout_per_command / HZ);

        info->shadow[rqid].req_scsi_cmnd = (unsigned long)sc;
        info->shadow[rqid].sc_data_direction = sc->sc_data_direction;
        info->shadow[rqid].act = ring_req->act;

        ref_cnt = map_data_for_request(info, sc, ring_req, rqid);
        if (ref_cnt < 0) {
                add_id_to_freelist(info, rqid);
                if (ref_cnt == (-ENOMEM))
                        goto out_host_busy;
                else {
                        sc->result = (DID_ERROR << 16);
                        goto out_fail_command;
                }
        }

        ring_req->nr_segments = (uint8_t)ref_cnt;
        info->shadow[rqid].nr_segments = ref_cnt;

        scsifront_do_request(info);

        return 0;

out_host_busy:
        return SCSI_MLQUEUE_HOST_BUSY;

out_fail_command:
        done(sc);
        return 0;
}
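
/*
 * Aborting an individual command is not implemented; always report
 * failure so SCSI error handling escalates to a device reset.
 */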
static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
{
        return (FAILED);
}

/* vscsi supports only device_reset, because each vscsi device maps to a single LUN */
static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
{
        struct Scsi_Host *host = sc->device->host;
        struct vscsifrnt_info *info =
                (struct vscsifrnt_info *)sc->device->host->hostdata;

        vscsiif_request_t *ring_req;
        uint16_t rqid;
        int err;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
        spin_lock_irq(host->host_lock);
#endif

        ring_req = scsifront_pre_request(info);
        ring_req->act = VSCSIIF_ACT_SCSI_RESET;

        rqid = ring_req->rqid;
        info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET;

        ring_req->channel = sc->device->channel;
        ring_req->id = sc->device->id;
        ring_req->lun = sc->device->lun;
        ring_req->cmd_len = sc->cmd_len;

        if (sc->cmd_len)
                memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
        else
                memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);

        ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
        ring_req->timeout_per_command = (sc->timeout_per_command / HZ);
        ring_req->nr_segments = 0;

        scsifront_do_request(info);

        spin_unlock_irq(host->host_lock);
        wait_event_interruptible(info->shadow[rqid].wq_reset,
                                 info->shadow[rqid].wait_reset);
        spin_lock_irq(host->host_lock);

        err = info->shadow[rqid].rslt_reset;

        add_id_to_freelist(info, rqid);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
        spin_unlock_irq(host->host_lock);
#endif
        return (err);
}
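
/*
 * Host template: can_queue matches the number of shadow entries
 * (VSCSIIF_MAX_REQS) and sg_tablesize matches the per-request segment
 * array (VSCSIIF_SG_TABLESIZE).
 */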
struct scsi_host_template scsifront_sht = {
        .module                  = THIS_MODULE,
        .name                    = "Xen SCSI frontend driver",
        .queuecommand            = scsifront_queuecommand,
        .eh_abort_handler        = scsifront_eh_abort_handler,
        .eh_device_reset_handler = scsifront_dev_reset_handler,
        .cmd_per_lun             = VSCSIIF_DEFAULT_CMD_PER_LUN,
        .can_queue               = VSCSIIF_MAX_REQS,
        .this_id                 = -1,
        .sg_tablesize            = VSCSIIF_SG_TABLESIZE,
        .use_clustering          = DISABLE_CLUSTERING,
        .proc_name               = "scsifront",
};
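
/*
 * Module entry points: device setup is driven by xenbus probing via
 * scsifront_xenbus_init() (implemented in the driver's xenbus part).
 */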
static int __init scsifront_init(void)
{
        int err;

        if (!is_running_on_xen())
                return -ENODEV;

        err = scsifront_xenbus_init();

        return err;
}

static void __exit scsifront_exit(void)
{
        scsifront_xenbus_unregister();
}

module_init(scsifront_init);
module_exit(scsifront_exit);

MODULE_DESCRIPTION("Xen SCSI frontend driver");
MODULE_LICENSE("GPL");