ia64/xen-unstable

view tools/blktap/drivers/tapdisk.c @ 15783:c93e2a822d6f

[xen, xencomm] xencomm multiple page support
The current implementation doesn't allow the struct xencomm_desc::address
array to span more than a single page. On IA64 this causes 64GB+ domain
creation to fail. This patch generalizes xencomm to allow a multiple-page address array.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author kfraser@localhost.localdomain
date Tue Aug 28 15:32:27 2007 +0100 (2007-08-28)
parents b515e66234e8
children b3689eb59c5e
line source
1 /* tapdisk.c
2 *
3 * separate disk process, spawned by blktapctrl. Inherits code from driver
4 * plugins
5 *
6 * Copyright (c) 2005 Julian Chesterfield and Andrew Warfield.
7 *
8 */
10 #define MSG_SIZE 4096
11 #define TAPDISK
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <sys/mman.h>
16 #include <fcntl.h>
17 #include <string.h>
18 #include <signal.h>
19 #include <sys/stat.h>
20 #include <sys/types.h>
21 #include <sys/poll.h>
22 #include <unistd.h>
23 #include <errno.h>
24 #include <pthread.h>
25 #include <time.h>
26 #include <err.h>
27 #include <poll.h>
28 #include <sys/statvfs.h>
29 #include <sys/ioctl.h>
30 #include <linux/fs.h>
31 #include "blktaplib.h"
32 #include "tapdisk.h"
34 #if 1
35 #define ASSERT(_p) \
36 if ( !(_p) ) { DPRINTF("Assertion '%s' failed, line %d, file %s", #_p , \
37 __LINE__, __FILE__); *(int*)0=0; }
38 #else
39 #define ASSERT(_p) ((void)0)
40 #endif
42 #define INPUT 0
43 #define OUTPUT 1
45 static int maxfds, fds[2], run = 1;
47 static pid_t process;
48 int connected_disks = 0;
49 fd_list_entry_t *fd_start = NULL;
51 int do_cow_read(struct disk_driver *dd, blkif_request_t *req,
52 int sidx, uint64_t sector, int nr_secs);
54 #define td_for_each_disk(tds, drv) \
55 for (drv = tds->disks; drv != NULL; drv = drv->next)
/* Print the version banner and invocation help to stderr, then exit. */
void usage(void)
{
	const char *help =
		"blktap-utils: v1.0.0\n"
		"usage: tapdisk <READ fifo> <WRITE fifo>\n";

	fputs(help, stderr);
	exit(-1);
}
/*
 * Detach from the caller by forking once: the parent exits and the
 * child continues as the daemon.  A process whose parent is init
 * (ppid 1) is assumed to be daemonized already.
 *
 * Fix: the original treated fork() == -1 like "I am the parent" and
 * exited with status 0, silently reporting success on failure.
 */
void daemonize(void)
{
	pid_t pid;

	if (getppid() == 1)
		return; /* already a daemon */

	pid = fork();
	if (pid < 0)
		exit(-1); /* fork failed: exit non-zero, don't fake success */
	if (pid > 0)
		exit(0); /* parent exits; child carries on */

#if 0
	/*Set new program session ID and close all descriptors*/
	{
		int i;

		setsid();
		for (i = getdtablesize(); i >= 0; --i)
			close(i);

		/*Send all I/O to /dev/null */
		i = open("/dev/null",O_RDWR);
		dup(i);
		dup(i);
	}
#endif
	return;
}
84 static void free_driver(struct disk_driver *d)
85 {
86 if (d->name)
87 free(d->name);
88 if (d->private)
89 free(d->private);
90 free(d);
91 }
93 static void unmap_disk(struct td_state *s)
94 {
95 tapdev_info_t *info = s->ring_info;
96 struct disk_driver *dd, *tmp;
97 fd_list_entry_t *entry;
99 dd = s->disks;
100 while (dd) {
101 tmp = dd->next;
102 dd->drv->td_close(dd);
103 free_driver(dd);
104 dd = tmp;
105 }
107 if (info != NULL && info->mem > 0)
108 munmap(info->mem, getpagesize() * BLKTAP_MMAP_REGION_SIZE);
110 entry = s->fd_entry;
111 *entry->pprev = entry->next;
112 if (entry->next)
113 entry->next->pprev = entry->pprev;
115 close(info->fd);
117 free(s->fd_entry);
118 free(s->blkif);
119 free(s->ring_info);
120 free(s);
122 return;
123 }
125 void sig_handler(int sig)
126 {
127 /*Received signal to close. If no disks are active, we close app.*/
129 if (connected_disks < 1) run = 0;
130 }
132 static inline int LOCAL_FD_SET(fd_set *readfds)
133 {
134 fd_list_entry_t *ptr;
135 struct disk_driver *dd;
137 ptr = fd_start;
138 while (ptr != NULL) {
139 if (ptr->tap_fd) {
140 FD_SET(ptr->tap_fd, readfds);
141 td_for_each_disk(ptr->s, dd) {
142 if (dd->io_fd[READ])
143 FD_SET(dd->io_fd[READ], readfds);
144 maxfds = (dd->io_fd[READ] > maxfds ?
145 dd->io_fd[READ] : maxfds);
146 }
147 maxfds = (ptr->tap_fd > maxfds ? ptr->tap_fd : maxfds);
148 }
149 ptr = ptr->next;
150 }
152 return 0;
153 }
155 static inline fd_list_entry_t *add_fd_entry(int tap_fd, struct td_state *s)
156 {
157 fd_list_entry_t **pprev, *entry;
158 int i;
160 DPRINTF("Adding fd_list_entry\n");
162 /*Add to linked list*/
163 s->fd_entry = entry = malloc(sizeof(fd_list_entry_t));
164 entry->tap_fd = tap_fd;
165 entry->s = s;
166 entry->next = NULL;
168 pprev = &fd_start;
169 while (*pprev != NULL)
170 pprev = &(*pprev)->next;
172 *pprev = entry;
173 entry->pprev = pprev;
175 return entry;
176 }
178 static inline struct td_state *get_state(int cookie)
179 {
180 fd_list_entry_t *ptr;
182 ptr = fd_start;
183 while (ptr != NULL) {
184 if (ptr->cookie == cookie) return ptr->s;
185 ptr = ptr->next;
186 }
187 return NULL;
188 }
/*
 * Map a driver type code (passed to us by blktapctrl) onto its
 * tap_disk ops table.
 * NOTE(review): drivertype is not bounds-checked here — callers such
 * as open_disk are expected to validate it first.
 */
static struct tap_disk *get_driver(int drivertype)
{
	return dtypes[drivertype]->drv;
}
197 static struct td_state *state_init(void)
198 {
199 int i;
200 struct td_state *s;
201 blkif_t *blkif;
203 s = malloc(sizeof(struct td_state));
204 blkif = s->blkif = malloc(sizeof(blkif_t));
205 s->ring_info = calloc(1, sizeof(tapdev_info_t));
207 for (i = 0; i < MAX_REQUESTS; i++) {
208 blkif->pending_list[i].secs_pending = 0;
209 blkif->pending_list[i].submitting = 0;
210 }
212 return s;
213 }
215 static int map_new_dev(struct td_state *s, int minor)
216 {
217 int tap_fd;
218 tapdev_info_t *info = s->ring_info;
219 char *devname;
220 fd_list_entry_t *ptr;
221 int page_size;
223 if (asprintf(&devname,"%s/%s%d", BLKTAP_DEV_DIR, BLKTAP_DEV_NAME, minor) == -1)
224 return -1;
225 tap_fd = open(devname, O_RDWR);
226 if (tap_fd == -1)
227 {
228 DPRINTF("open failed on dev %s!",devname);
229 goto fail;
230 }
231 info->fd = tap_fd;
233 /*Map the shared memory*/
234 page_size = getpagesize();
235 info->mem = mmap(0, page_size * BLKTAP_MMAP_REGION_SIZE,
236 PROT_READ | PROT_WRITE, MAP_SHARED, info->fd, 0);
237 if ((long int)info->mem == -1)
238 {
239 DPRINTF("mmap failed on dev %s!\n",devname);
240 goto fail;
241 }
243 /* assign the rings to the mapped memory */
244 info->sring = (blkif_sring_t *)((unsigned long)info->mem);
245 BACK_RING_INIT(&info->fe_ring, info->sring, page_size);
247 info->vstart =
248 (unsigned long)info->mem + (BLKTAP_RING_PAGES * page_size);
250 ioctl(info->fd, BLKTAP_IOCTL_SENDPID, process );
251 ioctl(info->fd, BLKTAP_IOCTL_SETMODE, BLKTAP_MODE_INTERPOSE );
252 free(devname);
254 /*Update the fd entry*/
255 ptr = fd_start;
256 while (ptr != NULL) {
257 if (s == ptr->s) {
258 ptr->tap_fd = tap_fd;
259 break;
260 }
261 ptr = ptr->next;
262 }
264 return minor;
266 fail:
267 free(devname);
268 return -1;
269 }
271 static struct disk_driver *disk_init(struct td_state *s,
272 struct tap_disk *drv,
273 char *name, td_flag_t flags)
274 {
275 struct disk_driver *dd;
277 dd = calloc(1, sizeof(struct disk_driver));
278 if (!dd)
279 return NULL;
281 dd->private = malloc(drv->private_data_size);
282 if (!dd->private) {
283 free(dd);
284 return NULL;
285 }
287 dd->drv = drv;
288 dd->td_state = s;
289 dd->name = name;
290 dd->flags = flags;
292 return dd;
293 }
295 static int open_disk(struct td_state *s,
296 struct tap_disk *drv, char *path, td_flag_t flags)
297 {
298 int err;
299 char *dup;
300 td_flag_t pflags;
301 struct disk_id id;
302 struct disk_driver *d;
304 dup = strdup(path);
305 if (!dup)
306 return -ENOMEM;
308 memset(&id, 0, sizeof(struct disk_id));
309 s->disks = d = disk_init(s, drv, dup, flags);
310 if (!d)
311 return -ENOMEM;
313 err = drv->td_open(d, path, flags);
314 if (err) {
315 free_driver(d);
316 s->disks = NULL;
317 return -ENOMEM;
318 }
319 pflags = flags | TD_RDONLY;
321 /* load backing files as necessary */
322 while ((err = d->drv->td_get_parent_id(d, &id)) == 0) {
323 struct disk_driver *new;
325 if (id.drivertype > MAX_DISK_TYPES ||
326 !get_driver(id.drivertype) || !id.name)
327 goto fail;
329 dup = strdup(id.name);
330 if (!dup)
331 goto fail;
333 new = disk_init(s, get_driver(id.drivertype), dup, pflags);
334 if (!new)
335 goto fail;
337 err = new->drv->td_open(new, new->name, pflags);
338 if (err)
339 goto fail;
341 err = d->drv->td_validate_parent(d, new, 0);
342 if (err) {
343 d->next = new;
344 goto fail;
345 }
347 d = d->next = new;
348 free(id.name);
349 }
351 s->info |= ((flags & TD_RDONLY) ? VDISK_READONLY : 0);
353 if (err >= 0)
354 return 0;
356 fail:
357 DPRINTF("failed opening disk\n");
358 if (id.name)
359 free(id.name);
360 d = s->disks;
361 while (d) {
362 struct disk_driver *tmp = d->next;
363 d->drv->td_close(d);
364 free_driver(d);
365 d = tmp;
366 }
367 s->disks = NULL;
368 return -1;
369 }
/*
 * Read one control message from blktapctrl on fds[READ] and dispatch
 * it.  buf is the shared MSG_SIZE message buffer; replies are built in
 * place in buf and written back on fds[WRITE].
 *
 * Returns 1 when a recognised message type was handled (even if the
 * operation itself failed — failure is reported in the reply), 0 for
 * short reads or unknown types.
 */
static int read_msg(char *buf)
{
	int length, len, msglen, tap_fd, *io_fd;
	char *ptr, *path;
	image_t *img;
	msg_hdr_t *msg;
	msg_newdev_t *msg_dev;
	msg_pid_t *msg_pid;
	struct tap_disk *drv;
	int ret = -1;
	struct td_state *s = NULL;
	fd_list_entry_t *entry;

	length = read(fds[READ], buf, MSG_SIZE);

	if (length > 0 && length >= sizeof(msg_hdr_t))
	{
		msg = (msg_hdr_t *)buf;
		DPRINTF("Tapdisk: Received msg, len %d, type %d, UID %d\n",
			length,msg->type,msg->cookie);

		switch (msg->type) {
		case CTLMSG_PARAMS:
			/* payload after the header is the image path */
			ptr = buf + sizeof(msg_hdr_t);
			len = (length - sizeof(msg_hdr_t));
			path = calloc(1, len);
			/* NOTE(review): calloc result is not checked before
			 * the memcpy below — confirm OOM policy */

			memcpy(path, ptr, len);
			DPRINTF("Received CTLMSG_PARAMS: [%s]\n", path);

			/*Assign driver*/
			drv = get_driver(msg->drivertype);
			if (drv == NULL)
				goto params_done;

			DPRINTF("Loaded driver: name [%s], type [%d]\n",
				drv->disk_type, msg->drivertype);

			/* Allocate the disk structs */
			s = state_init();
			if (s == NULL)
				goto params_done;

			/*Open file*/
			ret = open_disk(s, drv, path,
					((msg->readonly) ? TD_RDONLY : 0));
			if (ret)
				goto params_done;

			/* register under the controller's cookie; the tap
			 * fd arrives later via CTLMSG_NEWDEV */
			entry = add_fd_entry(0, s);
			entry->cookie = msg->cookie;
			DPRINTF("Entered cookie %d\n", entry->cookie);

			memset(buf, 0x00, MSG_SIZE);

		params_done:
			/* reply with image geometry on success,
			 * CTLMSG_IMG_FAIL otherwise */
			if (ret == 0) {
				msglen = sizeof(msg_hdr_t) + sizeof(image_t);
				msg->type = CTLMSG_IMG;
				img = (image_t *)(buf + sizeof(msg_hdr_t));
				img->size = s->size;
				img->secsize = s->sector_size;
				img->info = s->info;
			} else {
				msglen = sizeof(msg_hdr_t);
				msg->type = CTLMSG_IMG_FAIL;
				msg->len = msglen;
			}
			len = write(fds[WRITE], buf, msglen);
			free(path);
			return 1;

		case CTLMSG_NEWDEV:
			/* attach the actual tap device for a registered
			 * cookie */
			msg_dev = (msg_newdev_t *)(buf + sizeof(msg_hdr_t));

			s = get_state(msg->cookie);
			DPRINTF("Retrieving state, cookie %d.....[%s]\n",
				msg->cookie, (s == NULL ? "FAIL":"OK"));
			if (s != NULL) {
				ret = ((map_new_dev(s, msg_dev->devnum)
					== msg_dev->devnum ? 0: -1));
				/* NOTE(review): incremented even when
				 * map_new_dev failed — confirm intended */
				connected_disks++;
			}

			memset(buf, 0x00, MSG_SIZE);
			msglen = sizeof(msg_hdr_t);
			msg->type = (ret == 0 ? CTLMSG_NEWDEV_RSP
					      : CTLMSG_NEWDEV_FAIL);
			msg->len = msglen;

			len = write(fds[WRITE], buf, msglen);
			return 1;

		case CTLMSG_CLOSE:
			s = get_state(msg->cookie);
			if (s) unmap_disk(s);

			connected_disks--;
			/* clears `run` if this was the last disk */
			sig_handler(SIGINT);

			return 1;

		case CTLMSG_PID:
			/* report our pid back to the controller */
			memset(buf, 0x00, MSG_SIZE);
			msglen = sizeof(msg_hdr_t) + sizeof(msg_pid_t);
			msg->type = CTLMSG_PID_RSP;
			msg->len = msglen;

			msg_pid = (msg_pid_t *)(buf + sizeof(msg_hdr_t));
			process = getpid();
			msg_pid->pid = process;

			len = write(fds[WRITE], buf, msglen);
			return 1;

		default:
			return 0;
		}
	}
	return 0;
}
493 static inline int write_rsp_to_ring(struct td_state *s, blkif_response_t *rsp)
494 {
495 tapdev_info_t *info = s->ring_info;
496 blkif_response_t *rsp_d;
498 rsp_d = RING_GET_RESPONSE(&info->fe_ring, info->fe_ring.rsp_prod_pvt);
499 memcpy(rsp_d, rsp, sizeof(blkif_response_t));
500 info->fe_ring.rsp_prod_pvt++;
502 return 0;
503 }
505 static inline void kick_responses(struct td_state *s)
506 {
507 tapdev_info_t *info = s->ring_info;
509 if (info->fe_ring.rsp_prod_pvt != info->fe_ring.sring->rsp_prod)
510 {
511 RING_PUSH_RESPONSES(&info->fe_ring);
512 ioctl(info->fd, BLKTAP_IOCTL_KICK_FE);
513 }
514 }
516 void io_done(struct disk_driver *dd, int sid)
517 {
518 struct tap_disk *drv = dd->drv;
520 if (!run) return; /*We have received signal to close*/
522 if (sid > MAX_IOFD || drv->td_do_callbacks(dd, sid) > 0)
523 kick_responses(dd->td_state);
525 return;
526 }
528 static inline uint64_t
529 segment_start(blkif_request_t *req, int sidx)
530 {
531 int i;
532 uint64_t start = req->sector_number;
534 for (i = 0; i < sidx; i++)
535 start += (req->seg[i].last_sect - req->seg[i].first_sect + 1);
537 return start;
538 }
/* Global send/response counters.
 * NOTE(review): not referenced anywhere in this file — possibly
 * debug leftovers; confirm before removing. */
uint64_t sends, responds;

/*
 * Per-slice I/O completion callback passed to the drivers.  Invoked
 * for each (sector, nr_secs) slice of pending request `idx`; `private`
 * smuggles the segment index as an integer.
 *
 * res is the driver result: 0 = success, BLK_NOT_ALLOCATED triggers a
 * copy-on-write read from the backing chain via do_cow_read(), any
 * other non-zero value marks the whole request failed.
 *
 * When the request's last outstanding sector completes (and it is no
 * longer mid-submission), the ring request slot is rewritten in place
 * as a blkif response and queued on the frontend ring.
 *
 * Returns the number of responses queued (0 or 1), or -EBUSY to tell
 * the submission path to retry later.
 */
int send_responses(struct disk_driver *dd, int res,
		   uint64_t sector, int nr_secs, int idx, void *private)
{
	pending_req_t   *preq;
	blkif_request_t *req;
	int responses_queued = 0;
	struct td_state *s = dd->td_state;
	blkif_t *blkif = s->blkif;
	int sidx = (int)(long)private, secs_done = nr_secs;

	if ( (idx > MAX_REQUESTS-1) )
	{
		DPRINTF("invalid index returned(%u)!\n", idx);
		return 0;
	}
	preq = &blkif->pending_list[idx];
	req  = &preq->req;

	if (res == BLK_NOT_ALLOCATED) {
		/* sparse region: satisfy from the parent image (or zeros) */
		res = do_cow_read(dd, req, sidx, sector, nr_secs);
		if (res >= 0) {
			secs_done = res; /* sectors completed synchronously */
			res = 0;
		} else
			secs_done = 0;
	}

	preq->secs_pending -= secs_done;

	if (res == -EBUSY && preq->submitting)
		return -EBUSY; /* propagate -EBUSY back to higher layers */
	if (res)
		preq->status = BLKIF_RSP_ERROR;

	if (!preq->submitting && preq->secs_pending == 0)
	{
		blkif_request_t tmp;
		blkif_response_t *rsp;

		/* the response overlays the request slot on the ring, so
		 * snapshot the request fields before overwriting them */
		tmp = preq->req;
		rsp = (blkif_response_t *)req;

		rsp->id = tmp.id;
		rsp->operation = tmp.operation;
		rsp->status = preq->status;

		write_rsp_to_ring(s, rsp);
		responses_queued++;
	}
	return responses_queued;
}
/*
 * Copy-on-write read fallback: fill the frontend buffer for the slice
 * [sector, sector + nr_secs) of segment sidx of req, which the
 * top-level image reported as unallocated.
 *
 * With no parent disk the slice is zero-filled and nr_secs (sectors
 * completed synchronously) is returned.  Otherwise the read is
 * reissued against the parent and completes asynchronously through
 * send_responses(); 0 is returned on successful queueing, or the
 * driver's negative error code.
 */
int do_cow_read(struct disk_driver *dd, blkif_request_t *req,
		int sidx, uint64_t sector, int nr_secs)
{
	char *page;
	int ret, early;
	uint64_t seg_start, seg_end;
	struct td_state  *s = dd->td_state;
	tapdev_info_t *info = s->ring_info;
	struct disk_driver *parent = dd->next; /* next in backing chain */

	/* the slice must lie entirely within segment sidx */
	seg_start = segment_start(req, sidx);
	seg_end   = seg_start + req->seg[sidx].last_sect + 1;

	ASSERT(sector >= seg_start && sector + nr_secs <= seg_end);

	/* locate the slice inside the shared mapped request page */
	page  = (char *)MMAP_VADDR(info->vstart,
				   (unsigned long)req->id, sidx);
	page += (req->seg[sidx].first_sect << SECTOR_SHIFT);
	page += ((sector - seg_start) << SECTOR_SHIFT);

	if (!parent) {
		/* no backing file: unallocated data reads as zeros */
		memset(page, 0, nr_secs << SECTOR_SHIFT);
		return nr_secs;
	}

	/* reissue request to backing file */
	ret = parent->drv->td_queue_read(parent, sector, nr_secs,
					 page, send_responses,
					 req->id, (void *)(long)sidx);
	if (ret > 0)
		parent->early += ret; /* completed synchronously */

	return ((ret >= 0) ? 0 : ret);
}
/*
 * Drain new requests from the shared frontend ring of s and queue each
 * segment as a read or write on the top-level disk driver.
 *
 * If a driver returns -EBUSY the current request is stashed in
 * info->busy and the ring consumer index is rewound, so the next call
 * resumes the same request from the failed segment.  After the batch,
 * td_submit is invoked for every disk in the chain and any synchronous
 * ("early") completions are flushed through io_done().
 */
static void get_io_request(struct td_state *s)
{
	RING_IDX rp, rc, j, i;
	blkif_request_t *req;
	int idx, nsects, ret;
	uint64_t sector_nr;
	char *page;
	int early = 0; /* count early completions */
	struct disk_driver *dd = s->disks;
	struct tap_disk *drv = dd->drv;
	blkif_t *blkif = s->blkif;
	tapdev_info_t *info = s->ring_info;
	int page_size = getpagesize();

	if (!run) return; /*We have received signal to close*/

	rp = info->fe_ring.sring->req_prod;
	rmb(); /* see the producer index before reading request contents */
	for (j = info->fe_ring.req_cons; j != rp; j++)
	{
		int done = 0, start_seg = 0;

		req = NULL;
		req = RING_GET_REQUEST(&info->fe_ring, j);
		++info->fe_ring.req_cons;

		if (req == NULL) continue;

		idx = req->id;

		if (info->busy.req) {
			/* continue where we left off last time */
			ASSERT(info->busy.req == req);
			start_seg = info->busy.seg_idx;
			sector_nr = segment_start(req, start_seg);
			info->busy.seg_idx = 0;
			info->busy.req = NULL;
		} else {
			/* fresh request: snapshot it into the pending list */
			ASSERT(blkif->pending_list[idx].secs_pending == 0);
			memcpy(&blkif->pending_list[idx].req,
			       req, sizeof(*req));
			blkif->pending_list[idx].status = BLKIF_RSP_OKAY;
			blkif->pending_list[idx].submitting = 1;
			sector_nr = req->sector_number;
		}

		/* reject writes to read-only disks */
		if ((dd->flags & TD_RDONLY) &&
		    (req->operation == BLKIF_OP_WRITE)) {
			blkif->pending_list[idx].status = BLKIF_RSP_ERROR;
			goto send_response;
		}

		for (i = start_seg; i < req->nr_segments; i++) {
			nsects = req->seg[i].last_sect -
				 req->seg[i].first_sect + 1;

			/* skip segments that overrun the page or are empty */
			if ((req->seg[i].last_sect >= page_size >> 9) ||
			    (nsects <= 0))
				continue;

			page  = (char *)MMAP_VADDR(info->vstart,
						   (unsigned long)req->id, i);
			page += (req->seg[i].first_sect << SECTOR_SHIFT);

			/* out-of-range sector: log and skip the segment */
			if (sector_nr >= s->size) {
				DPRINTF("Sector request failed:\n");
				DPRINTF("%s request, idx [%d,%d] size [%llu], "
					"sector [%llu,%llu]\n",
					(req->operation == BLKIF_OP_WRITE ?
					 "WRITE" : "READ"),
					idx,i,
					(long long unsigned)
						nsects<<SECTOR_SHIFT,
					(long long unsigned)
						sector_nr<<SECTOR_SHIFT,
					(long long unsigned) sector_nr);
				continue;
			}

			blkif->pending_list[idx].secs_pending += nsects;

			switch (req->operation)
			{
			case BLKIF_OP_WRITE:
				ret = drv->td_queue_write(dd, sector_nr,
							  nsects, page,
							  send_responses,
							  idx, (void *)(long)i);
				if (ret > 0) dd->early += ret;
				else if (ret == -EBUSY) {
					/* put req back on queue */
					--info->fe_ring.req_cons;
					info->busy.req = req;
					info->busy.seg_idx = i;
					goto out;
				}
				break;
			case BLKIF_OP_READ:
				ret = drv->td_queue_read(dd, sector_nr,
							 nsects, page,
							 send_responses,
							 idx, (void *)(long)i);
				if (ret > 0) dd->early += ret;
				else if (ret == -EBUSY) {
					/* put req back on queue */
					--info->fe_ring.req_cons;
					info->busy.req = req;
					info->busy.seg_idx = i;
					goto out;
				}
				break;
			default:
				DPRINTF("Unknown block operation\n");
				break;
			}
			sector_nr += nsects;
		}
	send_response:
		blkif->pending_list[idx].submitting = 0;
		/* force write_rsp_to_ring for synchronous case */
		if (blkif->pending_list[idx].secs_pending == 0)
			dd->early += send_responses(dd, 0, 0, 0, idx,
						    (void *)(long)0);
	}
 out:
	/*Batch done*/
	td_for_each_disk(s, dd) {
		dd->early += dd->drv->td_submit(dd);
		if (dd->early > 0) {
			io_done(dd, MAX_IOFD + 1);
			dd->early = 0;
		}
	}

	return;
}
766 int main(int argc, char *argv[])
767 {
768 int len, msglen, ret;
769 char *p, *buf;
770 fd_set readfds, writefds;
771 fd_list_entry_t *ptr;
772 struct td_state *s;
773 char openlogbuf[128];
775 if (argc != 3) usage();
777 daemonize();
779 snprintf(openlogbuf, sizeof(openlogbuf), "TAPDISK[%d]", getpid());
780 openlog(openlogbuf, LOG_CONS|LOG_ODELAY, LOG_DAEMON);
781 /*Setup signal handlers*/
782 signal (SIGBUS, sig_handler);
783 signal (SIGINT, sig_handler);
785 /*Open the control channel*/
786 fds[READ] = open(argv[1],O_RDWR|O_NONBLOCK);
787 fds[WRITE] = open(argv[2],O_RDWR|O_NONBLOCK);
789 if ( (fds[READ] < 0) || (fds[WRITE] < 0) )
790 {
791 DPRINTF("FD open failed [%d,%d]\n", fds[READ], fds[WRITE]);
792 exit(-1);
793 }
795 buf = calloc(MSG_SIZE, 1);
797 if (buf == NULL)
798 {
799 DPRINTF("ERROR: allocating memory.\n");
800 exit(-1);
801 }
803 while (run)
804 {
805 ret = 0;
806 FD_ZERO(&readfds);
807 FD_SET(fds[READ], &readfds);
808 maxfds = fds[READ];
810 /*Set all tap fds*/
811 LOCAL_FD_SET(&readfds);
813 /*Wait for incoming messages*/
814 ret = select(maxfds + 1, &readfds, (fd_set *) 0,
815 (fd_set *) 0, NULL);
817 if (ret > 0)
818 {
819 ptr = fd_start;
820 while (ptr != NULL) {
821 int progress_made = 0;
822 struct disk_driver *dd;
823 tapdev_info_t *info = ptr->s->ring_info;
825 td_for_each_disk(ptr->s, dd) {
826 if (dd->io_fd[READ] &&
827 FD_ISSET(dd->io_fd[READ],
828 &readfds)) {
829 io_done(dd, READ);
830 progress_made = 1;
831 }
832 }
834 /* completed io from above may have
835 * queued new requests on chained disks */
836 if (progress_made) {
837 td_for_each_disk(ptr->s, dd) {
838 dd->early +=
839 dd->drv->td_submit(dd);
840 if (dd->early > 0) {
841 io_done(dd,
842 MAX_IOFD + 1);
843 dd->early = 0;
844 }
845 }
846 }
848 if (FD_ISSET(ptr->tap_fd, &readfds) ||
849 (info->busy.req && progress_made))
850 get_io_request(ptr->s);
852 ptr = ptr->next;
853 }
855 if (FD_ISSET(fds[READ], &readfds))
856 read_msg(buf);
857 }
858 }
859 free(buf);
860 close(fds[READ]);
861 close(fds[WRITE]);
863 ptr = fd_start;
864 while (ptr != NULL) {
865 s = ptr->s;
867 unmap_disk(s);
868 free(s->blkif);
869 free(s->ring_info);
870 free(s);
871 close(ptr->tap_fd);
872 ptr = ptr->next;
873 }
874 closelog();
876 return 0;
877 }