ia64/xen-unstable

view extras/mini-os/fs-front.c @ 18114:ed5481094c10

mini-os: fix FS frontend index reading

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Jul 21 09:38:33 2008 +0100 (2008-07-21)
parents 25f125425f0f
children 6314450a73c7
line source
1 /******************************************************************************
2 * fs-front.c
3 *
4 * Frontend driver for FS split device driver.
5 *
6 * Copyright (c) 2007, Grzegorz Milos, <gm281@cam.ac.uk>.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to
10 * deal in the Software without restriction, including without limitation the
11 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
12 * sell copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
27 #undef NDEBUG
28 #include <os.h>
29 #include <list.h>
30 #include <xmalloc.h>
31 #include <xenbus.h>
32 #include <gnttab.h>
33 #include <events.h>
34 #include <xen/io/fsif.h>
35 #include <fs.h>
36 #include <sched.h>
/* UP mini-os: preemption control is a no-op, kept for documentation of the
 * critical regions in the ring protocol below. */
#define preempt_disable()
#define preempt_enable()
/* All cmpxchg uses here must be the fully-synchronised variant, since the
 * ring indices are shared with the backend domain. */
#define cmpxchg(p,o,n) synch_cmpxchg(p,o,n)

#ifdef FS_DEBUG
#define DEBUG(_f, _a...) \
    printk("MINI_OS(file=fs-front.c, line=%d) " _f "\n", __LINE__, ## _a)
#else
#define DEBUG(_f, _a...)    ((void)0)
#endif

struct fs_request;
/* Default import used by the libc-level FS wrappers (set in
 * init_fs_frontend). */
struct fs_import *fs_import;
54 /******************************************************************************/
55 /* RING REQUEST/RESPONSES HANDLING */
56 /******************************************************************************/
58 struct fs_request
59 {
60 void *page;
61 grant_ref_t gref;
62 struct thread *thread; /* Thread blocked on this request */
63 struct fsif_response shadow_rsp; /* Response copy writen by the
64 interrupt handler */
65 };
/* Ring operations:
 * FSIF ring is used differently to Linux-like split devices. This stems from
 * the fact that no I/O request queue is present. The use of some of the macros
 * defined in ring.h is not allowed, in particular:
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY cannot be used.
 *
 * The protocol used for FSIF ring is described below:
 *
 * In order to reserve a request the frontend:
 * a) saves current frontend_ring->req_prod_pvt into a local variable
 * b) checks that there are free requests using the local req_prod_pvt
 * c) tries to reserve the request using cmpxchg on frontend_ring->req_prod_pvt
 *    if cmpxchg fails, it means that someone reserved the request, start from
 *    a)
 *
 * In order to commit a request to the shared ring:
 * a) cmpxchg shared_ring->req_prod from local req_prod_pvt to req_prod_pvt+1
 *    Loop if unsuccessful.
 * NOTE: Request should be committed to the shared ring as quickly as possible,
 *       because otherwise other threads might busy loop trying to commit next
 *       requests. It also follows that preemption should be disabled, if
 *       possible, for the duration of the request construction.
 */

/* Number of free requests (for use on front side only). */
#define FS_RING_FREE_REQUESTS(_r, _req_prod_pvt)                              \
    (RING_SIZE(_r) - (_req_prod_pvt - (_r)->rsp_cons))
97 static RING_IDX reserve_fsif_request(struct fs_import *import)
98 {
99 RING_IDX idx;
101 down(&import->reqs_sem);
102 preempt_disable();
103 again:
104 /* We will attempt to reserve slot idx */
105 idx = import->ring.req_prod_pvt;
106 ASSERT (FS_RING_FREE_REQUESTS(&import->ring, idx));
107 /* Attempt to reserve */
108 if(cmpxchg(&import->ring.req_prod_pvt, idx, idx+1) != idx)
109 goto again;
111 return idx;
112 }
114 static void commit_fsif_request(struct fs_import *import, RING_IDX idx)
115 {
116 while(cmpxchg(&import->ring.sring->req_prod, idx, idx+1) != idx)
117 {
118 printk("Failed to commit a request: req_prod=%d, idx=%d\n",
119 import->ring.sring->req_prod, idx);
120 }
121 preempt_enable();
123 /* NOTE: we cannot do anything clever about rsp_event, to hold off
124 * notifications, because we don't know if we are a single request (in which
125 * case we have to notify always), or a part of a larger request group
126 * (when, in some cases, notification isn't required) */
127 notify_remote_via_evtchn(import->local_port);
128 }
/* Lock-free push of 'id' onto the freelist.  freelist[0] is the head;
 * freelist[id + 1] stores the id that follows 'id' in the list. */
static inline void add_id_to_freelist(unsigned int id, unsigned short* freelist)
{
    unsigned int old_id, new_id;

again:
    old_id = freelist[0];
    /* Note: temporal inconsistency, since freelist[0] can be changed by someone
     * else, but we are a sole owner of freelist[id + 1], it's OK. */
    freelist[id + 1] = old_id;
    new_id = id;
    if(cmpxchg(&freelist[0], old_id, new_id) != old_id)
    {
        printk("Cmpxchg on freelist add failed.\n");
        goto again;
    }
}
/* Lock-free pop of an id from the freelist head.
 * Always call reserve_fsif_request(import) before this, to protect from
 * depletion.  Returns the popped id. */
static inline unsigned short get_id_from_freelist(unsigned short* freelist)
{
    unsigned int old_id, new_id;

again:
    old_id = freelist[0];
    new_id = freelist[old_id + 1];
    if(cmpxchg(&freelist[0], old_id, new_id) != old_id)
    {
        printk("Cmpxchg on freelist remove failed.\n");
        goto again;
    }

    return old_id;
}
167 /******************************************************************************/
168 /* END OF RING REQUEST/RESPONSES HANDLING */
169 /******************************************************************************/
173 /******************************************************************************/
174 /* INDIVIDUAL FILE OPERATIONS */
175 /******************************************************************************/
176 int fs_open(struct fs_import *import, char *file)
177 {
178 struct fs_request *fsr;
179 unsigned short priv_req_id;
180 RING_IDX back_req_id;
181 struct fsif_request *req;
182 int fd;
184 /* Prepare request for the backend */
185 back_req_id = reserve_fsif_request(import);
186 DEBUG("Backend request id=%d\n", back_req_id);
188 /* Prepare our private request structure */
189 priv_req_id = get_id_from_freelist(import->freelist);
190 DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
191 fsr = &import->requests[priv_req_id];
192 DEBUG("gref id=%d\n", fsr->gref);
193 fsr->thread = current;
194 sprintf(fsr->page, "%s", file);
196 req = RING_GET_REQUEST(&import->ring, back_req_id);
197 req->type = REQ_FILE_OPEN;
198 req->id = priv_req_id;
199 req->u.fopen.gref = fsr->gref;
201 /* Set blocked flag before commiting the request, thus avoiding missed
202 * response race */
203 block(current);
204 commit_fsif_request(import, back_req_id);
205 schedule();
207 /* Read the response */
208 fd = (int)fsr->shadow_rsp.ret_val;
209 DEBUG("The following FD returned: %d\n", fd);
210 add_id_to_freelist(priv_req_id, import->freelist);
212 return fd;
213 }
215 int fs_close(struct fs_import *import, int fd)
216 {
217 struct fs_request *fsr;
218 unsigned short priv_req_id;
219 RING_IDX back_req_id;
220 struct fsif_request *req;
221 int ret;
223 /* Prepare request for the backend */
224 back_req_id = reserve_fsif_request(import);
225 DEBUG("Backend request id=%d\n", back_req_id);
227 /* Prepare our private request structure */
228 priv_req_id = get_id_from_freelist(import->freelist);
229 DEBUG("Request id for fs_close call is: %d\n", priv_req_id);
230 fsr = &import->requests[priv_req_id];
231 fsr->thread = current;
233 req = RING_GET_REQUEST(&import->ring, back_req_id);
234 req->type = REQ_FILE_CLOSE;
235 req->id = priv_req_id;
236 req->u.fclose.fd = fd;
238 /* Set blocked flag before commiting the request, thus avoiding missed
239 * response race */
240 block(current);
241 commit_fsif_request(import, back_req_id);
242 schedule();
244 /* Read the response */
245 ret = (int)fsr->shadow_rsp.ret_val;
246 DEBUG("Close returned: %d\n", ret);
247 add_id_to_freelist(priv_req_id, import->freelist);
249 return ret;
250 }
252 ssize_t fs_read(struct fs_import *import, int fd, void *buf,
253 ssize_t len, ssize_t offset)
254 {
255 struct fs_request *fsr;
256 unsigned short priv_req_id;
257 RING_IDX back_req_id;
258 struct fsif_request *req;
259 ssize_t ret;
261 BUG_ON(len > PAGE_SIZE);
263 /* Prepare request for the backend */
264 back_req_id = reserve_fsif_request(import);
265 DEBUG("Backend request id=%d\n", back_req_id);
267 /* Prepare our private request structure */
268 priv_req_id = get_id_from_freelist(import->freelist);
269 DEBUG("Request id for fs_read call is: %d\n", priv_req_id);
270 fsr = &import->requests[priv_req_id];
271 DEBUG("gref=%d\n", fsr->gref);
272 fsr->thread = current;
273 memset(fsr->page, 0, PAGE_SIZE);
275 req = RING_GET_REQUEST(&import->ring, back_req_id);
276 req->type = REQ_FILE_READ;
277 req->id = priv_req_id;
278 req->u.fread.fd = fd;
279 req->u.fread.gref = fsr->gref;
280 req->u.fread.len = len;
281 req->u.fread.offset = offset;
283 /* Set blocked flag before commiting the request, thus avoiding missed
284 * response race */
285 block(current);
286 commit_fsif_request(import, back_req_id);
287 schedule();
289 /* Read the response */
290 ret = (ssize_t)fsr->shadow_rsp.ret_val;
291 DEBUG("The following ret value returned %d\n", ret);
292 if(ret > 0)
293 memcpy(buf, fsr->page, ret);
294 add_id_to_freelist(priv_req_id, import->freelist);
296 return ret;
297 }
299 ssize_t fs_write(struct fs_import *import, int fd, void *buf,
300 ssize_t len, ssize_t offset)
301 {
302 struct fs_request *fsr;
303 unsigned short priv_req_id;
304 RING_IDX back_req_id;
305 struct fsif_request *req;
306 ssize_t ret;
308 BUG_ON(len > PAGE_SIZE);
310 /* Prepare request for the backend */
311 back_req_id = reserve_fsif_request(import);
312 DEBUG("Backend request id=%d\n", back_req_id);
314 /* Prepare our private request structure */
315 priv_req_id = get_id_from_freelist(import->freelist);
316 DEBUG("Request id for fs_read call is: %d\n", priv_req_id);
317 fsr = &import->requests[priv_req_id];
318 DEBUG("gref=%d\n", fsr->gref);
319 fsr->thread = current;
320 memcpy(fsr->page, buf, len);
321 BUG_ON(len > PAGE_SIZE);
322 memset((char *)fsr->page + len, 0, PAGE_SIZE - len);
324 req = RING_GET_REQUEST(&import->ring, back_req_id);
325 req->type = REQ_FILE_WRITE;
326 req->id = priv_req_id;
327 req->u.fwrite.fd = fd;
328 req->u.fwrite.gref = fsr->gref;
329 req->u.fwrite.len = len;
330 req->u.fwrite.offset = offset;
332 /* Set blocked flag before commiting the request, thus avoiding missed
333 * response race */
334 block(current);
335 commit_fsif_request(import, back_req_id);
336 schedule();
338 /* Read the response */
339 ret = (ssize_t)fsr->shadow_rsp.ret_val;
340 DEBUG("The following ret value returned %d\n", ret);
341 add_id_to_freelist(priv_req_id, import->freelist);
343 return ret;
344 }
346 int fs_stat(struct fs_import *import,
347 int fd,
348 struct fsif_stat_response *stat)
349 {
350 struct fs_request *fsr;
351 unsigned short priv_req_id;
352 RING_IDX back_req_id;
353 struct fsif_request *req;
354 int ret;
356 /* Prepare request for the backend */
357 back_req_id = reserve_fsif_request(import);
358 DEBUG("Backend request id=%d\n", back_req_id);
360 /* Prepare our private request structure */
361 priv_req_id = get_id_from_freelist(import->freelist);
362 DEBUG("Request id for fs_stat call is: %d\n", priv_req_id);
363 fsr = &import->requests[priv_req_id];
364 DEBUG("gref=%d\n", fsr->gref);
365 fsr->thread = current;
366 memset(fsr->page, 0, PAGE_SIZE);
368 req = RING_GET_REQUEST(&import->ring, back_req_id);
369 req->type = REQ_STAT;
370 req->id = priv_req_id;
371 req->u.fstat.fd = fd;
372 req->u.fstat.gref = fsr->gref;
374 /* Set blocked flag before commiting the request, thus avoiding missed
375 * response race */
376 block(current);
377 commit_fsif_request(import, back_req_id);
378 schedule();
380 /* Read the response */
381 ret = (int)fsr->shadow_rsp.ret_val;
382 DEBUG("Following ret from fstat: %d\n", ret);
383 memcpy(stat, fsr->page, sizeof(struct fsif_stat_response));
384 add_id_to_freelist(priv_req_id, import->freelist);
386 return ret;
387 }
389 int fs_truncate(struct fs_import *import,
390 int fd,
391 int64_t length)
392 {
393 struct fs_request *fsr;
394 unsigned short priv_req_id;
395 RING_IDX back_req_id;
396 struct fsif_request *req;
397 int ret;
399 /* Prepare request for the backend */
400 back_req_id = reserve_fsif_request(import);
401 DEBUG("Backend request id=%d\n", back_req_id);
403 /* Prepare our private request structure */
404 priv_req_id = get_id_from_freelist(import->freelist);
405 DEBUG("Request id for fs_truncate call is: %d\n", priv_req_id);
406 fsr = &import->requests[priv_req_id];
407 fsr->thread = current;
409 req = RING_GET_REQUEST(&import->ring, back_req_id);
410 req->type = REQ_FILE_TRUNCATE;
411 req->id = priv_req_id;
412 req->u.ftruncate.fd = fd;
413 req->u.ftruncate.length = length;
415 /* Set blocked flag before commiting the request, thus avoiding missed
416 * response race */
417 block(current);
418 commit_fsif_request(import, back_req_id);
419 schedule();
421 /* Read the response */
422 ret = (int)fsr->shadow_rsp.ret_val;
423 DEBUG("Following ret from ftruncate: %d\n", ret);
424 add_id_to_freelist(priv_req_id, import->freelist);
426 return ret;
427 }
429 int fs_remove(struct fs_import *import, char *file)
430 {
431 struct fs_request *fsr;
432 unsigned short priv_req_id;
433 RING_IDX back_req_id;
434 struct fsif_request *req;
435 int ret;
437 /* Prepare request for the backend */
438 back_req_id = reserve_fsif_request(import);
439 DEBUG("Backend request id=%d\n", back_req_id);
441 /* Prepare our private request structure */
442 priv_req_id = get_id_from_freelist(import->freelist);
443 DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
444 fsr = &import->requests[priv_req_id];
445 DEBUG("gref=%d\n", fsr->gref);
446 fsr->thread = current;
447 sprintf(fsr->page, "%s", file);
449 req = RING_GET_REQUEST(&import->ring, back_req_id);
450 req->type = REQ_REMOVE;
451 req->id = priv_req_id;
452 req->u.fremove.gref = fsr->gref;
454 /* Set blocked flag before commiting the request, thus avoiding missed
455 * response race */
456 block(current);
457 commit_fsif_request(import, back_req_id);
458 schedule();
460 /* Read the response */
461 ret = (int)fsr->shadow_rsp.ret_val;
462 DEBUG("The following ret: %d\n", ret);
463 add_id_to_freelist(priv_req_id, import->freelist);
465 return ret;
466 }
469 int fs_rename(struct fs_import *import,
470 char *old_file_name,
471 char *new_file_name)
472 {
473 struct fs_request *fsr;
474 unsigned short priv_req_id;
475 RING_IDX back_req_id;
476 struct fsif_request *req;
477 int ret;
478 char old_header[] = "old: ";
479 char new_header[] = "new: ";
481 /* Prepare request for the backend */
482 back_req_id = reserve_fsif_request(import);
483 DEBUG("Backend request id=%d\n", back_req_id);
485 /* Prepare our private request structure */
486 priv_req_id = get_id_from_freelist(import->freelist);
487 DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
488 fsr = &import->requests[priv_req_id];
489 DEBUG("gref=%d\n", fsr->gref);
490 fsr->thread = current;
491 sprintf(fsr->page, "%s%s%c%s%s",
492 old_header, old_file_name, '\0', new_header, new_file_name);
494 req = RING_GET_REQUEST(&import->ring, back_req_id);
495 req->type = REQ_RENAME;
496 req->id = priv_req_id;
497 req->u.frename.gref = fsr->gref;
498 req->u.frename.old_name_offset = strlen(old_header);
499 req->u.frename.new_name_offset = strlen(old_header) +
500 strlen(old_file_name) +
501 strlen(new_header) +
502 1 /* Accouning for the additional
503 end of string character */;
505 /* Set blocked flag before commiting the request, thus avoiding missed
506 * response race */
507 block(current);
508 commit_fsif_request(import, back_req_id);
509 schedule();
511 /* Read the response */
512 ret = (int)fsr->shadow_rsp.ret_val;
513 DEBUG("The following ret: %d\n", ret);
514 add_id_to_freelist(priv_req_id, import->freelist);
516 return ret;
517 }
519 int fs_create(struct fs_import *import, char *name,
520 int8_t directory, int32_t mode)
521 {
522 struct fs_request *fsr;
523 unsigned short priv_req_id;
524 RING_IDX back_req_id;
525 struct fsif_request *req;
526 int ret;
528 /* Prepare request for the backend */
529 back_req_id = reserve_fsif_request(import);
530 DEBUG("Backend request id=%d\n", back_req_id);
532 /* Prepare our private request structure */
533 priv_req_id = get_id_from_freelist(import->freelist);
534 DEBUG("Request id for fs_create call is: %d\n", priv_req_id);
535 fsr = &import->requests[priv_req_id];
536 DEBUG("gref=%d\n", fsr->gref);
537 fsr->thread = current;
538 sprintf(fsr->page, "%s", name);
540 req = RING_GET_REQUEST(&import->ring, back_req_id);
541 req->type = REQ_CREATE;
542 req->id = priv_req_id;
543 req->u.fcreate.gref = fsr->gref;
544 req->u.fcreate.directory = directory;
545 req->u.fcreate.mode = mode;
547 /* Set blocked flag before commiting the request, thus avoiding missed
548 * response race */
549 block(current);
550 commit_fsif_request(import, back_req_id);
551 schedule();
553 /* Read the response */
554 ret = (int)fsr->shadow_rsp.ret_val;
555 DEBUG("The following ret: %d\n", ret);
556 add_id_to_freelist(priv_req_id, import->freelist);
558 return ret;
559 }
561 char** fs_list(struct fs_import *import, char *name,
562 int32_t offset, int32_t *nr_files, int *has_more)
563 {
564 struct fs_request *fsr;
565 unsigned short priv_req_id;
566 RING_IDX back_req_id;
567 struct fsif_request *req;
568 char **files, *current_file;
569 int i;
571 DEBUG("Different masks: NR_FILES=(%llx, %d), ERROR=(%llx, %d), HAS_MORE(%llx, %d)\n",
572 NR_FILES_MASK, NR_FILES_SHIFT, ERROR_MASK, ERROR_SHIFT, HAS_MORE_FLAG, HAS_MORE_SHIFT);
574 /* Prepare request for the backend */
575 back_req_id = reserve_fsif_request(import);
576 DEBUG("Backend request id=%d\n", back_req_id);
578 /* Prepare our private request structure */
579 priv_req_id = get_id_from_freelist(import->freelist);
580 DEBUG("Request id for fs_list call is: %d\n", priv_req_id);
581 fsr = &import->requests[priv_req_id];
582 DEBUG("gref=%d\n", fsr->gref);
583 fsr->thread = current;
584 sprintf(fsr->page, "%s", name);
586 req = RING_GET_REQUEST(&import->ring, back_req_id);
587 req->type = REQ_DIR_LIST;
588 req->id = priv_req_id;
589 req->u.flist.gref = fsr->gref;
590 req->u.flist.offset = offset;
592 /* Set blocked flag before commiting the request, thus avoiding missed
593 * response race */
594 block(current);
595 commit_fsif_request(import, back_req_id);
596 schedule();
598 /* Read the response */
599 *nr_files = (fsr->shadow_rsp.ret_val & NR_FILES_MASK) >> NR_FILES_SHIFT;
600 files = NULL;
601 if(*nr_files <= 0) goto exit;
602 files = malloc(sizeof(char*) * (*nr_files));
603 current_file = fsr->page;
604 for(i=0; i<*nr_files; i++)
605 {
606 files[i] = strdup(current_file);
607 current_file += strlen(current_file) + 1;
608 }
609 if(has_more != NULL)
610 *has_more = fsr->shadow_rsp.ret_val & HAS_MORE_FLAG;
611 add_id_to_freelist(priv_req_id, import->freelist);
612 exit:
613 return files;
614 }
616 int fs_chmod(struct fs_import *import, int fd, int32_t mode)
617 {
618 struct fs_request *fsr;
619 unsigned short priv_req_id;
620 RING_IDX back_req_id;
621 struct fsif_request *req;
622 int ret;
624 /* Prepare request for the backend */
625 back_req_id = reserve_fsif_request(import);
626 DEBUG("Backend request id=%d\n", back_req_id);
628 /* Prepare our private request structure */
629 priv_req_id = get_id_from_freelist(import->freelist);
630 DEBUG("Request id for fs_chmod call is: %d\n", priv_req_id);
631 fsr = &import->requests[priv_req_id];
632 fsr->thread = current;
634 req = RING_GET_REQUEST(&import->ring, back_req_id);
635 req->type = REQ_CHMOD;
636 req->id = priv_req_id;
637 req->u.fchmod.fd = fd;
638 req->u.fchmod.mode = mode;
640 /* Set blocked flag before commiting the request, thus avoiding missed
641 * response race */
642 block(current);
643 commit_fsif_request(import, back_req_id);
644 schedule();
646 /* Read the response */
647 ret = (int)fsr->shadow_rsp.ret_val;
648 DEBUG("The following returned: %d\n", ret);
649 add_id_to_freelist(priv_req_id, import->freelist);
651 return ret;
652 }
654 int64_t fs_space(struct fs_import *import, char *location)
655 {
656 struct fs_request *fsr;
657 unsigned short priv_req_id;
658 RING_IDX back_req_id;
659 struct fsif_request *req;
660 int64_t ret;
662 /* Prepare request for the backend */
663 back_req_id = reserve_fsif_request(import);
664 DEBUG("Backend request id=%d\n", back_req_id);
666 /* Prepare our private request structure */
667 priv_req_id = get_id_from_freelist(import->freelist);
668 DEBUG("Request id for fs_space is: %d\n", priv_req_id);
669 fsr = &import->requests[priv_req_id];
670 DEBUG("gref=%d\n", fsr->gref);
671 fsr->thread = current;
672 sprintf(fsr->page, "%s", location);
674 req = RING_GET_REQUEST(&import->ring, back_req_id);
675 req->type = REQ_FS_SPACE;
676 req->id = priv_req_id;
677 req->u.fspace.gref = fsr->gref;
679 /* Set blocked flag before commiting the request, thus avoiding missed
680 * response race */
681 block(current);
682 commit_fsif_request(import, back_req_id);
683 schedule();
685 /* Read the response */
686 ret = (int64_t)fsr->shadow_rsp.ret_val;
687 DEBUG("The following returned: %lld\n", ret);
688 add_id_to_freelist(priv_req_id, import->freelist);
690 return ret;
691 }
693 int fs_sync(struct fs_import *import, int fd)
694 {
695 struct fs_request *fsr;
696 unsigned short priv_req_id;
697 RING_IDX back_req_id;
698 struct fsif_request *req;
699 int ret;
701 /* Prepare request for the backend */
702 back_req_id = reserve_fsif_request(import);
703 DEBUG("Backend request id=%d\n", back_req_id);
705 /* Prepare our private request structure */
706 priv_req_id = get_id_from_freelist(import->freelist);
707 DEBUG("Request id for fs_sync call is: %d\n", priv_req_id);
708 fsr = &import->requests[priv_req_id];
709 fsr->thread = current;
711 req = RING_GET_REQUEST(&import->ring, back_req_id);
712 req->type = REQ_FILE_SYNC;
713 req->id = priv_req_id;
714 req->u.fsync.fd = fd;
716 /* Set blocked flag before commiting the request, thus avoiding missed
717 * response race */
718 block(current);
719 commit_fsif_request(import, back_req_id);
720 schedule();
722 /* Read the response */
723 ret = (int)fsr->shadow_rsp.ret_val;
724 DEBUG("Close returned: %d\n", ret);
725 add_id_to_freelist(priv_req_id, import->freelist);
727 return ret;
728 }
731 /******************************************************************************/
732 /* END OF INDIVIDUAL FILE OPERATIONS */
733 /******************************************************************************/
736 static void fsfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
737 {
738 struct fs_import *import = (struct fs_import*)data;
739 static int in_irq = 0;
740 RING_IDX cons, rp;
741 int more;
743 /* Check for non-reentrance */
744 BUG_ON(in_irq);
745 in_irq = 1;
747 DEBUG("Event from import [%d:%d].\n", import->dom_id, import->export_id);
748 moretodo:
749 rp = import->ring.sring->rsp_prod;
750 rmb(); /* Ensure we see queued responses up to 'rp'. */
751 cons = import->ring.rsp_cons;
752 while (cons != rp)
753 {
754 struct fsif_response *rsp;
755 struct fs_request *req;
757 rsp = RING_GET_RESPONSE(&import->ring, cons);
758 DEBUG("Response at idx=%d to request id=%d, ret_val=%lx\n",
759 cons, rsp->id, rsp->ret_val);
760 req = &import->requests[rsp->id];
761 memcpy(&req->shadow_rsp, rsp, sizeof(struct fsif_response));
762 DEBUG("Waking up: %s\n", req->thread->name);
763 wake(req->thread);
765 cons++;
766 up(&import->reqs_sem);
767 }
769 import->ring.rsp_cons = rp;
770 RING_FINAL_CHECK_FOR_RESPONSES(&import->ring, more);
771 if(more) goto moretodo;
773 in_irq = 0;
774 }
776 /* Small utility function to figure out our domain id */
777 static domid_t get_self_id(void)
778 {
779 char *dom_id;
780 domid_t ret;
782 BUG_ON(xenbus_read(XBT_NIL, "domid", &dom_id));
783 sscanf(dom_id, "%d", &ret);
785 return ret;
786 }
788 static void alloc_request_table(struct fs_import *import)
789 {
790 struct fs_request *requests;
791 int i;
793 BUG_ON(import->nr_entries <= 0);
794 printk("Allocating request array for import %d, nr_entries = %d.\n",
795 import->import_id, import->nr_entries);
796 requests = xmalloc_array(struct fs_request, import->nr_entries);
797 import->freelist = xmalloc_array(unsigned short, import->nr_entries + 1);
798 memset(import->freelist, 0, sizeof(unsigned short) * (import->nr_entries + 1));
799 for(i=0; i<import->nr_entries; i++)
800 {
801 /* TODO: that's a lot of memory */
802 requests[i].page = (void *)alloc_page();
803 requests[i].gref = gnttab_grant_access(import->dom_id,
804 virt_to_mfn(requests[i].page),
805 0);
806 //printk(" ===>> Page=%lx, gref=%d, mfn=%lx\n", requests[i].page, requests[i].gref, virt_to_mfn(requests[i].page));
807 add_id_to_freelist(i, import->freelist);
808 }
809 import->requests = requests;
810 }
813 /******************************************************************************/
814 /* FS TESTS */
815 /******************************************************************************/
818 void test_fs_import(void *data)
819 {
820 struct fs_import *import = (struct fs_import *)data;
821 int ret, fd, i;
822 int32_t nr_files;
823 char buffer[1024];
824 ssize_t offset;
825 char **files;
826 long ret64;
828 /* Sleep for 1s and then try to open a file */
829 msleep(1000);
830 ret = fs_create(import, "mini-os-created-directory", 1, 0777);
831 printk("Directory create: %d\n", ret);
833 ret = fs_create(import, "mini-os-created-directory/mini-os-created-file", 0, 0666);
834 printk("File create: %d\n", ret);
836 fd = fs_open(import, "mini-os-created-directory/mini-os-created-file");
837 printk("File descriptor: %d\n", fd);
838 if(fd < 0) return;
840 offset = 0;
841 for(i=0; i<10; i++)
842 {
843 sprintf(buffer, "Current time is: %lld\n", NOW());
844 ret = fs_write(import, fd, buffer, strlen(buffer), offset);
845 printk("Writen current time (%d)\n", ret);
846 if(ret < 0)
847 return;
848 offset += ret;
849 }
851 ret = fs_close(import, fd);
852 printk("Closed fd: %d, ret=%d\n", fd, ret);
854 printk("Listing files in /\n");
855 files = fs_list(import, "/", 0, &nr_files, NULL);
856 for(i=0; i<nr_files; i++)
857 printk(" files[%d] = %s\n", i, files[i]);
859 ret64 = fs_space(import, "/");
860 printk("Free space: %lld (=%lld Mb)\n", ret64, (ret64 >> 20));
862 }
864 #if 0
865 // char *content = (char *)alloc_page();
866 int fd, ret;
867 // int read;
868 char write_string[] = "\"test data written from minios\"";
869 struct fsif_stat_response stat;
870 char **files;
871 int32_t nr_files, i;
872 int64_t ret64;
875 fd = fs_open(import, "test-export-file");
876 // read = fs_read(import, fd, content, PAGE_SIZE, 0);
877 // printk("Read: %d bytes\n", read);
878 // content[read] = '\0';
879 // printk("Value: %s\n", content);
880 ret = fs_write(import, fd, write_string, strlen(write_string), 0);
881 printk("Ret after write: %d\n", ret);
882 ret = fs_stat(import, fd, &stat);
883 printk("Ret after stat: %d\n", ret);
884 printk(" st_mode=%o\n", stat.stat_mode);
885 printk(" st_uid =%d\n", stat.stat_uid);
886 printk(" st_gid =%d\n", stat.stat_gid);
887 printk(" st_size=%ld\n", stat.stat_size);
888 printk(" st_atime=%ld\n", stat.stat_atime);
889 printk(" st_mtime=%ld\n", stat.stat_mtime);
890 printk(" st_ctime=%ld\n", stat.stat_ctime);
891 ret = fs_truncate(import, fd, 30);
892 printk("Ret after truncate: %d\n", ret);
893 ret = fs_remove(import, "test-to-remove/test-file");
894 printk("Ret after remove: %d\n", ret);
895 ret = fs_remove(import, "test-to-remove");
896 printk("Ret after remove: %d\n", ret);
897 ret = fs_chmod(import, fd, 0700);
898 printk("Ret after chmod: %d\n", ret);
899 ret = fs_sync(import, fd);
900 printk("Ret after sync: %d\n", ret);
901 ret = fs_close(import, fd);
902 //ret = fs_rename(import, "test-export-file", "renamed-test-export-file");
903 //printk("Ret after rename: %d\n", ret);
904 ret = fs_create(import, "created-dir", 1, 0777);
905 printk("Ret after dir create: %d\n", ret);
906 ret = fs_create(import, "created-dir/created-file", 0, 0777);
907 printk("Ret after file create: %d\n", ret);
908 files = fs_list(import, "/", 15, &nr_files, NULL);
909 for(i=0; i<nr_files; i++)
910 printk(" files[%d] = %s\n", i, files[i]);
911 ret64 = fs_space(import, "created-dir");
912 printk("Ret after space: %lld\n", ret64);
914 #endif
917 /******************************************************************************/
918 /* END OF FS TESTS */
919 /******************************************************************************/
921 static int init_fs_import(struct fs_import *import)
922 {
923 char *err;
924 xenbus_transaction_t xbt;
925 char nodename[1024], r_nodename[1024], token[128], *message = NULL;
926 struct fsif_sring *sring;
927 int retry = 0;
928 domid_t self_id;
929 xenbus_event_queue events = NULL;
931 printk("Initialising FS fortend to backend dom %d\n", import->dom_id);
932 /* Allocate page for the shared ring */
933 sring = (struct fsif_sring*) alloc_page();
934 memset(sring, 0, PAGE_SIZE);
936 /* Init the shared ring */
937 SHARED_RING_INIT(sring);
939 /* Init private frontend ring */
940 FRONT_RING_INIT(&import->ring, sring, PAGE_SIZE);
941 import->nr_entries = import->ring.nr_ents;
943 /* Allocate table of requests */
944 alloc_request_table(import);
945 init_SEMAPHORE(&import->reqs_sem, import->nr_entries);
947 /* Grant access to the shared ring */
948 import->gnt_ref = gnttab_grant_access(import->dom_id, virt_to_mfn(sring), 0);
950 /* Allocate event channel */
951 BUG_ON(evtchn_alloc_unbound(import->dom_id,
952 fsfront_handler,
953 //ANY_CPU,
954 import,
955 &import->local_port));
956 unmask_evtchn(import->local_port);
959 self_id = get_self_id();
960 /* Write the frontend info to a node in our Xenbus */
961 sprintf(nodename, "/local/domain/%d/device/vfs/%d",
962 self_id, import->import_id);
964 again:
965 err = xenbus_transaction_start(&xbt);
966 if (err) {
967 printk("starting transaction\n");
968 }
970 err = xenbus_printf(xbt,
971 nodename,
972 "ring-ref",
973 "%u",
974 import->gnt_ref);
975 if (err) {
976 message = "writing ring-ref";
977 goto abort_transaction;
978 }
980 err = xenbus_printf(xbt,
981 nodename,
982 "event-channel",
983 "%u",
984 import->local_port);
985 if (err) {
986 message = "writing event-channel";
987 goto abort_transaction;
988 }
990 err = xenbus_printf(xbt, nodename, "state", STATE_READY, 0xdeadbeef);
993 err = xenbus_transaction_end(xbt, 0, &retry);
994 if (retry) {
995 goto again;
996 printk("completing transaction\n");
997 }
999 /* Now, when our node is prepared we write request in the exporting domain
1000 * */
1001 printk("Our own id is %d\n", self_id);
1002 sprintf(r_nodename,
1003 "/local/domain/%d/backend/vfs/exports/requests/%d/%d/frontend",
1004 import->dom_id, self_id, import->export_id);
1005 BUG_ON(xenbus_write(XBT_NIL, r_nodename, nodename));
1007 goto done;
1009 abort_transaction:
1010 xenbus_transaction_end(xbt, 1, &retry);
1012 done:
1014 #define WAIT_PERIOD 10 /* Wait period in ms */
1015 #define MAX_WAIT 10 /* Max number of WAIT_PERIODs */
1016 import->backend = NULL;
1017 sprintf(r_nodename, "%s/backend", nodename);
1019 for(retry = MAX_WAIT; retry > 0; retry--)
1021 xenbus_read(XBT_NIL, r_nodename, &import->backend);
1022 if(import->backend)
1024 printk("Backend found at %s\n", import->backend);
1025 break;
1027 msleep(WAIT_PERIOD);
1030 if(!import->backend)
1032 printk("No backend available.\n");
1033 /* TODO - cleanup datastructures/xenbus */
1034 return 0;
1036 sprintf(r_nodename, "%s/state", import->backend);
1037 sprintf(token, "fs-front-%d", import->import_id);
1038 /* The token will not be unique if multiple imports are inited */
1039 xenbus_watch_path_token(XBT_NIL, r_nodename, r_nodename, &events);
1040 xenbus_wait_for_value(r_nodename, STATE_READY, &events);
1041 xenbus_unwatch_path(XBT_NIL, r_nodename);
1042 printk("Backend ready.\n");
1044 //create_thread("fs-tester", test_fs_import, import);
1046 return 1;
1049 static void add_export(struct list_head *exports, unsigned int domid)
1051 char node[1024], **exports_list = NULL, *ret_msg;
1052 int j = 0;
1053 static int import_id = 0;
1055 sprintf(node, "/local/domain/%d/backend/vfs/exports", domid);
1056 ret_msg = xenbus_ls(XBT_NIL, node, &exports_list);
1057 if (ret_msg && strcmp(ret_msg, "ENOENT"))
1058 printk("couldn't read %s: %s\n", node, ret_msg);
1059 while(exports_list && exports_list[j])
1061 struct fs_import *import;
1062 int export_id = -1;
1064 sscanf(exports_list[j], "%d", &export_id);
1065 if(export_id >= 0)
1067 import = xmalloc(struct fs_import);
1068 import->dom_id = domid;
1069 import->export_id = export_id;
1070 import->import_id = import_id++;
1071 INIT_LIST_HEAD(&import->list);
1072 list_add(&import->list, exports);
1074 free(exports_list[j]);
1075 j++;
1077 if(exports_list)
1078 free(exports_list);
1079 if(ret_msg)
1080 free(ret_msg);
#if 0
/* (Disabled) Scan every domain under /local/domain for VFS exports and build
 * a list of fs_import entries.  Kept for reference; init_fs_frontend currently
 * probes only dom0.
 * NOTE(review): brace lines were lost in the annotate dump this file was
 * recovered from; block structure reconstructed — verify against upstream. */
static struct list_head* probe_exports(void)
{
    struct list_head *exports;
    char **node_list = NULL, *msg = NULL;
    int i = 0;

    exports = xmalloc(struct list_head);
    INIT_LIST_HEAD(exports);

    msg = xenbus_ls(XBT_NIL, "/local/domain", &node_list);
    if(msg)
    {
        printk("Could not list VFS exports (%s).\n", msg);
        goto exit;
    }

    while(node_list[i])
    {
        add_export(exports, atoi(node_list[i]));
        free(node_list[i]);
        i++;
    }

exit:
    if(msg)
        free(msg);
    if(node_list)
        free(node_list);
    return exports;
}
#endif
1116 LIST_HEAD(exports);
1118 void init_fs_frontend(void)
1120 struct list_head *entry;
1121 struct fs_import *import = NULL;
1122 printk("Initing FS fronend(s).\n");
1124 //exports = probe_exports();
1125 add_export(&exports, 0);
1126 list_for_each(entry, &exports)
1128 import = list_entry(entry, struct fs_import, list);
1129 printk("FS export [dom=%d, id=%d] found\n",
1130 import->dom_id, import->export_id);
1131 init_fs_import(import);
1134 fs_import = import;
1136 if (!fs_import)
1137 printk("No FS import\n");
1140 /* TODO: shutdown */