ia64/xen-unstable

view extras/mini-os/fs-front.c @ 18811:390ef36eb596

Remove Xen-private definitions from kexec public header.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Nov 19 13:13:39 2008 +0000 (2008-11-19)
parents 3ab0e76fb8e2
children 86db039882ea
line source
1 /******************************************************************************
2 * fs-front.c
3 *
4 * Frontend driver for FS split device driver.
5 *
6 * Copyright (c) 2007, Grzegorz Milos, <gm281@cam.ac.uk>.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to
10 * deal in the Software without restriction, including without limitation the
11 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
12 * sell copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
27 #undef NDEBUG
28 #include <os.h>
29 #include <list.h>
30 #include <xmalloc.h>
31 #include <xenbus.h>
32 #include <gnttab.h>
33 #include <events.h>
34 #include <xen/io/fsif.h>
35 #include <fs.h>
36 #include <sched.h>
/* Mini-OS runs a single CPU with cooperative threading, so preemption
 * control is a no-op; the names are kept so the ring-reservation code below
 * reads like its Linux counterpart. */
#define preempt_disable()
#define preempt_enable()
/* Atomic compare-and-swap used for lock-free ring/freelist updates. */
#define cmpxchg(p,o,n) synch_cmpxchg(p,o,n)

#ifdef FS_DEBUG
/* Verbose tracing with file/line prefix; compiled out unless FS_DEBUG. */
#define DEBUG(_f, _a...) \
    printk("MINI_OS(file=fs-front.c, line=%d) " _f "\n", __LINE__, ## _a)
#else
#define DEBUG(_f, _a...) ((void)0)
#endif
struct fs_request;
/* Default import (set up elsewhere); one fs_import per backend connection. */
struct fs_import *fs_import;
/* Allocate one page, grant 'domid' access to it, and stash page/gref in
 * 'req' so free_buffer_page() can undo both. Returns the page address. */
void *alloc_buffer_page(struct fs_request *req, domid_t domid, grant_ref_t *gref);
/* Revoke the grant and free the page recorded by alloc_buffer_page(). */
void free_buffer_page(struct fs_request *req);
56 /******************************************************************************/
57 /* RING REQUEST/RESPONSES HANDLING */
58 /******************************************************************************/
/* Per-request frontend bookkeeping, indexed by the request id carried on
 * the shared ring. */
struct fs_request
{
    void *private1;                        /* Specific to request type */
    void *private2;
    struct thread *thread;                 /* Thread blocked on this request */
    struct fsif_response shadow_rsp;       /* Response copy writen by the
                                              interrupt handler */
};
/* Scratch set of granted pages used to carry read/write payload data.
 * Sized for reads; init code asserts FSIF_NR_READ_GNTS == FSIF_NR_WRITE_GNTS
 * so the same structure serves fs_write too. */
struct fs_rw_gnts
{
    /* TODO 16 bit? */
    int count;
    grant_ref_t grefs[FSIF_NR_READ_GNTS];
    void *pages[FSIF_NR_READ_GNTS];
};
77 /* Ring operations:
78 * FSIF ring is used differently to Linux-like split devices. This stems from
79 * the fact that no I/O request queue is present. The use of some of the macros
80 * defined in ring.h is not allowed, in particular:
81 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY cannot be used.
82 *
83 * The protocol used for FSIF ring is described below:
84 *
85 * In order to reserve a request the frontend:
86 * a) saves current frontend_ring->req_prod_pvt into a local variable
87 * b) checks that there are free request using the local req_prod_pvt
88 * c) tries to reserve the request using cmpxchg on frontend_ring->req_prod_pvt
89 * if cmpxchg fails, it means that someone reserved the request, start from
90 * a)
91 *
92 * In order to commit a request to the shared ring:
93 * a) cmpxchg shared_ring->req_prod from local req_prod_pvt to req_prod_pvt+1
94 * Loop if unsuccessful.
95 * NOTE: Request should be commited to the shared ring as quickly as possible,
96 * because otherwise other threads might busy loop trying to commit next
97 * requests. It also follows that preemption should be disabled, if
98 * possible, for the duration of the request construction.
99 */
/* Number of free requests (for use on front side only). Computed against a
 * caller-supplied req_prod_pvt snapshot rather than the live field, so the
 * check and the subsequent cmpxchg reservation use the same value. */
#define FS_RING_FREE_REQUESTS(_r, _req_prod_pvt)                         \
    (RING_SIZE(_r) - (_req_prod_pvt - (_r)->rsp_cons))
/* Reserve one request slot on the frontend ring and return its index.
 * The semaphore bounds outstanding requests (so the ASSERT below cannot
 * fire); the cmpxchg loop arbitrates between concurrent reservers.
 * Leaves preemption disabled — commit_fsif_request() re-enables it. */
static RING_IDX reserve_fsif_request(struct fs_import *import)
{
    RING_IDX idx;

    down(&import->reqs_sem);
    preempt_disable();
again:
    /* We will attempt to reserve slot idx */
    idx = import->ring.req_prod_pvt;
    ASSERT (FS_RING_FREE_REQUESTS(&import->ring, idx));
    /* Attempt to reserve */
    if(cmpxchg(&import->ring.req_prod_pvt, idx, idx+1) != idx)
        goto again;

    return idx;
}
/* Publish request 'idx' to the shared ring and notify the backend.
 * Requests must be committed in order, so this spins until every earlier
 * reserved slot has been committed (req_prod == idx). Pairs with the
 * preempt_disable() taken in reserve_fsif_request(). */
static void commit_fsif_request(struct fs_import *import, RING_IDX idx)
{
    while(cmpxchg(&import->ring.sring->req_prod, idx, idx+1) != idx)
    {
        printk("Failed to commit a request: req_prod=%d, idx=%d\n",
               import->ring.sring->req_prod, idx);
    }
    preempt_enable();

    /* NOTE: we cannot do anything clever about rsp_event, to hold off
     * notifications, because we don't know if we are a single request (in which
     * case we have to notify always), or a part of a larger request group
     * (when, in some cases, notification isn't required) */
    notify_remote_via_evtchn(import->local_port);
}
/* Push 'id' onto the lock-free freelist. freelist[0] is the head; each
 * entry freelist[id + 1] stores the next id in the chain. */
static inline void add_id_to_freelist(unsigned int id,unsigned short* freelist)
{
    unsigned int old_id, new_id;

again:
    old_id = freelist[0];
    /* Note: temporal inconsistency, since freelist[0] can be changed by someone
     * else, but we are a sole owner of freelist[id + 1], it's OK. */
    freelist[id + 1] = old_id;
    new_id = id;
    if(cmpxchg(&freelist[0], old_id, new_id) != old_id)
    {
        printk("Cmpxchg on freelist add failed.\n");
        goto again;
    }
}
/* Pop and return an id from the lock-free freelist.
 * always call reserve_fsif_request(import) before this, to protect from
 * depletion (the semaphore guarantees the list is non-empty here). */
static inline unsigned short get_id_from_freelist(unsigned short* freelist)
{
    unsigned int old_id, new_id;

again:
    old_id = freelist[0];
    new_id = freelist[old_id + 1];
    if(cmpxchg(&freelist[0], old_id, new_id) != old_id)
    {
        printk("Cmpxchg on freelist remove failed.\n");
        goto again;
    }

    return old_id;
}
177 /******************************************************************************/
178 /* END OF RING REQUEST/RESPONSES HANDLING */
179 /******************************************************************************/
183 /******************************************************************************/
184 /* INDIVIDUAL FILE OPERATIONS */
185 /******************************************************************************/
/* Open 'file' on the backend and return its file descriptor (negative on
 * failure). The file name travels in a granted scratch page; the calling
 * thread blocks until the interrupt handler copies the response into
 * fsr->shadow_rsp and wakes it. */
int fs_open(struct fs_import *import, char *file)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    grant_ref_t gref;
    void *buffer;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int fd;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
    DEBUG("gref id=%d\n", gref);
    fsr->thread = current;
    sprintf(buffer, "%s", file);

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_FILE_OPEN;
    req->id = priv_req_id;
    req->u.fopen.gref = gref;

    /* Set blocked flag before commiting the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    fd = (int)fsr->shadow_rsp.ret_val;
    DEBUG("The following FD returned: %d\n", fd);
    free_buffer_page(fsr);
    add_id_to_freelist(priv_req_id, import->freelist);

    return fd;
}
/* Close backend file descriptor 'fd'; returns the backend's result code.
 * Blocks the calling thread until the response arrives. */
int fs_close(struct fs_import *import, int fd)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_close call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    fsr->thread = current;

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_FILE_CLOSE;
    req->id = priv_req_id;
    req->u.fclose.fd = fd;

    /* Set blocked flag before commiting the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("Close returned: %d\n", ret);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
266 ssize_t fs_read(struct fs_import *import, int fd, void *buf,
267 ssize_t len, ssize_t offset)
268 {
269 struct fs_request *fsr;
270 unsigned short priv_req_id;
271 struct fs_rw_gnts gnts;
272 RING_IDX back_req_id;
273 struct fsif_request *req;
274 ssize_t ret;
275 int i;
277 BUG_ON(len > PAGE_SIZE * FSIF_NR_READ_GNTS);
279 /* Prepare request for the backend */
280 back_req_id = reserve_fsif_request(import);
281 DEBUG("Backend request id=%d\n", back_req_id);
283 /* Prepare our private request structure */
284 priv_req_id = get_id_from_freelist(import->freelist);
285 DEBUG("Request id for fs_read call is: %d\n", priv_req_id);
286 fsr = &import->requests[priv_req_id];
288 req = RING_GET_REQUEST(&import->ring, back_req_id);
289 req->type = REQ_FILE_READ;
290 req->id = priv_req_id;
291 req->u.fread.fd = fd;
292 req->u.fread.len = len;
293 req->u.fread.offset = offset;
296 ASSERT(len > 0);
297 gnts.count = ((len - 1) / PAGE_SIZE) + 1;
298 for(i=0; i<gnts.count; i++)
299 {
300 gnts.pages[i] = (void *)alloc_page();
301 gnts.grefs[i] = gnttab_grant_access(import->dom_id,
302 virt_to_mfn(gnts.pages[i]),
303 0);
304 memset(gnts.pages[i], 0, PAGE_SIZE);
305 req->u.fread.grefs[i] = gnts.grefs[i];
306 }
307 fsr->thread = current;
309 /* Set blocked flag before commiting the request, thus avoiding missed
310 * response race */
311 block(current);
312 commit_fsif_request(import, back_req_id);
313 schedule();
315 /* Read the response */
316 ret = (ssize_t)fsr->shadow_rsp.ret_val;
317 DEBUG("The following ret value returned %d\n", ret);
318 if(ret > 0)
319 {
320 ssize_t to_copy = ret, current_copy;
321 for(i=0; i<gnts.count; i++)
322 {
323 gnttab_end_access(gnts.grefs[i]);
324 current_copy = to_copy > PAGE_SIZE ? PAGE_SIZE : to_copy;
325 if(current_copy > 0)
326 memcpy(buf, gnts.pages[i], current_copy);
327 to_copy -= current_copy;
328 buf = (char*) buf + current_copy;
329 free_page(gnts.pages[i]);
330 }
331 }
332 add_id_to_freelist(priv_req_id, import->freelist);
334 return ret;
335 }
337 ssize_t fs_write(struct fs_import *import, int fd, void *buf,
338 ssize_t len, ssize_t offset)
339 {
340 struct fs_request *fsr;
341 unsigned short priv_req_id;
342 struct fs_rw_gnts gnts;
343 RING_IDX back_req_id;
344 struct fsif_request *req;
345 ssize_t ret, to_copy;
346 int i;
348 BUG_ON(len > PAGE_SIZE * FSIF_NR_WRITE_GNTS);
350 /* Prepare request for the backend */
351 back_req_id = reserve_fsif_request(import);
352 DEBUG("Backend request id=%d\n", back_req_id);
354 /* Prepare our private request structure */
355 priv_req_id = get_id_from_freelist(import->freelist);
356 DEBUG("Request id for fs_read call is: %d\n", priv_req_id);
357 fsr = &import->requests[priv_req_id];
359 req = RING_GET_REQUEST(&import->ring, back_req_id);
360 req->type = REQ_FILE_WRITE;
361 req->id = priv_req_id;
362 req->u.fwrite.fd = fd;
363 req->u.fwrite.len = len;
364 req->u.fwrite.offset = offset;
366 ASSERT(len > 0);
367 gnts.count = ((len - 1) / PAGE_SIZE) + 1;
368 to_copy = len;
369 for(i=0; i<gnts.count; i++)
370 {
371 int current_copy = (to_copy > PAGE_SIZE ? PAGE_SIZE : to_copy);
372 gnts.pages[i] = (void *)alloc_page();
373 gnts.grefs[i] = gnttab_grant_access(import->dom_id,
374 virt_to_mfn(gnts.pages[i]),
375 0);
376 memcpy(gnts.pages[i], buf, current_copy);
377 if(current_copy < PAGE_SIZE)
378 memset((char *)gnts.pages[i] + current_copy,
379 0,
380 PAGE_SIZE - current_copy);
381 req->u.fwrite.grefs[i] = gnts.grefs[i];
382 to_copy -= current_copy;
383 buf = (char*) buf + current_copy;
384 }
385 fsr->thread = current;
387 /* Set blocked flag before commiting the request, thus avoiding missed
388 * response race */
389 block(current);
390 commit_fsif_request(import, back_req_id);
391 schedule();
393 /* Read the response */
394 ret = (ssize_t)fsr->shadow_rsp.ret_val;
395 DEBUG("The following ret value returned %d\n", ret);
396 for(i=0; i<gnts.count; i++)
397 {
398 gnttab_end_access(gnts.grefs[i]);
399 free_page(gnts.pages[i]);
400 }
401 add_id_to_freelist(priv_req_id, import->freelist);
403 return ret;
404 }
/* Stat backend fd, copying the backend's fstat payload into '*stat'.
 * Returns the backend's result code. Blocks until the response arrives. */
int fs_stat(struct fs_import *import,
            int fd,
            struct fsif_stat_response *stat)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_stat call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    fsr->thread = current;

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_STAT;
    req->id = priv_req_id;
    req->u.fstat.fd = fd;

    /* Set blocked flag before commiting the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("Following ret from fstat: %d\n", ret);
    memcpy(stat,
           &fsr->shadow_rsp.fstat,
           sizeof(struct fsif_stat_response));
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
/* Truncate backend fd to 'length' bytes; returns the backend's result
 * code. Blocks until the response arrives. */
int fs_truncate(struct fs_import *import,
                int fd,
                int64_t length)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_truncate call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    fsr->thread = current;

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_FILE_TRUNCATE;
    req->id = priv_req_id;
    req->u.ftruncate.fd = fd;
    req->u.ftruncate.length = length;

    /* Set blocked flag before commiting the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("Following ret from ftruncate: %d\n", ret);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
/* Remove 'file' on the backend; the path travels in a granted scratch
 * page. Returns the backend's result code. */
int fs_remove(struct fs_import *import, char *file)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    grant_ref_t gref;
    void *buffer;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
    DEBUG("gref=%d\n", gref);
    fsr->thread = current;
    sprintf(buffer, "%s", file);

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_REMOVE;
    req->id = priv_req_id;
    req->u.fremove.gref = gref;

    /* Set blocked flag before commiting the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("The following ret: %d\n", ret);
    free_buffer_page(fsr);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
/* Rename old_file_name to new_file_name on the backend. Both names are
 * packed into one granted page as "old: <old>\0new: <new>"; the request
 * carries byte offsets of each name within the page. Returns the
 * backend's result code. */
int fs_rename(struct fs_import *import,
              char *old_file_name,
              char *new_file_name)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    grant_ref_t gref;
    void *buffer;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;
    char old_header[] = "old: ";
    char new_header[] = "new: ";

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
    DEBUG("gref=%d\n", gref);
    fsr->thread = current;
    /* The explicit '\0' separates the two name records in the page. */
    sprintf(buffer, "%s%s%c%s%s",
            old_header, old_file_name, '\0', new_header, new_file_name);

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_RENAME;
    req->id = priv_req_id;
    req->u.frename.gref = gref;
    req->u.frename.old_name_offset = strlen(old_header);
    req->u.frename.new_name_offset = strlen(old_header) +
                                     strlen(old_file_name) +
                                     strlen(new_header) +
                                     1 /* Accouning for the additional
                                          end of string character */;

    /* Set blocked flag before commiting the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("The following ret: %d\n", ret);
    free_buffer_page(fsr);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
/* Create a file (directory != 0 => mkdir) named 'name' with 'mode' on the
 * backend; the name travels in a granted scratch page. Returns the
 * backend's result code. */
int fs_create(struct fs_import *import, char *name,
              int8_t directory, int32_t mode)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    grant_ref_t gref;
    void *buffer;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_create call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
    DEBUG("gref=%d\n", gref);
    fsr->thread = current;
    sprintf(buffer, "%s", name);

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_CREATE;
    req->id = priv_req_id;
    req->u.fcreate.gref = gref;
    req->u.fcreate.directory = directory;
    req->u.fcreate.mode = mode;

    /* Set blocked flag before commiting the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("The following ret: %d\n", ret);
    free_buffer_page(fsr);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
632 char** fs_list(struct fs_import *import, char *name,
633 int32_t offset, int32_t *nr_files, int *has_more)
634 {
635 struct fs_request *fsr;
636 unsigned short priv_req_id;
637 grant_ref_t gref;
638 void *buffer;
639 RING_IDX back_req_id;
640 struct fsif_request *req;
641 char **files, *current_file;
642 int i;
644 DEBUG("Different masks: NR_FILES=(%llx, %d), ERROR=(%llx, %d), HAS_MORE(%llx, %d)\n",
645 NR_FILES_MASK, NR_FILES_SHIFT, ERROR_MASK, ERROR_SHIFT, HAS_MORE_FLAG, HAS_MORE_SHIFT);
647 /* Prepare request for the backend */
648 back_req_id = reserve_fsif_request(import);
649 DEBUG("Backend request id=%d\n", back_req_id);
651 /* Prepare our private request structure */
652 priv_req_id = get_id_from_freelist(import->freelist);
653 DEBUG("Request id for fs_list call is: %d\n", priv_req_id);
654 fsr = &import->requests[priv_req_id];
655 buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
656 DEBUG("gref=%d\n", gref);
657 fsr->thread = current;
658 sprintf(buffer, "%s", name);
660 req = RING_GET_REQUEST(&import->ring, back_req_id);
661 req->type = REQ_DIR_LIST;
662 req->id = priv_req_id;
663 req->u.flist.gref = gref;
664 req->u.flist.offset = offset;
666 /* Set blocked flag before commiting the request, thus avoiding missed
667 * response race */
668 block(current);
669 commit_fsif_request(import, back_req_id);
670 schedule();
672 /* Read the response */
673 *nr_files = (fsr->shadow_rsp.ret_val & NR_FILES_MASK) >> NR_FILES_SHIFT;
674 files = NULL;
675 if(*nr_files <= 0) goto exit;
676 files = malloc(sizeof(char*) * (*nr_files));
677 current_file = buffer;
678 for(i=0; i<*nr_files; i++)
679 {
680 files[i] = strdup(current_file);
681 current_file += strlen(current_file) + 1;
682 }
683 if(has_more != NULL)
684 *has_more = fsr->shadow_rsp.ret_val & HAS_MORE_FLAG;
685 free_buffer_page(fsr);
686 add_id_to_freelist(priv_req_id, import->freelist);
687 exit:
688 return files;
689 }
/* Change mode bits of backend fd to 'mode'; returns the backend's result
 * code. Blocks until the response arrives. */
int fs_chmod(struct fs_import *import, int fd, int32_t mode)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_chmod call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    fsr->thread = current;

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_CHMOD;
    req->id = priv_req_id;
    req->u.fchmod.fd = fd;
    req->u.fchmod.mode = mode;

    /* Set blocked flag before commiting the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("The following returned: %d\n", ret);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
/* Query free space for 'location' on the backend; the path travels in a
 * granted scratch page. Returns the backend's 64-bit byte count (or a
 * negative error). */
int64_t fs_space(struct fs_import *import, char *location)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    grant_ref_t gref;
    void *buffer;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int64_t ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_space is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
    DEBUG("gref=%d\n", gref);
    fsr->thread = current;
    sprintf(buffer, "%s", location);

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_FS_SPACE;
    req->id = priv_req_id;
    req->u.fspace.gref = gref;

    /* Set blocked flag before commiting the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int64_t)fsr->shadow_rsp.ret_val;
    DEBUG("The following returned: %lld\n", ret);
    free_buffer_page(fsr);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
/* Flush backend fd to stable storage; returns the backend's result code.
 * Blocks until the response arrives. */
int fs_sync(struct fs_import *import, int fd)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_sync call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    fsr->thread = current;

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_FILE_SYNC;
    req->id = priv_req_id;
    req->u.fsync.fd = fd;

    /* Set blocked flag before commiting the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("Close returned: %d\n", ret);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
810 /******************************************************************************/
811 /* END OF INDIVIDUAL FILE OPERATIONS */
812 /******************************************************************************/
/* Allocate one page and grant 'domid' access to it. The page pointer and
 * grant ref are remembered in req->private1/private2 so free_buffer_page()
 * can undo both. Returns the page's virtual address. */
void *alloc_buffer_page(struct fs_request *req, domid_t domid, grant_ref_t *gref)
{
    void *page;

    page = (void *)alloc_page();
    *gref = gnttab_grant_access(domid, virt_to_mfn(page), 0);
    req->private1 = page;
    req->private2 = (void *)(uintptr_t)(*gref);

    return page;
}
/* Revoke the grant and free the page stored by alloc_buffer_page(). */
void free_buffer_page(struct fs_request *req)
{
    gnttab_end_access((grant_ref_t)(uintptr_t)req->private2);
    free_page(req->private1);
}
/* Event-channel interrupt handler: drain all pending responses, copy each
 * into the matching request's shadow_rsp, wake the blocked thread, and
 * release one semaphore count per consumed slot. Re-checks for late
 * responses via RING_FINAL_CHECK_FOR_RESPONSES before returning. */
static void fsfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
{
    struct fs_import *import = (struct fs_import*)data;
    static int in_irq = 0;
    RING_IDX cons, rp;
    int more;

    /* Check for non-reentrance */
    BUG_ON(in_irq);
    in_irq = 1;

    DEBUG("Event from import [%d:%d].\n", import->dom_id, import->export_id);
moretodo:
    rp = import->ring.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = import->ring.rsp_cons;
    while (cons != rp)
    {
        struct fsif_response *rsp;
        struct fs_request *req;

        rsp = RING_GET_RESPONSE(&import->ring, cons);
        DEBUG("Response at idx=%d to request id=%d, ret_val=%lx\n",
              cons, rsp->id, rsp->ret_val);
        req = &import->requests[rsp->id];
        /* Copy out before the slot can be reused by the producer. */
        memcpy(&req->shadow_rsp, rsp, sizeof(struct fsif_response));
        DEBUG("Waking up: %s\n", req->thread->name);
        wake(req->thread);

        cons++;
        up(&import->reqs_sem);
    }

    import->ring.rsp_cons = rp;
    RING_FINAL_CHECK_FOR_RESPONSES(&import->ring, more);
    if(more) goto moretodo;

    in_irq = 0;
}
872 /* Small utility function to figure out our domain id */
873 static domid_t get_self_id(void)
874 {
875 char *dom_id;
876 domid_t ret;
878 BUG_ON(xenbus_read(XBT_NIL, "domid", &dom_id));
879 sscanf(dom_id, "%d", &ret);
881 return ret;
882 }
/* Allocate the per-import request array and its freelist (nr_entries + 1
 * slots: index 0 is the list head), then seed the freelist with every
 * request id. */
static void alloc_request_table(struct fs_import *import)
{
    struct fs_request *requests;
    int i;

    BUG_ON(import->nr_entries <= 0);
    printk("Allocating request array for import %d, nr_entries = %d.\n",
           import->import_id, import->nr_entries);
    requests = xmalloc_array(struct fs_request, import->nr_entries);
    import->freelist = xmalloc_array(unsigned short, import->nr_entries + 1);
    memset(import->freelist, 0, sizeof(unsigned short) * (import->nr_entries + 1));
    for(i=0; i<import->nr_entries; i++)
        add_id_to_freelist(i, import->freelist);
    import->requests = requests;
}
901 /******************************************************************************/
902 /* FS TESTS */
903 /******************************************************************************/
906 void test_fs_import(void *data)
907 {
908 struct fs_import *import = (struct fs_import *)data;
909 int ret, fd, i, repeat_count;
910 int32_t nr_files;
911 char buffer[1024];
912 ssize_t offset;
913 char **files;
914 long ret64;
915 struct fsif_stat_response stat;
917 repeat_count = 10;
918 /* Sleep for 1s and then try to open a file */
919 msleep(1000);
920 again:
921 ret = fs_create(import, "mini-os-created-directory", 1, 0777);
922 printk("Directory create: %d\n", ret);
924 sprintf(buffer, "mini-os-created-directory/mini-os-created-file-%d",
925 repeat_count);
926 ret = fs_create(import, buffer, 0, 0666);
927 printk("File create: %d\n", ret);
929 fd = fs_open(import, buffer);
930 printk("File descriptor: %d\n", fd);
931 if(fd < 0) return;
933 offset = 0;
934 for(i=0; i<10; i++)
935 {
936 sprintf(buffer, "Current time is: %lld\n", NOW());
937 ret = fs_write(import, fd, buffer, strlen(buffer), offset);
938 printk("Writen current time (%d)\n", ret);
939 if(ret < 0)
940 return;
941 offset += ret;
942 }
943 ret = fs_stat(import, fd, &stat);
944 printk("Ret after stat: %d\n", ret);
945 printk(" st_mode=%o\n", stat.stat_mode);
946 printk(" st_uid =%d\n", stat.stat_uid);
947 printk(" st_gid =%d\n", stat.stat_gid);
948 printk(" st_size=%ld\n", stat.stat_size);
949 printk(" st_atime=%ld\n", stat.stat_atime);
950 printk(" st_mtime=%ld\n", stat.stat_mtime);
951 printk(" st_ctime=%ld\n", stat.stat_ctime);
953 ret = fs_close(import, fd);
954 printk("Closed fd: %d, ret=%d\n", fd, ret);
956 printk("Listing files in /\n");
957 files = fs_list(import, "/", 0, &nr_files, NULL);
958 for(i=0; i<nr_files; i++)
959 printk(" files[%d] = %s\n", i, files[i]);
961 ret64 = fs_space(import, "/");
962 printk("Free space: %lld (=%lld Mb)\n", ret64, (ret64 >> 20));
963 repeat_count--;
964 if(repeat_count > 0)
965 goto again;
967 }
969 #if 0
970 // char *content = (char *)alloc_page();
971 int fd, ret;
972 // int read;
973 char write_string[] = "\"test data written from minios\"";
974 struct fsif_stat_response stat;
975 char **files;
976 int32_t nr_files, i;
977 int64_t ret64;
980 fd = fs_open(import, "test-export-file");
981 // read = fs_read(import, fd, content, PAGE_SIZE, 0);
982 // printk("Read: %d bytes\n", read);
983 // content[read] = '\0';
984 // printk("Value: %s\n", content);
985 ret = fs_write(import, fd, write_string, strlen(write_string), 0);
986 printk("Ret after write: %d\n", ret);
987 ret = fs_stat(import, fd, &stat);
988 printk("Ret after stat: %d\n", ret);
989 printk(" st_mode=%o\n", stat.stat_mode);
990 printk(" st_uid =%d\n", stat.stat_uid);
991 printk(" st_gid =%d\n", stat.stat_gid);
992 printk(" st_size=%ld\n", stat.stat_size);
993 printk(" st_atime=%ld\n", stat.stat_atime);
994 printk(" st_mtime=%ld\n", stat.stat_mtime);
995 printk(" st_ctime=%ld\n", stat.stat_ctime);
996 ret = fs_truncate(import, fd, 30);
997 printk("Ret after truncate: %d\n", ret);
998 ret = fs_remove(import, "test-to-remove/test-file");
999 printk("Ret after remove: %d\n", ret);
1000 ret = fs_remove(import, "test-to-remove");
1001 printk("Ret after remove: %d\n", ret);
1002 ret = fs_chmod(import, fd, 0700);
1003 printk("Ret after chmod: %d\n", ret);
1004 ret = fs_sync(import, fd);
1005 printk("Ret after sync: %d\n", ret);
1006 ret = fs_close(import, fd);
1007 //ret = fs_rename(import, "test-export-file", "renamed-test-export-file");
1008 //printk("Ret after rename: %d\n", ret);
1009 ret = fs_create(import, "created-dir", 1, 0777);
1010 printk("Ret after dir create: %d\n", ret);
1011 ret = fs_create(import, "created-dir/created-file", 0, 0777);
1012 printk("Ret after file create: %d\n", ret);
1013 files = fs_list(import, "/", 15, &nr_files, NULL);
1014 for(i=0; i<nr_files; i++)
1015 printk(" files[%d] = %s\n", i, files[i]);
1016 ret64 = fs_space(import, "created-dir");
1017 printk("Ret after space: %lld\n", ret64);
1019 #endif
1022 /******************************************************************************/
1023 /* END OF FS TESTS */
1024 /******************************************************************************/
1026 static int init_fs_import(struct fs_import *import)
1028 char *err;
1029 xenbus_transaction_t xbt;
1030 char nodename[1024], r_nodename[1024], token[128], *message = NULL;
1031 struct fsif_sring *sring;
1032 int i, retry = 0;
1033 domid_t self_id;
1034 xenbus_event_queue events = NULL;
1036 printk("Initialising FS fortend to backend dom %d\n", import->dom_id);
1037 /* Allocate page for the shared ring */
1038 sring = (struct fsif_sring*) alloc_pages(FSIF_RING_SIZE_ORDER);
1039 memset(sring, 0, PAGE_SIZE * FSIF_RING_SIZE_PAGES);
1041 /* Init the shared ring */
1042 SHARED_RING_INIT(sring);
1043 ASSERT(FSIF_NR_READ_GNTS == FSIF_NR_WRITE_GNTS);
1045 /* Init private frontend ring */
1046 FRONT_RING_INIT(&import->ring, sring, PAGE_SIZE * FSIF_RING_SIZE_PAGES);
1047 import->nr_entries = import->ring.nr_ents;
1049 /* Allocate table of requests */
1050 alloc_request_table(import);
1051 init_SEMAPHORE(&import->reqs_sem, import->nr_entries);
1053 /* Grant access to the shared ring */
1054 for(i=0; i<FSIF_RING_SIZE_PAGES; i++)
1055 import->gnt_refs[i] =
1056 gnttab_grant_access(import->dom_id,
1057 virt_to_mfn((char *)sring + i * PAGE_SIZE),
1058 0);
1060 /* Allocate event channel */
1061 BUG_ON(evtchn_alloc_unbound(import->dom_id,
1062 fsfront_handler,
1063 //ANY_CPU,
1064 import,
1065 &import->local_port));
1066 unmask_evtchn(import->local_port);
1069 self_id = get_self_id();
1070 /* Write the frontend info to a node in our Xenbus */
1071 sprintf(nodename, "/local/domain/%d/device/vfs/%d",
1072 self_id, import->import_id);
1074 again:
1075 err = xenbus_transaction_start(&xbt);
1076 if (err) {
1077 printk("starting transaction\n");
1080 err = xenbus_printf(xbt,
1081 nodename,
1082 "ring-size",
1083 "%u",
1084 FSIF_RING_SIZE_PAGES);
1085 if (err) {
1086 message = "writing ring-size";
1087 goto abort_transaction;
1090 for(i=0; i<FSIF_RING_SIZE_PAGES; i++)
1092 sprintf(r_nodename, "ring-ref-%d", i);
1093 err = xenbus_printf(xbt,
1094 nodename,
1095 r_nodename,
1096 "%u",
1097 import->gnt_refs[i]);
1098 if (err) {
1099 message = "writing ring-refs";
1100 goto abort_transaction;
1104 err = xenbus_printf(xbt,
1105 nodename,
1106 "event-channel",
1107 "%u",
1108 import->local_port);
1109 if (err) {
1110 message = "writing event-channel";
1111 goto abort_transaction;
1114 err = xenbus_printf(xbt, nodename, "state", STATE_READY, 0xdeadbeef);
1117 err = xenbus_transaction_end(xbt, 0, &retry);
1118 if (retry) {
1119 goto again;
1120 printk("completing transaction\n");
1123 /* Now, when our node is prepared we write request in the exporting domain
1124 * */
1125 printk("Our own id is %d\n", self_id);
1126 sprintf(r_nodename,
1127 "/local/domain/%d/backend/vfs/exports/requests/%d/%d/frontend",
1128 import->dom_id, self_id, import->export_id);
1129 BUG_ON(xenbus_write(XBT_NIL, r_nodename, nodename));
1131 goto done;
1133 abort_transaction:
1134 xenbus_transaction_end(xbt, 1, &retry);
1136 done:
1138 #define WAIT_PERIOD 10 /* Wait period in ms */
1139 #define MAX_WAIT 10 /* Max number of WAIT_PERIODs */
1140 import->backend = NULL;
1141 sprintf(r_nodename, "%s/backend", nodename);
1143 for(retry = MAX_WAIT; retry > 0; retry--)
1145 xenbus_read(XBT_NIL, r_nodename, &import->backend);
1146 if(import->backend)
1148 printk("Backend found at %s\n", import->backend);
1149 break;
1151 msleep(WAIT_PERIOD);
1154 if(!import->backend)
1156 printk("No backend available.\n");
1157 /* TODO - cleanup datastructures/xenbus */
1158 return 0;
1160 sprintf(r_nodename, "%s/state", import->backend);
1161 sprintf(token, "fs-front-%d", import->import_id);
1162 /* The token will not be unique if multiple imports are inited */
1163 xenbus_watch_path_token(XBT_NIL, r_nodename, r_nodename, &events);
1164 xenbus_wait_for_value(r_nodename, STATE_READY, &events);
1165 xenbus_unwatch_path(XBT_NIL, r_nodename);
1166 printk("Backend ready.\n");
1168 //create_thread("fs-tester", test_fs_import, import);
1170 return 1;
1173 static void add_export(struct minios_list_head *exports, unsigned int domid)
1175 char node[1024], **exports_list = NULL, *ret_msg;
1176 int j = 0;
1177 static int import_id = 0;
1179 sprintf(node, "/local/domain/%d/backend/vfs/exports", domid);
1180 ret_msg = xenbus_ls(XBT_NIL, node, &exports_list);
1181 if (ret_msg && strcmp(ret_msg, "ENOENT"))
1182 printk("couldn't read %s: %s\n", node, ret_msg);
1183 while(exports_list && exports_list[j])
1185 struct fs_import *import;
1186 int export_id = -1;
1188 sscanf(exports_list[j], "%d", &export_id);
1189 if(export_id >= 0)
1191 import = xmalloc(struct fs_import);
1192 import->dom_id = domid;
1193 import->export_id = export_id;
1194 import->import_id = import_id++;
1195 MINIOS_INIT_LIST_HEAD(&import->list);
1196 minios_list_add(&import->list, exports);
1198 free(exports_list[j]);
1199 j++;
1201 if(exports_list)
1202 free(exports_list);
1203 if(ret_msg)
1204 free(ret_msg);
#if 0
/*
 * Walk /local/domain in XenStore and collect the VFS exports of every
 * domain found there, returning a freshly allocated list head.
 * Currently compiled out: init_fs_frontend() probes dom0 only.
 */
static struct minios_list_head* probe_exports(void)
{
    struct minios_list_head *exports;
    char **domain_nodes = NULL, *ls_error = NULL;
    int idx;

    exports = xmalloc(struct minios_list_head);
    MINIOS_INIT_LIST_HEAD(exports);

    ls_error = xenbus_ls(XBT_NIL, "/local/domain", &domain_nodes);
    if (ls_error)
    {
        printk("Could not list VFS exports (%s).\n", ls_error);
        goto exit;
    }

    for (idx = 0; domain_nodes[idx]; idx++)
    {
        add_export(exports, atoi(domain_nodes[idx]));
        free(domain_nodes[idx]);
    }

exit:
    if (ls_error)
        free(ls_error);
    if (domain_nodes)
        free(domain_nodes);
    return exports;
}
#endif
1240 MINIOS_LIST_HEAD(exports);
1242 void init_fs_frontend(void)
1244 struct minios_list_head *entry;
1245 struct fs_import *import = NULL;
1246 printk("Initing FS fronend(s).\n");
1248 //exports = probe_exports();
1249 add_export(&exports, 0);
1250 minios_list_for_each(entry, &exports)
1252 import = minios_list_entry(entry, struct fs_import, list);
1253 printk("FS export [dom=%d, id=%d] found\n",
1254 import->dom_id, import->export_id);
1255 init_fs_import(import);
1258 fs_import = import;
1260 if (!fs_import)
1261 printk("No FS import\n");
1264 /* TODO: shutdown */