ia64/xen-unstable

view extras/mini-os/fs-front.c @ 16838:945820bfedb6

minios: POSIX fixes
Fixes some functions that are specified by POSIX, and makes them conditional on #ifndef HAVE_LIBC.

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jan 22 14:20:22 2008 +0000 (2008-01-22)
parents 1e6455d608bd
children ea5ee63548e4
line source
1 /******************************************************************************
2 * fs-front.c
3 *
4 * Frontend driver for FS split device driver.
5 *
6 * Copyright (c) 2007, Grzegorz Milos, Sun Microsystems, Inc.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to
10 * deal in the Software without restriction, including without limitation the
11 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
12 * sell copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
27 #undef NDEBUG
28 #include <os.h>
29 #include <list.h>
30 #include <xmalloc.h>
31 #include <xenbus.h>
32 #include <gnttab.h>
33 #include <events.h>
34 #include <xen/io/fsif.h>
35 #include <fs.h>
36 #include <sched.h>
/* Preemption stubs: presumably no-ops because mini-os threads are not
 * preemptively scheduled -- TODO confirm against sched.h. */
#define preempt_disable()
#define preempt_enable()
/* All ring index updates use the fully synchronised compare-and-swap. */
#define cmpxchg(p,o,n) synch_cmpxchg(p,o,n)

#ifdef FS_DEBUG
#define DEBUG(_f, _a...) \
    printk("MINI_OS(file=fs-front.c, line=%d) " _f "\n", __LINE__, ## _a)
#else
#define DEBUG(_f, _a...)    ((void)0)
#endif
51 struct fs_request;
52 struct fs_import *fs_import;
54 /******************************************************************************/
55 /* RING REQUEST/RESPONSES HANDLING */
56 /******************************************************************************/
58 struct fs_request
59 {
60 void *page;
61 grant_ref_t gref;
62 struct thread *thread; /* Thread blocked on this request */
63 struct fsif_response shadow_rsp; /* Response copy writen by the
64 interrupt handler */
65 };
67 /* Ring operations:
68 * FSIF ring is used differently to Linux-like split devices. This stems from
69 * the fact that no I/O request queue is present. The use of some of the macros
70 * defined in ring.h is not allowed, in particular:
71 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY cannot be used.
72 *
73 * The protocol used for FSIF ring is described below:
74 *
75 * In order to reserve a request the frontend:
76 * a) saves current frontend_ring->req_prod_pvt into a local variable
77 * b) checks that there are free request using the local req_prod_pvt
78 * c) tries to reserve the request using cmpxchg on frontend_ring->req_prod_pvt
79 * if cmpxchg fails, it means that someone reserved the request, start from
80 * a)
81 *
82 * In order to commit a request to the shared ring:
83 * a) cmpxchg shared_ring->req_prod from local req_prod_pvt to req_prod_pvt+1
84 * Loop if unsuccessful.
85 * NOTE: Request should be commited to the shared ring as quickly as possible,
86 * because otherwise other threads might busy loop trying to commit next
87 * requests. It also follows that preemption should be disabled, if
88 * possible, for the duration of the request construction.
89 */
91 /* Number of free requests (for use on front side only). */
92 #define FS_RING_FREE_REQUESTS(_r, _req_prod_pvt) \
93 (RING_SIZE(_r) - (_req_prod_pvt - (_r)->rsp_cons))
97 static RING_IDX reserve_fsif_request(struct fs_import *import)
98 {
99 RING_IDX idx;
101 down(&import->reqs_sem);
102 preempt_disable();
103 again:
104 /* We will attempt to reserve slot idx */
105 idx = import->ring.req_prod_pvt;
106 ASSERT (FS_RING_FREE_REQUESTS(&import->ring, idx));
107 /* Attempt to reserve */
108 if(cmpxchg(&import->ring.req_prod_pvt, idx, idx+1) != idx)
109 goto again;
111 return idx;
112 }
114 static void commit_fsif_request(struct fs_import *import, RING_IDX idx)
115 {
116 while(cmpxchg(&import->ring.sring->req_prod, idx, idx+1) != idx)
117 {
118 printk("Failed to commit a request: req_prod=%d, idx=%d\n",
119 import->ring.sring->req_prod, idx);
120 }
121 preempt_enable();
123 /* NOTE: we cannot do anything clever about rsp_event, to hold off
124 * notifications, because we don't know if we are a single request (in which
125 * case we have to notify always), or a part of a larger request group
126 * (when, in some cases, notification isn't required) */
127 notify_remote_via_evtchn(import->local_port);
128 }
/* Lock-free push of 'id' onto the freelist stack.  freelist[0] is the head;
 * freelist[id] stores the link to the next free entry. */
static inline void add_id_to_freelist(unsigned int id,unsigned short* freelist)
{
    unsigned int head;

    for(;;)
    {
        head = freelist[0];
        /* Note: temporal inconsistency, since freelist[0] can be changed by
         * someone else, but we are the sole owner of freelist[id], it's OK. */
        freelist[id] = head;
        if(cmpxchg(&freelist[0], head, id) == head)
            break;
        printk("Cmpxchg on freelist add failed.\n");
    }
}
149 /* always call reserve_fsif_request(import) before this, to protect from
150 * depletion. */
/* Lock-free pop of the freelist head.  Always call reserve_fsif_request()
 * first, which guarantees the freelist is non-empty. */
static inline unsigned short get_id_from_freelist(unsigned short* freelist)
{
    unsigned int head, next;

    for(;;)
    {
        head = freelist[0];
        next = freelist[head];
        if(cmpxchg(&freelist[0], head, next) == head)
            break;
        printk("Cmpxchg on freelist remove failed.\n");
    }

    return head;
}
167 /******************************************************************************/
168 /* END OF RING REQUEST/RESPONSES HANDLING */
169 /******************************************************************************/
173 /******************************************************************************/
174 /* INDIVIDUAL FILE OPERATIONS */
175 /******************************************************************************/
176 int fs_open(struct fs_import *import, char *file)
177 {
178 struct fs_request *fsr;
179 unsigned short priv_req_id;
180 RING_IDX back_req_id;
181 struct fsif_request *req;
182 int fd;
184 /* Prepare request for the backend */
185 back_req_id = reserve_fsif_request(import);
186 DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
188 /* Prepare our private request structure */
189 priv_req_id = get_id_from_freelist(import->freelist);
190 DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
191 fsr = &import->requests[priv_req_id];
192 fsr->thread = current;
193 sprintf(fsr->page, "%s", file);
195 req = RING_GET_REQUEST(&import->ring, back_req_id);
196 req->type = REQ_FILE_OPEN;
197 req->id = priv_req_id;
198 req->u.fopen.gref = fsr->gref;
200 /* Set blocked flag before commiting the request, thus avoiding missed
201 * response race */
202 block(current);
203 commit_fsif_request(import, back_req_id);
204 schedule();
206 /* Read the response */
207 fd = (int)fsr->shadow_rsp.ret_val;
208 DEBUG("The following FD returned: %d\n", fd);
209 add_id_to_freelist(priv_req_id, import->freelist);
211 return fd;
212 }
214 int fs_close(struct fs_import *import, int fd)
215 {
216 struct fs_request *fsr;
217 unsigned short priv_req_id;
218 RING_IDX back_req_id;
219 struct fsif_request *req;
220 int ret;
222 /* Prepare request for the backend */
223 back_req_id = reserve_fsif_request(import);
224 DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
226 /* Prepare our private request structure */
227 priv_req_id = get_id_from_freelist(import->freelist);
228 DEBUG("Request id for fs_close call is: %d\n", priv_req_id);
229 fsr = &import->requests[priv_req_id];
230 fsr->thread = current;
232 req = RING_GET_REQUEST(&import->ring, back_req_id);
233 req->type = REQ_FILE_CLOSE;
234 req->id = priv_req_id;
235 req->u.fclose.fd = fd;
237 /* Set blocked flag before commiting the request, thus avoiding missed
238 * response race */
239 block(current);
240 commit_fsif_request(import, back_req_id);
241 schedule();
243 /* Read the response */
244 ret = (int)fsr->shadow_rsp.ret_val;
245 DEBUG("Close returned: %d\n", ret);
246 add_id_to_freelist(priv_req_id, import->freelist);
248 return ret;
249 }
251 ssize_t fs_read(struct fs_import *import, int fd, void *buf,
252 ssize_t len, ssize_t offset)
253 {
254 struct fs_request *fsr;
255 unsigned short priv_req_id;
256 RING_IDX back_req_id;
257 struct fsif_request *req;
258 ssize_t ret;
260 BUG_ON(len > PAGE_SIZE);
262 /* Prepare request for the backend */
263 back_req_id = reserve_fsif_request(import);
264 DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
266 /* Prepare our private request structure */
267 priv_req_id = get_id_from_freelist(import->freelist);
268 DEBUG("Request id for fs_read call is: %d\n", priv_req_id);
269 fsr = &import->requests[priv_req_id];
270 fsr->thread = current;
271 memset(fsr->page, 0, PAGE_SIZE);
273 req = RING_GET_REQUEST(&import->ring, back_req_id);
274 req->type = REQ_FILE_READ;
275 req->id = priv_req_id;
276 req->u.fread.fd = fd;
277 req->u.fread.gref = fsr->gref;
278 req->u.fread.len = len;
279 req->u.fread.offset = offset;
281 /* Set blocked flag before commiting the request, thus avoiding missed
282 * response race */
283 block(current);
284 commit_fsif_request(import, back_req_id);
285 schedule();
287 /* Read the response */
288 ret = (ssize_t)fsr->shadow_rsp.ret_val;
289 DEBUG("The following ret value returned %d\n", ret);
290 if(ret > 0)
291 memcpy(buf, fsr->page, ret);
292 add_id_to_freelist(priv_req_id, import->freelist);
294 return ret;
295 }
297 ssize_t fs_write(struct fs_import *import, int fd, void *buf,
298 ssize_t len, ssize_t offset)
299 {
300 struct fs_request *fsr;
301 unsigned short priv_req_id;
302 RING_IDX back_req_id;
303 struct fsif_request *req;
304 ssize_t ret;
306 BUG_ON(len > PAGE_SIZE);
308 /* Prepare request for the backend */
309 back_req_id = reserve_fsif_request(import);
310 DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
312 /* Prepare our private request structure */
313 priv_req_id = get_id_from_freelist(import->freelist);
314 DEBUG("Request id for fs_read call is: %d\n", priv_req_id);
315 fsr = &import->requests[priv_req_id];
316 fsr->thread = current;
317 memcpy(fsr->page, buf, len);
318 BUG_ON(len > PAGE_SIZE);
319 memset((char *)fsr->page + len, 0, PAGE_SIZE - len);
321 req = RING_GET_REQUEST(&import->ring, back_req_id);
322 req->type = REQ_FILE_WRITE;
323 req->id = priv_req_id;
324 req->u.fwrite.fd = fd;
325 req->u.fwrite.gref = fsr->gref;
326 req->u.fwrite.len = len;
327 req->u.fwrite.offset = offset;
329 /* Set blocked flag before commiting the request, thus avoiding missed
330 * response race */
331 block(current);
332 commit_fsif_request(import, back_req_id);
333 schedule();
335 /* Read the response */
336 ret = (ssize_t)fsr->shadow_rsp.ret_val;
337 DEBUG("The following ret value returned %d\n", ret);
338 add_id_to_freelist(priv_req_id, import->freelist);
340 return ret;
341 }
343 int fs_stat(struct fs_import *import,
344 int fd,
345 struct fsif_stat_response *stat)
346 {
347 struct fs_request *fsr;
348 unsigned short priv_req_id;
349 RING_IDX back_req_id;
350 struct fsif_request *req;
351 int ret;
353 /* Prepare request for the backend */
354 back_req_id = reserve_fsif_request(import);
355 DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
357 /* Prepare our private request structure */
358 priv_req_id = get_id_from_freelist(import->freelist);
359 DEBUG("Request id for fs_stat call is: %d\n", priv_req_id);
360 fsr = &import->requests[priv_req_id];
361 fsr->thread = current;
362 memset(fsr->page, 0, PAGE_SIZE);
364 req = RING_GET_REQUEST(&import->ring, back_req_id);
365 req->type = REQ_STAT;
366 req->id = priv_req_id;
367 req->u.fstat.fd = fd;
368 req->u.fstat.gref = fsr->gref;
370 /* Set blocked flag before commiting the request, thus avoiding missed
371 * response race */
372 block(current);
373 commit_fsif_request(import, back_req_id);
374 schedule();
376 /* Read the response */
377 ret = (int)fsr->shadow_rsp.ret_val;
378 DEBUG("Following ret from fstat: %d\n", ret);
379 memcpy(stat, fsr->page, sizeof(struct fsif_stat_response));
380 add_id_to_freelist(priv_req_id, import->freelist);
382 return ret;
383 }
385 int fs_truncate(struct fs_import *import,
386 int fd,
387 int64_t length)
388 {
389 struct fs_request *fsr;
390 unsigned short priv_req_id;
391 RING_IDX back_req_id;
392 struct fsif_request *req;
393 int ret;
395 /* Prepare request for the backend */
396 back_req_id = reserve_fsif_request(import);
397 DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
399 /* Prepare our private request structure */
400 priv_req_id = get_id_from_freelist(import->freelist);
401 DEBUG("Request id for fs_truncate call is: %d\n", priv_req_id);
402 fsr = &import->requests[priv_req_id];
403 fsr->thread = current;
405 req = RING_GET_REQUEST(&import->ring, back_req_id);
406 req->type = REQ_FILE_TRUNCATE;
407 req->id = priv_req_id;
408 req->u.ftruncate.fd = fd;
409 req->u.ftruncate.length = length;
411 /* Set blocked flag before commiting the request, thus avoiding missed
412 * response race */
413 block(current);
414 commit_fsif_request(import, back_req_id);
415 schedule();
417 /* Read the response */
418 ret = (int)fsr->shadow_rsp.ret_val;
419 DEBUG("Following ret from ftruncate: %d\n", ret);
420 add_id_to_freelist(priv_req_id, import->freelist);
422 return ret;
423 }
425 int fs_remove(struct fs_import *import, char *file)
426 {
427 struct fs_request *fsr;
428 unsigned short priv_req_id;
429 RING_IDX back_req_id;
430 struct fsif_request *req;
431 int ret;
433 /* Prepare request for the backend */
434 back_req_id = reserve_fsif_request(import);
435 DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
437 /* Prepare our private request structure */
438 priv_req_id = get_id_from_freelist(import->freelist);
439 DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
440 fsr = &import->requests[priv_req_id];
441 fsr->thread = current;
442 sprintf(fsr->page, "%s", file);
444 req = RING_GET_REQUEST(&import->ring, back_req_id);
445 req->type = REQ_REMOVE;
446 req->id = priv_req_id;
447 req->u.fremove.gref = fsr->gref;
449 /* Set blocked flag before commiting the request, thus avoiding missed
450 * response race */
451 block(current);
452 commit_fsif_request(import, back_req_id);
453 schedule();
455 /* Read the response */
456 ret = (int)fsr->shadow_rsp.ret_val;
457 DEBUG("The following ret: %d\n", ret);
458 add_id_to_freelist(priv_req_id, import->freelist);
460 return ret;
461 }
464 int fs_rename(struct fs_import *import,
465 char *old_file_name,
466 char *new_file_name)
467 {
468 struct fs_request *fsr;
469 unsigned short priv_req_id;
470 RING_IDX back_req_id;
471 struct fsif_request *req;
472 int ret;
473 char old_header[] = "old: ";
474 char new_header[] = "new: ";
476 /* Prepare request for the backend */
477 back_req_id = reserve_fsif_request(import);
478 DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
480 /* Prepare our private request structure */
481 priv_req_id = get_id_from_freelist(import->freelist);
482 DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
483 fsr = &import->requests[priv_req_id];
484 fsr->thread = current;
485 sprintf(fsr->page, "%s%s%c%s%s",
486 old_header, old_file_name, '\0', new_header, new_file_name);
488 req = RING_GET_REQUEST(&import->ring, back_req_id);
489 req->type = REQ_RENAME;
490 req->id = priv_req_id;
491 req->u.frename.gref = fsr->gref;
492 req->u.frename.old_name_offset = strlen(old_header);
493 req->u.frename.new_name_offset = strlen(old_header) +
494 strlen(old_file_name) +
495 strlen(new_header) +
496 1 /* Accouning for the additional
497 end of string character */;
499 /* Set blocked flag before commiting the request, thus avoiding missed
500 * response race */
501 block(current);
502 commit_fsif_request(import, back_req_id);
503 schedule();
505 /* Read the response */
506 ret = (int)fsr->shadow_rsp.ret_val;
507 DEBUG("The following ret: %d\n", ret);
508 add_id_to_freelist(priv_req_id, import->freelist);
510 return ret;
511 }
513 int fs_create(struct fs_import *import, char *name,
514 int8_t directory, int32_t mode)
515 {
516 struct fs_request *fsr;
517 unsigned short priv_req_id;
518 RING_IDX back_req_id;
519 struct fsif_request *req;
520 int ret;
522 /* Prepare request for the backend */
523 back_req_id = reserve_fsif_request(import);
524 DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
526 /* Prepare our private request structure */
527 priv_req_id = get_id_from_freelist(import->freelist);
528 DEBUG("Request id for fs_create call is: %d\n", priv_req_id);
529 fsr = &import->requests[priv_req_id];
530 fsr->thread = current;
531 sprintf(fsr->page, "%s", name);
533 req = RING_GET_REQUEST(&import->ring, back_req_id);
534 req->type = REQ_CREATE;
535 req->id = priv_req_id;
536 req->u.fcreate.gref = fsr->gref;
537 req->u.fcreate.directory = directory;
538 req->u.fcreate.mode = mode;
540 /* Set blocked flag before commiting the request, thus avoiding missed
541 * response race */
542 block(current);
543 commit_fsif_request(import, back_req_id);
544 schedule();
546 /* Read the response */
547 ret = (int)fsr->shadow_rsp.ret_val;
548 DEBUG("The following ret: %d\n", ret);
549 add_id_to_freelist(priv_req_id, import->freelist);
551 return ret;
552 }
554 char** fs_list(struct fs_import *import, char *name,
555 int32_t offset, int32_t *nr_files, int *has_more)
556 {
557 struct fs_request *fsr;
558 unsigned short priv_req_id;
559 RING_IDX back_req_id;
560 struct fsif_request *req;
561 char **files, *current_file;
562 int i;
564 DEBUG("Different masks: NR_FILES=(%llx, %d), ERROR=(%llx, %d), HAS_MORE(%llx, %d)\n",
565 NR_FILES_MASK, NR_FILES_SHIFT, ERROR_MASK, ERROR_SHIFT, HAS_MORE_FLAG, HAS_MORE_SHIFT);
567 /* Prepare request for the backend */
568 back_req_id = reserve_fsif_request(import);
569 DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
571 /* Prepare our private request structure */
572 priv_req_id = get_id_from_freelist(import->freelist);
573 DEBUG("Request id for fs_list call is: %d\n", priv_req_id);
574 fsr = &import->requests[priv_req_id];
575 fsr->thread = current;
576 sprintf(fsr->page, "%s", name);
578 req = RING_GET_REQUEST(&import->ring, back_req_id);
579 req->type = REQ_DIR_LIST;
580 req->id = priv_req_id;
581 req->u.flist.gref = fsr->gref;
582 req->u.flist.offset = offset;
584 /* Set blocked flag before commiting the request, thus avoiding missed
585 * response race */
586 block(current);
587 commit_fsif_request(import, back_req_id);
588 schedule();
590 /* Read the response */
591 *nr_files = (fsr->shadow_rsp.ret_val & NR_FILES_MASK) >> NR_FILES_SHIFT;
592 files = NULL;
593 if(*nr_files <= 0) goto exit;
594 files = malloc(sizeof(char*) * (*nr_files));
595 current_file = fsr->page;
596 for(i=0; i<*nr_files; i++)
597 {
598 files[i] = strdup(current_file);
599 current_file += strlen(current_file) + 1;
600 }
601 if(has_more != NULL)
602 *has_more = fsr->shadow_rsp.ret_val & HAS_MORE_FLAG;
603 add_id_to_freelist(priv_req_id, import->freelist);
604 exit:
605 return files;
606 }
608 int fs_chmod(struct fs_import *import, int fd, int32_t mode)
609 {
610 struct fs_request *fsr;
611 unsigned short priv_req_id;
612 RING_IDX back_req_id;
613 struct fsif_request *req;
614 int ret;
616 /* Prepare request for the backend */
617 back_req_id = reserve_fsif_request(import);
618 DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
620 /* Prepare our private request structure */
621 priv_req_id = get_id_from_freelist(import->freelist);
622 DEBUG("Request id for fs_chmod call is: %d\n", priv_req_id);
623 fsr = &import->requests[priv_req_id];
624 fsr->thread = current;
626 req = RING_GET_REQUEST(&import->ring, back_req_id);
627 req->type = REQ_CHMOD;
628 req->id = priv_req_id;
629 req->u.fchmod.fd = fd;
630 req->u.fchmod.mode = mode;
632 /* Set blocked flag before commiting the request, thus avoiding missed
633 * response race */
634 block(current);
635 commit_fsif_request(import, back_req_id);
636 schedule();
638 /* Read the response */
639 ret = (int)fsr->shadow_rsp.ret_val;
640 DEBUG("The following returned: %d\n", ret);
641 add_id_to_freelist(priv_req_id, import->freelist);
643 return ret;
644 }
646 int64_t fs_space(struct fs_import *import, char *location)
647 {
648 struct fs_request *fsr;
649 unsigned short priv_req_id;
650 RING_IDX back_req_id;
651 struct fsif_request *req;
652 int64_t ret;
654 /* Prepare request for the backend */
655 back_req_id = reserve_fsif_request(import);
656 DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
658 /* Prepare our private request structure */
659 priv_req_id = get_id_from_freelist(import->freelist);
660 DEBUG("Request id for fs_space is: %d\n", priv_req_id);
661 fsr = &import->requests[priv_req_id];
662 fsr->thread = current;
663 sprintf(fsr->page, "%s", location);
665 req = RING_GET_REQUEST(&import->ring, back_req_id);
666 req->type = REQ_FS_SPACE;
667 req->id = priv_req_id;
668 req->u.fspace.gref = fsr->gref;
670 /* Set blocked flag before commiting the request, thus avoiding missed
671 * response race */
672 block(current);
673 commit_fsif_request(import, back_req_id);
674 schedule();
676 /* Read the response */
677 ret = (int64_t)fsr->shadow_rsp.ret_val;
678 DEBUG("The following returned: %lld\n", ret);
679 add_id_to_freelist(priv_req_id, import->freelist);
681 return ret;
682 }
684 int fs_sync(struct fs_import *import, int fd)
685 {
686 struct fs_request *fsr;
687 unsigned short priv_req_id;
688 RING_IDX back_req_id;
689 struct fsif_request *req;
690 int ret;
692 /* Prepare request for the backend */
693 back_req_id = reserve_fsif_request(import);
694 DEBUG("Backend request id=%d, gref=%d\n", back_req_id, fsr->gref);
696 /* Prepare our private request structure */
697 priv_req_id = get_id_from_freelist(import->freelist);
698 DEBUG("Request id for fs_sync call is: %d\n", priv_req_id);
699 fsr = &import->requests[priv_req_id];
700 fsr->thread = current;
702 req = RING_GET_REQUEST(&import->ring, back_req_id);
703 req->type = REQ_FILE_SYNC;
704 req->id = priv_req_id;
705 req->u.fsync.fd = fd;
707 /* Set blocked flag before commiting the request, thus avoiding missed
708 * response race */
709 block(current);
710 commit_fsif_request(import, back_req_id);
711 schedule();
713 /* Read the response */
714 ret = (int)fsr->shadow_rsp.ret_val;
715 DEBUG("Close returned: %d\n", ret);
716 add_id_to_freelist(priv_req_id, import->freelist);
718 return ret;
719 }
722 /******************************************************************************/
723 /* END OF INDIVIDUAL FILE OPERATIONS */
724 /******************************************************************************/
727 static void fsfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
728 {
729 struct fs_import *import = (struct fs_import*)data;
730 static int in_irq = 0;
731 RING_IDX cons, rp;
732 int more;
734 /* Check for non-reentrance */
735 BUG_ON(in_irq);
736 in_irq = 1;
738 DEBUG("Event from import [%d:%d].\n", import->dom_id, import->export_id);
739 moretodo:
740 rp = import->ring.sring->req_prod;
741 rmb(); /* Ensure we see queued responses up to 'rp'. */
742 cons = import->ring.rsp_cons;
743 while (cons != rp)
744 {
745 struct fsif_response *rsp;
746 struct fs_request *req;
748 rsp = RING_GET_RESPONSE(&import->ring, cons);
749 DEBUG("Response at idx=%d to request id=%d, ret_val=%lx\n",
750 import->ring.rsp_cons, rsp->id, rsp->ret_val);
751 req = &import->requests[rsp->id];
752 memcpy(&req->shadow_rsp, rsp, sizeof(struct fsif_response));
753 DEBUG("Waking up: %s\n", req->thread->name);
754 wake(req->thread);
756 cons++;
757 up(&import->reqs_sem);
758 }
760 import->ring.rsp_cons = rp;
761 RING_FINAL_CHECK_FOR_RESPONSES(&import->ring, more);
762 if(more) goto moretodo;
764 in_irq = 0;
765 }
767 /* Small utility function to figure out our domain id */
768 static domid_t get_self_id(void)
769 {
770 char *dom_id;
771 domid_t ret;
773 BUG_ON(xenbus_read(XBT_NIL, "domid", &dom_id));
774 sscanf(dom_id, "%d", &ret);
776 return ret;
777 }
779 static void alloc_request_table(struct fs_import *import)
780 {
781 struct fs_request *requests;
782 int i;
784 BUG_ON(import->nr_entries <= 0);
785 printk("Allocating request array for import %d, nr_entries = %d.\n",
786 import->import_id, import->nr_entries);
787 requests = xmalloc_array(struct fs_request, import->nr_entries);
788 import->freelist = xmalloc_array(unsigned short, import->nr_entries);
789 memset(import->freelist, 0, sizeof(unsigned short) * import->nr_entries);
790 for(i=0; i<import->nr_entries; i++)
791 {
792 /* TODO: that's a lot of memory */
793 requests[i].page = (void *)alloc_page();
794 requests[i].gref = gnttab_grant_access(import->dom_id,
795 virt_to_mfn(requests[i].page),
796 0);
797 //printk(" ===>> Page=%lx, gref=%d, mfn=%lx\n", requests[i].page, requests[i].gref, virt_to_mfn(requests[i].page));
798 add_id_to_freelist(i, import->freelist);
799 }
800 import->requests = requests;
801 }
804 /******************************************************************************/
805 /* FS TESTS */
806 /******************************************************************************/
809 void test_fs_import(void *data)
810 {
811 struct fs_import *import = (struct fs_import *)data;
812 int ret, fd, i;
813 int32_t nr_files;
814 char buffer[1024];
815 ssize_t offset;
816 char **files;
817 long ret64;
819 /* Sleep for 1s and then try to open a file */
820 msleep(1000);
821 ret = fs_create(import, "mini-os-created-directory", 1, 0777);
822 printk("Directory create: %d\n", ret);
824 ret = fs_create(import, "mini-os-created-directory/mini-os-created-file", 0, 0666);
825 printk("File create: %d\n", ret);
827 fd = fs_open(import, "mini-os-created-directory/mini-os-created-file");
828 printk("File descriptor: %d\n", fd);
829 if(fd < 0) return;
831 offset = 0;
832 for(i=0; i<10; i++)
833 {
834 sprintf(buffer, "Current time is: %lld\n", NOW());
835 ret = fs_write(import, fd, buffer, strlen(buffer), offset);
836 printk("Writen current time (%d)\n", ret);
837 if(ret < 0)
838 return;
839 offset += ret;
840 }
842 ret = fs_close(import, fd);
843 printk("Closed fd: %d, ret=%d\n", fd, ret);
845 printk("Listing files in /\n");
846 files = fs_list(import, "/", 0, &nr_files, NULL);
847 for(i=0; i<nr_files; i++)
848 printk(" files[%d] = %s\n", i, files[i]);
850 ret64 = fs_space(import, "/");
851 printk("Free space: %lld (=%lld Mb)\n", ret64, (ret64 >> 20));
853 }
855 #if 0
856 // char *content = (char *)alloc_page();
857 int fd, ret;
858 // int read;
859 char write_string[] = "\"test data written from minios\"";
860 struct fsif_stat_response stat;
861 char **files;
862 int32_t nr_files, i;
863 int64_t ret64;
866 fd = fs_open(import, "test-export-file");
867 // read = fs_read(import, fd, content, PAGE_SIZE, 0);
868 // printk("Read: %d bytes\n", read);
869 // content[read] = '\0';
870 // printk("Value: %s\n", content);
871 ret = fs_write(import, fd, write_string, strlen(write_string), 0);
872 printk("Ret after write: %d\n", ret);
873 ret = fs_stat(import, fd, &stat);
874 printk("Ret after stat: %d\n", ret);
875 printk(" st_mode=%o\n", stat.stat_mode);
876 printk(" st_uid =%d\n", stat.stat_uid);
877 printk(" st_gid =%d\n", stat.stat_gid);
878 printk(" st_size=%ld\n", stat.stat_size);
879 printk(" st_atime=%ld\n", stat.stat_atime);
880 printk(" st_mtime=%ld\n", stat.stat_mtime);
881 printk(" st_ctime=%ld\n", stat.stat_ctime);
882 ret = fs_truncate(import, fd, 30);
883 printk("Ret after truncate: %d\n", ret);
884 ret = fs_remove(import, "test-to-remove/test-file");
885 printk("Ret after remove: %d\n", ret);
886 ret = fs_remove(import, "test-to-remove");
887 printk("Ret after remove: %d\n", ret);
888 ret = fs_chmod(import, fd, 0700);
889 printk("Ret after chmod: %d\n", ret);
890 ret = fs_sync(import, fd);
891 printk("Ret after sync: %d\n", ret);
892 ret = fs_close(import, fd);
893 //ret = fs_rename(import, "test-export-file", "renamed-test-export-file");
894 //printk("Ret after rename: %d\n", ret);
895 ret = fs_create(import, "created-dir", 1, 0777);
896 printk("Ret after dir create: %d\n", ret);
897 ret = fs_create(import, "created-dir/created-file", 0, 0777);
898 printk("Ret after file create: %d\n", ret);
899 files = fs_list(import, "/", 15, &nr_files, NULL);
900 for(i=0; i<nr_files; i++)
901 printk(" files[%d] = %s\n", i, files[i]);
902 ret64 = fs_space(import, "created-dir");
903 printk("Ret after space: %lld\n", ret64);
905 #endif
908 /******************************************************************************/
909 /* END OF FS TESTS */
910 /******************************************************************************/
912 static int init_fs_import(struct fs_import *import)
913 {
914 char *err;
915 xenbus_transaction_t xbt;
916 char nodename[1024], r_nodename[1024], token[128], *message = NULL;
917 struct fsif_sring *sring;
918 int retry = 0;
919 domid_t self_id;
921 printk("Initialising FS fortend to backend dom %d\n", import->dom_id);
922 /* Allocate page for the shared ring */
923 sring = (struct fsif_sring*) alloc_page();
924 memset(sring, 0, PAGE_SIZE);
926 /* Init the shared ring */
927 SHARED_RING_INIT(sring);
929 /* Init private frontend ring */
930 FRONT_RING_INIT(&import->ring, sring, PAGE_SIZE);
931 import->nr_entries = import->ring.nr_ents;
933 /* Allocate table of requests */
934 alloc_request_table(import);
935 init_SEMAPHORE(&import->reqs_sem, import->nr_entries);
937 /* Grant access to the shared ring */
938 import->gnt_ref = gnttab_grant_access(import->dom_id, virt_to_mfn(sring), 0);
940 /* Allocate event channel */
941 BUG_ON(evtchn_alloc_unbound(import->dom_id,
942 fsfront_handler,
943 //ANY_CPU,
944 import,
945 &import->local_port));
948 self_id = get_self_id();
949 /* Write the frontend info to a node in our Xenbus */
950 sprintf(nodename, "/local/domain/%d/device/vfs/%d",
951 self_id, import->import_id);
953 again:
954 err = xenbus_transaction_start(&xbt);
955 if (err) {
956 printk("starting transaction\n");
957 }
959 err = xenbus_printf(xbt,
960 nodename,
961 "ring-ref",
962 "%u",
963 import->gnt_ref);
964 if (err) {
965 message = "writing ring-ref";
966 goto abort_transaction;
967 }
969 err = xenbus_printf(xbt,
970 nodename,
971 "event-channel",
972 "%u",
973 import->local_port);
974 if (err) {
975 message = "writing event-channel";
976 goto abort_transaction;
977 }
979 err = xenbus_printf(xbt, nodename, "state", STATE_READY, 0xdeadbeef);
982 err = xenbus_transaction_end(xbt, 0, &retry);
983 if (retry) {
984 goto again;
985 printk("completing transaction\n");
986 }
988 /* Now, when our node is prepared we write request in the exporting domain
989 * */
990 printk("Our own id is %d\n", self_id);
991 sprintf(r_nodename,
992 "/local/domain/%d/backend/vfs/exports/requests/%d/%d/frontend",
993 import->dom_id, self_id, import->export_id);
994 BUG_ON(xenbus_write(XBT_NIL, r_nodename, nodename));
996 goto done;
998 abort_transaction:
999 xenbus_transaction_end(xbt, 1, &retry);
1001 done:
1003 #define WAIT_PERIOD 10 /* Wait period in ms */
1004 #define MAX_WAIT 10 /* Max number of WAIT_PERIODs */
1005 import->backend = NULL;
1006 sprintf(r_nodename, "%s/backend", nodename);
1008 for(retry = MAX_WAIT; retry > 0; retry--)
1010 xenbus_read(XBT_NIL, r_nodename, &import->backend);
1011 if(import->backend)
1013 printk("Backend found at %s\n", import->backend);
1014 break;
1016 msleep(WAIT_PERIOD);
1019 if(!import->backend)
1021 printk("No backend available.\n");
1022 /* TODO - cleanup datastructures/xenbus */
1023 return 0;
1025 sprintf(r_nodename, "%s/state", import->backend);
1026 sprintf(token, "fs-front-%d", import->import_id);
1027 /* The token will not be unique if multiple imports are inited */
1028 xenbus_watch_path(XBT_NIL, r_nodename/*, token*/);
1029 xenbus_wait_for_value(/*token,*/ r_nodename, STATE_READY);
1030 printk("Backend ready.\n");
1032 //create_thread("fs-tester", test_fs_import, import);
1034 return 1;
1037 static void add_export(struct list_head *exports, unsigned int domid)
1039 char node[1024], **exports_list = NULL, *ret_msg;
1040 int j = 0;
1041 static int import_id = 0;
1043 sprintf(node, "/local/domain/%d/backend/vfs/exports", domid);
1044 ret_msg = xenbus_ls(XBT_NIL, node, &exports_list);
1045 if (ret_msg && strcmp(ret_msg, "ENOENT"))
1046 printk("couldn't read %s: %s\n", node, ret_msg);
1047 while(exports_list && exports_list[j])
1049 struct fs_import *import;
1050 int export_id = -1;
1052 sscanf(exports_list[j], "%d", &export_id);
1053 if(export_id >= 0)
1055 import = xmalloc(struct fs_import);
1056 import->dom_id = domid;
1057 import->export_id = export_id;
1058 import->import_id = import_id++;
1059 INIT_LIST_HEAD(&import->list);
1060 list_add(&import->list, exports);
1062 free(exports_list[j]);
1063 j++;
1065 if(exports_list)
1066 free(exports_list);
1067 if(ret_msg)
1068 free(ret_msg);
1071 #if 0
1072 static struct list_head* probe_exports(void)
1074 struct list_head *exports;
1075 char **node_list = NULL, *msg = NULL;
1076 int i = 0;
1078 exports = xmalloc(struct list_head);
1079 INIT_LIST_HEAD(exports);
1081 msg = xenbus_ls(XBT_NIL, "/local/domain", &node_list);
1082 if(msg)
1084 printk("Could not list VFS exports (%s).\n", msg);
1085 goto exit;
1088 while(node_list[i])
1090 add_export(exports, atoi(node_list[i]));
1091 free(node_list[i]);
1092 i++;
1095 exit:
1096 if(msg)
1097 free(msg);
1098 if(node_list)
1099 free(node_list);
1100 return exports;
1102 #endif
1104 LIST_HEAD(exports);
1106 void init_fs_frontend(void)
1108 struct list_head *entry;
1109 struct fs_import *import = NULL;
1110 printk("Initing FS fronend(s).\n");
1112 //exports = probe_exports();
1113 add_export(&exports, 0);
1114 list_for_each(entry, &exports)
1116 import = list_entry(entry, struct fs_import, list);
1117 printk("FS export [dom=%d, id=%d] found\n",
1118 import->dom_id, import->export_id);
1119 init_fs_import(import);
1122 fs_import = import;
1124 if (!fs_import)
1125 printk("No FS import\n");