extras/mini-os/fs-front.c @ 19557:226ef307cd2e
/******************************************************************************
 * fs-front.c
 *
 * Frontend driver for FS split device driver.
 *
 * Copyright (c) 2007, Grzegorz Milos, <gm281@cam.ac.uk>.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#undef NDEBUG
#include <os.h>
#include <list.h>
#include <xmalloc.h>
#include <xenbus.h>
#include <gnttab.h>
#include <events.h>
#include <xen/io/fsif.h>
#include <fs.h>
#include <sched.h>

#define preempt_disable()
#define preempt_enable()
#define cmpxchg(p,o,n) synch_cmpxchg(p,o,n)

#ifdef FS_DEBUG
#define DEBUG(_f, _a...) \
    printk("MINI_OS(file=fs-front.c, line=%d) " _f "\n", __LINE__, ## _a)
#else
#define DEBUG(_f, _a...) ((void)0)
#endif
struct fs_request;
struct fs_import *fs_import;
void *alloc_buffer_page(struct fs_request *req, domid_t domid, grant_ref_t *gref);
void free_buffer_page(struct fs_request *req);

/******************************************************************************/
/*                      RING REQUEST/RESPONSES HANDLING                       */
/******************************************************************************/

struct fs_request
{
    void *private1;                     /* Specific to request type */
    void *private2;
    struct thread *thread;              /* Thread blocked on this request */
    struct fsif_response shadow_rsp;    /* Response copy written by the
                                           interrupt handler */
};

struct fs_rw_gnts
{
    /* TODO 16 bit? */
    int count;
    grant_ref_t grefs[FSIF_NR_READ_GNTS];
    void *pages[FSIF_NR_READ_GNTS];
};
/* Ring operations:
 * The FSIF ring is used differently from Linux-like split devices. This stems
 * from the fact that no I/O request queue is present. The use of some of the
 * macros defined in ring.h is not allowed; in particular,
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY cannot be used.
 *
 * The protocol used for the FSIF ring is described below:
 *
 * In order to reserve a request, the frontend:
 * a) saves current frontend_ring->req_prod_pvt into a local variable
 * b) checks that there are free requests, using the local req_prod_pvt
 * c) tries to reserve the request using cmpxchg on frontend_ring->req_prod_pvt;
 *    if cmpxchg fails, it means that someone else reserved the request: start
 *    again from a)
 *
 * In order to commit a request to the shared ring:
 * a) cmpxchg shared_ring->req_prod from local req_prod_pvt to req_prod_pvt+1.
 *    Loop if unsuccessful.
 * NOTE: Requests should be committed to the shared ring as quickly as
 *       possible, because otherwise other threads might busy-loop trying to
 *       commit the next requests. It also follows that preemption should be
 *       disabled, if possible, for the duration of the request construction.
 */
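
/* Worked example of the reservation step above (an illustrative interleaving,
 * not additional protocol): threads A and B both read req_prod_pvt == 5 in
 * step a).  A's cmpxchg(&req_prod_pvt, 5, 6) succeeds, so A owns slot 5.
 * B's cmpxchg then fails because req_prod_pvt is already 6, so B restarts
 * from a) and reserves slot 6.  No lock is ever taken. */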
/* Number of free requests (for use on front side only). */
#define FS_RING_FREE_REQUESTS(_r, _req_prod_pvt)                        \
    (RING_SIZE(_r) - (_req_prod_pvt - (_r)->rsp_cons))

static RING_IDX reserve_fsif_request(struct fs_import *import)
{
    RING_IDX idx;

    down(&import->reqs_sem);
    preempt_disable();
again:
    /* We will attempt to reserve slot idx */
    idx = import->ring.req_prod_pvt;
    ASSERT (FS_RING_FREE_REQUESTS(&import->ring, idx));
    /* Attempt to reserve */
    if(cmpxchg(&import->ring.req_prod_pvt, idx, idx+1) != idx)
        goto again;

    return idx;
}
static void commit_fsif_request(struct fs_import *import, RING_IDX idx)
{
    while(cmpxchg(&import->ring.sring->req_prod, idx, idx+1) != idx)
    {
        printk("Failed to commit a request: req_prod=%d, idx=%d\n",
                import->ring.sring->req_prod, idx);
    }
    preempt_enable();

    /* NOTE: we cannot do anything clever about rsp_event, to hold off
     * notifications, because we don't know if we are a single request (in
     * which case we have to notify always), or a part of a larger request
     * group (when, in some cases, notification isn't required). */
    notify_remote_via_evtchn(import->local_port);
}
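
/* Sketch of the request pattern used by every fs_* operation below (a summary
 * of existing code in this file, not additional API):
 *
 *     back_req_id = reserve_fsif_request(import);
 *     req = RING_GET_REQUEST(&import->ring, back_req_id);
 *     ...fill in req->type, req->id and the relevant union member...
 *     block(current);                  // block BEFORE committing, so the
 *                                      // response cannot race the sleep
 *     commit_fsif_request(import, back_req_id);
 *     schedule();                      // fsfront_handler() wakes us up
 */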
static inline void add_id_to_freelist(unsigned int id, unsigned short *freelist)
{
    unsigned int old_id, new_id;

again:
    old_id = freelist[0];
    /* Note: temporal inconsistency, since freelist[0] can be changed by
     * someone else, but as we are the sole owner of freelist[id + 1], it's
     * OK. */
    freelist[id + 1] = old_id;
    new_id = id;
    if(cmpxchg(&freelist[0], old_id, new_id) != old_id)
    {
        printk("Cmpxchg on freelist add failed.\n");
        goto again;
    }
}
/* Always call reserve_fsif_request(import) before this, to protect from
 * depletion. */
static inline unsigned short get_id_from_freelist(unsigned short *freelist)
{
    unsigned int old_id, new_id;

again:
    old_id = freelist[0];
    new_id = freelist[old_id + 1];
    if(cmpxchg(&freelist[0], old_id, new_id) != old_id)
    {
        printk("Cmpxchg on freelist remove failed.\n");
        goto again;
    }

    return old_id;
}
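
/* Freelist layout used by the two helpers above: freelist[0] always holds the
 * id at the head of the list, and freelist[id + 1] holds the id that follows
 * 'id', so the array acts as a lock-free LIFO stack.  For example, on a
 * freshly zeroed list, add_id_to_freelist(3) then add_id_to_freelist(7)
 * leaves freelist[0] == 7 and freelist[8] == 3; the next
 * get_id_from_freelist() returns 7. */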
/******************************************************************************/
/*                 END OF RING REQUEST/RESPONSES HANDLING                     */
/******************************************************************************/


/******************************************************************************/
/*                        INDIVIDUAL FILE OPERATIONS                          */
/******************************************************************************/
int fs_open(struct fs_import *import, char *file)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    grant_ref_t gref;
    void *buffer;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int fd;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
    DEBUG("gref id=%d\n", gref);
    fsr->thread = current;
    sprintf(buffer, "%s", file);

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_FILE_OPEN;
    req->id = priv_req_id;
    req->u.fopen.gref = gref;

    /* Set blocked flag before committing the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    fd = (int)fsr->shadow_rsp.ret_val;
    DEBUG("The following FD returned: %d\n", fd);
    free_buffer_page(fsr);
    add_id_to_freelist(priv_req_id, import->freelist);

    return fd;
}
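
/* Illustrative usage of the open/close pair (hypothetical file name; the same
 * pattern appears in test_fs_import() near the end of this file):
 *
 *     int fd = fs_open(import, "some-file");
 *     if(fd < 0)
 *         return;                    // backend rejected the open
 *     ...fs_read()/fs_write()/fs_stat() on fd...
 *     fs_close(import, fd);
 */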
int fs_close(struct fs_import *import, int fd)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_close call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    fsr->thread = current;

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_FILE_CLOSE;
    req->id = priv_req_id;
    req->u.fclose.fd = fd;

    /* Set blocked flag before committing the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("Close returned: %d\n", ret);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
ssize_t fs_read(struct fs_import *import, int fd, void *buf,
                ssize_t len, ssize_t offset)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    struct fs_rw_gnts gnts;
    RING_IDX back_req_id;
    struct fsif_request *req;
    ssize_t ret, to_copy, current_copy;
    int i;

    BUG_ON(len > PAGE_SIZE * FSIF_NR_READ_GNTS);

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_read call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_FILE_READ;
    req->id = priv_req_id;
    req->u.fread.fd = fd;
    req->u.fread.len = len;
    req->u.fread.offset = offset;

    ASSERT(len > 0);
    gnts.count = ((len - 1) / PAGE_SIZE) + 1;
    for(i=0; i<gnts.count; i++)
    {
        gnts.pages[i] = (void *)alloc_page();
        gnts.grefs[i] = gnttab_grant_access(import->dom_id,
                                            virt_to_mfn(gnts.pages[i]),
                                            0);
        memset(gnts.pages[i], 0, PAGE_SIZE);
        req->u.fread.grefs[i] = gnts.grefs[i];
    }
    fsr->thread = current;

    /* Set blocked flag before committing the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (ssize_t)fsr->shadow_rsp.ret_val;
    DEBUG("The following ret value returned %d\n", ret);
    /* Copy out whatever the backend returned; end the grants and free the
     * pages on the error path too, so a failed read does not leak them. */
    to_copy = (ret > 0) ? ret : 0;
    for(i=0; i<gnts.count; i++)
    {
        gnttab_end_access(gnts.grefs[i]);
        current_copy = to_copy > PAGE_SIZE ? PAGE_SIZE : to_copy;
        if(current_copy > 0)
            memcpy(buf, gnts.pages[i], current_copy);
        to_copy -= current_copy;
        buf = (char *)buf + current_copy;
        free_page(gnts.pages[i]);
    }
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
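
/* Read sketch (illustrative): the caller's buffer only needs room for 'len'
 * bytes, but len itself is capped at PAGE_SIZE * FSIF_NR_READ_GNTS by the
 * BUG_ON above, since each granted page carries at most PAGE_SIZE bytes:
 *
 *     char buf[256];
 *     ssize_t n = fs_read(import, fd, buf, sizeof(buf), 0);
 *     if(n < 0)
 *         ...backend reported an error...
 */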
ssize_t fs_write(struct fs_import *import, int fd, void *buf,
                 ssize_t len, ssize_t offset)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    struct fs_rw_gnts gnts;
    RING_IDX back_req_id;
    struct fsif_request *req;
    ssize_t ret, to_copy;
    int i;

    BUG_ON(len > PAGE_SIZE * FSIF_NR_WRITE_GNTS);

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_write call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_FILE_WRITE;
    req->id = priv_req_id;
    req->u.fwrite.fd = fd;
    req->u.fwrite.len = len;
    req->u.fwrite.offset = offset;

    ASSERT(len > 0);
    gnts.count = ((len - 1) / PAGE_SIZE) + 1;
    to_copy = len;
    for(i=0; i<gnts.count; i++)
    {
        int current_copy = (to_copy > PAGE_SIZE ? PAGE_SIZE : to_copy);
        gnts.pages[i] = (void *)alloc_page();
        gnts.grefs[i] = gnttab_grant_access(import->dom_id,
                                            virt_to_mfn(gnts.pages[i]),
                                            0);
        memcpy(gnts.pages[i], buf, current_copy);
        if(current_copy < PAGE_SIZE)
            memset((char *)gnts.pages[i] + current_copy,
                   0,
                   PAGE_SIZE - current_copy);
        req->u.fwrite.grefs[i] = gnts.grefs[i];
        to_copy -= current_copy;
        buf = (char *)buf + current_copy;
    }
    fsr->thread = current;

    /* Set blocked flag before committing the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (ssize_t)fsr->shadow_rsp.ret_val;
    DEBUG("The following ret value returned %d\n", ret);
    for(i=0; i<gnts.count; i++)
    {
        gnttab_end_access(gnts.grefs[i]);
        free_page(gnts.pages[i]);
    }
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
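
/* Note on the write path above: the tail of a partially filled page is
 * explicitly zeroed before the grant is handed over, so the backend never
 * observes stale frontend memory beyond 'len'. */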
int fs_stat(struct fs_import *import,
            int fd,
            struct fsif_stat_response *stat)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_stat call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    fsr->thread = current;

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_STAT;
    req->id = priv_req_id;
    req->u.fstat.fd = fd;

    /* Set blocked flag before committing the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("Following ret from fstat: %d\n", ret);
    memcpy(stat,
           &fsr->shadow_rsp.fstat,
           sizeof(struct fsif_stat_response));
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
int fs_truncate(struct fs_import *import,
                int fd,
                int64_t length)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_truncate call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    fsr->thread = current;

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_FILE_TRUNCATE;
    req->id = priv_req_id;
    req->u.ftruncate.fd = fd;
    req->u.ftruncate.length = length;

    /* Set blocked flag before committing the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("Following ret from ftruncate: %d\n", ret);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
int fs_remove(struct fs_import *import, char *file)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    grant_ref_t gref;
    void *buffer;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_remove call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
    DEBUG("gref=%d\n", gref);
    fsr->thread = current;
    sprintf(buffer, "%s", file);

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_REMOVE;
    req->id = priv_req_id;
    req->u.fremove.gref = gref;

    /* Set blocked flag before committing the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("The following ret: %d\n", ret);
    free_buffer_page(fsr);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
int fs_rename(struct fs_import *import,
              char *old_file_name,
              char *new_file_name)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    grant_ref_t gref;
    void *buffer;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;
    char old_header[] = "old: ";
    char new_header[] = "new: ";

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_rename call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
    DEBUG("gref=%d\n", gref);
    fsr->thread = current;
    sprintf(buffer, "%s%s%c%s%s",
            old_header, old_file_name, '\0', new_header, new_file_name);

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_RENAME;
    req->id = priv_req_id;
    req->u.frename.gref = gref;
    req->u.frename.old_name_offset = strlen(old_header);
    req->u.frename.new_name_offset = strlen(old_header) +
                                     strlen(old_file_name) +
                                     strlen(new_header) +
                                     1 /* Accounting for the additional
                                          end-of-string character */;

    /* Set blocked flag before committing the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("The following ret: %d\n", ret);
    free_buffer_page(fsr);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
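
/* Shared-buffer layout built by fs_rename() (derived from the sprintf above):
 *
 *     "old: <old_file_name>\0new: <new_file_name>\0"
 *
 * old_name_offset points just past the "old: " header; new_name_offset
 * additionally skips the old name, its terminating NUL, and the "new: "
 * header, landing on the first byte of the new name. */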
int fs_create(struct fs_import *import, char *name,
              int8_t directory, int32_t mode)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    grant_ref_t gref;
    void *buffer;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_create call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
    DEBUG("gref=%d\n", gref);
    fsr->thread = current;
    sprintf(buffer, "%s", name);

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_CREATE;
    req->id = priv_req_id;
    req->u.fcreate.gref = gref;
    req->u.fcreate.directory = directory;
    req->u.fcreate.mode = mode;

    /* Set blocked flag before committing the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("The following ret: %d\n", ret);
    free_buffer_page(fsr);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
char** fs_list(struct fs_import *import, char *name,
               int32_t offset, int32_t *nr_files, int *has_more)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    grant_ref_t gref;
    void *buffer;
    RING_IDX back_req_id;
    struct fsif_request *req;
    char **files, *current_file;
    int i;

    DEBUG("Different masks: NR_FILES=(%llx, %d), ERROR=(%llx, %d), HAS_MORE(%llx, %d)\n",
            NR_FILES_MASK, NR_FILES_SHIFT, ERROR_MASK, ERROR_SHIFT,
            HAS_MORE_FLAG, HAS_MORE_SHIFT);

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_list call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
    DEBUG("gref=%d\n", gref);
    fsr->thread = current;
    sprintf(buffer, "%s", name);

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_DIR_LIST;
    req->id = priv_req_id;
    req->u.flist.gref = gref;
    req->u.flist.offset = offset;

    /* Set blocked flag before committing the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    *nr_files = (fsr->shadow_rsp.ret_val & NR_FILES_MASK) >> NR_FILES_SHIFT;
    files = NULL;
    if(*nr_files <= 0) goto exit;
    files = malloc(sizeof(char*) * (*nr_files));
    current_file = buffer;
    for(i=0; i<*nr_files; i++)
    {
        files[i] = strdup(current_file);
        current_file += strlen(current_file) + 1;
    }
    if(has_more != NULL)
        *has_more = fsr->shadow_rsp.ret_val & HAS_MORE_FLAG;
exit:
    /* Free the name buffer and release the request id on both paths, so an
     * empty listing does not leak them. */
    free_buffer_page(fsr);
    add_id_to_freelist(priv_req_id, import->freelist);
    return files;
}
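
/* Listing sketch (illustrative; the caller owns both the returned array and
 * the strdup'ed names):
 *
 *     int32_t nr;
 *     int more, i;
 *     char **files = fs_list(import, "/", 0, &nr, &more);
 *     for(i=0; i<nr; i++)
 *     {
 *         printk("%s\n", files[i]);
 *         free(files[i]);
 *     }
 *     free(files);
 *
 * If 'more' comes back set, call fs_list() again with a larger 'offset' to
 * fetch the remaining entries. */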
int fs_chmod(struct fs_import *import, int fd, int32_t mode)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_chmod call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    fsr->thread = current;

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_CHMOD;
    req->id = priv_req_id;
    req->u.fchmod.fd = fd;
    req->u.fchmod.mode = mode;

    /* Set blocked flag before committing the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("The following returned: %d\n", ret);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
int64_t fs_space(struct fs_import *import, char *location)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    grant_ref_t gref;
    void *buffer;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int64_t ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_space call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
    DEBUG("gref=%d\n", gref);
    fsr->thread = current;
    sprintf(buffer, "%s", location);

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_FS_SPACE;
    req->id = priv_req_id;
    req->u.fspace.gref = gref;

    /* Set blocked flag before committing the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int64_t)fsr->shadow_rsp.ret_val;
    DEBUG("The following returned: %lld\n", ret);
    free_buffer_page(fsr);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
int fs_sync(struct fs_import *import, int fd)
{
    struct fs_request *fsr;
    unsigned short priv_req_id;
    RING_IDX back_req_id;
    struct fsif_request *req;
    int ret;

    /* Prepare request for the backend */
    back_req_id = reserve_fsif_request(import);
    DEBUG("Backend request id=%d\n", back_req_id);

    /* Prepare our private request structure */
    priv_req_id = get_id_from_freelist(import->freelist);
    DEBUG("Request id for fs_sync call is: %d\n", priv_req_id);
    fsr = &import->requests[priv_req_id];
    fsr->thread = current;

    req = RING_GET_REQUEST(&import->ring, back_req_id);
    req->type = REQ_FILE_SYNC;
    req->id = priv_req_id;
    req->u.fsync.fd = fd;

    /* Set blocked flag before committing the request, thus avoiding missed
     * response race */
    block(current);
    commit_fsif_request(import, back_req_id);
    schedule();

    /* Read the response */
    ret = (int)fsr->shadow_rsp.ret_val;
    DEBUG("Sync returned: %d\n", ret);
    add_id_to_freelist(priv_req_id, import->freelist);

    return ret;
}
/******************************************************************************/
/*                    END OF INDIVIDUAL FILE OPERATIONS                       */
/******************************************************************************/

void *alloc_buffer_page(struct fs_request *req, domid_t domid, grant_ref_t *gref)
{
    void *page;

    page = (void *)alloc_page();
    *gref = gnttab_grant_access(domid, virt_to_mfn(page), 0);
    req->private1 = page;
    req->private2 = (void *)(uintptr_t)(*gref);

    return page;
}

void free_buffer_page(struct fs_request *req)
{
    gnttab_end_access((grant_ref_t)(uintptr_t)req->private2);
    free_page(req->private1);
}
static void fsfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
{
    struct fs_import *import = (struct fs_import*)data;
    static int in_irq = 0;
    RING_IDX cons, rp;
    int more;

    /* Check for non-reentrance */
    BUG_ON(in_irq);
    in_irq = 1;

    DEBUG("Event from import [%d:%d].\n", import->dom_id, import->export_id);
moretodo:
    rp = import->ring.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = import->ring.rsp_cons;
    while (cons != rp)
    {
        struct fsif_response *rsp;
        struct fs_request *req;

        rsp = RING_GET_RESPONSE(&import->ring, cons);
        DEBUG("Response at idx=%d to request id=%d, ret_val=%lx\n",
                cons, rsp->id, rsp->ret_val);
        req = &import->requests[rsp->id];
        memcpy(&req->shadow_rsp, rsp, sizeof(struct fsif_response));
        DEBUG("Waking up: %s\n", req->thread->name);
        wake(req->thread);

        cons++;
        up(&import->reqs_sem);
    }

    import->ring.rsp_cons = rp;
    RING_FINAL_CHECK_FOR_RESPONSES(&import->ring, more);
    if(more) goto moretodo;

    in_irq = 0;
}
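
/* The handler above copies each response into the requester's private
 * shadow_rsp before waking it: the ring slot is recycled as soon as rsp_cons
 * advances, and the woken thread may not run until after that.  The
 * up(&import->reqs_sem) releases the ring slot accounted for by the down()
 * in reserve_fsif_request(). */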
static void alloc_request_table(struct fs_import *import)
{
    struct fs_request *requests;
    int i;

    BUG_ON(import->nr_entries <= 0);
    printk("Allocating request array for import %d, nr_entries = %d.\n",
            import->import_id, import->nr_entries);
    requests = xmalloc_array(struct fs_request, import->nr_entries);
    import->freelist = xmalloc_array(unsigned short, import->nr_entries + 1);
    memset(import->freelist, 0, sizeof(unsigned short) * (import->nr_entries + 1));
    for(i=0; i<import->nr_entries; i++)
        add_id_to_freelist(i, import->freelist);
    import->requests = requests;
}
/******************************************************************************/
/*                                 FS TESTS                                   */
/******************************************************************************/

void test_fs_import(void *data)
{
    struct fs_import *import = (struct fs_import *)data;
    int ret, fd, i, repeat_count;
    int32_t nr_files;
    char buffer[1024];
    ssize_t offset;
    char **files;
    int64_t ret64;
    struct fsif_stat_response stat;

    repeat_count = 10;
    /* Sleep for 1s and then try to open a file */
    msleep(1000);
again:
    ret = fs_create(import, "mini-os-created-directory", 1, 0777);
    printk("Directory create: %d\n", ret);

    sprintf(buffer, "mini-os-created-directory/mini-os-created-file-%d",
            repeat_count);
    ret = fs_create(import, buffer, 0, 0666);
    printk("File create: %d\n", ret);

    fd = fs_open(import, buffer);
    printk("File descriptor: %d\n", fd);
    if(fd < 0) return;

    offset = 0;
    for(i=0; i<10; i++)
    {
        sprintf(buffer, "Current time is: %lld\n", NOW());
        ret = fs_write(import, fd, buffer, strlen(buffer), offset);
        printk("Written current time (%d)\n", ret);
        if(ret < 0)
            return;
        offset += ret;
    }
    ret = fs_stat(import, fd, &stat);
    printk("Ret after stat: %d\n", ret);
    printk(" st_mode=%o\n", stat.stat_mode);
    printk(" st_uid =%d\n", stat.stat_uid);
    printk(" st_gid =%d\n", stat.stat_gid);
    printk(" st_size=%ld\n", stat.stat_size);
    printk(" st_atime=%ld\n", stat.stat_atime);
    printk(" st_mtime=%ld\n", stat.stat_mtime);
    printk(" st_ctime=%ld\n", stat.stat_ctime);

    ret = fs_close(import, fd);
    printk("Closed fd: %d, ret=%d\n", fd, ret);

    printk("Listing files in /\n");
    files = fs_list(import, "/", 0, &nr_files, NULL);
    for(i=0; i<nr_files; i++)
        printk(" files[%d] = %s\n", i, files[i]);

    ret64 = fs_space(import, "/");
    printk("Free space: %lld (=%lld Mb)\n", ret64, (ret64 >> 20));
    repeat_count--;
    if(repeat_count > 0)
        goto again;
}
#if 0
//    char *content = (char *)alloc_page();
    int fd, ret;
//    int read;
    char write_string[] = "\"test data written from minios\"";
    struct fsif_stat_response stat;
    char **files;
    int32_t nr_files, i;
    int64_t ret64;

    fd = fs_open(import, "test-export-file");
//    read = fs_read(import, fd, content, PAGE_SIZE, 0);
//    printk("Read: %d bytes\n", read);
//    content[read] = '\0';
//    printk("Value: %s\n", content);
    ret = fs_write(import, fd, write_string, strlen(write_string), 0);
    printk("Ret after write: %d\n", ret);
    ret = fs_stat(import, fd, &stat);
    printk("Ret after stat: %d\n", ret);
    printk(" st_mode=%o\n", stat.stat_mode);
    printk(" st_uid =%d\n", stat.stat_uid);
    printk(" st_gid =%d\n", stat.stat_gid);
    printk(" st_size=%ld\n", stat.stat_size);
    printk(" st_atime=%ld\n", stat.stat_atime);
    printk(" st_mtime=%ld\n", stat.stat_mtime);
    printk(" st_ctime=%ld\n", stat.stat_ctime);
    ret = fs_truncate(import, fd, 30);
    printk("Ret after truncate: %d\n", ret);
    ret = fs_remove(import, "test-to-remove/test-file");
    printk("Ret after remove: %d\n", ret);
    ret = fs_remove(import, "test-to-remove");
    printk("Ret after remove: %d\n", ret);
    ret = fs_chmod(import, fd, 0700);
    printk("Ret after chmod: %d\n", ret);
    ret = fs_sync(import, fd);
    printk("Ret after sync: %d\n", ret);
    ret = fs_close(import, fd);
//    ret = fs_rename(import, "test-export-file", "renamed-test-export-file");
//    printk("Ret after rename: %d\n", ret);
    ret = fs_create(import, "created-dir", 1, 0777);
    printk("Ret after dir create: %d\n", ret);
    ret = fs_create(import, "created-dir/created-file", 0, 0777);
    printk("Ret after file create: %d\n", ret);
    files = fs_list(import, "/", 15, &nr_files, NULL);
    for(i=0; i<nr_files; i++)
        printk(" files[%d] = %s\n", i, files[i]);
    ret64 = fs_space(import, "created-dir");
    printk("Ret after space: %lld\n", ret64);
#endif
/******************************************************************************/
/*                            END OF FS TESTS                                 */
/******************************************************************************/
static int init_fs_import(struct fs_import *import)
{
    char *err;
    xenbus_transaction_t xbt;
    char nodename[1024], r_nodename[1024], token[128], *message = NULL;
    struct fsif_sring *sring;
    int i, retry = 0;
    domid_t self_id;
    xenbus_event_queue events = NULL;

    printk("Initialising FS frontend to backend dom %d\n", import->dom_id);
    /* Allocate page for the shared ring */
    sring = (struct fsif_sring*) alloc_pages(FSIF_RING_SIZE_ORDER);
    memset(sring, 0, PAGE_SIZE * FSIF_RING_SIZE_PAGES);

    /* Init the shared ring */
    SHARED_RING_INIT(sring);
    ASSERT(FSIF_NR_READ_GNTS == FSIF_NR_WRITE_GNTS);

    /* Init private frontend ring */
    FRONT_RING_INIT(&import->ring, sring, PAGE_SIZE * FSIF_RING_SIZE_PAGES);
    import->nr_entries = import->ring.nr_ents;

    /* Allocate table of requests */
    alloc_request_table(import);
    init_SEMAPHORE(&import->reqs_sem, import->nr_entries);

    /* Grant access to the shared ring */
    for(i=0; i<FSIF_RING_SIZE_PAGES; i++)
        import->gnt_refs[i] =
            gnttab_grant_access(import->dom_id,
                                virt_to_mfn((char *)sring + i * PAGE_SIZE),
                                0);

    /* Allocate event channel */
    BUG_ON(evtchn_alloc_unbound(import->dom_id,
                                fsfront_handler,
                                //ANY_CPU,
                                import,
                                &import->local_port));
    unmask_evtchn(import->local_port);


    self_id = xenbus_get_self_id();
    /* Write the frontend info to a node in our Xenbus */
    sprintf(nodename, "/local/domain/%d/device/vfs/%d",
            self_id, import->import_id);

again:
    err = xenbus_transaction_start(&xbt);
    if (err) {
        printk("starting transaction\n");
    }

    err = xenbus_printf(xbt,
                        nodename,
                        "ring-size",
                        "%u",
                        FSIF_RING_SIZE_PAGES);
    if (err) {
        message = "writing ring-size";
        goto abort_transaction;
    }

    for(i=0; i<FSIF_RING_SIZE_PAGES; i++)
    {
        sprintf(r_nodename, "ring-ref-%d", i);
        err = xenbus_printf(xbt,
                            nodename,
                            r_nodename,
                            "%u",
                            import->gnt_refs[i]);
        if (err) {
            message = "writing ring-refs";
            goto abort_transaction;
        }
    }

    err = xenbus_printf(xbt,
                        nodename,
                        "event-channel",
                        "%u",
                        import->local_port);
    if (err) {
        message = "writing event-channel";
        goto abort_transaction;
    }

    err = xenbus_printf(xbt, nodename, "state", STATE_READY, 0xdeadbeef);

    err = xenbus_transaction_end(xbt, 0, &retry);
    if (retry) {
        goto again;
        printk("completing transaction\n");
    }

    /* Now, when our node is prepared, we write the request in the exporting
     * domain. */
    printk("Our own id is %d\n", self_id);
    sprintf(r_nodename,
            "/local/domain/%d/backend/vfs/exports/requests/%d/%d/frontend",
            import->dom_id, self_id, import->export_id);
    BUG_ON(xenbus_write(XBT_NIL, r_nodename, nodename));

    goto done;

abort_transaction:
    xenbus_transaction_end(xbt, 1, &retry);

done:

#define WAIT_PERIOD 10   /* Wait period in ms */
#define MAX_WAIT    10   /* Max number of WAIT_PERIODs */
    import->backend = NULL;
    sprintf(r_nodename, "%s/backend", nodename);

    for(retry = MAX_WAIT; retry > 0; retry--)
    {
        xenbus_read(XBT_NIL, r_nodename, &import->backend);
        if(import->backend)
        {
            printk("Backend found at %s\n", import->backend);
            break;
        }
        msleep(WAIT_PERIOD);
    }

    if(!import->backend)
    {
        printk("No backend available.\n");
        /* TODO - cleanup datastructures/xenbus */
        return 0;
    }
    sprintf(r_nodename, "%s/state", import->backend);
    sprintf(token, "fs-front-%d", import->import_id);
    /* The token will not be unique if multiple imports are inited */
    xenbus_watch_path_token(XBT_NIL, r_nodename, r_nodename, &events);
    xenbus_wait_for_value(r_nodename, STATE_READY, &events);
    xenbus_unwatch_path(XBT_NIL, r_nodename);
    printk("Backend ready.\n");

    //create_thread("fs-tester", test_fs_import, import);

    return 1;
}
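
/* Xenstore layout produced by init_fs_import() (paths as written above):
 *
 *     /local/domain/<self>/device/vfs/<import_id>/ring-size
 *     /local/domain/<self>/device/vfs/<import_id>/ring-ref-<i>
 *     /local/domain/<self>/device/vfs/<import_id>/event-channel
 *     /local/domain/<self>/device/vfs/<import_id>/state
 *
 * plus a "frontend" pointer back to that directory under the backend's
 * /local/domain/<dom_id>/backend/vfs/exports/requests/<self>/<export_id>/.
 * The frontend then polls <its node>/backend and waits for the backend's
 * state node to reach STATE_READY. */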
static void add_export(struct minios_list_head *exports, unsigned int domid)
{
    char node[1024], **exports_list = NULL, *ret_msg;
    int j = 0;
    static int import_id = 0;

    sprintf(node, "/local/domain/%d/backend/vfs/exports", domid);
    ret_msg = xenbus_ls(XBT_NIL, node, &exports_list);
    if (ret_msg && strcmp(ret_msg, "ENOENT"))
        printk("couldn't read %s: %s\n", node, ret_msg);
    while(exports_list && exports_list[j])
    {
        struct fs_import *import;
        int export_id = -1;

        sscanf(exports_list[j], "%d", &export_id);
        if(export_id >= 0)
        {
            import = xmalloc(struct fs_import);
            import->dom_id = domid;
            import->export_id = export_id;
            import->import_id = import_id++;
            MINIOS_INIT_LIST_HEAD(&import->list);
            minios_list_add(&import->list, exports);
        }
        free(exports_list[j]);
        j++;
    }
    if(exports_list)
        free(exports_list);
    if(ret_msg)
        free(ret_msg);
}
#if 0
static struct minios_list_head* probe_exports(void)
{
    struct minios_list_head *exports;
    char **node_list = NULL, *msg = NULL;
    int i = 0;

    exports = xmalloc(struct minios_list_head);
    MINIOS_INIT_LIST_HEAD(exports);

    msg = xenbus_ls(XBT_NIL, "/local/domain", &node_list);
    if(msg)
    {
        printk("Could not list VFS exports (%s).\n", msg);
        goto exit;
    }

    while(node_list[i])
    {
        add_export(exports, atoi(node_list[i]));
        free(node_list[i]);
        i++;
    }

exit:
    if(msg)
        free(msg);
    if(node_list)
        free(node_list);
    return exports;
}
#endif
MINIOS_LIST_HEAD(exports);

void init_fs_frontend(void)
{
    struct minios_list_head *entry;
    struct fs_import *import = NULL;
    printk("Initialising FS frontend(s).\n");

    //exports = probe_exports();
    add_export(&exports, 0);
    minios_list_for_each(entry, &exports)
    {
        import = minios_list_entry(entry, struct fs_import, list);
        printk("FS export [dom=%d, id=%d] found\n",
                import->dom_id, import->export_id);
        init_fs_import(import);
    }

    fs_import = import;

    if (!fs_import)
        printk("No FS import\n");
}

/* TODO: shutdown */