ia64/xen-unstable

view tools/fs-back/fs-backend.c @ 18065:5f529c74a712

fs-backend: Fix freelist implementation

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jul 15 17:59:31 2008 +0100 (2008-07-15)
parents 88818d55e95a
children 649c975b72f0
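The freelist this changeset refers to is the per-mount array set up in allocate_request_array() below; the helpers add_id_to_freelist()/get_id_from_freelist() themselves live in fs-backend.h, which is not shown on this page. As a rough illustration only (the actual fixed implementation is in fs-backend.h), a Xen-style freelist that keeps its head index in slot 0 of an (nr_entries + 1)-element array could look like this:

static inline void add_id_to_freelist(unsigned int id, unsigned short *freelist)
{
    /* Push 'id' onto the free stack; slot 0 holds the current head. */
    freelist[id + 1] = freelist[0];
    freelist[0] = id;
}

static inline unsigned short get_id_from_freelist(unsigned short *freelist)
{
    /* Pop the head id and advance the head to the next free entry. */
    unsigned int id = freelist[0];
    freelist[0] = freelist[id + 1];
    return id;
}

This layout would match the allocation in allocate_request_array(), which reserves nr_entries + 1 shorts so that slot 0 can serve as the head.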
line source
#undef NDEBUG
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <malloc.h>
#include <errno.h>   /* errno and EINTR, used in handle_aio_events() */
#include <pthread.h>
#include <xenctrl.h>
#include <aio.h>
#include <sys/mman.h>
#include <sys/select.h>
#include <xen/io/ring.h>
#include "fs-backend.h"

struct xs_handle *xsh = NULL;
static struct fs_export *fs_exports = NULL;
static int export_id = 0;
static int mount_id = 0;

void dispatch_response(struct mount *mount, int priv_req_id)
{
    int i;
    struct fs_op *op;
    struct fs_request *req = &mount->requests[priv_req_id];

    for(i=0;;i++)
    {
        op = fsops[i];
        /* We should dispatch a response before reaching the end of the array */
        assert(op != NULL);
        if(op->type == req->req_shadow.type)
        {
            printf("Found op for type=%d\n", op->type);
            /* There needs to be a response handler */
            assert(op->response_handler != NULL);
            op->response_handler(mount, req);
            break;
        }
    }

    req->active = 0;
    add_id_to_freelist(priv_req_id, mount->freelist);
}

static void handle_aio_events(struct mount *mount)
{
    int fd, ret, count, i, notify;
    evtchn_port_t port;
    /* AIO control block for the evtchn file descriptor */
    struct aiocb evtchn_cb;
    const struct aiocb * cb_list[mount->nr_entries];
    int request_ids[mount->nr_entries];

    /* Prepare the AIO control block for evtchn */
    fd = xc_evtchn_fd(mount->evth);
    bzero(&evtchn_cb, sizeof(struct aiocb));
    evtchn_cb.aio_fildes = fd;
    evtchn_cb.aio_nbytes = sizeof(port);
    evtchn_cb.aio_buf = &port;
    assert(aio_read(&evtchn_cb) == 0);

wait_again:
    /* Create list of active AIO requests */
    count = 0;
    for(i=0; i<mount->nr_entries; i++)
        if(mount->requests[i].active)
        {
            cb_list[count] = &mount->requests[i].aiocb;
            request_ids[count] = i;
            count++;
        }
    /* Add the event channel at the end of the list. Event channel needs to be
     * handled last as it exits this function. */
    cb_list[count] = &evtchn_cb;
    request_ids[count] = -1;
    count++;

    /* Block till an AIO request finishes, or we get an event */
    while(1) {
        int ret = aio_suspend(cb_list, count, NULL);
        if (!ret)
            break;
        assert(errno == EINTR);
    }
    for(i=0; i<count; i++)
        if(aio_error(cb_list[i]) != EINPROGRESS)
        {
            if(request_ids[i] >= 0)
                dispatch_response(mount, request_ids[i]);
            else
                goto read_event_channel;
        }

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&mount->ring, notify);
    printf("Pushed responses and notify=%d\n", notify);
    if(notify)
        xc_evtchn_notify(mount->evth, mount->local_evtchn);

    goto wait_again;

read_event_channel:
    assert(aio_return(&evtchn_cb) == sizeof(evtchn_port_t));
    assert(xc_evtchn_unmask(mount->evth, mount->local_evtchn) >= 0);
}

void allocate_request_array(struct mount *mount)
{
    int i, nr_entries = mount->nr_entries;
    struct fs_request *requests;
    unsigned short *freelist;

    requests = malloc(sizeof(struct fs_request) * nr_entries);
    freelist = malloc(sizeof(unsigned short) * (nr_entries + 1));
    memset(requests, 0, sizeof(struct fs_request) * nr_entries);
    memset(freelist, 0, sizeof(unsigned short) * (nr_entries + 1));
    for(i=0; i< nr_entries; i++)
    {
        requests[i].active = 0;
        add_id_to_freelist(i, freelist);
    }
    mount->requests = requests;
    mount->freelist = freelist;
}

void* handle_mount(void *data)
{
    int more, notify;
    struct mount *mount = (struct mount *)data;

    printf("Starting a thread for mount: %d\n", mount->mount_id);
    allocate_request_array(mount);

    for(;;)
    {
        int nr_consumed=0;
        RING_IDX cons, rp;
        struct fsif_request *req;

        handle_aio_events(mount);
moretodo:
        rp = mount->ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while ((cons = mount->ring.req_cons) != rp)
        {
            int i;
            struct fs_op *op;

            printf("Got a request at %d\n", cons);
            req = RING_GET_REQUEST(&mount->ring, cons);
            printf("Request type=%d\n", req->type);
            for(i=0;;i++)
            {
                op = fsops[i];
                if(op == NULL)
                {
                    /* We've reached the end of the array, no appropriate
                     * handler found. Warn, ignore and continue. */
                    printf("WARN: Unknown request type: %d\n", req->type);
                    mount->ring.req_cons++;
                    break;
                }
                if(op->type == req->type)
                {
                    /* There needs to be a dispatch handler */
                    assert(op->dispatch_handler != NULL);
                    op->dispatch_handler(mount, req);
                    break;
                }
            }

            nr_consumed++;
        }
        printf("Backend consumed: %d requests\n", nr_consumed);
        RING_FINAL_CHECK_FOR_REQUESTS(&mount->ring, more);
        if(more) goto moretodo;

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&mount->ring, notify);
        printf("Pushed responses and notify=%d\n", notify);
        if(notify)
            xc_evtchn_notify(mount->evth, mount->local_evtchn);
    }

    printf("Destroying thread for mount: %d\n", mount->mount_id);
    xc_gnttab_munmap(mount->gnth, mount->ring.sring, 1);
    xc_gnttab_close(mount->gnth);
    xc_evtchn_unbind(mount->evth, mount->local_evtchn);
    xc_evtchn_close(mount->evth);
    free(mount->frontend);
    pthread_exit(NULL);
}

static void handle_connection(int frontend_dom_id, int export_id, char *frontend)
{
    struct mount *mount;
    struct fs_export *export;
    int evt_port;
    pthread_t handling_thread;
    struct fsif_sring *sring;

    printf("Handling connection from dom=%d, for export=%d\n",
            frontend_dom_id, export_id);
    /* Try to find the export on the list */
    export = fs_exports;
    while(export)
    {
        if(export->export_id == export_id)
            break;
        export = export->next;
    }
    if(!export)
    {
        printf("Could not find the export (the id is unknown).\n");
        return;
    }

    mount = (struct mount*)malloc(sizeof(struct mount));
    mount->dom_id = frontend_dom_id;
    mount->export = export;
    mount->mount_id = mount_id++;
    xenbus_read_mount_request(mount, frontend);
    printf("Frontend found at: %s (gref=%d, evtchn=%d)\n",
            mount->frontend, mount->gref, mount->remote_evtchn);
    xenbus_write_backend_node(mount);
    mount->evth = -1;
    mount->evth = xc_evtchn_open();
    assert(mount->evth != -1);
    mount->local_evtchn = -1;
    mount->local_evtchn = xc_evtchn_bind_interdomain(mount->evth,
                                                     mount->dom_id,
                                                     mount->remote_evtchn);
    assert(mount->local_evtchn != -1);
    mount->gnth = -1;
    mount->gnth = xc_gnttab_open();
    assert(mount->gnth != -1);
    sring = xc_gnttab_map_grant_ref(mount->gnth,
                                    mount->dom_id,
                                    mount->gref,
                                    PROT_READ | PROT_WRITE);
    BACK_RING_INIT(&mount->ring, sring, PAGE_SIZE);
    mount->nr_entries = mount->ring.nr_ents;
    xenbus_write_backend_ready(mount);

    pthread_create(&handling_thread, NULL, &handle_mount, mount);
}

static void await_connections(void)
{
    int fd, ret, dom_id, export_id;
    fd_set fds;
    char **watch_paths;
    unsigned int len;
    char d;

    assert(xsh != NULL);
    fd = xenbus_get_watch_fd();
    /* Infinite watch loop */
    do {
        FD_ZERO(&fds);
        FD_SET(fd, &fds);
        ret = select(fd+1, &fds, NULL, NULL, NULL);
        assert(ret == 1);
        watch_paths = xs_read_watch(xsh, &len);
        assert(len == 2);
        assert(strcmp(watch_paths[1], "conn-watch") == 0);
        dom_id = -1;
        export_id = -1;
        d = 0;
        printf("Path changed %s\n", watch_paths[0]);
        /* Match ".../<dom_id>/<export_id>/frontend"; the trailing %c only
         * reads 'd' when the path really ends in "frontend". */
        sscanf(watch_paths[0], WATCH_NODE"/%d/%d/fronten%c",
                &dom_id, &export_id, &d);
        if((dom_id >= 0) && (export_id >= 0) && d == 'd') {
            char *frontend = xs_read(xsh, XBT_NULL, watch_paths[0], NULL);
            if (frontend) {
                handle_connection(dom_id, export_id, frontend);
                xs_rm(xsh, XBT_NULL, watch_paths[0]);
            }
        }
next_select:
        printf("Awaiting next connection.\n");
        /* TODO - we need to figure out what to free */
        free(watch_paths);
    } while (1);
}

struct fs_export* create_export(char *name, char *export_path)
{
    struct fs_export *curr_export, **last_export;

    /* Create export structure */
    curr_export = (struct fs_export *)malloc(sizeof(struct fs_export));
    curr_export->name = name;
    curr_export->export_path = export_path;
    curr_export->export_id = export_id++;
    /* Thread it onto the list */
    curr_export->next = NULL;
    last_export = &fs_exports;
    while(*last_export)
        last_export = &((*last_export)->next);
    *last_export = curr_export;

    return curr_export;
}

int main(void)
{
    struct fs_export *export;

    /* Open the connection to XenStore first */
    xsh = xs_domain_open();
    assert(xsh != NULL);
    xs_rm(xsh, XBT_NULL, ROOT_NODE);
    /* Create watch node */
    xenbus_create_request_node();

    /* Create & register the default export */
    export = create_export("default", "/exports");
    xenbus_register_export(export);

    await_connections();
    /* Close the connection to XenStore when we are finished with everything */
    xs_daemon_close(xsh);
#if 0
    int xc_handle;
    char *shared_page;
    int prot = PROT_READ | PROT_WRITE;

    xc_handle = xc_gnttab_open();
    printf("Main fn.\n");
    shared_page = xc_gnttab_map_grant_ref(xc_handle,
                                          7,
                                          2047,
                                          prot);
    shared_page[20] = '\0';
    printf("Current content of the page = %s\n", shared_page);
    sprintf(shared_page, "%s", "Haha dirty page now! Very bad page.");
    xc_gnttab_munmap(xc_handle, shared_page, 1);
    xc_gnttab_close(xc_handle);
    unrelated next line, saved for later convenience
    xc_evtchn_notify(mount->evth, mount->local_evtchn);
#endif
}
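
For context on how request slots flow through the freelist: dispatch_response() above returns an id with add_id_to_freelist() once the AIO completes, and the dispatch handlers in fs-ops.c (not shown on this page) are expected to claim an id before issuing the AIO. The following is a purely hypothetical sketch of that claiming side, using only the fields visible in this file (active, req_shadow, aiocb) and the freelist helpers; the real handlers live in fs-ops.c and differ in detail:

static void example_dispatch(struct mount *mount, struct fsif_request *req)
{
    /* Claim a free request slot from the per-mount freelist. */
    unsigned short id = get_id_from_freelist(mount->freelist);
    struct fs_request *priv = &mount->requests[id];

    priv->active = 1;        /* make handle_aio_events() watch this slot */
    priv->req_shadow = *req; /* keep a shadow copy so dispatch_response()
                              * can match it against fsops[] by type */
    /* ... set up priv->aiocb and submit it with aio_read()/aio_write() ... */
    mount->ring.req_cons++;  /* the slot on the shared ring is now consumed */
}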