ia64/xen-unstable

tools/xenstore/xenstored_domain.c @ 6538:84ee014ebd41

Merge xen-vtx-unstable.hg
author adsharma@los-vmm.sc.intel.com
date Wed Aug 17 12:34:38 2005 -0800 (2005-08-17)
parents 23979fb12c49 a9ee400a5da9
children 99914b54f7bf
/*
    Domain communications for Xen Store Daemon.
    Copyright (C) 2005 Rusty Russell IBM Corporation

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdio.h>
#include <linux/ioctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdarg.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

//#define DEBUG
#include "utils.h"
#include "talloc.h"
#include "xenstored_core.h"
#include "xenstored_domain.h"
#include "xenstored_test.h"
static int *xc_handle;
static int eventchn_fd;
static unsigned int ringbuf_datasize;
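/*
 * Each introduced domain shares one page with the daemon (mapped in
 * new_domain() below): the first half of the page is the ring the domain
 * writes requests into ("input" from our point of view), the second half
 * is the ring we write replies into ("output").  ringbuf_datasize is the
 * number of data bytes in each half, i.e. half a page minus the ring head.
 */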
struct domain
{
	struct list_head list;

	/* The id of this domain */
	domid_t domid;

	/* Event channel port */
	u16 port;

	/* Domain path in store. */
	char *path;

	/* Shared page. */
	void *page;

	/* Input and output ringbuffer heads. */
	struct ringbuf_head *input, *output;

	/* The connection associated with this. */
	struct connection *conn;

};

static LIST_HEAD(domains);
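/*
 * Ring head shared with the domain.  "write" and "read" are offsets into
 * the data area and wrap at ringbuf_datasize.  The ring is empty when
 * write == read; to keep that case unambiguous one byte is always left
 * unused, so the ring is full when write is one behind read (mod the
 * size).  With 4096-byte pages this packed head is 9 bytes, leaving 2039
 * data bytes per half-page (see domain_init()).
 */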
struct ringbuf_head
{
	u32 write; /* Next place to write to */
	u32 read; /* Next place to read from */
	u8 flags;
	char buf[0];
} __attribute__((packed));
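/*
 * Bind/unbind ioctls for the /dev/xen/evtchn device opened in
 * domain_init(); these numbers presumably have to stay in sync with the
 * kernel driver's definitions.
 */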
#define EVENTCHN_BIND _IO('E', 2)
#define EVENTCHN_UNBIND _IO('E', 3)
/* FIXME: Mark connection as broken (close it?) when this happens. */
static bool check_buffer(const struct ringbuf_head *h)
{
	return (h->write < ringbuf_datasize && h->read < ringbuf_datasize);
}
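/*
 * Worked example for get_output_chunk() below: write == 4 and read == 10
 * give read_mark == 9, so at most 9 - 4 == 5 contiguous bytes may be
 * written; the byte just before "read" stays free so a full ring never
 * looks empty.  With write == 10 and read == 4, read_mark == 3 < write,
 * so the chunk runs from "write" to the end of the buffer.
 */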
/* We can't fill last byte: would look like empty buffer. */
static void *get_output_chunk(const struct ringbuf_head *h,
			      void *buf, u32 *len)
{
	u32 read_mark;

	if (h->read == 0)
		read_mark = ringbuf_datasize - 1;
	else
		read_mark = h->read - 1;

	/* Here to the end of buffer, unless they haven't read some out. */
	*len = ringbuf_datasize - h->write;
	if (read_mark >= h->write)
		*len = read_mark - h->write;
	return buf + h->write;
}
static const void *get_input_chunk(const struct ringbuf_head *h,
				   const void *buf, u32 *len)
{
	/* Here to the end of buffer, unless they haven't written some. */
	*len = ringbuf_datasize - h->read;
	if (h->write >= h->read)
		*len = h->write - h->read;
	return buf + h->read;
}
static void update_output_chunk(struct ringbuf_head *h, u32 len)
{
	h->write += len;
	if (h->write == ringbuf_datasize)
		h->write = 0;
}

static void update_input_chunk(struct ringbuf_head *h, u32 len)
{
	h->read += len;
	if (h->read == ringbuf_datasize)
		h->read = 0;
}
static bool buffer_has_input(const struct ringbuf_head *h)
{
	u32 len;

	get_input_chunk(h, NULL, &len);
	return (len != 0);
}

static bool buffer_has_output_room(const struct ringbuf_head *h)
{
	u32 len;

	get_output_chunk(h, NULL, &len);
	return (len != 0);
}
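/*
 * Connection "write" hook (installed via new_connection() in new_domain()):
 * copy up to len bytes of reply into the domain's output ring and kick the
 * domain's event channel.  Returns the number of bytes actually copied,
 * which may be short of len when the ring is nearly full.
 */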
static int writechn(struct connection *conn, const void *data, unsigned int len)
{
	u32 avail;
	void *dest;
	struct ringbuf_head h;

	/* Read the head just once, before anything else, and validate it. */
	h = *conn->domain->output;
	mb();
	if (!check_buffer(&h)) {
		errno = EIO;
		return -1;
	}

	dest = get_output_chunk(&h, conn->domain->output->buf, &avail);
	if (avail < len)
		len = avail;

	memcpy(dest, data, len);
	mb();
	update_output_chunk(conn->domain->output, len);
	/* FIXME: Probably not necessary. */
	mb();
	xc_evtchn_send(*xc_handle, conn->domain->port);
	return len;
}
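/*
 * Connection "read" hook: pull up to len bytes of request data out of the
 * domain's input ring.  The domain is only notified when the ring was
 * previously full, since that is the only case in which it may be waiting
 * for us to free up space.
 */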
static int readchn(struct connection *conn, void *data, unsigned int len)
{
	u32 avail;
	const void *src;
	struct ringbuf_head h;
	bool was_full;

	/* Read the head just once, before anything else, and validate it. */
	h = *conn->domain->input;
	mb();

	if (!check_buffer(&h)) {
		errno = EIO;
		return -1;
	}

	src = get_input_chunk(&h, conn->domain->input->buf, &avail);
	if (avail < len)
		len = avail;

	was_full = !buffer_has_output_room(&h);
	memcpy(data, src, len);
	mb();
	update_input_chunk(conn->domain->input, len);
	/* FIXME: Probably not necessary. */
	mb();

	/* If it was full, tell them we've taken some. */
	if (was_full)
		xc_evtchn_send(*xc_handle, conn->domain->port);
	return len;
}
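/*
 * talloc destructor for struct domain, registered in new_domain(): drop the
 * domain from the list, unbind its event-channel port (if it got one) and
 * unmap the shared page.
 */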
static int destroy_domain(void *_domain)
{
	struct domain *domain = _domain;

	list_del(&domain->list);

	if (domain->port &&
	    (ioctl(eventchn_fd, EVENTCHN_UNBIND, domain->port) != 0))
		eprintf("> Unbinding port %i failed!\n", domain->port);

	if (domain->page)
		munmap(domain->page, getpagesize());

	return 0;
}
static struct domain *find_domain(u16 port)
{
	struct domain *i;

	list_for_each_entry(i, &domains, list) {
		if (i->port == port)
			return i;
	}
	return NULL;
}
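/*
 * Reading eventchn_fd yields the port that fired; outside TESTING the port
 * is written straight back, presumably to re-enable delivery on it.
 */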
/* We scan all domains rather than use the information given here. */
void handle_event(int event_fd)
{
	u16 port;

	if (read(event_fd, &port, sizeof(port)) != sizeof(port))
		barf_perror("Failed to read from event fd");
#ifndef TESTING
	if (write(event_fd, &port, sizeof(port)) != sizeof(port))
		barf_perror("Failed to write to event fd");
#endif
}
bool domain_can_read(struct connection *conn)
{
	return conn->state == OK && buffer_has_input(conn->domain->input);
}

bool domain_can_write(struct connection *conn)
{
	return conn->out && buffer_has_output_room(conn->domain->output);
}
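/*
 * Set up the state for a newly introduced domain: map the shared page at
 * mfn, split it into the two rings, bind the event-channel port and create
 * the store connection that will service it.  Returns NULL on failure,
 * with errno left set by the call that failed.
 */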
static struct domain *new_domain(void *context, domid_t domid,
				 unsigned long mfn, int port,
				 const char *path)
{
	struct domain *domain;
	domain = talloc(context, struct domain);
	domain->port = 0;
	domain->domid = domid;
	domain->path = talloc_strdup(domain, path);
	domain->page = xc_map_foreign_range(*xc_handle, domain->domid,
					    getpagesize(),
					    PROT_READ|PROT_WRITE,
					    mfn);
	if (!domain->page)
		return NULL;

	list_add(&domain->list, &domains);
	talloc_set_destructor(domain, destroy_domain);

	/* One in each half of page. */
	domain->input = domain->page;
	domain->output = domain->page + getpagesize()/2;

	/* Tell kernel we're interested in this event. */
	if (ioctl(eventchn_fd, EVENTCHN_BIND, port) != 0)
		return NULL;

	domain->port = port;
	domain->conn = new_connection(writechn, readchn);
	domain->conn->domain = domain;
	return domain;
}
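/*
 * XS_INTRODUCE: create a domain entry from the arguments listed below.
 * Only the privileged connection (id 0) with write access may do this;
 * the new domain is initially hung off the request buffer and handed to
 * its own connection once setup succeeds.
 */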
/* domid, mfn, evtchn, path */
void do_introduce(struct connection *conn, struct buffered_data *in)
{
	struct domain *domain;
	char *vec[4];

	if (get_strings(in, vec, ARRAY_SIZE(vec)) < ARRAY_SIZE(vec)) {
		send_error(conn, EINVAL);
		return;
	}

	if (conn->id != 0) {
		send_error(conn, EACCES);
		return;
	}

	if (!conn->can_write) {
		send_error(conn, EROFS);
		return;
	}

	/* Sanity check args. */
	if ((atoi(vec[2]) <= 0) || !is_valid_nodename(vec[3])) {
		send_error(conn, EINVAL);
		return;
	}
	/* Hang domain off "in" until we're finished. */
	domain = new_domain(in, atoi(vec[0]), atol(vec[1]), atol(vec[2]),
			    vec[3]);
	if (!domain) {
		send_error(conn, errno);
		return;
	}

	/* Now domain belongs to its connection. */
	talloc_steal(domain->conn, domain);
	send_ack(conn, XS_INTRODUCE);
}
static struct domain *find_domain_by_domid(domid_t domid)
{
	struct domain *i;

	list_for_each_entry(i, &domains, list) {
		if (i->domid == domid)
			return i;
	}
	return NULL;
}
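/*
 * XS_RELEASE: also restricted to the privileged connection.  Freeing the
 * domain's connection drops the talloc reference taken in do_introduce(),
 * which frees the domain itself and so runs destroy_domain().
 */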
/* domid */
void do_release(struct connection *conn, const char *domid_str)
{
	struct domain *domain;
	domid_t domid;

	if (!domid_str) {
		send_error(conn, EINVAL);
		return;
	}

	domid = atoi(domid_str);
	if (!domid) {
		send_error(conn, EINVAL);
		return;
	}

	if (conn->id != 0) {
		send_error(conn, EACCES);
		return;
	}

	domain = find_domain_by_domid(domid);
	if (!domain) {
		send_error(conn, ENOENT);
		return;
	}

	if (!domain->conn) {
		send_error(conn, EINVAL);
		return;
	}

	talloc_free(domain->conn);
	send_ack(conn, XS_RELEASE);
}
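/*
 * XS_GETDOMAINPATH: reply with the store path recorded for the given
 * domid; DOMID_SELF means the asking connection's own domain.
 */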
void do_get_domain_path(struct connection *conn, const char *domid_str)
{
	struct domain *domain;
	domid_t domid;

	if (!domid_str) {
		send_error(conn, EINVAL);
		return;
	}

	domid = atoi(domid_str);
	if (domid == DOMID_SELF)
		domain = conn->domain;
	else
		domain = find_domain_by_domid(domid);

	if (!domain)
		send_error(conn, ENOENT);
	else
		send_reply(conn, XS_GETDOMAINPATH, domain->path,
			   strlen(domain->path) + 1);
}
static int close_xc_handle(void *_handle)
{
	xc_interface_close(*(int *)_handle);
	return 0;
}
/* Returns the implicit path of a connection (only domains have this) */
const char *get_implicit_path(const struct connection *conn)
{
	if (!conn->domain)
		return NULL;
	return conn->domain->path;
}
/* Restore existing connections. */
void restore_existing_connections(void)
{
}
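/*
 * One-time setup: size the rings, open the hypervisor control interface
 * (closed automatically via the talloc destructor above) and open the
 * event-channel device; the returned fd is presumably the one later passed
 * to handle_event().
 */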
/* Returns the event channel handle. */
int domain_init(void)
{
	/* The size of the ringbuffer: half a page minus head structure. */
	ringbuf_datasize = getpagesize() / 2 - sizeof(struct ringbuf_head);

	xc_handle = talloc(talloc_autofree_context(), int);
	if (!xc_handle)
		barf_perror("Failed to allocate domain handle");
	*xc_handle = xc_interface_open();
	if (*xc_handle < 0)
		barf_perror("Failed to open connection to hypervisor");
	talloc_set_destructor(xc_handle, close_xc_handle);

#ifdef TESTING
	eventchn_fd = fake_open_eventchn();
#else
	eventchn_fd = open("/dev/xen/evtchn", O_RDWR);
#endif
	if (eventchn_fd < 0)
		barf_perror("Failed to open event channel device");
	return eventchn_fd;
}