ia64/xen-unstable
changeset 7370:2796f432858e
Change xenstore-domain messaging protocol to match what we use
for other inter-domain comms (power-of-two-sized rings, and
free-running indexes). The interface is defined in the spirit of
the console protocol, so maybe some chance of merging them
together later?

Signed-off-by: Keir Fraser <keir@xensource.com>
author | kaf24@firebug.cl.cam.ac.uk |
---|---|
date | Wed Oct 12 18:25:40 2005 +0100 (2005-10-12) |
parents | f8c725f1fce8 |
children | 179027128d99 |
files | linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c tools/xenstore/xenstored_core.c tools/xenstore/xenstored_domain.c tools/xenstore/xs_test.c xen/include/public/io/xs_wire.h |
line diff
1.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c Wed Oct 12 18:18:43 2005 +0100 1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c Wed Oct 12 18:25:40 2005 +0100 1.3 @@ -33,164 +33,114 @@ 1.4 #include <linux/interrupt.h> 1.5 #include <linux/sched.h> 1.6 #include <linux/err.h> 1.7 +#include <asm-xen/xenbus.h> 1.8 #include "xenbus_comms.h" 1.9 1.10 -#define RINGBUF_DATASIZE ((PAGE_SIZE / 2) - sizeof(struct ringbuf_head)) 1.11 -struct ringbuf_head 1.12 -{ 1.13 - u32 write; /* Next place to write to */ 1.14 - u32 read; /* Next place to read from */ 1.15 - u8 flags; 1.16 - char buf[0]; 1.17 -} __attribute__((packed)); 1.18 - 1.19 static int xenbus_irq; 1.20 1.21 DECLARE_WAIT_QUEUE_HEAD(xb_waitq); 1.22 1.23 -static inline struct ringbuf_head *outbuf(void) 1.24 +static inline struct xenstore_domain_interface *xenstore_domain_interface(void) 1.25 { 1.26 return mfn_to_virt(xen_start_info->store_mfn); 1.27 } 1.28 1.29 -static inline struct ringbuf_head *inbuf(void) 1.30 -{ 1.31 - return mfn_to_virt(xen_start_info->store_mfn) + PAGE_SIZE/2; 1.32 -} 1.33 - 1.34 static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs) 1.35 { 1.36 wake_up(&xb_waitq); 1.37 return IRQ_HANDLED; 1.38 } 1.39 1.40 -static int check_buffer(const struct ringbuf_head *h) 1.41 -{ 1.42 - return (h->write < RINGBUF_DATASIZE && h->read < RINGBUF_DATASIZE); 1.43 -} 1.44 - 1.45 -/* We can't fill last byte: would look like empty buffer. */ 1.46 -static void *get_output_chunk(const struct ringbuf_head *h, 1.47 - void *buf, u32 *len) 1.48 +static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) 1.49 { 1.50 - u32 read_mark; 1.51 - 1.52 - if (h->read == 0) 1.53 - read_mark = RINGBUF_DATASIZE - 1; 1.54 - else 1.55 - read_mark = h->read - 1; 1.56 - 1.57 - /* Here to the end of buffer, unless they haven't read some out. 
*/ 1.58 - *len = RINGBUF_DATASIZE - h->write; 1.59 - if (read_mark >= h->write) 1.60 - *len = read_mark - h->write; 1.61 - return buf + h->write; 1.62 + return ((prod - cons) <= XENSTORE_RING_SIZE); 1.63 } 1.64 1.65 -static const void *get_input_chunk(const struct ringbuf_head *h, 1.66 - const void *buf, u32 *len) 1.67 +static void *get_output_chunk(XENSTORE_RING_IDX cons, 1.68 + XENSTORE_RING_IDX prod, 1.69 + char *buf, uint32_t *len) 1.70 { 1.71 - /* Here to the end of buffer, unless they haven't written some. */ 1.72 - *len = RINGBUF_DATASIZE - h->read; 1.73 - if (h->write >= h->read) 1.74 - *len = h->write - h->read; 1.75 - return buf + h->read; 1.76 + *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); 1.77 + if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) 1.78 + *len = XENSTORE_RING_SIZE - (prod - cons); 1.79 + return buf + MASK_XENSTORE_IDX(prod); 1.80 } 1.81 1.82 -static void update_output_chunk(struct ringbuf_head *h, u32 len) 1.83 -{ 1.84 - h->write += len; 1.85 - if (h->write == RINGBUF_DATASIZE) 1.86 - h->write = 0; 1.87 -} 1.88 - 1.89 -static void update_input_chunk(struct ringbuf_head *h, u32 len) 1.90 +static const void *get_input_chunk(XENSTORE_RING_IDX cons, 1.91 + XENSTORE_RING_IDX prod, 1.92 + const char *buf, uint32_t *len) 1.93 { 1.94 - h->read += len; 1.95 - if (h->read == RINGBUF_DATASIZE) 1.96 - h->read = 0; 1.97 -} 1.98 - 1.99 -static int output_avail(struct ringbuf_head *out) 1.100 -{ 1.101 - unsigned int avail; 1.102 - 1.103 - get_output_chunk(out, out->buf, &avail); 1.104 - return avail != 0; 1.105 + *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); 1.106 + if ((prod - cons) < *len) 1.107 + *len = prod - cons; 1.108 + return buf + MASK_XENSTORE_IDX(cons); 1.109 } 1.110 1.111 int xb_write(const void *data, unsigned len) 1.112 { 1.113 - struct ringbuf_head h; 1.114 - struct ringbuf_head *out = outbuf(); 1.115 + struct xenstore_domain_interface *intf = xenstore_domain_interface(); 1.116 + XENSTORE_RING_IDX cons, prod; 1.117 1.118 - do 
{ 1.119 + while (len != 0) { 1.120 void *dst; 1.121 unsigned int avail; 1.122 1.123 - wait_event_interruptible(xb_waitq, output_avail(out)); 1.124 + wait_event_interruptible(xb_waitq, 1.125 + (intf->req_prod - intf->req_cons) != 1.126 + XENSTORE_RING_SIZE); 1.127 1.128 - /* Make local copy of header to check for sanity. */ 1.129 - h = *out; 1.130 - if (!check_buffer(&h)) 1.131 + /* Read indexes, then verify. */ 1.132 + cons = intf->req_cons; 1.133 + prod = intf->req_prod; 1.134 + mb(); 1.135 + if (!check_indexes(cons, prod)) 1.136 return -EIO; 1.137 1.138 - dst = get_output_chunk(&h, out->buf, &avail); 1.139 + dst = get_output_chunk(cons, prod, intf->req, &avail); 1.140 if (avail == 0) 1.141 continue; 1.142 if (avail > len) 1.143 avail = len; 1.144 1.145 - /* Make sure we read header before we write data 1.146 - * (implied by data-dependency, but let's play safe). */ 1.147 - mb(); 1.148 - 1.149 memcpy(dst, data, avail); 1.150 data += avail; 1.151 len -= avail; 1.152 1.153 /* Other side must not see new header until data is there. */ 1.154 wmb(); 1.155 - update_output_chunk(out, avail); 1.156 + intf->req_prod += avail; 1.157 1.158 /* This implies mb() before other side sees interrupt. 
*/ 1.159 notify_remote_via_evtchn(xen_start_info->store_evtchn); 1.160 - } while (len != 0); 1.161 + } 1.162 1.163 return 0; 1.164 } 1.165 1.166 -int xs_input_avail(void) 1.167 -{ 1.168 - unsigned int avail; 1.169 - struct ringbuf_head *in = inbuf(); 1.170 - 1.171 - get_input_chunk(in, in->buf, &avail); 1.172 - return avail != 0; 1.173 -} 1.174 - 1.175 int xb_read(void *data, unsigned len) 1.176 { 1.177 - struct ringbuf_head h; 1.178 - struct ringbuf_head *in = inbuf(); 1.179 - int was_full; 1.180 + struct xenstore_domain_interface *intf = xenstore_domain_interface(); 1.181 + XENSTORE_RING_IDX cons, prod; 1.182 1.183 while (len != 0) { 1.184 unsigned int avail; 1.185 const char *src; 1.186 1.187 - wait_event_interruptible(xb_waitq, xs_input_avail()); 1.188 + wait_event_interruptible(xb_waitq, 1.189 + intf->rsp_cons != intf->rsp_prod); 1.190 1.191 - h = *in; 1.192 - if (!check_buffer(&h)) 1.193 + /* Read indexes, then verify. */ 1.194 + cons = intf->rsp_cons; 1.195 + prod = intf->rsp_prod; 1.196 + mb(); 1.197 + if (!check_indexes(cons, prod)) 1.198 return -EIO; 1.199 1.200 - src = get_input_chunk(&h, in->buf, &avail); 1.201 + src = get_input_chunk(cons, prod, intf->rsp, &avail); 1.202 if (avail == 0) 1.203 continue; 1.204 if (avail > len) 1.205 avail = len; 1.206 - was_full = !output_avail(&h); 1.207 1.208 /* We must read header before we read data. */ 1.209 rmb(); 1.210 @@ -201,13 +151,12 @@ int xb_read(void *data, unsigned len) 1.211 1.212 /* Other side must not see free space until we've copied out */ 1.213 mb(); 1.214 + intf->rsp_cons += avail; 1.215 1.216 - update_input_chunk(in, avail); 1.217 pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); 1.218 - /* If it was full, tell them we've taken some. */ 1.219 - if (was_full) 1.220 - /* Implies mb(): they will see new header. */ 1.221 - notify_remote_via_evtchn(xen_start_info->store_evtchn); 1.222 + 1.223 + /* Implies mb(): they will see new header. 
*/ 1.224 + notify_remote_via_evtchn(xen_start_info->store_evtchn); 1.225 } 1.226 1.227 return 0;
2.1 --- a/tools/xenstore/xenstored_core.c Wed Oct 12 18:18:43 2005 +0100 2.2 +++ b/tools/xenstore/xenstored_core.c Wed Oct 12 18:25:40 2005 +0100 2.3 @@ -1586,7 +1586,7 @@ int main(int argc, char *argv[]) 2.4 goto more; 2.5 } 2.6 2.7 - if (domain_can_write(i)) { 2.8 + if (domain_can_write(i) && !list_empty(&i->out_list)) { 2.9 handle_output(i); 2.10 goto more; 2.11 }
3.1 --- a/tools/xenstore/xenstored_domain.c Wed Oct 12 18:18:43 2005 +0100 3.2 +++ b/tools/xenstore/xenstored_domain.c Wed Oct 12 18:25:40 2005 +0100 3.3 @@ -42,7 +42,6 @@ 3.4 static int *xc_handle; 3.5 static int eventchn_fd; 3.6 static int virq_port; 3.7 -static unsigned int ringbuf_datasize; 3.8 3.9 struct domain 3.10 { 3.11 @@ -66,10 +65,7 @@ struct domain 3.12 char *path; 3.13 3.14 /* Shared page. */ 3.15 - void *page; 3.16 - 3.17 - /* Input and output ringbuffer heads. */ 3.18 - struct ringbuf_head *input, *output; 3.19 + struct xenstore_domain_interface *interface; 3.20 3.21 /* The connection associated with this. */ 3.22 struct connection *conn; 3.23 @@ -80,14 +76,6 @@ struct domain 3.24 3.25 static LIST_HEAD(domains); 3.26 3.27 -struct ringbuf_head 3.28 -{ 3.29 - uint32_t write; /* Next place to write to */ 3.30 - uint32_t read; /* Next place to read from */ 3.31 - uint8_t flags; 3.32 - char buf[0]; 3.33 -} __attribute__((packed)); 3.34 - 3.35 #ifndef TESTING 3.36 static void evtchn_notify(int port) 3.37 { 3.38 @@ -100,91 +88,57 @@ extern void evtchn_notify(int port); 3.39 #endif 3.40 3.41 /* FIXME: Mark connection as broken (close it?) when this happens. */ 3.42 -static bool check_buffer(const struct ringbuf_head *h) 3.43 -{ 3.44 - return (h->write < ringbuf_datasize && h->read < ringbuf_datasize); 3.45 -} 3.46 - 3.47 -/* We can't fill last byte: would look like empty buffer. */ 3.48 -static void *get_output_chunk(const struct ringbuf_head *h, 3.49 - void *buf, uint32_t *len) 3.50 +static bool check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) 3.51 { 3.52 - uint32_t read_mark; 3.53 - 3.54 - if (h->read == 0) 3.55 - read_mark = ringbuf_datasize - 1; 3.56 - else 3.57 - read_mark = h->read - 1; 3.58 - 3.59 - /* Here to the end of buffer, unless they haven't read some out. 
*/ 3.60 - *len = ringbuf_datasize - h->write; 3.61 - if (read_mark >= h->write) 3.62 - *len = read_mark - h->write; 3.63 - return buf + h->write; 3.64 + return ((prod - cons) <= XENSTORE_RING_SIZE); 3.65 } 3.66 3.67 -static const void *get_input_chunk(const struct ringbuf_head *h, 3.68 - const void *buf, uint32_t *len) 3.69 +static void *get_output_chunk(XENSTORE_RING_IDX cons, 3.70 + XENSTORE_RING_IDX prod, 3.71 + char *buf, uint32_t *len) 3.72 { 3.73 - /* Here to the end of buffer, unless they haven't written some. */ 3.74 - *len = ringbuf_datasize - h->read; 3.75 - if (h->write >= h->read) 3.76 - *len = h->write - h->read; 3.77 - return buf + h->read; 3.78 -} 3.79 - 3.80 -static void update_output_chunk(struct ringbuf_head *h, uint32_t len) 3.81 -{ 3.82 - h->write += len; 3.83 - if (h->write == ringbuf_datasize) 3.84 - h->write = 0; 3.85 + *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); 3.86 + if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) 3.87 + *len = XENSTORE_RING_SIZE - (prod - cons); 3.88 + return buf + MASK_XENSTORE_IDX(prod); 3.89 } 3.90 3.91 -static void update_input_chunk(struct ringbuf_head *h, uint32_t len) 3.92 -{ 3.93 - h->read += len; 3.94 - if (h->read == ringbuf_datasize) 3.95 - h->read = 0; 3.96 -} 3.97 - 3.98 -static bool buffer_has_input(const struct ringbuf_head *h) 3.99 +static const void *get_input_chunk(XENSTORE_RING_IDX cons, 3.100 + XENSTORE_RING_IDX prod, 3.101 + const char *buf, uint32_t *len) 3.102 { 3.103 - uint32_t len; 3.104 - 3.105 - get_input_chunk(h, NULL, &len); 3.106 - return (len != 0); 3.107 -} 3.108 - 3.109 -static bool buffer_has_output_room(const struct ringbuf_head *h) 3.110 -{ 3.111 - uint32_t len; 3.112 - 3.113 - get_output_chunk(h, NULL, &len); 3.114 - return (len != 0); 3.115 + *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); 3.116 + if ((prod - cons) < *len) 3.117 + *len = prod - cons; 3.118 + return buf + MASK_XENSTORE_IDX(cons); 3.119 } 3.120 3.121 static int writechn(struct connection *conn, const void 
*data, unsigned int len) 3.122 { 3.123 uint32_t avail; 3.124 void *dest; 3.125 - struct ringbuf_head h; 3.126 + struct xenstore_domain_interface *intf = conn->domain->interface; 3.127 + XENSTORE_RING_IDX cons, prod; 3.128 3.129 - /* Must read head once, and before anything else, and verified. */ 3.130 - h = *conn->domain->output; 3.131 + /* Must read indexes once, and before anything else, and verified. */ 3.132 + cons = intf->rsp_cons; 3.133 + prod = intf->rsp_prod; 3.134 mb(); 3.135 - if (!check_buffer(&h)) { 3.136 + if (!check_indexes(cons, prod)) { 3.137 errno = EIO; 3.138 return -1; 3.139 } 3.140 3.141 - dest = get_output_chunk(&h, conn->domain->output->buf, &avail); 3.142 + dest = get_output_chunk(cons, prod, intf->rsp, &avail); 3.143 if (avail < len) 3.144 len = avail; 3.145 3.146 memcpy(dest, data, len); 3.147 mb(); 3.148 - update_output_chunk(conn->domain->output, len); 3.149 + intf->rsp_prod += len; 3.150 + 3.151 evtchn_notify(conn->domain->port); 3.152 + 3.153 return len; 3.154 } 3.155 3.156 @@ -192,32 +146,29 @@ static int readchn(struct connection *co 3.157 { 3.158 uint32_t avail; 3.159 const void *src; 3.160 - struct ringbuf_head h; 3.161 - bool was_full; 3.162 + struct xenstore_domain_interface *intf = conn->domain->interface; 3.163 + XENSTORE_RING_IDX cons, prod; 3.164 3.165 - /* Must read head once, and before anything else, and verified. */ 3.166 - h = *conn->domain->input; 3.167 + /* Must read indexes once, and before anything else, and verified. 
*/ 3.168 + cons = intf->req_cons; 3.169 + prod = intf->req_prod; 3.170 mb(); 3.171 3.172 - if (!check_buffer(&h)) { 3.173 + if (!check_indexes(cons, prod)) { 3.174 errno = EIO; 3.175 return -1; 3.176 } 3.177 3.178 - src = get_input_chunk(&h, conn->domain->input->buf, &avail); 3.179 + src = get_input_chunk(cons, prod, intf->req, &avail); 3.180 if (avail < len) 3.181 len = avail; 3.182 3.183 - was_full = !buffer_has_output_room(&h); 3.184 memcpy(data, src, len); 3.185 mb(); 3.186 - update_input_chunk(conn->domain->input, len); 3.187 - /* FIXME: Probably not neccessary. */ 3.188 - mb(); 3.189 + intf->req_cons += len; 3.190 3.191 - /* If it was full, tell them we've taken some. */ 3.192 - if (was_full) 3.193 - evtchn_notify(conn->domain->port); 3.194 + evtchn_notify(conn->domain->port); 3.195 + 3.196 return len; 3.197 } 3.198 3.199 @@ -234,8 +185,8 @@ static int destroy_domain(void *_domain) 3.200 eprintf("> Unbinding port %i failed!\n", domain->port); 3.201 } 3.202 3.203 - if (domain->page) 3.204 - munmap(domain->page, getpagesize()); 3.205 + if (domain->interface) 3.206 + munmap(domain->interface, getpagesize()); 3.207 3.208 return 0; 3.209 } 3.210 @@ -285,13 +236,14 @@ void handle_event(void) 3.211 3.212 bool domain_can_read(struct connection *conn) 3.213 { 3.214 - return buffer_has_input(conn->domain->input); 3.215 + struct xenstore_domain_interface *intf = conn->domain->interface; 3.216 + return (intf->req_cons != intf->req_prod); 3.217 } 3.218 3.219 bool domain_can_write(struct connection *conn) 3.220 { 3.221 - return (!list_empty(&conn->out_list) && 3.222 - buffer_has_output_room(conn->domain->output)); 3.223 + struct xenstore_domain_interface *intf = conn->domain->interface; 3.224 + return ((intf->rsp_prod - intf->rsp_cons) != XENSTORE_RING_SIZE); 3.225 } 3.226 3.227 static struct domain *new_domain(void *context, unsigned int domid, 3.228 @@ -307,20 +259,15 @@ static struct domain *new_domain(void *c 3.229 domain->shutdown = 0; 3.230 domain->domid = domid; 
3.231 domain->path = talloc_strdup(domain, path); 3.232 - domain->page = xc_map_foreign_range(*xc_handle, domain->domid, 3.233 - getpagesize(), 3.234 - PROT_READ|PROT_WRITE, 3.235 - mfn); 3.236 - if (!domain->page) 3.237 + domain->interface = xc_map_foreign_range( 3.238 + *xc_handle, domain->domid, 3.239 + getpagesize(), PROT_READ|PROT_WRITE, mfn); 3.240 + if (!domain->interface) 3.241 return NULL; 3.242 3.243 list_add(&domain->list, &domains); 3.244 talloc_set_destructor(domain, destroy_domain); 3.245 3.246 - /* One in each half of page. */ 3.247 - domain->input = domain->page; 3.248 - domain->output = domain->page + getpagesize()/2; 3.249 - 3.250 /* Tell kernel we're interested in this event. */ 3.251 bind.remote_domain = domid; 3.252 bind.remote_port = port; 3.253 @@ -504,9 +451,6 @@ int domain_init(void) 3.254 struct ioctl_evtchn_bind_virq bind; 3.255 int rc; 3.256 3.257 - /* The size of the ringbuffer: half a page minus head structure. */ 3.258 - ringbuf_datasize = getpagesize() / 2 - sizeof(struct ringbuf_head); 3.259 - 3.260 xc_handle = talloc(talloc_autofree_context(), int); 3.261 if (!xc_handle) 3.262 barf_perror("Failed to allocate domain handle"); 3.263 @@ -548,3 +492,13 @@ int domain_init(void) 3.264 3.265 return eventchn_fd; 3.266 } 3.267 + 3.268 +/* 3.269 + * Local variables: 3.270 + * c-file-style: "linux" 3.271 + * indent-tabs-mode: t 3.272 + * c-indent-level: 8 3.273 + * c-basic-offset: 8 3.274 + * tab-width: 8 3.275 + * End: 3.276 + */
4.1 --- a/tools/xenstore/xs_test.c Wed Oct 12 18:18:43 2005 +0100 4.2 +++ b/tools/xenstore/xs_test.c Wed Oct 12 18:25:40 2005 +0100 4.3 @@ -50,72 +50,33 @@ static bool readonly = false; 4.4 static bool print_input = false; 4.5 static unsigned int linenum = 0; 4.6 4.7 -struct ringbuf_head 4.8 -{ 4.9 - uint32_t write; /* Next place to write to */ 4.10 - uint32_t read; /* Next place to read from */ 4.11 - uint8_t flags; 4.12 - char buf[0]; 4.13 -} __attribute__((packed)); 4.14 - 4.15 -static struct ringbuf_head *out, *in; 4.16 -static unsigned int ringbuf_datasize; 4.17 static int daemon_pid; 4.18 +static struct xenstore_domain_interface *interface; 4.19 4.20 /* FIXME: Mark connection as broken (close it?) when this happens. */ 4.21 -static bool check_buffer(const struct ringbuf_head *h) 4.22 +static bool check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) 4.23 { 4.24 - return (h->write < ringbuf_datasize && h->read < ringbuf_datasize); 4.25 + return ((prod - cons) <= XENSTORE_RING_SIZE); 4.26 } 4.27 4.28 -/* We can't fill last byte: would look like empty buffer. */ 4.29 -static void *get_output_chunk(const struct ringbuf_head *h, 4.30 - void *buf, uint32_t *len) 4.31 +static void *get_output_chunk(XENSTORE_RING_IDX cons, 4.32 + XENSTORE_RING_IDX prod, 4.33 + char *buf, uint32_t *len) 4.34 { 4.35 - uint32_t read_mark; 4.36 - 4.37 - if (h->read == 0) 4.38 - read_mark = ringbuf_datasize - 1; 4.39 - else 4.40 - read_mark = h->read - 1; 4.41 - 4.42 - /* Here to the end of buffer, unless they haven't read some out. 
*/ 4.43 - *len = ringbuf_datasize - h->write; 4.44 - if (read_mark >= h->write) 4.45 - *len = read_mark - h->write; 4.46 - return buf + h->write; 4.47 + *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); 4.48 + if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) 4.49 + *len = XENSTORE_RING_SIZE - (prod - cons); 4.50 + return buf + MASK_XENSTORE_IDX(prod); 4.51 } 4.52 4.53 -static const void *get_input_chunk(const struct ringbuf_head *h, 4.54 - const void *buf, uint32_t *len) 4.55 -{ 4.56 - /* Here to the end of buffer, unless they haven't written some. */ 4.57 - *len = ringbuf_datasize - h->read; 4.58 - if (h->write >= h->read) 4.59 - *len = h->write - h->read; 4.60 - return buf + h->read; 4.61 -} 4.62 - 4.63 -static int output_avail(struct ringbuf_head *out) 4.64 +static const void *get_input_chunk(XENSTORE_RING_IDX cons, 4.65 + XENSTORE_RING_IDX prod, 4.66 + const char *buf, uint32_t *len) 4.67 { 4.68 - unsigned int avail; 4.69 - 4.70 - get_output_chunk(out, out->buf, &avail); 4.71 - return avail != 0; 4.72 -} 4.73 - 4.74 -static void update_output_chunk(struct ringbuf_head *h, uint32_t len) 4.75 -{ 4.76 - h->write += len; 4.77 - if (h->write == ringbuf_datasize) 4.78 - h->write = 0; 4.79 -} 4.80 - 4.81 -static void update_input_chunk(struct ringbuf_head *h, uint32_t len) 4.82 -{ 4.83 - h->read += len; 4.84 - if (h->read == ringbuf_datasize) 4.85 - h->read = 0; 4.86 + *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); 4.87 + if ((prod - cons) < *len) 4.88 + *len = prod - cons; 4.89 + return buf + MASK_XENSTORE_IDX(cons); 4.90 } 4.91 4.92 /* FIXME: We spin, and we're sloppy. 
*/ 4.93 @@ -123,25 +84,28 @@ static bool read_all_shmem(int fd __attr 4.94 void *data, unsigned int len) 4.95 { 4.96 unsigned int avail; 4.97 - int was_full; 4.98 + struct xenstore_domain_interface *intf = interface; 4.99 + XENSTORE_RING_IDX cons, prod; 4.100 + const void *src; 4.101 4.102 - if (!check_buffer(in)) 4.103 - barf("Corrupt buffer"); 4.104 + while (len) { 4.105 + cons = intf->rsp_cons; 4.106 + prod = intf->rsp_prod; 4.107 + if (!check_indexes(cons, prod)) 4.108 + barf("Corrupt buffer"); 4.109 4.110 - was_full = !output_avail(in); 4.111 - while (len) { 4.112 - const void *src = get_input_chunk(in, in->buf, &avail); 4.113 + src = get_input_chunk(cons, prod, intf->rsp, &avail); 4.114 if (avail > len) 4.115 avail = len; 4.116 memcpy(data, src, avail); 4.117 data += avail; 4.118 len -= avail; 4.119 - update_input_chunk(in, avail); 4.120 + intf->rsp_cons += avail; 4.121 } 4.122 4.123 /* Tell other end we read something. */ 4.124 - if (was_full) 4.125 - kill(daemon_pid, SIGUSR2); 4.126 + kill(daemon_pid, SIGUSR2); 4.127 + 4.128 return true; 4.129 } 4.130 4.131 @@ -149,22 +113,28 @@ static bool write_all_shmem(int fd __att 4.132 const void *data, unsigned int len) 4.133 { 4.134 uint32_t avail; 4.135 - 4.136 - if (!check_buffer(out)) 4.137 - barf("Corrupt buffer"); 4.138 + struct xenstore_domain_interface *intf = interface; 4.139 + XENSTORE_RING_IDX cons, prod; 4.140 + void *dst; 4.141 4.142 while (len) { 4.143 - void *dst = get_output_chunk(out, out->buf, &avail); 4.144 + cons = intf->req_cons; 4.145 + prod = intf->req_prod; 4.146 + if (!check_indexes(cons, prod)) 4.147 + barf("Corrupt buffer"); 4.148 + 4.149 + dst = get_output_chunk(cons, prod, intf->req, &avail); 4.150 if (avail > len) 4.151 avail = len; 4.152 memcpy(dst, data, avail); 4.153 data += avail; 4.154 len -= avail; 4.155 - update_output_chunk(out, avail); 4.156 + intf->req_prod += avail; 4.157 } 4.158 4.159 /* Tell other end we wrote something. 
*/ 4.160 kill(daemon_pid, SIGUSR2); 4.161 + 4.162 return true; 4.163 } 4.164 4.165 @@ -552,21 +522,21 @@ static void do_introduce(unsigned int ha 4.166 break; 4.167 4.168 fd = open("/tmp/xcmap", O_RDWR); 4.169 - /* Set in and out pointers. */ 4.170 - out = mmap(NULL, getpagesize(), PROT_WRITE|PROT_READ, MAP_SHARED,fd,0); 4.171 - if (out == MAP_FAILED) 4.172 + /* Set shared comms page. */ 4.173 + interface = mmap(NULL, getpagesize(), PROT_WRITE|PROT_READ, 4.174 + MAP_SHARED,fd,0); 4.175 + if (interface == MAP_FAILED) 4.176 barf_perror("Failed to map /tmp/xcmap page"); 4.177 - in = (void *)out + getpagesize() / 2; 4.178 close(fd); 4.179 4.180 /* Tell them the event channel and our PID. */ 4.181 - *(int *)((void *)out + 32) = getpid(); 4.182 - *(uint16_t *)((void *)out + 36) = atoi(eventchn); 4.183 + *(int *)((void *)interface + 32) = getpid(); 4.184 + *(uint16_t *)((void *)interface + 36) = atoi(eventchn); 4.185 4.186 if (!xs_introduce_domain(handles[handle], atoi(domid), 4.187 atol(mfn), atoi(eventchn), path)) { 4.188 failed(handle); 4.189 - munmap(out, getpagesize()); 4.190 + munmap(interface, getpagesize()); 4.191 return; 4.192 } 4.193 output("handle is %i\n", i); 4.194 @@ -576,7 +546,7 @@ static void do_introduce(unsigned int ha 4.195 handles[i]->fd = -2; 4.196 4.197 /* Read in daemon pid. */ 4.198 - daemon_pid = *(int *)((void *)out + 32); 4.199 + daemon_pid = *(int *)((void *)interface + 32); 4.200 } 4.201 4.202 static void do_release(unsigned int handle, const char *domid) 4.203 @@ -823,9 +793,6 @@ int main(int argc, char *argv[]) 4.204 usage(); 4.205 4.206 4.207 - /* The size of the ringbuffer: half a page minus head structure. */ 4.208 - ringbuf_datasize = getpagesize() / 2 - sizeof(struct ringbuf_head); 4.209 - 4.210 signal(SIGALRM, alarmed); 4.211 while (fgets(line, sizeof(line), stdin)) 4.212 do_command(0, line);
5.1 --- a/xen/include/public/io/xs_wire.h Wed Oct 12 18:18:43 2005 +0100 5.2 +++ b/xen/include/public/io/xs_wire.h Wed Oct 12 18:25:40 2005 +0100 5.3 @@ -93,6 +93,17 @@ enum xs_watch_type 5.4 XS_WATCH_TOKEN, 5.5 }; 5.6 5.7 +/* Inter-domain shared memory communications. */ 5.8 +#define XENSTORE_RING_SIZE 1024 5.9 +typedef uint32_t XENSTORE_RING_IDX; 5.10 +#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1)) 5.11 +struct xenstore_domain_interface { 5.12 + char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */ 5.13 + char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ 5.14 + XENSTORE_RING_IDX req_cons, req_prod; 5.15 + XENSTORE_RING_IDX rsp_cons, rsp_prod; 5.16 +}; 5.17 + 5.18 #endif /* _XS_WIRE_H */ 5.19 5.20 /*