
linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c @ 7294:5df423407700

Added some clarifying comments regarding xenbus/xenstore kernel startup.

Signed-off-by: Steven Hand <steven@xensource.com>
author:   smh22@firebug.cl.cam.ac.uk
date:     Sun Oct 09 20:55:53 2005 +0100
parents:  8016551fde98
children: f1e8d5f64105

/******************************************************************************
 * xenbus_xs.c
 *
 * This is the kernel equivalent of the "xs" library. We don't need everything
 * and we use xenbus_comms for communication.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

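/*
 * Illustrative sketch of typical use: a frontend driver might update its own
 * store entries inside a transaction roughly like this (the node names
 * "ring-ref" and "state" below are examples only, not fixed by this file):
 *
 *        err = xenbus_transaction_start();
 *        if (err)
 *                return err;
 *        err = xenbus_printf(dev->nodename, "ring-ref", "%u", ring_ref);
 *        if (!err)
 *                err = xenbus_printf(dev->nodename, "state", "%d", state);
 *        xenbus_transaction_end(err != 0);
 */
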
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/kthread.h>
#include <asm-xen/xenbus.h>
#include "xenbus_comms.h"

#define streq(a, b) (strcmp((a), (b)) == 0)

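/* A message read from xenstored by the reader thread: either a reply to an
 * outstanding request, or a watch event queued onto the work queue. */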
struct xs_stored_msg {
        struct xsd_sockmsg hdr;

        union {
                /* Stored replies. */
                struct {
                        struct list_head list;
                        char *body;
                } reply;

                /* Queued watch callbacks. */
                struct {
                        struct work_struct work;
                        struct xenbus_watch *handle;
                        char **vec;
                        unsigned int vec_size;
                } watch;
        } u;
};

struct xs_handle {
        /* A list of replies. Currently only one will ever be outstanding. */
        struct list_head reply_list;
        spinlock_t reply_lock;
        wait_queue_head_t reply_waitq;

        /* One request at a time. */
        struct semaphore request_mutex;

        /* One transaction at a time. */
        struct semaphore transaction_mutex;
        int transaction_pid;
};

static struct xs_handle xs_state;

static LIST_HEAD(watches);
static DEFINE_SPINLOCK(watches_lock);

/* Can wait on !xs_resuming for suspend/resume cycle to complete. */
static int xs_resuming;
static DECLARE_WAIT_QUEUE_HEAD(xs_resuming_waitq);

static void request_mutex_acquire(void)
{
        /*
         * We can't distinguish non-transactional from transactional
         * requests right now. So temporarily acquire the transaction mutex
         * if this task is outside transaction context.
         */
        if (xs_state.transaction_pid != current->pid)
                down(&xs_state.transaction_mutex);
        down(&xs_state.request_mutex);
}

static void request_mutex_release(void)
{
        up(&xs_state.request_mutex);
        if (xs_state.transaction_pid != current->pid)
                up(&xs_state.transaction_mutex);
}

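/* Translate an error string returned by xenstored into a positive errno
 * value, falling back to EINVAL for unknown strings. */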
static int get_error(const char *errorstring)
{
        unsigned int i;

        for (i = 0; !streq(errorstring, xsd_errors[i].errstring); i++) {
                if (i == ARRAY_SIZE(xsd_errors) - 1) {
                        printk(KERN_WARNING
                               "XENBUS xen store gave: unknown error %s",
                               errorstring);
                        return EINVAL;
                }
        }
        return xsd_errors[i].errnum;
}

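/* Block until the reader thread has queued a reply, then detach it from the
 * reply list and return its body to the caller. */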
static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
{
        struct xs_stored_msg *msg;
        char *body;

        spin_lock(&xs_state.reply_lock);

        while (list_empty(&xs_state.reply_list)) {
                spin_unlock(&xs_state.reply_lock);
                wait_event(xs_state.reply_waitq,
                           !list_empty(&xs_state.reply_list));
                spin_lock(&xs_state.reply_lock);
        }

        msg = list_entry(xs_state.reply_list.next,
                         struct xs_stored_msg, u.reply.list);
        list_del(&msg->u.reply.list);

        spin_unlock(&xs_state.reply_lock);

        *type = msg->hdr.type;
        if (len)
                *len = msg->hdr.len;
        body = msg->u.reply.body;

        kfree(msg);

        return body;
}

/* Emergency write. */
void xenbus_debug_write(const char *str, unsigned int count)
{
        struct xsd_sockmsg msg;

        msg.type = XS_DEBUG;
        msg.len = sizeof("print") + count + 1;

        request_mutex_acquire();
        xb_write(&msg, sizeof(msg));
        xb_write("print", sizeof("print"));
        xb_write(str, count);
        xb_write("", 1);
        request_mutex_release();
}

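/* Send a caller-formatted request straight through to xenstored and return
 * the raw reply body. Tracks transaction start/end so that the transaction
 * mutex stays held for the lifetime of a transaction. */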
void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
{
        void *ret;
        struct xsd_sockmsg req_msg = *msg;
        int err;

        if (req_msg.type == XS_TRANSACTION_START) {
                down(&xs_state.transaction_mutex);
                xs_state.transaction_pid = current->pid;
        }

        request_mutex_acquire();

        err = xb_write(msg, sizeof(*msg) + msg->len);
        if (err) {
                msg->type = XS_ERROR;
                ret = ERR_PTR(err);
        } else {
                ret = read_reply(&msg->type, &msg->len);
        }

        request_mutex_release();

        if ((msg->type == XS_TRANSACTION_END) ||
            ((req_msg.type == XS_TRANSACTION_START) &&
             (msg->type == XS_ERROR))) {
                xs_state.transaction_pid = -1;
                up(&xs_state.transaction_mutex);
        }

        return ret;
}

/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */
static void *xs_talkv(enum xsd_sockmsg_type type,
                      const struct kvec *iovec,
                      unsigned int num_vecs,
                      unsigned int *len)
{
        struct xsd_sockmsg msg;
        void *ret = NULL;
        unsigned int i;
        int err;

        msg.type = type;
        msg.len = 0;
        for (i = 0; i < num_vecs; i++)
                msg.len += iovec[i].iov_len;

        request_mutex_acquire();

        err = xb_write(&msg, sizeof(msg));
        if (err) {
                request_mutex_release();
                return ERR_PTR(err);
        }

        for (i = 0; i < num_vecs; i++) {
                err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
                if (err) {
                        request_mutex_release();
                        return ERR_PTR(err);
                }
        }

        ret = read_reply(&msg.type, len);

        request_mutex_release();

        if (IS_ERR(ret))
                return ret;

        if (msg.type == XS_ERROR) {
                err = get_error(ret);
                kfree(ret);
                return ERR_PTR(-err);
        }

        BUG_ON(msg.type != type);
        return ret;
}

/* Simplified version of xs_talkv: single message. */
static void *xs_single(enum xsd_sockmsg_type type,
                       const char *string, unsigned int *len)
{
        struct kvec iovec;

        iovec.iov_base = (void *)string;
        iovec.iov_len = strlen(string) + 1;
        return xs_talkv(type, &iovec, 1, len);
}

/* Many commands only need an ack; we don't care what it says. */
static int xs_error(char *reply)
{
        if (IS_ERR(reply))
                return PTR_ERR(reply);
        kfree(reply);
        return 0;
}

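/* Count the NUL-terminated strings packed into a buffer of the given length. */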
static unsigned int count_strings(const char *strings, unsigned int len)
{
        unsigned int num;
        const char *p;

        for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
                num++;

        return num;
}

/* Return the path to dir with /name appended. */
static char *join(const char *dir, const char *name)
{
        static char buffer[4096];

        BUG_ON(strlen(dir) + strlen("/") + strlen(name) + 1 > sizeof(buffer));

        strcpy(buffer, dir);
        if (!streq(name, "")) {
                strcat(buffer, "/");
                strcat(buffer, name);
        }
        return buffer;
}

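/* Break a block of NUL-terminated strings into an array of pointers. The
 * pointer array and the string data share a single allocation, so the caller
 * can release everything with one kfree(). */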
static char **split(char *strings, unsigned int len, unsigned int *num)
{
        char *p, **ret;

        /* Count the strings. */
        *num = count_strings(strings, len);

        /* Transfer to one big alloc for easy freeing. */
        ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
        if (!ret) {
                kfree(strings);
                return ERR_PTR(-ENOMEM);
        }
        memcpy(&ret[*num], strings, len);
        kfree(strings);

        strings = (char *)&ret[*num];
        for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
                ret[(*num)++] = p;

        return ret;
}

char **xenbus_directory(const char *dir, const char *node, unsigned int *num)
{
        char *strings;
        unsigned int len;

        strings = xs_single(XS_DIRECTORY, join(dir, node), &len);
        if (IS_ERR(strings))
                return (char **)strings;

        return split(strings, len, num);
}
EXPORT_SYMBOL(xenbus_directory);

/* Check if a path exists. Return 1 if it does. */
int xenbus_exists(const char *dir, const char *node)
{
        char **d;
        unsigned int dir_n;

        d = xenbus_directory(dir, node, &dir_n);
        if (IS_ERR(d))
                return 0;
        kfree(d);
        return 1;
}
EXPORT_SYMBOL(xenbus_exists);

/* Get the value of a single file.
 * Returns a kmalloced value: call kfree() on it after use.
 * len indicates length in bytes.
 */
void *xenbus_read(const char *dir, const char *node, unsigned int *len)
{
        return xs_single(XS_READ, join(dir, node), len);
}
EXPORT_SYMBOL(xenbus_read);

/* Write the value of a single file.
 * Returns -err on failure.
 */
int xenbus_write(const char *dir, const char *node, const char *string)
{
        const char *path;
        struct kvec iovec[2];

        path = join(dir, node);

        iovec[0].iov_base = (void *)path;
        iovec[0].iov_len = strlen(path) + 1;
        iovec[1].iov_base = (void *)string;
        iovec[1].iov_len = strlen(string);

        return xs_error(xs_talkv(XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
}
EXPORT_SYMBOL(xenbus_write);

/* Create a new directory. */
int xenbus_mkdir(const char *dir, const char *node)
{
        return xs_error(xs_single(XS_MKDIR, join(dir, node), NULL));
}
EXPORT_SYMBOL(xenbus_mkdir);

/* Destroy a file or directory (directories must be empty). */
int xenbus_rm(const char *dir, const char *node)
{
        return xs_error(xs_single(XS_RM, join(dir, node), NULL));
}
EXPORT_SYMBOL(xenbus_rm);

/* Start a transaction: changes by others will not be seen during this
 * transaction, and changes will not be visible to others until end.
 * You can only have one transaction at any time.
 */
int xenbus_transaction_start(void)
{
        int err;

        down(&xs_state.transaction_mutex);
        xs_state.transaction_pid = current->pid;

        err = xs_error(xs_single(XS_TRANSACTION_START, "", NULL));
        if (err) {
                xs_state.transaction_pid = -1;
                up(&xs_state.transaction_mutex);
        }

        return err;
}
EXPORT_SYMBOL(xenbus_transaction_start);

/* End a transaction.
 * If abort is non-zero, the transaction is discarded instead of committed.
 */
int xenbus_transaction_end(int abort)
{
        char abortstr[2];
        int err;

        /* Xenstored expects "T" to commit the transaction, "F" to abort it. */
        if (abort)
                strcpy(abortstr, "F");
        else
                strcpy(abortstr, "T");

        err = xs_error(xs_single(XS_TRANSACTION_END, abortstr, NULL));

        xs_state.transaction_pid = -1;
        up(&xs_state.transaction_mutex);

        return err;
}
EXPORT_SYMBOL(xenbus_transaction_end);

/* Single read and scanf: returns -errno or num scanned. */
int xenbus_scanf(const char *dir, const char *node, const char *fmt, ...)
{
        va_list ap;
        int ret;
        char *val;

        val = xenbus_read(dir, node, NULL);
        if (IS_ERR(val))
                return PTR_ERR(val);

        va_start(ap, fmt);
        ret = vsscanf(val, fmt, ap);
        va_end(ap);
        kfree(val);
        /* Distinctive errno. */
        if (ret == 0)
                return -ERANGE;
        return ret;
}
EXPORT_SYMBOL(xenbus_scanf);

/* Single printf and write: returns -errno or 0. */
int xenbus_printf(const char *dir, const char *node, const char *fmt, ...)
{
        va_list ap;
        int ret;
#define PRINTF_BUFFER_SIZE 4096
        char *printf_buffer;

        printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
        if (printf_buffer == NULL)
                return -ENOMEM;

        va_start(ap, fmt);
        ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
        va_end(ap);

        BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
        ret = xenbus_write(dir, node, printf_buffer);

        kfree(printf_buffer);

        return ret;
}
EXPORT_SYMBOL(xenbus_printf);

/* Report a (negative) errno into the store, with explanation. */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
        va_list ap;
        int ret;
        unsigned int len;
        char *printf_buffer;

        printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
        if (printf_buffer == NULL)
                goto fail;

        len = sprintf(printf_buffer, "%i ", -err);
        va_start(ap, fmt);
        ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
        va_end(ap);

        BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
        dev->has_error = 1;
        if (xenbus_write(dev->nodename, "error", printf_buffer) != 0)
                goto fail;

        kfree(printf_buffer);
        return;

fail:
        printk("xenbus: failed to write error node for %s (%s)\n",
               dev->nodename, printf_buffer);
        kfree(printf_buffer);
}
EXPORT_SYMBOL(xenbus_dev_error);

/* Clear any error. */
void xenbus_dev_ok(struct xenbus_device *dev)
{
        if (dev->has_error) {
                if (xenbus_rm(dev->nodename, "error") != 0)
                        printk("xenbus: failed to clear error node for %s\n",
                               dev->nodename);
                else
                        dev->has_error = 0;
        }
}
EXPORT_SYMBOL(xenbus_dev_ok);

/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
int xenbus_gather(const char *dir, ...)
{
        va_list ap;
        const char *name;
        int ret = 0;

        va_start(ap, dir);
        while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
                const char *fmt = va_arg(ap, char *);
                void *result = va_arg(ap, void *);
                char *p;

                p = xenbus_read(dir, name, NULL);
                if (IS_ERR(p)) {
                        ret = PTR_ERR(p);
                        break;
                }
                if (fmt) {
                        if (sscanf(p, fmt, result) == 0)
                                ret = -EINVAL;
                        kfree(p);
                } else
                        *(char **)result = p;
        }
        va_end(ap);
        return ret;
}
EXPORT_SYMBOL(xenbus_gather);

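/*
 * Example use of xenbus_gather() (illustrative only; "ring-ref" and
 * "protocol" are made-up node names, not defined by this file):
 *
 *        unsigned int ring_ref;
 *        char *protocol;
 *        int err = xenbus_gather(dev->nodename,
 *                                "ring-ref", "%u", &ring_ref,
 *                                "protocol", NULL, &protocol,
 *                                NULL);
 */
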
static int xs_watch(const char *path, const char *token)
{
        struct kvec iov[2];

        iov[0].iov_base = (void *)path;
        iov[0].iov_len = strlen(path) + 1;
        iov[1].iov_base = (void *)token;
        iov[1].iov_len = strlen(token) + 1;

        return xs_error(xs_talkv(XS_WATCH, iov, ARRAY_SIZE(iov), NULL));
}

static int xs_unwatch(const char *path, const char *token)
{
        struct kvec iov[2];

        iov[0].iov_base = (char *)path;
        iov[0].iov_len = strlen(path) + 1;
        iov[1].iov_base = (char *)token;
        iov[1].iov_len = strlen(token) + 1;

        return xs_error(xs_talkv(XS_UNWATCH, iov, ARRAY_SIZE(iov), NULL));
}

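/* The token passed to xenstored is simply the watch pointer printed in hex;
 * convert it back and check that it is still on the watch list. */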
static struct xenbus_watch *find_watch(const char *token)
{
        struct xenbus_watch *i, *cmp;

        cmp = (void *)simple_strtoul(token, NULL, 16);

        list_for_each_entry(i, &watches, list)
                if (i == cmp)
                        return i;

        return NULL;
}

/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
        /* Pointer in ascii is the token. */
        char token[sizeof(watch) * 2 + 1];
        int err;

        sprintf(token, "%lX", (long)watch);

        spin_lock(&watches_lock);
        BUG_ON(find_watch(token));
        spin_unlock(&watches_lock);

        err = xs_watch(watch->node, token);

        /* Ignore errors due to multiple registration. */
        if ((err == 0) || (err == -EEXIST)) {
                spin_lock(&watches_lock);
                list_add(&watch->list, &watches);
                spin_unlock(&watches_lock);
        }

        return err;
}
EXPORT_SYMBOL(register_xenbus_watch);

void unregister_xenbus_watch(struct xenbus_watch *watch)
{
        char token[sizeof(watch) * 2 + 1];
        int err;

        sprintf(token, "%lX", (long)watch);

        spin_lock(&watches_lock);
        BUG_ON(!find_watch(token));
        list_del(&watch->list);
        spin_unlock(&watches_lock);

        /* Ensure xs_resume() is not in progress (see comments there). */
        wait_event(xs_resuming_waitq, !xs_resuming);

        err = xs_unwatch(watch->node, token);
        if (err)
                printk(KERN_WARNING
                       "XENBUS Failed to release watch %s: %i\n",
                       watch->node, err);

        /* Make sure watch is not in use. */
        flush_scheduled_work();
}
EXPORT_SYMBOL(unregister_xenbus_watch);

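/* Quiesce xenstore traffic around save/restore: holding both mutexes stops
 * new requests and transactions from starting until xs_resume() releases
 * them again. */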
void xs_suspend(void)
{
        down(&xs_state.transaction_mutex);
        down(&xs_state.request_mutex);
}

void xs_resume(void)
{
        struct list_head *ent, *prev_ent = &watches;
        struct xenbus_watch *watch;
        char token[sizeof(watch) * 2 + 1];

        /* Protect against concurrent unregistration and freeing of watches. */
        BUG_ON(xs_resuming);
        xs_resuming = 1;

        up(&xs_state.request_mutex);
        up(&xs_state.transaction_mutex);

        /*
         * Iterate over the watch list, re-registering each node. We must
         * be careful about concurrent registrations and unregistrations.
         * We search for the node immediately following the previously
         * re-registered node. If we get no match then either we are done
         * (previous node is last in list) or the node was unregistered, in
         * which case we restart from the beginning of the list.
         * A concurrent unregister_xenbus_watch() + register_xenbus_watch()
         * is safe because it can only ever move a watch node earlier in the
         * list (registration adds at the head), so it cannot cause us to
         * skip nodes.
         */
        for (;;) {
                spin_lock(&watches_lock);
                list_for_each(ent, &watches)
                        if (ent->prev == prev_ent)
                                break;
                spin_unlock(&watches_lock);

                /* No match because prev_ent is at the end of the list? */
                if ((ent == &watches) && (watches.prev == prev_ent))
                        break; /* We're done! */

                if ((prev_ent = ent) != &watches) {
                        /*
                         * Safe even with watches_lock not held. We are saved
                         * by the (xs_resuming == 1) check in
                         * unregister_xenbus_watch().
                         */
                        watch = list_entry(ent, struct xenbus_watch, list);
                        sprintf(token, "%lX", (long)watch);
                        xs_watch(watch->node, token);
                }
        }

        xs_resuming = 0;
        wake_up(&xs_resuming_waitq);
}

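/* Work queue callback: invoke the watch's handler with the event strings,
 * then free the event. */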
static void xenbus_fire_watch(void *arg)
{
        struct xs_stored_msg *msg = arg;

        msg->u.watch.handle->callback(msg->u.watch.handle,
                                      (const char **)msg->u.watch.vec,
                                      msg->u.watch.vec_size);

        kfree(msg->u.watch.vec);
        kfree(msg);
}

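/* Read one message from the xenstore ring. Watch events are handed to the
 * work queue; replies are appended to the reply list and the waiting caller
 * is woken. */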
static int process_msg(void)
{
        struct xs_stored_msg *msg;
        char *body;
        int err;

        msg = kmalloc(sizeof(*msg), GFP_KERNEL);
        if (msg == NULL)
                return -ENOMEM;

        err = xb_read(&msg->hdr, sizeof(msg->hdr));
        if (err) {
                kfree(msg);
                return err;
        }

        body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
        if (body == NULL) {
                kfree(msg);
                return -ENOMEM;
        }

        err = xb_read(body, msg->hdr.len);
        if (err) {
                kfree(body);
                kfree(msg);
                return err;
        }
        body[msg->hdr.len] = '\0';

        if (msg->hdr.type == XS_WATCH_EVENT) {
                INIT_WORK(&msg->u.watch.work, xenbus_fire_watch, msg);

                msg->u.watch.vec = split(body, msg->hdr.len,
                                         &msg->u.watch.vec_size);
                if (IS_ERR(msg->u.watch.vec)) {
                        err = PTR_ERR(msg->u.watch.vec);
                        kfree(msg);
                        return err;
                }

                spin_lock(&watches_lock);
                msg->u.watch.handle = find_watch(
                        msg->u.watch.vec[XS_WATCH_TOKEN]);
                if (msg->u.watch.handle != NULL) {
                        schedule_work(&msg->u.watch.work);
                } else {
                        kfree(msg->u.watch.vec);
                        kfree(msg);
                }
                spin_unlock(&watches_lock);
        } else {
                msg->u.reply.body = body;
                spin_lock(&xs_state.reply_lock);
                list_add_tail(&msg->u.reply.list, &xs_state.reply_list);
                spin_unlock(&xs_state.reply_lock);
                wake_up(&xs_state.reply_waitq);
        }

        return 0;
}

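/* Kernel thread (started from xs_init()) that loops forever pulling messages
 * off the xenstore ring. */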
static int read_thread(void *unused)
{
        int err;

        for (;;) {
                err = process_msg();
                if (err)
                        printk(KERN_WARNING "XENBUS error %d while reading "
                               "message\n", err);
        }
}

/*
 * Initialize the interface to xenstore.
 */
int xs_init(void)
{
        int err;
        struct task_struct *reader;

        INIT_LIST_HEAD(&xs_state.reply_list);
        spin_lock_init(&xs_state.reply_lock);
        init_waitqueue_head(&xs_state.reply_waitq);

        init_MUTEX(&xs_state.request_mutex);
        init_MUTEX(&xs_state.transaction_mutex);
        xs_state.transaction_pid = -1;

        /* Initialize the shared memory rings to talk to xenstored. */
        err = xb_init_comms();
        if (err)
                return err;

        reader = kthread_run(read_thread, NULL, "xenbusd");
        if (IS_ERR(reader))
                return PTR_ERR(reader);

        return 0;
}

/*
 * Local variables:
 *  c-file-style: "linux"
 *  indent-tabs-mode: t
 *  c-indent-level: 8
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */