# This must come before the include of minios.mk!
include $(MINI-OS_ROOT)/$(TARGET_ARCH_DIR)/arch.mk
-extra_incl := $(foreach dir,$(EXTRA_INC),-I$(MINI-OS_ROOT)/include/$(dir))
+extra_incl := $(foreach dir,$(EXTRA_INC),-I$(realpath $(MINI-OS_ROOT)/include/$(dir)))
-DEF_CPPFLAGS += -I$(MINI-OS_ROOT)/include
+DEF_CPPFLAGS += -I$(realpath $(MINI-OS_ROOT)/include)
DEF_CPPFLAGS += -D__MINIOS__
ifeq ($(libc),y)
DEF_CPPFLAGS += -DHAVE_LIBC
-DEF_CPPFLAGS += -I$(MINI-OS_ROOT)/include/posix
-DEF_CPPFLAGS += -I$(XEN_ROOT)/tools/xenstore
+DEF_CPPFLAGS += -I$(realpath $(MINI-OS_ROOT)/include/posix)
+DEF_CPPFLAGS += -I$(realpath $(XEN_ROOT)/tools/xenstore)
endif
ifneq ($(LWIPDIR),)
#define L2_P2M_MASK (L2_P2M_ENTRIES - 1)
#define L3_P2M_MASK (L3_P2M_ENTRIES - 1)
- unsigned long *l1_list, *l2_list, *l3_list;
+ unsigned long *l1_list = NULL, *l2_list = NULL, *l3_list;
unsigned long pfn;
l3_list = (unsigned long *)alloc_page();
free(dev);
}
-struct blkfront_dev *init_blkfront(char *nodename, struct blkfront_info *info)
+struct blkfront_dev *init_blkfront(char *_nodename, struct blkfront_info *info)
{
xenbus_transaction_t xbt;
char* err;
int retry=0;
char* msg;
char* c;
+ char* nodename = _nodename ? _nodename : "device/vbd/768";
struct blkfront_dev *dev;
- if (!nodename)
- nodename = "device/vbd/768";
-
char path[strlen(nodename) + 1 + 10 + 1];
printk("******************* BLKFRONT for %s **********\n\n\n", nodename);
void blkfront_io(struct blkfront_aiocb *aiocbp, int write)
{
unsigned long flags;
+ DEFINE_WAIT(w);
+
ASSERT(!aiocbp->aio_cb);
aiocbp->aio_cb = blkfront_aio_cb;
blkfront_aio(aiocbp, write);
aiocbp->data = NULL;
local_irq_save(flags);
- DEFINE_WAIT(w);
while (1) {
blkfront_aio_poll(aiocbp->aio_dev);
if (aiocbp->data)
void blkfront_sync(struct blkfront_dev *dev)
{
unsigned long flags;
+ DEFINE_WAIT(w);
if (dev->info.mode == O_RDWR) {
if (dev->info.barrier == 1)
/* Note: This won't finish if another thread enqueues requests. */
local_irq_save(flags);
- DEFINE_WAIT(w);
while (1) {
blkfront_aio_poll(dev);
if (RING_FREE_REQUESTS(&dev->ring) == RING_SIZE(&dev->ring))
RING_IDX rp, cons;
struct blkif_response *rsp;
int more;
+ int nr_consumed;
moretodo:
#ifdef HAVE_LIBC
rmb(); /* Ensure we see queued responses up to 'rp'. */
cons = dev->ring.rsp_cons;
- int nr_consumed = 0;
+ nr_consumed = 0;
while ((cons != rp))
{
+ struct blkfront_aiocb *aiocbp;
+ int status;
+
rsp = RING_GET_RESPONSE(&dev->ring, cons);
nr_consumed++;
- struct blkfront_aiocb *aiocbp = (void*) (uintptr_t) rsp->id;
- int status = rsp->status;
+ aiocbp = (void*) (uintptr_t) rsp->id;
+ status = rsp->status;
if (status != BLKIF_RSP_OKAY)
printk("block error %d for op %d\n", status, rsp->operation);
int evtchn_alloc_unbound(domid_t pal, evtchn_handler_t handler,
void *data, evtchn_port_t *port)
{
+ int err;
evtchn_alloc_unbound_t op;
op.dom = DOMID_SELF;
op.remote_dom = pal;
- int err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
if (err)
return err;
*port = bind_evtchn(op.port, handler, data);
evtchn_handler_t handler, void *data,
evtchn_port_t *local_port)
{
+ int err;
+ evtchn_port_t port;
evtchn_bind_interdomain_t op;
op.remote_dom = pal;
op.remote_port = remote_port;
- int err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &op);
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &op);
if (err)
return err;
- evtchn_port_t port = op.local_port;
+ port = op.local_port;
*local_port = bind_evtchn(port, handler, data);
return err;
}
free(dev);
}
-struct kbdfront_dev *init_kbdfront(char *nodename, int abs_pointer)
+struct kbdfront_dev *init_kbdfront(char *_nodename, int abs_pointer)
{
xenbus_transaction_t xbt;
char* err;
struct xenkbd_page *s;
int retry=0;
char* msg;
-
+ char* nodename = _nodename ? _nodename : "device/vkbd/0";
struct kbdfront_dev *dev;
- if (!nodename)
- nodename = "device/vkbd/0";
-
char path[strlen(nodename) + 1 + 10 + 1];
printk("******************* KBDFRONT for %s **********\n\n\n", nodename);
return i;
}
-struct fbfront_dev *init_fbfront(char *nodename, unsigned long *mfns, int width, int height, int depth, int stride, int n)
+struct fbfront_dev *init_fbfront(char *_nodename, unsigned long *mfns, int width, int height, int depth, int stride, int n)
{
xenbus_transaction_t xbt;
char* err;
char* msg;
int i, j;
struct fbfront_dev *dev;
-
- if (!nodename)
- nodename = "device/vfb/0";
+ int max_pd;
+ unsigned long mapped;
+ char* nodename = _nodename ? _nodename : "device/vfb/0";
char path[strlen(nodename) + 1 + 10 + 1];
dev->offset = 0;
dev->events = NULL;
- const int max_pd = sizeof(s->pd) / sizeof(s->pd[0]);
- unsigned long mapped = 0;
+ max_pd = sizeof(s->pd) / sizeof(s->pd[0]);
+ mapped = 0;
for (i = 0; mapped < n && i < max_pd; i++) {
unsigned long *pd = (unsigned long *) alloc_page();
void force_evtchn_callback(void)
{
+ int save;
vcpu_info_t *vcpu;
vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];
- int save = vcpu->evtchn_upcall_mask;
+ save = vcpu->evtchn_upcall_mask;
while (vcpu->evtchn_upcall_pending) {
vcpu->evtchn_upcall_mask = 1;
#ifndef _CTYPE_H
#define _CTYPE_H
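+/* With a C library available, defer to its <ctype.h>; otherwise fall back to the minimal implementation below. */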
+#ifdef HAVE_LIBC
+#include_next <ctype.h>
+#else
/*
* NOTE! This ctype does not handle EOF like the standard C
* library is required to.
#define toupper(c) __toupper(c)
#endif
+
+#endif
#include <errno-base.h>
+typedef int error_t;
+
#define EDEADLK 35 /* Resource deadlock would occur */
#define ENAMETOOLONG 36 /* File name too long */
#define ENOLCK 37 /* No record locks available */
#define EOWNERDEAD 130 /* Owner died */
#define ENOTRECOVERABLE 131 /* State not recoverable */
+
+#define EFTYPE 132 /* Inappropriate file type or format */
+
#ifdef HAVE_LIBC
#include <sched.h>
extern int errno;
#include "atomic.h"
-#define ARCH_SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+#define ARCH_SPIN_LOCK_UNLOCKED { 0 }
#define SPIN_LOCK_UNUSED 0
#define SPIN_LOCK_USED 1
--- /dev/null
+#ifndef _MINIOS_SYS_LOCK_H_
+#define _MINIOS_SYS_LOCK_H_
+
+#ifdef HAVE_LIBC
+
+/* Due to an inclusion loop we cannot include sched.h here, so some things have to stay hidden. */
+
+#include <waittypes.h>
+
+
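+/* Plain (non-recursive) lock: a busy flag plus a wait queue for blocked threads. */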
+typedef struct {
+ int busy;
+ struct wait_queue_head wait;
+} _LOCK_T;
+
+#define __LOCK_INIT(class,lock) \
+ class _LOCK_T lock = { .wait = __WAIT_QUEUE_HEAD_INITIALIZER(lock.wait) }
+int ___lock_init(_LOCK_T *lock);
+int ___lock_acquire(_LOCK_T *lock);
+int ___lock_try_acquire(_LOCK_T *lock);
+int ___lock_release(_LOCK_T *lock);
+int ___lock_close(_LOCK_T *lock);
+#define __lock_init(__lock) ___lock_init(&__lock)
+#define __lock_acquire(__lock) ___lock_acquire(&__lock)
+#define __lock_release(__lock) ___lock_release(&__lock)
+#define __lock_try_acquire(__lock) ___lock_try_acquire(&__lock)
+#define __lock_close(__lock) 0
+
+
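+/* Recursive lock: remembers the owning thread and a nesting count. */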
+typedef struct {
+ struct thread *owner;
+ int count;
+ struct wait_queue_head wait;
+} _LOCK_RECURSIVE_T;
+
+#define __LOCK_INIT_RECURSIVE(class, lock) \
+ class _LOCK_RECURSIVE_T lock = { .wait = __WAIT_QUEUE_HEAD_INITIALIZER((lock).wait) }
+
+int ___lock_init_recursive(_LOCK_RECURSIVE_T *lock);
+int ___lock_acquire_recursive(_LOCK_RECURSIVE_T *lock);
+int ___lock_try_acquire_recursive(_LOCK_RECURSIVE_T *lock);
+int ___lock_release_recursive(_LOCK_RECURSIVE_T *lock);
+int ___lock_close_recursive(_LOCK_RECURSIVE_T *lock);
+#define __lock_init_recursive(__lock) ___lock_init_recursive(&__lock)
+#define __lock_acquire_recursive(__lock) ___lock_acquire_recursive(&__lock)
+#define __lock_release_recursive(__lock) ___lock_release_recursive(&__lock)
+#define __lock_try_acquire_recursive(__lock) ___lock_try_acquire_recursive(&__lock)
+#define __lock_close_recursive(__lock) 0
+
+#endif
+
+#endif /* _MINIOS_SYS_LOCK_H_ */
#endif
/* FreeBSD compat types */
+#ifndef HAVE_LIBC
typedef unsigned char u_char;
typedef unsigned int u_int;
typedef unsigned long u_long;
+#endif
#ifdef __i386__
typedef long long quad_t;
typedef unsigned long long u_quad_t;
typedef u64 uint64_t, uintmax_t;
typedef s64 int64_t, intmax_t;
typedef u64 off_t;
+#endif
+typedef intptr_t ptrdiff_t;
-#define INT_MAX ((int)(~0U>>1))
-#define UINT_MAX (~0U)
+#ifndef HAVE_LIBC
typedef long ssize_t;
#endif
+typedef unsigned long size_t;
+
#endif /* _TYPES_H_ */
#define __WAIT_H__
#include <sched.h>
-#include <list.h>
-#include <lib.h>
#include <os.h>
-
-struct wait_queue
-{
- struct thread *thread;
- struct list_head thread_list;
-};
-
-struct wait_queue_head
-{
- /* TODO - lock required? */
- struct list_head thread_list;
-};
-
-#define DECLARE_WAIT_QUEUE_HEAD(name) \
- struct wait_queue_head name = \
- { .thread_list = { &(name).thread_list, &(name).thread_list} }
-
-#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
- .thread_list = { &(name).thread_list, &(name).thread_list } }
-
+#include <waittypes.h>
#define DEFINE_WAIT(name) \
struct wait_queue name = { \
--- /dev/null
+#ifndef __WAITTYPE_H__
+#define __WAITTYPE_H__
+
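+/* Wait queue types, split out of wait.h so that headers such as sys/lock.h can use them without pulling in sched.h. */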
+#include <list.h>
+
+struct thread;
+struct wait_queue
+{
+ struct thread *thread;
+ struct list_head thread_list;
+};
+
+struct wait_queue_head
+{
+ /* TODO - lock required? */
+ struct list_head thread_list;
+};
+
+#define DECLARE_WAIT_QUEUE_HEAD(name) \
+ struct wait_queue_head name = \
+ { .thread_list = { &(name).thread_list, &(name).thread_list} }
+
+#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
+ .thread_list = { &(name).thread_list, &(name).thread_list } }
+
+#endif
#include "os.h"
-#define ARCH_SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
+#define ARCH_SPIN_LOCK_UNLOCKED { 1 }
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
#else
+#include <limits.h>
+
#define DEFAULT_ALIGN (sizeof(unsigned long))
#define malloc(size) _xmalloc(size, DEFAULT_ALIGN)
#define free(ptr) xfree(ptr)
}
pci_dev = init_pcifront(NULL);
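+ /* init_pcifront() may fail (e.g. when no PCI backend is present), so bail out early. */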
+ if (!pci_dev)
+ return;
printk("PCI devices:\n");
pcifront_scan(pci_dev, print);
}
+#ifndef HAVE_LIBC
#include <ctype.h>
unsigned char _ctype[] = {
_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
+#endif
#include <lib.h>
#include <mm.h>
#include <ctype.h>
+#include <limits.h>
/**
* simple_strtoul - convert a string to an unsigned long
void *_xmalloc(size_t size, size_t align)
{
- struct xmalloc_hdr *i, *hdr = NULL;
+ struct xmalloc_hdr *i, *tmp, *hdr = NULL;
uintptr_t data_begin;
size_t hdr_size;
/* unsigned long flags; */
/* Search free list. */
/* spin_lock_irqsave(&freelist_lock, flags); */
- list_for_each_entry( i, &freelist, freelist )
+ list_for_each_entry_safe( i, tmp, &freelist, freelist )
{
data_begin = align_up((uintptr_t)i + hdr_size, align);
--- /dev/null
+/*
+ * locks for newlib
+ *
+ * Samuel Thibault <Samuel.Thibault@eu.citrix.net>, July 2008
+ */
+
+#ifdef HAVE_LIBC
+
+#include <sys/lock.h>
+#include <sched.h>
+#include <wait.h>
+
+int ___lock_init(_LOCK_T *lock)
+{
+ lock->busy = 0;
+ init_waitqueue_head(&lock->wait);
+ return 0;
+}
+
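+/* Wait until the lock looks free, then re-check and claim it with events masked, since another waiter may have taken it in the meantime. */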
+int ___lock_acquire(_LOCK_T *lock)
+{
+ unsigned long flags;
+ while(1) {
+ wait_event(lock->wait, !lock->busy);
+ local_irq_save(flags);
+ if (!lock->busy)
+ break;
+ local_irq_restore(flags);
+ }
+ lock->busy = 1;
+ local_irq_restore(flags);
+ return 0;
+}
+
+int ___lock_try_acquire(_LOCK_T *lock)
+{
+ unsigned long flags;
+ int ret = -1;
+ local_irq_save(flags);
+ if (!lock->busy) {
+ lock->busy = 1;
+ ret = 0;
+ }
+ local_irq_restore(flags);
+ return ret;
+}
+
+int ___lock_release(_LOCK_T *lock)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ lock->busy = 0;
+ wake_up(&lock->wait);
+ local_irq_restore(flags);
+ return 0;
+}
+
+
+int ___lock_init_recursive(_LOCK_RECURSIVE_T *lock)
+{
+ lock->owner = NULL;
+ init_waitqueue_head(&lock->wait);
+ return 0;
+}
+
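+/* Re-entrant acquisition by the current owner just bumps the nesting count; other threads wait until the owner fully releases the lock. */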
+int ___lock_acquire_recursive(_LOCK_RECURSIVE_T *lock)
+{
+ unsigned long flags;
+ if (lock->owner != get_current()) {
+ while (1) {
+ wait_event(lock->wait, lock->owner == NULL);
+ local_irq_save(flags);
+ if (lock->owner == NULL)
+ break;
+ local_irq_restore(flags);
+ }
+ lock->owner = get_current();
+ local_irq_restore(flags);
+ }
+ lock->count++;
+ return 0;
+}
+
+int ___lock_try_acquire_recursive(_LOCK_RECURSIVE_T *lock)
+{
+ unsigned long flags;
+ int ret = -1;
+ local_irq_save(flags);
+ if (!lock->owner) {
+ ret = 0;
+ lock->owner = get_current();
+ lock->count++;
+ }
+ local_irq_restore(flags);
+ return ret;
+}
+
+int ___lock_release_recursive(_LOCK_RECURSIVE_T *lock)
+{
+ unsigned long flags;
+ BUG_ON(lock->owner != get_current());
+ if (--lock->count)
+ return 0;
+ local_irq_save(flags);
+ lock->owner = NULL;
+ wake_up(&lock->wait);
+ local_irq_restore(flags);
+ return 0;
+}
+
+#endif
static err_t
low_level_output(struct netif *netif, struct pbuf *p)
{
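+ /* The netfront device may not have been initialised; in that case there is nothing to send to. */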
+ if (!dev)
+ return ERR_OK;
+
#ifdef ETH_PAD_SIZE
pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif
struct ip_addr ipaddr = { htonl(IF_IPADDR) };
struct ip_addr netmask = { htonl(IF_NETMASK) };
struct ip_addr gw = { 0 };
- char *ip;
+ char *ip = NULL;
tprintk("Waiting for network.\n");
/* Shut down the network */
void stop_networking(void)
{
- shutdown_netfront(dev);
+ if (dev)
+ shutdown_netfront(dev);
}
#ifdef CONFIG_QEMU
char *vm;
char path[128];
+ int domid;
#endif
int i;
}
/* Fetch argc, argv from XenStore */
- int domid;
domid = xenbus_read_integer("target");
if (domid == -1) {
printk("Couldn't read target\n");
# Define some default flags.
# NB. '-Wcast-qual' is nasty, so I omitted it.
-DEF_CFLAGS += -fno-builtin -Wall -Werror -Wredundant-decls -Wno-format
+DEF_CFLAGS += -fno-builtin -Wall -Werror -Wredundant-decls -Wno-format -Wno-redundant-decls
DEF_CFLAGS += $(call cc-option,$(CC),-fno-stack-protector,)
+DEF_CFLAGS += $(call cc-option,$(CC),-fgnu89-inline)
DEF_CFLAGS += -Wstrict-prototypes -Wnested-externs -Wpointer-arith -Winline
DEF_CPPFLAGS += -D__XEN_INTERFACE_VERSION__=$(XEN_INTERFACE_VERSION)
-DEF_ASFLAGS = -D__ASSEMBLY__
-DEF_LDFLAGS =
+DEF_ASFLAGS += -D__ASSEMBLY__
+DEF_LDFLAGS +=
ifeq ($(debug),y)
DEF_CFLAGS += -g
}
+#ifndef __ia64__
int free_physical_pages(xen_pfn_t *mfns, int n)
{
struct xen_memory_reservation reservation;
reservation.domid = DOMID_SELF;
return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
+#endif
#ifdef HAVE_LIBC
void *sbrk(ptrdiff_t increment)
void network_rx(struct netfront_dev *dev)
{
- RING_IDX rp,cons;
+ RING_IDX rp,cons,req_prod;
struct netif_rx_response *rx;
+ int nr_consumed, some, more, i, notify;
moretodo:
rmb(); /* Ensure we see queued responses up to 'rp'. */
cons = dev->rx.rsp_cons;
- int nr_consumed=0;
- int some = 0;
+ nr_consumed = 0;
+ some = 0;
while ((cons != rp) && !some)
{
struct net_buffer* buf;
unsigned char* page;
+ int id;
rx = RING_GET_RESPONSE(&dev->rx, cons);
if (rx->status == NETIF_RSP_NULL) continue;
- int id = rx->id;
+ id = rx->id;
BUG_ON(id >= NET_TX_RING_SIZE);
buf = &dev->rx_buffers[id];
}
dev->rx.rsp_cons=cons;
- int more;
RING_FINAL_CHECK_FOR_RESPONSES(&dev->rx,more);
if(more && !some) goto moretodo;
- RING_IDX req_prod = dev->rx.req_prod_pvt;
-
- int i;
- netif_rx_request_t *req;
+ req_prod = dev->rx.req_prod_pvt;
for(i=0; i<nr_consumed; i++)
{
int id = xennet_rxidx(req_prod + i);
- req = RING_GET_REQUEST(&dev->rx, req_prod + i);
+ netif_rx_request_t *req = RING_GET_REQUEST(&dev->rx, req_prod + i);
struct net_buffer* buf = &dev->rx_buffers[id];
void* page = buf->page;
dev->rx.req_prod_pvt = req_prod + i;
- int notify;
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);
if (notify)
notify_remote_via_evtchn(dev->evtchn);
for (cons = dev->tx.rsp_cons; cons != prod; cons++)
{
struct netif_tx_response *txrsp;
+ struct net_buffer *buf;
txrsp = RING_GET_RESPONSE(&dev->tx, cons);
if (txrsp->status == NETIF_RSP_NULL)
id = txrsp->id;
BUG_ON(id >= NET_TX_RING_SIZE);
- struct net_buffer* buf = &dev->tx_buffers[id];
+ buf = &dev->tx_buffers[id];
gnttab_end_access(buf->gref);
buf->gref=GRANT_INVALID_REF;
free(dev);
}
-struct netfront_dev *init_netfront(char *nodename, void (*thenetif_rx)(unsigned char* data, int len), unsigned char rawmac[6], char **ip)
+struct netfront_dev *init_netfront(char *_nodename, void (*thenetif_rx)(unsigned char* data, int len), unsigned char rawmac[6], char **ip)
{
xenbus_transaction_t xbt;
char* err;
int retry=0;
int i;
char* msg;
+ char* nodename = _nodename ? _nodename : "device/vif/0";
struct netfront_dev *dev;
- if (!nodename)
- nodename = "device/vif/0";
-
char path[strlen(nodename) + 1 + 10 + 1];
if (!thenetif_rx)
free(dev);
}
-struct pcifront_dev *init_pcifront(char *nodename)
+struct pcifront_dev *init_pcifront(char *_nodename)
{
xenbus_transaction_t xbt;
char* err;
char* message=NULL;
int retry=0;
char* msg;
+ char* nodename = _nodename ? _nodename : "device/pci/0";
struct pcifront_dev *dev;
- if (!nodename)
- nodename = "device/pci/0";
-
char path[strlen(nodename) + 1 + 10 + 1];
printk("******************* PCIFRONT for %s **********\n\n\n", nodename);
void schedule(void)
{
struct thread *prev, *next, *thread;
- struct list_head *iterator;
+ struct list_head *iterator, *next_iterator;
unsigned long flags;
prev = current;
s_time_t now = NOW();
s_time_t min_wakeup_time = now + SECONDS(10);
next = NULL;
- list_for_each(iterator, &idle_thread->thread_list)
+ list_for_each_safe(iterator, next_iterator, &idle_thread->thread_list)
{
thread = list_entry(iterator, struct thread, thread_list);
if (!is_runnable(thread) && thread->wakeup_time != 0LL)
interrupted at the return instruction, and therefore at a safe point. */
if(prev != next) switch_threads(prev, next);
- list_for_each(iterator, &exited_threads)
+ list_for_each_safe(iterator, next_iterator, &exited_threads)
{
thread = list_entry(iterator, struct thread, thread_list);
if(thread != prev)
char **xenbus_wait_for_watch_return(xenbus_event_queue *queue)
{
struct xenbus_event *event;
+ DEFINE_WAIT(w);
if (!queue)
queue = &xenbus_events;
- DEFINE_WAIT(w);
while (!(event = *queue)) {
add_waiter(w, xenbus_watch_queue);
schedule();
static char *errmsg(struct xsd_sockmsg *rep)
{
+ char *res;
if (!rep) {
char msg[] = "No reply";
size_t len = strlen(msg) + 1;
}
if (rep->type != XS_ERROR)
return NULL;
- char *res = malloc(rep->len + 1);
+ res = malloc(rep->len + 1);
memcpy(res, rep + 1, rep->len);
res[rep->len] = 0;
free(rep);
struct xsd_sockmsg *reply, *repmsg;
struct write_req req[] = { { pre, strlen(pre)+1 } };
int nr_elems, x, i;
- char **res;
+ char **res, *msg;
repmsg = xenbus_msg_reply(XS_DIRECTORY, xbt, req, ARRAY_SIZE(req));
- char *msg = errmsg(repmsg);
+ msg = errmsg(repmsg);
if (msg) {
*contents = NULL;
return msg;
{
struct write_req req[] = { {path, strlen(path) + 1} };
struct xsd_sockmsg *rep;
- char *res;
+ char *res, *msg;
rep = xenbus_msg_reply(XS_READ, xbt, req, ARRAY_SIZE(req));
- char *msg = errmsg(rep);
+ msg = errmsg(rep);
if (msg) {
*value = NULL;
return msg;
{value, strlen(value)},
};
struct xsd_sockmsg *rep;
+ char *msg;
rep = xenbus_msg_reply(XS_WRITE, xbt, req, ARRAY_SIZE(req));
- char *msg = errmsg(rep);
+ msg = errmsg(rep);
if (msg) return msg;
free(rep);
return NULL;
struct watch *watch = malloc(sizeof(*watch));
+ char *msg;
+
if (!events)
events = &xenbus_events;
rep = xenbus_msg_reply(XS_WATCH, xbt, req, ARRAY_SIZE(req));
- char *msg = errmsg(rep);
+ msg = errmsg(rep);
if (msg) return msg;
free(rep);
struct watch *watch, **prev;
+ char *msg;
+
rep = xenbus_msg_reply(XS_UNWATCH, xbt, req, ARRAY_SIZE(req));
- char *msg = errmsg(rep);
+ msg = errmsg(rep);
if (msg) return msg;
free(rep);
{
struct write_req req[] = { {path, strlen(path) + 1} };
struct xsd_sockmsg *rep;
+ char *msg;
rep = xenbus_msg_reply(XS_RM, xbt, req, ARRAY_SIZE(req));
- char *msg = errmsg(rep);
+ msg = errmsg(rep);
if (msg)
return msg;
free(rep);
{
struct write_req req[] = { {path, strlen(path) + 1} };
struct xsd_sockmsg *rep;
- char *res;
+ char *res, *msg;
rep = xenbus_msg_reply(XS_GET_PERMS, xbt, req, ARRAY_SIZE(req));
- char *msg = errmsg(rep);
+ msg = errmsg(rep);
if (msg) {
*value = NULL;
return msg;
char *xenbus_set_perms(xenbus_transaction_t xbt, const char *path, domid_t dom, char perm)
{
char value[PERM_MAX_SIZE];
- snprintf(value, PERM_MAX_SIZE, "%c%hu", perm, dom);
struct write_req req[] = {
{path, strlen(path) + 1},
- {value, strlen(value) + 1},
+ {value, 0},
};
struct xsd_sockmsg *rep;
+ char *msg;
+ snprintf(value, PERM_MAX_SIZE, "%c%hu", perm, dom);
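+ /* value is only formatted now, so fill in its real length here rather than in the initialiser above. */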
+ req[1].len = strlen(value) + 1;
rep = xenbus_msg_reply(XS_SET_PERMS, xbt, req, ARRAY_SIZE(req));
- char *msg = errmsg(rep);
+ msg = errmsg(rep);
if (msg)
return msg;
free(rep);
static void do_ls_test(const char *pre)
{
- char **dirs;
+ char **dirs, *msg;
int x;
DEBUG("ls %s...\n", pre);
- char *msg = xenbus_ls(XBT_NIL, pre, &dirs);
+ msg = xenbus_ls(XBT_NIL, pre, &dirs);
if (msg) {
DEBUG("Error in xenbus ls: %s\n", msg);
free(msg);
static void do_read_test(const char *path)
{
- char *res;
+ char *res, *msg;
DEBUG("Read %s...\n", path);
- char *msg = xenbus_read(XBT_NIL, path, &res);
+ msg = xenbus_read(XBT_NIL, path, &res);
if (msg) {
DEBUG("Error in xenbus read: %s\n", msg);
free(msg);
static void do_write_test(const char *path, const char *val)
{
+ char *msg;
DEBUG("Write %s to %s...\n", val, path);
- char *msg = xenbus_write(XBT_NIL, path, val);
+ msg = xenbus_write(XBT_NIL, path, val);
if (msg) {
DEBUG("Result %s\n", msg);
free(msg);
static void do_rm_test(const char *path)
{
+ char *msg;
DEBUG("rm %s...\n", path);
- char *msg = xenbus_rm(XBT_NIL, path);
+ msg = xenbus_rm(XBT_NIL, path);
if (msg) {
DEBUG("Result %s\n", msg);
free(msg);