direct-io.hg

view tools/libxc/xc_ptrace.c @ 12765:2dd4569e0640

[LIBXC] Add an error reporting API to the libxc library.

- An 'xc_error' struct is used to pass around error
details. Currently contains two members 'code' an enumeration of
error types, and 'message' a free text description of the specific
problem.

- The xc_get_last_error() method returns a const pointer to the
internal instance of this struct managed by libxc. By returning a
const pointer we can add extra members to the end of the struct at
any time without worrying about ABI of callers. This will let us
provide more fine-grained info if needed in the future.

- The xc_error instance is statically defined inside libxc and marked
__thread. This ensures that errors are recorded per-thread, and
that when dealing with errors we never need to call malloc - all
storage needed is statically allocated.

- The xc_clear_last_error() method resets any currently recorded
error details

- The xc_error_code_to_desc() method converts the integer error code
into a generic user facing message. eg "Invalid kernel". Together
with the 'message' field from xc_error, this provides the user
visible feedback. eg "Invalid kernel: Non PAE-kernel on PAE host."

- A callback can be registered with xc_set_error_handler to receive
notification whenever an error is recorded, rather than querying
for error details after the fact with xc_get_last_error

- If built with -DDEBUG set, a default error handler will be
registered which calls fprintf(stderr), thus maintaining current
behaviour of logging errors to stderr during developer builds.

- The python binding for libxc is updated to use xc_get_last_error
to pull out error details whenever appropriate, instead of
returning info based on 'errno'

- The xc_set_error method is private to libxc internals, and is used
for setting error details

- The ERROR and PERROR macros have been updated to call xc_set_error
automatically specifying XC_INTERNAL_ERROR as the error code. This
gives a generic error report for all current failure points

- Some uses of the ERROR macro have been replaced with explicit
calls to xc_set_error to enable finer grained error reporting. In
particular the code dealing with invalid kernel types uses this
to report about PAE/architecture/wordsize mismatches

The patch has been tested by calling xm create against a variety of
config files defining invalid kernels of various kinds. It has also
been tested with libvirt talking to xend. In both cases the error
messages were propagated all the way back up the stack.

There is only one place where I need to do further work. The suspend
& restore APIs in Xend invoke external helper programs rather than
calling libxc directly. This means that error details are essentially
lost. Since there is already code in XenD which scans STDERR from
these programs I will investigate adapting this to extract actual
error messages from these helpers.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
author kfraser@localhost.localdomain
date Thu Dec 07 11:36:26 2006 +0000 (2006-12-07)
parents cfb1136ee8f7
children 3f419d160647
line source
1 #include <sys/ptrace.h>
2 #include <sys/wait.h>
3 #include <time.h>
5 #include "xc_private.h"
6 #include "xg_private.h"
7 #include "xc_ptrace.h"
#ifdef DEBUG
/*
 * Human-readable names for ptrace request codes, indexed by request
 * number; used only for diagnostics in debug builds (see the
 * out_unsupported path in xc_ptrace()).  Gaps in the request numbering
 * are padded with "PTRACE_INVALID".
 */
static char *ptrace_names[] = {
    "PTRACE_TRACEME",
    "PTRACE_PEEKTEXT",
    "PTRACE_PEEKDATA",
    "PTRACE_PEEKUSER",
    "PTRACE_POKETEXT",
    "PTRACE_POKEDATA",
    "PTRACE_POKEUSER",
    "PTRACE_CONT",
    "PTRACE_KILL",
    "PTRACE_SINGLESTEP",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_GETREGS",
    "PTRACE_SETREGS",
    "PTRACE_GETFPREGS",
    "PTRACE_SETFPREGS",
    "PTRACE_ATTACH",
    "PTRACE_DETACH",
    "PTRACE_GETFPXREGS",
    "PTRACE_SETFPXREGS",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_INVALID",
    "PTRACE_SYSCALL",
};
#endif
/* Identity of the domain (or core file) currently being debugged;
 * -1 until PTRACE_ATTACH is issued. */
static int current_domid = -1;
static int current_isfile;      /* non-zero when debugging a core file */
static int current_is_hvm;      /* non-zero when the target is an HVM guest */

static uint64_t online_cpumap;  /* bitmap of VCPUs last known to be online */
static uint64_t regs_valid;     /* bitmap of VCPUs whose ctxt[] entry is cached */
static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];  /* cached per-VCPU contexts */
extern int ffsll(long long int);
/*
 * Iterate over the set bits of the global online_cpumap.  Each pass
 * sets 'i' to the 1-based position of the lowest bit still set in
 * 'cpumap' (so 'i - 1' is the VCPU number); the loop terminates when
 * no bits remain.
 *
 * Fixes: the body now uses the macro parameter 'i' (the original
 * referenced the caller's variable 'index' directly, making the macro
 * unhygienic), and clears bits with a 64-bit constant so VCPUs >= 32
 * are handled correctly on a uint64_t cpumap.
 */
#define FOREACH_CPU(cpumap, i) \
    for ( (cpumap) = online_cpumap; ((i) = ffsll(cpumap)); \
          (cpumap) &= ~(1ULL << ((i) - 1)) )
50 static int
51 fetch_regs(int xc_handle, int cpu, int *online)
52 {
53 xc_vcpuinfo_t info;
54 int retval = 0;
56 if (online)
57 *online = 0;
58 if ( !(regs_valid & (1 << cpu)) )
59 {
60 retval = xc_vcpu_getcontext(xc_handle, current_domid,
61 cpu, &ctxt[cpu]);
62 if ( retval )
63 goto done;
64 regs_valid |= (1 << cpu);
66 }
67 if ( online == NULL )
68 goto done;
70 retval = xc_vcpu_getinfo(xc_handle, current_domid, cpu, &info);
71 *online = info.online;
73 done:
74 return retval;
75 }
/* Callbacks registered by the debugger front-end; invoked when a VCPU
 * ("thread") comes online (td_create) or goes offline (td_death). */
static struct thr_ev_handlers {
    thr_ev_handler_t td_create;
    thr_ev_handler_t td_death;
} handlers;
82 void
83 xc_register_event_handler(thr_ev_handler_t h,
84 td_event_e e)
85 {
86 switch (e) {
87 case TD_CREATE:
88 handlers.td_create = h;
89 break;
90 case TD_DEATH:
91 handlers.td_death = h;
92 break;
93 default:
94 abort(); /* XXX */
95 }
96 }
98 static inline int
99 paging_enabled(vcpu_guest_context_t *v)
100 {
101 unsigned long cr0 = v->ctrlreg[0];
102 return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
103 }
105 /*
106 * Fetch registers for all online cpus and set the cpumap
107 * to indicate which cpus are online
108 *
109 */
111 static int
112 get_online_cpumap(int xc_handle, struct xen_domctl_getdomaininfo *d,
113 uint64_t *cpumap)
114 {
115 int i, online, retval;
117 *cpumap = 0;
118 for (i = 0; i <= d->max_vcpu_id; i++) {
119 if ((retval = fetch_regs(xc_handle, i, &online)))
120 return retval;
121 if (online)
122 *cpumap |= (1 << i);
123 }
125 return 0;
126 }
128 /*
129 * Notify GDB of any vcpus that have come online or gone offline
130 * update online_cpumap
131 *
132 */
134 static void
135 online_vcpus_changed(uint64_t cpumap)
136 {
137 uint64_t changed_cpumap = cpumap ^ online_cpumap;
138 int index;
140 while ( (index = ffsll(changed_cpumap)) ) {
141 if ( cpumap & (1 << (index - 1)) )
142 {
143 if (handlers.td_create) handlers.td_create(index - 1);
144 } else {
145 IPRINTF("thread death: %d\n", index - 1);
146 if (handlers.td_death) handlers.td_death(index - 1);
147 }
148 changed_cpumap &= ~(1 << (index - 1));
149 }
150 online_cpumap = cpumap;
152 }
/* --------------------- */
/* XXX application state */
static long nr_pages = 0;                /* number of entries in page_array */
static unsigned long *page_array = NULL; /* guest pfn -> machine frame table */
160 /*
161 * Translates physical addresses to machine addresses for HVM
162 * guests. For paravirtual domains the function will just return the
163 * given address.
164 *
165 * This function should be used when reading page directories/page
166 * tables.
167 *
168 */
169 static unsigned long
170 to_ma(int cpu,
171 unsigned long in_addr)
172 {
173 unsigned long maddr = in_addr;
175 if ( current_is_hvm && paging_enabled(&ctxt[cpu]) )
176 maddr = page_array[maddr >> PAGE_SHIFT] << PAGE_SHIFT;
177 return maddr;
178 }
/*
 * Map the guest page containing virtual address 'guest_va' of a
 * non-PAE 32-bit guest into our address space with protection 'perm',
 * by walking the guest's 2-level page tables.  Returns a pointer to
 * the byte corresponding to guest_va, or NULL on any failure (unmapped
 * table, non-present entry, mapping error).  At most one mapped data
 * page per VCPU is kept in the static cache 'v'; it is replaced on the
 * next successful call for that VCPU.
 */
static void *
map_domain_va_32(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    unsigned long l2e, l1e, l1p, p, va = (unsigned long)guest_va;
    uint32_t *l2, *l1;
    /* Per-VCPU cache of the most recently mapped guest page. */
    static void *v[MAX_VIRT_CPUS];

    /* Map the page directory referenced by the VCPU's CR3. */
    l2 = xc_map_foreign_range(
        xc_handle, current_domid, PAGE_SIZE, PROT_READ,
        xen_cr3_to_pfn(ctxt[cpu].ctrlreg[3]));
    if ( l2 == NULL )
        return NULL;

    l2e = l2[l2_table_offset_i386(va)];
    munmap(l2, PAGE_SIZE);
    if ( !(l2e & _PAGE_PRESENT) )
        return NULL;
    /* to_ma(): PDE holds a physical frame for HVM guests. */
    l1p = to_ma(cpu, l2e);
    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l1p >> PAGE_SHIFT);
    if ( l1 == NULL )
        return NULL;

    l1e = l1[l1_table_offset_i386(va)];
    munmap(l1, PAGE_SIZE);
    if ( !(l1e & _PAGE_PRESENT) )
        return NULL;
    p = to_ma(cpu, l1e);
    /* Drop the previously cached page for this VCPU before remapping. */
    if ( v[cpu] != NULL )
        munmap(v[cpu], PAGE_SIZE);
    v[cpu] = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
    if ( v[cpu] == NULL )
        return NULL;

    /* Combine mapped page with the offset within the page. */
    return (void *)((unsigned long)v[cpu] | (va & (PAGE_SIZE - 1)));
}
/*
 * Map the guest page containing virtual address 'guest_va' of a PAE
 * guest into our address space with protection 'perm', by walking the
 * guest's 3-level page tables (L3 -> L2 -> L1).  Returns a pointer to
 * the byte corresponding to guest_va, or NULL on any failure.  As in
 * map_domain_va_32(), one mapped data page per VCPU is cached in 'v'.
 */
static void *
map_domain_va_pae(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    unsigned long l3e, l2e, l1e, l2p, l1p, p, va = (unsigned long)guest_va;
    uint64_t *l3, *l2, *l1;
    /* Per-VCPU cache of the most recently mapped guest page. */
    static void *v[MAX_VIRT_CPUS];

    /* Map the PDPT referenced by the VCPU's CR3. */
    l3 = xc_map_foreign_range(
        xc_handle, current_domid, PAGE_SIZE, PROT_READ,
        xen_cr3_to_pfn(ctxt[cpu].ctrlreg[3]));
    if ( l3 == NULL )
        return NULL;

    l3e = l3[l3_table_offset_pae(va)];
    munmap(l3, PAGE_SIZE);
    if ( !(l3e & _PAGE_PRESENT) )
        return NULL;
    l2p = to_ma(cpu, l3e);
    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
    if ( l2 == NULL )
        return NULL;

    l2e = l2[l2_table_offset_pae(va)];
    munmap(l2, PAGE_SIZE);
    if ( !(l2e & _PAGE_PRESENT) )
        return NULL;
    l1p = to_ma(cpu, l2e);
    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l1p >> PAGE_SHIFT);
    if ( l1 == NULL )
        return NULL;

    l1e = l1[l1_table_offset_pae(va)];
    munmap(l1, PAGE_SIZE);
    if ( !(l1e & _PAGE_PRESENT) )
        return NULL;
    p = to_ma(cpu, l1e);
    /* Drop the previously cached page for this VCPU before remapping. */
    if ( v[cpu] != NULL )
        munmap(v[cpu], PAGE_SIZE);
    v[cpu] = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
    if ( v[cpu] == NULL )
        return NULL;

    /* Combine mapped page with the offset within the page. */
    return (void *)((unsigned long)v[cpu] | (va & (PAGE_SIZE - 1)));
}
#ifdef __x86_64__
/*
 * Map the guest page containing virtual address 'guest_va' of a
 * long-mode guest into our address space with protection 'perm', by
 * walking the 4-level page tables (L4 -> L3 -> L2 -> L1).  Guests
 * running with CR4.PAE clear are delegated to map_domain_va_32().
 * 2MB superpages are handled at the L2 level.  Returns a pointer to
 * the byte corresponding to guest_va, or NULL on any failure.
 */
static void *
map_domain_va_64(
    int xc_handle,
    int cpu,
    void *guest_va,
    int perm)
{
    unsigned long l4e, l3e, l2e, l1e, l3p, l2p, l1p, p, va = (unsigned long)guest_va;
    uint64_t *l4, *l3, *l2, *l1;
    /* Per-VCPU cache of the most recently mapped guest page. */
    static void *v[MAX_VIRT_CPUS];

    /* CR4 bit 5 is PAE; clear means the guest is in legacy ia32 mode. */
    if ((ctxt[cpu].ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
        return map_domain_va_32(xc_handle, cpu, guest_va, perm);

    /* Map the PML4 referenced by the VCPU's CR3. */
    l4 = xc_map_foreign_range(
        xc_handle, current_domid, PAGE_SIZE, PROT_READ,
        xen_cr3_to_pfn(ctxt[cpu].ctrlreg[3]));
    if ( l4 == NULL )
        return NULL;

    l4e = l4[l4_table_offset(va)];
    munmap(l4, PAGE_SIZE);
    if ( !(l4e & _PAGE_PRESENT) )
        return NULL;
    l3p = to_ma(cpu, l4e);
    l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p >> PAGE_SHIFT);
    if ( l3 == NULL )
        return NULL;

    l3e = l3[l3_table_offset(va)];
    munmap(l3, PAGE_SIZE);
    if ( !(l3e & _PAGE_PRESENT) )
        return NULL;
    l2p = to_ma(cpu, l3e);
    l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p >> PAGE_SHIFT);
    if ( l2 == NULL )
        return NULL;

    l2e = l2[l2_table_offset(va)];
    munmap(l2, PAGE_SIZE);
    if ( !(l2e & _PAGE_PRESENT) )
        return NULL;
    l1p = to_ma(cpu, l2e);
    /* Bit 7 of the L2 entry is PSE: the entry maps a 2MB superpage. */
    if (l2e & 0x80) { /* 2M pages */
        p = to_ma(cpu, (l1p + l1_table_offset(va)) << PAGE_SHIFT);
    } else { /* 4K pages */
        l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l1p >> PAGE_SHIFT);
        if ( l1 == NULL )
            return NULL;

        l1e = l1[l1_table_offset(va)];
        munmap(l1, PAGE_SIZE);
        if ( !(l1e & _PAGE_PRESENT) )
            return NULL;
        p = to_ma(cpu, l1e);
    }
    /* Drop the previously cached page for this VCPU before remapping. */
    if ( v[cpu] != NULL )
        munmap(v[cpu], PAGE_SIZE);
    v[cpu] = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p >> PAGE_SHIFT);
    if ( v[cpu] == NULL )
        return NULL;

    /* Combine mapped page with the offset within the page. */
    return (void *)((unsigned long)v[cpu] | (va & (PAGE_SIZE - 1)));
}
#endif
337 static void *
338 map_domain_va(
339 int xc_handle,
340 int cpu,
341 void *guest_va,
342 int perm)
343 {
344 unsigned long va = (unsigned long) guest_va;
345 long npgs = xc_get_tot_pages(xc_handle, current_domid);
346 static enum { MODE_UNKNOWN, MODE_64, MODE_32, MODE_PAE } mode;
348 if ( mode == MODE_UNKNOWN )
349 {
350 xen_capabilities_info_t caps;
351 (void)xc_version(xc_handle, XENVER_capabilities, caps);
352 if ( strstr(caps, "-x86_64") )
353 mode = MODE_64;
354 else if ( strstr(caps, "-x86_32p") )
355 mode = MODE_PAE;
356 else if ( strstr(caps, "-x86_32") )
357 mode = MODE_32;
358 }
360 if ( nr_pages != npgs )
361 {
362 if ( nr_pages > 0 )
363 free(page_array);
364 nr_pages = npgs;
365 if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
366 {
367 IPRINTF("Could not allocate memory\n");
368 return NULL;
369 }
370 if ( xc_get_pfn_list(xc_handle, current_domid,
371 page_array, nr_pages) != nr_pages )
372 {
373 IPRINTF("Could not get the page frame list\n");
374 return NULL;
375 }
376 }
378 if (fetch_regs(xc_handle, cpu, NULL))
379 return NULL;
381 if (!paging_enabled(&ctxt[cpu])) {
382 static void * v;
383 unsigned long page;
385 if ( v != NULL )
386 munmap(v, PAGE_SIZE);
388 page = to_ma(cpu, page_array[va >> PAGE_SHIFT]);
390 v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
391 perm, page >> PAGE_SHIFT);
393 if ( v == NULL )
394 return NULL;
396 return (void *)(((unsigned long)v) | (va & BSD_PAGE_MASK));
397 }
398 #ifdef __x86_64__
399 if ( mode == MODE_64 )
400 return map_domain_va_64(xc_handle, cpu, guest_va, perm);
401 #endif
402 if ( mode == MODE_PAE )
403 return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
404 /* else ( mode == MODE_32 ) */
405 return map_domain_va_32(xc_handle, cpu, guest_va, perm);
406 }
408 int control_c_pressed_flag = 0;
/*
 * Poll the hypervisor until the target domain pauses (or immediately
 * when WNOHANG is set).  *status receives the domain's flags from
 * getdomaininfo.  A pending Ctrl-C pauses the domain explicitly.
 * Before returning, the online VCPU map is refreshed and GDB is
 * notified of any VCPUs that changed state.  Returns 0 on success,
 * or the non-zero result of the failing domctl.
 */
static int
__xc_waitdomain(
    int xc_handle,
    int domain,
    int *status,
    int options)
{
    DECLARE_DOMCTL;
    int retval;
    struct timespec ts;
    uint64_t cpumap;

    /* 10ms sleep between polls. */
    ts.tv_sec = 0;
    ts.tv_nsec = 10*1000*1000;

    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    domctl.domain = domain;

 retry:
    retval = do_domctl(xc_handle, &domctl);
    /* A mismatched domain id means the domain no longer exists. */
    if ( retval || (domctl.domain != domain) )
    {
        IPRINTF("getdomaininfo failed\n");
        goto done;
    }
    *status = domctl.u.getdomaininfo.flags;

    if ( options & WNOHANG )
        goto done;

    /* User interrupt: pause the domain ourselves and report back. */
    if (control_c_pressed_flag) {
        xc_domain_pause(xc_handle, domain);
        control_c_pressed_flag = 0;
        goto done;
    }

    /* Not yet paused: sleep briefly and poll again. */
    if ( !(domctl.u.getdomaininfo.flags & XEN_DOMINF_paused) )
    {
        nanosleep(&ts,NULL);
        goto retry;
    }
 done:
    if (get_online_cpumap(xc_handle, &domctl.u.getdomaininfo, &cpumap))
        IPRINTF("get_online_cpumap failed\n");
    if (online_cpumap != cpumap)
        online_vcpus_changed(cpumap);
    return retval;
}
/*
 * ptrace(2)-compatible entry point used by the GDB server to debug a
 * Xen domain (or a core file when current_isfile is set).  'domid_tid'
 * is the domain id for PTRACE_ATTACH and the VCPU id otherwise;
 * 'eaddr'/'edata' carry the request-specific address and data words.
 *
 * Returns the peeked word for PEEK requests and 0/-1 otherwise; on
 * error, errno is set to EINVAL (operation failed) or ENOSYS (request
 * not supported).
 */
long
xc_ptrace(
    int xc_handle,
    enum __ptrace_request request,
    uint32_t domid_tid,
    long eaddr,
    long edata)
{
    DECLARE_DOMCTL;
    struct gdb_regs pt;
    long retval = 0;
    unsigned long *guest_va;
    uint64_t cpumap;
    int cpu, index;
    void *addr = (char *)eaddr;
    void *data = (char *)edata;

    /* For ATTACH, domid_tid is the domain id, not a VCPU number. */
    cpu = (request != PTRACE_ATTACH) ? domid_tid : 0;

    switch ( request )
    {
    case PTRACE_PEEKTEXT:
    case PTRACE_PEEKDATA:
        /* Map the guest page holding 'addr' and read one word. */
        if (current_isfile)
            guest_va = (unsigned long *)map_domain_va_core(
                current_domid, cpu, addr, ctxt);
        else
            guest_va = (unsigned long *)map_domain_va(
                xc_handle, cpu, addr, PROT_READ);
        if ( guest_va == NULL )
            goto out_error;
        retval = *guest_va;
        break;

    case PTRACE_POKETEXT:
    case PTRACE_POKEDATA:
        /* XXX assume that all CPUs have the same address space */
        if (current_isfile)
            guest_va = (unsigned long *)map_domain_va_core(
                current_domid, cpu, addr, ctxt);
        else
            guest_va = (unsigned long *)map_domain_va(
                xc_handle, cpu, addr, PROT_READ|PROT_WRITE);
        if ( guest_va == NULL )
            goto out_error;
        *guest_va = (unsigned long)data;
        break;

    case PTRACE_GETREGS:
        /* Copy the cached VCPU context out in gdb_regs layout. */
        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
            goto out_error;
        SET_PT_REGS(pt, ctxt[cpu].user_regs);
        memcpy(data, &pt, sizeof(struct gdb_regs));
        break;

    case PTRACE_GETFPREGS:
        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
            goto out_error;
        memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof (elf_fpregset_t));
        break;

    case PTRACE_GETFPXREGS:
        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
            goto out_error;
        memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
        break;

    case PTRACE_SETREGS:
        if (current_isfile)
            goto out_unsupported; /* XXX not yet supported */
        SET_XC_REGS(((struct gdb_regs *)data), ctxt[cpu].user_regs);
        if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
                                &ctxt[cpu])))
            goto out_error_domctl;
        break;

    case PTRACE_SINGLESTEP:
        if (current_isfile)
            goto out_unsupported; /* XXX not yet supported */
        /* XXX we can still have problems if the user switches threads
         * during single-stepping - but that just seems retarded
         */
        /* Set the trap flag so the VCPU faults after one instruction. */
        ctxt[cpu].user_regs.eflags |= PSL_T;
        if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
                                &ctxt[cpu])))
            goto out_error_domctl;
        /* FALLTHROUGH */

    case PTRACE_CONT:
    case PTRACE_DETACH:
        if (current_isfile)
            goto out_unsupported; /* XXX not yet supported */
        if ( request != PTRACE_SINGLESTEP )
        {
            /* Clear any leftover trap flags on all online VCPUs so a
             * previous single-step does not fault on resume. */
            FOREACH_CPU(cpumap, index) {
                cpu = index - 1;
                if (fetch_regs(xc_handle, cpu, NULL))
                    goto out_error;
                /* Clear trace flag */
                if ( ctxt[cpu].user_regs.eflags & PSL_T )
                {
                    ctxt[cpu].user_regs.eflags &= ~PSL_T;
                    if ((retval = xc_vcpu_setcontext(xc_handle, current_domid,
                                                cpu, &ctxt[cpu])))
                        goto out_error_domctl;
                }
            }
        }
        if ( request == PTRACE_DETACH )
        {
            /* Turn off the hypervisor's debugging mode for the domain. */
            domctl.cmd = XEN_DOMCTL_setdebugging;
            domctl.domain = current_domid;
            domctl.u.setdebugging.enable = 0;
            if ((retval = do_domctl(xc_handle, &domctl)))
                goto out_error_domctl;
        }
        /* Cached contexts become stale once the domain runs again. */
        regs_valid = 0;
        if ((retval = xc_domain_unpause(xc_handle, current_domid > 0 ?
                                current_domid : -current_domid)))
            goto out_error_domctl;
        break;

    case PTRACE_ATTACH:
        current_domid = domid_tid;
        current_isfile = (int)edata;
        if (current_isfile)
            break;
        domctl.cmd = XEN_DOMCTL_getdomaininfo;
        domctl.domain = current_domid;
        retval = do_domctl(xc_handle, &domctl);
        if ( retval || (domctl.domain != current_domid) )
            goto out_error_domctl;
        /* Pause the domain unless it is already paused. */
        if ( domctl.u.getdomaininfo.flags & XEN_DOMINF_paused )
            IPRINTF("domain currently paused\n");
        else if ((retval = xc_domain_pause(xc_handle, current_domid)))
            goto out_error_domctl;
        current_is_hvm = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hvm_guest);
        /* Enable the hypervisor's debugging mode for the domain. */
        domctl.cmd = XEN_DOMCTL_setdebugging;
        domctl.domain = current_domid;
        domctl.u.setdebugging.enable = 1;
        if ((retval = do_domctl(xc_handle, &domctl)))
            goto out_error_domctl;

        if (get_online_cpumap(xc_handle, &domctl.u.getdomaininfo, &cpumap))
            IPRINTF("get_online_cpumap failed\n");
        if (online_cpumap != cpumap)
            online_vcpus_changed(cpumap);
        break;

    case PTRACE_TRACEME:
        IPRINTF("PTRACE_TRACEME is an invalid request under Xen\n");
        goto out_error;

    default:
        goto out_unsupported; /* XXX not yet supported */
    }

    return retval;

 out_error_domctl:
    perror("domctl failed");
 out_error:
    errno = EINVAL;
    return retval;

 out_unsupported:
#ifdef DEBUG
    IPRINTF("unsupported xc_ptrace request %s\n", ptrace_names[request]);
#endif
    errno = ENOSYS;
    return -1;
}
635 int
636 xc_waitdomain(
637 int xc_handle,
638 int domain,
639 int *status,
640 int options)
641 {
642 if (current_isfile)
643 return xc_waitdomain_core(xc_handle, domain, status, options, ctxt);
644 return __xc_waitdomain(xc_handle, domain, status, options);
645 }
647 /*
648 * Local variables:
649 * mode: C
650 * c-set-style: "BSD"
651 * c-basic-offset: 4
652 * tab-width: 4
653 * indent-tabs-mode: nil
654 * End:
655 */