direct-io.hg

view tools/libxc/xenctrl.h @ 12765:2dd4569e0640

[LIBXC] Add an error reporting API to the libxc library.

- An 'xc_error' struct is used to pass around error
details. It currently contains two members: 'code', an enumeration of
error types, and 'message', a free-text description of the specific
problem.

- The xc_get_last_error() method returns a const pointer to the
internal instance of this struct managed by libxc. By returning a
const pointer we can add extra members to the end of the struct at
any time without worrying about the ABI of callers. This will let us
provide more fine-grained info if needed in the future.

- The xc_error instance is statically defined inside libxc and marked
__thread. This ensures that errors are recorded per-thread, and
that when dealing with errors we never need to call malloc - all
storage needed is statically allocated.

- The xc_clear_last_error() method resets any currently recorded
error details.

- The xc_error_code_to_desc() method converts the integer error code
into a generic user-facing message, e.g. "Invalid kernel". Together
with the 'message' field from xc_error, this provides the user-visible
feedback, e.g. "Invalid kernel: Non PAE-kernel on PAE host."

- A callback can be registered with xc_set_error_handler() to receive
notification whenever an error is recorded, rather than querying for
error details after the fact with xc_get_last_error(). Both approaches
are sketched in the example after this list.

- If built with -DDEBUG, a default error handler is registered which
calls fprintf(stderr), thus maintaining the current behaviour of
logging errors to stderr during developer builds.

- The python binding for libxc is updated to use xc_get_last_error()
to pull out error details whenever appropriate, instead of returning
info based on 'errno'.

- The xc_set_error() method is private to libxc internals, and is used
for setting error details.

- The ERROR and PERROR macros have been updated to call xc_set_error()
automatically, specifying XC_INTERNAL_ERROR as the error code. This
gives a generic error report for all current failure points.

- Some uses of the ERROR macro have been replaced with explicit calls
to xc_set_error() to enable finer-grained error reporting. In
particular, the code dealing with invalid kernel types uses this to
report PAE/architecture/wordsize mismatches.
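
As an illustration of how a caller might consume this API, here is a
minimal sketch. The create_domain() wrapper, its arguments and the
log_xc_error() name are hypothetical; only the xc_* error-reporting
calls are the ones introduced by this patch.

    #include <stdio.h>
    #include <stdint.h>
    #include <xenctrl.h>

    /* Optional callback: invoked whenever libxc records an error. */
    static void log_xc_error(const xc_error *err)
    {
        fprintf(stderr, "libxc: %s: %s\n",
                xc_error_code_to_desc(err->code), err->message);
    }

    static int create_domain(int xc_handle, uint32_t ssidref,
                             xen_domain_handle_t handle, uint32_t *domid)
    {
        xc_set_error_handler(log_xc_error);    /* push-style notification */

        xc_clear_last_error();
        if (xc_domain_create(xc_handle, ssidref, handle, 0, domid) < 0) {
            /* Pull-style: query the per-thread error record afterwards. */
            const xc_error *err = xc_get_last_error();
            fprintf(stderr, "domain creation failed: %s: %s\n",
                    xc_error_code_to_desc(err->code), err->message);
            return -1;
        }
        return 0;
    }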

The patch has been tested by calling xm create against a variety of
config files defining invalid kernels of various kinds. It has also
been tested with libvirt talking to xend. In both cases the error
messages were propagated all the way back up the stack.

There is only one place where I need to do further work. The suspend
and restore APIs in Xend invoke external helper programs rather than
calling libxc directly. This means that error details are essentially
lost. Since there is already code in XenD which scans STDERR from
these programs, I will investigate adapting this to extract actual
error messages from these helpers.

Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
author kfraser@localhost.localdomain
date Thu Dec 07 11:36:26 2006 +0000 (2006-12-07)
parents f7b7daed94d6
children df5fa63490f4
line source
/******************************************************************************
 * xenctrl.h
 *
 * A library for low-level access to the Xen control interfaces.
 *
 * Copyright (c) 2003-2004, K A Fraser.
 */

#ifndef XENCTRL_H
#define XENCTRL_H

/* Tell the Xen public headers we are a user-space tools build. */
#ifndef __XEN_TOOLS__
#define __XEN_TOOLS__ 1
#endif

#include <stddef.h>
#include <stdint.h>
#include <xen/xen.h>
#include <xen/domctl.h>
#include <xen/sysctl.h>
#include <xen/version.h>
#include <xen/event_channel.h>
#include <xen/sched.h>
#include <xen/memory.h>
#include <xen/acm.h>
#include <xen/acm_ops.h>

#ifdef __ia64__
#define XC_PAGE_SHIFT 14
#else
#define XC_PAGE_SHIFT 12
#endif
#define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT)
#define XC_PAGE_MASK (~(XC_PAGE_SIZE-1))

/*
 * DEFINITIONS FOR CPU BARRIERS
 */

#if defined(__i386__)
#define mb()  __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
#define rmb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
#define wmb() __asm__ __volatile__ ( "" : : : "memory")
#elif defined(__x86_64__)
#define mb()  __asm__ __volatile__ ( "mfence" : : : "memory")
#define rmb() __asm__ __volatile__ ( "lfence" : : : "memory")
#define wmb() __asm__ __volatile__ ( "" : : : "memory")
#elif defined(__ia64__)
#define mb()  __asm__ __volatile__ ("mf" ::: "memory")
#define rmb() __asm__ __volatile__ ("mf" ::: "memory")
#define wmb() __asm__ __volatile__ ("mf" ::: "memory")
#elif defined(__powerpc__)
/* XXX loosen these up later */
#define mb()  __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("sync" : : : "memory") /* lwsync? */
#define wmb() __asm__ __volatile__ ("sync" : : : "memory") /* eieio? */
#else
#error "Define barriers"
#endif

/*
 * INITIALIZATION FUNCTIONS
 */

/**
 * This function opens a handle to the hypervisor interface. This function can
 * be called multiple times within a single process. Multiple processes can
 * have an open hypervisor interface at the same time.
 *
 * Each call to this function should have a corresponding call to
 * xc_interface_close().
 *
 * This function can fail if the caller does not have superuser permission or
 * if a Xen-enabled kernel is not currently running.
 *
 * @return a handle to the hypervisor interface or -1 on failure
 */
int xc_interface_open(void);

/**
 * This function closes an open hypervisor interface.
 *
 * This function can fail if the handle does not represent an open interface or
 * if there were problems closing the interface.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @return 0 on success, -1 otherwise.
 */
int xc_interface_close(int xc_handle);

/*
 * KERNEL INTERFACES
 */

/*
 * Resolve a kernel device name (e.g., "evtchn", "blktap0") into a kernel
 * device number. Returns -1 on error (and sets errno).
 */
int xc_find_device_number(const char *name);

/*
 * DOMAIN DEBUGGING FUNCTIONS
 */

typedef struct xc_core_header {
    unsigned int xch_magic;
    unsigned int xch_nr_vcpus;
    unsigned int xch_nr_pages;
    unsigned int xch_ctxt_offset;
    unsigned int xch_index_offset;
    unsigned int xch_pages_offset;
} xc_core_header_t;

#define XC_CORE_MAGIC     0xF00FEBED
#define XC_CORE_MAGIC_HVM 0xF00FEBEE

#ifdef __linux__

#include <sys/ptrace.h>
#include <thread_db.h>

typedef void (*thr_ev_handler_t)(long);

void xc_register_event_handler(
    thr_ev_handler_t h,
    td_event_e e);

long xc_ptrace(
    int xc_handle,
    enum __ptrace_request request,
    uint32_t domid,
    long addr,
    long data);

int xc_waitdomain(
    int xc_handle,
    int domain,
    int *status,
    int options);

#endif /* __linux__ */

/*
 * DOMAIN MANAGEMENT FUNCTIONS
 */

typedef struct xc_dominfo {
    uint32_t domid;
    uint32_t ssidref;
    unsigned int dying:1, crashed:1, shutdown:1,
                 paused:1, blocked:1, running:1,
                 hvm:1;
    unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
    unsigned long nr_pages;
    unsigned long shared_info_frame;
    uint64_t cpu_time;
    unsigned long max_memkb;
    unsigned int nr_online_vcpus;
    unsigned int max_vcpu_id;
    xen_domain_handle_t handle;
} xc_dominfo_t;

typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
int xc_domain_create(int xc_handle,
                     uint32_t ssidref,
                     xen_domain_handle_t handle,
                     uint32_t flags,
                     uint32_t *pdomid);

/* Functions to produce a dump of a given domain
 *  xc_domain_dumpcore - produces a dump to a specified file
 *  xc_domain_dumpcore_via_callback - produces a dump, using a specified
 *                                    callback function
 */
int xc_domain_dumpcore(int xc_handle,
                       uint32_t domid,
                       const char *corename);

/* Define the callback function type for xc_domain_dumpcore_via_callback.
 *
 * This function is called by the coredump code for every "write"; it is
 * passed an opaque object, created by the caller of
 * xc_domain_dumpcore_via_callback, for the function's own use.
 */
typedef int (dumpcore_rtn_t)(void *arg, char *buffer, unsigned int length);

int xc_domain_dumpcore_via_callback(int xc_handle,
                                    uint32_t domid,
                                    void *arg,
                                    dumpcore_rtn_t dump_rtn);
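
/*
 * Illustrative sketch (not part of the original header): a dumpcore_rtn_t
 * callback that appends each chunk to an open file descriptor passed via
 * the opaque 'arg' pointer. The return convention assumed here is 0 on
 * success and a negative value on error.
 *
 *   static int dump_to_fd(void *arg, char *buffer, unsigned int length)
 *   {
 *       int fd = *(int *)arg;
 *       return (write(fd, buffer, length) == (ssize_t)length) ? 0 : -1;
 *   }
 *
 *   ... xc_domain_dumpcore_via_callback(xc_handle, domid, &fd, dump_to_fd);
 */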

/*
 * This function sets the maximum number of vcpus that a domain may create.
 *
 * @parm xc_handle a handle to an open hypervisor interface.
 * @parm domid the domain id in which vcpus are to be created.
 * @parm max the maximum number of vcpus that the domain may create.
 * @return 0 on success, -1 on failure.
 */
int xc_domain_max_vcpus(int xc_handle,
                        uint32_t domid,
                        unsigned int max);

/**
 * This function pauses a domain. A paused domain still exists in memory;
 * however, it does not receive any timeslices from the hypervisor.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to pause
 * @return 0 on success, -1 on failure.
 */
int xc_domain_pause(int xc_handle,
                    uint32_t domid);
/**
 * This function unpauses a domain. The domain should have been previously
 * paused.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to unpause
 * @return 0 on success, -1 on failure
 */
int xc_domain_unpause(int xc_handle,
                      uint32_t domid);

/**
 * This function will destroy a domain. Destroying a domain removes the domain
 * completely from memory. This function should be called after sending the
 * domain a SHUTDOWN control message to free up the domain resources.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to destroy
 * @return 0 on success, -1 on failure
 */
int xc_domain_destroy(int xc_handle,
                      uint32_t domid);

/**
 * This function will shutdown a domain. This is intended for use in
 * fully-virtualized domains where this operation is analogous to the
 * sched_op operations in a paravirtualized domain. The caller is
 * expected to give the reason for the shutdown.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to shut down
 * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
 * @return 0 on success, -1 on failure
 */
int xc_domain_shutdown(int xc_handle,
                       uint32_t domid,
                       int reason);

int xc_vcpu_setaffinity(int xc_handle,
                        uint32_t domid,
                        int vcpu,
                        uint64_t cpumap);
int xc_vcpu_getaffinity(int xc_handle,
                        uint32_t domid,
                        int vcpu,
                        uint64_t *cpumap);

/**
 * This function will return information about one or more domains. It is
 * designed to iterate over the list of domains. If a single domain is
 * requested, this function will return the next domain in the list - if
 * one exists. It is, therefore, important in this case to make sure the
 * domain requested was the one returned.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm first_domid the first domain to enumerate information from. Domains
 *       are currently enumerated in order of creation.
 * @parm max_doms the number of elements in info
 * @parm info an array of max_doms size that will contain the information for
 *       the enumerated domains.
 * @return the number of domains enumerated or -1 on error
 */
int xc_domain_getinfo(int xc_handle,
                      uint32_t first_domid,
                      unsigned int max_doms,
                      xc_dominfo_t *info);
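
/*
 * Illustrative sketch (not part of the original header): enumerating all
 * domains one at a time with xc_domain_getinfo(), advancing from the domid
 * actually returned, since the call may return the next domain in the list
 * rather than the one requested.
 *
 *   xc_dominfo_t info;
 *   uint32_t next = 0;
 *   while (xc_domain_getinfo(xc_handle, next, 1, &info) == 1) {
 *       printf("domain %u: %lu pages\n", info.domid, info.nr_pages);
 *       next = info.domid + 1;
 *   }
 */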

/**
 * This function will set the execution context for the specified vcpu.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain to set the vcpu context for
 * @parm vcpu the vcpu number for the context
 * @parm ctxt pointer to the cpu context with the values to set
 * @return 0 on success, -1 on error
 */
int xc_vcpu_setcontext(int xc_handle,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_t *ctxt);
/**
 * This function will return information about one or more domains, using a
 * single hypercall. The domain information will be stored into the supplied
 * array of xc_domaininfo_t structures.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm first_domain the first domain to enumerate information from.
 *       Domains are currently enumerated in order of creation.
 * @parm max_domains the number of elements in info
 * @parm info an array of max_domains size that will contain the information
 *       for the enumerated domains.
 * @return the number of domains enumerated or -1 on error
 */
int xc_domain_getinfolist(int xc_handle,
                          uint32_t first_domain,
                          unsigned int max_domains,
                          xc_domaininfo_t *info);

/**
 * This function returns information about the execution context of a
 * particular vcpu of a domain.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain to get information from
 * @parm vcpu the vcpu number
 * @parm ctxt a pointer to a structure to store the execution context of the
 *       domain
 * @return 0 on success, -1 on failure
 */
int xc_vcpu_getcontext(int xc_handle,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_t *ctxt);

typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
int xc_vcpu_getinfo(int xc_handle,
                    uint32_t domid,
                    uint32_t vcpu,
                    xc_vcpuinfo_t *info);

int xc_domain_setcpuweight(int xc_handle,
                           uint32_t domid,
                           float weight);
long long xc_domain_get_cpu_usage(int xc_handle,
                                  domid_t domid,
                                  int vcpu);

int xc_domain_sethandle(int xc_handle, uint32_t domid,
                        xen_domain_handle_t handle);

typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
int xc_shadow_control(int xc_handle,
                      uint32_t domid,
                      unsigned int sop,
                      unsigned long *dirty_bitmap,
                      unsigned long pages,
                      unsigned long *mb,
                      uint32_t mode,
                      xc_shadow_op_stats_t *stats);

int xc_sedf_domain_set(int xc_handle,
                       uint32_t domid,
                       uint64_t period, uint64_t slice,
                       uint64_t latency, uint16_t extratime,
                       uint16_t weight);

int xc_sedf_domain_get(int xc_handle,
                       uint32_t domid,
                       uint64_t *period, uint64_t *slice,
                       uint64_t *latency, uint16_t *extratime,
                       uint16_t *weight);

int xc_sched_credit_domain_set(int xc_handle,
                               uint32_t domid,
                               struct xen_domctl_sched_credit *sdom);

int xc_sched_credit_domain_get(int xc_handle,
                               uint32_t domid,
                               struct xen_domctl_sched_credit *sdom);

/*
 * EVENT CHANNEL FUNCTIONS
 */

/**
 * This function allocates an unbound port. Ports are named endpoints used for
 * interdomain communication. This function is most useful in opening a
 * well-known port within a domain to receive events on.
 *
 * NOTE: If you are allocating a *local* unbound port, you probably want to
 * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
 * ports *only* during domain creation.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm dom the ID of the local domain (the 'allocatee')
 * @parm remote_dom the ID of the domain who will later bind
 * @return allocated port (in @dom) on success, -1 on failure
 */
int xc_evtchn_alloc_unbound(int xc_handle,
                            uint32_t dom,
                            uint32_t remote_dom);

int xc_physdev_pci_access_modify(int xc_handle,
                                 uint32_t domid,
                                 int bus,
                                 int dev,
                                 int func,
                                 int enable);

int xc_readconsolering(int xc_handle,
                       char **pbuffer,
                       unsigned int *pnr_chars,
                       int clear);

typedef xen_sysctl_physinfo_t xc_physinfo_t;
int xc_physinfo(int xc_handle,
                xc_physinfo_t *info);

int xc_sched_id(int xc_handle,
                int *sched_id);

int xc_domain_setmaxmem(int xc_handle,
                        uint32_t domid,
                        unsigned int max_memkb);

int xc_domain_set_time_offset(int xc_handle,
                              uint32_t domid,
                              int32_t time_offset_seconds);

int xc_domain_memory_increase_reservation(int xc_handle,
                                          uint32_t domid,
                                          unsigned long nr_extents,
                                          unsigned int extent_order,
                                          unsigned int address_bits,
                                          xen_pfn_t *extent_start);

int xc_domain_memory_decrease_reservation(int xc_handle,
                                          uint32_t domid,
                                          unsigned long nr_extents,
                                          unsigned int extent_order,
                                          xen_pfn_t *extent_start);

int xc_domain_memory_populate_physmap(int xc_handle,
                                      uint32_t domid,
                                      unsigned long nr_extents,
                                      unsigned int extent_order,
                                      unsigned int address_bits,
                                      xen_pfn_t *extent_start);

int xc_domain_ioport_permission(int xc_handle,
                                uint32_t domid,
                                uint32_t first_port,
                                uint32_t nr_ports,
                                uint32_t allow_access);

int xc_domain_irq_permission(int xc_handle,
                             uint32_t domid,
                             uint8_t pirq,
                             uint8_t allow_access);

int xc_domain_iomem_permission(int xc_handle,
                               uint32_t domid,
                               unsigned long first_mfn,
                               unsigned long nr_mfns,
                               uint8_t allow_access);

unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
                                    unsigned long mfn);

typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
/* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
   arrays. */
int xc_perfc_control(int xc_handle,
                     uint32_t op,
                     xc_perfc_desc_t *desc,
                     xc_perfc_val_t *val,
                     int *nbr_desc,
                     int *nbr_val);

/**
 * Memory maps a range within one domain to a local address range. Mappings
 * should be unmapped with munmap and should follow the same rules as mmap
 * regarding page alignment. Returns NULL on failure.
 *
 * In Linux, the ring queue for the control channel is accessible by mapping
 * the shared_info_frame (from xc_domain_getinfo()) + 2048. The structure
 * stored there is of type control_if_t.
 *
 * @parm xc_handle a handle on an open hypervisor interface
 * @parm dom the domain to map memory from
 * @parm size the amount of memory to map (in multiples of page size)
 * @parm prot same flag as in mmap().
 * @parm mfn the frame address to map.
 */
void *xc_map_foreign_range(int xc_handle, uint32_t dom,
                           int size, int prot,
                           unsigned long mfn );

void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
                           xen_pfn_t *arr, int num );

/**
 * Translates a virtual address in the context of a given domain and
 * vcpu returning the machine page frame number of the associated
 * page.
 *
 * @parm xc_handle a handle on an open hypervisor interface
 * @parm dom the domain to perform the translation in
 * @parm vcpu the vcpu to perform the translation on
 * @parm virt the virtual address to translate
 */
unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
                                           int vcpu, unsigned long long virt);

int xc_get_pfn_list(int xc_handle, uint32_t domid, xen_pfn_t *pfn_buf,
                    unsigned long max_pfns);

unsigned long xc_ia64_fpsr_default(void);

int xc_ia64_get_pfn_list(int xc_handle, uint32_t domid,
                         xen_pfn_t *pfn_buf,
                         unsigned int start_page, unsigned int nr_pages);

int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
                           unsigned long dst_pfn, const char *src_page);

int xc_clear_domain_page(int xc_handle, uint32_t domid,
                         unsigned long dst_pfn);

long xc_get_max_pages(int xc_handle, uint32_t domid);

int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
                 domid_t dom);

int xc_memory_op(int xc_handle, int cmd, void *arg);

int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
                          int num, unsigned long *arr);

/* Get current total pages allocated to a domain. */
long xc_get_tot_pages(int xc_handle, uint32_t domid);

/*
 * Trace Buffer Operations
 */

/**
 * xc_tbuf_enable - enable tracing buffers
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm pages size of tracing buffers to create (in pages)
 * @parm mfn location to store mfn of the trace buffers to
 * @parm size location to store the size (in bytes) of a trace buffer to
 *
 * Gets the machine address of the trace pointer area and the size of the
 * per CPU buffers.
 */
int xc_tbuf_enable(int xc_handle, unsigned long pages,
                   unsigned long *mfn, unsigned long *size);

/*
 * Disable tracing buffers.
 */
int xc_tbuf_disable(int xc_handle);

/**
 * This function sets the size of the trace buffers. Setting the size
 * is currently a one-shot operation that may be performed either at boot
 * time or via this interface, not both. The buffer size must be set before
 * enabling tracing.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm size the size in pages per cpu for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_set_size(int xc_handle, unsigned long size);

/**
 * This function retrieves the current size of the trace buffers.
 * Note that the size returned is in terms of bytes, not pages.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm size will contain the size in bytes for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_get_size(int xc_handle, unsigned long *size);

int xc_tbuf_set_cpu_mask(int xc_handle, uint32_t mask);

int xc_tbuf_set_evt_mask(int xc_handle, uint32_t mask);

int xc_domctl(int xc_handle, struct xen_domctl *domctl);
int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl);

int xc_version(int xc_handle, int cmd, void *arg);

/*
 * MMU updates.
 */
#define MAX_MMU_UPDATES 1024
struct xc_mmu {
    mmu_update_t updates[MAX_MMU_UPDATES];
    int idx;
    domid_t subject;
};
typedef struct xc_mmu xc_mmu_t;
xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom);
int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
                      unsigned long long ptr, unsigned long long val);
int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);

int xc_acm_op(int xc_handle, int cmd, void *arg, unsigned long arg_size);

/*
 * Return a handle to the event channel driver, or -1 on failure, in which case
 * errno will be set appropriately.
 */
int xc_evtchn_open(void);

/*
 * Close a handle previously allocated with xc_evtchn_open().
 */
int xc_evtchn_close(int xce_handle);

/*
 * Return an fd that can be select()ed on for further calls to
 * xc_evtchn_pending().
 */
int xc_evtchn_fd(int xce_handle);

/*
 * Notify the given event channel. Returns -1 on failure, in which case
 * errno will be set appropriately.
 */
int xc_evtchn_notify(int xce_handle, evtchn_port_t port);

/*
 * Returns a new event port awaiting interdomain connection from the given
 * domain ID, or -1 on failure, in which case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_bind_unbound_port(int xce_handle, int domid);

/*
 * Returns a new event port bound to the remote port for the given domain ID,
 * or -1 on failure, in which case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_bind_interdomain(int xce_handle, int domid,
                                         evtchn_port_t remote_port);

/*
 * Unbind the given event channel. Returns -1 on failure, in which case errno
 * will be set appropriately.
 */
int xc_evtchn_unbind(int xce_handle, evtchn_port_t port);

/*
 * Bind an event channel to the given VIRQ. Returns the event channel bound to
 * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_bind_virq(int xce_handle, unsigned int virq);

/*
 * Return the next event channel to become pending, or -1 on failure, in which
 * case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_pending(int xce_handle);

/*
 * Unmask the given event channel. Returns -1 on failure, in which case errno
 * will be set appropriately.
 */
int xc_evtchn_unmask(int xce_handle, evtchn_port_t port);
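
/*
 * Illustrative sketch (not part of the original header): waiting for events
 * on a VIRQ using the functions above. Includes and error handling are
 * omitted for brevity.
 *
 *   int xce = xc_evtchn_open();
 *   evtchn_port_t port = xc_evtchn_bind_virq(xce, VIRQ_DOM_EXC);
 *   for (;;) {
 *       fd_set fds;
 *       int fd = xc_evtchn_fd(xce);
 *       FD_ZERO(&fds);
 *       FD_SET(fd, &fds);
 *       select(fd + 1, &fds, NULL, NULL, NULL);
 *       if (xc_evtchn_pending(xce) == port)
 *           xc_evtchn_unmask(xce, port);   // handle the event, then re-enable
 *   }
 */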

int xc_hvm_set_pci_intx_level(
    int xc_handle, domid_t dom,
    uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
    unsigned int level);
int xc_hvm_set_isa_irq_level(
    int xc_handle, domid_t dom,
    uint8_t isa_irq,
    unsigned int level);

int xc_hvm_set_pci_link_route(
    int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq);

typedef enum {
    XC_ERROR_NONE = 0,
    XC_INTERNAL_ERROR = 1,
    XC_INVALID_KERNEL = 2,
} xc_error_code;

#define XC_MAX_ERROR_MSG_LEN 1024
typedef struct {
    int code;
    char message[XC_MAX_ERROR_MSG_LEN];
} xc_error;

/*
 * Return a pointer to the last error. This pointer and the
 * data pointed to are only valid until the next call to
 * libxc.
 */
const xc_error *xc_get_last_error(void);

/*
 * Clear the last error.
 */
void xc_clear_last_error(void);

typedef void (*xc_error_handler)(const xc_error *err);

/*
 * The default error handler, which prints to stderr.
 */
void xc_default_error_handler(const xc_error *err);

/*
 * Convert an error code into a text description.
 */
const char *xc_error_code_to_desc(int code);

/*
 * Registers a callback to handle errors.
 */
xc_error_handler xc_set_error_handler(xc_error_handler handler);

#endif