direct-io.hg: tools/libxc/xenctrl.h @ 12988:e080700efa56

[TOOLS] Fix the build. Clearly demarcate PPC-specific stuff.
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kfraser@localhost.localdomain
date      Wed Dec 13 10:23:53 2006 +0000
parents   749c399d73df
children  44668189f354

/******************************************************************************
 * xenctrl.h
 *
 * A library for low-level access to the Xen control interfaces.
 *
 * Copyright (c) 2003-2004, K A Fraser.
 */

#ifndef XENCTRL_H
#define XENCTRL_H

/* Tell the Xen public headers we are a user-space tools build. */
#ifndef __XEN_TOOLS__
#define __XEN_TOOLS__ 1
#endif

#include <stddef.h>
#include <stdint.h>
#include <xen/xen.h>
#include <xen/domctl.h>
#include <xen/sysctl.h>
#include <xen/version.h>
#include <xen/event_channel.h>
#include <xen/sched.h>
#include <xen/memory.h>
#include <xen/acm.h>
#include <xen/acm_ops.h>

#ifdef __ia64__
#define XC_PAGE_SHIFT           14
#else
#define XC_PAGE_SHIFT           12
#endif
#define XC_PAGE_SIZE            (1UL << XC_PAGE_SHIFT)
#define XC_PAGE_MASK            (~(XC_PAGE_SIZE-1))

/*
 *  DEFINITIONS FOR CPU BARRIERS
 */

#if defined(__i386__)
#define mb()  __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
#define rmb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
#define wmb() __asm__ __volatile__ ( "" : : : "memory" )
#elif defined(__x86_64__)
#define mb()  __asm__ __volatile__ ( "mfence" : : : "memory" )
#define rmb() __asm__ __volatile__ ( "lfence" : : : "memory" )
#define wmb() __asm__ __volatile__ ( "" : : : "memory" )
#elif defined(__ia64__)
#define mb()  __asm__ __volatile__ ( "mf" : : : "memory" )
#define rmb() __asm__ __volatile__ ( "mf" : : : "memory" )
#define wmb() __asm__ __volatile__ ( "mf" : : : "memory" )
#elif defined(__powerpc__)
/* XXX loosen these up later */
#define mb()  __asm__ __volatile__ ( "sync" : : : "memory" )
#define rmb() __asm__ __volatile__ ( "sync" : : : "memory" ) /* lwsync? */
#define wmb() __asm__ __volatile__ ( "sync" : : : "memory" ) /* eieio? */
#else
#error "Define barriers"
#endif
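
/*
 * Illustrative sketch (not part of the original header): how the barriers
 * above are typically used when polling a producer/consumer ring shared with
 * another domain.  The ring layout and helper below are made up for the
 * example; real rings are generated from xen/io/ring.h.
 *
 *   struct demo_ring {
 *       uint32_t prod;                       // written by the producer
 *       uint32_t msg[64];                    // payload slots
 *   };
 *
 *   static int demo_consume(struct demo_ring *ring, uint32_t *cons)
 *   {
 *       if ( *cons == ring->prod )
 *           return 0;                        // nothing pending
 *       rmb();                               // read prod before the payload
 *       handle_message(ring->msg[*cons % 64]);   // hypothetical helper
 *       wmb();                               // finish with the slot first
 *       (*cons)++;                           // then publish the new consumer index
 *       return 1;
 *   }
 */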

/*
 *  INITIALIZATION FUNCTIONS
 */

/**
 * This function opens a handle to the hypervisor interface.  This function can
 * be called multiple times within a single process.  Multiple processes can
 * have an open hypervisor interface at the same time.
 *
 * Each call to this function should have a corresponding call to
 * xc_interface_close().
 *
 * This function can fail if the caller does not have superuser permission or
 * if a Xen-enabled kernel is not currently running.
 *
 * @return a handle to the hypervisor interface or -1 on failure
 */
int xc_interface_open(void);

/**
 * This function closes an open hypervisor interface.
 *
 * This function can fail if the handle does not represent an open interface or
 * if there were problems closing the interface.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @return 0 on success, -1 otherwise.
 */
int xc_interface_close(int xc_handle);
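
/*
 * Example (illustrative, not part of the original header): the usual
 * open/use/close pattern.  Error handling is deliberately minimal.
 *
 *   #include <stdio.h>
 *   #include <xenctrl.h>
 *
 *   int main(void)
 *   {
 *       int xc = xc_interface_open();
 *       if ( xc < 0 )
 *       {
 *           perror("xc_interface_open");     // needs root and a Xen kernel
 *           return 1;
 *       }
 *       // ... issue control operations using 'xc' ...
 *       xc_interface_close(xc);
 *       return 0;
 *   }
 */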

/*
 * KERNEL INTERFACES
 */

/*
 * Resolve a kernel device name (e.g., "evtchn", "blktap0") into a kernel
 * device number. Returns -1 on error (and sets errno).
 */
int xc_find_device_number(const char *name);

/*
 * DOMAIN DEBUGGING FUNCTIONS
 */

typedef struct xc_core_header {
    unsigned int xch_magic;
    unsigned int xch_nr_vcpus;
    unsigned int xch_nr_pages;
    unsigned int xch_ctxt_offset;
    unsigned int xch_index_offset;
    unsigned int xch_pages_offset;
} xc_core_header_t;

#define XC_CORE_MAGIC     0xF00FEBED
#define XC_CORE_MAGIC_HVM 0xF00FEBEE

#ifdef __linux__

#include <sys/ptrace.h>
#include <thread_db.h>

typedef void (*thr_ev_handler_t)(long);

void xc_register_event_handler(
    thr_ev_handler_t h,
    td_event_e e);

long xc_ptrace(
    int xc_handle,
    enum __ptrace_request request,
    uint32_t domid,
    long addr,
    long data);

int xc_waitdomain(
    int xc_handle,
    int domain,
    int *status,
    int options);

#endif /* __linux__ */

/*
 * DOMAIN MANAGEMENT FUNCTIONS
 */

typedef struct xc_dominfo {
    uint32_t      domid;
    uint32_t      ssidref;
    unsigned int  dying:1, crashed:1, shutdown:1,
                  paused:1, blocked:1, running:1,
                  hvm:1;
    unsigned int  shutdown_reason; /* only meaningful if shutdown==1 */
    unsigned long nr_pages;
    unsigned long shared_info_frame;
    uint64_t      cpu_time;
    unsigned long max_memkb;
    unsigned int  nr_online_vcpus;
    unsigned int  max_vcpu_id;
    xen_domain_handle_t handle;
} xc_dominfo_t;

typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
int xc_domain_create(int xc_handle,
                     uint32_t ssidref,
                     xen_domain_handle_t handle,
                     uint32_t flags,
                     uint32_t *pdomid);
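
/*
 * Example (illustrative sketch): creating an empty domain shell with a
 * zeroed UUID handle.  The new domain still needs memory, a kernel and
 * vcpus before it can run.  Passing flags == 0 is assumed here to request
 * a plain paravirtualised domain.
 *
 *   #include <string.h>
 *
 *   static int demo_create_domain(int xc, uint32_t *domid)
 *   {
 *       xen_domain_handle_t handle;
 *       uint32_t ssidref = 0;                // default security label
 *       uint32_t flags   = 0;                // assumed: plain PV domain
 *
 *       memset(handle, 0, sizeof(handle));   // caller-chosen UUID; zero here
 *       return xc_domain_create(xc, ssidref, handle, flags, domid);
 *   }
 */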

/* Functions to produce a dump of a given domain
 *  xc_domain_dumpcore - produces a dump to a specified file
 *  xc_domain_dumpcore_via_callback - produces a dump, using a specified
 *                                    callback function
 */
int xc_domain_dumpcore(int xc_handle,
                       uint32_t domid,
                       const char *corename);

/* Define the callback function type for xc_domain_dumpcore_via_callback.
 *
 * This function is called by the coredump code for every "write", and is
 * passed an opaque object that was created by the caller of
 * xc_domain_dumpcore_via_callback for the callback's own use.
 */
typedef int (dumpcore_rtn_t)(void *arg, char *buffer, unsigned int length);

int xc_domain_dumpcore_via_callback(int xc_handle,
                                    uint32_t domid,
                                    void *arg,
                                    dumpcore_rtn_t dump_rtn);
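
/*
 * Example (illustrative): a dumpcore callback that appends every chunk to a
 * stdio stream.  The "return 0 on success" convention shown here is an
 * assumption; check the dumpcore implementation for the exact contract.
 *
 *   #include <stdio.h>
 *
 *   static int demo_dump_to_file(void *arg, char *buffer, unsigned int length)
 *   {
 *       FILE *fp = arg;                      // passed through from the caller
 *       return ( fwrite(buffer, 1, length, fp) == length ) ? 0 : -1;
 *   }
 *
 *   // ... elsewhere:
 *   //   FILE *fp = fopen("domain.core", "w");
 *   //   xc_domain_dumpcore_via_callback(xc, domid, fp, demo_dump_to_file);
 *   //   fclose(fp);
 */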

/*
 * This function sets the maximum number of vcpus that a domain may create.
 *
 * @parm xc_handle a handle to an open hypervisor interface.
 * @parm domid the domain id in which vcpus are to be created.
 * @parm max the maximum number of vcpus that the domain may create.
 * @return 0 on success, -1 on failure.
 */
int xc_domain_max_vcpus(int xc_handle,
                        uint32_t domid,
                        unsigned int max);

/**
 * This function pauses a domain.  A paused domain still exists in memory;
 * however, it does not receive any timeslices from the hypervisor.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to pause
 * @return 0 on success, -1 on failure.
 */
int xc_domain_pause(int xc_handle,
                    uint32_t domid);
/**
 * This function unpauses a domain.  The domain should have been previously
 * paused.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to unpause
 * @return 0 on success, -1 on failure
 */
int xc_domain_unpause(int xc_handle,
                      uint32_t domid);

/**
 * This function will destroy a domain.  Destroying a domain removes the domain
 * completely from memory.  This function should be called after sending the
 * domain a SHUTDOWN control message to free up the domain resources.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to destroy
 * @return 0 on success, -1 on failure
 */
int xc_domain_destroy(int xc_handle,
                      uint32_t domid);

/**
 * This function will shut down a domain.  This is intended for use in
 * fully-virtualized domains, where this operation is analogous to the
 * sched_op operations in a paravirtualized domain.  The caller is
 * expected to give the reason for the shutdown.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to shut down
 * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
 * @return 0 on success, -1 on failure
 */
int xc_domain_shutdown(int xc_handle,
                       uint32_t domid,
                       int reason);
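
/*
 * Example (illustrative): a helper that pauses a domain, lets the caller
 * inspect it, then resumes it; and a polite shutdown using the
 * SHUTDOWN_poweroff reason code from xen/sched.h.
 *
 *   static int demo_with_domain_paused(int xc, uint32_t domid,
 *                                      void (*inspect)(uint32_t))
 *   {
 *       if ( xc_domain_pause(xc, domid) != 0 )
 *           return -1;
 *       inspect(domid);                      // domain gets no timeslices here
 *       return xc_domain_unpause(xc, domid);
 *   }
 *
 *   static int demo_poweroff(int xc, uint32_t domid)
 *   {
 *       return xc_domain_shutdown(xc, domid, SHUTDOWN_poweroff);
 *   }
 */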

int xc_vcpu_setaffinity(int xc_handle,
                        uint32_t domid,
                        int vcpu,
                        uint64_t cpumap);
int xc_vcpu_getaffinity(int xc_handle,
                        uint32_t domid,
                        int vcpu,
                        uint64_t *cpumap);
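
/*
 * Example (illustrative): pinning vcpu 0 of a domain to physical CPUs 0-3.
 * The cpumap is treated here as a bitmask with bit n standing for physical
 * CPU n; that interpretation is an assumption to verify against the
 * hypervisor interface.
 *
 *   static int demo_pin_vcpu0(int xc, uint32_t domid)
 *   {
 *       uint64_t cpumap = 0xfULL;            // CPUs 0, 1, 2, 3
 *
 *       if ( xc_vcpu_setaffinity(xc, domid, 0, cpumap) != 0 )
 *           return -1;
 *       return xc_vcpu_getaffinity(xc, domid, 0, &cpumap);   // read it back
 *   }
 */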

/**
 * This function will return information about one or more domains.  It is
 * designed to iterate over the list of domains.  If a single domain is
 * requested, this function will return the next domain in the list - if
 * one exists.  It is, therefore, important in this case to make sure the
 * domain requested was the one returned.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm first_domid the first domain to enumerate information from.  Domains
 *                   are currently enumerated in order of creation.
 * @parm max_doms the number of elements in info
 * @parm info an array of max_doms size that will contain the information for
 *            the enumerated domains.
 * @return the number of domains enumerated or -1 on error
 */
int xc_domain_getinfo(int xc_handle,
                      uint32_t first_domid,
                      unsigned int max_doms,
                      xc_dominfo_t *info);
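
/*
 * Example (illustrative): walking all domains in chunks, restarting each
 * query after the last domain returned, as the comment above suggests.
 *
 *   #include <stdio.h>
 *
 *   static void demo_list_domains(int xc)
 *   {
 *       xc_dominfo_t info[16];
 *       uint32_t next = 0;
 *       int i, n;
 *
 *       while ( (n = xc_domain_getinfo(xc, next, 16, info)) > 0 )
 *       {
 *           for ( i = 0; i < n; i++ )
 *               printf("dom %u: %lu pages%s\n", info[i].domid,
 *                      info[i].nr_pages, info[i].running ? " (running)" : "");
 *           next = info[n-1].domid + 1;      // continue after the last one
 *       }
 *   }
 */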

/**
 * This function will set the execution context for the specified vcpu.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain to set the vcpu context for
 * @parm vcpu the vcpu number for the context
 * @parm ctxt pointer to the cpu context with the values to set
 * @return 0 on success, -1 on error
 */
int xc_vcpu_setcontext(int xc_handle,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_t *ctxt);
/**
 * This function will return information about one or more domains, using a
 * single hypercall.  The domain information will be stored into the supplied
 * array of xc_domaininfo_t structures.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm first_domain the first domain to enumerate information from.
 *                    Domains are currently enumerated in order of creation.
 * @parm max_domains the number of elements in info
 * @parm info an array of max_domains size that will contain the information
 *            for the enumerated domains.
 * @return the number of domains enumerated or -1 on error
 */
int xc_domain_getinfolist(int xc_handle,
                          uint32_t first_domain,
                          unsigned int max_domains,
                          xc_domaininfo_t *info);

/**
 * This function returns information about the execution context of a
 * particular vcpu of a domain.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain to get information from
 * @parm vcpu the vcpu number
 * @parm ctxt a pointer to a structure to store the execution context of the
 *            vcpu
 * @return 0 on success, -1 on failure
 */
int xc_vcpu_getcontext(int xc_handle,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_t *ctxt);
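
/*
 * Example (illustrative): fetching the register state of vcpu 0.  The domain
 * should normally be paused first so the snapshot is consistent.  The
 * user_regs layout is architecture-specific (eip shown for x86_32).
 *
 *   #include <stdio.h>
 *
 *   static int demo_print_vcpu0_ip(int xc, uint32_t domid)
 *   {
 *       vcpu_guest_context_t ctxt;
 *
 *       if ( xc_domain_pause(xc, domid) != 0 )
 *           return -1;
 *       if ( xc_vcpu_getcontext(xc, domid, 0, &ctxt) == 0 )
 *           printf("vcpu0 eip = %#lx\n", (unsigned long)ctxt.user_regs.eip);
 *       return xc_domain_unpause(xc, domid);
 *   }
 */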

typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
int xc_vcpu_getinfo(int xc_handle,
                    uint32_t domid,
                    uint32_t vcpu,
                    xc_vcpuinfo_t *info);

int xc_domain_setcpuweight(int xc_handle,
                           uint32_t domid,
                           float weight);
long long xc_domain_get_cpu_usage(int xc_handle,
                                  domid_t domid,
                                  int vcpu);

int xc_domain_sethandle(int xc_handle, uint32_t domid,
                        xen_domain_handle_t handle);

typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
int xc_shadow_control(int xc_handle,
                      uint32_t domid,
                      unsigned int sop,
                      unsigned long *dirty_bitmap,
                      unsigned long pages,
                      unsigned long *mb,
                      uint32_t mode,
                      xc_shadow_op_stats_t *stats);

int xc_sedf_domain_set(int xc_handle,
                       uint32_t domid,
                       uint64_t period, uint64_t slice,
                       uint64_t latency, uint16_t extratime,
                       uint16_t weight);

int xc_sedf_domain_get(int xc_handle,
                       uint32_t domid,
                       uint64_t *period, uint64_t *slice,
                       uint64_t *latency, uint16_t *extratime,
                       uint16_t *weight);

int xc_sched_credit_domain_set(int xc_handle,
                               uint32_t domid,
                               struct xen_domctl_sched_credit *sdom);

int xc_sched_credit_domain_get(int xc_handle,
                               uint32_t domid,
                               struct xen_domctl_sched_credit *sdom);
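
/*
 * Example (illustrative): doubling a domain's credit-scheduler weight while
 * leaving its cap untouched.  The weight/cap field names are those of
 * struct xen_domctl_sched_credit in xen/domctl.h.
 *
 *   static int demo_double_credit_weight(int xc, uint32_t domid)
 *   {
 *       struct xen_domctl_sched_credit sdom;
 *
 *       if ( xc_sched_credit_domain_get(xc, domid, &sdom) != 0 )
 *           return -1;
 *       sdom.weight *= 2;                    // cap left as currently set
 *       return xc_sched_credit_domain_set(xc, domid, &sdom);
 *   }
 */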

/*
 * EVENT CHANNEL FUNCTIONS
 */

/**
 * This function allocates an unbound port.  Ports are named endpoints used for
 * interdomain communication.  This function is most useful in opening a
 * well-known port within a domain to receive events on.
 *
 * NOTE: If you are allocating a *local* unbound port, you probably want to
 * use xc_evtchn_bind_unbound_port().  This function is intended for allocating
 * ports *only* during domain creation.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm dom the ID of the local domain (the 'allocatee')
 * @parm remote_dom the ID of the domain that will later bind
 * @return allocated port (in @dom) on success, -1 on failure
 */
int xc_evtchn_alloc_unbound(int xc_handle,
                            uint32_t dom,
                            uint32_t remote_dom);
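
/*
 * Example (illustrative): during domain construction, allocating a
 * well-known port in the new domain that dom0 (domid 0) will later bind to,
 * e.g. for a console or store channel.
 *
 *   static int demo_alloc_port_for_dom0(int xc, uint32_t new_domid)
 *   {
 *       int port = xc_evtchn_alloc_unbound(xc, new_domid, 0);
 *
 *       if ( port < 0 )
 *           return -1;                       // errno describes the failure
 *       return port;                         // record it (e.g. in xenstore)
 *   }
 */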

int xc_physdev_pci_access_modify(int xc_handle,
                                 uint32_t domid,
                                 int bus,
                                 int dev,
                                 int func,
                                 int enable);

int xc_readconsolering(int xc_handle,
                       char **pbuffer,
                       unsigned int *pnr_chars,
                       int clear);

typedef xen_sysctl_physinfo_t xc_physinfo_t;
int xc_physinfo(int xc_handle,
                xc_physinfo_t *info);

int xc_sched_id(int xc_handle,
                int *sched_id);

int xc_domain_setmaxmem(int xc_handle,
                        uint32_t domid,
                        unsigned int max_memkb);

int xc_domain_set_memmap_limit(int xc_handle,
                               uint32_t domid,
                               unsigned long map_limitkb);

int xc_domain_set_time_offset(int xc_handle,
                              uint32_t domid,
                              int32_t time_offset_seconds);

int xc_domain_memory_increase_reservation(int xc_handle,
                                          uint32_t domid,
                                          unsigned long nr_extents,
                                          unsigned int extent_order,
                                          unsigned int address_bits,
                                          xen_pfn_t *extent_start);

int xc_domain_memory_decrease_reservation(int xc_handle,
                                          uint32_t domid,
                                          unsigned long nr_extents,
                                          unsigned int extent_order,
                                          xen_pfn_t *extent_start);

int xc_domain_memory_populate_physmap(int xc_handle,
                                      uint32_t domid,
                                      unsigned long nr_extents,
                                      unsigned int extent_order,
                                      unsigned int address_bits,
                                      xen_pfn_t *extent_start);

int xc_domain_ioport_permission(int xc_handle,
                                uint32_t domid,
                                uint32_t first_port,
                                uint32_t nr_ports,
                                uint32_t allow_access);

int xc_domain_irq_permission(int xc_handle,
                             uint32_t domid,
                             uint8_t pirq,
                             uint8_t allow_access);

int xc_domain_iomem_permission(int xc_handle,
                               uint32_t domid,
                               unsigned long first_mfn,
                               unsigned long nr_mfns,
                               uint8_t allow_access);

unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
                                    unsigned long mfn);

typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
/* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
   arrays. */
int xc_perfc_control(int xc_handle,
                     uint32_t op,
                     xc_perfc_desc_t *desc,
                     xc_perfc_val_t *val,
                     int *nbr_desc,
                     int *nbr_val);
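
/*
 * Example (illustrative sketch): honouring the mlock() requirement above with
 * caller-allocated arrays.  The array sizes and the XEN_SYSCTL_PERFCOP_query
 * op value are assumptions for this sketch; consult xen/sysctl.h and the
 * libxc implementation for the real query protocol.
 *
 *   #include <sys/mman.h>
 *
 *   static int demo_query_perfc(int xc)
 *   {
 *       static xc_perfc_desc_t desc[256];    // sizes chosen for the example
 *       static xc_perfc_val_t  val[1024];
 *       int nbr_desc = 256, nbr_val = 1024, rc;
 *
 *       if ( mlock(desc, sizeof(desc)) || mlock(val, sizeof(val)) )
 *           return -1;
 *       rc = xc_perfc_control(xc, XEN_SYSCTL_PERFCOP_query,
 *                             desc, val, &nbr_desc, &nbr_val);
 *       munlock(desc, sizeof(desc));
 *       munlock(val, sizeof(val));
 *       return rc;
 *   }
 */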

/**
 * Memory maps a range within one domain to a local address range.  Mappings
 * should be unmapped with munmap and should follow the same rules as mmap
 * regarding page alignment.  Returns NULL on failure.
 *
 * In Linux, the ring queue for the control channel is accessible by mapping
 * the shared_info_frame (from xc_domain_getinfo()) + 2048.  The structure
 * stored there is of type control_if_t.
 *
 * @parm xc_handle a handle on an open hypervisor interface
 * @parm dom the domain to map memory from
 * @parm size the amount of memory to map (in multiples of page size)
 * @parm prot same flag as in mmap().
 * @parm mfn the frame address to map.
 */
void *xc_map_foreign_range(int xc_handle, uint32_t dom,
                           int size, int prot,
                           unsigned long mfn);

void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
                           xen_pfn_t *arr, int num);
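
/*
 * Example (illustrative): mapping a single foreign frame read-only and
 * unmapping it again.  The shared info frame reported by xc_domain_getinfo()
 * is used as the frame to map.
 *
 *   #include <sys/mman.h>
 *
 *   static int demo_peek_shared_info(int xc, uint32_t domid)
 *   {
 *       xc_dominfo_t info;
 *       void *page;
 *
 *       if ( xc_domain_getinfo(xc, domid, 1, &info) != 1 ||
 *            info.domid != domid )
 *           return -1;                       // domain vanished, or another was returned
 *       page = xc_map_foreign_range(xc, domid, XC_PAGE_SIZE, PROT_READ,
 *                                   info.shared_info_frame);
 *       if ( page == NULL )
 *           return -1;
 *       // ... read the shared info stored in 'page' ...
 *       munmap(page, XC_PAGE_SIZE);
 *       return 0;
 *   }
 */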

/**
 * Translates a virtual address in the context of a given domain and
 * vcpu, returning the machine page frame number of the associated
 * page.
 *
 * @parm xc_handle a handle on an open hypervisor interface
 * @parm dom the domain to perform the translation in
 * @parm vcpu the vcpu to perform the translation on
 * @parm virt the virtual address to translate
 */
unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
                                           int vcpu, unsigned long long virt);

int xc_get_pfn_list(int xc_handle, uint32_t domid, xen_pfn_t *pfn_buf,
                    unsigned long max_pfns);

unsigned long xc_ia64_fpsr_default(void);

int xc_ia64_get_pfn_list(int xc_handle, uint32_t domid,
                         xen_pfn_t *pfn_buf,
                         unsigned int start_page, unsigned int nr_pages);

int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
                           unsigned long dst_pfn, const char *src_page);

int xc_clear_domain_page(int xc_handle, uint32_t domid,
                         unsigned long dst_pfn);

long xc_get_max_pages(int xc_handle, uint32_t domid);

int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
                 domid_t dom);

int xc_memory_op(int xc_handle, int cmd, void *arg);

int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
                          int num, unsigned long *arr);

/* Get current total pages allocated to a domain. */
long xc_get_tot_pages(int xc_handle, uint32_t domid);

/*
 * Trace Buffer Operations
 */

/**
 * xc_tbuf_enable - enable tracing buffers
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm pages size of tracing buffers to create (in pages)
 * @parm mfn location to store mfn of the trace buffers to
 * @parm size location to store the size (in bytes) of a trace buffer to
 *
 * Gets the machine address of the trace pointer area and the size of the
 * per CPU buffers.
 */
int xc_tbuf_enable(int xc_handle, unsigned long pages,
                   unsigned long *mfn, unsigned long *size);
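
/*
 * Example (illustrative): sizing and enabling the trace buffers, as described
 * below (the size must be set before tracing is enabled).  Passing the same
 * page count to both calls is the simple-minded approach taken here; the
 * returned mfn could then be handed to xc_map_foreign_range() by a tool such
 * as xentrace.
 *
 *   #include <stdio.h>
 *
 *   static int demo_start_tracing(int xc, unsigned long pages_per_cpu)
 *   {
 *       unsigned long mfn, size;
 *
 *       if ( xc_tbuf_set_size(xc, pages_per_cpu) != 0 )
 *           return -1;                       // may already have been set at boot
 *       if ( xc_tbuf_enable(xc, pages_per_cpu, &mfn, &size) != 0 )
 *           return -1;
 *       printf("trace buffers at mfn %#lx, %lu bytes per cpu\n", mfn, size);
 *       return 0;
 *   }
 */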

/*
 * Disable tracing buffers.
 */
int xc_tbuf_disable(int xc_handle);

/**
 * This function sets the size of the trace buffers.  Setting the size
 * is currently a one-shot operation that may be performed either at boot
 * time or via this interface, not both.  The buffer size must be set before
 * enabling tracing.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm size the size in pages per cpu for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_set_size(int xc_handle, unsigned long size);

/**
 * This function retrieves the current size of the trace buffers.
 * Note that the size returned is in terms of bytes, not pages.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm size will contain the size in bytes for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_get_size(int xc_handle, unsigned long *size);

int xc_tbuf_set_cpu_mask(int xc_handle, uint32_t mask);

int xc_tbuf_set_evt_mask(int xc_handle, uint32_t mask);

int xc_domctl(int xc_handle, struct xen_domctl *domctl);
int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl);

int xc_version(int xc_handle, int cmd, void *arg);

/*
 * MMU updates.
 */
#define MAX_MMU_UPDATES 1024
struct xc_mmu {
    mmu_update_t updates[MAX_MMU_UPDATES];
    int          idx;
    domid_t      subject;
};
typedef struct xc_mmu xc_mmu_t;
xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom);
int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
                      unsigned long long ptr, unsigned long long val);
int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);
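
/*
 * Example (illustrative sketch): batching a page-table update for a foreign
 * domain.  The (ptr, val) encoding shown - machine address of the PTE with
 * the MMU_NORMAL_PT_UPDATE type from xen/xen.h in the low bits, new PTE
 * value - is the usual libxc pattern; the 8-byte PTE size is an assumption
 * (PAE/64-bit page tables).
 *
 *   static int demo_write_pte(int xc, domid_t dom,
 *                             unsigned long pt_mfn, int pte_index,
 *                             unsigned long long new_pte)
 *   {
 *       xc_mmu_t *mmu = xc_init_mmu_updates(xc, dom);
 *       unsigned long long ptr;
 *
 *       if ( mmu == NULL )
 *           return -1;
 *       ptr = ((((unsigned long long)pt_mfn) << XC_PAGE_SHIFT) +
 *              pte_index * 8) | MMU_NORMAL_PT_UPDATE;   // 8-byte PTEs assumed
 *       if ( xc_add_mmu_update(xc, mmu, ptr, new_pte) != 0 )
 *           return -1;
 *       return xc_finish_mmu_updates(xc, mmu);          // flushes the batch
 *   }
 */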

int xc_acm_op(int xc_handle, int cmd, void *arg, unsigned long arg_size);

/*
 * Return a handle to the event channel driver, or -1 on failure, in which case
 * errno will be set appropriately.
 */
int xc_evtchn_open(void);

/*
 * Close a handle previously allocated with xc_evtchn_open().
 */
int xc_evtchn_close(int xce_handle);

/*
 * Return an fd that can be select()ed on for further calls to
 * xc_evtchn_pending().
 */
int xc_evtchn_fd(int xce_handle);

/*
 * Notify the given event channel. Returns -1 on failure, in which case
 * errno will be set appropriately.
 */
int xc_evtchn_notify(int xce_handle, evtchn_port_t port);

/*
 * Returns a new event port awaiting interdomain connection from the given
 * domain ID, or -1 on failure, in which case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_bind_unbound_port(int xce_handle, int domid);

/*
 * Returns a new event port bound to the remote port for the given domain ID,
 * or -1 on failure, in which case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_bind_interdomain(int xce_handle, int domid,
                                         evtchn_port_t remote_port);

/*
 * Unbind the given event channel. Returns -1 on failure, in which case errno
 * will be set appropriately.
 */
int xc_evtchn_unbind(int xce_handle, evtchn_port_t port);

/*
 * Bind an event channel to the given VIRQ. Returns the event channel bound to
 * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_bind_virq(int xce_handle, unsigned int virq);

/*
 * Return the next event channel to become pending, or -1 on failure, in which
 * case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_pending(int xce_handle);

/*
 * Unmask the given event channel. Returns -1 on failure, in which case errno
 * will be set appropriately.
 */
int xc_evtchn_unmask(int xce_handle, evtchn_port_t port);
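
/*
 * Example (illustrative): a minimal event loop over the event channel driver
 * handle.  It binds VIRQ_DOM_EXC (from xen/xen.h), waits with select() on
 * xc_evtchn_fd(), and re-arms delivery by unmasking the pending port.
 *
 *   #include <sys/select.h>
 *
 *   static int demo_wait_for_dom_exc(void)
 *   {
 *       int xce = xc_evtchn_open();
 *       evtchn_port_t virq_port, port;
 *       fd_set fds;
 *
 *       if ( xce < 0 )
 *           return -1;
 *       virq_port = xc_evtchn_bind_virq(xce, VIRQ_DOM_EXC);
 *       if ( virq_port == (evtchn_port_t)-1 )
 *           goto out;
 *       FD_ZERO(&fds);
 *       FD_SET(xc_evtchn_fd(xce), &fds);
 *       if ( select(xc_evtchn_fd(xce) + 1, &fds, NULL, NULL, NULL) > 0 )
 *       {
 *           port = xc_evtchn_pending(xce);   // should equal virq_port
 *           xc_evtchn_unmask(xce, port);     // re-arm delivery
 *       }
 *       xc_evtchn_unbind(xce, virq_port);
 *   out:
 *       xc_evtchn_close(xce);
 *       return 0;
 *   }
 */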

int xc_hvm_set_pci_intx_level(
    int xc_handle, domid_t dom,
    uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
    unsigned int level);
int xc_hvm_set_isa_irq_level(
    int xc_handle, domid_t dom,
    uint8_t isa_irq,
    unsigned int level);

int xc_hvm_set_pci_link_route(
    int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq);

typedef enum {
  XC_ERROR_NONE = 0,
  XC_INTERNAL_ERROR = 1,
  XC_INVALID_KERNEL = 2,
} xc_error_code;

#define XC_MAX_ERROR_MSG_LEN 1024
typedef struct {
  int code;
  char message[XC_MAX_ERROR_MSG_LEN];
} xc_error;

/*
 * Return a pointer to the last error. This pointer and the
 * data pointed to are only valid until the next call to
 * libxc.
 */
const xc_error *xc_get_last_error(void);

/*
 * Clear the last error.
 */
void xc_clear_last_error(void);

typedef void (*xc_error_handler)(const xc_error *err);

/*
 * The default error handler, which prints to stderr.
 */
void xc_default_error_handler(const xc_error *err);

/*
 * Convert an error code into a text description.
 */
const char *xc_error_code_to_desc(int code);

/*
 * Registers a callback to handle errors.
 */
xc_error_handler xc_set_error_handler(xc_error_handler handler);
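
/*
 * Example (illustrative): installing a custom error handler that logs
 * through xc_error_code_to_desc(), then restoring the previous handler.
 *
 *   #include <stdio.h>
 *
 *   static void demo_log_error(const xc_error *err)
 *   {
 *       fprintf(stderr, "libxc: %s: %s\n",
 *               xc_error_code_to_desc(err->code), err->message);
 *   }
 *
 *   static void demo_with_logging(void)
 *   {
 *       xc_error_handler old = xc_set_error_handler(demo_log_error);
 *       // ... libxc calls; failures are reported via demo_log_error ...
 *       xc_set_error_handler(old);
 *   }
 */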

/* PowerPC specific. */
int xc_alloc_real_mode_area(int xc_handle,
                            uint32_t domid,
                            unsigned int log);

#endif /* XENCTRL_H */