direct-io.hg: tools/libxc/xenctrl.h @ 13521:62e2e515febe

[XEN] New event-channel reset operation.
Plumbed through libxenctrl to python.

From: Andrei Petrov <andrei.petrov@xensource.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
Author: kfraser@localhost.localdomain
Date: Fri Jan 19 17:20:57 2007 +0000

/******************************************************************************
 * xenctrl.h
 *
 * A library for low-level access to the Xen control interfaces.
 *
 * Copyright (c) 2003-2004, K A Fraser.
 */

#ifndef XENCTRL_H
#define XENCTRL_H

/* Tell the Xen public headers we are a user-space tools build. */
#ifndef __XEN_TOOLS__
#define __XEN_TOOLS__ 1
#endif

#include <stddef.h>
#include <stdint.h>
#include <xen/xen.h>
#include <xen/domctl.h>
#include <xen/sysctl.h>
#include <xen/version.h>
#include <xen/event_channel.h>
#include <xen/sched.h>
#include <xen/memory.h>
#include <xen/acm.h>
#include <xen/acm_ops.h>

#ifdef __ia64__
#define XC_PAGE_SHIFT 14
#else
#define XC_PAGE_SHIFT 12
#endif
#define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT)
#define XC_PAGE_MASK (~(XC_PAGE_SIZE-1))

/*
 * DEFINITIONS FOR CPU BARRIERS
 */

#if defined(__i386__)
#define mb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
#define rmb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
#define wmb() __asm__ __volatile__ ( "" : : : "memory")
#elif defined(__x86_64__)
#define mb() __asm__ __volatile__ ( "mfence" : : : "memory")
#define rmb() __asm__ __volatile__ ( "lfence" : : : "memory")
#define wmb() __asm__ __volatile__ ( "" : : : "memory")
#elif defined(__ia64__)
#define mb() __asm__ __volatile__ ("mf" ::: "memory")
#define rmb() __asm__ __volatile__ ("mf" ::: "memory")
#define wmb() __asm__ __volatile__ ("mf" ::: "memory")
#elif defined(__powerpc__)
/* XXX loosen these up later */
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("sync" : : : "memory") /* lwsync? */
#define wmb() __asm__ __volatile__ ("sync" : : : "memory") /* eieio? */
#else
#error "Define barriers"
#endif

/*
 * INITIALIZATION FUNCTIONS
 */

/**
 * This function opens a handle to the hypervisor interface. This function can
 * be called multiple times within a single process. Multiple processes can
 * have an open hypervisor interface at the same time.
 *
 * Each call to this function should have a corresponding call to
 * xc_interface_close().
 *
 * This function can fail if the caller does not have superuser permission or
 * if a Xen-enabled kernel is not currently running.
 *
 * @return a handle to the hypervisor interface or -1 on failure
 */
int xc_interface_open(void);

/**
 * This function closes an open hypervisor interface.
 *
 * This function can fail if the handle does not represent an open interface or
 * if there were problems closing the interface.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @return 0 on success, -1 otherwise.
 */
int xc_interface_close(int xc_handle);
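
/*
 * Illustrative sketch (not part of the original header): the usual
 * open/use/close pattern for the control interface. Error handling is
 * deliberately minimal.
 *
 *     #include <stdio.h>
 *     #include <xenctrl.h>
 *
 *     int main(void)
 *     {
 *         int xc_handle = xc_interface_open();
 *         if ( xc_handle < 0 )
 *         {
 *             perror("xc_interface_open");
 *             return 1;
 *         }
 *
 *         ... issue control operations using xc_handle ...
 *
 *         xc_interface_close(xc_handle);
 *         return 0;
 *     }
 */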

/*
 * KERNEL INTERFACES
 */

/*
 * Resolve a kernel device name (e.g., "evtchn", "blktap0") into a kernel
 * device number. Returns -1 on error (and sets errno).
 */
int xc_find_device_number(const char *name);

/*
 * DOMAIN DEBUGGING FUNCTIONS
 */

typedef struct xc_core_header {
    unsigned int xch_magic;
    unsigned int xch_nr_vcpus;
    unsigned int xch_nr_pages;
    unsigned int xch_ctxt_offset;
    unsigned int xch_index_offset;
    unsigned int xch_pages_offset;
} xc_core_header_t;

#define XC_CORE_MAGIC 0xF00FEBED
#define XC_CORE_MAGIC_HVM 0xF00FEBEE

#ifdef __linux__

#include <sys/ptrace.h>
#include <thread_db.h>

typedef void (*thr_ev_handler_t)(long);

void xc_register_event_handler(
    thr_ev_handler_t h,
    td_event_e e);

long xc_ptrace(
    int xc_handle,
    enum __ptrace_request request,
    uint32_t domid,
    long addr,
    long data);

int xc_waitdomain(
    int xc_handle,
    int domain,
    int *status,
    int options);

#endif /* __linux__ */

/*
 * DOMAIN MANAGEMENT FUNCTIONS
 */

typedef struct xc_dominfo {
    uint32_t domid;
    uint32_t ssidref;
    unsigned int dying:1, crashed:1, shutdown:1,
                 paused:1, blocked:1, running:1,
                 hvm:1;
    unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
    unsigned long nr_pages;
    unsigned long shared_info_frame;
    uint64_t cpu_time;
    unsigned long max_memkb;
    unsigned int nr_online_vcpus;
    unsigned int max_vcpu_id;
    xen_domain_handle_t handle;
} xc_dominfo_t;

typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
int xc_domain_create(int xc_handle,
                     uint32_t ssidref,
                     xen_domain_handle_t handle,
                     uint32_t flags,
                     uint32_t *pdomid);
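
/*
 * Illustrative sketch (not part of the original header): creating an empty
 * domain. The ssidref, handle contents and flags value (0 here) are
 * placeholder assumptions; a real toolstack fills these in from its
 * configuration.
 *
 *     uint32_t domid = 0;
 *     xen_domain_handle_t handle = { 0 };   ... e.g. a freshly generated UUID ...
 *
 *     if ( xc_domain_create(xc_handle, 0, handle, 0, &domid) != 0 )
 *         fprintf(stderr, "xc_domain_create failed\n");
 *     else
 *         printf("created domain %u\n", domid);
 */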


/* Functions to produce a dump of a given domain
 *  xc_domain_dumpcore - produces a dump to a specified file
 *  xc_domain_dumpcore_via_callback - produces a dump, using a specified
 *                                    callback function
 */
int xc_domain_dumpcore(int xc_handle,
                       uint32_t domid,
                       const char *corename);

/* Define the callback function type for xc_domain_dumpcore_via_callback.
 *
 * This function is called by the coredump code for every "write"; it is
 * passed an opaque object that was created by the caller of
 * xc_domain_dumpcore_via_callback for the callback's own use.
 */
typedef int (dumpcore_rtn_t)(void *arg, char *buffer, unsigned int length);

int xc_domain_dumpcore_via_callback(int xc_handle,
                                    uint32_t domid,
                                    void *arg,
                                    dumpcore_rtn_t dump_rtn);
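
/*
 * Illustrative sketch (not part of the original header): a dumpcore_rtn_t
 * callback that appends each chunk to an open file descriptor passed via
 * the opaque argument. The helper name is made up for the example.
 *
 *     #include <unistd.h>
 *
 *     static int dump_to_fd(void *arg, char *buffer, unsigned int length)
 *     {
 *         int fd = *(int *)arg;
 *         while ( length != 0 )
 *         {
 *             ssize_t written = write(fd, buffer, length);
 *             if ( written <= 0 )
 *                 return -1;
 *             buffer += written;
 *             length -= written;
 *         }
 *         return 0;
 *     }
 *
 *     ... then: xc_domain_dumpcore_via_callback(xc_handle, domid, &fd, dump_to_fd);
 */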

/*
 * This function sets the maximum number of vcpus that a domain may create.
 *
 * @parm xc_handle a handle to an open hypervisor interface.
 * @parm domid the domain id in which vcpus are to be created.
 * @parm max the maximum number of vcpus that the domain may create.
 * @return 0 on success, -1 on failure.
 */
int xc_domain_max_vcpus(int xc_handle,
                        uint32_t domid,
                        unsigned int max);

/**
 * This function pauses a domain. A paused domain still exists in memory;
 * however, it does not receive any timeslices from the hypervisor.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to pause
 * @return 0 on success, -1 on failure.
 */
int xc_domain_pause(int xc_handle,
                    uint32_t domid);
/**
 * This function unpauses a domain. The domain should have been previously
 * paused.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to unpause
 * @return 0 on success, -1 on failure
 */
int xc_domain_unpause(int xc_handle,
                      uint32_t domid);

/**
 * This function will destroy a domain. Destroying a domain removes the domain
 * completely from memory. This function should be called after sending the
 * domain a SHUTDOWN control message to free up the domain resources.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to destroy
 * @return 0 on success, -1 on failure
 */
int xc_domain_destroy(int xc_handle,
                      uint32_t domid);


/**
 * This function resumes a suspended domain. The domain should have
 * been previously suspended.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to resume
 * @return 0 on success, -1 on failure
 */
int xc_domain_resume(int xc_handle,
                     uint32_t domid);

/**
 * This function will shutdown a domain. This is intended for use in
 * fully-virtualized domains where this operation is analogous to the
 * sched_op operations in a paravirtualized domain. The caller is
 * expected to give the reason for the shutdown.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to shut down
 * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
 * @return 0 on success, -1 on failure
 */
int xc_domain_shutdown(int xc_handle,
                       uint32_t domid,
                       int reason);
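
/*
 * Illustrative sketch (not part of the original header): requesting a clean
 * shutdown and destroying the domain once it has shut down. SHUTDOWN_poweroff
 * comes from <xen/sched.h>; the domid value is an arbitrary example.
 *
 *     uint32_t domid = 7;
 *
 *     if ( xc_domain_shutdown(xc_handle, domid, SHUTDOWN_poweroff) != 0 )
 *         fprintf(stderr, "shutdown request failed\n");
 *
 *     ... wait until xc_domain_getinfo() reports the domain as shut down ...
 *
 *     xc_domain_destroy(xc_handle, domid);
 */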

int xc_vcpu_setaffinity(int xc_handle,
                        uint32_t domid,
                        int vcpu,
                        uint64_t cpumap);
int xc_vcpu_getaffinity(int xc_handle,
                        uint32_t domid,
                        int vcpu,
                        uint64_t *cpumap);
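
/*
 * Illustrative sketch (not part of the original header): pinning vcpu 0 of a
 * domain to physical CPUs 0 and 1. The cpumap is a bitmask with one bit per
 * physical CPU.
 *
 *     uint64_t cpumap = (1ULL << 0) | (1ULL << 1);
 *
 *     if ( xc_vcpu_setaffinity(xc_handle, domid, 0, cpumap) != 0 )
 *         fprintf(stderr, "xc_vcpu_setaffinity failed\n");
 *
 *     xc_vcpu_getaffinity(xc_handle, domid, 0, &cpumap);
 */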

/**
 * This function will return information about one or more domains. It is
 * designed to iterate over the list of domains. If a single domain is
 * requested, this function will return the next domain in the list - if
 * one exists. It is, therefore, important in this case to make sure the
 * domain requested was the one returned.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm first_domid the first domain to enumerate information from. Domains
 *       are currently enumerated in order of creation.
 * @parm max_doms the number of elements in info
 * @parm info an array of max_doms size that will contain the information for
 *       the enumerated domains.
 * @return the number of domains enumerated or -1 on error
 */
int xc_domain_getinfo(int xc_handle,
                      uint32_t first_domid,
                      unsigned int max_doms,
                      xc_dominfo_t *info);
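
/*
 * Illustrative sketch (not part of the original header): querying a single
 * domain and checking that the entry returned really is the one asked for,
 * as the comment above requires.
 *
 *     xc_dominfo_t info;
 *
 *     if ( (xc_domain_getinfo(xc_handle, domid, 1, &info) != 1) ||
 *          (info.domid != domid) )
 *         fprintf(stderr, "domain %u does not exist\n", domid);
 *     else
 *         printf("domain %u: %lu pages, %u online vcpus\n",
 *                domid, info.nr_pages, info.nr_online_vcpus);
 */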


/**
 * This function will set the execution context for the specified vcpu.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain to set the vcpu context for
 * @parm vcpu the vcpu number for the context
 * @parm ctxt pointer to the cpu context with the values to set
 * @return 0 on success, -1 on failure
 */
int xc_vcpu_setcontext(int xc_handle,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_t *ctxt);
/**
 * This function will return information about one or more domains, using a
 * single hypercall. The domain information will be stored into the supplied
 * array of xc_domaininfo_t structures.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm first_domain the first domain to enumerate information from.
 *       Domains are currently enumerated in order of creation.
 * @parm max_domains the number of elements in info
 * @parm info an array of max_domains size that will contain the information
 *       for the enumerated domains.
 * @return the number of domains enumerated or -1 on error
 */
int xc_domain_getinfolist(int xc_handle,
                          uint32_t first_domain,
                          unsigned int max_domains,
                          xc_domaininfo_t *info);

/**
 * This function returns information about the context of an HVM domain
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain to get information from
 * @parm hvm_ctxt a pointer to a structure to store the execution context of
 *       the HVM domain
 * @return 0 on success, -1 on failure
 */
int xc_domain_hvm_getcontext(int xc_handle,
                             uint32_t domid,
                             hvm_domain_context_t *hvm_ctxt);

/**
 * This function will set the context for an HVM domain
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain to set the hvm domain context for
 * @parm hvm_ctxt pointer to the hvm context with the values to set
 * @return 0 on success, -1 on failure
 */
int xc_domain_hvm_setcontext(int xc_handle,
                             uint32_t domid,
                             hvm_domain_context_t *hvm_ctxt);

/**
 * This function returns information about the execution context of a
 * particular vcpu of a domain.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain to get information from
 * @parm vcpu the vcpu number
 * @parm ctxt a pointer to a structure to store the execution context of the
 *       domain
 * @return 0 on success, -1 on failure
 */
int xc_vcpu_getcontext(int xc_handle,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_t *ctxt);
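
/*
 * Illustrative sketch (not part of the original header): reading the context
 * of vcpu 0. Which register fields are meaningful depends on the guest
 * architecture.
 *
 *     vcpu_guest_context_t ctxt;
 *
 *     if ( xc_vcpu_getcontext(xc_handle, domid, 0, &ctxt) != 0 )
 *         fprintf(stderr, "xc_vcpu_getcontext failed\n");
 *
 *     ... inspect or modify ctxt, then optionally write it back with
 *         xc_vcpu_setcontext(xc_handle, domid, 0, &ctxt) ...
 */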

typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
int xc_vcpu_getinfo(int xc_handle,
                    uint32_t domid,
                    uint32_t vcpu,
                    xc_vcpuinfo_t *info);

int xc_domain_setcpuweight(int xc_handle,
                           uint32_t domid,
                           float weight);
long long xc_domain_get_cpu_usage(int xc_handle,
                                  domid_t domid,
                                  int vcpu);

int xc_domain_sethandle(int xc_handle, uint32_t domid,
                        xen_domain_handle_t handle);

typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
int xc_shadow_control(int xc_handle,
                      uint32_t domid,
                      unsigned int sop,
                      unsigned long *dirty_bitmap,
                      unsigned long pages,
                      unsigned long *mb,
                      uint32_t mode,
                      xc_shadow_op_stats_t *stats);

int xc_sedf_domain_set(int xc_handle,
                       uint32_t domid,
                       uint64_t period, uint64_t slice,
                       uint64_t latency, uint16_t extratime,
                       uint16_t weight);

int xc_sedf_domain_get(int xc_handle,
                       uint32_t domid,
                       uint64_t* period, uint64_t *slice,
                       uint64_t *latency, uint16_t *extratime,
                       uint16_t *weight);

int xc_sched_credit_domain_set(int xc_handle,
                               uint32_t domid,
                               struct xen_domctl_sched_credit *sdom);

int xc_sched_credit_domain_get(int xc_handle,
                               uint32_t domid,
                               struct xen_domctl_sched_credit *sdom);

/*
 * EVENT CHANNEL FUNCTIONS
 */

/**
 * This function allocates an unbound port. Ports are named endpoints used for
 * interdomain communication. This function is most useful in opening a
 * well-known port within a domain to receive events on.
 *
 * NOTE: If you are allocating a *local* unbound port, you probably want to
 * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
 * ports *only* during domain creation.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm dom the ID of the local domain (the 'allocatee')
 * @parm remote_dom the ID of the domain that will later bind
 * @return allocated port (in @dom) on success, -1 on failure
 */
int xc_evtchn_alloc_unbound(int xc_handle,
                            uint32_t dom,
                            uint32_t remote_dom);
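
/*
 * Illustrative sketch (not part of the original header): during construction
 * of a new domain, allocating a port in that domain which dom0 will later
 * bind to (for example, for a console or xenstore channel).
 *
 *     int port = xc_evtchn_alloc_unbound(xc_handle, new_domid, 0);
 *     if ( port < 0 )
 *         fprintf(stderr, "xc_evtchn_alloc_unbound failed\n");
 *     else
 *         printf("allocated port %d in domain %u\n", port, new_domid);
 */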

int xc_evtchn_reset(int xc_handle,
                    uint32_t dom);

int xc_physdev_pci_access_modify(int xc_handle,
                                 uint32_t domid,
                                 int bus,
                                 int dev,
                                 int func,
                                 int enable);

int xc_readconsolering(int xc_handle,
                       char **pbuffer,
                       unsigned int *pnr_chars,
                       int clear);

typedef xen_sysctl_physinfo_t xc_physinfo_t;
int xc_physinfo(int xc_handle,
                xc_physinfo_t *info);

int xc_sched_id(int xc_handle,
                int *sched_id);

int xc_domain_setmaxmem(int xc_handle,
                        uint32_t domid,
                        unsigned int max_memkb);

int xc_domain_set_memmap_limit(int xc_handle,
                               uint32_t domid,
                               unsigned long map_limitkb);

int xc_domain_set_time_offset(int xc_handle,
                              uint32_t domid,
                              int32_t time_offset_seconds);

int xc_domain_memory_increase_reservation(int xc_handle,
                                          uint32_t domid,
                                          unsigned long nr_extents,
                                          unsigned int extent_order,
                                          unsigned int address_bits,
                                          xen_pfn_t *extent_start);

int xc_domain_memory_decrease_reservation(int xc_handle,
                                          uint32_t domid,
                                          unsigned long nr_extents,
                                          unsigned int extent_order,
                                          xen_pfn_t *extent_start);

int xc_domain_memory_populate_physmap(int xc_handle,
                                      uint32_t domid,
                                      unsigned long nr_extents,
                                      unsigned int extent_order,
                                      unsigned int address_bits,
                                      xen_pfn_t *extent_start);

int xc_domain_ioport_permission(int xc_handle,
                                uint32_t domid,
                                uint32_t first_port,
                                uint32_t nr_ports,
                                uint32_t allow_access);

int xc_domain_irq_permission(int xc_handle,
                             uint32_t domid,
                             uint8_t pirq,
                             uint8_t allow_access);

int xc_domain_iomem_permission(int xc_handle,
                               uint32_t domid,
                               unsigned long first_mfn,
                               unsigned long nr_mfns,
                               uint8_t allow_access);

unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
                                    unsigned long mfn);

typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
/* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
   arrays. */
int xc_perfc_control(int xc_handle,
                     uint32_t op,
                     xc_perfc_desc_t *desc,
                     xc_perfc_val_t *val,
                     int *nbr_desc,
                     int *nbr_val);

/**
 * Memory maps a range within one domain to a local address range. Mappings
 * should be unmapped with munmap and should follow the same rules as mmap
 * regarding page alignment. Returns NULL on failure.
 *
 * In Linux, the ring queue for the control channel is accessible by mapping
 * the shared_info_frame (from xc_domain_getinfo()) + 2048. The structure
 * stored there is of type control_if_t.
 *
 * @parm xc_handle a handle on an open hypervisor interface
 * @parm dom the domain to map memory from
 * @parm size the amount of memory to map (in multiples of page size)
 * @parm prot same flag as in mmap().
 * @parm mfn the frame address to map.
 */
void *xc_map_foreign_range(int xc_handle, uint32_t dom,
                           int size, int prot,
                           unsigned long mfn );
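
/*
 * Illustrative sketch (not part of the original header): mapping a domain's
 * shared info frame read-only and unmapping it again. PROT_READ and munmap()
 * come from <sys/mman.h>.
 *
 *     xc_dominfo_t info;
 *     void *shared_info;
 *
 *     xc_domain_getinfo(xc_handle, domid, 1, &info);
 *     shared_info = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
 *                                        PROT_READ, info.shared_info_frame);
 *     if ( shared_info == NULL )
 *         fprintf(stderr, "xc_map_foreign_range failed\n");
 *     else
 *     {
 *         ... read the mapped page ...
 *         munmap(shared_info, XC_PAGE_SIZE);
 *     }
 */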

void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
                           xen_pfn_t *arr, int num );

/**
 * Translates a virtual address in the context of a given domain and
 * vcpu returning the machine page frame number of the associated
 * page.
 *
 * @parm xc_handle a handle on an open hypervisor interface
 * @parm dom the domain to perform the translation in
 * @parm vcpu the vcpu to perform the translation on
 * @parm virt the virtual address to translate
 */
unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
                                           int vcpu, unsigned long long virt);

int xc_get_pfn_list(int xc_handle, uint32_t domid, xen_pfn_t *pfn_buf,
                    unsigned long max_pfns);

unsigned long xc_ia64_fpsr_default(void);

int xc_ia64_get_pfn_list(int xc_handle, uint32_t domid,
                         xen_pfn_t *pfn_buf,
                         unsigned int start_page, unsigned int nr_pages);

int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
                           unsigned long dst_pfn, const char *src_page);

int xc_clear_domain_page(int xc_handle, uint32_t domid,
                         unsigned long dst_pfn);

long xc_get_max_pages(int xc_handle, uint32_t domid);

int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
                 domid_t dom);

int xc_memory_op(int xc_handle, int cmd, void *arg);

int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
                          int num, unsigned long *arr);


/* Get current total pages allocated to a domain. */
long xc_get_tot_pages(int xc_handle, uint32_t domid);


/*
 * Trace Buffer Operations
 */

/**
 * xc_tbuf_enable - enable tracing buffers
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm pages size of tracing buffers to create (in pages)
 * @parm mfn location to store mfn of the trace buffers to
 * @parm size location to store the size (in bytes) of a trace buffer to
 *
 * Gets the machine address of the trace pointer area and the size of the
 * per-CPU buffers.
 */
int xc_tbuf_enable(int xc_handle, unsigned long pages,
                   unsigned long *mfn, unsigned long *size);

/*
 * Disable tracing buffers.
 */
int xc_tbuf_disable(int xc_handle);

/**
 * This function sets the size of the trace buffers. Setting the size
 * is currently a one-shot operation that may be performed either at boot
 * time or via this interface, not both. The buffer size must be set before
 * enabling tracing.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm size the size in pages per cpu for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_set_size(int xc_handle, unsigned long size);

/**
 * This function retrieves the current size of the trace buffers.
 * Note that the size returned is in terms of bytes, not pages.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm size will contain the size in bytes for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_get_size(int xc_handle, unsigned long *size);
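
/*
 * Illustrative sketch (not part of the original header): sizing and enabling
 * the trace buffers, then reporting where they live. The 32-pages-per-cpu
 * figure is an arbitrary example value.
 *
 *     unsigned long mfn, size;
 *
 *     if ( xc_tbuf_set_size(xc_handle, 32) != 0 ||
 *          xc_tbuf_enable(xc_handle, 32, &mfn, &size) != 0 )
 *         fprintf(stderr, "failed to enable trace buffers\n");
 *     else
 *         printf("trace buffers at mfn %#lx, %lu bytes per cpu\n", mfn, size);
 */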

int xc_tbuf_set_cpu_mask(int xc_handle, uint32_t mask);

int xc_tbuf_set_evt_mask(int xc_handle, uint32_t mask);

int xc_domctl(int xc_handle, struct xen_domctl *domctl);
int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl);

int xc_version(int xc_handle, int cmd, void *arg);

/*
 * MMU updates.
 */
#define MAX_MMU_UPDATES 1024
struct xc_mmu {
    mmu_update_t updates[MAX_MMU_UPDATES];
    int idx;
    domid_t subject;
};
typedef struct xc_mmu xc_mmu_t;
xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom);
int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
                      unsigned long long ptr, unsigned long long val);
int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);
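
/*
 * Illustrative sketch (not part of the original header): batching MMU
 * updates. Updates are queued with xc_add_mmu_update() and flushed to the
 * hypervisor when xc_finish_mmu_updates() is called. The ptr/val values are
 * placeholders, not meaningful page-table entries, and freeing the batch
 * assumes xc_init_mmu_updates() heap-allocates it.
 *
 *     xc_mmu_t *mmu = xc_init_mmu_updates(xc_handle, domid);
 *     if ( mmu == NULL )
 *         return -1;
 *
 *     ... for each page-table entry to modify:
 *         xc_add_mmu_update(xc_handle, mmu, ptr, val); ...
 *
 *     xc_finish_mmu_updates(xc_handle, mmu);
 *     free(mmu);
 */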

int xc_acm_op(int xc_handle, int cmd, void *arg, unsigned long arg_size);

/*
 * Return a handle to the event channel driver, or -1 on failure, in which case
 * errno will be set appropriately.
 */
int xc_evtchn_open(void);

/*
 * Close a handle previously allocated with xc_evtchn_open().
 */
int xc_evtchn_close(int xce_handle);

/*
 * Return an fd that can be select()ed on for further calls to
 * xc_evtchn_pending().
 */
int xc_evtchn_fd(int xce_handle);

/*
 * Notify the given event channel. Returns -1 on failure, in which case
 * errno will be set appropriately.
 */
int xc_evtchn_notify(int xce_handle, evtchn_port_t port);

/*
 * Returns a new event port awaiting interdomain connection from the given
 * domain ID, or -1 on failure, in which case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_bind_unbound_port(int xce_handle, int domid);

/*
 * Returns a new event port bound to the remote port for the given domain ID,
 * or -1 on failure, in which case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_bind_interdomain(int xce_handle, int domid,
                                         evtchn_port_t remote_port);

/*
 * Unbind the given event channel. Returns -1 on failure, in which case errno
 * will be set appropriately.
 */
int xc_evtchn_unbind(int xce_handle, evtchn_port_t port);

/*
 * Bind an event channel to the given VIRQ. Returns the event channel bound to
 * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_bind_virq(int xce_handle, unsigned int virq);

/*
 * Return the next event channel to become pending, or -1 on failure, in which
 * case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_pending(int xce_handle);

/*
 * Unmask the given event channel. Returns -1 on failure, in which case errno
 * will be set appropriately.
 */
int xc_evtchn_unmask(int xce_handle, evtchn_port_t port);
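
/*
 * Illustrative sketch (not part of the original header): a minimal event
 * loop on the event channel driver, binding VIRQ_DOM_EXC (from <xen/xen.h>)
 * and acknowledging events as they arrive.
 *
 *     int xce_handle = xc_evtchn_open();
 *     evtchn_port_t virq_port, port;
 *
 *     if ( xce_handle < 0 )
 *         return -1;
 *
 *     virq_port = xc_evtchn_bind_virq(xce_handle, VIRQ_DOM_EXC);
 *
 *     for ( ; ; )
 *     {
 *         port = xc_evtchn_pending(xce_handle);   ... blocks ...
 *         if ( port == virq_port )
 *         {
 *             ... a domain has changed state: rescan with
 *                 xc_domain_getinfo() ...
 *         }
 *         xc_evtchn_unmask(xce_handle, port);
 *     }
 *
 *     xc_evtchn_close(xce_handle);
 */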

int xc_hvm_set_pci_intx_level(
    int xc_handle, domid_t dom,
    uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
    unsigned int level);
int xc_hvm_set_isa_irq_level(
    int xc_handle, domid_t dom,
    uint8_t isa_irq,
    unsigned int level);

int xc_hvm_set_pci_link_route(
    int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq);


typedef enum {
  XC_ERROR_NONE = 0,
  XC_INTERNAL_ERROR = 1,
  XC_INVALID_KERNEL = 2,
} xc_error_code;

#define XC_MAX_ERROR_MSG_LEN 1024
typedef struct {
  int code;
  char message[XC_MAX_ERROR_MSG_LEN];
} xc_error;

/*
 * Return a pointer to the last error. This pointer and the
 * data pointed to are only valid until the next call to
 * libxc.
 */
const xc_error *xc_get_last_error(void);

/*
 * Clear the last error
 */
void xc_clear_last_error(void);

typedef void (*xc_error_handler)(const xc_error *err);

/*
 * The default error handler which prints to stderr
 */
void xc_default_error_handler(const xc_error *err);

/*
 * Convert an error code into a text description
 */
const char *xc_error_code_to_desc(int code);

/*
 * Registers a callback to handle errors
 */
xc_error_handler xc_set_error_handler(xc_error_handler handler);
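
/*
 * Illustrative sketch (not part of the original header): installing a custom
 * error handler and querying the last error directly.
 *
 *     static void log_xc_error(const xc_error *err)
 *     {
 *         fprintf(stderr, "libxc: %s: %s\n",
 *                 xc_error_code_to_desc(err->code), err->message);
 *     }
 *
 *     ...
 *     xc_error_handler old = xc_set_error_handler(log_xc_error);
 *
 *     if ( xc_domain_pause(xc_handle, domid) != 0 )
 *     {
 *         const xc_error *err = xc_get_last_error();
 *         if ( err->code != XC_ERROR_NONE )
 *             fprintf(stderr, "pause failed: %s\n", err->message);
 *         xc_clear_last_error();
 *     }
 */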

/* PowerPC specific. */
int xc_alloc_real_mode_area(int xc_handle,
                            uint32_t domid,
                            unsigned int log);
#endif