ia64/xen-unstable

view tools/libxc/xenctrl.h @ 17965:14fd83fe71c3

Add facility to get notification of domain suspend by event channel.
This event channel will be notified when the domain transitions to the
suspended state, which can be much faster than raising VIRQ_DOM_EXC
and waiting for the notification to be propagated via xenstore.

No attempt is made here to prevent multiple subscribers (the last one
to subscribe wins), or to detect that a subscriber has gone away.
Userspace tools must handle these cases themselves.

Signed-off-by: Brendan Cully <brendan@cs.ubc.ca>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jul 04 12:00:24 2008 +0100 (2008-07-04)
parents 469d9b00382d
children 6c2fe520e32d
line source
1 /******************************************************************************
2 * xenctrl.h
3 *
4 * A library for low-level access to the Xen control interfaces.
5 *
6 * Copyright (c) 2003-2004, K A Fraser.
7 *
8 * xc_gnttab functions:
9 * Copyright (c) 2007-2008, D G Murray <Derek.Murray@cl.cam.ac.uk>
10 */
12 #ifndef XENCTRL_H
13 #define XENCTRL_H
15 /* Tell the Xen public headers we are a user-space tools build. */
16 #ifndef __XEN_TOOLS__
17 #define __XEN_TOOLS__ 1
18 #endif
20 #include <stddef.h>
21 #include <stdint.h>
22 #include <xen/xen.h>
23 #include <xen/domctl.h>
24 #include <xen/physdev.h>
25 #include <xen/sysctl.h>
26 #include <xen/version.h>
27 #include <xen/event_channel.h>
28 #include <xen/sched.h>
29 #include <xen/memory.h>
30 #include <xen/xsm/acm.h>
31 #include <xen/xsm/acm_ops.h>
32 #include <xen/xsm/flask_op.h>
34 #if defined(__i386__) || defined(__x86_64__)
35 #include <xen/foreign/x86_32.h>
36 #include <xen/foreign/x86_64.h>
37 #endif
/* Page granularity used by libxc: ia64 uses 16kB pages, everything else 4kB. */
#ifdef __ia64__
#define XC_PAGE_SHIFT 14
#else
#define XC_PAGE_SHIFT 12
#endif
#define XC_PAGE_SIZE  (1UL << XC_PAGE_SHIFT)
#define XC_PAGE_MASK  (~(XC_PAGE_SIZE-1))
/*
 * DEFINITIONS FOR CPU BARRIERS
 *
 * Use the __asm__/__volatile__ spellings rather than plain `asm': the
 * bare keyword is a GNU extension that is not recognised when the
 * compiler runs in strict ISO mode (e.g. -std=c11), whereas the
 * double-underscore forms are always available.
 */

#if defined(__i386__)
#define xen_mb()  __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
#define xen_rmb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
#define xen_wmb() __asm__ __volatile__ ( "" : : : "memory" )
#elif defined(__x86_64__)
#define xen_mb()  __asm__ __volatile__ ( "mfence" : : : "memory" )
#define xen_rmb() __asm__ __volatile__ ( "lfence" : : : "memory" )
#define xen_wmb() __asm__ __volatile__ ( "" : : : "memory" )
#elif defined(__ia64__)
#define xen_mb()  __asm__ __volatile__ ( "mf" : : : "memory" )
#define xen_rmb() __asm__ __volatile__ ( "mf" : : : "memory" )
#define xen_wmb() __asm__ __volatile__ ( "mf" : : : "memory" )
#else
#error "Define barriers"
#endif
/*
 * INITIALIZATION FUNCTIONS
 */

/**
 * Open a handle to the hypervisor interface.  May be called multiple
 * times within a single process, and multiple processes may hold open
 * handles simultaneously.
 *
 * Each successful call should be paired with a call to
 * xc_interface_close().
 *
 * Fails if the caller lacks superuser permission or if a Xen-enabled
 * kernel is not currently running.
 *
 * @return a handle to the hypervisor interface, or -1 on failure
 */
int xc_interface_open(void);

/**
 * Close an open hypervisor interface handle.
 *
 * Fails if the handle does not represent an open interface or if there
 * were problems closing it.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @return 0 on success, -1 otherwise.
 */
int xc_interface_close(int xc_handle);
/*
 * KERNEL INTERFACES
 */

/*
 * Resolve a kernel device name (e.g. "evtchn", "blktap0") into a kernel
 * device number.  Returns -1 on error (and sets errno).
 */
int xc_find_device_number(const char *name);
/*
 * DOMAIN DEBUGGING FUNCTIONS
 */

/* Header prepended to a domain core dump; xch_magic selects PV vs HVM. */
typedef struct xc_core_header {
    unsigned int xch_magic;        /* XC_CORE_MAGIC or XC_CORE_MAGIC_HVM */
    unsigned int xch_nr_vcpus;
    unsigned int xch_nr_pages;
    unsigned int xch_ctxt_offset;
    unsigned int xch_index_offset;
    unsigned int xch_pages_offset;
} xc_core_header_t;

#define XC_CORE_MAGIC     0xF00FEBED
#define XC_CORE_MAGIC_HVM 0xF00FEBEE
#ifdef __linux__

#include <sys/ptrace.h>
#include <thread_db.h>

/* Handler registered for a thread_db event (argument semantics defined
 * by the registration call below). */
typedef void (*thr_ev_handler_t)(long);

/* Register @h to be called when thread event @e occurs. */
void xc_register_event_handler(thr_ev_handler_t h,
                               td_event_e e);

/* ptrace-style debugging entry point operating on domain @domid. */
long xc_ptrace(int xc_handle,
               enum __ptrace_request request,
               uint32_t domid,
               long addr,
               long data);

/* waitpid-style wait on a domain; status/options mirror the waitpid API. */
int xc_waitdomain(int xc_handle,
                  int domain,
                  int *status,
                  int options);

#endif /* __linux__ */
149 /*
150 * DOMAIN MANAGEMENT FUNCTIONS
151 */
153 typedef struct xc_dominfo {
154 uint32_t domid;
155 uint32_t ssidref;
156 unsigned int dying:1, crashed:1, shutdown:1,
157 paused:1, blocked:1, running:1,
158 hvm:1, debugged:1;
159 unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
160 unsigned long nr_pages;
161 unsigned long shared_info_frame;
162 uint64_t cpu_time;
163 unsigned long max_memkb;
164 unsigned int nr_online_vcpus;
165 unsigned int max_vcpu_id;
166 xen_domain_handle_t handle;
167 } xc_dominfo_t;
169 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
171 typedef union
172 {
173 #if defined(__i386__) || defined(__x86_64__)
174 vcpu_guest_context_x86_64_t x64;
175 vcpu_guest_context_x86_32_t x32;
176 #endif
177 vcpu_guest_context_t c;
178 } vcpu_guest_context_any_t;
180 typedef union
181 {
182 #if defined(__i386__) || defined(__x86_64__)
183 shared_info_x86_64_t x64;
184 shared_info_x86_32_t x32;
185 #endif
186 shared_info_t s;
187 } shared_info_any_t;
189 typedef union
190 {
191 #if defined(__i386__) || defined(__x86_64__)
192 start_info_x86_64_t x64;
193 start_info_x86_32_t x32;
194 #endif
195 start_info_t s;
196 } start_info_any_t;
199 int xc_domain_create(int xc_handle,
200 uint32_t ssidref,
201 xen_domain_handle_t handle,
202 uint32_t flags,
203 uint32_t *pdomid);
206 /* Functions to produce a dump of a given domain
207 * xc_domain_dumpcore - produces a dump to a specified file
208 * xc_domain_dumpcore_via_callback - produces a dump, using a specified
209 * callback function
210 */
211 int xc_domain_dumpcore(int xc_handle,
212 uint32_t domid,
213 const char *corename);
215 /* Define the callback function type for xc_domain_dumpcore_via_callback.
216 *
217 * This function is called by the coredump code for every "write",
218 * and passes an opaque object for the use of the function and
219 * created by the caller of xc_domain_dumpcore_via_callback.
220 */
221 typedef int (dumpcore_rtn_t)(void *arg, char *buffer, unsigned int length);
223 int xc_domain_dumpcore_via_callback(int xc_handle,
224 uint32_t domid,
225 void *arg,
226 dumpcore_rtn_t dump_rtn);
/*
 * Set the maximum number of vcpus that a domain may create.
 *
 * @parm xc_handle a handle to an open hypervisor interface.
 * @parm domid the domain id in which vcpus are to be created.
 * @parm max the maximum number of vcpus that the domain may create.
 * @return 0 on success, -1 on failure.
 */
int xc_domain_max_vcpus(int xc_handle,
                        uint32_t domid,
                        unsigned int max);

/**
 * Pause a domain.  A paused domain still exists in memory but receives
 * no timeslices from the hypervisor.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to pause
 * @return 0 on success, -1 on failure.
 */
int xc_domain_pause(int xc_handle,
                    uint32_t domid);

/**
 * Unpause a domain that was previously paused.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to unpause
 * @return 0 on success, -1 on failure
 */
int xc_domain_unpause(int xc_handle,
                      uint32_t domid);

/**
 * Destroy a domain, removing it completely from memory.  This should be
 * called after sending the domain a SHUTDOWN control message, to free
 * up the domain's resources.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to destroy
 * @return 0 on success, -1 on failure
 */
int xc_domain_destroy(int xc_handle,
                      uint32_t domid);


/**
 * Resume a domain that was previously suspended.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to resume
 * @parm fast use cooperative resume (the guest must support this)
 * @return 0 on success, -1 on failure
 */
int xc_domain_resume(int xc_handle,
                     uint32_t domid,
                     int fast);

/**
 * Shut down a domain.  This is intended for use in fully-virtualized
 * domains, where the operation is analogous to the sched_op operations
 * of a paravirtualized domain.  The caller supplies the shutdown
 * reason.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to shut down
 * @parm reason the reason (SHUTDOWN_xxx) for the shutdown
 * @return 0 on success, -1 on failure
 */
int xc_domain_shutdown(int xc_handle,
                       uint32_t domid,
                       int reason);
302 int xc_vcpu_setaffinity(int xc_handle,
303 uint32_t domid,
304 int vcpu,
305 uint64_t cpumap);
306 int xc_vcpu_getaffinity(int xc_handle,
307 uint32_t domid,
308 int vcpu,
309 uint64_t *cpumap);
311 /**
312 * This function will return information about one or more domains. It is
313 * designed to iterate over the list of domains. If a single domain is
314 * requested, this function will return the next domain in the list - if
315 * one exists. It is, therefore, important in this case to make sure the
316 * domain requested was the one returned.
317 *
318 * @parm xc_handle a handle to an open hypervisor interface
319 * @parm first_domid the first domain to enumerate information from. Domains
320 * are currently enumerate in order of creation.
321 * @parm max_doms the number of elements in info
322 * @parm info an array of max_doms size that will contain the information for
323 * the enumerated domains.
324 * @return the number of domains enumerated or -1 on error
325 */
326 int xc_domain_getinfo(int xc_handle,
327 uint32_t first_domid,
328 unsigned int max_doms,
329 xc_dominfo_t *info);
332 /**
333 * This function will set the execution context for the specified vcpu.
334 *
335 * @parm xc_handle a handle to an open hypervisor interface
336 * @parm domid the domain to set the vcpu context for
337 * @parm vcpu the vcpu number for the context
338 * @parm ctxt pointer to the the cpu context with the values to set
339 * @return the number of domains enumerated or -1 on error
340 */
341 int xc_vcpu_setcontext(int xc_handle,
342 uint32_t domid,
343 uint32_t vcpu,
344 vcpu_guest_context_any_t *ctxt);
345 /**
346 * This function will return information about one or more domains, using a
347 * single hypercall. The domain information will be stored into the supplied
348 * array of xc_domaininfo_t structures.
349 *
350 * @parm xc_handle a handle to an open hypervisor interface
351 * @parm first_domain the first domain to enumerate information from.
352 * Domains are currently enumerate in order of creation.
353 * @parm max_domains the number of elements in info
354 * @parm info an array of max_doms size that will contain the information for
355 * the enumerated domains.
356 * @return the number of domains enumerated or -1 on error
357 */
358 int xc_domain_getinfolist(int xc_handle,
359 uint32_t first_domain,
360 unsigned int max_domains,
361 xc_domaininfo_t *info);
363 /**
364 * This function returns information about the context of a hvm domain
365 * @parm xc_handle a handle to an open hypervisor interface
366 * @parm domid the domain to get information from
367 * @parm ctxt_buf a pointer to a structure to store the execution context of
368 * the hvm domain
369 * @parm size the size of ctxt_buf in bytes
370 * @return 0 on success, -1 on failure
371 */
372 int xc_domain_hvm_getcontext(int xc_handle,
373 uint32_t domid,
374 uint8_t *ctxt_buf,
375 uint32_t size);
377 /**
378 * This function will set the context for hvm domain
379 *
380 * @parm xc_handle a handle to an open hypervisor interface
381 * @parm domid the domain to set the hvm domain context for
382 * @parm hvm_ctxt pointer to the the hvm context with the values to set
383 * @parm size the size of hvm_ctxt in bytes
384 * @return 0 on success, -1 on failure
385 */
386 int xc_domain_hvm_setcontext(int xc_handle,
387 uint32_t domid,
388 uint8_t *hvm_ctxt,
389 uint32_t size);
391 /**
392 * This function returns information about the execution context of a
393 * particular vcpu of a domain.
394 *
395 * @parm xc_handle a handle to an open hypervisor interface
396 * @parm domid the domain to get information from
397 * @parm vcpu the vcpu number
398 * @parm ctxt a pointer to a structure to store the execution context of the
399 * domain
400 * @return 0 on success, -1 on failure
401 */
402 int xc_vcpu_getcontext(int xc_handle,
403 uint32_t domid,
404 uint32_t vcpu,
405 vcpu_guest_context_any_t *ctxt);
407 typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
408 int xc_vcpu_getinfo(int xc_handle,
409 uint32_t domid,
410 uint32_t vcpu,
411 xc_vcpuinfo_t *info);
413 long long xc_domain_get_cpu_usage(int xc_handle,
414 domid_t domid,
415 int vcpu);
417 int xc_domain_sethandle(int xc_handle, uint32_t domid,
418 xen_domain_handle_t handle);
420 typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
421 int xc_shadow_control(int xc_handle,
422 uint32_t domid,
423 unsigned int sop,
424 unsigned long *dirty_bitmap,
425 unsigned long pages,
426 unsigned long *mb,
427 uint32_t mode,
428 xc_shadow_op_stats_t *stats);
430 int xc_sedf_domain_set(int xc_handle,
431 uint32_t domid,
432 uint64_t period, uint64_t slice,
433 uint64_t latency, uint16_t extratime,
434 uint16_t weight);
436 int xc_sedf_domain_get(int xc_handle,
437 uint32_t domid,
438 uint64_t* period, uint64_t *slice,
439 uint64_t *latency, uint16_t *extratime,
440 uint16_t *weight);
442 int xc_sched_credit_domain_set(int xc_handle,
443 uint32_t domid,
444 struct xen_domctl_sched_credit *sdom);
446 int xc_sched_credit_domain_get(int xc_handle,
447 uint32_t domid,
448 struct xen_domctl_sched_credit *sdom);
450 /**
451 * This function sends a trigger to a domain.
452 *
453 * @parm xc_handle a handle to an open hypervisor interface
454 * @parm domid the domain id to send trigger
455 * @parm trigger the trigger type
456 * @parm vcpu the vcpu number to send trigger
457 * return 0 on success, -1 on failure
458 */
459 int xc_domain_send_trigger(int xc_handle,
460 uint32_t domid,
461 uint32_t trigger,
462 uint32_t vcpu);
464 /**
465 * This function enables or disable debugging of a domain.
466 *
467 * @parm xc_handle a handle to an open hypervisor interface
468 * @parm domid the domain id to send trigger
469 * @parm enable true to enable debugging
470 * return 0 on success, -1 on failure
471 */
472 int xc_domain_setdebugging(int xc_handle,
473 uint32_t domid,
474 unsigned int enable);
476 /*
477 * EVENT CHANNEL FUNCTIONS
478 */
480 /* A port identifier is guaranteed to fit in 31 bits. */
481 typedef int evtchn_port_or_error_t;
483 /**
484 * This function allocates an unbound port. Ports are named endpoints used for
485 * interdomain communication. This function is most useful in opening a
486 * well-known port within a domain to receive events on.
487 *
488 * NOTE: If you are allocating a *local* unbound port, you probably want to
489 * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
490 * ports *only* during domain creation.
491 *
492 * @parm xc_handle a handle to an open hypervisor interface
493 * @parm dom the ID of the local domain (the 'allocatee')
494 * @parm remote_dom the ID of the domain who will later bind
495 * @return allocated port (in @dom) on success, -1 on failure
496 */
497 evtchn_port_or_error_t
498 xc_evtchn_alloc_unbound(int xc_handle,
499 uint32_t dom,
500 uint32_t remote_dom);
502 int xc_evtchn_reset(int xc_handle,
503 uint32_t dom);
504 int xc_evtchn_status(int xc_handle,
505 uint32_t dom,
506 uint32_t port);
508 /*
509 * Return a handle to the event channel driver, or -1 on failure, in which case
510 * errno will be set appropriately.
511 */
512 int xc_evtchn_open(void);
514 /*
515 * Close a handle previously allocated with xc_evtchn_open().
516 */
517 int xc_evtchn_close(int xce_handle);
519 /*
520 * Return an fd that can be select()ed on for further calls to
521 * xc_evtchn_pending().
522 */
523 int xc_evtchn_fd(int xce_handle);
525 /*
526 * Notify the given event channel. Returns -1 on failure, in which case
527 * errno will be set appropriately.
528 */
529 int xc_evtchn_notify(int xce_handle, evtchn_port_t port);
531 /*
532 * Returns a new event port awaiting interdomain connection from the given
533 * domain ID, or -1 on failure, in which case errno will be set appropriately.
534 */
535 evtchn_port_or_error_t
536 xc_evtchn_bind_unbound_port(int xce_handle, int domid);
538 /*
539 * Returns a new event port bound to the remote port for the given domain ID,
540 * or -1 on failure, in which case errno will be set appropriately.
541 */
542 evtchn_port_or_error_t
543 xc_evtchn_bind_interdomain(int xce_handle, int domid,
544 evtchn_port_t remote_port);
546 /*
547 * Bind an event channel to the given VIRQ. Returns the event channel bound to
548 * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
549 */
550 evtchn_port_or_error_t
551 xc_evtchn_bind_virq(int xce_handle, unsigned int virq);
553 /*
554 * Unbind the given event channel. Returns -1 on failure, in which case errno
555 * will be set appropriately.
556 */
557 int xc_evtchn_unbind(int xce_handle, evtchn_port_t port);
559 /*
560 * Return the next event channel to become pending, or -1 on failure, in which
561 * case errno will be set appropriately.
562 */
563 evtchn_port_or_error_t
564 xc_evtchn_pending(int xce_handle);
566 /*
567 * Unmask the given event channel. Returns -1 on failure, in which case errno
568 * will be set appropriately.
569 */
570 int xc_evtchn_unmask(int xce_handle, evtchn_port_t port);
572 int xc_physdev_pci_access_modify(int xc_handle,
573 uint32_t domid,
574 int bus,
575 int dev,
576 int func,
577 int enable);
579 int xc_readconsolering(int xc_handle,
580 char **pbuffer,
581 unsigned int *pnr_chars,
582 int clear, int incremental, uint32_t *pindex);
584 int xc_send_debug_keys(int xc_handle, char *keys);
586 typedef xen_sysctl_physinfo_t xc_physinfo_t;
587 typedef uint32_t xc_cpu_to_node_t;
588 int xc_physinfo(int xc_handle,
589 xc_physinfo_t *info);
591 int xc_sched_id(int xc_handle,
592 int *sched_id);
594 typedef xen_sysctl_cpuinfo_t xc_cpuinfo_t;
595 int xc_getcpuinfo(int xc_handle, int max_cpus,
596 xc_cpuinfo_t *info, int *nr_cpus);
598 int xc_domain_setmaxmem(int xc_handle,
599 uint32_t domid,
600 unsigned int max_memkb);
602 int xc_domain_set_memmap_limit(int xc_handle,
603 uint32_t domid,
604 unsigned long map_limitkb);
606 int xc_domain_set_time_offset(int xc_handle,
607 uint32_t domid,
608 int32_t time_offset_seconds);
610 int xc_domain_memory_increase_reservation(int xc_handle,
611 uint32_t domid,
612 unsigned long nr_extents,
613 unsigned int extent_order,
614 unsigned int address_bits,
615 xen_pfn_t *extent_start);
617 int xc_domain_memory_decrease_reservation(int xc_handle,
618 uint32_t domid,
619 unsigned long nr_extents,
620 unsigned int extent_order,
621 xen_pfn_t *extent_start);
623 int xc_domain_memory_populate_physmap(int xc_handle,
624 uint32_t domid,
625 unsigned long nr_extents,
626 unsigned int extent_order,
627 unsigned int address_bits,
628 xen_pfn_t *extent_start);
630 int xc_domain_ioport_permission(int xc_handle,
631 uint32_t domid,
632 uint32_t first_port,
633 uint32_t nr_ports,
634 uint32_t allow_access);
636 int xc_domain_irq_permission(int xc_handle,
637 uint32_t domid,
638 uint8_t pirq,
639 uint8_t allow_access);
641 int xc_domain_iomem_permission(int xc_handle,
642 uint32_t domid,
643 unsigned long first_mfn,
644 unsigned long nr_mfns,
645 uint8_t allow_access);
647 int xc_domain_pin_memory_cacheattr(int xc_handle,
648 uint32_t domid,
649 uint64_t start,
650 uint64_t end,
651 uint32_t type);
653 unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
654 unsigned long mfn);
656 typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
657 typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
658 /* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
659 arrays. */
660 int xc_perfc_control(int xc_handle,
661 uint32_t op,
662 xc_perfc_desc_t *desc,
663 xc_perfc_val_t *val,
664 int *nbr_desc,
665 int *nbr_val);
667 /**
668 * Memory maps a range within one domain to a local address range. Mappings
669 * should be unmapped with munmap and should follow the same rules as mmap
670 * regarding page alignment. Returns NULL on failure.
671 *
672 * In Linux, the ring queue for the control channel is accessible by mapping
673 * the shared_info_frame (from xc_domain_getinfo()) + 2048. The structure
674 * stored there is of type control_if_t.
675 *
676 * @parm xc_handle a handle on an open hypervisor interface
677 * @parm dom the domain to map memory from
678 * @parm size the amount of memory to map (in multiples of page size)
679 * @parm prot same flag as in mmap().
680 * @parm mfn the frame address to map.
681 */
682 void *xc_map_foreign_range(int xc_handle, uint32_t dom,
683 int size, int prot,
684 unsigned long mfn );
686 void *xc_map_foreign_pages(int xc_handle, uint32_t dom, int prot,
687 const xen_pfn_t *arr, int num );
689 /**
690 * Like xc_map_foreign_pages(), except it can succeeed partially.
691 * When a page cannot be mapped, its PFN in @arr is or'ed with
692 * 0xF0000000 to indicate the error.
693 */
694 void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
695 xen_pfn_t *arr, int num );
697 /**
698 * Translates a virtual address in the context of a given domain and
699 * vcpu returning the machine page frame number of the associated
700 * page.
701 *
702 * @parm xc_handle a handle on an open hypervisor interface
703 * @parm dom the domain to perform the translation in
704 * @parm vcpu the vcpu to perform the translation on
705 * @parm virt the virtual address to translate
706 */
707 unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
708 int vcpu, unsigned long long virt);
711 /**
712 * DEPRECATED. Avoid using this, as it does not correctly account for PFNs
713 * without a backing MFN.
714 */
715 int xc_get_pfn_list(int xc_handle, uint32_t domid, uint64_t *pfn_buf,
716 unsigned long max_pfns);
718 unsigned long xc_ia64_fpsr_default(void);
720 int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
721 unsigned long dst_pfn, const char *src_page);
723 int xc_clear_domain_page(int xc_handle, uint32_t domid,
724 unsigned long dst_pfn);
726 long xc_get_max_pages(int xc_handle, uint32_t domid);
728 int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
729 domid_t dom);
731 int xc_memory_op(int xc_handle, int cmd, void *arg);
733 int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
734 int num, uint32_t *arr);
737 /* Get current total pages allocated to a domain. */
738 long xc_get_tot_pages(int xc_handle, uint32_t domid);
740 /**
741 * This function retrieves the the number of bytes available
742 * in the heap in a specific range of address-widths and nodes.
743 *
744 * @parm xc_handle a handle to an open hypervisor interface
745 * @parm domid the domain to query
746 * @parm min_width the smallest address width to query (0 if don't care)
747 * @parm max_width the largest address width to query (0 if don't care)
748 * @parm node the node to query (-1 for all)
749 * @parm *bytes caller variable to put total bytes counted
750 * @return 0 on success, <0 on failure.
751 */
752 int xc_availheap(int xc_handle, int min_width, int max_width, int node,
753 uint64_t *bytes);
755 /*
756 * Trace Buffer Operations
757 */
759 /**
760 * xc_tbuf_enable - enable tracing buffers
761 *
762 * @parm xc_handle a handle to an open hypervisor interface
763 * @parm cnt size of tracing buffers to create (in pages)
764 * @parm mfn location to store mfn of the trace buffers to
765 * @parm size location to store the size (in bytes) of a trace buffer to
766 *
767 * Gets the machine address of the trace pointer area and the size of the
768 * per CPU buffers.
769 */
770 int xc_tbuf_enable(int xc_handle, unsigned long pages,
771 unsigned long *mfn, unsigned long *size);
773 /*
774 * Disable tracing buffers.
775 */
776 int xc_tbuf_disable(int xc_handle);
778 /**
779 * This function sets the size of the trace buffers. Setting the size
780 * is currently a one-shot operation that may be performed either at boot
781 * time or via this interface, not both. The buffer size must be set before
782 * enabling tracing.
783 *
784 * @parm xc_handle a handle to an open hypervisor interface
785 * @parm size the size in pages per cpu for the trace buffers
786 * @return 0 on success, -1 on failure.
787 */
788 int xc_tbuf_set_size(int xc_handle, unsigned long size);
790 /**
791 * This function retrieves the current size of the trace buffers.
792 * Note that the size returned is in terms of bytes, not pages.
794 * @parm xc_handle a handle to an open hypervisor interface
795 * @parm size will contain the size in bytes for the trace buffers
796 * @return 0 on success, -1 on failure.
797 */
798 int xc_tbuf_get_size(int xc_handle, unsigned long *size);
800 int xc_tbuf_set_cpu_mask(int xc_handle, uint32_t mask);
802 int xc_tbuf_set_evt_mask(int xc_handle, uint32_t mask);
804 int xc_domctl(int xc_handle, struct xen_domctl *domctl);
805 int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl);
807 int xc_version(int xc_handle, int cmd, void *arg);
809 int xc_acm_op(int xc_handle, int cmd, void *arg, unsigned long arg_size);
811 int xc_flask_op(int xc_handle, flask_op_t *op);
813 /*
814 * Subscribe to state changes in a domain via evtchn.
815 * Returns -1 on failure, in which case errno will be set appropriately.
816 */
817 int xc_dom_subscribe(int xc_handle, domid_t domid, evtchn_port_t port);
819 /**************************
820 * GRANT TABLE OPERATIONS *
821 **************************/
823 /*
824 * Return a handle to the grant table driver, or -1 on failure, in which case
825 * errno will be set appropriately.
826 */
827 int xc_gnttab_open(void);
829 /*
830 * Close a handle previously allocated with xc_gnttab_open().
831 */
832 int xc_gnttab_close(int xcg_handle);
834 /*
835 * Memory maps a grant reference from one domain to a local address range.
836 * Mappings should be unmapped with xc_gnttab_munmap. Returns NULL on failure.
837 *
838 * @parm xcg_handle a handle on an open grant table interface
839 * @parm domid the domain to map memory from
840 * @parm ref the grant reference ID to map
841 * @parm prot same flag as in mmap()
842 */
843 void *xc_gnttab_map_grant_ref(int xcg_handle,
844 uint32_t domid,
845 uint32_t ref,
846 int prot);
848 /**
849 * Memory maps one or more grant references from one or more domains to a
850 * contiguous local address range. Mappings should be unmapped with
851 * xc_gnttab_munmap. Returns NULL on failure.
852 *
853 * @parm xcg_handle a handle on an open grant table interface
854 * @parm count the number of grant references to be mapped
855 * @parm domids an array of @count domain IDs by which the corresponding @refs
856 * were granted
857 * @parm refs an array of @count grant references to be mapped
858 * @parm prot same flag as in mmap()
859 */
860 void *xc_gnttab_map_grant_refs(int xcg_handle,
861 uint32_t count,
862 uint32_t *domids,
863 uint32_t *refs,
864 int prot);
866 /*
867 * Unmaps the @count pages starting at @start_address, which were mapped by a
868 * call to xc_gnttab_map_grant_ref or xc_gnttab_map_grant_refs. Returns zero
869 * on success, otherwise sets errno and returns non-zero.
870 */
871 int xc_gnttab_munmap(int xcg_handle,
872 void *start_address,
873 uint32_t count);
875 /*
876 * Sets the maximum number of grants that may be mapped by the given instance
877 * to @count.
878 *
879 * N.B. This function must be called after opening the handle, and before any
880 * other functions are invoked on it.
881 *
882 * N.B. When variable-length grants are mapped, fragmentation may be observed,
883 * and it may not be possible to satisfy requests up to the maximum number
884 * of grants.
885 */
886 int xc_gnttab_set_max_grants(int xcg_handle,
887 uint32_t count);
889 int xc_physdev_map_pirq(int xc_handle,
890 int domid,
891 int type,
892 int index,
893 int *pirq);
895 int xc_physdev_map_pirq_msi(int xc_handle,
896 int domid,
897 int type,
898 int index,
899 int *pirq,
900 int devfn,
901 int bus,
902 int entry_nr,
903 int msi_type);
905 int xc_physdev_unmap_pirq(int xc_handle,
906 int domid,
907 int pirq);
909 int xc_hvm_set_pci_intx_level(
910 int xc_handle, domid_t dom,
911 uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
912 unsigned int level);
913 int xc_hvm_set_isa_irq_level(
914 int xc_handle, domid_t dom,
915 uint8_t isa_irq,
916 unsigned int level);
918 int xc_hvm_set_pci_link_route(
919 int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq);
922 /*
923 * Track dirty bit changes in the VRAM area
924 *
925 * All of this is done atomically:
926 * - get the dirty bitmap since the last call
927 * - set up dirty tracking area for period up to the next call
928 * - clear the dirty tracking area.
929 *
930 * Returns -ENODATA and does not fill bitmap if the area has changed since the
931 * last call.
932 */
933 int xc_hvm_track_dirty_vram(
934 int xc_handle, domid_t dom,
935 uint64_t first_pfn, uint64_t nr,
936 unsigned long *bitmap);
938 /*
939 * Notify that some pages got modified by the Device Model
940 */
941 int xc_hvm_modified_memory(
942 int xc_handle, domid_t dom, uint64_t first_pfn, uint64_t nr);
/* Error classification used by the libxc error-reporting facility. */
typedef enum {
    XC_ERROR_NONE = 0,
    XC_INTERNAL_ERROR = 1,
    XC_INVALID_KERNEL = 2,
    XC_INVALID_PARAM = 3,
    XC_OUT_OF_MEMORY = 4,
} xc_error_code;

#define XC_MAX_ERROR_MSG_LEN 1024

typedef struct {
    int code;                          /* one of xc_error_code */
    char message[XC_MAX_ERROR_MSG_LEN];
} xc_error;

/*
 * Return a pointer to the last error.  This pointer, and the data
 * pointed to, are only valid until the next call into libxc.
 */
const xc_error *xc_get_last_error(void);

/*
 * Clear the last error.
 */
void xc_clear_last_error(void);

/* Callback type for application-supplied error handlers. */
typedef void (*xc_error_handler)(const xc_error *err);

/*
 * The default error handler, which prints to stderr.
 */
void xc_default_error_handler(const xc_error *err);

/*
 * Convert an error code into a text description.
 */
const char *xc_error_code_to_desc(int code);

/*
 * Register a callback to handle errors (presumably returning the
 * previously registered handler -- verify in the implementation).
 */
xc_error_handler xc_set_error_handler(xc_error_handler handler);
987 int xc_set_hvm_param(int handle, domid_t dom, int param, unsigned long value);
988 int xc_get_hvm_param(int handle, domid_t dom, int param, unsigned long *value);
990 /* IA64 specific, nvram save */
991 int xc_ia64_save_to_nvram(int xc_handle, uint32_t dom);
993 /* IA64 specific, nvram init */
994 int xc_ia64_nvram_init(int xc_handle, char *dom_name, uint32_t dom);
996 /* IA64 specific, set guest OS type optimizations */
997 int xc_ia64_set_os_type(int xc_handle, char *guest_os_type, uint32_t dom);
999 /* HVM guest pass-through */
1000 int xc_assign_device(int xc_handle,
1001 uint32_t domid,
1002 uint32_t machine_bdf);
1004 int xc_get_device_group(int xc_handle,
1005 uint32_t domid,
1006 uint32_t machine_bdf,
1007 uint32_t max_sdevs,
1008 uint32_t *num_sdevs,
1009 uint32_t *sdev_array);
1011 int xc_test_assign_device(int xc_handle,
1012 uint32_t domid,
1013 uint32_t machine_bdf);
1015 int xc_deassign_device(int xc_handle,
1016 uint32_t domid,
1017 uint32_t machine_bdf);
1019 int xc_domain_memory_mapping(int xc_handle,
1020 uint32_t domid,
1021 unsigned long first_gfn,
1022 unsigned long first_mfn,
1023 unsigned long nr_mfns,
1024 uint32_t add_mapping);
1026 int xc_domain_ioport_mapping(int xc_handle,
1027 uint32_t domid,
1028 uint32_t first_gport,
1029 uint32_t first_mport,
1030 uint32_t nr_ports,
1031 uint32_t add_mapping);
1033 int xc_domain_update_msi_irq(
1034 int xc_handle,
1035 uint32_t domid,
1036 uint32_t gvec,
1037 uint32_t pirq,
1038 uint32_t gflags);
1040 int xc_domain_bind_pt_irq(int xc_handle,
1041 uint32_t domid,
1042 uint8_t machine_irq,
1043 uint8_t irq_type,
1044 uint8_t bus,
1045 uint8_t device,
1046 uint8_t intx,
1047 uint8_t isa_irq);
1049 int xc_domain_unbind_pt_irq(int xc_handle,
1050 uint32_t domid,
1051 uint8_t machine_irq,
1052 uint8_t irq_type,
1053 uint8_t bus,
1054 uint8_t device,
1055 uint8_t intx,
1056 uint8_t isa_irq);
1058 int xc_domain_bind_pt_pci_irq(int xc_handle,
1059 uint32_t domid,
1060 uint8_t machine_irq,
1061 uint8_t bus,
1062 uint8_t device,
1063 uint8_t intx);
1065 int xc_domain_bind_pt_isa_irq(int xc_handle,
1066 uint32_t domid,
1067 uint8_t machine_irq);
1069 /* Set the target domain */
1070 int xc_domain_set_target(int xc_handle,
1071 uint32_t domid,
1072 uint32_t target);
1074 #if defined(__i386__) || defined(__x86_64__)
1075 int xc_cpuid_check(int xc,
1076 const unsigned int *input,
1077 const char **config,
1078 char **config_transformed);
1079 int xc_cpuid_set(int xc,
1080 domid_t domid,
1081 const unsigned int *input,
1082 const char **config,
1083 char **config_transformed);
1084 int xc_cpuid_apply_policy(int xc,
1085 domid_t domid);
1086 void xc_cpuid_to_str(const unsigned int *regs,
1087 char **strs);
1088 #endif
/* Per-P-state sample returned by the Px statistics interface. */
struct xc_px_val {
    uint64_t freq;        /* Px core frequency */
    uint64_t residency;   /* Px residency time */
    uint64_t count;       /* Px transition count */
};

/* Aggregate P-state statistics for one cpu. */
struct xc_px_stat {
    uint8_t total;        /* total Px states */
    uint8_t usable;       /* usable Px states */
    uint8_t last;         /* last Px state */
    uint8_t cur;          /* current Px state */
    uint64_t *trans_pt;   /* Px transition table */
    struct xc_px_val *pt;
};

int xc_pm_get_max_px(int xc_handle, int cpuid, int *max_px);
int xc_pm_get_pxstat(int xc_handle, int cpuid, struct xc_px_stat *pxpt);
int xc_pm_reset_pxstat(int xc_handle, int cpuid);

/* Aggregate C-state statistics for one cpu. */
struct xc_cx_stat {
    uint32_t nr;          /* entry count in triggers & residencies, including C0 */
    uint32_t last;        /* last Cx state */
    uint64_t idle_time;   /* idle time from boot */
    uint64_t *triggers;   /* Cx trigger counts */
    uint64_t *residencies;/* Cx residencies */
};
typedef struct xc_cx_stat xc_cx_stat_t;

int xc_pm_get_max_cx(int xc_handle, int cpuid, int *max_cx);
int xc_pm_get_cxstat(int xc_handle, int cpuid, struct xc_cx_stat *cxpt);
int xc_pm_reset_cxstat(int xc_handle, int cpuid);
1122 #endif /* XENCTRL_H */