ia64/xen-unstable

view tools/libxc/xenctrl.h @ 17571:b6aa55ca599e

shadow: track video RAM dirty bits

This adds a new HVM op that enables tracking dirty bits of a range of
video RAM. The idea is to optimize just for the most common case
(only one guest mapping, with sometimes some temporary other
mappings), which makes it possible to keep the overhead on shadow as low as
possible.

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri May 02 15:08:27 2008 +0100 (2008-05-02)
parents ad55c06c9bbc
children b0d7780794eb
line source
1 /******************************************************************************
2 * xenctrl.h
3 *
4 * A library for low-level access to the Xen control interfaces.
5 *
6 * Copyright (c) 2003-2004, K A Fraser.
7 *
8 * xc_gnttab functions:
9 * Copyright (c) 2007-2008, D G Murray <Derek.Murray@cl.cam.ac.uk>
10 */
12 #ifndef XENCTRL_H
13 #define XENCTRL_H
15 /* Tell the Xen public headers we are a user-space tools build. */
16 #ifndef __XEN_TOOLS__
17 #define __XEN_TOOLS__ 1
18 #endif
20 #include <stddef.h>
21 #include <stdint.h>
22 #include <xen/xen.h>
23 #include <xen/domctl.h>
24 #include <xen/physdev.h>
25 #include <xen/sysctl.h>
26 #include <xen/version.h>
27 #include <xen/event_channel.h>
28 #include <xen/sched.h>
29 #include <xen/memory.h>
30 #include <xen/xsm/acm.h>
31 #include <xen/xsm/acm_ops.h>
32 #include <xen/xsm/flask_op.h>
34 #ifdef __ia64__
35 #define XC_PAGE_SHIFT 14
36 #else
37 #define XC_PAGE_SHIFT 12
38 #endif
39 #define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT)
40 #define XC_PAGE_MASK (~(XC_PAGE_SIZE-1))
42 /*
43 * DEFINITIONS FOR CPU BARRIERS
44 */
46 #if defined(__i386__)
47 #define xen_mb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
48 #define xen_rmb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
49 #define xen_wmb() asm volatile ( "" : : : "memory")
50 #elif defined(__x86_64__)
51 #define xen_mb() asm volatile ( "mfence" : : : "memory")
52 #define xen_rmb() asm volatile ( "lfence" : : : "memory")
53 #define xen_wmb() asm volatile ( "" : : : "memory")
54 #elif defined(__ia64__)
55 #define xen_mb() asm volatile ("mf" ::: "memory")
56 #define xen_rmb() asm volatile ("mf" ::: "memory")
57 #define xen_wmb() asm volatile ("mf" ::: "memory")
58 #elif defined(__powerpc__)
59 #define xen_mb() asm volatile ("sync" : : : "memory")
60 #define xen_rmb() asm volatile ("sync" : : : "memory") /* lwsync? */
61 #define xen_wmb() asm volatile ("sync" : : : "memory") /* eieio? */
62 #else
63 #error "Define barriers"
64 #endif
66 /*
67 * INITIALIZATION FUNCTIONS
68 */
70 /**
71 * This function opens a handle to the hypervisor interface. This function can
72 * be called multiple times within a single process. Multiple processes can
73 * have an open hypervisor interface at the same time.
74 *
75 * Each call to this function should have a corresponding call to
76 * xc_interface_close().
77 *
78 * This function can fail if the caller does not have superuser permission or
79 * if a Xen-enabled kernel is not currently running.
80 *
81 * @return a handle to the hypervisor interface or -1 on failure
82 */
83 int xc_interface_open(void);
85 /**
86 * This function closes an open hypervisor interface.
87 *
88 * This function can fail if the handle does not represent an open interface or
89 * if there were problems closing the interface.
90 *
91 * @parm xc_handle a handle to an open hypervisor interface
92 * @return 0 on success, -1 otherwise.
93 */
94 int xc_interface_close(int xc_handle);
96 /*
97 * KERNEL INTERFACES
98 */
100 /*
101 * Resolve a kernel device name (e.g., "evtchn", "blktap0") into a kernel
102 * device number. Returns -1 on error (and sets errno).
103 */
104 int xc_find_device_number(const char *name);
106 /*
107 * DOMAIN DEBUGGING FUNCTIONS
108 */
110 typedef struct xc_core_header {
111 unsigned int xch_magic;
112 unsigned int xch_nr_vcpus;
113 unsigned int xch_nr_pages;
114 unsigned int xch_ctxt_offset;
115 unsigned int xch_index_offset;
116 unsigned int xch_pages_offset;
117 } xc_core_header_t;
119 #define XC_CORE_MAGIC 0xF00FEBED
120 #define XC_CORE_MAGIC_HVM 0xF00FEBEE
122 #ifdef __linux__
124 #include <sys/ptrace.h>
125 #include <thread_db.h>
127 typedef void (*thr_ev_handler_t)(long);
129 void xc_register_event_handler(
130 thr_ev_handler_t h,
131 td_event_e e);
133 long xc_ptrace(
134 int xc_handle,
135 enum __ptrace_request request,
136 uint32_t domid,
137 long addr,
138 long data);
140 int xc_waitdomain(
141 int xc_handle,
142 int domain,
143 int *status,
144 int options);
146 #endif /* __linux__ */
148 /*
149 * DOMAIN MANAGEMENT FUNCTIONS
150 */
152 typedef struct xc_dominfo {
153 uint32_t domid;
154 uint32_t ssidref;
155 unsigned int dying:1, crashed:1, shutdown:1,
156 paused:1, blocked:1, running:1,
157 hvm:1, debugged:1;
158 unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
159 unsigned long nr_pages;
160 unsigned long shared_info_frame;
161 uint64_t cpu_time;
162 unsigned long max_memkb;
163 unsigned int nr_online_vcpus;
164 unsigned int max_vcpu_id;
165 xen_domain_handle_t handle;
166 } xc_dominfo_t;
168 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
169 int xc_domain_create(int xc_handle,
170 uint32_t ssidref,
171 xen_domain_handle_t handle,
172 uint32_t flags,
173 uint32_t *pdomid);
176 /* Functions to produce a dump of a given domain
177 * xc_domain_dumpcore - produces a dump to a specified file
178 * xc_domain_dumpcore_via_callback - produces a dump, using a specified
179 * callback function
180 */
181 int xc_domain_dumpcore(int xc_handle,
182 uint32_t domid,
183 const char *corename);
185 /* Define the callback function type for xc_domain_dumpcore_via_callback.
186 *
187 * This function is called by the coredump code for every "write",
188 * and passes an opaque object for the use of the function and
189 * created by the caller of xc_domain_dumpcore_via_callback.
190 */
191 typedef int (dumpcore_rtn_t)(void *arg, char *buffer, unsigned int length);
193 int xc_domain_dumpcore_via_callback(int xc_handle,
194 uint32_t domid,
195 void *arg,
196 dumpcore_rtn_t dump_rtn);
198 /*
199 * This function sets the maximum number of vcpus that a domain may create.
200 *
201 * @parm xc_handle a handle to an open hypervisor interface.
202 * @parm domid the domain id in which vcpus are to be created.
203 * @parm max the maximum number of vcpus that the domain may create.
204 * @return 0 on success, -1 on failure.
205 */
206 int xc_domain_max_vcpus(int xc_handle,
207 uint32_t domid,
208 unsigned int max);
210 /**
211 * This function pauses a domain. A paused domain still exists in memory
212 * however it does not receive any timeslices from the hypervisor.
213 *
214 * @parm xc_handle a handle to an open hypervisor interface
215 * @parm domid the domain id to pause
216 * @return 0 on success, -1 on failure.
217 */
218 int xc_domain_pause(int xc_handle,
219 uint32_t domid);
220 /**
221 * This function unpauses a domain. The domain should have been previously
222 * paused.
223 *
224 * @parm xc_handle a handle to an open hypervisor interface
225 * @parm domid the domain id to unpause
226 * return 0 on success, -1 on failure
227 */
228 int xc_domain_unpause(int xc_handle,
229 uint32_t domid);
231 /**
232 * This function will destroy a domain. Destroying a domain removes the domain
233 * completely from memory. This function should be called after sending the
234 * domain a SHUTDOWN control message to free up the domain resources.
235 *
236 * @parm xc_handle a handle to an open hypervisor interface
237 * @parm domid the domain id to destroy
238 * @return 0 on success, -1 on failure
239 */
240 int xc_domain_destroy(int xc_handle,
241 uint32_t domid);
244 /**
245 * This function resumes a suspended domain. The domain should have
246 * been previously suspended.
247 *
248 * @parm xc_handle a handle to an open hypervisor interface
249 * @parm domid the domain id to resume
250 * @parm fast use cooperative resume (guest must support this)
251 * return 0 on success, -1 on failure
252 */
253 int xc_domain_resume(int xc_handle,
254 uint32_t domid,
255 int fast);
257 /**
258 * This function will shutdown a domain. This is intended for use in
259 * fully-virtualized domains where this operation is analogous to the
260 * sched_op operations in a paravirtualized domain. The caller is
261 * expected to give the reason for the shutdown.
262 *
263 * @parm xc_handle a handle to an open hypervisor interface
264 * @parm domid the domain id to destroy
265 * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
266 * @return 0 on success, -1 on failure
267 */
268 int xc_domain_shutdown(int xc_handle,
269 uint32_t domid,
270 int reason);
272 int xc_vcpu_setaffinity(int xc_handle,
273 uint32_t domid,
274 int vcpu,
275 uint64_t cpumap);
276 int xc_vcpu_getaffinity(int xc_handle,
277 uint32_t domid,
278 int vcpu,
279 uint64_t *cpumap);
281 /**
282 * This function will return information about one or more domains. It is
283 * designed to iterate over the list of domains. If a single domain is
284 * requested, this function will return the next domain in the list - if
285 * one exists. It is, therefore, important in this case to make sure the
286 * domain requested was the one returned.
287 *
288 * @parm xc_handle a handle to an open hypervisor interface
289 * @parm first_domid the first domain to enumerate information from. Domains
290 * are currently enumerated in order of creation.
291 * @parm max_doms the number of elements in info
292 * @parm info an array of max_doms size that will contain the information for
293 * the enumerated domains.
294 * @return the number of domains enumerated or -1 on error
295 */
296 int xc_domain_getinfo(int xc_handle,
297 uint32_t first_domid,
298 unsigned int max_doms,
299 xc_dominfo_t *info);
302 /**
303 * This function will set the execution context for the specified vcpu.
304 *
305 * @parm xc_handle a handle to an open hypervisor interface
306 * @parm domid the domain to set the vcpu context for
307 * @parm vcpu the vcpu number for the context
308 * @parm ctxt pointer to the cpu context with the values to set
309 * @return the number of domains enumerated or -1 on error
310 */
311 int xc_vcpu_setcontext(int xc_handle,
312 uint32_t domid,
313 uint32_t vcpu,
314 vcpu_guest_context_t *ctxt);
315 /**
316 * This function will return information about one or more domains, using a
317 * single hypercall. The domain information will be stored into the supplied
318 * array of xc_domaininfo_t structures.
319 *
320 * @parm xc_handle a handle to an open hypervisor interface
321 * @parm first_domain the first domain to enumerate information from.
322 * Domains are currently enumerated in order of creation.
323 * @parm max_domains the number of elements in info
324 * @parm info an array of max_doms size that will contain the information for
325 * the enumerated domains.
326 * @return the number of domains enumerated or -1 on error
327 */
328 int xc_domain_getinfolist(int xc_handle,
329 uint32_t first_domain,
330 unsigned int max_domains,
331 xc_domaininfo_t *info);
333 /**
334 * This function returns information about the context of a hvm domain
335 * @parm xc_handle a handle to an open hypervisor interface
336 * @parm domid the domain to get information from
337 * @parm ctxt_buf a pointer to a structure to store the execution context of
338 * the hvm domain
339 * @parm size the size of ctxt_buf in bytes
340 * @return 0 on success, -1 on failure
341 */
342 int xc_domain_hvm_getcontext(int xc_handle,
343 uint32_t domid,
344 uint8_t *ctxt_buf,
345 uint32_t size);
347 /**
348 * This function will set the context for hvm domain
349 *
350 * @parm xc_handle a handle to an open hypervisor interface
351 * @parm domid the domain to set the hvm domain context for
352 * @parm hvm_ctxt pointer to the hvm context with the values to set
353 * @parm size the size of hvm_ctxt in bytes
354 * @return 0 on success, -1 on failure
355 */
356 int xc_domain_hvm_setcontext(int xc_handle,
357 uint32_t domid,
358 uint8_t *hvm_ctxt,
359 uint32_t size);
361 /**
362 * This function returns information about the execution context of a
363 * particular vcpu of a domain.
364 *
365 * @parm xc_handle a handle to an open hypervisor interface
366 * @parm domid the domain to get information from
367 * @parm vcpu the vcpu number
368 * @parm ctxt a pointer to a structure to store the execution context of the
369 * domain
370 * @return 0 on success, -1 on failure
371 */
372 int xc_vcpu_getcontext(int xc_handle,
373 uint32_t domid,
374 uint32_t vcpu,
375 vcpu_guest_context_t *ctxt);
377 typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
378 int xc_vcpu_getinfo(int xc_handle,
379 uint32_t domid,
380 uint32_t vcpu,
381 xc_vcpuinfo_t *info);
383 long long xc_domain_get_cpu_usage(int xc_handle,
384 domid_t domid,
385 int vcpu);
387 int xc_domain_sethandle(int xc_handle, uint32_t domid,
388 xen_domain_handle_t handle);
390 typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
391 int xc_shadow_control(int xc_handle,
392 uint32_t domid,
393 unsigned int sop,
394 unsigned long *dirty_bitmap,
395 unsigned long pages,
396 unsigned long *mb,
397 uint32_t mode,
398 xc_shadow_op_stats_t *stats);
400 int xc_sedf_domain_set(int xc_handle,
401 uint32_t domid,
402 uint64_t period, uint64_t slice,
403 uint64_t latency, uint16_t extratime,
404 uint16_t weight);
406 int xc_sedf_domain_get(int xc_handle,
407 uint32_t domid,
408 uint64_t* period, uint64_t *slice,
409 uint64_t *latency, uint16_t *extratime,
410 uint16_t *weight);
412 int xc_sched_credit_domain_set(int xc_handle,
413 uint32_t domid,
414 struct xen_domctl_sched_credit *sdom);
416 int xc_sched_credit_domain_get(int xc_handle,
417 uint32_t domid,
418 struct xen_domctl_sched_credit *sdom);
420 /**
421 * This function sends a trigger to a domain.
422 *
423 * @parm xc_handle a handle to an open hypervisor interface
424 * @parm domid the domain id to send trigger
425 * @parm trigger the trigger type
426 * @parm vcpu the vcpu number to send trigger
427 * return 0 on success, -1 on failure
428 */
429 int xc_domain_send_trigger(int xc_handle,
430 uint32_t domid,
431 uint32_t trigger,
432 uint32_t vcpu);
434 /**
435 * This function enables or disables debugging of a domain.
436 *
437 * @parm xc_handle a handle to an open hypervisor interface
438 * @parm domid the domain id to send trigger
439 * @parm enable true to enable debugging
440 * return 0 on success, -1 on failure
441 */
442 int xc_domain_setdebugging(int xc_handle,
443 uint32_t domid,
444 unsigned int enable);
446 /*
447 * EVENT CHANNEL FUNCTIONS
448 */
450 /* A port identifier is guaranteed to fit in 31 bits. */
451 typedef int evtchn_port_or_error_t;
453 /**
454 * This function allocates an unbound port. Ports are named endpoints used for
455 * interdomain communication. This function is most useful in opening a
456 * well-known port within a domain to receive events on.
457 *
458 * NOTE: If you are allocating a *local* unbound port, you probably want to
459 * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
460 * ports *only* during domain creation.
461 *
462 * @parm xc_handle a handle to an open hypervisor interface
463 * @parm dom the ID of the local domain (the 'allocatee')
464 * @parm remote_dom the ID of the domain who will later bind
465 * @return allocated port (in @dom) on success, -1 on failure
466 */
467 evtchn_port_or_error_t
468 xc_evtchn_alloc_unbound(int xc_handle,
469 uint32_t dom,
470 uint32_t remote_dom);
472 int xc_evtchn_reset(int xc_handle,
473 uint32_t dom);
474 int xc_evtchn_status(int xc_handle,
475 uint32_t dom,
476 uint32_t port);
478 /*
479 * Return a handle to the event channel driver, or -1 on failure, in which case
480 * errno will be set appropriately.
481 */
482 int xc_evtchn_open(void);
484 /*
485 * Close a handle previously allocated with xc_evtchn_open().
486 */
487 int xc_evtchn_close(int xce_handle);
489 /*
490 * Return an fd that can be select()ed on for further calls to
491 * xc_evtchn_pending().
492 */
493 int xc_evtchn_fd(int xce_handle);
495 /*
496 * Notify the given event channel. Returns -1 on failure, in which case
497 * errno will be set appropriately.
498 */
499 int xc_evtchn_notify(int xce_handle, evtchn_port_t port);
501 /*
502 * Returns a new event port awaiting interdomain connection from the given
503 * domain ID, or -1 on failure, in which case errno will be set appropriately.
504 */
505 evtchn_port_or_error_t
506 xc_evtchn_bind_unbound_port(int xce_handle, int domid);
508 /*
509 * Returns a new event port bound to the remote port for the given domain ID,
510 * or -1 on failure, in which case errno will be set appropriately.
511 */
512 evtchn_port_or_error_t
513 xc_evtchn_bind_interdomain(int xce_handle, int domid,
514 evtchn_port_t remote_port);
516 /*
517 * Bind an event channel to the given VIRQ. Returns the event channel bound to
518 * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
519 */
520 evtchn_port_or_error_t
521 xc_evtchn_bind_virq(int xce_handle, unsigned int virq);
523 /*
524 * Unbind the given event channel. Returns -1 on failure, in which case errno
525 * will be set appropriately.
526 */
527 int xc_evtchn_unbind(int xce_handle, evtchn_port_t port);
529 /*
530 * Return the next event channel to become pending, or -1 on failure, in which
531 * case errno will be set appropriately.
532 */
533 evtchn_port_or_error_t
534 xc_evtchn_pending(int xce_handle);
536 /*
537 * Unmask the given event channel. Returns -1 on failure, in which case errno
538 * will be set appropriately.
539 */
540 int xc_evtchn_unmask(int xce_handle, evtchn_port_t port);
542 int xc_physdev_pci_access_modify(int xc_handle,
543 uint32_t domid,
544 int bus,
545 int dev,
546 int func,
547 int enable);
549 int xc_readconsolering(int xc_handle,
550 char **pbuffer,
551 unsigned int *pnr_chars,
552 int clear, int incremental, uint32_t *pindex);
554 int xc_send_debug_keys(int xc_handle, char *keys);
556 typedef xen_sysctl_physinfo_t xc_physinfo_t;
557 typedef uint32_t xc_cpu_to_node_t;
558 int xc_physinfo(int xc_handle,
559 xc_physinfo_t *info);
561 int xc_sched_id(int xc_handle,
562 int *sched_id);
564 typedef xen_sysctl_cpuinfo_t xc_cpuinfo_t;
565 int xc_getcpuinfo(int xc_handle, int max_cpus,
566 xc_cpuinfo_t *info, int *nr_cpus);
568 int xc_domain_setmaxmem(int xc_handle,
569 uint32_t domid,
570 unsigned int max_memkb);
572 int xc_domain_set_memmap_limit(int xc_handle,
573 uint32_t domid,
574 unsigned long map_limitkb);
576 int xc_domain_set_time_offset(int xc_handle,
577 uint32_t domid,
578 int32_t time_offset_seconds);
580 int xc_domain_memory_increase_reservation(int xc_handle,
581 uint32_t domid,
582 unsigned long nr_extents,
583 unsigned int extent_order,
584 unsigned int address_bits,
585 xen_pfn_t *extent_start);
587 int xc_domain_memory_decrease_reservation(int xc_handle,
588 uint32_t domid,
589 unsigned long nr_extents,
590 unsigned int extent_order,
591 xen_pfn_t *extent_start);
593 int xc_domain_memory_populate_physmap(int xc_handle,
594 uint32_t domid,
595 unsigned long nr_extents,
596 unsigned int extent_order,
597 unsigned int address_bits,
598 xen_pfn_t *extent_start);
600 int xc_domain_ioport_permission(int xc_handle,
601 uint32_t domid,
602 uint32_t first_port,
603 uint32_t nr_ports,
604 uint32_t allow_access);
606 int xc_domain_irq_permission(int xc_handle,
607 uint32_t domid,
608 uint8_t pirq,
609 uint8_t allow_access);
611 int xc_domain_iomem_permission(int xc_handle,
612 uint32_t domid,
613 unsigned long first_mfn,
614 unsigned long nr_mfns,
615 uint8_t allow_access);
617 int xc_domain_pin_memory_cacheattr(int xc_handle,
618 uint32_t domid,
619 uint64_t start,
620 uint64_t end,
621 uint32_t type);
623 unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
624 unsigned long mfn);
626 typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
627 typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
628 /* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
629 arrays. */
630 int xc_perfc_control(int xc_handle,
631 uint32_t op,
632 xc_perfc_desc_t *desc,
633 xc_perfc_val_t *val,
634 int *nbr_desc,
635 int *nbr_val);
637 /**
638 * Memory maps a range within one domain to a local address range. Mappings
639 * should be unmapped with munmap and should follow the same rules as mmap
640 * regarding page alignment. Returns NULL on failure.
641 *
642 * In Linux, the ring queue for the control channel is accessible by mapping
643 * the shared_info_frame (from xc_domain_getinfo()) + 2048. The structure
644 * stored there is of type control_if_t.
645 *
646 * @parm xc_handle a handle on an open hypervisor interface
647 * @parm dom the domain to map memory from
648 * @parm size the amount of memory to map (in multiples of page size)
649 * @parm prot same flag as in mmap().
650 * @parm mfn the frame address to map.
651 */
652 void *xc_map_foreign_range(int xc_handle, uint32_t dom,
653 int size, int prot,
654 unsigned long mfn );
656 void *xc_map_foreign_pages(int xc_handle, uint32_t dom, int prot,
657 const xen_pfn_t *arr, int num );
659 /**
660 * Like xc_map_foreign_pages(), except it can succeed partially.
661 * When a page cannot be mapped, its PFN in @arr is or'ed with
662 * 0xF0000000 to indicate the error.
663 */
664 void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
665 xen_pfn_t *arr, int num );
667 /**
668 * Translates a virtual address in the context of a given domain and
669 * vcpu returning the machine page frame number of the associated
670 * page.
671 *
672 * @parm xc_handle a handle on an open hypervisor interface
673 * @parm dom the domain to perform the translation in
674 * @parm vcpu the vcpu to perform the translation on
675 * @parm virt the virtual address to translate
676 */
677 unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
678 int vcpu, unsigned long long virt);
681 /**
682 * DEPRECATED. Avoid using this, as it does not correctly account for PFNs
683 * without a backing MFN.
684 */
685 int xc_get_pfn_list(int xc_handle, uint32_t domid, uint64_t *pfn_buf,
686 unsigned long max_pfns);
688 unsigned long xc_ia64_fpsr_default(void);
690 int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
691 unsigned long dst_pfn, const char *src_page);
693 int xc_clear_domain_page(int xc_handle, uint32_t domid,
694 unsigned long dst_pfn);
696 long xc_get_max_pages(int xc_handle, uint32_t domid);
698 int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
699 domid_t dom);
701 int xc_memory_op(int xc_handle, int cmd, void *arg);
703 int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
704 int num, uint32_t *arr);
707 /* Get current total pages allocated to a domain. */
708 long xc_get_tot_pages(int xc_handle, uint32_t domid);
710 /**
711 * This function retrieves the number of bytes available
712 * in the heap in a specific range of address-widths and nodes.
713 *
714 * @parm xc_handle a handle to an open hypervisor interface
715 * @parm domid the domain to query
716 * @parm min_width the smallest address width to query (0 if don't care)
717 * @parm max_width the largest address width to query (0 if don't care)
718 * @parm node the node to query (-1 for all)
719 * @parm *bytes caller variable to put total bytes counted
720 * @return 0 on success, <0 on failure.
721 */
722 int xc_availheap(int xc_handle, int min_width, int max_width, int node,
723 uint64_t *bytes);
725 /*
726 * Trace Buffer Operations
727 */
729 /**
730 * xc_tbuf_enable - enable tracing buffers
731 *
732 * @parm xc_handle a handle to an open hypervisor interface
733 * @parm cnt size of tracing buffers to create (in pages)
734 * @parm mfn location to store mfn of the trace buffers to
735 * @parm size location to store the size (in bytes) of a trace buffer to
736 *
737 * Gets the machine address of the trace pointer area and the size of the
738 * per CPU buffers.
739 */
740 int xc_tbuf_enable(int xc_handle, unsigned long pages,
741 unsigned long *mfn, unsigned long *size);
743 /*
744 * Disable tracing buffers.
745 */
746 int xc_tbuf_disable(int xc_handle);
748 /**
749 * This function sets the size of the trace buffers. Setting the size
750 * is currently a one-shot operation that may be performed either at boot
751 * time or via this interface, not both. The buffer size must be set before
752 * enabling tracing.
753 *
754 * @parm xc_handle a handle to an open hypervisor interface
755 * @parm size the size in pages per cpu for the trace buffers
756 * @return 0 on success, -1 on failure.
757 */
758 int xc_tbuf_set_size(int xc_handle, unsigned long size);
760 /**
761 * This function retrieves the current size of the trace buffers.
762 * Note that the size returned is in terms of bytes, not pages.
764 * @parm xc_handle a handle to an open hypervisor interface
765 * @parm size will contain the size in bytes for the trace buffers
766 * @return 0 on success, -1 on failure.
767 */
768 int xc_tbuf_get_size(int xc_handle, unsigned long *size);
770 int xc_tbuf_set_cpu_mask(int xc_handle, uint32_t mask);
772 int xc_tbuf_set_evt_mask(int xc_handle, uint32_t mask);
774 int xc_domctl(int xc_handle, struct xen_domctl *domctl);
775 int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl);
777 int xc_version(int xc_handle, int cmd, void *arg);
779 int xc_acm_op(int xc_handle, int cmd, void *arg, unsigned long arg_size);
781 int xc_flask_op(int xc_handle, flask_op_t *op);
783 /**************************
784 * GRANT TABLE OPERATIONS *
785 **************************/
787 /*
788 * Return a handle to the grant table driver, or -1 on failure, in which case
789 * errno will be set appropriately.
790 */
791 int xc_gnttab_open(void);
793 /*
794 * Close a handle previously allocated with xc_gnttab_open().
795 */
796 int xc_gnttab_close(int xcg_handle);
798 /*
799 * Memory maps a grant reference from one domain to a local address range.
800 * Mappings should be unmapped with xc_gnttab_munmap. Returns NULL on failure.
801 *
802 * @parm xcg_handle a handle on an open grant table interface
803 * @parm domid the domain to map memory from
804 * @parm ref the grant reference ID to map
805 * @parm prot same flag as in mmap()
806 */
807 void *xc_gnttab_map_grant_ref(int xcg_handle,
808 uint32_t domid,
809 uint32_t ref,
810 int prot);
812 /**
813 * Memory maps one or more grant references from one or more domains to a
814 * contiguous local address range. Mappings should be unmapped with
815 * xc_gnttab_munmap. Returns NULL on failure.
816 *
817 * @parm xcg_handle a handle on an open grant table interface
818 * @parm count the number of grant references to be mapped
819 * @parm domids an array of @count domain IDs by which the corresponding @refs
820 * were granted
821 * @parm refs an array of @count grant references to be mapped
822 * @parm prot same flag as in mmap()
823 */
824 void *xc_gnttab_map_grant_refs(int xcg_handle,
825 uint32_t count,
826 uint32_t *domids,
827 uint32_t *refs,
828 int prot);
830 /*
831 * Unmaps the @count pages starting at @start_address, which were mapped by a
832 * call to xc_gnttab_map_grant_ref or xc_gnttab_map_grant_refs. Returns zero
833 * on success, otherwise sets errno and returns non-zero.
834 */
835 int xc_gnttab_munmap(int xcg_handle,
836 void *start_address,
837 uint32_t count);
839 /*
840 * Sets the maximum number of grants that may be mapped by the given instance
841 * to @count.
842 *
843 * N.B. This function must be called after opening the handle, and before any
844 * other functions are invoked on it.
845 *
846 * N.B. When variable-length grants are mapped, fragmentation may be observed,
847 * and it may not be possible to satisfy requests up to the maximum number
848 * of grants.
849 */
850 int xc_gnttab_set_max_grants(int xcg_handle,
851 uint32_t count);
853 int xc_physdev_map_pirq(int xc_handle,
854 int domid,
855 int type,
856 int index,
857 int *pirq);
859 int xc_physdev_map_pirq_msi(int xc_handle,
860 int domid,
861 int type,
862 int index,
863 int *pirq,
864 int devfn,
865 int bus,
866 int msi_type);
868 int xc_physdev_unmap_pirq(int xc_handle,
869 int domid,
870 int pirq);
872 int xc_hvm_set_pci_intx_level(
873 int xc_handle, domid_t dom,
874 uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
875 unsigned int level);
876 int xc_hvm_set_isa_irq_level(
877 int xc_handle, domid_t dom,
878 uint8_t isa_irq,
879 unsigned int level);
881 int xc_hvm_set_pci_link_route(
882 int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq);
885 /*
886 * Track dirty bit changes in the VRAM area
887 *
888 * All of this is done atomically:
889 * - get the dirty bitmap since the last call
890 * - set up dirty tracking area for period up to the next call
891 * - clear the dirty tracking area.
892 *
893 * Returns -ENODATA and does not fill bitmap if the area has changed since the
894 * last call.
895 */
896 int xc_hvm_track_dirty_vram(
897 int xc_handle, domid_t dom,
898 uint64_t first_pfn, uint64_t nr,
899 unsigned long *bitmap);
901 typedef enum {
902 XC_ERROR_NONE = 0,
903 XC_INTERNAL_ERROR = 1,
904 XC_INVALID_KERNEL = 2,
905 XC_INVALID_PARAM = 3,
906 XC_OUT_OF_MEMORY = 4,
907 } xc_error_code;
909 #define XC_MAX_ERROR_MSG_LEN 1024
910 typedef struct {
911 int code;
912 char message[XC_MAX_ERROR_MSG_LEN];
913 } xc_error;
915 /*
916 * Return a pointer to the last error. This pointer and the
917 * data pointed to are only valid until the next call to
918 * libxc.
919 */
920 const xc_error *xc_get_last_error(void);
922 /*
923 * Clear the last error
924 */
925 void xc_clear_last_error(void);
927 typedef void (*xc_error_handler)(const xc_error *err);
929 /*
930 * The default error handler which prints to stderr
931 */
932 void xc_default_error_handler(const xc_error *err);
934 /*
935 * Convert an error code into a text description
936 */
937 const char *xc_error_code_to_desc(int code);
939 /*
940 * Registers a callback to handle errors
941 */
942 xc_error_handler xc_set_error_handler(xc_error_handler handler);
944 int xc_set_hvm_param(int handle, domid_t dom, int param, unsigned long value);
945 int xc_get_hvm_param(int handle, domid_t dom, int param, unsigned long *value);
947 /* PowerPC specific. */
948 int xc_alloc_real_mode_area(int xc_handle,
949 uint32_t domid,
950 unsigned int log);
952 /* IA64 specific, nvram save */
953 int xc_ia64_save_to_nvram(int xc_handle, uint32_t dom);
955 /* IA64 specific, nvram init */
956 int xc_ia64_nvram_init(int xc_handle, char *dom_name, uint32_t dom);
958 /* IA64 specific, set guest OS type optimizations */
959 int xc_ia64_set_os_type(int xc_handle, char *guest_os_type, uint32_t dom);
961 /* HVM guest pass-through */
962 int xc_assign_device(int xc_handle,
963 uint32_t domid,
964 uint32_t machine_bdf);
966 int xc_test_assign_device(int xc_handle,
967 uint32_t domid,
968 uint32_t machine_bdf);
970 int xc_deassign_device(int xc_handle,
971 uint32_t domid,
972 uint32_t machine_bdf);
974 int xc_domain_memory_mapping(int xc_handle,
975 uint32_t domid,
976 unsigned long first_gfn,
977 unsigned long first_mfn,
978 unsigned long nr_mfns,
979 uint32_t add_mapping);
981 int xc_domain_ioport_mapping(int xc_handle,
982 uint32_t domid,
983 uint32_t first_gport,
984 uint32_t first_mport,
985 uint32_t nr_ports,
986 uint32_t add_mapping);
988 int xc_domain_update_msi_irq(
989 int xc_handle,
990 uint32_t domid,
991 uint32_t gvec,
992 uint32_t pirq,
993 uint32_t gflags);
995 int xc_domain_bind_pt_irq(int xc_handle,
996 uint32_t domid,
997 uint8_t machine_irq,
998 uint8_t irq_type,
999 uint8_t bus,
1000 uint8_t device,
1001 uint8_t intx,
1002 uint8_t isa_irq);
1004 int xc_domain_unbind_pt_irq(int xc_handle,
1005 uint32_t domid,
1006 uint8_t machine_irq,
1007 uint8_t irq_type,
1008 uint8_t bus,
1009 uint8_t device,
1010 uint8_t intx,
1011 uint8_t isa_irq);
1013 int xc_domain_bind_pt_pci_irq(int xc_handle,
1014 uint32_t domid,
1015 uint8_t machine_irq,
1016 uint8_t bus,
1017 uint8_t device,
1018 uint8_t intx);
1020 int xc_domain_bind_pt_isa_irq(int xc_handle,
1021 uint32_t domid,
1022 uint8_t machine_irq);
1024 /* Set the target domain */
1025 int xc_domain_set_target(int xc_handle,
1026 uint32_t domid,
1027 uint32_t target);
1029 #if defined(__i386__) || defined(__x86_64__)
1030 int xc_cpuid_check(int xc,
1031 const unsigned int *input,
1032 const char **config,
1033 char **config_transformed);
1034 int xc_cpuid_set(int xc,
1035 domid_t domid,
1036 const unsigned int *input,
1037 const char **config,
1038 char **config_transformed);
1039 int xc_cpuid_apply_policy(int xc,
1040 domid_t domid);
1041 void xc_cpuid_to_str(const unsigned int *regs,
1042 char **strs);
1043 #endif
1045 #endif /* XENCTRL_H */