ia64/xen-unstable

view tools/libxc/xenctrl.h @ 18394:dade7f0bdc8d

hvm: Use main memory for video memory.

When creating an HVM domain, if e.g. another domain is created before
qemu allocates the video memory, the extra 8MB of balloonable memory is
no longer available, because it was consumed by the other domain.

This fixes it by taking video memory from the main memory:

- make hvmloader use e820_malloc to reserve some of the main memory
and notify ioemu of its address through the Xen platform PCI card.
- add XENMAPSPACE_mfn to the xen_add_to_physmap memory op, to allow
ioemu to move the MFNs between the original position and the PCI
mapping, when LFB acceleration is disabled/enabled
- add a remove_from_physmap memory op, to allow ioemu to unmap it
completely for the case of old guests with acceleration disabled.
- add xc_domain_memory_translate_gpfn_list to libxc to allow ioemu to
get the MFNs of the video memory.
- have xend save the PCI memory space instead of ioemu: if a memory
page is there, the guest can access it like usual memory, so xend
can safely be responsible to save it. The extra benefit is that
live migration will apply the logdirty optimization there too.
- handle old saved images, populating the video memory from ioemu if
really needed.

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Aug 27 14:53:39 2008 +0100 (2008-08-27)
parents 0638a5c2cc9f
children be573a356c90
line source
1 /******************************************************************************
2 * xenctrl.h
3 *
4 * A library for low-level access to the Xen control interfaces.
5 *
6 * Copyright (c) 2003-2004, K A Fraser.
7 *
8 * xc_gnttab functions:
9 * Copyright (c) 2007-2008, D G Murray <Derek.Murray@cl.cam.ac.uk>
10 */
12 #ifndef XENCTRL_H
13 #define XENCTRL_H
15 /* Tell the Xen public headers we are a user-space tools build. */
16 #ifndef __XEN_TOOLS__
17 #define __XEN_TOOLS__ 1
18 #endif
20 #include <stddef.h>
21 #include <stdint.h>
22 #include <xen/xen.h>
23 #include <xen/domctl.h>
24 #include <xen/physdev.h>
25 #include <xen/sysctl.h>
26 #include <xen/version.h>
27 #include <xen/event_channel.h>
28 #include <xen/sched.h>
29 #include <xen/memory.h>
30 #include <xen/hvm/params.h>
31 #include <xen/xsm/acm.h>
32 #include <xen/xsm/acm_ops.h>
33 #include <xen/xsm/flask_op.h>
35 #if defined(__i386__) || defined(__x86_64__)
36 #include <xen/foreign/x86_32.h>
37 #include <xen/foreign/x86_64.h>
38 #endif
40 #ifdef __ia64__
41 #define XC_PAGE_SHIFT 14
42 #else
43 #define XC_PAGE_SHIFT 12
44 #endif
45 #define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT)
46 #define XC_PAGE_MASK (~(XC_PAGE_SIZE-1))
48 /*
49 * DEFINITIONS FOR CPU BARRIERS
50 */
52 #if defined(__i386__)
53 #define xen_mb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
54 #define xen_rmb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
55 #define xen_wmb() asm volatile ( "" : : : "memory")
56 #elif defined(__x86_64__)
57 #define xen_mb() asm volatile ( "mfence" : : : "memory")
58 #define xen_rmb() asm volatile ( "lfence" : : : "memory")
59 #define xen_wmb() asm volatile ( "" : : : "memory")
60 #elif defined(__ia64__)
61 #define xen_mb() asm volatile ("mf" ::: "memory")
62 #define xen_rmb() asm volatile ("mf" ::: "memory")
63 #define xen_wmb() asm volatile ("mf" ::: "memory")
64 #else
65 #error "Define barriers"
66 #endif
68 /*
69 * INITIALIZATION FUNCTIONS
70 */
72 /**
73 * This function opens a handle to the hypervisor interface. This function can
74 * be called multiple times within a single process. Multiple processes can
75 * have an open hypervisor interface at the same time.
76 *
77 * Each call to this function should have a corresponding call to
78 * xc_interface_close().
79 *
80 * This function can fail if the caller does not have superuser permission or
81 * if a Xen-enabled kernel is not currently running.
82 *
83 * @return a handle to the hypervisor interface or -1 on failure
84 */
85 int xc_interface_open(void);
87 /**
88 * This function closes an open hypervisor interface.
89 *
90 * This function can fail if the handle does not represent an open interface or
91 * if there were problems closing the interface.
92 *
93 * @parm xc_handle a handle to an open hypervisor interface
94 * @return 0 on success, -1 otherwise.
95 */
96 int xc_interface_close(int xc_handle);
98 /*
99 * KERNEL INTERFACES
100 */
102 /*
103 * Resolve a kernel device name (e.g., "evtchn", "blktap0") into a kernel
104 * device number. Returns -1 on error (and sets errno).
105 */
106 int xc_find_device_number(const char *name);
108 /*
109 * DOMAIN DEBUGGING FUNCTIONS
110 */
/* Header of a domain core-dump image (see xc_domain_dumpcore() below). */
112 typedef struct xc_core_header {
113 unsigned int xch_magic;        /* XC_CORE_MAGIC or XC_CORE_MAGIC_HVM (defined below) */
114 unsigned int xch_nr_vcpus;     /* number of vcpus */
115 unsigned int xch_nr_pages;     /* number of pages */
116 unsigned int xch_ctxt_offset;  /* NOTE(review): the *_offset fields are presumably */
117 unsigned int xch_index_offset; /* byte offsets into the dump file -- confirm */
118 unsigned int xch_pages_offset; /* against the dump-core writer before relying on them */
119 } xc_core_header_t;
121 #define XC_CORE_MAGIC 0xF00FEBED
122 #define XC_CORE_MAGIC_HVM 0xF00FEBEE
124 #ifdef __linux__
126 #include <sys/ptrace.h>
127 #include <thread_db.h>
129 typedef void (*thr_ev_handler_t)(long);
131 void xc_register_event_handler(
132 thr_ev_handler_t h,
133 td_event_e e);
135 long xc_ptrace(
136 int xc_handle,
137 enum __ptrace_request request,
138 uint32_t domid,
139 long addr,
140 long data);
142 int xc_waitdomain(
143 int xc_handle,
144 int domain,
145 int *status,
146 int options);
148 #endif /* __linux__ */
150 /*
151 * DOMAIN MANAGEMENT FUNCTIONS
152 */
/* Per-domain information, filled in by xc_domain_getinfo(). */
154 typedef struct xc_dominfo {
155 uint32_t domid;
156 uint32_t ssidref;
157 unsigned int dying:1, crashed:1, shutdown:1,
158 paused:1, blocked:1, running:1,
159 hvm:1, debugged:1;
160 unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
161 unsigned long nr_pages;
162 unsigned long shared_info_frame;
163 uint64_t cpu_time;
164 unsigned long max_memkb;
165 unsigned int nr_online_vcpus;
166 unsigned int max_vcpu_id;
167 xen_domain_handle_t handle;
168 } xc_dominfo_t;
170 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
/*
 * A vcpu context in any of the layouts the tools may handle: the foreign
 * 32-/64-bit x86 layouts (x86 builds only) or the native layout of this
 * build (.c).
 */
172 typedef union
173 {
174 #if defined(__i386__) || defined(__x86_64__)
175 vcpu_guest_context_x86_64_t x64;
176 vcpu_guest_context_x86_32_t x32;
177 #endif
178 vcpu_guest_context_t c;
179 } vcpu_guest_context_any_t;
/*
 * A shared-info page in any of the layouts the tools may handle: the
 * foreign 32-/64-bit x86 layouts (x86 builds only) or the native layout
 * of this build (.s).
 */
181 typedef union
182 {
183 #if defined(__i386__) || defined(__x86_64__)
184 shared_info_x86_64_t x64;
185 shared_info_x86_32_t x32;
186 #endif
187 shared_info_t s;
188 } shared_info_any_t;
/*
 * A start-info structure in any of the layouts the tools may handle: the
 * foreign 32-/64-bit x86 layouts (x86 builds only) or the native layout
 * of this build (.s).
 */
190 typedef union
191 {
192 #if defined(__i386__) || defined(__x86_64__)
193 start_info_x86_64_t x64;
194 start_info_x86_32_t x32;
195 #endif
196 start_info_t s;
197 } start_info_any_t;
200 int xc_domain_create(int xc_handle,
201 uint32_t ssidref,
202 xen_domain_handle_t handle,
203 uint32_t flags,
204 uint32_t *pdomid);
207 /* Functions to produce a dump of a given domain
208 * xc_domain_dumpcore - produces a dump to a specified file
209 * xc_domain_dumpcore_via_callback - produces a dump, using a specified
210 * callback function
211 */
212 int xc_domain_dumpcore(int xc_handle,
213 uint32_t domid,
214 const char *corename);
216 /* Define the callback function type for xc_domain_dumpcore_via_callback.
217 *
218 * This function is called by the coredump code for every "write",
219 * and passes an opaque object for the use of the function and
220 * created by the caller of xc_domain_dumpcore_via_callback.
221 */
222 typedef int (dumpcore_rtn_t)(void *arg, char *buffer, unsigned int length);
224 int xc_domain_dumpcore_via_callback(int xc_handle,
225 uint32_t domid,
226 void *arg,
227 dumpcore_rtn_t dump_rtn);
229 /*
230 * This function sets the maximum number of vcpus that a domain may create.
231 *
232 * @parm xc_handle a handle to an open hypervisor interface.
233 * @parm domid the domain id in which vcpus are to be created.
234 * @parm max the maximum number of vcpus that the domain may create.
235 * @return 0 on success, -1 on failure.
236 */
237 int xc_domain_max_vcpus(int xc_handle,
238 uint32_t domid,
239 unsigned int max);
241 /**
242 * This function pauses a domain. A paused domain still exists in memory
243 * however it does not receive any timeslices from the hypervisor.
244 *
245 * @parm xc_handle a handle to an open hypervisor interface
246 * @parm domid the domain id to pause
247 * @return 0 on success, -1 on failure.
248 */
249 int xc_domain_pause(int xc_handle,
250 uint32_t domid);
251 /**
252 * This function unpauses a domain. The domain should have been previously
253 * paused.
254 *
255 * @parm xc_handle a handle to an open hypervisor interface
256 * @parm domid the domain id to unpause
257 * @return 0 on success, -1 on failure
258 */
259 int xc_domain_unpause(int xc_handle,
260 uint32_t domid);
262 /**
263 * This function will destroy a domain. Destroying a domain removes the domain
264 * completely from memory. This function should be called after sending the
265 * domain a SHUTDOWN control message to free up the domain resources.
266 *
267 * @parm xc_handle a handle to an open hypervisor interface
268 * @parm domid the domain id to destroy
269 * @return 0 on success, -1 on failure
270 */
271 int xc_domain_destroy(int xc_handle,
272 uint32_t domid);
275 /**
276 * This function resumes a suspended domain. The domain should have
277 * been previously suspended.
278 *
279 * @parm xc_handle a handle to an open hypervisor interface
280 * @parm domid the domain id to resume
281 * @parm fast use cooperative resume (guest must support this)
282 * @return 0 on success, -1 on failure
283 */
284 int xc_domain_resume(int xc_handle,
285 uint32_t domid,
286 int fast);
288 /**
289 * This function will shutdown a domain. This is intended for use in
290 * fully-virtualized domains where this operation is analogous to the
291 * sched_op operations in a paravirtualized domain. The caller is
292 * expected to give the reason for the shutdown.
293 *
294 * @parm xc_handle a handle to an open hypervisor interface
295 * @parm domid the domain id to shut down
296 * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
297 * @return 0 on success, -1 on failure
298 */
299 int xc_domain_shutdown(int xc_handle,
300 uint32_t domid,
301 int reason);
303 int xc_vcpu_setaffinity(int xc_handle,
304 uint32_t domid,
305 int vcpu,
306 uint64_t cpumap);
307 int xc_vcpu_getaffinity(int xc_handle,
308 uint32_t domid,
309 int vcpu,
310 uint64_t *cpumap);
312 /**
313 * This function will return information about one or more domains. It is
314 * designed to iterate over the list of domains. If a single domain is
315 * requested, this function will return the next domain in the list - if
316 * one exists. It is, therefore, important in this case to make sure the
317 * domain requested was the one returned.
318 *
319 * @parm xc_handle a handle to an open hypervisor interface
320 * @parm first_domid the first domain to enumerate information from. Domains
321 * are currently enumerated in order of creation.
322 * @parm max_doms the number of elements in info
323 * @parm info an array of max_doms size that will contain the information for
324 * the enumerated domains.
325 * @return the number of domains enumerated or -1 on error
326 */
327 int xc_domain_getinfo(int xc_handle,
328 uint32_t first_domid,
329 unsigned int max_doms,
330 xc_dominfo_t *info);
333 /**
334 * This function will set the execution context for the specified vcpu.
335 *
336 * @parm xc_handle a handle to an open hypervisor interface
337 * @parm domid the domain to set the vcpu context for
338 * @parm vcpu the vcpu number for the context
339 * @parm ctxt pointer to the cpu context with the values to set
340 * @return 0 on success, -1 on error
341 */
342 int xc_vcpu_setcontext(int xc_handle,
343 uint32_t domid,
344 uint32_t vcpu,
345 vcpu_guest_context_any_t *ctxt);
346 /**
347 * This function will return information about one or more domains, using a
348 * single hypercall. The domain information will be stored into the supplied
349 * array of xc_domaininfo_t structures.
350 *
351 * @parm xc_handle a handle to an open hypervisor interface
352 * @parm first_domain the first domain to enumerate information from.
353 * Domains are currently enumerated in order of creation.
354 * @parm max_domains the number of elements in info
355 * @parm info an array of max_domains size that will contain the information
356 * for the enumerated domains.
357 * @return the number of domains enumerated or -1 on error
358 */
359 int xc_domain_getinfolist(int xc_handle,
360 uint32_t first_domain,
361 unsigned int max_domains,
362 xc_domaininfo_t *info);
364 /**
365 * This function returns information about the context of a hvm domain
366 * @parm xc_handle a handle to an open hypervisor interface
367 * @parm domid the domain to get information from
368 * @parm ctxt_buf a pointer to a structure to store the execution context of
369 * the hvm domain
370 * @parm size the size of ctxt_buf in bytes
371 * @return 0 on success, -1 on failure
372 */
373 int xc_domain_hvm_getcontext(int xc_handle,
374 uint32_t domid,
375 uint8_t *ctxt_buf,
376 uint32_t size);
378 /**
379 * This function will set the context for hvm domain
380 *
381 * @parm xc_handle a handle to an open hypervisor interface
382 * @parm domid the domain to set the hvm domain context for
383 * @parm hvm_ctxt pointer to the hvm context with the values to set
384 * @parm size the size of hvm_ctxt in bytes
385 * @return 0 on success, -1 on failure
386 */
387 int xc_domain_hvm_setcontext(int xc_handle,
388 uint32_t domid,
389 uint8_t *hvm_ctxt,
390 uint32_t size);
392 /**
393 * This function returns information about the execution context of a
394 * particular vcpu of a domain.
395 *
396 * @parm xc_handle a handle to an open hypervisor interface
397 * @parm domid the domain to get information from
398 * @parm vcpu the vcpu number
399 * @parm ctxt a pointer to a structure to store the execution context of the
400 * domain
401 * @return 0 on success, -1 on failure
402 */
403 int xc_vcpu_getcontext(int xc_handle,
404 uint32_t domid,
405 uint32_t vcpu,
406 vcpu_guest_context_any_t *ctxt);
408 typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
409 int xc_vcpu_getinfo(int xc_handle,
410 uint32_t domid,
411 uint32_t vcpu,
412 xc_vcpuinfo_t *info);
414 long long xc_domain_get_cpu_usage(int xc_handle,
415 domid_t domid,
416 int vcpu);
418 int xc_domain_sethandle(int xc_handle, uint32_t domid,
419 xen_domain_handle_t handle);
421 typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
422 int xc_shadow_control(int xc_handle,
423 uint32_t domid,
424 unsigned int sop,
425 unsigned long *dirty_bitmap,
426 unsigned long pages,
427 unsigned long *mb,
428 uint32_t mode,
429 xc_shadow_op_stats_t *stats);
431 int xc_sedf_domain_set(int xc_handle,
432 uint32_t domid,
433 uint64_t period, uint64_t slice,
434 uint64_t latency, uint16_t extratime,
435 uint16_t weight);
437 int xc_sedf_domain_get(int xc_handle,
438 uint32_t domid,
439 uint64_t* period, uint64_t *slice,
440 uint64_t *latency, uint16_t *extratime,
441 uint16_t *weight);
443 int xc_sched_credit_domain_set(int xc_handle,
444 uint32_t domid,
445 struct xen_domctl_sched_credit *sdom);
447 int xc_sched_credit_domain_get(int xc_handle,
448 uint32_t domid,
449 struct xen_domctl_sched_credit *sdom);
451 /**
452 * This function sends a trigger to a domain.
453 *
454 * @parm xc_handle a handle to an open hypervisor interface
455 * @parm domid the domain id to send trigger
456 * @parm trigger the trigger type
457 * @parm vcpu the vcpu number to send trigger
458 * @return 0 on success, -1 on failure
459 */
460 int xc_domain_send_trigger(int xc_handle,
461 uint32_t domid,
462 uint32_t trigger,
463 uint32_t vcpu);
465 /**
466 * This function enables or disables debugging of a domain.
467 *
468 * @parm xc_handle a handle to an open hypervisor interface
469 * @parm domid the domain id to set debugging on
470 * @parm enable true to enable debugging
471 * @return 0 on success, -1 on failure
472 */
473 int xc_domain_setdebugging(int xc_handle,
474 uint32_t domid,
475 unsigned int enable);
477 /*
478 * EVENT CHANNEL FUNCTIONS
479 */
481 /* A port identifier is guaranteed to fit in 31 bits. */
482 typedef int evtchn_port_or_error_t;
484 /**
485 * This function allocates an unbound port. Ports are named endpoints used for
486 * interdomain communication. This function is most useful in opening a
487 * well-known port within a domain to receive events on.
488 *
489 * NOTE: If you are allocating a *local* unbound port, you probably want to
490 * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
491 * ports *only* during domain creation.
492 *
493 * @parm xc_handle a handle to an open hypervisor interface
494 * @parm dom the ID of the local domain (the 'allocatee')
495 * @parm remote_dom the ID of the domain who will later bind
496 * @return allocated port (in @dom) on success, -1 on failure
497 */
498 evtchn_port_or_error_t
499 xc_evtchn_alloc_unbound(int xc_handle,
500 uint32_t dom,
501 uint32_t remote_dom);
503 int xc_evtchn_reset(int xc_handle,
504 uint32_t dom);
505 int xc_evtchn_status(int xc_handle,
506 uint32_t dom,
507 uint32_t port);
509 /*
510 * Return a handle to the event channel driver, or -1 on failure, in which case
511 * errno will be set appropriately.
512 */
513 int xc_evtchn_open(void);
515 /*
516 * Close a handle previously allocated with xc_evtchn_open().
517 */
518 int xc_evtchn_close(int xce_handle);
520 /*
521 * Return an fd that can be select()ed on for further calls to
522 * xc_evtchn_pending().
523 */
524 int xc_evtchn_fd(int xce_handle);
526 /*
527 * Notify the given event channel. Returns -1 on failure, in which case
528 * errno will be set appropriately.
529 */
530 int xc_evtchn_notify(int xce_handle, evtchn_port_t port);
532 /*
533 * Returns a new event port awaiting interdomain connection from the given
534 * domain ID, or -1 on failure, in which case errno will be set appropriately.
535 */
536 evtchn_port_or_error_t
537 xc_evtchn_bind_unbound_port(int xce_handle, int domid);
539 /*
540 * Returns a new event port bound to the remote port for the given domain ID,
541 * or -1 on failure, in which case errno will be set appropriately.
542 */
543 evtchn_port_or_error_t
544 xc_evtchn_bind_interdomain(int xce_handle, int domid,
545 evtchn_port_t remote_port);
547 /*
548 * Bind an event channel to the given VIRQ. Returns the event channel bound to
549 * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
550 */
551 evtchn_port_or_error_t
552 xc_evtchn_bind_virq(int xce_handle, unsigned int virq);
554 /*
555 * Unbind the given event channel. Returns -1 on failure, in which case errno
556 * will be set appropriately.
557 */
558 int xc_evtchn_unbind(int xce_handle, evtchn_port_t port);
560 /*
561 * Return the next event channel to become pending, or -1 on failure, in which
562 * case errno will be set appropriately.
563 */
564 evtchn_port_or_error_t
565 xc_evtchn_pending(int xce_handle);
567 /*
568 * Unmask the given event channel. Returns -1 on failure, in which case errno
569 * will be set appropriately.
570 */
571 int xc_evtchn_unmask(int xce_handle, evtchn_port_t port);
573 int xc_physdev_pci_access_modify(int xc_handle,
574 uint32_t domid,
575 int bus,
576 int dev,
577 int func,
578 int enable);
580 int xc_readconsolering(int xc_handle,
581 char **pbuffer,
582 unsigned int *pnr_chars,
583 int clear, int incremental, uint32_t *pindex);
585 int xc_send_debug_keys(int xc_handle, char *keys);
587 typedef xen_sysctl_physinfo_t xc_physinfo_t;
588 typedef uint32_t xc_cpu_to_node_t;
589 int xc_physinfo(int xc_handle,
590 xc_physinfo_t *info);
592 int xc_sched_id(int xc_handle,
593 int *sched_id);
595 typedef xen_sysctl_cpuinfo_t xc_cpuinfo_t;
596 int xc_getcpuinfo(int xc_handle, int max_cpus,
597 xc_cpuinfo_t *info, int *nr_cpus);
599 int xc_domain_setmaxmem(int xc_handle,
600 uint32_t domid,
601 unsigned int max_memkb);
603 int xc_domain_set_memmap_limit(int xc_handle,
604 uint32_t domid,
605 unsigned long map_limitkb);
607 int xc_domain_set_time_offset(int xc_handle,
608 uint32_t domid,
609 int32_t time_offset_seconds);
611 int xc_domain_memory_increase_reservation(int xc_handle,
612 uint32_t domid,
613 unsigned long nr_extents,
614 unsigned int extent_order,
615 unsigned int mem_flags,
616 xen_pfn_t *extent_start);
618 int xc_domain_memory_decrease_reservation(int xc_handle,
619 uint32_t domid,
620 unsigned long nr_extents,
621 unsigned int extent_order,
622 xen_pfn_t *extent_start);
624 int xc_domain_memory_populate_physmap(int xc_handle,
625 uint32_t domid,
626 unsigned long nr_extents,
627 unsigned int extent_order,
628 unsigned int mem_flags,
629 xen_pfn_t *extent_start);
631 int xc_domain_memory_translate_gpfn_list(int xc_handle,
632 uint32_t domid,
633 unsigned long nr_gpfns,
634 xen_pfn_t *gpfn_list,
635 xen_pfn_t *mfn_list);
637 int xc_domain_ioport_permission(int xc_handle,
638 uint32_t domid,
639 uint32_t first_port,
640 uint32_t nr_ports,
641 uint32_t allow_access);
643 int xc_domain_irq_permission(int xc_handle,
644 uint32_t domid,
645 uint8_t pirq,
646 uint8_t allow_access);
648 int xc_domain_iomem_permission(int xc_handle,
649 uint32_t domid,
650 unsigned long first_mfn,
651 unsigned long nr_mfns,
652 uint8_t allow_access);
654 int xc_domain_pin_memory_cacheattr(int xc_handle,
655 uint32_t domid,
656 uint64_t start,
657 uint64_t end,
658 uint32_t type);
660 unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
661 unsigned long mfn);
663 typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
664 typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
665 /* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
666 arrays. */
667 int xc_perfc_control(int xc_handle,
668 uint32_t op,
669 xc_perfc_desc_t *desc,
670 xc_perfc_val_t *val,
671 int *nbr_desc,
672 int *nbr_val);
674 /**
675 * Memory maps a range within one domain to a local address range. Mappings
676 * should be unmapped with munmap and should follow the same rules as mmap
677 * regarding page alignment. Returns NULL on failure.
678 *
679 * In Linux, the ring queue for the control channel is accessible by mapping
680 * the shared_info_frame (from xc_domain_getinfo()) + 2048. The structure
681 * stored there is of type control_if_t.
682 *
683 * @parm xc_handle a handle on an open hypervisor interface
684 * @parm dom the domain to map memory from
685 * @parm size the amount of memory to map (in multiples of page size)
686 * @parm prot same flag as in mmap().
687 * @parm mfn the frame address to map.
688 */
689 void *xc_map_foreign_range(int xc_handle, uint32_t dom,
690 int size, int prot,
691 unsigned long mfn );
693 void *xc_map_foreign_pages(int xc_handle, uint32_t dom, int prot,
694 const xen_pfn_t *arr, int num );
696 /**
697 * Like xc_map_foreign_pages(), except it can succeed partially.
698 * When a page cannot be mapped, its PFN in @arr is or'ed with
699 * 0xF0000000 to indicate the error.
700 */
701 void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
702 xen_pfn_t *arr, int num );
704 /**
705 * Translates a virtual address in the context of a given domain and
706 * vcpu returning the machine page frame number of the associated
707 * page.
708 *
709 * @parm xc_handle a handle on an open hypervisor interface
710 * @parm dom the domain to perform the translation in
711 * @parm vcpu the vcpu to perform the translation on
712 * @parm virt the virtual address to translate
713 */
714 unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
715 int vcpu, unsigned long long virt);
718 /**
719 * DEPRECATED. Avoid using this, as it does not correctly account for PFNs
720 * without a backing MFN.
721 */
722 int xc_get_pfn_list(int xc_handle, uint32_t domid, uint64_t *pfn_buf,
723 unsigned long max_pfns);
725 unsigned long xc_ia64_fpsr_default(void);
727 int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
728 unsigned long dst_pfn, const char *src_page);
730 int xc_clear_domain_page(int xc_handle, uint32_t domid,
731 unsigned long dst_pfn);
733 long xc_get_max_pages(int xc_handle, uint32_t domid);
735 int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
736 domid_t dom);
738 int xc_memory_op(int xc_handle, int cmd, void *arg);
740 int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
741 int num, uint32_t *arr);
744 /* Get current total pages allocated to a domain. */
745 long xc_get_tot_pages(int xc_handle, uint32_t domid);
747 /**
748 * This function retrieves the number of bytes available
749 * in the heap in a specific range of address-widths and nodes.
750 *
751 * @parm xc_handle a handle to an open hypervisor interface
753 * @parm min_width the smallest address width to query (0 if don't care)
754 * @parm max_width the largest address width to query (0 if don't care)
755 * @parm node the node to query (-1 for all)
756 * @parm *bytes caller variable to put total bytes counted
757 * @return 0 on success, <0 on failure.
758 */
759 int xc_availheap(int xc_handle, int min_width, int max_width, int node,
760 uint64_t *bytes);
762 /*
763 * Trace Buffer Operations
764 */
766 /**
767 * xc_tbuf_enable - enable tracing buffers
768 *
769 * @parm xc_handle a handle to an open hypervisor interface
770 * @parm cnt size of tracing buffers to create (in pages)
771 * @parm mfn location to store mfn of the trace buffers to
772 * @parm size location to store the size (in bytes) of a trace buffer to
773 *
774 * Gets the machine address of the trace pointer area and the size of the
775 * per CPU buffers.
776 */
777 int xc_tbuf_enable(int xc_handle, unsigned long pages,
778 unsigned long *mfn, unsigned long *size);
780 /*
781 * Disable tracing buffers.
782 */
783 int xc_tbuf_disable(int xc_handle);
785 /**
786 * This function sets the size of the trace buffers. Setting the size
787 * is currently a one-shot operation that may be performed either at boot
788 * time or via this interface, not both. The buffer size must be set before
789 * enabling tracing.
790 *
791 * @parm xc_handle a handle to an open hypervisor interface
792 * @parm size the size in pages per cpu for the trace buffers
793 * @return 0 on success, -1 on failure.
794 */
795 int xc_tbuf_set_size(int xc_handle, unsigned long size);
797 /**
798 * This function retrieves the current size of the trace buffers.
799 * Note that the size returned is in terms of bytes, not pages.
801 * @parm xc_handle a handle to an open hypervisor interface
802 * @parm size will contain the size in bytes for the trace buffers
803 * @return 0 on success, -1 on failure.
804 */
805 int xc_tbuf_get_size(int xc_handle, unsigned long *size);
807 int xc_tbuf_set_cpu_mask(int xc_handle, uint32_t mask);
809 int xc_tbuf_set_evt_mask(int xc_handle, uint32_t mask);
811 int xc_domctl(int xc_handle, struct xen_domctl *domctl);
812 int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl);
814 int xc_version(int xc_handle, int cmd, void *arg);
816 int xc_acm_op(int xc_handle, int cmd, void *arg, unsigned long arg_size);
818 int xc_flask_op(int xc_handle, flask_op_t *op);
820 /*
821 * Subscribe to state changes in a domain via evtchn.
822 * Returns -1 on failure, in which case errno will be set appropriately.
823 */
824 int xc_domain_subscribe_for_suspend(
825 int xc_handle, domid_t domid, evtchn_port_t port);
827 /**************************
828 * GRANT TABLE OPERATIONS *
829 **************************/
831 /*
832 * Return a handle to the grant table driver, or -1 on failure, in which case
833 * errno will be set appropriately.
834 */
835 int xc_gnttab_open(void);
837 /*
838 * Close a handle previously allocated with xc_gnttab_open().
839 */
840 int xc_gnttab_close(int xcg_handle);
842 /*
843 * Memory maps a grant reference from one domain to a local address range.
844 * Mappings should be unmapped with xc_gnttab_munmap. Returns NULL on failure.
845 *
846 * @parm xcg_handle a handle on an open grant table interface
847 * @parm domid the domain to map memory from
848 * @parm ref the grant reference ID to map
849 * @parm prot same flag as in mmap()
850 */
851 void *xc_gnttab_map_grant_ref(int xcg_handle,
852 uint32_t domid,
853 uint32_t ref,
854 int prot);
856 /**
857 * Memory maps one or more grant references from one or more domains to a
858 * contiguous local address range. Mappings should be unmapped with
859 * xc_gnttab_munmap. Returns NULL on failure.
860 *
861 * @parm xcg_handle a handle on an open grant table interface
862 * @parm count the number of grant references to be mapped
863 * @parm domids an array of @count domain IDs by which the corresponding @refs
864 * were granted
865 * @parm refs an array of @count grant references to be mapped
866 * @parm prot same flag as in mmap()
867 */
868 void *xc_gnttab_map_grant_refs(int xcg_handle,
869 uint32_t count,
870 uint32_t *domids,
871 uint32_t *refs,
872 int prot);
874 /**
875 * Memory maps one or more grant references from one domain to a
876 * contiguous local address range. Mappings should be unmapped with
877 * xc_gnttab_munmap. Returns NULL on failure.
878 *
879 * @parm xcg_handle a handle on an open grant table interface
880 * @parm count the number of grant references to be mapped
881 * @parm domid the domain to map memory from
882 * @parm refs an array of @count grant references to be mapped
883 * @parm prot same flag as in mmap()
884 */
885 void *xc_gnttab_map_domain_grant_refs(int xcg_handle,
886 uint32_t count,
887 uint32_t domid,
888 uint32_t *refs,
889 int prot);
891 /*
892 * Unmaps the @count pages starting at @start_address, which were mapped by a
893 * call to xc_gnttab_map_grant_ref or xc_gnttab_map_grant_refs. Returns zero
894 * on success, otherwise sets errno and returns non-zero.
895 */
896 int xc_gnttab_munmap(int xcg_handle,
897 void *start_address,
898 uint32_t count);
900 /*
901 * Sets the maximum number of grants that may be mapped by the given instance
902 * to @count.
903 *
904 * N.B. This function must be called after opening the handle, and before any
905 * other functions are invoked on it.
906 *
907 * N.B. When variable-length grants are mapped, fragmentation may be observed,
908 * and it may not be possible to satisfy requests up to the maximum number
909 * of grants.
910 */
911 int xc_gnttab_set_max_grants(int xcg_handle,
912 uint32_t count);
914 int xc_physdev_map_pirq(int xc_handle,
915 int domid,
916 int index,
917 int *pirq);
919 int xc_physdev_map_pirq_msi(int xc_handle,
920 int domid,
921 int index,
922 int *pirq,
923 int devfn,
924 int bus,
925 int entry_nr,
926 uint64_t table_base);
928 int xc_physdev_unmap_pirq(int xc_handle,
929 int domid,
930 int pirq);
932 int xc_hvm_set_pci_intx_level(
933 int xc_handle, domid_t dom,
934 uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
935 unsigned int level);
936 int xc_hvm_set_isa_irq_level(
937 int xc_handle, domid_t dom,
938 uint8_t isa_irq,
939 unsigned int level);
941 int xc_hvm_set_pci_link_route(
942 int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq);
945 /*
946 * Track dirty bit changes in the VRAM area
947 *
948 * All of this is done atomically:
949 * - get the dirty bitmap since the last call
950 * - set up dirty tracking area for period up to the next call
951 * - clear the dirty tracking area.
952 *
953 * Returns -ENODATA and does not fill bitmap if the area has changed since the
954 * last call.
955 */
956 int xc_hvm_track_dirty_vram(
957 int xc_handle, domid_t dom,
958 uint64_t first_pfn, uint64_t nr,
959 unsigned long *bitmap);
961 /*
962 * Notify that some pages got modified by the Device Model
963 */
964 int xc_hvm_modified_memory(
965 int xc_handle, domid_t dom, uint64_t first_pfn, uint64_t nr);
967 /*
968 * Set a range of memory to a specific type.
969 * Allowed types are HVMMEM_ram_rw, HVMMEM_ram_ro, HVMMEM_mmio_dm
970 */
971 int xc_hvm_set_mem_type(
972 int xc_handle, domid_t dom, hvmmem_type_t memtype, uint64_t first_pfn, uint64_t nr);
/* Error classes reported through xc_error.code (see below). */
typedef enum {
    XC_ERROR_NONE = 0,
    XC_INTERNAL_ERROR = 1,
    XC_INVALID_KERNEL = 2,
    XC_INVALID_PARAM = 3,
    XC_OUT_OF_MEMORY = 4,
} xc_error_code;
#define XC_MAX_ERROR_MSG_LEN 1024

/* The most recent libxc error: a code (xc_error_code) plus a
 * human-readable message. */
typedef struct {
    int code;
    char message[XC_MAX_ERROR_MSG_LEN];
} xc_error;
989 /*
990 * Return a pointer to the last error. This pointer and the
991 * data pointed to are only valid until the next call to
992 * libxc.
993 */
994 const xc_error *xc_get_last_error(void);
996 /*
997 * Clear the last error
998 */
999 void xc_clear_last_error(void);
1001 typedef void (*xc_error_handler)(const xc_error *err);
1003 /*
1004 * The default error handler which prints to stderr
1005 */
1006 void xc_default_error_handler(const xc_error *err);
1008 /*
1009 * Convert an error code into a text description
1010 */
1011 const char *xc_error_code_to_desc(int code);
1013 /*
1014 * Registers a callback to handle errors
1015 */
1016 xc_error_handler xc_set_error_handler(xc_error_handler handler);
1018 int xc_set_hvm_param(int handle, domid_t dom, int param, unsigned long value);
1019 int xc_get_hvm_param(int handle, domid_t dom, int param, unsigned long *value);
/* IA64 specific, nvram save */
int xc_ia64_save_to_nvram(int xc_handle, uint32_t dom);

/* IA64 specific, nvram init */
int xc_ia64_nvram_init(int xc_handle, char *dom_name, uint32_t dom);

/* IA64 specific, set guest OS type optimizations */
int xc_ia64_set_os_type(int xc_handle, char *guest_os_type, uint32_t dom);
/* HVM guest pass-through */

/* Assign PCI device @machine_bdf (bus/device/function) to domain @domid. */
int xc_assign_device(int xc_handle,
                     uint32_t domid,
                     uint32_t machine_bdf);

/*
 * Query the devices grouped with @machine_bdf.  Up to @max_sdevs
 * entries are written to @sdev_array; *num_sdevs reports how many.
 */
int xc_get_device_group(int xc_handle,
                        uint32_t domid,
                        uint32_t machine_bdf,
                        uint32_t max_sdevs,
                        uint32_t *num_sdevs,
                        uint32_t *sdev_array);

/* Test whether @machine_bdf may be assigned to @domid. */
int xc_test_assign_device(int xc_handle,
                          uint32_t domid,
                          uint32_t machine_bdf);

/* Remove a previously assigned device from @domid. */
int xc_deassign_device(int xc_handle,
                       uint32_t domid,
                       uint32_t machine_bdf);
/*
 * Add (@add_mapping non-zero) or remove a mapping of @nr_mfns machine
 * frames starting at @first_mfn to guest frames starting at @first_gfn.
 */
int xc_domain_memory_mapping(int xc_handle,
                             uint32_t domid,
                             unsigned long first_gfn,
                             unsigned long first_mfn,
                             unsigned long nr_mfns,
                             uint32_t add_mapping);

/*
 * Add or remove a mapping of @nr_ports machine I/O ports starting at
 * @first_mport to guest ports starting at @first_gport.
 */
int xc_domain_ioport_mapping(int xc_handle,
                             uint32_t domid,
                             uint32_t first_gport,
                             uint32_t first_mport,
                             uint32_t nr_ports,
                             uint32_t add_mapping);
/* Update the MSI binding of pirq @pirq for domain @domid
 * (guest vector @gvec, flags @gflags). */
int xc_domain_update_msi_irq(
    int xc_handle,
    uint32_t domid,
    uint32_t gvec,
    uint32_t pirq,
    uint32_t gflags);

/*
 * Bind machine IRQ @machine_irq to a passed-through device interrupt of
 * domain @domid.  @irq_type selects whether the (bus, device, intx) PCI
 * coordinates or @isa_irq apply.
 */
int xc_domain_bind_pt_irq(int xc_handle,
                          uint32_t domid,
                          uint8_t machine_irq,
                          uint8_t irq_type,
                          uint8_t bus,
                          uint8_t device,
                          uint8_t intx,
                          uint8_t isa_irq);

/* Undo a binding established with xc_domain_bind_pt_irq(). */
int xc_domain_unbind_pt_irq(int xc_handle,
                            uint32_t domid,
                            uint8_t machine_irq,
                            uint8_t irq_type,
                            uint8_t bus,
                            uint8_t device,
                            uint8_t intx,
                            uint8_t isa_irq);
/* Convenience wrapper: bind @machine_irq as a PCI INTx interrupt of
 * device (@bus, @device, @intx) for domain @domid. */
int xc_domain_bind_pt_pci_irq(int xc_handle,
                              uint32_t domid,
                              uint8_t machine_irq,
                              uint8_t bus,
                              uint8_t device,
                              uint8_t intx);

/* Convenience wrapper: bind @machine_irq as an ISA interrupt for @domid. */
int xc_domain_bind_pt_isa_irq(int xc_handle,
                              uint32_t domid,
                              uint8_t machine_irq);
/* Set/get the machine address width (in bits) for domain @domid. */
int xc_domain_set_machine_address_size(int handle,
				       uint32_t domid,
				       unsigned int width);
int xc_domain_get_machine_address_size(int handle,
				       uint32_t domid);
/* Set the target domain */
int xc_domain_set_target(int xc_handle,
                         uint32_t domid,
                         uint32_t target);
1111 #if defined(__i386__) || defined(__x86_64__)
1112 int xc_cpuid_check(int xc,
1113 const unsigned int *input,
1114 const char **config,
1115 char **config_transformed);
1116 int xc_cpuid_set(int xc,
1117 domid_t domid,
1118 const unsigned int *input,
1119 const char **config,
1120 char **config_transformed);
1121 int xc_cpuid_apply_policy(int xc,
1122 domid_t domid);
1123 void xc_cpuid_to_str(const unsigned int *regs,
1124 char **strs);
1125 #endif
/* Per-P-state statistics entry. */
struct xc_px_val {
    uint64_t freq;        /* Px core frequency */
    uint64_t residency;   /* Px residency time */
    uint64_t count;       /* Px transition count */
};
/* Aggregate P-state statistics for one CPU.  Caller supplies the
 * trans_pt and pt buffers (sized per xc_pm_get_max_px). */
struct xc_px_stat {
    uint8_t total;        /* total Px states */
    uint8_t usable;       /* usable Px states */
    uint8_t last;         /* last Px state */
    uint8_t cur;          /* current Px state */
    uint64_t *trans_pt;   /* Px transition table */
    struct xc_px_val *pt; /* per-state values (freq/residency/count) */
};
/* P-state statistics: query the count, fetch them, or reset the counters. */
int xc_pm_get_max_px(int xc_handle, int cpuid, int *max_px);
int xc_pm_get_pxstat(int xc_handle, int cpuid, struct xc_px_stat *pxpt);
int xc_pm_reset_pxstat(int xc_handle, int cpuid);
/* Aggregate C-state statistics for one CPU.  Caller supplies the
 * triggers and residencies buffers (sized per xc_pm_get_max_cx). */
struct xc_cx_stat {
    uint32_t nr;           /* entry nr in triggers & residencies, including C0 */
    uint32_t last;         /* last Cx state */
    uint64_t idle_time;    /* idle time from boot */
    uint64_t *triggers;    /* Cx trigger counts */
    uint64_t *residencies; /* Cx residencies */
};
typedef struct xc_cx_stat xc_cx_stat_t;
/* C-state statistics: query the count, fetch them, or reset the counters. */
int xc_pm_get_max_cx(int xc_handle, int cpuid, int *max_cx);
int xc_pm_get_cxstat(int xc_handle, int cpuid, struct xc_cx_stat *cxpt);
int xc_pm_reset_cxstat(int xc_handle, int cpuid);
#endif /* XENCTRL_H */