tools/libxc/xenctrl.h @ 12271:f56b7ade7068 (direct-io.hg)

[BLKTAP] ia64 support
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Tue Nov 07 11:17:39 2006 +0000

/******************************************************************************
 * xenctrl.h
 *
 * A library for low-level access to the Xen control interfaces.
 *
 * Copyright (c) 2003-2004, K A Fraser.
 */

#ifndef XENCTRL_H
#define XENCTRL_H

/* Tell the Xen public headers we are a user-space tools build. */
#ifndef __XEN_TOOLS__
#define __XEN_TOOLS__ 1
#endif

#include <stddef.h>
#include <stdint.h>
#include <xen/xen.h>
#include <xen/domctl.h>
#include <xen/sysctl.h>
#include <xen/version.h>
#include <xen/event_channel.h>
#include <xen/sched.h>
#include <xen/memory.h>
#include <xen/acm.h>
#include <xen/acm_ops.h>

#ifdef __ia64__
#define XC_PAGE_SHIFT           14
#else
#define XC_PAGE_SHIFT           12
#endif
#define XC_PAGE_SIZE            (1UL << XC_PAGE_SHIFT)
#define XC_PAGE_MASK            (~(XC_PAGE_SIZE-1))

/*
 * DEFINITIONS FOR CPU BARRIERS
 */

#if defined(__i386__)
#define mb()  __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
#define rmb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
#define wmb() __asm__ __volatile__ ( "" : : : "memory")
#elif defined(__x86_64__)
#define mb()  __asm__ __volatile__ ( "mfence" : : : "memory")
#define rmb() __asm__ __volatile__ ( "lfence" : : : "memory")
#define wmb() __asm__ __volatile__ ( "" : : : "memory")
#elif defined(__ia64__)
#define mb()  __asm__ __volatile__ ("mf" ::: "memory")
#define rmb() __asm__ __volatile__ ("mf" ::: "memory")
#define wmb() __asm__ __volatile__ ("mf" ::: "memory")
#elif defined(__powerpc__)
/* XXX loosen these up later */
#define mb()  __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("sync" : : : "memory") /* lwsync? */
#define wmb() __asm__ __volatile__ ("sync" : : : "memory") /* eieio? */
#else
#error "Define barriers"
#endif

/*
 * INITIALIZATION FUNCTIONS
 */

/**
 * This function opens a handle to the hypervisor interface. This function can
 * be called multiple times within a single process. Multiple processes can
 * have an open hypervisor interface at the same time.
 *
 * Each call to this function should have a corresponding call to
 * xc_interface_close().
 *
 * This function can fail if the caller does not have superuser permission or
 * if a Xen-enabled kernel is not currently running.
 *
 * @return a handle to the hypervisor interface or -1 on failure
 */
int xc_interface_open(void);

/**
 * This function closes an open hypervisor interface.
 *
 * This function can fail if the handle does not represent an open interface or
 * if there were problems closing the interface.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @return 0 on success, -1 otherwise.
 */
int xc_interface_close(int xc_handle);
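
/*
 * Illustrative usage sketch (not part of the original documentation): pair
 * every successful xc_interface_open() with xc_interface_close().  Assumes
 * <stdio.h> for perror().
 *
 *     int xc = xc_interface_open();
 *     if ( xc == -1 )
 *         perror("xc_interface_open");
 *     else
 *     {
 *         // ... issue control operations using the handle ...
 *         if ( xc_interface_close(xc) == -1 )
 *             perror("xc_interface_close");
 *     }
 */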

/*
 * KERNEL INTERFACES
 */

/*
 * Resolve a kernel device name (e.g., "evtchn", "blktap0") into a kernel
 * device number. Returns -1 on error (and sets errno).
 */
int xc_find_device_number(const char *name);
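
/*
 * Sketch (illustrative only): resolve the "evtchn" device name; errno is
 * set on failure as described above.
 *
 *     int devnum = xc_find_device_number("evtchn");
 *     if ( devnum == -1 )
 *         perror("xc_find_device_number");
 */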

/*
 * DOMAIN DEBUGGING FUNCTIONS
 */

typedef struct xc_core_header {
    unsigned int xch_magic;
    unsigned int xch_nr_vcpus;
    unsigned int xch_nr_pages;
    unsigned int xch_ctxt_offset;
    unsigned int xch_index_offset;
    unsigned int xch_pages_offset;
} xc_core_header_t;

#define XC_CORE_MAGIC     0xF00FEBED
#define XC_CORE_MAGIC_HVM 0xF00FEBEE

#ifdef __linux__

#include <sys/ptrace.h>
#include <thread_db.h>

typedef void (*thr_ev_handler_t)(long);

void xc_register_event_handler(
    thr_ev_handler_t h,
    td_event_e e);

long xc_ptrace(
    int xc_handle,
    enum __ptrace_request request,
    uint32_t domid,
    long addr,
    long data);

int xc_waitdomain(
    int xc_handle,
    int domain,
    int *status,
    int options);

#endif /* __linux__ */

/*
 * DOMAIN MANAGEMENT FUNCTIONS
 */

typedef struct xc_dominfo {
    uint32_t      domid;
    uint32_t      ssidref;
    unsigned int  dying:1, crashed:1, shutdown:1,
                  paused:1, blocked:1, running:1,
                  hvm:1;
    unsigned int  shutdown_reason; /* only meaningful if shutdown==1 */
    unsigned long nr_pages;
    unsigned long shared_info_frame;
    uint64_t      cpu_time;
    unsigned long max_memkb;
    unsigned int  nr_online_vcpus;
    unsigned int  max_vcpu_id;
    xen_domain_handle_t handle;
} xc_dominfo_t;

typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
int xc_domain_create(int xc_handle,
                     uint32_t ssidref,
                     xen_domain_handle_t handle,
                     uint32_t flags,
                     uint32_t *pdomid);
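
/*
 * Creation sketch (illustrative; the ssidref, handle bytes and flags shown
 * here are placeholder values, and passing a zero *pdomid is assumed to let
 * Xen choose an unused domain ID):
 *
 *     uint32_t domid = 0;
 *     xen_domain_handle_t handle = { 0 };   // caller-chosen UUID bytes
 *     if ( xc_domain_create(xc, 0, handle, 0, &domid) == 0 )
 *         printf("created domain %u\n", domid);
 */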

/* Functions to produce a dump of a given domain
 *  xc_domain_dumpcore - produces a dump to a specified file
 *  xc_domain_dumpcore_via_callback - produces a dump, using a specified
 *                                    callback function
 */
int xc_domain_dumpcore(int xc_handle,
                       uint32_t domid,
                       const char *corename);

/* Define the callback function type for xc_domain_dumpcore_via_callback.
 *
 * This function is called by the coredump code for every "write"; it is
 * passed the opaque object that the caller of
 * xc_domain_dumpcore_via_callback created for the function's use.
 */
typedef int (dumpcore_rtn_t)(void *arg, char *buffer, unsigned int length);

int xc_domain_dumpcore_via_callback(int xc_handle,
                                    uint32_t domid,
                                    void *arg,
                                    dumpcore_rtn_t dump_rtn);
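
/*
 * Callback sketch (hypothetical helper): stream the dump into a FILE*
 * passed as the opaque argument.  A zero return indicating success is an
 * assumption here; needs <stdio.h>.
 *
 *     static int dump_to_file(void *arg, char *buffer, unsigned int length)
 *     {
 *         FILE *f = arg;
 *         return (fwrite(buffer, 1, length, f) == length) ? 0 : -1;
 *     }
 *
 *     // ... xc_domain_dumpcore_via_callback(xc, domid, f, dump_to_file);
 */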

/*
 * This function sets the maximum number of vcpus that a domain may create.
 *
 * @parm xc_handle a handle to an open hypervisor interface.
 * @parm domid the domain id in which vcpus are to be created.
 * @parm max the maximum number of vcpus that the domain may create.
 * @return 0 on success, -1 on failure.
 */
int xc_domain_max_vcpus(int xc_handle,
                        uint32_t domid,
                        unsigned int max);

/**
 * This function pauses a domain. A paused domain still exists in memory;
 * however, it does not receive any timeslices from the hypervisor.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to pause
 * @return 0 on success, -1 on failure.
 */
int xc_domain_pause(int xc_handle,
                    uint32_t domid);
/**
 * This function unpauses a domain.  The domain should have been previously
 * paused.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to unpause
 * @return 0 on success, -1 on failure
 */
int xc_domain_unpause(int xc_handle,
                      uint32_t domid);
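
/*
 * Sketch: pause a domain around an inspection step, then let it run again
 * (error handling elided; xc and domid assumed from context).
 *
 *     if ( xc_domain_pause(xc, domid) == 0 )
 *     {
 *         // ... inspect or snapshot the paused domain ...
 *         xc_domain_unpause(xc, domid);
 *     }
 */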

/**
 * This function will destroy a domain.  Destroying a domain removes the domain
 * completely from memory.  This function should be called after sending the
 * domain a SHUTDOWN control message to free up the domain resources.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to destroy
 * @return 0 on success, -1 on failure
 */
int xc_domain_destroy(int xc_handle,
                      uint32_t domid);

/**
 * This function will shutdown a domain. This is intended for use in
 * fully-virtualized domains where this operation is analogous to the
 * sched_op operations in a paravirtualized domain. The caller is
 * expected to give the reason for the shutdown.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain id to shut down
 * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
 * @return 0 on success, -1 on failure
 */
int xc_domain_shutdown(int xc_handle,
                       uint32_t domid,
                       int reason);

int xc_vcpu_setaffinity(int xc_handle,
                        uint32_t domid,
                        int vcpu,
                        uint64_t cpumap);
int xc_vcpu_getaffinity(int xc_handle,
                        uint32_t domid,
                        int vcpu,
                        uint64_t *cpumap);

/**
 * This function will return information about one or more domains. It is
 * designed to iterate over the list of domains. If a single domain is
 * requested, this function will return the next domain in the list - if
 * one exists. It is, therefore, important in this case to make sure the
 * domain requested was the one returned.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm first_domid the first domain to enumerate information from.  Domains
 *                   are currently enumerated in order of creation.
 * @parm max_doms the number of elements in info
 * @parm info an array of max_doms size that will contain the information for
 *            the enumerated domains.
 * @return the number of domains enumerated or -1 on error
 */
int xc_domain_getinfo(int xc_handle,
                      uint32_t first_domid,
                      unsigned int max_doms,
                      xc_dominfo_t *info);
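
/*
 * Iteration sketch: walk every domain one at a time.  Because the call
 * returns the next existing domain at or after the requested ID, the loop
 * restarts from info.domid + 1 on each pass (needs <stdio.h>).
 *
 *     xc_dominfo_t info;
 *     uint32_t next = 0;
 *     while ( xc_domain_getinfo(xc, next, 1, &info) == 1 )
 *     {
 *         printf("domain %u: %lu pages\n", info.domid, info.nr_pages);
 *         next = info.domid + 1;
 *     }
 */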

/**
 * This function will set the execution context for the specified vcpu.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain to set the vcpu context for
 * @parm vcpu the vcpu number for the context
 * @parm ctxt pointer to the cpu context with the values to set
 * @return 0 on success, -1 on error
 */
int xc_vcpu_setcontext(int xc_handle,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_t *ctxt);
/**
 * This function will return information about one or more domains, using a
 * single hypercall.  The domain information will be stored into the supplied
 * array of xc_domaininfo_t structures.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm first_domain the first domain to enumerate information from.
 *                    Domains are currently enumerated in order of creation.
 * @parm max_domains the number of elements in info
 * @parm info an array of max_domains size that will contain the information
 *            for the enumerated domains.
 * @return the number of domains enumerated or -1 on error
 */
int xc_domain_getinfolist(int xc_handle,
                          uint32_t first_domain,
                          unsigned int max_domains,
                          xc_domaininfo_t *info);

/**
 * This function returns information about the execution context of a
 * particular vcpu of a domain.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm domid the domain to get information from
 * @parm vcpu the vcpu number
 * @parm ctxt a pointer to a structure to store the execution context of the
 *            domain
 * @return 0 on success, -1 on failure
 */
int xc_vcpu_getcontext(int xc_handle,
                       uint32_t domid,
                       uint32_t vcpu,
                       vcpu_guest_context_t *ctxt);
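
/*
 * Sketch: fetch the context of VCPU 0 of a domain (xc and domid assumed
 * from context; the structure layout is architecture-specific).
 *
 *     vcpu_guest_context_t ctxt;
 *     if ( xc_vcpu_getcontext(xc, domid, 0, &ctxt) == -1 )
 *         perror("xc_vcpu_getcontext");
 */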

typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
int xc_vcpu_getinfo(int xc_handle,
                    uint32_t domid,
                    uint32_t vcpu,
                    xc_vcpuinfo_t *info);

int xc_domain_setcpuweight(int xc_handle,
                           uint32_t domid,
                           float weight);
long long xc_domain_get_cpu_usage(int xc_handle,
                                  domid_t domid,
                                  int vcpu);

int xc_domain_sethandle(int xc_handle, uint32_t domid,
                        xen_domain_handle_t handle);

typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
int xc_shadow_control(int xc_handle,
                      uint32_t domid,
                      unsigned int sop,
                      unsigned long *dirty_bitmap,
                      unsigned long pages,
                      unsigned long *mb,
                      uint32_t mode,
                      xc_shadow_op_stats_t *stats);

int xc_sedf_domain_set(int xc_handle,
                       uint32_t domid,
                       uint64_t period, uint64_t slice,
                       uint64_t latency, uint16_t extratime,
                       uint16_t weight);

int xc_sedf_domain_get(int xc_handle,
                       uint32_t domid,
                       uint64_t *period, uint64_t *slice,
                       uint64_t *latency, uint16_t *extratime,
                       uint16_t *weight);

int xc_sched_credit_domain_set(int xc_handle,
                               uint32_t domid,
                               struct xen_domctl_sched_credit *sdom);

int xc_sched_credit_domain_get(int xc_handle,
                               uint32_t domid,
                               struct xen_domctl_sched_credit *sdom);

/*
 * EVENT CHANNEL FUNCTIONS
 */

/**
 * This function allocates an unbound port.  Ports are named endpoints used for
 * interdomain communication.  This function is most useful in opening a
 * well-known port within a domain to receive events on.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm dom the ID of the local domain (the 'allocatee')
 * @parm remote_dom the ID of the domain that will later bind
 * @return allocated port (in @dom) on success, -1 on failure
 */
int xc_evtchn_alloc_unbound(int xc_handle,
                            uint32_t dom,
                            uint32_t remote_dom);
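
/*
 * Sketch: allocate an unbound port in the calling domain that a remote
 * domain may later bind to (remote_domid is a placeholder; DOMID_SELF
 * comes from xen/xen.h).
 *
 *     int port = xc_evtchn_alloc_unbound(xc, DOMID_SELF, remote_domid);
 *     if ( port == -1 )
 *         perror("xc_evtchn_alloc_unbound");
 */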

int xc_physdev_pci_access_modify(int xc_handle,
                                 uint32_t domid,
                                 int bus,
                                 int dev,
                                 int func,
                                 int enable);

int xc_readconsolering(int xc_handle,
                       char **pbuffer,
                       unsigned int *pnr_chars,
                       int clear);

typedef xen_sysctl_physinfo_t xc_physinfo_t;
int xc_physinfo(int xc_handle,
                xc_physinfo_t *info);

int xc_sched_id(int xc_handle,
                int *sched_id);

int xc_domain_setmaxmem(int xc_handle,
                        uint32_t domid,
                        unsigned int max_memkb);

int xc_domain_set_time_offset(int xc_handle,
                              uint32_t domid,
                              int32_t time_offset_seconds);

int xc_domain_memory_increase_reservation(int xc_handle,
                                          uint32_t domid,
                                          unsigned long nr_extents,
                                          unsigned int extent_order,
                                          unsigned int address_bits,
                                          xen_pfn_t *extent_start);

int xc_domain_memory_decrease_reservation(int xc_handle,
                                          uint32_t domid,
                                          unsigned long nr_extents,
                                          unsigned int extent_order,
                                          xen_pfn_t *extent_start);

int xc_domain_memory_populate_physmap(int xc_handle,
                                      uint32_t domid,
                                      unsigned long nr_extents,
                                      unsigned int extent_order,
                                      unsigned int address_bits,
                                      xen_pfn_t *extent_start);

int xc_domain_translate_gpfn_list(int xc_handle,
                                  uint32_t domid,
                                  unsigned long nr_gpfns,
                                  xen_pfn_t *gpfn_list,
                                  xen_pfn_t *mfn_list);

int xc_domain_ioport_permission(int xc_handle,
                                uint32_t domid,
                                uint32_t first_port,
                                uint32_t nr_ports,
                                uint32_t allow_access);

int xc_domain_irq_permission(int xc_handle,
                             uint32_t domid,
                             uint8_t pirq,
                             uint8_t allow_access);

int xc_domain_iomem_permission(int xc_handle,
                               uint32_t domid,
                               unsigned long first_mfn,
                               unsigned long nr_mfns,
                               uint8_t allow_access);

unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
                                    unsigned long mfn);

typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
/* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
   arrays. */
int xc_perfc_control(int xc_handle,
                     uint32_t op,
                     xc_perfc_desc_t *desc,
                     xc_perfc_val_t *val,
                     int *nbr_desc,
                     int *nbr_val);

/**
 * Memory maps a range within one domain to a local address range.  Mappings
 * should be unmapped with munmap and should follow the same rules as mmap
 * regarding page alignment.  Returns NULL on failure.
 *
 * In Linux, the ring queue for the control channel is accessible by mapping
 * the shared_info_frame (from xc_domain_getinfo()) + 2048.  The structure
 * stored there is of type control_if_t.
 *
 * @parm xc_handle a handle on an open hypervisor interface
 * @parm dom the domain to map memory from
 * @parm size the amount of memory to map (in multiples of page size)
 * @parm prot same flag as in mmap().
 * @parm mfn the frame address to map.
 */
void *xc_map_foreign_range(int xc_handle, uint32_t dom,
                           int size, int prot,
                           unsigned long mfn );

void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
                           xen_pfn_t *arr, int num );
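
/*
 * Mapping sketch: map a single foreign frame read-only, use it, then unmap
 * it with munmap() (needs <sys/mman.h>; dom and mfn assumed from context).
 *
 *     void *page = xc_map_foreign_range(xc, dom, XC_PAGE_SIZE,
 *                                       PROT_READ, mfn);
 *     if ( page != NULL )
 *     {
 *         // ... read the mapped page ...
 *         munmap(page, XC_PAGE_SIZE);
 *     }
 */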

/**
 * Translates a virtual address in the context of a given domain and
 * vcpu returning the machine page frame number of the associated
 * page.
 *
 * @parm xc_handle a handle on an open hypervisor interface
 * @parm dom the domain to perform the translation in
 * @parm vcpu the vcpu to perform the translation on
 * @parm virt the virtual address to translate
 */
unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
                                           int vcpu, unsigned long long virt);

int xc_get_pfn_list(int xc_handle, uint32_t domid, xen_pfn_t *pfn_buf,
                    unsigned long max_pfns);

unsigned long xc_ia64_fpsr_default(void);

int xc_ia64_get_pfn_list(int xc_handle, uint32_t domid,
                         xen_pfn_t *pfn_buf,
                         unsigned int start_page, unsigned int nr_pages);

int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
                           unsigned long dst_pfn, const char *src_page);

int xc_clear_domain_page(int xc_handle, uint32_t domid,
                         unsigned long dst_pfn);

long xc_get_max_pages(int xc_handle, uint32_t domid);

int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
                 domid_t dom);

int xc_memory_op(int xc_handle, int cmd, void *arg);

int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
                          int num, unsigned long *arr);

/* Get current total pages allocated to a domain. */
long xc_get_tot_pages(int xc_handle, uint32_t domid);

/*
 * Trace Buffer Operations
 */

/**
 * xc_tbuf_enable - enable tracing buffers
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm cnt size of tracing buffers to create (in pages)
 * @parm mfn location to store mfn of the trace buffers to
 * @parm size location to store the size (in bytes) of a trace buffer to
 *
 * Gets the machine address of the trace pointer area and the size of the
 * per CPU buffers.
 */
int xc_tbuf_enable(int xc_handle, size_t cnt, unsigned long *mfn,
                   unsigned long *size);

/*
 * Disable tracing buffers.
 */
int xc_tbuf_disable(int xc_handle);

/**
 * This function sets the size of the trace buffers. Setting the size
 * is currently a one-shot operation that may be performed either at boot
 * time or via this interface, not both. The buffer size must be set before
 * enabling tracing.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm size the size in pages per cpu for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_set_size(int xc_handle, unsigned long size);

/**
 * This function retrieves the current size of the trace buffers.
 * Note that the size returned is in terms of bytes, not pages.
 *
 * @parm xc_handle a handle to an open hypervisor interface
 * @parm size will contain the size in bytes for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_get_size(int xc_handle, unsigned long *size);
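
/*
 * Trace-buffer sketch (the 32-page count is an arbitrary illustrative
 * value): create and enable the buffers, then report where they live and
 * how large each per-CPU buffer is.
 *
 *     unsigned long mfn, bytes;
 *     if ( xc_tbuf_enable(xc, 32, &mfn, &bytes) == 0 )
 *         printf("trace buffers at mfn %#lx, %lu bytes per CPU\n",
 *                mfn, bytes);
 */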

int xc_tbuf_set_cpu_mask(int xc_handle, uint32_t mask);

int xc_tbuf_set_evt_mask(int xc_handle, uint32_t mask);

int xc_domctl(int xc_handle, struct xen_domctl *domctl);
int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl);

int xc_version(int xc_handle, int cmd, void *arg);

/*
 * MMU updates.
 */
#define MAX_MMU_UPDATES 1024
struct xc_mmu {
    mmu_update_t updates[MAX_MMU_UPDATES];
    int          idx;
    domid_t      subject;
};
typedef struct xc_mmu xc_mmu_t;
xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom);
int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
                      unsigned long long ptr, unsigned long long val);
int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);
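
/*
 * MMU-update sketch: queue updates against a domain and flush them.  The
 * ptr/val pair is a placeholder; real callers derive it from page-table
 * machine addresses.
 *
 *     xc_mmu_t *mmu = xc_init_mmu_updates(xc, dom);
 *     if ( mmu != NULL )
 *     {
 *         xc_add_mmu_update(xc, mmu, ptr, val);   // may flush when full
 *         xc_finish_mmu_updates(xc, mmu);         // flush remaining entries
 *     }
 */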

int xc_acm_op(int xc_handle, int cmd, void *arg, size_t arg_size);

/*
 * Return a handle to the event channel driver, or -1 on failure, in which case
 * errno will be set appropriately.
 */
int xc_evtchn_open(void);

/*
 * Close a handle previously allocated with xc_evtchn_open().
 */
int xc_evtchn_close(int xce_handle);

/*
 * Return an fd that can be select()ed on for further calls to
 * xc_evtchn_pending().
 */
int xc_evtchn_fd(int xce_handle);

/*
 * Notify the given event channel. Returns -1 on failure, in which case
 * errno will be set appropriately.
 */
int xc_evtchn_notify(int xce_handle, evtchn_port_t port);

/*
 * Returns a new event port bound to the remote port for the given domain ID,
 * or -1 on failure, in which case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_bind_interdomain(int xce_handle, int domid,
                                         evtchn_port_t remote_port);

/*
 * Unbind the given event channel. Returns -1 on failure, in which case errno
 * will be set appropriately.
 */
int xc_evtchn_unbind(int xce_handle, evtchn_port_t port);

/*
 * Bind an event channel to the given VIRQ. Returns the event channel bound to
 * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_bind_virq(int xce_handle, unsigned int virq);

/*
 * Return the next event channel to become pending, or -1 on failure, in which
 * case errno will be set appropriately.
 */
evtchn_port_t xc_evtchn_pending(int xce_handle);

/*
 * Unmask the given event channel. Returns -1 on failure, in which case errno
 * will be set appropriately.
 */
int xc_evtchn_unmask(int xce_handle, evtchn_port_t port);
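
/*
 * Event-channel driver sketch: open the driver, bind a VIRQ (VIRQ_TIMER is
 * only an example), then wait for and acknowledge events.  A real consumer
 * would select() on xc_evtchn_fd() before calling xc_evtchn_pending().
 *
 *     int xce = xc_evtchn_open();
 *     evtchn_port_t port = xc_evtchn_bind_virq(xce, VIRQ_TIMER);
 *     for ( ; ; )
 *     {
 *         evtchn_port_t p = xc_evtchn_pending(xce);
 *         if ( p == (evtchn_port_t)-1 )
 *             break;
 *         xc_evtchn_unmask(xce, p);
 *         // ... handle the event that arrived on port p ...
 *     }
 *     xc_evtchn_unbind(xce, port);
 *     xc_evtchn_close(xce);
 */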

#endif