direct-io.hg

view tools/libxc/xenctrl.h @ 11672:7e79259c2c17

[LINUX] Make evtchn device use a dynamic minor number.

Also update the code in tools to create the device node if udev fails.
The tools now read the sysfs filesystem to find the minor number needed.

Original patch from Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Sep 29 14:20:52 2006 +0100 (2006-09-29)
parents fd6c2b5e041f
children 5d2ce349f9f4
line source
1 /******************************************************************************
2 * xenctrl.h
3 *
4 * A library for low-level access to the Xen control interfaces.
5 *
6 * Copyright (c) 2003-2004, K A Fraser.
7 */
9 #ifndef XENCTRL_H
10 #define XENCTRL_H
12 /* Tell the Xen public headers we are a user-space tools build. */
13 #ifndef __XEN_TOOLS__
14 #define __XEN_TOOLS__ 1
15 #endif
17 #include <stddef.h>
18 #include <stdint.h>
19 #include <sys/ptrace.h>
20 #include <xen/xen.h>
21 #include <xen/domctl.h>
22 #include <xen/sysctl.h>
23 #include <xen/version.h>
24 #include <xen/event_channel.h>
25 #include <xen/sched.h>
26 #include <xen/memory.h>
27 #include <xen/acm.h>
28 #include <xen/acm_ops.h>
/* Guest page-size constants used throughout libxc: ia64 uses 16kB pages
 * (shift 14); all other supported architectures use 4kB pages (shift 12). */
30 #ifdef __ia64__
31 #define XC_PAGE_SHIFT 14
32 #else
33 #define XC_PAGE_SHIFT 12
34 #endif
35 #define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT)
36 #define XC_PAGE_MASK (~(XC_PAGE_SIZE-1))
38 /*
39 * DEFINITIONS FOR CPU BARRIERS
40 */
/* x86-32: a locked read-modify-write on the stack is used as the full and
 * read barrier; wmb() is a compiler-only barrier (the "memory" clobber
 * prevents reordering by the compiler, no instruction is emitted). */
42 #if defined(__i386__)
43 #define mb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
44 #define rmb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
45 #define wmb() __asm__ __volatile__ ( "" : : : "memory")
/* x86-64: dedicated fence instructions; wmb() again compiler-only. */
46 #elif defined(__x86_64__)
47 #define mb() __asm__ __volatile__ ( "mfence" : : : "memory")
48 #define rmb() __asm__ __volatile__ ( "lfence" : : : "memory")
49 #define wmb() __asm__ __volatile__ ( "" : : : "memory")
/* ia64: no barriers at all — per the FIXME these are known-missing
 * placeholders, not deliberately empty; not even a compiler barrier. */
50 #elif defined(__ia64__)
51 /* FIXME */
52 #define mb()
53 #define rmb()
54 #define wmb()
/* powerpc: full "sync" for everything; the inline XXX/lwsync/eieio notes
 * record that weaker (cheaper) instructions could likely be substituted. */
55 #elif defined(__powerpc__)
56 /* XXX loosen these up later */
57 #define mb() __asm__ __volatile__ ("sync" : : : "memory")
58 #define rmb() __asm__ __volatile__ ("sync" : : : "memory") /* lwsync? */
59 #define wmb() __asm__ __volatile__ ("sync" : : : "memory") /* eieio? */
/* Any other architecture must provide its own definitions. */
60 #else
61 #error "Define barriers"
62 #endif
64 /*
65 * INITIALIZATION FUNCTIONS
66 */
68 /**
69 * This function opens a handle to the hypervisor interface. This function can
70 * be called multiple times within a single process. Multiple processes can
71 * have an open hypervisor interface at the same time.
72 *
73 * Each call to this function should have a corresponding call to
74 * xc_interface_close().
75 *
76 * This function can fail if the caller does not have superuser permission or
77 * if a Xen-enabled kernel is not currently running.
78 *
79 * @return a handle to the hypervisor interface or -1 on failure
80 */
81 int xc_interface_open(void);
83 /**
84 * This function closes an open hypervisor interface.
85 *
86 * This function can fail if the handle does not represent an open interface or
87 * if there were problems closing the interface.
88 *
89 * @parm xc_handle a handle to an open hypervisor interface
90 * @return 0 on success, -1 otherwise.
91 */
92 int xc_interface_close(int xc_handle);
94 /*
95 * KERNEL INTERFACES
96 */
98 /*
99 * Resolve a kernel device name (e.g., "evtchn", "blktap0") into a kernel
100 * device number. Returns -1 on error (and sets errno).
101 */
102 int xc_find_device_number(const char *name);
104 /*
105 * DOMAIN DEBUGGING FUNCTIONS
106 */
/* Header describing a domain core dump produced by xc_domain_dumpcore().
 * The xch_*_offset fields are presumably byte offsets from the start of the
 * dump file to the named section — confirm against the dump writer. */
108 typedef struct xc_core_header {
109 unsigned int xch_magic;
110 unsigned int xch_nr_vcpus;
111 unsigned int xch_nr_pages;
112 unsigned int xch_ctxt_offset;
113 unsigned int xch_index_offset;
114 unsigned int xch_pages_offset;
115 } xc_core_header_t;
/* Expected value of xch_magic in a valid core dump. */
117 #define XC_CORE_MAGIC 0xF00FEBED
119 long xc_ptrace_core(
120 int xc_handle,
121 enum __ptrace_request request,
122 uint32_t domid,
123 long addr,
124 long data,
125 vcpu_guest_context_t *ctxt);
126 void * map_domain_va_core(
127 unsigned long domfd,
128 int cpu,
129 void *guest_va,
130 vcpu_guest_context_t *ctxt);
131 int xc_waitdomain_core(
132 int xc_handle,
133 int domain,
134 int *status,
135 int options,
136 vcpu_guest_context_t *ctxt);
138 /*
139 * DOMAIN MANAGEMENT FUNCTIONS
140 */
/* Per-domain information returned by xc_domain_getinfo(). */
142 typedef struct {
143 uint32_t domid;
144 uint32_t ssidref;
/* Lifecycle/scheduling state flags; at most one of dying/crashed/shutdown
 * and paused/blocked/running is expected to be set at a time — TODO confirm
 * against the hypervisor's flag encoding. */
145 unsigned int dying:1, crashed:1, shutdown:1,
146 paused:1, blocked:1, running:1;
147 unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
148 unsigned long nr_pages;
149 unsigned long shared_info_frame;
/* Cumulative CPU time consumed by the domain; units not shown here
 * (presumably nanoseconds — verify against xen_domctl_getdomaininfo). */
150 uint64_t cpu_time;
151 unsigned long max_memkb; /* memory limit, in kilobytes per the name */
152 unsigned int nr_online_vcpus;
153 unsigned int max_vcpu_id;
154 xen_domain_handle_t handle; /* opaque per-domain UUID-style handle */
155 } xc_dominfo_t;
/* Raw hypervisor-format domain info, used by xc_domain_getinfolist(). */
157 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
158 int xc_domain_create(int xc_handle,
159 uint32_t ssidref,
160 xen_domain_handle_t handle,
161 uint32_t *pdomid);
164 /* Functions to produce a dump of a given domain
165 * xc_domain_dumpcore - produces a dump to a specified file
166 * xc_domain_dumpcore_via_callback - produces a dump, using a specified
167 * callback function
168 */
169 int xc_domain_dumpcore(int xc_handle,
170 uint32_t domid,
171 const char *corename);
173 /* Define the callback function type for xc_domain_dumpcore_via_callback.
174 *
175 * This function is called by the coredump code for every "write",
176 * and passes an opaque object for the use of the function and
177 * created by the caller of xc_domain_dumpcore_via_callback.
178 */
179 typedef int (dumpcore_rtn_t)(void *arg, char *buffer, unsigned int length);
181 int xc_domain_dumpcore_via_callback(int xc_handle,
182 uint32_t domid,
183 void *arg,
184 dumpcore_rtn_t dump_rtn);
186 /*
187 * This function sets the maximum number of vcpus that a domain may create.
188 *
189 * @parm xc_handle a handle to an open hypervisor interface.
190 * @parm domid the domain id in which vcpus are to be created.
191 * @parm max the maximum number of vcpus that the domain may create.
192 * @return 0 on success, -1 on failure.
193 */
194 int xc_domain_max_vcpus(int xc_handle,
195 uint32_t domid,
196 unsigned int max);
198 /**
199 * This function pauses a domain. A paused domain still exists in memory
200 * however it does not receive any timeslices from the hypervisor.
201 *
202 * @parm xc_handle a handle to an open hypervisor interface
203 * @parm domid the domain id to pause
204 * @return 0 on success, -1 on failure.
205 */
206 int xc_domain_pause(int xc_handle,
207 uint32_t domid);
208 /**
209 * This function unpauses a domain. The domain should have been previously
210 * paused.
211 *
212 * @parm xc_handle a handle to an open hypervisor interface
213 * @parm domid the domain id to unpause
214 * @return 0 on success, -1 on failure
215 */
216 int xc_domain_unpause(int xc_handle,
217 uint32_t domid);
219 /**
220 * This function will destroy a domain. Destroying a domain removes the domain
221 * completely from memory. This function should be called after sending the
222 * domain a SHUTDOWN control message to free up the domain resources.
223 *
224 * @parm xc_handle a handle to an open hypervisor interface
225 * @parm domid the domain id to destroy
226 * @return 0 on success, -1 on failure
227 */
228 int xc_domain_destroy(int xc_handle,
229 uint32_t domid);
231 /**
232 * This function will shutdown a domain. This is intended for use in
233 * fully-virtualized domains where this operation is analogous to the
234 * sched_op operations in a paravirtualized domain. The caller is
235 * expected to give the reason for the shutdown.
236 *
237 * @parm xc_handle a handle to an open hypervisor interface
238 * @parm domid the domain id to shut down
239 * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
240 * @return 0 on success, -1 on failure
241 */
242 int xc_domain_shutdown(int xc_handle,
243 uint32_t domid,
244 int reason);
246 int xc_vcpu_setaffinity(int xc_handle,
247 uint32_t domid,
248 int vcpu,
249 uint64_t cpumap);
250 int xc_vcpu_getaffinity(int xc_handle,
251 uint32_t domid,
252 int vcpu,
253 uint64_t *cpumap);
255 /**
256 * This function will return information about one or more domains. It is
257 * designed to iterate over the list of domains. If a single domain is
258 * requested, this function will return the next domain in the list - if
259 * one exists. It is, therefore, important in this case to make sure the
260 * domain requested was the one returned.
261 *
262 * @parm xc_handle a handle to an open hypervisor interface
263 * @parm first_domid the first domain to enumerate information from. Domains
264 * are currently enumerated in order of creation.
265 * @parm max_doms the number of elements in info
266 * @parm info an array of max_doms size that will contain the information for
267 * the enumerated domains.
268 * @return the number of domains enumerated or -1 on error
269 */
270 int xc_domain_getinfo(int xc_handle,
271 uint32_t first_domid,
272 unsigned int max_doms,
273 xc_dominfo_t *info);
276 /**
277 * This function will set the execution context for the specified vcpu.
278 *
279 * @parm xc_handle a handle to an open hypervisor interface
280 * @parm domid the domain to set the vcpu context for
281 * @parm vcpu the vcpu number for the context
282 * @parm ctxt pointer to the cpu context with the values to set
283 * @return 0 on success or -1 on error
284 */
285 int xc_vcpu_setcontext(int xc_handle,
286 uint32_t domid,
287 uint32_t vcpu,
288 vcpu_guest_context_t *ctxt);
289 /**
290 * This function will return information about one or more domains, using a
291 * single hypercall. The domain information will be stored into the supplied
292 * array of xc_domaininfo_t structures.
293 *
294 * @parm xc_handle a handle to an open hypervisor interface
295 * @parm first_domain the first domain to enumerate information from.
296 * Domains are currently enumerated in order of creation.
297 * @parm max_domains the number of elements in info
298 * @parm info an array of max_doms size that will contain the information for
299 * the enumerated domains.
300 * @return the number of domains enumerated or -1 on error
301 */
302 int xc_domain_getinfolist(int xc_handle,
303 uint32_t first_domain,
304 unsigned int max_domains,
305 xc_domaininfo_t *info);
307 /**
308 * This function returns information about the execution context of a
309 * particular vcpu of a domain.
310 *
311 * @parm xc_handle a handle to an open hypervisor interface
312 * @parm domid the domain to get information from
313 * @parm vcpu the vcpu number
314 * @parm ctxt a pointer to a structure to store the execution context of the
315 * domain
316 * @return 0 on success, -1 on failure
317 */
318 int xc_vcpu_getcontext(int xc_handle,
319 uint32_t domid,
320 uint32_t vcpu,
321 vcpu_guest_context_t *ctxt);
323 typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
324 int xc_vcpu_getinfo(int xc_handle,
325 uint32_t domid,
326 uint32_t vcpu,
327 xc_vcpuinfo_t *info);
329 int xc_domain_setcpuweight(int xc_handle,
330 uint32_t domid,
331 float weight);
332 long long xc_domain_get_cpu_usage(int xc_handle,
333 domid_t domid,
334 int vcpu);
336 int xc_domain_sethandle(int xc_handle, uint32_t domid,
337 xen_domain_handle_t handle);
339 typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
340 int xc_shadow_control(int xc_handle,
341 uint32_t domid,
342 unsigned int sop,
343 unsigned long *dirty_bitmap,
344 unsigned long pages,
345 unsigned long *mb,
346 uint32_t mode,
347 xc_shadow_op_stats_t *stats);
349 int xc_sedf_domain_set(int xc_handle,
350 uint32_t domid,
351 uint64_t period, uint64_t slice,
352 uint64_t latency, uint16_t extratime,
353 uint16_t weight);
355 int xc_sedf_domain_get(int xc_handle,
356 uint32_t domid,
357 uint64_t* period, uint64_t *slice,
358 uint64_t *latency, uint16_t *extratime,
359 uint16_t *weight);
361 int xc_sched_credit_domain_set(int xc_handle,
362 uint32_t domid,
363 struct xen_domctl_sched_credit *sdom);
365 int xc_sched_credit_domain_get(int xc_handle,
366 uint32_t domid,
367 struct xen_domctl_sched_credit *sdom);
369 /*
370 * EVENT CHANNEL FUNCTIONS
371 */
373 /**
374 * This function allocates an unbound port. Ports are named endpoints used for
375 * interdomain communication. This function is most useful in opening a
376 * well-known port within a domain to receive events on.
377 *
378 * @parm xc_handle a handle to an open hypervisor interface
379 * @parm dom the ID of the local domain (the 'allocatee')
380 * @parm remote_dom the ID of the domain who will later bind
381 * @return allocated port (in @dom) on success, -1 on failure
382 */
383 int xc_evtchn_alloc_unbound(int xc_handle,
384 uint32_t dom,
385 uint32_t remote_dom);
387 int xc_physdev_pci_access_modify(int xc_handle,
388 uint32_t domid,
389 int bus,
390 int dev,
391 int func,
392 int enable);
394 int xc_readconsolering(int xc_handle,
395 char **pbuffer,
396 unsigned int *pnr_chars,
397 int clear);
399 typedef xen_sysctl_physinfo_t xc_physinfo_t;
400 int xc_physinfo(int xc_handle,
401 xc_physinfo_t *info);
403 int xc_sched_id(int xc_handle,
404 int *sched_id);
406 int xc_domain_setmaxmem(int xc_handle,
407 uint32_t domid,
408 unsigned int max_memkb);
410 int xc_domain_set_time_offset(int xc_handle,
411 uint32_t domid,
412 int32_t time_offset_seconds);
414 int xc_domain_memory_increase_reservation(int xc_handle,
415 uint32_t domid,
416 unsigned long nr_extents,
417 unsigned int extent_order,
418 unsigned int address_bits,
419 xen_pfn_t *extent_start);
421 int xc_domain_memory_decrease_reservation(int xc_handle,
422 uint32_t domid,
423 unsigned long nr_extents,
424 unsigned int extent_order,
425 xen_pfn_t *extent_start);
427 int xc_domain_memory_populate_physmap(int xc_handle,
428 uint32_t domid,
429 unsigned long nr_extents,
430 unsigned int extent_order,
431 unsigned int address_bits,
432 xen_pfn_t *extent_start);
434 int xc_domain_translate_gpfn_list(int xc_handle,
435 uint32_t domid,
436 unsigned long nr_gpfns,
437 xen_pfn_t *gpfn_list,
438 xen_pfn_t *mfn_list);
440 int xc_domain_ioport_permission(int xc_handle,
441 uint32_t domid,
442 uint32_t first_port,
443 uint32_t nr_ports,
444 uint32_t allow_access);
446 int xc_domain_irq_permission(int xc_handle,
447 uint32_t domid,
448 uint8_t pirq,
449 uint8_t allow_access);
451 int xc_domain_iomem_permission(int xc_handle,
452 uint32_t domid,
453 unsigned long first_mfn,
454 unsigned long nr_mfns,
455 uint8_t allow_access);
457 unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
458 unsigned long mfn);
460 typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
461 typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
462 /* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
463 arrays. */
464 int xc_perfc_control(int xc_handle,
465 uint32_t op,
466 xc_perfc_desc_t *desc,
467 xc_perfc_val_t *val,
468 int *nbr_desc,
469 int *nbr_val);
471 /**
472 * Memory maps a range within one domain to a local address range. Mappings
473 * should be unmapped with munmap and should follow the same rules as mmap
474 * regarding page alignment. Returns NULL on failure.
475 *
476 * In Linux, the ring queue for the control channel is accessible by mapping
477 * the shared_info_frame (from xc_domain_getinfo()) + 2048. The structure
478 * stored there is of type control_if_t.
479 *
480 * @parm xc_handle a handle on an open hypervisor interface
481 * @parm dom the domain to map memory from
482 * @parm size the amount of memory to map (in multiples of page size)
483 * @parm prot same flag as in mmap().
484 * @parm mfn the frame address to map.
485 */
486 void *xc_map_foreign_range(int xc_handle, uint32_t dom,
487 int size, int prot,
488 unsigned long mfn );
490 void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
491 xen_pfn_t *arr, int num );
493 /**
494 * Translates a virtual address in the context of a given domain and
495 * vcpu returning the machine page frame number of the associated
496 * page.
497 *
498 * @parm xc_handle a handle on an open hypervisor interface
499 * @parm dom the domain to perform the translation in
500 * @parm vcpu the vcpu to perform the translation on
501 * @parm virt the virtual address to translate
502 */
503 unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
504 int vcpu, unsigned long long virt);
506 int xc_get_pfn_list(int xc_handle, uint32_t domid, xen_pfn_t *pfn_buf,
507 unsigned long max_pfns);
509 int xc_ia64_get_pfn_list(int xc_handle, uint32_t domid,
510 xen_pfn_t *pfn_buf,
511 unsigned int start_page, unsigned int nr_pages);
513 int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
514 unsigned long dst_pfn, const char *src_page);
516 int xc_clear_domain_page(int xc_handle, uint32_t domid,
517 unsigned long dst_pfn);
519 long xc_get_max_pages(int xc_handle, uint32_t domid);
521 int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
522 domid_t dom);
524 int xc_memory_op(int xc_handle, int cmd, void *arg);
526 int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
527 int num, unsigned long *arr);
530 /* Get current total pages allocated to a domain. */
531 long xc_get_tot_pages(int xc_handle, uint32_t domid);
534 /*
535 * Trace Buffer Operations
536 */
538 /**
539 * xc_tbuf_enable - enable tracing buffers
540 *
541 * @parm xc_handle a handle to an open hypervisor interface
542 * @parm cnt size of tracing buffers to create (in pages)
543 * @parm mfn location to store mfn of the trace buffers to
544 * @parm size location to store the size (in bytes) of a trace buffer to
545 *
546 * Gets the machine address of the trace pointer area and the size of the
547 * per CPU buffers.
548 */
549 int xc_tbuf_enable(int xc_handle, size_t cnt, unsigned long *mfn,
550 unsigned long *size);
552 /*
553 * Disable tracing buffers.
554 */
555 int xc_tbuf_disable(int xc_handle);
557 /**
558 * This function sets the size of the trace buffers. Setting the size
559 * is currently a one-shot operation that may be performed either at boot
560 * time or via this interface, not both. The buffer size must be set before
561 * enabling tracing.
562 *
563 * @parm xc_handle a handle to an open hypervisor interface
564 * @parm size the size in pages per cpu for the trace buffers
565 * @return 0 on success, -1 on failure.
566 */
567 int xc_tbuf_set_size(int xc_handle, unsigned long size);
569 /**
570 * This function retrieves the current size of the trace buffers.
571 * Note that the size returned is in terms of bytes, not pages.
573 * @parm xc_handle a handle to an open hypervisor interface
574 * @parm size will contain the size in bytes for the trace buffers
575 * @return 0 on success, -1 on failure.
576 */
577 int xc_tbuf_get_size(int xc_handle, unsigned long *size);
579 int xc_tbuf_set_cpu_mask(int xc_handle, uint32_t mask);
581 int xc_tbuf_set_evt_mask(int xc_handle, uint32_t mask);
583 int xc_domctl(int xc_handle, struct xen_domctl *domctl);
584 int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl);
586 int xc_version(int xc_handle, int cmd, void *arg);
588 /*
589 * MMU updates.
590 */
/* Maximum number of MMU update requests batched in one xc_mmu buffer. */
591 #define MAX_MMU_UPDATES 1024
/* Batching buffer for MMU update hypercalls: updates are queued via
 * xc_add_mmu_update() and presumably flushed to the hypervisor when the
 * buffer fills or when xc_finish_mmu_updates() is called — confirm in the
 * implementation. `subject` is the domain the updates apply to. */
592 struct xc_mmu {
593 mmu_update_t updates[MAX_MMU_UPDATES];
594 int idx; /* number of queued entries in updates[] */
595 domid_t subject;
596 };
597 typedef struct xc_mmu xc_mmu_t;
/* Allocate and initialise a batch buffer targeting domain `dom`.
 * Ownership: caller receives the buffer; how it is released is not shown
 * here — NOTE(review): confirm whether a matching free routine exists. */
598 xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom);
599 int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
600 unsigned long long ptr, unsigned long long val);
/* Flush any pending queued updates. */
601 int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);
603 int xc_acm_op(int xc_handle, int cmd, void *arg, size_t arg_size);
605 /*
606 * Return a handle to the event channel driver, or -1 on failure, in which case
607 * errno will be set appropriately.
608 */
609 int xc_evtchn_open(void);
611 /*
612 * Close a handle previously allocated with xc_evtchn_open().
613 */
614 int xc_evtchn_close(int xce_handle);
616 /*
617 * Return an fd that can be select()ed on for further calls to
618 * xc_evtchn_pending().
619 */
620 int xc_evtchn_fd(int xce_handle);
622 /*
623 * Notify the given event channel. Returns -1 on failure, in which case
624 * errno will be set appropriately.
625 */
626 int xc_evtchn_notify(int xce_handle, evtchn_port_t port);
628 /*
629 * Returns a new event port bound to the remote port for the given domain ID,
630 * or -1 on failure, in which case errno will be set appropriately.
631 */
632 evtchn_port_t xc_evtchn_bind_interdomain(int xce_handle, int domid,
633 evtchn_port_t remote_port);
635 /*
636 * Unbind the given event channel. Returns -1 on failure, in which case errno
637 * will be set appropriately.
638 */
639 int xc_evtchn_unbind(int xce_handle, evtchn_port_t port);
641 /*
642 * Bind an event channel to the given VIRQ. Returns the event channel bound to
643 * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
644 */
645 evtchn_port_t xc_evtchn_bind_virq(int xce_handle, unsigned int virq);
647 /*
648 * Return the next event channel to become pending, or -1 on failure, in which
649 * case errno will be set appropriately.
650 */
651 evtchn_port_t xc_evtchn_pending(int xce_handle);
653 /*
654 * Unmask the given event channel. Returns -1 on failure, in which case errno
655 * will be set appropriately.
656 */
657 int xc_evtchn_unmask(int xce_handle, evtchn_port_t port);
659 #endif