Mixed throughout libxc are uint32_t, int, and domid_t for domid parameters.
With a signed type, and an explicitly 16-bit type, it is exceedingly difficult
to construct an INVALID_DOMID constant which works with all of them. (The
main problem being that domid_t gets unconditionally zero extended when
promoted to int for arithmetic.)
Libxl uses uint32_t consistently everywhere, so alter libxc to match.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Release-acked-by: Julien Grall <julien.grall@linaro.org>
[ wei: fix compilation error in libxl ]
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
unsigned long flags;
unsigned int console_evtchn;
unsigned int xenstore_evtchn;
- domid_t console_domid;
- domid_t xenstore_domid;
+ uint32_t console_domid;
+ uint32_t xenstore_domid;
xen_pfn_t shared_info_mfn;
xc_interface *xch;
- domid_t guest_domid;
+ uint32_t guest_domid;
int claim_enabled; /* 0 by default, 1 enables it */
int xen_version;
int xc_dom_update_guest_p2m(struct xc_dom_image *dom);
int xc_dom_boot_xen_init(struct xc_dom_image *dom, xc_interface *xch,
- domid_t domid);
+ uint32_t domid);
int xc_dom_boot_mem_init(struct xc_dom_image *dom);
void *xc_dom_boot_domU_map(struct xc_dom_image *dom, xen_pfn_t pfn,
xen_pfn_t count);
int xc_dom_boot_image(struct xc_dom_image *dom);
int xc_dom_compat_check(struct xc_dom_image *dom);
int xc_dom_gnttab_init(struct xc_dom_image *dom);
-int xc_dom_gnttab_hvm_seed(xc_interface *xch, domid_t domid,
+int xc_dom_gnttab_hvm_seed(xc_interface *xch, uint32_t domid,
xen_pfn_t console_gmfn,
xen_pfn_t xenstore_gmfn,
- domid_t console_domid,
- domid_t xenstore_domid);
-int xc_dom_gnttab_seed(xc_interface *xch, domid_t domid,
+ uint32_t console_domid,
+ uint32_t xenstore_domid);
+int xc_dom_gnttab_seed(xc_interface *xch, uint32_t domid,
xen_pfn_t console_gmfn,
xen_pfn_t xenstore_gmfn,
- domid_t console_domid,
- domid_t xenstore_domid);
+ uint32_t console_domid,
+ uint32_t xenstore_domid);
bool xc_dom_translated(const struct xc_dom_image *dom);
/* --- debugging bits ---------------------------------------------- */
*/
int xc_dom_vuart_init(xc_interface *xch,
uint32_t type,
- domid_t domid,
- domid_t console_domid,
+ uint32_t domid,
+ uint32_t console_domid,
xen_pfn_t gfn,
evtchn_port_t *evtchn);
xc_vcpuinfo_t *info);
long long xc_domain_get_cpu_usage(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
int vcpu);
int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
* @param grant_frames max. number of grant frames
* @param maptrack_frames max. number of maptrack frames
*/
-int xc_domain_set_gnttab_limits(xc_interface *xch, domid_t domid,
+int xc_domain_set_gnttab_limits(xc_interface *xch, uint32_t domid,
uint32_t grant_frames,
uint32_t maptrack_frames);
int xc_domain_disable_migrate(xc_interface *xch, uint32_t domid);
-int xc_domain_maximum_gpfn(xc_interface *xch, domid_t domid, xen_pfn_t *gpfns);
+int xc_domain_maximum_gpfn(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);
-int xc_domain_nr_gpfns(xc_interface *xch, domid_t domid, xen_pfn_t *gpfns);
+int xc_domain_nr_gpfns(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns);
int xc_domain_increase_reservation(xc_interface *xch,
uint32_t domid,
xen_pfn_t gpfn);
int xc_domain_add_to_physmap_batch(xc_interface *xch,
- domid_t domid,
- domid_t foreign_domid,
+ uint32_t domid,
+ uint32_t foreign_domid,
unsigned int space,
unsigned int size,
xen_ulong_t *idxs,
unsigned long nr_pages);
int xc_domain_memory_exchange_pages(xc_interface *xch,
- int domid,
+ uint32_t domid,
unsigned long nr_in_extents,
unsigned int in_order,
xen_pfn_t *in_extents,
}
int xc_mmuext_op(xc_interface *xch, struct mmuext_op *op, unsigned int nr_ops,
- domid_t dom);
+ uint32_t dom);
/* System wide memory properties */
int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn);
* (in its doc comment in domctl.h).
*/
int xc_domain_subscribe_for_suspend(
- xc_interface *xch, domid_t domid, evtchn_port_t port);
+ xc_interface *xch, uint32_t domid, evtchn_port_t port);
/**************************
* GRANT TABLE OPERATIONS *
/* Logs iff hypercall bounce fails, otherwise doesn't. */
int xc_gnttab_query_size(xc_interface *xch, struct gnttab_query_size *query);
-int xc_gnttab_get_version(xc_interface *xch, int domid); /* Never logs */
-grant_entry_v1_t *xc_gnttab_map_table_v1(xc_interface *xch, int domid, int *gnt_num);
-grant_entry_v2_t *xc_gnttab_map_table_v2(xc_interface *xch, int domid, int *gnt_num);
+int xc_gnttab_get_version(xc_interface *xch, uint32_t domid); /* Never logs */
+grant_entry_v1_t *xc_gnttab_map_table_v1(xc_interface *xch, uint32_t domid, int *gnt_num);
+grant_entry_v2_t *xc_gnttab_map_table_v2(xc_interface *xch, uint32_t domid, int *gnt_num);
/* Sometimes these don't set errno [fixme], and sometimes they don't log. */
int xc_physdev_map_pirq(xc_interface *xch,
- int domid,
+ uint32_t domid,
int index,
int *pirq);
int xc_physdev_map_pirq_msi(xc_interface *xch,
- int domid,
+ uint32_t domid,
int index,
int *pirq,
int devfn,
uint64_t table_base);
int xc_physdev_unmap_pirq(xc_interface *xch,
- int domid,
+ uint32_t domid,
int pirq);
/*
*/
void xc_clear_last_error(xc_interface *xch);
-int xc_hvm_param_set(xc_interface *handle, domid_t dom, uint32_t param, uint64_t value);
-int xc_hvm_param_get(xc_interface *handle, domid_t dom, uint32_t param, uint64_t *value);
+int xc_hvm_param_set(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t value);
+int xc_hvm_param_get(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t *value);
/* Deprecated: use xc_hvm_param_set/get() instead. */
-int xc_set_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long value);
-int xc_get_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long *value);
+int xc_set_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long value);
+int xc_get_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long *value);
/* HVM guest pass-through */
int xc_assign_device(xc_interface *xch,
#if defined(__i386__) || defined(__x86_64__)
int xc_cpuid_set(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
const unsigned int *input,
const char **config,
char **config_transformed);
int xc_cpuid_apply_policy(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
uint32_t *featureset,
unsigned int nr_features);
void xc_cpuid_to_str(const unsigned int *regs,
int32_t pool_id, uint32_t subop, uint32_t cli_id,
uint32_t len, uint32_t arg, void *buf);
int xc_tmem_auth(xc_interface *xch, int cli_id, char *uuid_str, int enable);
-int xc_tmem_save(xc_interface *xch, int dom, int live, int fd, int field_marker);
-int xc_tmem_save_extra(xc_interface *xch, int dom, int fd, int field_marker);
-void xc_tmem_save_done(xc_interface *xch, int dom);
-int xc_tmem_restore(xc_interface *xch, int dom, int fd);
-int xc_tmem_restore_extra(xc_interface *xch, int dom, int fd);
+int xc_tmem_save(xc_interface *xch, uint32_t domid, int live, int fd, int field_marker);
+int xc_tmem_save_extra(xc_interface *xch, uint32_t domid, int fd, int field_marker);
+void xc_tmem_save_done(xc_interface *xch, uint32_t domid);
+int xc_tmem_restore(xc_interface *xch, uint32_t domid, int fd);
+int xc_tmem_restore_extra(xc_interface *xch, uint32_t domid, int fd);
/**
* altp2m operations
*/
-int xc_altp2m_get_domain_state(xc_interface *handle, domid_t dom, bool *state);
-int xc_altp2m_set_domain_state(xc_interface *handle, domid_t dom, bool state);
-int xc_altp2m_set_vcpu_enable_notify(xc_interface *handle, domid_t domid,
+int xc_altp2m_get_domain_state(xc_interface *handle, uint32_t dom, bool *state);
+int xc_altp2m_set_domain_state(xc_interface *handle, uint32_t dom, bool state);
+int xc_altp2m_set_vcpu_enable_notify(xc_interface *handle, uint32_t domid,
uint32_t vcpuid, xen_pfn_t gfn);
-int xc_altp2m_create_view(xc_interface *handle, domid_t domid,
+int xc_altp2m_create_view(xc_interface *handle, uint32_t domid,
xenmem_access_t default_access, uint16_t *view_id);
-int xc_altp2m_destroy_view(xc_interface *handle, domid_t domid,
+int xc_altp2m_destroy_view(xc_interface *handle, uint32_t domid,
uint16_t view_id);
/* Switch all vCPUs of the domain to the specified altp2m view */
-int xc_altp2m_switch_to_view(xc_interface *handle, domid_t domid,
+int xc_altp2m_switch_to_view(xc_interface *handle, uint32_t domid,
uint16_t view_id);
-int xc_altp2m_set_mem_access(xc_interface *handle, domid_t domid,
+int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
uint16_t view_id, xen_pfn_t gfn,
xenmem_access_t access);
-int xc_altp2m_change_gfn(xc_interface *handle, domid_t domid,
+int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
uint16_t view_id, xen_pfn_t old_gfn,
xen_pfn_t new_gfn);
* Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
* support is considered experimental.
*/
-int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
-int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id);
-int xc_mem_paging_resume(xc_interface *xch, domid_t domain_id);
-int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id,
+int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
+int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id);
+int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id);
+int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id,
uint64_t gfn);
-int xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, uint64_t gfn);
-int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, uint64_t gfn);
-int xc_mem_paging_load(xc_interface *xch, domid_t domain_id,
+int xc_mem_paging_evict(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
+int xc_mem_paging_prep(xc_interface *xch, uint32_t domain_id, uint64_t gfn);
+int xc_mem_paging_load(xc_interface *xch, uint32_t domain_id,
uint64_t gfn, void *buffer);
/**
* Allowed types are XENMEM_access_default, XENMEM_access_n, any combination of
* XENMEM_access_ + (rwx), and XENMEM_access_rx2rw
*/
-int xc_set_mem_access(xc_interface *xch, domid_t domain_id,
+int xc_set_mem_access(xc_interface *xch, uint32_t domain_id,
xenmem_access_t access, uint64_t first_pfn,
uint32_t nr);
* The nr parameter specifies the size of the pages and access arrays.
* The same allowed access types as for xc_set_mem_access() apply.
*/
-int xc_set_mem_access_multi(xc_interface *xch, domid_t domain_id,
+int xc_set_mem_access_multi(xc_interface *xch, uint32_t domain_id,
uint8_t *access, uint64_t *pages,
uint32_t nr);
/*
* Gets the mem access for the given page (returned in access on success)
*/
-int xc_get_mem_access(xc_interface *xch, domid_t domain_id,
+int xc_get_mem_access(xc_interface *xch, uint32_t domain_id,
uint64_t pfn, xenmem_access_t *access);
/***
* Will return NULL on error.
* Caller has to unmap this page when done.
*/
-void *xc_monitor_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
-int xc_monitor_disable(xc_interface *xch, domid_t domain_id);
-int xc_monitor_resume(xc_interface *xch, domid_t domain_id);
+void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
+int xc_monitor_disable(xc_interface *xch, uint32_t domain_id);
+int xc_monitor_resume(xc_interface *xch, uint32_t domain_id);
/*
* Get a bitmap of supported monitor events in the form
* (1 << XEN_DOMCTL_MONITOR_EVENT_*).
*/
-int xc_monitor_get_capabilities(xc_interface *xch, domid_t domain_id,
+int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
uint32_t *capabilities);
-int xc_monitor_write_ctrlreg(xc_interface *xch, domid_t domain_id,
+int xc_monitor_write_ctrlreg(xc_interface *xch, uint32_t domain_id,
uint16_t index, bool enable, bool sync,
uint64_t bitmask, bool onchangeonly);
/*
* Please consult the Intel/AMD manuals for more information on
* non-architectural indices.
*/
-int xc_monitor_mov_to_msr(xc_interface *xch, domid_t domain_id, uint32_t msr,
+int xc_monitor_mov_to_msr(xc_interface *xch, uint32_t domain_id, uint32_t msr,
bool enable);
-int xc_monitor_singlestep(xc_interface *xch, domid_t domain_id, bool enable);
-int xc_monitor_software_breakpoint(xc_interface *xch, domid_t domain_id,
+int xc_monitor_singlestep(xc_interface *xch, uint32_t domain_id, bool enable);
+int xc_monitor_software_breakpoint(xc_interface *xch, uint32_t domain_id,
bool enable);
-int xc_monitor_descriptor_access(xc_interface *xch, domid_t domain_id,
+int xc_monitor_descriptor_access(xc_interface *xch, uint32_t domain_id,
bool enable);
-int xc_monitor_guest_request(xc_interface *xch, domid_t domain_id,
+int xc_monitor_guest_request(xc_interface *xch, uint32_t domain_id,
bool enable, bool sync, bool allow_userspace);
-int xc_monitor_debug_exceptions(xc_interface *xch, domid_t domain_id,
+int xc_monitor_debug_exceptions(xc_interface *xch, uint32_t domain_id,
bool enable, bool sync);
-int xc_monitor_cpuid(xc_interface *xch, domid_t domain_id, bool enable);
-int xc_monitor_privileged_call(xc_interface *xch, domid_t domain_id,
+int xc_monitor_cpuid(xc_interface *xch, uint32_t domain_id, bool enable);
+int xc_monitor_privileged_call(xc_interface *xch, uint32_t domain_id,
bool enable);
-int xc_monitor_emul_unimplemented(xc_interface *xch, domid_t domain_id,
+int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
bool enable);
/**
* This function enables / disables emulation for each REP for a
* @parm enable if 0 optimize when possible, else emulate each REP.
* @return 0 on success, -1 on failure.
*/
-int xc_monitor_emulate_each_rep(xc_interface *xch, domid_t domain_id,
+int xc_monitor_emulate_each_rep(xc_interface *xch, uint32_t domain_id,
bool enable);
/***
* When sharing for a domain is turned off, the domain may still reference
* shared pages. Unsharing happens lazily. */
int xc_memshr_control(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
int enable);
/* Create a communication ring in which the hypervisor will place ENOMEM
*/
int xc_memshr_ring_enable(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
uint32_t *port);
/* Disable the ring for ENOMEM communication.
* May fail with EINVAL if the ring was not enabled in the first place.
*/
int xc_memshr_ring_disable(xc_interface *xch,
- domid_t domid);
+ uint32_t domid);
/*
* Calls below return EINVAL if sharing has not been enabled for the domain
* notify the hypervisor to re-schedule the faulting vcpu of the domain with an
* event channel kick and/or this call. */
int xc_memshr_domain_resume(xc_interface *xch,
- domid_t domid);
+ uint32_t domid);
/* Select a page for sharing.
*
* ENOENT or EEXIST if there are internal hypervisor errors.
*/
int xc_memshr_nominate_gfn(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
unsigned long gfn,
uint64_t *handle);
/* Same as above, but instead of a guest frame number, the input is a grant
* May fail with EINVAL if the grant reference is invalid.
*/
int xc_memshr_nominate_gref(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
grant_ref_t gref,
uint64_t *handle);
* source. Either 3-tuple can be specified later for further re-sharing.
*/
int xc_memshr_share_gfns(xc_interface *xch,
- domid_t source_domain,
+ uint32_t source_domain,
unsigned long source_gfn,
uint64_t source_handle,
- domid_t client_domain,
+ uint32_t client_domain,
unsigned long client_gfn,
uint64_t client_handle);
* May fail with EINVAL if either grant reference is invalid.
*/
int xc_memshr_share_grefs(xc_interface *xch,
- domid_t source_domain,
+ uint32_t source_domain,
grant_ref_t source_gref,
uint64_t source_handle,
- domid_t client_domain,
+ uint32_t client_domain,
grant_ref_t client_gref,
uint64_t client_handle);
* ENOENT if there is an internal hypervisor error.
*/
int xc_memshr_add_to_physmap(xc_interface *xch,
- domid_t source_domain,
+ uint32_t source_domain,
unsigned long source_gfn,
uint64_t source_handle,
- domid_t client_domain,
+ uint32_t client_domain,
unsigned long client_gfn);
/* Allows to deduplicate a range of memory of a client domain. Using
* the sharing metadata before deduplication can happen.
*/
int xc_memshr_range_share(xc_interface *xch,
- domid_t source_domain,
- domid_t client_domain,
+ uint32_t source_domain,
+ uint32_t client_domain,
uint64_t first_gfn,
uint64_t last_gfn);
* argument.
*/
int xc_memshr_debug_gfn(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
unsigned long gfn);
/* May additionally fail with EINVAL if the grant reference is invalid. */
int xc_memshr_debug_gref(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
grant_ref_t gref);
/* Audits the share subsystem.
int xc_flask_avc_hashstats(xc_interface *xc_handle, char *buf, int size);
int xc_flask_getavc_threshold(xc_interface *xc_handle);
int xc_flask_setavc_threshold(xc_interface *xc_handle, int threshold);
-int xc_flask_relabel_domain(xc_interface *xch, int domid, uint32_t sid);
+int xc_flask_relabel_domain(xc_interface *xch, uint32_t domid, uint32_t sid);
struct elf_binary;
void xc_elf_set_logfile(xc_interface *xch, struct elf_binary *elf,
int xc_evtchn_fd(xc_evtchn *xce);
int xc_evtchn_notify(xc_evtchn *xce, evtchn_port_t port);
xc_evtchn_port_or_error_t
-xc_evtchn_bind_unbound_port(xc_evtchn *xce, int domid);
+xc_evtchn_bind_unbound_port(xc_evtchn *xce, uint32_t domid);
xc_evtchn_port_or_error_t
-xc_evtchn_bind_interdomain(xc_evtchn *xce, int domid,
+xc_evtchn_bind_interdomain(xc_evtchn *xce, uint32_t domid,
evtchn_port_t remote_port);
xc_evtchn_port_or_error_t
xc_evtchn_bind_virq(xc_evtchn *xce, unsigned int virq);
#ifdef XC_WANT_COMPAT_DEVICEMODEL_API
int xc_hvm_create_ioreq_server(
- xc_interface *xch, domid_t domid, int handle_bufioreq,
+ xc_interface *xch, uint32_t domid, int handle_bufioreq,
ioservid_t *id);
int xc_hvm_get_ioreq_server_info(
- xc_interface *xch, domid_t domid, ioservid_t id, xen_pfn_t *ioreq_pfn,
+ xc_interface *xch, uint32_t domid, ioservid_t id, xen_pfn_t *ioreq_pfn,
xen_pfn_t *bufioreq_pfn, evtchn_port_t *bufioreq_port);
int xc_hvm_map_io_range_to_ioreq_server(
- xc_interface *xch, domid_t domid, ioservid_t id, int is_mmio,
+ xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
uint64_t start, uint64_t end);
int xc_hvm_unmap_io_range_from_ioreq_server(
- xc_interface *xch, domid_t domid, ioservid_t id, int is_mmio,
+ xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
uint64_t start, uint64_t end);
int xc_hvm_map_pcidev_to_ioreq_server(
- xc_interface *xch, domid_t domid, ioservid_t id, uint16_t segment,
+ xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
uint8_t bus, uint8_t device, uint8_t function);
int xc_hvm_unmap_pcidev_from_ioreq_server(
- xc_interface *xch, domid_t domid, ioservid_t id, uint16_t segment,
+ xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
uint8_t bus, uint8_t device, uint8_t function);
int xc_hvm_destroy_ioreq_server(
- xc_interface *xch, domid_t domid, ioservid_t id);
+ xc_interface *xch, uint32_t domid, ioservid_t id);
int xc_hvm_set_ioreq_server_state(
- xc_interface *xch, domid_t domid, ioservid_t id, int enabled);
+ xc_interface *xch, uint32_t domid, ioservid_t id, int enabled);
int xc_hvm_set_pci_intx_level(
- xc_interface *xch, domid_t domid, uint16_t segment, uint8_t bus,
+ xc_interface *xch, uint32_t domid, uint16_t segment, uint8_t bus,
uint8_t device, uint8_t intx, unsigned int level);
int xc_hvm_set_isa_irq_level(
- xc_interface *xch, domid_t domid, uint8_t irq, unsigned int level);
+ xc_interface *xch, uint32_t domid, uint8_t irq, unsigned int level);
int xc_hvm_set_pci_link_route(
- xc_interface *xch, domid_t domid, uint8_t link, uint8_t irq);
+ xc_interface *xch, uint32_t domid, uint8_t link, uint8_t irq);
int xc_hvm_inject_msi(
- xc_interface *xch, domid_t domid, uint64_t msi_addr, uint32_t msi_data);
+ xc_interface *xch, uint32_t domid, uint64_t msi_addr, uint32_t msi_data);
int xc_hvm_track_dirty_vram(
- xc_interface *xch, domid_t domid, uint64_t first_pfn, uint32_t nr,
+ xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr,
unsigned long *dirty_bitmap);
int xc_hvm_modified_memory(
- xc_interface *xch, domid_t domid, uint64_t first_pfn, uint32_t nr);
+ xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr);
int xc_hvm_set_mem_type(
- xc_interface *xch, domid_t domid, hvmmem_type_t type,
+ xc_interface *xch, uint32_t domid, hvmmem_type_t type,
uint64_t first_pfn, uint32_t nr);
int xc_hvm_inject_trap(
- xc_interface *xch, domid_t domid, int vcpu, uint8_t vector,
+ xc_interface *xch, uint32_t domid, int vcpu, uint8_t vector,
uint8_t type, uint32_t error_code, uint8_t insn_len, uint64_t cr2);
#endif /* XC_WANT_COMPAT_DEVICEMODEL_API */
int (*wait_checkpoint)(void* data);
/* Enable qemu-dm logging dirty pages to xen */
- int (*switch_qemu_logdirty)(int domid, unsigned enable, void *data); /* HVM only */
+ int (*switch_qemu_logdirty)(uint32_t domid, unsigned enable, void *data); /* HVM only */
/* to be provided as the last argument to each callback function */
void* data;
*/
int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
unsigned int store_evtchn, unsigned long *store_mfn,
- domid_t store_domid, unsigned int console_evtchn,
- unsigned long *console_mfn, domid_t console_domid,
+ uint32_t store_domid, unsigned int console_evtchn,
+ unsigned long *console_mfn, uint32_t console_domid,
unsigned int hvm, unsigned int pae,
xc_migration_stream_t stream_type,
struct restore_callbacks *callbacks, int send_back_fd);
*/
int xc_suspend_evtchn_release(xc_interface *xch,
struct xenevtchn_handle *xce,
- int domid, int suspend_evtchn, int *lockfd);
+ uint32_t domid, int suspend_evtchn, int *lockfd);
/**
* This function eats the initial notification.
*/
int xc_suspend_evtchn_init_exclusive(xc_interface *xch,
struct xenevtchn_handle *xce,
- int domid, int port, int *lockfd);
+ uint32_t domid, int port, int *lockfd);
/* xce must not be used for anything else */
int xc_await_suspend(xc_interface *xch, struct xenevtchn_handle *xce,
*/
int xc_suspend_evtchn_init_sane(xc_interface *xch,
struct xenevtchn_handle *xce,
- int domid, int port, int *lockfd);
+ uint32_t domid, int port, int *lockfd);
int xc_mark_page_online(xc_interface *xch, unsigned long start,
unsigned long end, uint32_t *status);
int xc_query_page_offline_status(xc_interface *xch, unsigned long start,
unsigned long end, uint32_t *status);
-int xc_exchange_page(xc_interface *xch, int domid, xen_pfn_t mfn);
+int xc_exchange_page(xc_interface *xch, uint32_t domid, xen_pfn_t mfn);
/**
unsigned long p2m_size;
};
-int xc_map_domain_meminfo(xc_interface *xch, int domid,
+int xc_map_domain_meminfo(xc_interface *xch, uint32_t domid,
struct xc_domain_meminfo *minfo);
int xc_unmap_domain_meminfo(xc_interface *xch, struct xc_domain_meminfo *mem);
#include <stdbool.h>
#include <xen/hvm/hvm_op.h>
-int xc_altp2m_get_domain_state(xc_interface *handle, domid_t dom, bool *state)
+int xc_altp2m_get_domain_state(xc_interface *handle, uint32_t dom, bool *state)
{
int rc;
DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
return rc;
}
-int xc_altp2m_set_domain_state(xc_interface *handle, domid_t dom, bool state)
+int xc_altp2m_set_domain_state(xc_interface *handle, uint32_t dom, bool state)
{
int rc;
DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
}
/* This is a bit odd to me that it acts on current.. */
-int xc_altp2m_set_vcpu_enable_notify(xc_interface *handle, domid_t domid,
+int xc_altp2m_set_vcpu_enable_notify(xc_interface *handle, uint32_t domid,
uint32_t vcpuid, xen_pfn_t gfn)
{
int rc;
return rc;
}
-int xc_altp2m_create_view(xc_interface *handle, domid_t domid,
+int xc_altp2m_create_view(xc_interface *handle, uint32_t domid,
xenmem_access_t default_access, uint16_t *view_id)
{
int rc;
return rc;
}
-int xc_altp2m_destroy_view(xc_interface *handle, domid_t domid,
+int xc_altp2m_destroy_view(xc_interface *handle, uint32_t domid,
uint16_t view_id)
{
int rc;
}
/* Switch all vCPUs of the domain to the specified altp2m view */
-int xc_altp2m_switch_to_view(xc_interface *handle, domid_t domid,
+int xc_altp2m_switch_to_view(xc_interface *handle, uint32_t domid,
uint16_t view_id)
{
int rc;
return rc;
}
-int xc_altp2m_set_mem_access(xc_interface *handle, domid_t domid,
+int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
uint16_t view_id, xen_pfn_t gfn,
xenmem_access_t access)
{
return rc;
}
-int xc_altp2m_change_gfn(xc_interface *handle, domid_t domid,
+int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
uint16_t view_id, xen_pfn_t old_gfn,
xen_pfn_t new_gfn)
{
shared_info_any_t *live_shinfo,
xen_pfn_t **live_p2m, unsigned long *pfnp);
-int xc_core_arch_get_scratch_gpfn(xc_interface *xch, domid_t domid,
+int xc_core_arch_get_scratch_gpfn(xc_interface *xch, uint32_t domid,
xen_pfn_t *gpfn);
}
int
-xc_core_arch_get_scratch_gpfn(xc_interface *xch, domid_t domid,
+xc_core_arch_get_scratch_gpfn(xc_interface *xch, uint32_t domid,
xen_pfn_t *gpfn)
{
/*
}
int
-xc_core_arch_get_scratch_gpfn(xc_interface *xch, domid_t domid,
+xc_core_arch_get_scratch_gpfn(xc_interface *xch, uint32_t domid,
xen_pfn_t *gpfn)
{
return xc_domain_nr_gpfns(xch, domid, gpfn);
#endif
}
-static int get_cpuid_domain_info(xc_interface *xch, domid_t domid,
+static int get_cpuid_domain_info(xc_interface *xch, uint32_t domid,
struct cpuid_domain_info *info,
uint32_t *featureset,
unsigned int nr_features)
}
static int xc_cpuid_do_domctl(
- xc_interface *xch, domid_t domid,
+ xc_interface *xch, uint32_t domid,
const unsigned int *input, const unsigned int *regs)
{
DECLARE_DOMCTL;
}
}
-int xc_cpuid_apply_policy(xc_interface *xch, domid_t domid,
+int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid,
uint32_t *featureset,
unsigned int nr_features)
{
* For 's' and 'x' the configuration is overwritten with the value applied.
*/
int xc_cpuid_set(
- xc_interface *xch, domid_t domid, const unsigned int *input,
+ xc_interface *xch, uint32_t domid, const unsigned int *input,
const char **config, char **config_transformed)
{
int rc;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_scheduler_op;
- domctl.domain = (domid_t) domid;
+ domctl.domain = domid;
domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
domctl.u.scheduler_op.u.credit = *sdom;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_scheduler_op;
- domctl.domain = (domid_t) domid;
+ domctl.domain = domid;
domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;
domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_scheduler_op;
- domctl.domain = (domid_t) domid;
+ domctl.domain = domid;
domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT2;
domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
domctl.u.scheduler_op.u.credit2 = *sdom;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_scheduler_op;
- domctl.domain = (domid_t) domid;
+ domctl.domain = domid;
domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT2;
domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
#include "xc_private.h"
int xc_hvm_create_ioreq_server(
- xc_interface *xch, domid_t domid, int handle_bufioreq,
+ xc_interface *xch, uint32_t domid, int handle_bufioreq,
ioservid_t *id)
{
return xendevicemodel_create_ioreq_server(xch->dmod, domid,
}
int xc_hvm_get_ioreq_server_info(
- xc_interface *xch, domid_t domid, ioservid_t id, xen_pfn_t *ioreq_pfn,
+ xc_interface *xch, uint32_t domid, ioservid_t id, xen_pfn_t *ioreq_pfn,
xen_pfn_t *bufioreq_pfn, evtchn_port_t *bufioreq_port)
{
return xendevicemodel_get_ioreq_server_info(xch->dmod, domid, id,
}
int xc_hvm_map_io_range_to_ioreq_server(
- xc_interface *xch, domid_t domid, ioservid_t id, int is_mmio,
+ xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
uint64_t start, uint64_t end)
{
return xendevicemodel_map_io_range_to_ioreq_server(xch->dmod, domid,
}
int xc_hvm_unmap_io_range_from_ioreq_server(
- xc_interface *xch, domid_t domid, ioservid_t id, int is_mmio,
+ xc_interface *xch, uint32_t domid, ioservid_t id, int is_mmio,
uint64_t start, uint64_t end)
{
return xendevicemodel_unmap_io_range_from_ioreq_server(xch->dmod, domid,
}
int xc_hvm_map_pcidev_to_ioreq_server(
- xc_interface *xch, domid_t domid, ioservid_t id, uint16_t segment,
+ xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
uint8_t bus, uint8_t device, uint8_t function)
{
return xendevicemodel_map_pcidev_to_ioreq_server(xch->dmod, domid, id,
}
int xc_hvm_unmap_pcidev_from_ioreq_server(
- xc_interface *xch, domid_t domid, ioservid_t id, uint16_t segment,
+ xc_interface *xch, uint32_t domid, ioservid_t id, uint16_t segment,
uint8_t bus, uint8_t device, uint8_t function)
{
return xendevicemodel_unmap_pcidev_from_ioreq_server(xch->dmod, domid,
}
int xc_hvm_destroy_ioreq_server(
- xc_interface *xch, domid_t domid, ioservid_t id)
+ xc_interface *xch, uint32_t domid, ioservid_t id)
{
return xendevicemodel_destroy_ioreq_server(xch->dmod, domid, id);
}
int xc_hvm_set_ioreq_server_state(
- xc_interface *xch, domid_t domid, ioservid_t id, int enabled)
+ xc_interface *xch, uint32_t domid, ioservid_t id, int enabled)
{
return xendevicemodel_set_ioreq_server_state(xch->dmod, domid, id,
enabled);
}
int xc_hvm_set_pci_intx_level(
- xc_interface *xch, domid_t domid, uint16_t segment, uint8_t bus,
+ xc_interface *xch, uint32_t domid, uint16_t segment, uint8_t bus,
uint8_t device, uint8_t intx, unsigned int level)
{
return xendevicemodel_set_pci_intx_level(xch->dmod, domid, segment,
}
int xc_hvm_set_isa_irq_level(
- xc_interface *xch, domid_t domid, uint8_t irq, unsigned int level)
+ xc_interface *xch, uint32_t domid, uint8_t irq, unsigned int level)
{
return xendevicemodel_set_isa_irq_level(xch->dmod, domid, irq, level);
}
int xc_hvm_set_pci_link_route(
- xc_interface *xch, domid_t domid, uint8_t link, uint8_t irq)
+ xc_interface *xch, uint32_t domid, uint8_t link, uint8_t irq)
{
return xendevicemodel_set_pci_link_route(xch->dmod, domid, link, irq);
}
int xc_hvm_inject_msi(
- xc_interface *xch, domid_t domid, uint64_t msi_addr, uint32_t msi_data)
+ xc_interface *xch, uint32_t domid, uint64_t msi_addr, uint32_t msi_data)
{
return xendevicemodel_inject_msi(xch->dmod, domid, msi_addr, msi_data);
}
int xc_hvm_track_dirty_vram(
- xc_interface *xch, domid_t domid, uint64_t first_pfn, uint32_t nr,
+ xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr,
unsigned long *dirty_bitmap)
{
return xendevicemodel_track_dirty_vram(xch->dmod, domid, first_pfn,
}
int xc_hvm_modified_memory(
- xc_interface *xch, domid_t domid, uint64_t first_pfn, uint32_t nr)
+ xc_interface *xch, uint32_t domid, uint64_t first_pfn, uint32_t nr)
{
return xendevicemodel_modified_memory(xch->dmod, domid, first_pfn, nr);
}
int xc_hvm_set_mem_type(
- xc_interface *xch, domid_t domid, hvmmem_type_t type,
+ xc_interface *xch, uint32_t domid, hvmmem_type_t type,
uint64_t first_pfn, uint32_t nr)
{
return xendevicemodel_set_mem_type(xch->dmod, domid, type, first_pfn,
}
int xc_hvm_inject_trap(
- xc_interface *xch, domid_t domid, int vcpu, uint8_t vector,
+ xc_interface *xch, uint32_t domid, int vcpu, uint8_t vector,
uint8_t type, uint32_t error_code, uint8_t insn_len, uint64_t cr2)
{
return xendevicemodel_inject_event(xch->dmod, domid, vcpu, vector,
/* ------------------------------------------------------------------------ */
-static int set_mode(xc_interface *xch, domid_t domid, char *guest_type)
+static int set_mode(xc_interface *xch, uint32_t domid, char *guest_type)
{
static const struct {
char *guest;
return found;
}
-int xc_dom_boot_xen_init(struct xc_dom_image *dom, xc_interface *xch, domid_t domid)
+int xc_dom_boot_xen_init(struct xc_dom_image *dom, xc_interface *xch, uint32_t domid)
{
dom->xch = xch;
dom->guest_domid = domid;
return rc;
}
-static xen_pfn_t xc_dom_gnttab_setup(xc_interface *xch, domid_t domid)
+static xen_pfn_t xc_dom_gnttab_setup(xc_interface *xch, uint32_t domid)
{
gnttab_setup_table_t setup;
DECLARE_HYPERCALL_BUFFER(xen_pfn_t, gmfnp);
return gmfn;
}
-int xc_dom_gnttab_seed(xc_interface *xch, domid_t domid,
+int xc_dom_gnttab_seed(xc_interface *xch, uint32_t domid,
xen_pfn_t console_gmfn,
xen_pfn_t xenstore_gmfn,
- domid_t console_domid,
- domid_t xenstore_domid)
+ uint32_t console_domid,
+ uint32_t xenstore_domid)
{
xen_pfn_t gnttab_gmfn;
return 0;
}
-int xc_dom_gnttab_hvm_seed(xc_interface *xch, domid_t domid,
+int xc_dom_gnttab_hvm_seed(xc_interface *xch, uint32_t domid,
xen_pfn_t console_gpfn,
xen_pfn_t xenstore_gpfn,
- domid_t console_domid,
- domid_t xenstore_domid)
+ uint32_t console_domid,
+ uint32_t xenstore_domid)
{
int rc;
xen_pfn_t scratch_gpfn;
/* ------------------------------------------------------------------------ */
-static int x86_compat(xc_interface *xch, domid_t domid, char *guest_type)
+static int x86_compat(xc_interface *xch, uint32_t domid, char *guest_type)
{
static const struct {
char *guest;
}
domctl.cmd = XEN_DOMCTL_createdomain;
- domctl.domain = (domid_t)*pdomid;
+ domctl.domain = *pdomid;
domctl.u.createdomain.ssidref = ssidref;
domctl.u.createdomain.flags = flags;
memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
#else
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_cacheflush;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.cacheflush.start_pfn = start_pfn;
domctl.u.cacheflush.nr_pfns = nr_pfns;
return do_domctl(xch, &domctl);
{
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_pausedomain;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
return do_domctl(xch, &domctl);
}
{
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_unpausedomain;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
return do_domctl(xch, &domctl);
}
{
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_destroydomain;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
return do_domctl(xch, &domctl);
}
}
domctl.cmd = XEN_DOMCTL_setnodeaffinity;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
memcpy(local, nodemap, nodesize);
set_xen_guest_handle(domctl.u.nodeaffinity.nodemap.bitmap, local);
}
domctl.cmd = XEN_DOMCTL_getnodeaffinity;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
set_xen_guest_handle(domctl.u.nodeaffinity.nodemap.bitmap, local);
domctl.u.nodeaffinity.nodemap.nr_bits = nodesize * 8;
}
domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.vcpuaffinity.vcpu = vcpu;
domctl.u.vcpuaffinity.flags = flags;
}
domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.vcpuaffinity.vcpu = vcpu;
domctl.u.vcpuaffinity.flags = flags;
int xc_dom_vuart_init(xc_interface *xch,
uint32_t type,
- domid_t domid,
- domid_t console_domid,
+ uint32_t domid,
+ uint32_t console_domid,
xen_pfn_t gfn,
evtchn_port_t *evtchn)
{
for ( nr_doms = 0; nr_doms < max_doms; nr_doms++ )
{
domctl.cmd = XEN_DOMCTL_getdomaininfo;
- domctl.domain = (domid_t)next_domid;
+ domctl.domain = next_domid;
if ( (rc = do_domctl(xch, &domctl)) < 0 )
break;
- info->domid = (uint16_t)domctl.domain;
+ info->domid = domctl.domain;
info->dying = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_dying);
info->shutdown = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_shutdown);
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_set_broken_page_p2m;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.set_broken_page_p2m.pfn = pfn;
ret = do_domctl(xch, &domctl);
return -1;
domctl.cmd = XEN_DOMCTL_gethvmcontext;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.hvmcontext.size = size;
set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
return -1;
domctl.cmd = XEN_DOMCTL_gethvmcontext_partial;
- domctl.domain = (domid_t) domid;
+ domctl.domain = domid;
domctl.u.hvmcontext_partial.type = typecode;
domctl.u.hvmcontext_partial.instance = instance;
domctl.u.hvmcontext_partial.bufsz = size;
return -1;
domctl.cmd = XEN_DOMCTL_getvcpucontext;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.vcpucontext.vcpu = (uint16_t)vcpu;
set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
return -EINVAL;
domctl.cmd = XEN_DOMCTL_getvcpuextstate;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.vcpuextstate.vcpu = (uint16_t)vcpu;
domctl.u.vcpuextstate.xfeature_mask = extstate->xfeature_mask;
domctl.u.vcpuextstate.size = extstate->size;
memset(&domctl, 0, sizeof(domctl));
domctl.cmd = XEN_DOMCTL_shadow_op;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.shadow_op.op = sop;
domctl.u.shadow_op.pages = pages;
domctl.u.shadow_op.mb = mb ? *mb : 0;
{
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_max_mem;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.max_mem.max_memkb = max_memkb;
return do_domctl(xch, &domctl);
}
{
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_pin_mem_cacheattr;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.pin_mem_cacheattr.start = start;
domctl.u.pin_mem_cacheattr.end = end;
domctl.u.pin_mem_cacheattr.type = type;
{
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_settimeoffset;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.settimeoffset.time_offset_seconds = time_offset_seconds;
return do_domctl(xch, &domctl);
}
{
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_disable_migrate;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.disable_migrate.disable = 1;
return do_domctl(xch, &domctl);
}
{
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_settscinfo;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.tsc_info.tsc_mode = tsc_mode;
domctl.u.tsc_info.elapsed_nsec = elapsed_nsec;
domctl.u.tsc_info.gtsc_khz = gtsc_khz;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_gettscinfo;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
rc = do_domctl(xch, &domctl);
if ( rc == 0 )
{
}
-int xc_domain_maximum_gpfn(xc_interface *xch, domid_t domid, xen_pfn_t *gpfns)
+int xc_domain_maximum_gpfn(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns)
{
long rc = do_memory_op(xch, XENMEM_maximum_gpfn, &domid, sizeof(domid));
return rc;
}
-int xc_domain_nr_gpfns(xc_interface *xch, domid_t domid, xen_pfn_t *gpfns)
+int xc_domain_nr_gpfns(xc_interface *xch, uint32_t domid, xen_pfn_t *gpfns)
{
int rc = xc_domain_maximum_gpfn(xch, domid, gpfns);
}
int xc_domain_add_to_physmap_batch(xc_interface *xch,
- domid_t domid,
- domid_t foreign_domid,
+ uint32_t domid,
+ uint32_t foreign_domid,
unsigned int space,
unsigned int size,
xen_ulong_t *idxs,
}
int xc_domain_memory_exchange_pages(xc_interface *xch,
- int domid,
+ uint32_t domid,
unsigned long nr_in_extents,
unsigned int in_order,
xen_pfn_t *in_extents,
{
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_max_vcpus;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.max_vcpus.max = max;
return do_domctl(xch, &domctl);
}
{
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_setdomainhandle;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
memcpy(domctl.u.setdomainhandle.handle, handle,
sizeof(xen_domain_handle_t));
return do_domctl(xch, &domctl);
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_getvcpuinfo;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.getvcpuinfo.vcpu = (uint16_t)vcpu;
rc = do_domctl(xch, &domctl);
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_ioport_permission;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.ioport_permission.first_port = first_port;
domctl.u.ioport_permission.nr_ports = nr_ports;
domctl.u.ioport_permission.allow_access = allow_access;
return 0;
}
-int xc_hvm_param_set(xc_interface *handle, domid_t dom, uint32_t param, uint64_t value)
+int xc_hvm_param_set(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t value)
{
DECLARE_HYPERCALL_BUFFER(xen_hvm_param_t, arg);
int rc = xc_hvm_param_deprecated_check(param);
return rc;
}
-int xc_hvm_param_get(xc_interface *handle, domid_t dom, uint32_t param, uint64_t *value)
+int xc_hvm_param_get(xc_interface *handle, uint32_t dom, uint32_t param, uint64_t *value)
{
DECLARE_HYPERCALL_BUFFER(xen_hvm_param_t, arg);
int rc = xc_hvm_param_deprecated_check(param);
return rc;
}
-int xc_set_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long value)
+int xc_set_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long value)
{
return xc_hvm_param_set(handle, dom, param, value);
}
-int xc_get_hvm_param(xc_interface *handle, domid_t dom, int param, unsigned long *value)
+int xc_get_hvm_param(xc_interface *handle, uint32_t dom, int param, unsigned long *value)
{
uint64_t v;
int ret;
}
domctl.cmd = XEN_DOMCTL_get_device_group;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.get_device_group.machine_sbdf = machine_sbdf;
domctl.u.get_device_group.max_sdevs = max_sdevs;
return -1;
domctl.cmd = XEN_DOMCTL_assign_device;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.assign_device.dev = XEN_DOMCTL_DEV_DT;
domctl.u.assign_device.u.dt.size = size;
return -1;
domctl.cmd = XEN_DOMCTL_test_assign_device;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.assign_device.dev = XEN_DOMCTL_DEV_DT;
domctl.u.assign_device.u.dt.size = size;
return -1;
domctl.cmd = XEN_DOMCTL_deassign_device;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.assign_device.dev = XEN_DOMCTL_DEV_DT;
domctl.u.assign_device.u.dt.size = size;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_bind_pt_irq;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
bind = &(domctl.u.bind_pt_irq);
bind->irq_type = PT_IRQ_TYPE_MSI;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_unbind_pt_irq;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
bind = &(domctl.u.bind_pt_irq);
bind->irq_type = PT_IRQ_TYPE_MSI;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_bind_pt_irq;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
bind = &(domctl.u.bind_pt_irq);
bind->irq_type = irq_type;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_unbind_pt_irq;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
bind = &(domctl.u.bind_pt_irq);
bind->irq_type = irq_type;
return 0;
}
-int xc_map_domain_meminfo(xc_interface *xch, int domid,
+int xc_map_domain_meminfo(xc_interface *xch, uint32_t domid,
struct xc_domain_meminfo *minfo)
{
struct domain_info_context _di;
}
int xc_domain_subscribe_for_suspend(
- xc_interface *xch, domid_t dom, evtchn_port_t port)
+ xc_interface *xch, uint32_t dom, evtchn_port_t port)
{
DECLARE_DOMCTL;
DECLARE_DOMCTL;
memset(&domctl, 0, sizeof(domctl));
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.cmd = XEN_DOMCTL_debug_op;
domctl.u.debug_op.op = sop;
domctl.u.debug_op.vcpu = vcpu;
return do_domctl(xch, &domctl);
}
-int xc_domain_set_gnttab_limits(xc_interface *xch, domid_t domid,
+int xc_domain_set_gnttab_limits(xc_interface *xch, uint32_t domid,
uint32_t grant_frames,
uint32_t maptrack_frames)
{
set_xen_guest_handle(domctl.u.vnuma.vnode_to_pnode, vnode_to_pnode);
domctl.cmd = XEN_DOMCTL_setvnumainfo;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.vnuma.nr_vnodes = nr_vnodes;
domctl.u.vnuma.nr_vmemranges = nr_vmemranges;
domctl.u.vnuma.nr_vcpus = nr_vcpus;
{
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_soft_reset;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
return do_domctl(xch, &domctl);
}
/*
{
int rc;
struct evtchn_alloc_unbound arg = {
- .dom = (domid_t)dom,
- .remote_dom = (domid_t)remote_dom
+ .dom = dom,
+ .remote_dom = remote_dom,
};
rc = do_evtchn_op(xch, EVTCHNOP_alloc_unbound, &arg, sizeof(arg), 0);
int xc_evtchn_reset(xc_interface *xch,
uint32_t dom)
{
- struct evtchn_reset arg = { .dom = (domid_t)dom };
+ struct evtchn_reset arg = { .dom = dom };
return do_evtchn_op(xch, EVTCHNOP_reset, &arg, sizeof(arg), 0);
}
}
evtchn_port_or_error_t
-xc_evtchn_bind_unbound_port(xc_evtchn *xce, int domid)
+xc_evtchn_bind_unbound_port(xc_evtchn *xce, uint32_t domid)
{
return xenevtchn_bind_unbound_port(xce, domid);
}
evtchn_port_or_error_t
-xc_evtchn_bind_interdomain(xc_evtchn *xce, int domid,
+xc_evtchn_bind_interdomain(xc_evtchn *xce, uint32_t domid,
evtchn_port_t remote_port)
{
return xenevtchn_bind_interdomain(xce, domid, remote_port);
return xc_flask_op(xch, &op);
}
-int xc_flask_relabel_domain(xc_interface *xch, int domid, uint32_t sid)
+int xc_flask_relabel_domain(xc_interface *xch, uint32_t domid, uint32_t sid)
{
DECLARE_FLASK_OP;
op.cmd = FLASK_RELABEL_DOMAIN;
return rc;
}
-int xc_gnttab_get_version(xc_interface *xch, int domid)
+int xc_gnttab_get_version(xc_interface *xch, uint32_t domid)
{
struct gnttab_get_version query;
int rc;
return query.version;
}
-static void *_gnttab_map_table(xc_interface *xch, int domid, int *gnt_num)
+static void *_gnttab_map_table(xc_interface *xch, uint32_t domid, int *gnt_num)
{
int rc, i;
struct gnttab_query_size query;
return gnt;
}
-grant_entry_v1_t *xc_gnttab_map_table_v1(xc_interface *xch, int domid,
+grant_entry_v1_t *xc_gnttab_map_table_v1(xc_interface *xch, uint32_t domid,
int *gnt_num)
{
if (xc_gnttab_get_version(xch, domid) == 2)
return _gnttab_map_table(xch, domid, gnt_num);
}
-grant_entry_v2_t *xc_gnttab_map_table_v2(xc_interface *xch, int domid,
+grant_entry_v2_t *xc_gnttab_map_table_v2(xc_interface *xch, uint32_t domid,
int *gnt_num)
{
if (xc_gnttab_get_version(xch, domid) != 2)
#include <xen/memory.h>
int xc_set_mem_access(xc_interface *xch,
- domid_t domain_id,
+ uint32_t domain_id,
xenmem_access_t access,
uint64_t first_pfn,
uint32_t nr)
}
int xc_set_mem_access_multi(xc_interface *xch,
- domid_t domain_id,
+ uint32_t domain_id,
uint8_t *access,
uint64_t *pages,
uint32_t nr)
}
int xc_get_mem_access(xc_interface *xch,
- domid_t domain_id,
+ uint32_t domain_id,
uint64_t pfn,
xenmem_access_t *access)
{
#include "xc_private.h"
-static int xc_mem_paging_memop(xc_interface *xch, domid_t domain_id,
+static int xc_mem_paging_memop(xc_interface *xch, uint32_t domain_id,
unsigned int op, uint64_t gfn, void *buffer)
{
xen_mem_paging_op_t mpo;
return do_memory_op(xch, XENMEM_paging_op, &mpo, sizeof(mpo));
}
-int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
+int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id,
uint32_t *port)
{
if ( !port )
port);
}
-int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id)
+int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id)
{
return xc_vm_event_control(xch, domain_id,
XEN_VM_EVENT_DISABLE,
NULL);
}
-int xc_mem_paging_resume(xc_interface *xch, domid_t domain_id)
+int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id)
{
return xc_vm_event_control(xch, domain_id,
XEN_VM_EVENT_RESUME,
NULL);
}
-int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id, uint64_t gfn)
+int xc_mem_paging_nominate(xc_interface *xch, uint32_t domain_id, uint64_t gfn)
{
return xc_mem_paging_memop(xch, domain_id,
XENMEM_paging_op_nominate,
gfn, NULL);
}
-int xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, uint64_t gfn)
+int xc_mem_paging_evict(xc_interface *xch, uint32_t domain_id, uint64_t gfn)
{
return xc_mem_paging_memop(xch, domain_id,
XENMEM_paging_op_evict,
gfn, NULL);
}
-int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, uint64_t gfn)
+int xc_mem_paging_prep(xc_interface *xch, uint32_t domain_id, uint64_t gfn)
{
return xc_mem_paging_memop(xch, domain_id,
XENMEM_paging_op_prep,
gfn, NULL);
}
-int xc_mem_paging_load(xc_interface *xch, domid_t domain_id,
+int xc_mem_paging_load(xc_interface *xch, uint32_t domain_id,
uint64_t gfn, void *buffer)
{
int rc, old_errno;
#include <xen/grant_table.h>
int xc_memshr_control(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
int enable)
{
DECLARE_DOMCTL;
}
int xc_memshr_ring_enable(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
uint32_t *port)
{
if ( !port )
}
int xc_memshr_ring_disable(xc_interface *xch,
- domid_t domid)
+ uint32_t domid)
{
return xc_vm_event_control(xch, domid,
XEN_VM_EVENT_DISABLE,
NULL);
}
-static int xc_memshr_memop(xc_interface *xch, domid_t domid,
+static int xc_memshr_memop(xc_interface *xch, uint32_t domid,
xen_mem_sharing_op_t *mso)
{
mso->domain = domid;
}
int xc_memshr_nominate_gfn(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
unsigned long gfn,
uint64_t *handle)
{
}
int xc_memshr_nominate_gref(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
grant_ref_t gref,
uint64_t *handle)
{
}
int xc_memshr_share_gfns(xc_interface *xch,
- domid_t source_domain,
+ uint32_t source_domain,
unsigned long source_gfn,
uint64_t source_handle,
- domid_t client_domain,
+ uint32_t client_domain,
unsigned long client_gfn,
uint64_t client_handle)
{
}
int xc_memshr_share_grefs(xc_interface *xch,
- domid_t source_domain,
+ uint32_t source_domain,
grant_ref_t source_gref,
uint64_t source_handle,
- domid_t client_domain,
+ uint32_t client_domain,
grant_ref_t client_gref,
uint64_t client_handle)
{
}
int xc_memshr_add_to_physmap(xc_interface *xch,
- domid_t source_domain,
+ uint32_t source_domain,
unsigned long source_gfn,
uint64_t source_handle,
- domid_t client_domain,
+ uint32_t client_domain,
unsigned long client_gfn)
{
xen_mem_sharing_op_t mso;
}
int xc_memshr_range_share(xc_interface *xch,
- domid_t source_domain,
- domid_t client_domain,
+ uint32_t source_domain,
+ uint32_t client_domain,
uint64_t first_gfn,
uint64_t last_gfn)
{
}
int xc_memshr_domain_resume(xc_interface *xch,
- domid_t domid)
+ uint32_t domid)
{
return xc_vm_event_control(xch, domid,
XEN_VM_EVENT_RESUME,
}
int xc_memshr_debug_gfn(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
unsigned long gfn)
{
xen_mem_sharing_op_t mso;
}
int xc_memshr_debug_gref(xc_interface *xch,
- domid_t domid,
+ uint32_t domid,
grant_ref_t gref)
{
xen_mem_sharing_op_t mso;
#include "xc_private.h"
-void *xc_monitor_enable(xc_interface *xch, domid_t domain_id, uint32_t *port)
+void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port)
{
return xc_vm_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
port);
}
-int xc_monitor_disable(xc_interface *xch, domid_t domain_id)
+int xc_monitor_disable(xc_interface *xch, uint32_t domain_id)
{
return xc_vm_event_control(xch, domain_id,
XEN_VM_EVENT_DISABLE,
NULL);
}
-int xc_monitor_resume(xc_interface *xch, domid_t domain_id)
+int xc_monitor_resume(xc_interface *xch, uint32_t domain_id)
{
return xc_vm_event_control(xch, domain_id,
XEN_VM_EVENT_RESUME,
NULL);
}
-int xc_monitor_get_capabilities(xc_interface *xch, domid_t domain_id,
+int xc_monitor_get_capabilities(xc_interface *xch, uint32_t domain_id,
uint32_t *capabilities)
{
int rc;
return 0;
}
-int xc_monitor_write_ctrlreg(xc_interface *xch, domid_t domain_id,
+int xc_monitor_write_ctrlreg(xc_interface *xch, uint32_t domain_id,
uint16_t index, bool enable, bool sync,
uint64_t bitmask, bool onchangeonly)
{
return do_domctl(xch, &domctl);
}
-int xc_monitor_mov_to_msr(xc_interface *xch, domid_t domain_id, uint32_t msr,
+int xc_monitor_mov_to_msr(xc_interface *xch, uint32_t domain_id, uint32_t msr,
bool enable)
{
DECLARE_DOMCTL;
return do_domctl(xch, &domctl);
}
-int xc_monitor_software_breakpoint(xc_interface *xch, domid_t domain_id,
+int xc_monitor_software_breakpoint(xc_interface *xch, uint32_t domain_id,
bool enable)
{
DECLARE_DOMCTL;
return do_domctl(xch, &domctl);
}
-int xc_monitor_singlestep(xc_interface *xch, domid_t domain_id,
+int xc_monitor_singlestep(xc_interface *xch, uint32_t domain_id,
bool enable)
{
DECLARE_DOMCTL;
return do_domctl(xch, &domctl);
}
-int xc_monitor_descriptor_access(xc_interface *xch, domid_t domain_id,
+int xc_monitor_descriptor_access(xc_interface *xch, uint32_t domain_id,
bool enable)
{
DECLARE_DOMCTL;
return do_domctl(xch, &domctl);
}
-int xc_monitor_guest_request(xc_interface *xch, domid_t domain_id, bool enable,
+int xc_monitor_guest_request(xc_interface *xch, uint32_t domain_id, bool enable,
bool sync, bool allow_userspace)
{
DECLARE_DOMCTL;
return do_domctl(xch, &domctl);
}
-int xc_monitor_emulate_each_rep(xc_interface *xch, domid_t domain_id,
+int xc_monitor_emulate_each_rep(xc_interface *xch, uint32_t domain_id,
bool enable)
{
DECLARE_DOMCTL;
return do_domctl(xch, &domctl);
}
-int xc_monitor_debug_exceptions(xc_interface *xch, domid_t domain_id,
+int xc_monitor_debug_exceptions(xc_interface *xch, uint32_t domain_id,
bool enable, bool sync)
{
DECLARE_DOMCTL;
return do_domctl(xch, &domctl);
}
-int xc_monitor_cpuid(xc_interface *xch, domid_t domain_id, bool enable)
+int xc_monitor_cpuid(xc_interface *xch, uint32_t domain_id, bool enable)
{
DECLARE_DOMCTL;
return do_domctl(xch, &domctl);
}
-int xc_monitor_privileged_call(xc_interface *xch, domid_t domain_id,
+int xc_monitor_privileged_call(xc_interface *xch, uint32_t domain_id,
bool enable)
{
DECLARE_DOMCTL;
return do_domctl(xch, &domctl);
}
-int xc_monitor_emul_unimplemented(xc_interface *xch, domid_t domain_id,
+int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
bool enable)
{
DECLARE_DOMCTL;
int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
unsigned int store_evtchn, unsigned long *store_mfn,
- domid_t store_domid, unsigned int console_evtchn,
- unsigned long *console_mfn, domid_t console_domid,
+ uint32_t store_domid, unsigned int console_evtchn,
+ unsigned long *console_mfn, uint32_t console_domid,
unsigned int hvm, unsigned int pae,
xc_migration_stream_t stream_type,
struct restore_callbacks *callbacks, int send_back_fd)
return 0;
}
-static int change_pte(xc_interface *xch, int domid,
+static int change_pte(xc_interface *xch, uint32_t domid,
struct xc_domain_meminfo *minfo,
struct pte_backup *backup,
struct xc_mmu *mmu,
return -1;
}
-static int update_pte(xc_interface *xch, int domid,
+static int update_pte(xc_interface *xch, uint32_t domid,
struct xc_domain_meminfo *minfo,
struct pte_backup *backup,
struct xc_mmu *mmu,
__update_pte, new_mfn);
}
-static int clear_pte(xc_interface *xch, int domid,
+static int clear_pte(xc_interface *xch, uint32_t domid,
struct xc_domain_meminfo *minfo,
struct pte_backup *backup,
struct xc_mmu *mmu,
* Check if a page can be exchanged successfully
*/
-static int is_page_exchangable(xc_interface *xch, int domid, xen_pfn_t mfn,
+static int is_page_exchangable(xc_interface *xch, uint32_t domid, xen_pfn_t mfn,
xc_dominfo_t *info)
{
uint32_t status;
}
/* The domain should be suspended when called here */
-int xc_exchange_page(xc_interface *xch, int domid, xen_pfn_t mfn)
+int xc_exchange_page(xc_interface *xch, uint32_t domid, xen_pfn_t mfn)
{
xc_dominfo_t info;
struct xc_domain_meminfo minfo;
}
int xc_physdev_map_pirq(xc_interface *xch,
- int domid,
+ uint32_t domid,
int index,
int *pirq)
{
}
int xc_physdev_map_pirq_msi(xc_interface *xch,
- int domid,
+ uint32_t domid,
int index,
int *pirq,
int devfn,
}
int xc_physdev_unmap_pirq(xc_interface *xch,
- int domid,
+ uint32_t domid,
int pirq)
{
int rc;
if ( xc_hypercall_bounce_pre(xch, arr) )
return -1;
domctl.cmd = XEN_DOMCTL_getpageframeinfo3;
- domctl.domain = (domid_t)dom;
+ domctl.domain = dom;
domctl.u.getpageframeinfo3.num = num;
set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
rc = do_domctl(xch, &domctl);
xc_interface *xch,
struct mmuext_op *op,
unsigned int nr_ops,
- domid_t dom)
+ uint32_t dom)
{
DECLARE_HYPERCALL_BOUNCE(op, nr_ops*sizeof(*op), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
long ret = -1;
return rc;
}
-long long xc_domain_get_cpu_usage( xc_interface *xch, domid_t domid, int vcpu )
+long long xc_domain_get_cpu_usage(xc_interface *xch, uint32_t domid, int vcpu)
{
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_getvcpuinfo;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.getvcpuinfo.vcpu = (uint16_t)vcpu;
if ( (do_domctl(xch, &domctl) < 0) )
{
}
domctl.cmd = XEN_DOMCTL_getmemlist;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.getmemlist.max_pfns = max_pfns;
set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);
/**
* vm_event operations. Internal use only.
*/
-int xc_vm_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
+int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
unsigned int mode, uint32_t *port);
/*
* Enables vm_event and returns the mapped ring page indicated by param.
* param can be HVM_PARAM_PAGING/ACCESS/SHARING_RING_PFN
*/
-void *xc_vm_event_enable(xc_interface *xch, domid_t domain_id, int param,
+void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
uint32_t *port);
-int do_dm_op(xc_interface *xch, domid_t domid, unsigned int nr_bufs, ...);
+int do_dm_op(xc_interface *xch, uint32_t domid, unsigned int nr_bufs, ...);
#endif /* __XC_PRIVATE_H__ */
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_psr_cmt_op;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.psr_cmt_op.cmd = XEN_DOMCTL_PSR_CMT_OP_ATTACH;
return do_domctl(xch, &domctl);
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_psr_cmt_op;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.psr_cmt_op.cmd = XEN_DOMCTL_PSR_CMT_OP_DETACH;
return do_domctl(xch, &domctl);
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_psr_cmt_op;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.psr_cmt_op.cmd = XEN_DOMCTL_PSR_CMT_OP_QUERY_RMID;
rc = do_domctl(xch, &domctl);
}
domctl.cmd = XEN_DOMCTL_psr_cat_op;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.psr_cat_op.cmd = cmd;
domctl.u.psr_cat_op.target = target;
domctl.u.psr_cat_op.data = data;
}
domctl.cmd = XEN_DOMCTL_psr_cat_op;
- domctl.domain = (domid_t)domid;
+ domctl.domain = domid;
domctl.u.psr_cat_op.cmd = cmd;
domctl.u.psr_cat_op.target = target;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_scheduler_op;
- domctl.domain = (domid_t) domid;
+ domctl.domain = domid;
domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putinfo;
domctl.u.scheduler_op.u.rtds.period = sdom->period;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_scheduler_op;
- domctl.domain = (domid_t) domid;
+ domctl.domain = domid;
domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
return -1;
domctl.cmd = XEN_DOMCTL_scheduler_op;
- domctl.domain = (domid_t) domid;
+ domctl.domain = domid;
domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putvcpuinfo;
return -1;
domctl.cmd = XEN_DOMCTL_scheduler_op;
- domctl.domain = (domid_t) domid;
+ domctl.domain = domid;
domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getvcpuinfo;
*/
xen_pfn_t xenstore_gfn, console_gfn;
unsigned int xenstore_evtchn, console_evtchn;
- domid_t xenstore_domid, console_domid;
+ uint32_t xenstore_domid, console_domid;
/* Bitmap of currently populated PFNs during restore. */
unsigned long *populated_pfns;
int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
unsigned int store_evtchn, unsigned long *store_mfn,
- domid_t store_domid, unsigned int console_evtchn,
- unsigned long *console_gfn, domid_t console_domid,
+ uint32_t store_domid, unsigned int console_evtchn,
+ unsigned long *console_gfn, uint32_t console_domid,
unsigned int hvm, unsigned int pae,
xc_migration_stream_t stream_type,
struct restore_callbacks *callbacks, int send_back_fd)
#define SUSPEND_FILE_BUFLEN (sizeof(SUSPEND_LOCK_FILE) + 10)
-static void get_suspend_file(char buf[], int domid)
+static void get_suspend_file(char buf[], uint32_t domid)
{
snprintf(buf, SUSPEND_FILE_BUFLEN, SUSPEND_LOCK_FILE, domid);
}
-static int lock_suspend_event(xc_interface *xch, int domid, int *lockfd)
+static int lock_suspend_event(xc_interface *xch, uint32_t domid, int *lockfd)
{
int fd = -1, r;
char suspend_file[SUSPEND_FILE_BUFLEN];
return -1;
}
-static int unlock_suspend_event(xc_interface *xch, int domid, int *lockfd)
+static int unlock_suspend_event(xc_interface *xch, uint32_t domid, int *lockfd)
{
int r;
char suspend_file[SUSPEND_FILE_BUFLEN];
/* Internal callers are allowed to call this with suspend_evtchn<0
* but *lockfd>0. */
int xc_suspend_evtchn_release(xc_interface *xch, xenevtchn_handle *xce,
- int domid, int suspend_evtchn, int *lockfd)
+ uint32_t domid, int suspend_evtchn, int *lockfd)
{
if (suspend_evtchn >= 0)
xenevtchn_unbind(xce, suspend_evtchn);
}
int xc_suspend_evtchn_init_sane(xc_interface *xch, xenevtchn_handle *xce,
- int domid, int port, int *lockfd)
+ uint32_t domid, int port, int *lockfd)
{
int rc, suspend_evtchn = -1;
}
int xc_suspend_evtchn_init_exclusive(xc_interface *xch, xenevtchn_handle *xce,
- int domid, int port, int *lockfd)
+ uint32_t domid, int port, int *lockfd)
{
int suspend_evtchn;
/* returns 0 if nothing to save, -1 if error saving, 1 if saved successfully */
int xc_tmem_save(xc_interface *xch,
- int dom, int io_fd, int live, int field_marker)
+ uint32_t domid, int io_fd, int live, int field_marker)
{
int marker = field_marker;
int i, j, rc;
char *buf = NULL;
rc = xc_tmem_control(xch, 0, XEN_SYSCTL_TMEM_OP_SAVE_BEGIN,
- dom, 0 /* len*/ , live, NULL);
+ domid, 0 /* len*/ , live, NULL);
if ( rc )
{
/* Nothing to save - no tmem enabled. */
if ( xc_tmem_control(xch, 0 /* pool_id */,
XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO,
- dom /* cli_id */, sizeof(info), 0 /* arg */,
+ domid /* cli_id */, sizeof(info), 0 /* arg */,
&info) < 0 )
return -1;
rc = xc_tmem_control(xch, 0 /* pool_id is ignored. */,
XEN_SYSCTL_TMEM_OP_GET_POOLS,
- dom /* cli_id */, sizeof(*pools) * info.nr_pools,
+ domid /* cli_id */, sizeof(*pools) * info.nr_pools,
0 /* arg */, pools);
if ( rc < 0 || (uint32_t)rc > info.nr_pools )
for ( j = pool->n_pages; j > 0; j-- )
{
int ret;
- if ( (ret = xc_tmem_control(xch, pool->id,
- XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE, dom,
- bufsize, 0, buf)) > 0 )
+ if ( (ret = xc_tmem_control(
+ xch, pool->id, XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE,
+ domid, bufsize, 0, buf)) > 0 )
{
h = (struct tmem_handle *)buf;
if ( write_exact(io_fd, &h->oid, sizeof(h->oid)) )
}
}
DPRINTF("saved %"PRId64" tmem pages for dom=%d pool=%d, checksum=%x\n",
- pool->n_pages - j, dom, pool->id, checksum);
+ pool->n_pages - j, domid, pool->id, checksum);
}
}
free(pools);
}
/* only called for live migration */
-int xc_tmem_save_extra(xc_interface *xch, int dom, int io_fd, int field_marker)
+int xc_tmem_save_extra(xc_interface *xch, uint32_t domid, int io_fd, int field_marker)
{
struct tmem_handle handle;
int marker = field_marker;
if ( write_exact(io_fd, &marker, sizeof(marker)) )
return -1;
- while ( xc_tmem_control(xch, 0, XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV, dom,
+ while ( xc_tmem_control(xch, 0, XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV, domid,
sizeof(handle),0,&handle) > 0 ) {
if ( write_exact(io_fd, &handle.pool_id, sizeof(handle.pool_id)) )
return -1;
}
/* only called for live migration */
-void xc_tmem_save_done(xc_interface *xch, int dom)
+void xc_tmem_save_done(xc_interface *xch, uint32_t domid)
{
- xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_SAVE_END,dom,0,0,NULL);
+ xc_tmem_control(xch, 0, XEN_SYSCTL_TMEM_OP_SAVE_END, domid, 0, 0, NULL);
}
/* restore routines */
0 /* arg */, &pool);
}
-int xc_tmem_restore(xc_interface *xch, int dom, int io_fd)
+int xc_tmem_restore(xc_interface *xch, uint32_t domid, int io_fd)
{
uint32_t minusone;
xen_tmem_client_t info;
if ( !info.nr_pools )
return -1;
- if ( xc_tmem_control(xch,0,XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN,dom,0,0,NULL) < 0 )
+ if ( xc_tmem_control(xch, 0, XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN, domid, 0, 0, NULL) < 0 )
return -1;
if ( xc_tmem_control(xch, 0 /* pool_id */,
XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO,
- dom /* cli_id */, sizeof(info), 0 /* arg */,
+ domid /* cli_id */, sizeof(info), 0 /* arg */,
&info) < 0 )
return -1;
if ( read_exact(io_fd, &pool, sizeof(pool)) )
goto out_memory;
- if ( xc_tmem_restore_new_pool(xch, dom, pool.id, pool.flags.raw,
+ if ( xc_tmem_restore_new_pool(xch, domid, pool.id, pool.flags.raw,
pool.uuid[0], pool.uuid[1]) < 0 )
goto out_memory;
goto out_memory;
checksum += *buf;
- if ( (rc = xc_tmem_control_oid(xch, pool.id,
- XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE, dom,
- bufsize, index, oid, buf)) <= 0 )
+ if ( (rc = xc_tmem_control_oid(
+ xch, pool.id, XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE,
+ domid, bufsize, index, oid, buf)) <= 0 )
{
DPRINTF("xc_tmem_restore: putting page failed, rc=%d\n",rc);
out_memory:
}
if ( pool.n_pages )
DPRINTF("restored %"PRId64" tmem pages for dom=%d pool=%d, check=%x\n",
- pool.n_pages - j, dom, pool.id, checksum);
+ pool.n_pages - j, domid, pool.id, checksum);
}
free(buf);
}
/* only called for live migration, must be called after suspend */
-int xc_tmem_restore_extra(xc_interface *xch, int dom, int io_fd)
+int xc_tmem_restore_extra(xc_interface *xch, uint32_t domid, int io_fd)
{
uint32_t pool_id;
struct xen_tmem_oid oid;
return -1;
if ( read_exact(io_fd, &index, sizeof(index)) )
return -1;
- if ( xc_tmem_control_oid(xch, pool_id, XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE, dom,
- 0,index,oid,NULL) <= 0 )
+ if ( xc_tmem_control_oid(
+ xch, pool_id, XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE,
+ domid, 0, index, oid, NULL) <= 0 )
return -1;
count++;
checksum += pool_id + oid.oid[0] + oid.oid[1] + oid.oid[2] + index;
#include "xc_private.h"
-int xc_vm_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
+int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
unsigned int mode, uint32_t *port)
{
DECLARE_DOMCTL;
return rc;
}
-void *xc_vm_event_enable(xc_interface *xch, domid_t domain_id, int param,
+void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
uint32_t *port)
{
void *ring_page = NULL;
/*******************/
int pin_table(
- xc_interface *xch, unsigned int type, unsigned long mfn, domid_t dom)
+ xc_interface *xch, unsigned int type, unsigned long mfn, uint32_t dom)
{
struct mmuext_op op;
#define MADDR_MASK_X86 (MFN_MASK_X86 << PAGE_SHIFT_X86)
int pin_table(xc_interface *xch, unsigned int type, unsigned long mfn,
- domid_t dom);
+ uint32_t dom);
#endif /* XG_PRIVATE_H */
}
void libxl__domain_suspend_common_switch_qemu_logdirty
- (int domid, unsigned enable, void *user)
+ (uint32_t domid, unsigned enable, void *user)
{
libxl__save_helper_state *shs = user;
libxl__egc *egc = shs->egc;
_hidden void libxl__domain_suspend_common_switch_qemu_logdirty
- (int domid, unsigned int enable, void *data);
+ (uint32_t domid, unsigned int enable, void *data);
_hidden void libxl__domain_common_switch_qemu_logdirty(libxl__egc *egc,
int domid, unsigned enable,
libxl__logdirty_switch *lds);
[ 4, 'srcxA', "postcopy", [] ],
[ 5, 'srcxA', "checkpoint", [] ],
[ 6, 'srcxA', "wait_checkpoint", [] ],
- [ 7, 'scxA', "switch_qemu_logdirty", [qw(int domid
+ [ 7, 'scxA', "switch_qemu_logdirty", [qw(uint32_t domid
unsigned enable)] ],
[ 8, 'rcx', "restore_results", ['xen_pfn_t', 'store_gfn',
'xen_pfn_t', 'console_gfn'] ],