From: Paul Durrant Date: Fri, 31 Oct 2014 18:03:57 +0000 (+0000) Subject: Add support for the FIFO event channel ABI X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=refs%2Fheads%2Ffifo;p=people%2Fpauldu%2Fxenbus.git Add support for the FIFO event channel ABI If it is available the FIFO ABI will be used. If it is not then the two-level ABI will be used instead. The ABI is released and re-acquired across suspend/resume so this should allow moving between hosts with different levels of ABI support. Signed-off-by: Paul Durrant --- diff --git a/include/util.h b/include/util.h index 485cda4..5fe06bb 100644 --- a/include/util.h +++ b/include/util.h @@ -39,45 +39,6 @@ #define P2ROUNDUP(_x, _a) \ (-(-(_x) & -(_a))) -static FORCEINLINE LONG -__ffs( - IN unsigned long long mask - ) -{ - unsigned char *array = (unsigned char *)&mask; - unsigned int byte; - unsigned int bit; - unsigned char val; - - val = 0; - - byte = 0; - while (byte < 8) { - val = array[byte]; - - if (val != 0) - break; - - byte++; - } - if (byte == 8) - return -1; - - bit = 0; - while (bit < 8) { - if (val & 0x01) - break; - - val >>= 1; - bit++; - } - - return (byte * 8) + bit; -} - -#define __ffu(_mask) \ - __ffs(~(_mask)) - static FORCEINLINE VOID __CpuId( IN ULONG Leaf, diff --git a/include/xen.h b/include/xen.h index 674676c..5f452b8 100644 --- a/include/xen.h +++ b/include/xen.h @@ -74,6 +74,18 @@ HvmGetParam( OUT PULONGLONG Value ); +XEN_API +ULONG +HvmGetStoreEvtchn( + VOID + ); + +XEN_API +VOID +HvmSetStoreEvtchn( + ULONG + ); + __checkReturn XEN_API NTSTATUS @@ -136,17 +148,26 @@ __checkReturn XEN_API NTSTATUS EventChannelBindInterDomain( - IN domid_t RemoteDomain, - IN evtchn_port_t RemotePort, - OUT evtchn_port_t *LocalPort + IN domid_t RemoteDomain, + IN evtchn_port_t RemotePort, + OUT evtchn_port_t *LocalPort ); __checkReturn XEN_API NTSTATUS EventChannelBindVirq( - IN uint32_t Virq, - OUT evtchn_port_t *LocalPort + IN uint32_t Virq, + OUT evtchn_port_t *LocalPort + ); + +__checkReturn +XEN_API +NTSTATUS +EventChannelQueryInterDomain( + IN evtchn_port_t LocalPort, + OUT domid_t *RemoteDomain, + OUT evtchn_port_t *RemotePort ); __checkReturn @@ -156,6 +177,28 @@ EventChannelClose( IN evtchn_port_t LocalPort ); +__checkReturn +XEN_API +NTSTATUS +EventChannelExpandArray( + IN PFN_NUMBER Pfn + ); + +__checkReturn +XEN_API +NTSTATUS +EventChannelInitControl( + IN PFN_NUMBER Pfn, + IN unsigned int vcpu_id + ); + +__checkReturn +XEN_API +NTSTATUS +EventChannelReset( + VOID + ); + // GRANT TABLE __checkReturn diff --git a/include/xen/arch-x86/xen.h b/include/xen/arch-x86/xen.h index 5cc22fb..1880f89 100644 --- a/include/xen/arch-x86/xen.h +++ b/include/xen/arch-x86/xen.h @@ -154,6 +154,15 @@ typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ /* * The following is all CPU context. Note that the fpu_ctxt block is filled * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. + * + * Also note that when calling DOMCTL_setvcpucontext and VCPU_initialise + * for HVM and PVH guests, not all information in this structure is updated: + * + * - For HVM guests, the structures read include: fpu_ctxt (if + * VGCT_I387_VALID is set), flags, user_regs, debugreg[*] + * + * - PVH guests are the same as HVM guests, but additionally use ctrlreg[3] to + * set cr3. All other fields not used should be set to 0. */ struct vcpu_guest_context { /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. 
*/ diff --git a/include/xen/event_channel.h b/include/xen/event_channel.h index 472efdb..05e531d 100644 --- a/include/xen/event_channel.h +++ b/include/xen/event_channel.h @@ -71,6 +71,9 @@ #define EVTCHNOP_bind_vcpu 8 #define EVTCHNOP_unmask 9 #define EVTCHNOP_reset 10 +#define EVTCHNOP_init_control 11 +#define EVTCHNOP_expand_array 12 +#define EVTCHNOP_set_priority 13 /* ` } */ typedef uint32_t evtchn_port_t; @@ -98,6 +101,17 @@ typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t; * a port that is unbound and marked as accepting bindings from the calling * domain. A fresh port is allocated in the calling domain and returned as * . + * + * In case the peer domain has already tried to set our event channel + * pending, before it was bound, EVTCHNOP_bind_interdomain always sets + * the local event channel pending. + * + * The usual pattern of use, in the guest's upcall (or subsequent + * handler) is as follows: (Re-enable the event channel for subsequent + * signalling and then) check for the existence of whatever condition + * is being waited for by other means, and take whatever action is + * needed (if any). + * * NOTES: * 1. may be DOMID_SELF, allowing loopback connections. */ @@ -250,6 +264,10 @@ typedef struct evtchn_unmask evtchn_unmask_t; * NOTES: * 1. may be specified as DOMID_SELF. * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF. + * 3. Destroys all control blocks and event array, resets event channel + * operations to 2-level ABI if called with == DOMID_SELF and FIFO + * ABI was used. Guests should not bind events during EVTCHNOP_reset call + * as these events are likely to be lost. */ struct evtchn_reset { /* IN parameters. */ @@ -257,6 +275,43 @@ struct evtchn_reset { }; typedef struct evtchn_reset evtchn_reset_t; +/* + * EVTCHNOP_init_control: initialize the control block for the FIFO ABI. + * + * Note: any events that are currently pending will not be resent and + * will be lost. Guests should call this before binding any event to + * avoid losing any events. + */ +struct evtchn_init_control { + /* IN parameters. */ + uint64_t control_gfn; + uint32_t offset; + uint32_t vcpu; + /* OUT parameters. */ + uint8_t link_bits; + uint8_t _pad[7]; +}; +typedef struct evtchn_init_control evtchn_init_control_t; + +/* + * EVTCHNOP_expand_array: add an additional page to the event array. + */ +struct evtchn_expand_array { + /* IN parameters. */ + uint64_t array_gfn; +}; +typedef struct evtchn_expand_array evtchn_expand_array_t; + +/* + * EVTCHNOP_set_priority: set the priority for an event channel. + */ +struct evtchn_set_priority { + /* IN parameters. */ + uint32_t port; + uint32_t priority; +}; +typedef struct evtchn_set_priority evtchn_set_priority_t; + /* * ` enum neg_errnoval * ` HYPERVISOR_event_channel_op_compat(struct evtchn_op *op) @@ -281,6 +336,42 @@ struct evtchn_op { typedef struct evtchn_op evtchn_op_t; DEFINE_XEN_GUEST_HANDLE(evtchn_op_t); +/* + * 2-level ABI + */ + +#define EVTCHN_2L_NR_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64) + +/* + * FIFO ABI + */ + +/* Events may have priorities from 0 (highest) to 15 (lowest). 
*/ +#define EVTCHN_FIFO_PRIORITY_MAX 0 +#define EVTCHN_FIFO_PRIORITY_DEFAULT 7 +#define EVTCHN_FIFO_PRIORITY_MIN 15 + +#define EVTCHN_FIFO_MAX_QUEUES (EVTCHN_FIFO_PRIORITY_MIN + 1) + +typedef uint32_t event_word_t; + +#define EVTCHN_FIFO_PENDING 31 +#define EVTCHN_FIFO_MASKED 30 +#define EVTCHN_FIFO_LINKED 29 +#define EVTCHN_FIFO_BUSY 28 + +#define EVTCHN_FIFO_LINK_BITS 17 +#define EVTCHN_FIFO_LINK_MASK ((1 << EVTCHN_FIFO_LINK_BITS) - 1) + +#define EVTCHN_FIFO_NR_CHANNELS (1 << EVTCHN_FIFO_LINK_BITS) + +struct evtchn_fifo_control_block { + uint32_t ready; + uint32_t _rsvd; + uint32_t head[EVTCHN_FIFO_MAX_QUEUES]; +}; +typedef struct evtchn_fifo_control_block evtchn_fifo_control_block_t; + #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */ /* diff --git a/include/xen/hvm/hvm_op.h b/include/xen/hvm/hvm_op.h index a9aab4b..eeb0a60 100644 --- a/include/xen/hvm/hvm_op.h +++ b/include/xen/hvm/hvm_op.h @@ -23,6 +23,7 @@ #include "../xen.h" #include "../trace.h" +#include "../event_channel.h" /* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */ #define HVMOP_set_param 0 @@ -90,10 +91,10 @@ typedef enum { struct xen_hvm_track_dirty_vram { /* Domain to be tracked. */ domid_t domid; + /* Number of pages to track. */ + uint32_t nr; /* First pfn to track. */ uint64_aligned_t first_pfn; - /* Number of pages to track. */ - uint64_aligned_t nr; /* OUT variable. */ /* Dirty bitmap buffer. */ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap; @@ -106,10 +107,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t); struct xen_hvm_modified_memory { /* Domain to be updated. */ domid_t domid; + /* Number of pages. */ + uint32_t nr; /* First pfn. */ uint64_aligned_t first_pfn; - /* Number of pages. */ - uint64_aligned_t nr; }; typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t); @@ -162,49 +163,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t); /* Following tools-only interfaces may change in future. */ #if defined(__XEN__) || defined(__XEN_TOOLS__) +/* Deprecated by XENMEM_access_op_set_access */ #define HVMOP_set_mem_access 12 -typedef enum { - HVMMEM_access_n, - HVMMEM_access_r, - HVMMEM_access_w, - HVMMEM_access_rw, - HVMMEM_access_x, - HVMMEM_access_rx, - HVMMEM_access_wx, - HVMMEM_access_rwx, - HVMMEM_access_rx2rw, /* Page starts off as r-x, but automatically - * change to r-w on a write */ - HVMMEM_access_n2rwx, /* Log access: starts off as n, automatically - * goes to rwx, generating an event without - * pausing the vcpu */ - HVMMEM_access_default /* Take the domain default */ -} hvmmem_access_t; -/* Notify that a region of memory is to have specific access types */ -struct xen_hvm_set_mem_access { - /* Domain to be updated. */ - domid_t domid; - /* Memory type */ - uint16_t hvmmem_access; /* hvm_access_t */ - /* Number of pages, ignored on setting default access */ - uint32_t nr; - /* First pfn, or ~0ull to set the default access for new pages */ - uint64_aligned_t first_pfn; -}; -typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t; -DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t); +/* Deprecated by XENMEM_access_op_get_access */ #define HVMOP_get_mem_access 13 -/* Get the specific access type for that region of memory */ -struct xen_hvm_get_mem_access { - /* Domain to be queried. */ - domid_t domid; - /* Memory type: OUT */ - uint16_t hvmmem_access; /* hvm_access_t */ - /* pfn, or ~0ull for default access for new pages. 
IN */ - uint64_aligned_t pfn; -}; -typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t; -DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t); #define HVMOP_inject_trap 14 /* Inject a trap into a VCPU, which will get taken up on the next @@ -270,6 +233,150 @@ struct xen_hvm_inject_msi { typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t; DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t); +/* + * IOREQ Servers + * + * The interface between an I/O emulator an Xen is called an IOREQ Server. + * A domain supports a single 'legacy' IOREQ Server which is instantiated if + * parameter... + * + * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous + * ioreq structures), or... + * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered + * ioreq ring), or... + * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses + * to request buffered I/O emulation). + * + * The following hypercalls facilitate the creation of IOREQ Servers for + * 'secondary' emulators which are invoked to implement port I/O, memory, or + * PCI config space ranges which they explicitly register. + */ + +typedef uint16_t ioservid_t; + +/* + * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary + * emulator servicing domain . + * + * The handed back is unique for . If is zero + * the buffered ioreq ring will not be allocated and hence all emulation + * requestes to this server will be synchronous. + */ +#define HVMOP_create_ioreq_server 17 +struct xen_hvm_create_ioreq_server { + domid_t domid; /* IN - domain to be serviced */ + uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */ + ioservid_t id; /* OUT - server id */ +}; +typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t); + +/* + * HVMOP_get_ioreq_server_info: Get all the information necessary to access + * IOREQ Server . + * + * The emulator needs to map the synchronous ioreq structures and buffered + * ioreq ring (if it exists) that Xen uses to request emulation. These are + * hosted in domain 's gmfns and + * respectively. In addition, if the IOREQ Server is handling buffered + * emulation requests, the emulator needs to bind to event channel + * to listen for them. (The event channels used for + * synchronous emulation requests are specified in the per-CPU ioreq + * structures in ). + * If the IOREQ Server is not handling buffered emulation requests then the + * values handed back in and will both be 0. + */ +#define HVMOP_get_ioreq_server_info 18 +struct xen_hvm_get_ioreq_server_info { + domid_t domid; /* IN - domain to be serviced */ + ioservid_t id; /* IN - server id */ + evtchn_port_t bufioreq_port; /* OUT - buffered ioreq port */ + uint64_aligned_t ioreq_pfn; /* OUT - sync ioreq pfn */ + uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */ +}; +typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t); + +/* + * HVM_map_io_range_to_ioreq_server: Register an I/O range of domain + * for emulation by the client of IOREQ + * Server + * HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of + * for emulation by the client of IOREQ + * Server + * + * There are three types of I/O that can be emulated: port I/O, memory accesses + * and PCI config space accesses. The field denotes which type of range + * the and (inclusive) fields are specifying. 
+ * PCI config space ranges are specified by segment/bus/device/function values + * which should be encoded using the HVMOP_PCI_SBDF helper macro below. + * + * NOTE: unless an emulation request falls entirely within a range mapped + * by a secondary emulator, it will not be passed to that emulator. + */ +#define HVMOP_map_io_range_to_ioreq_server 19 +#define HVMOP_unmap_io_range_from_ioreq_server 20 +struct xen_hvm_io_range { + domid_t domid; /* IN - domain to be serviced */ + ioservid_t id; /* IN - server id */ + uint32_t type; /* IN - type of range */ +# define HVMOP_IO_RANGE_PORT 0 /* I/O port range */ +# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */ +# define HVMOP_IO_RANGE_PCI 2 /* PCI segment/bus/dev/func range */ + uint64_aligned_t start, end; /* IN - inclusive start and end of range */ +}; +typedef struct xen_hvm_io_range xen_hvm_io_range_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t); + +#define HVMOP_PCI_SBDF(s,b,d,f) \ + ((((s) & 0xffff) << 16) | \ + (((b) & 0xff) << 8) | \ + (((d) & 0x1f) << 3) | \ + ((f) & 0x07)) + +/* + * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server servicing domain + * . + * + * Any registered I/O ranges will be automatically deregistered. + */ +#define HVMOP_destroy_ioreq_server 21 +struct xen_hvm_destroy_ioreq_server { + domid_t domid; /* IN - domain to be serviced */ + ioservid_t id; /* IN - server id */ +}; +typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t); + +/* + * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server servicing + * domain . + * + * The IOREQ Server will not be passed any emulation requests until it is in the + * enabled state. + * Note that the contents of the ioreq_pfn and bufioreq_fn (see + * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in + * the enabled state. + */ +#define HVMOP_set_ioreq_server_state 22 +struct xen_hvm_set_ioreq_server_state { + domid_t domid; /* IN - domain to be serviced */ + ioservid_t id; /* IN - server id */ + uint8_t enabled; /* IN - enabled? */ +}; +typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t; +DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t); + #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ diff --git a/include/xen/hvm/params.h b/include/xen/hvm/params.h index 517a184..3c51072 100644 --- a/include/xen/hvm/params.h +++ b/include/xen/hvm/params.h @@ -56,9 +56,47 @@ #if defined(__i386__) || defined(__x86_64__) -/* Expose Viridian interfaces to this HVM guest? */ +/* + * Viridian enlightenments + * + * (See http://download.microsoft.com/download/A/B/4/AB43A34E-BDD0-4FA6-BDEF-79EEF16E880B/Hypervisor%20Top%20Level%20Functional%20Specification%20v4.0.docx) + * + * To expose viridian enlightenments to the guest set this parameter + * to the desired feature mask. The base feature set must be present + * in any valid feature mask. 
+ */ #define HVM_PARAM_VIRIDIAN 9 +/* Base+Freq viridian feature sets: + * + * - Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) + * - APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR) + * - Virtual Processor index MSR (HV_X64_MSR_VP_INDEX) + * - Timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and + * HV_X64_MSR_APIC_FREQUENCY) + */ +#define _HVMPV_base_freq 0 +#define HVMPV_base_freq (1 << _HVMPV_base_freq) + +/* Feature set modifications */ + +/* Disable timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and + * HV_X64_MSR_APIC_FREQUENCY). + * This modification restores the viridian feature set to the + * original 'base' set exposed in releases prior to Xen 4.4. + */ +#define _HVMPV_no_freq 1 +#define HVMPV_no_freq (1 << _HVMPV_no_freq) + +/* Enable Partition Time Reference Counter (HV_X64_MSR_TIME_REF_COUNT) */ +#define _HVMPV_time_ref_count 2 +#define HVMPV_time_ref_count (1 << _HVMPV_time_ref_count) + +#define HVMPV_feature_mask \ + (HVMPV_base_freq | \ + HVMPV_no_freq | \ + HVMPV_time_ref_count) + #endif /* @@ -145,6 +183,12 @@ /* SHUTDOWN_* action in case of a triple fault */ #define HVM_PARAM_TRIPLE_FAULT_REASON 31 -#define HVM_NR_PARAMS 32 +#define HVM_PARAM_IOREQ_SERVER_PFN 32 +#define HVM_PARAM_NR_IOREQ_SERVER_PAGES 33 + +/* Location of the VM Generation ID in guest physical address space. */ +#define HVM_PARAM_VM_GENERATION_ID_ADDR 34 + +#define HVM_NR_PARAMS 35 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ diff --git a/include/xen/io/xs_wire.h b/include/xen/io/xs_wire.h index 99d24e3..0a0cdbc 100644 --- a/include/xen/io/xs_wire.h +++ b/include/xen/io/xs_wire.h @@ -49,7 +49,9 @@ enum xsd_sockmsg_type XS_RESUME, XS_SET_TARGET, XS_RESTRICT, - XS_RESET_WATCHES + XS_RESET_WATCHES, + + XS_INVALID = 0xffff /* Guaranteed to remain an invalid type */ }; #define XS_WRITE_NONE "NONE" @@ -83,7 +85,8 @@ __attribute__((unused)) XSD_ERROR(EROFS), XSD_ERROR(EBUSY), XSD_ERROR(EAGAIN), - XSD_ERROR(EISCONN) + XSD_ERROR(EISCONN), + XSD_ERROR(E2BIG) }; #endif @@ -115,6 +118,8 @@ struct xenstore_domain_interface { char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ XENSTORE_RING_IDX req_cons, req_prod; XENSTORE_RING_IDX rsp_cons, rsp_prod; + uint32_t server_features; /* Bitmap of features supported by the server */ + uint32_t connection; }; /* Violating this is very bad. See docs/misc/xenstore.txt. */ @@ -124,6 +129,13 @@ struct xenstore_domain_interface { #define XENSTORE_ABS_PATH_MAX 3072 #define XENSTORE_REL_PATH_MAX 2048 +/* The ability to reconnect a ring */ +#define XENSTORE_SERVER_FEATURE_RECONNECTION 1 + +/* Valid values for the connection field */ +#define XENSTORE_CONNECTED 0 /* the steady-state */ +#define XENSTORE_RECONNECT 1 /* guest has initiated a reconnect */ + #endif /* _XS_WIRE_H */ /* diff --git a/include/xen/memory.h b/include/xen/memory.h index 7a26dee..f021958 100644 --- a/include/xen/memory.h +++ b/include/xen/memory.h @@ -186,6 +186,15 @@ struct xen_machphys_mfn_list { typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t; DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t); +/* + * For a compat caller, this is identical to XENMEM_machphys_mfn_list. + * + * For a non compat caller, this functions similarly to + * XENMEM_machphys_mfn_list, but returns the mfns making up the compatibility + * m2p table. + */ +#define XENMEM_machphys_compat_mfn_list 25 + /* * Returns the location in virtual address space of the machine_to_phys * mapping table. 
Architectures which do not have a m2p table, or which do not @@ -207,8 +216,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t); #define XENMAPSPACE_gmfn 2 /* GMFN */ #define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */ #define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom, - * XENMEM_add_to_physmap_range only. - */ + * XENMEM_add_to_physmap_batch only. */ /* ` } */ /* @@ -238,8 +246,8 @@ typedef struct xen_add_to_physmap xen_add_to_physmap_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t); /* A batched version of add_to_physmap. */ -#define XENMEM_add_to_physmap_range 23 -struct xen_add_to_physmap_range { +#define XENMEM_add_to_physmap_batch 23 +struct xen_add_to_physmap_batch { /* IN */ /* Which domain to change the mapping for. */ domid_t domid; @@ -260,8 +268,15 @@ struct xen_add_to_physmap_range { /* Per index error code. */ XEN_GUEST_HANDLE(int) errs; }; -typedef struct xen_add_to_physmap_range xen_add_to_physmap_range_t; +typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t; +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t); + +#if __XEN_INTERFACE_VERSION__ < 0x00040400 +#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch +#define xen_add_to_physmap_range xen_add_to_physmap_batch +typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t; DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t); +#endif /* * Unmaps the page appearing at a particular GPFN from the specified guest's @@ -357,9 +372,6 @@ typedef struct xen_pod_target xen_pod_target_t; #define XENMEM_paging_op_evict 1 #define XENMEM_paging_op_prep 2 -#define XENMEM_access_op 21 -#define XENMEM_access_op_resume 0 - struct xen_mem_event_op { uint8_t op; /* XENMEM_*_op_* */ domid_t domain; @@ -373,6 +385,56 @@ struct xen_mem_event_op { typedef struct xen_mem_event_op xen_mem_event_op_t; DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t); +#define XENMEM_access_op 21 +#define XENMEM_access_op_resume 0 +#define XENMEM_access_op_set_access 1 +#define XENMEM_access_op_get_access 2 + +typedef enum { + XENMEM_access_n, + XENMEM_access_r, + XENMEM_access_w, + XENMEM_access_rw, + XENMEM_access_x, + XENMEM_access_rx, + XENMEM_access_wx, + XENMEM_access_rwx, + /* + * Page starts off as r-x, but automatically + * change to r-w on a write + */ + XENMEM_access_rx2rw, + /* + * Log access: starts off as n, automatically + * goes to rwx, generating an event without + * pausing the vcpu + */ + XENMEM_access_n2rwx, + /* Take the domain default */ + XENMEM_access_default +} xenmem_access_t; + +struct xen_mem_access_op { + /* XENMEM_access_op_* */ + uint8_t op; + /* xenmem_access_t */ + uint8_t access; + domid_t domid; + /* + * Number of pages for set op + * Ignored on setting default access and other ops + */ + uint32_t nr; + /* + * First pfn for set op + * pfn for get op + * ~0ull is used to set and get the default access for pages + */ + uint64_aligned_t pfn; +}; +typedef struct xen_mem_access_op xen_mem_access_op_t; +DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t); + #define XENMEM_sharing_op 22 #define XENMEM_sharing_op_nominate_gfn 0 #define XENMEM_sharing_op_nominate_gref 1 @@ -461,6 +523,58 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t); #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ +/* + * XENMEM_get_vnumainfo used by guest to get + * vNUMA topology from hypervisor. 
+ */ +#define XENMEM_get_vnumainfo 26 + +/* vNUMA node memory ranges */ +struct vmemrange { + uint64_t start, end; + unsigned int flags; + unsigned int nid; +}; + +typedef struct vmemrange vmemrange_t; +DEFINE_XEN_GUEST_HANDLE(vmemrange_t); + +/* + * vNUMA topology specifies vNUMA node number, distance table, + * memory ranges and vcpu mapping provided for guests. + * XENMEM_get_vnumainfo hypercall expects to see from guest + * nr_vnodes, nr_vmemranges and nr_vcpus to indicate available memory. + * After filling guests structures, nr_vnodes, nr_vmemranges and nr_vcpus + * copied back to guest. Domain returns expected values of nr_vnodes, + * nr_vmemranges and nr_vcpus to guest if the values where incorrect. + */ +struct vnuma_topology_info { + /* IN */ + domid_t domid; + uint16_t pad; + /* IN/OUT */ + unsigned int nr_vnodes; + unsigned int nr_vcpus; + unsigned int nr_vmemranges; + /* OUT */ + union { + XEN_GUEST_HANDLE(uint) h; + uint64_t pad; + } vdistance; + union { + XEN_GUEST_HANDLE(uint) h; + uint64_t pad; + } vcpu_to_vnode; + union { + XEN_GUEST_HANDLE(vmemrange_t) h; + uint64_t pad; + } vmemrange; +}; +typedef struct vnuma_topology_info vnuma_topology_info_t; +DEFINE_XEN_GUEST_HANDLE(vnuma_topology_info_t); + +/* Next available subop number is 27 */ + #endif /* __XEN_PUBLIC_MEMORY_H__ */ /* diff --git a/include/xen/sched.h b/include/xen/sched.h index 2d0148b..25b140d 100644 --- a/include/xen/sched.h +++ b/include/xen/sched.h @@ -76,9 +76,10 @@ * Halt execution of this domain (all VCPUs) and notify the system controller. * @arg == pointer to sched_shutdown_t structure. * - * If the sched_shutdown_t reason is SHUTDOWN_suspend then this - * hypercall takes an additional extra argument which should be the - * MFN of the guest's start_info_t. + * If the sched_shutdown_t reason is SHUTDOWN_suspend then + * x86 PV guests must also set RDX (EDX for 32-bit guests) to the MFN + * of the guest's start info page. RDX/EDX is the third hypercall + * argument. 
* * In addition, which reason is SHUTDOWN_suspend this hypercall * returns 1 if suspend was cancelled or the domain was merely diff --git a/include/xen/trace.h b/include/xen/trace.h index 48cc5f5..8de630d 100644 --- a/include/xen/trace.h +++ b/include/xen/trace.h @@ -50,8 +50,9 @@ #define TRC_SUBCLS_SHIFT 12 /* trace subclasses for SVM */ -#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */ -#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */ +#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */ +#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */ +#define TRC_HVM_EMUL 0x00084000 /* emulated devices */ #define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */ #define TRC_SCHED_CLASS 0x00022000 /* Scheduler-specific */ @@ -76,6 +77,7 @@ #define TRC_SCHED_CSCHED2 1 #define TRC_SCHED_SEDF 2 #define TRC_SCHED_ARINC653 3 +#define TRC_SCHED_RTDS 4 /* Per-scheduler tracing */ #define TRC_SCHED_CLASS_EVT(_c, _e) \ @@ -229,6 +231,25 @@ #define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216) #define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217) +/* Trace events for emulated devices */ +#define TRC_HVM_EMUL_HPET_START_TIMER (TRC_HVM_EMUL + 0x1) +#define TRC_HVM_EMUL_PIT_START_TIMER (TRC_HVM_EMUL + 0x2) +#define TRC_HVM_EMUL_RTC_START_TIMER (TRC_HVM_EMUL + 0x3) +#define TRC_HVM_EMUL_LAPIC_START_TIMER (TRC_HVM_EMUL + 0x4) +#define TRC_HVM_EMUL_HPET_STOP_TIMER (TRC_HVM_EMUL + 0x5) +#define TRC_HVM_EMUL_PIT_STOP_TIMER (TRC_HVM_EMUL + 0x6) +#define TRC_HVM_EMUL_RTC_STOP_TIMER (TRC_HVM_EMUL + 0x7) +#define TRC_HVM_EMUL_LAPIC_STOP_TIMER (TRC_HVM_EMUL + 0x8) +#define TRC_HVM_EMUL_PIT_TIMER_CB (TRC_HVM_EMUL + 0x9) +#define TRC_HVM_EMUL_LAPIC_TIMER_CB (TRC_HVM_EMUL + 0xA) +#define TRC_HVM_EMUL_PIC_INT_OUTPUT (TRC_HVM_EMUL + 0xB) +#define TRC_HVM_EMUL_PIC_KICK (TRC_HVM_EMUL + 0xC) +#define TRC_HVM_EMUL_PIC_INTACK (TRC_HVM_EMUL + 0xD) +#define TRC_HVM_EMUL_PIC_POSEDGE (TRC_HVM_EMUL + 0xE) +#define TRC_HVM_EMUL_PIC_NEGEDGE (TRC_HVM_EMUL + 0xF) +#define TRC_HVM_EMUL_PIC_PEND_IRQ_CALL (TRC_HVM_EMUL + 0x10) +#define TRC_HVM_EMUL_LAPIC_PIC_INTR (TRC_HVM_EMUL + 0x11) + /* trace events for per class */ #define TRC_PM_FREQ_CHANGE (TRC_HW_PM + 0x01) #define TRC_PM_IDLE_ENTRY (TRC_HW_PM + 0x02) diff --git a/include/xen/xen-compat.h b/include/xen/xen-compat.h index 69141c4..3eb80a0 100644 --- a/include/xen/xen-compat.h +++ b/include/xen/xen-compat.h @@ -27,7 +27,7 @@ #ifndef __XEN_PUBLIC_XEN_COMPAT_H__ #define __XEN_PUBLIC_XEN_COMPAT_H__ -#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040300 +#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040400 #if defined(__XEN__) || defined(__XEN_TOOLS__) /* Xen is built with matching headers and implements the latest interface. */ diff --git a/include/xen/xen.h b/include/xen/xen.h index c14f9ae..1ca65b0 100644 --- a/include/xen/xen.h +++ b/include/xen/xen.h @@ -541,22 +541,26 @@ DEFINE_XEN_GUEST_HANDLE(mmu_update_t); /* * ` enum neg_errnoval * ` HYPERVISOR_multicall(multicall_entry_t call_list[], - * ` unsigned int nr_calls); + * ` uint32_t nr_calls); * - * NB. The fields are natural register size for this architecture. + * NB. The fields are logically the natural register size for this + * architecture. In cases where xen_ulong_t is larger than this then + * any unused bits in the upper portion must be zero. 
*/ struct multicall_entry { - ULONG_PTR op, result; - ULONG_PTR args[6]; + xen_ulong_t op, result; + xen_ulong_t args[6]; }; typedef struct multicall_entry multicall_entry_t; DEFINE_XEN_GUEST_HANDLE(multicall_entry_t); +#if __XEN_INTERFACE_VERSION__ < 0x00040400 /* - * Event channel endpoints per domain: + * Event channel endpoints per domain (when using the 2-level ABI): * 1024 if a LONG_PTR is 32 bits; 4096 if a LONG_PTR is 64 bits. */ -#define NR_EVENT_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64) +#define NR_EVENT_CHANNELS EVTCHN_2L_NR_CHANNELS +#endif struct vcpu_time_info { /* diff --git a/src/xen/event_channel.c b/src/xen/event_channel.c index 23e4659..d2c0256 100644 --- a/src/xen/event_channel.c +++ b/src/xen/event_channel.c @@ -172,6 +172,47 @@ fail1: return status; } +__checkReturn +XEN_API +NTSTATUS +EventChannelQueryInterDomain( + IN evtchn_port_t LocalPort, + OUT domid_t *RemoteDomain, + OUT evtchn_port_t *RemotePort + ) +{ + struct evtchn_status op; + LONG_PTR rc; + NTSTATUS status; + + op.dom = DOMID_SELF; + op.port = LocalPort; + + rc = EventChannelOp(EVTCHNOP_status, &op); + + if (rc < 0) { + ERRNO_TO_STATUS(-rc, status); + goto fail1; + } + + status = STATUS_INVALID_PARAMETER; + if (op.status != EVTCHNSTAT_interdomain) + goto fail2; + + *RemoteDomain = op.u.interdomain.dom; + *RemotePort = op.u.interdomain.port; + + return STATUS_SUCCESS; + +fail2: + Error("fail2\n"); + +fail1: + Error("fail1 (%08x)\n", status); + + return status; +} + __checkReturn XEN_API NTSTATUS @@ -199,3 +240,90 @@ fail1: return status; } + +__checkReturn +XEN_API +NTSTATUS +EventChannelExpandArray( + IN PFN_NUMBER Pfn + ) +{ + struct evtchn_expand_array op; + LONG_PTR rc; + NTSTATUS status; + + op.array_gfn = Pfn; + + rc = EventChannelOp(EVTCHNOP_expand_array, &op); + + if (rc < 0) { + ERRNO_TO_STATUS(-rc, status); + goto fail1; + } + + return STATUS_SUCCESS; + +fail1: + Error("fail1 (%08x)\n", status); + + return status; +} + +__checkReturn +XEN_API +NTSTATUS +EventChannelInitControl( + IN PFN_NUMBER Pfn, + IN unsigned int vcpu_id + ) +{ + struct evtchn_init_control op; + LONG_PTR rc; + NTSTATUS status; + + op.control_gfn = Pfn; + op.offset = 0; + op.vcpu = vcpu_id; + + rc = EventChannelOp(EVTCHNOP_init_control, &op); + + if (rc < 0) { + ERRNO_TO_STATUS(-rc, status); + goto fail1; + } + + return STATUS_SUCCESS; + +fail1: + Error("fail1 (%08x)\n", status); + + return status; +} + +__checkReturn +XEN_API +NTSTATUS +EventChannelReset( + VOID + ) +{ + struct evtchn_reset op; + LONG_PTR rc; + NTSTATUS status; + + op.dom = DOMID_SELF; + + rc = EventChannelOp(EVTCHNOP_reset, &op); + + if (rc < 0) { + ERRNO_TO_STATUS(-rc, status); + goto fail1; + } + + return STATUS_SUCCESS; + +fail1: + Error("fail1 (%08x)\n", status); + + return status; +} diff --git a/src/xen/hvm.c b/src/xen/hvm.c index 8135e97..cccdd2f 100644 --- a/src/xen/hvm.c +++ b/src/xen/hvm.c @@ -112,6 +112,36 @@ fail1: return status; } +static evtchn_port_t HvmStoreEvtchn; + +XEN_API +ULONG +HvmGetStoreEvtchn( + VOID + ) +{ + if (HvmStoreEvtchn == 0) { + ULONGLONG Value; + NTSTATUS status; + + status = HvmGetParam(HVM_PARAM_STORE_EVTCHN, &Value); + ASSERT(NT_SUCCESS(status)); + + HvmStoreEvtchn = (evtchn_port_t)Value; + } + + return HvmStoreEvtchn; +} + +XEN_API +VOID +HvmSetStoreEvtchn( + IN ULONG Port + ) +{ + HvmStoreEvtchn = Port; +} + __checkReturn XEN_API NTSTATUS diff --git a/src/xenbus/evtchn.c b/src/xenbus/evtchn.c index c3d77eb..7fb9837 100644 --- a/src/xenbus/evtchn.c +++ b/src/xenbus/evtchn.c @@ -36,6 +36,7 @@ #include 
"evtchn.h" #include "evtchn_2l.h" +#include "evtchn_fifo.h" #include "fdo.h" #include "hash_table.h" #include "dbg_print.h" @@ -95,6 +96,7 @@ struct _XENBUS_EVTCHN_CONTEXT { PXENBUS_DEBUG_CALLBACK DebugCallback; XENBUS_SHARED_INFO_INTERFACE SharedInfoInterface; PXENBUS_EVTCHN_ABI_CONTEXT EvtchnTwoLevelContext; + PXENBUS_EVTCHN_ABI_CONTEXT EvtchnFifoContext; XENBUS_EVTCHN_ABI EvtchnAbi; PXENBUS_HASH_TABLE Table; LIST_ENTRY List; @@ -215,7 +217,9 @@ EvtchnOpenInterDomain( RemotePort = va_arg(Arguments, ULONG); Mask = va_arg(Arguments, BOOLEAN); - status = EventChannelBindInterDomain(RemoteDomain, RemotePort, &LocalPort); + status = EventChannelBindInterDomain(RemoteDomain, + RemotePort, + &LocalPort); if (!NT_SUCCESS(status)) goto fail1; @@ -694,10 +698,20 @@ EvtchnAbiAcquire( IN PXENBUS_EVTCHN_CONTEXT Context ) { - EvtchnTwoLevelGetAbi(Context->EvtchnTwoLevelContext, - &Context->EvtchnAbi); + NTSTATUS status; + + EvtchnFifoGetAbi(Context->EvtchnFifoContext, + &Context->EvtchnAbi); + + status = XENBUS_EVTCHN_ABI(Acquire, &Context->EvtchnAbi); + if (!NT_SUCCESS(status)) { + EvtchnTwoLevelGetAbi(Context->EvtchnTwoLevelContext, + &Context->EvtchnAbi); - return XENBUS_EVTCHN_ABI(Acquire, &Context->EvtchnAbi); + status = XENBUS_EVTCHN_ABI(Acquire, &Context->EvtchnAbi); + } + + return status; } static VOID @@ -1045,6 +1059,10 @@ EvtchnInitialize( if (!NT_SUCCESS(status)) goto fail3; + status = EvtchnFifoInitialize(Fdo, &(*Context)->EvtchnFifoContext); + if (!NT_SUCCESS(status)) + goto fail4; + status = SuspendGetInterface(FdoGetSuspendContext(Fdo), XENBUS_SUSPEND_INTERFACE_VERSION_MAX, (PINTERFACE)&(*Context)->SuspendInterface, @@ -1075,6 +1093,12 @@ EvtchnInitialize( return STATUS_SUCCESS; +fail4: + Error("fail4\n"); + + EvtchnTwoLevelTeardown((*Context)->EvtchnTwoLevelContext); + (*Context)->EvtchnTwoLevelContext = NULL; + fail3: Error("fail3\n"); @@ -1169,6 +1193,9 @@ EvtchnTeardown( RtlZeroMemory(&Context->SuspendInterface, sizeof (XENBUS_SUSPEND_INTERFACE)); + EvtchnFifoTeardown(Context->EvtchnFifoContext); + Context->EvtchnFifoContext = NULL; + EvtchnTwoLevelTeardown(Context->EvtchnTwoLevelContext); Context->EvtchnTwoLevelContext = NULL; diff --git a/src/xenbus/evtchn_2l.c b/src/xenbus/evtchn_2l.c index 13dfd65..81bb652 100644 --- a/src/xenbus/evtchn_2l.c +++ b/src/xenbus/evtchn_2l.c @@ -41,6 +41,7 @@ #include "assert.h" typedef struct _XENBUS_EVTCHN_TWO_LEVEL_CONTEXT { + ULONG Magic; PXENBUS_FDO Fdo; KSPIN_LOCK Lock; LONG References; @@ -102,6 +103,8 @@ EvtchnTwoLevelPortDisable( { PXENBUS_EVTCHN_TWO_LEVEL_CONTEXT Context = (PVOID)_Context; + ASSERT3U(Context->Magic, ==, XENBUS_EVTCHN_TWO_LEVEL_TAG); + XENBUS_SHARED_INFO(EvtchnMask, &Context->SharedInfoInterface, Port); @@ -115,6 +118,8 @@ EvtchnTwoLevelPortAck( { PXENBUS_EVTCHN_TWO_LEVEL_CONTEXT Context = (PVOID)_Context; + ASSERT3U(Context->Magic, ==, XENBUS_EVTCHN_TWO_LEVEL_TAG); + XENBUS_SHARED_INFO(EvtchnAck, &Context->SharedInfoInterface, Port); @@ -128,6 +133,8 @@ EvtchnTwoLevelPortMask( { PXENBUS_EVTCHN_TWO_LEVEL_CONTEXT Context = (PVOID)_Context; + ASSERT3U(Context->Magic, ==, XENBUS_EVTCHN_TWO_LEVEL_TAG); + XENBUS_SHARED_INFO(EvtchnMask, &Context->SharedInfoInterface, Port); @@ -141,6 +148,8 @@ EvtchnTwoLevelPortUnmask( { PXENBUS_EVTCHN_TWO_LEVEL_CONTEXT Context = (PVOID)_Context; + ASSERT3U(Context->Magic, ==, XENBUS_EVTCHN_TWO_LEVEL_TAG); + return XENBUS_SHARED_INFO(EvtchnUnmask, &Context->SharedInfoInterface, Port); @@ -155,6 +164,8 @@ EvtchnTwoLevelAcquire( KIRQL Irql; NTSTATUS status; + ASSERT3U(Context->Magic, ==, 
XENBUS_EVTCHN_TWO_LEVEL_TAG); + KeAcquireSpinLock(&Context->Lock, &Irql); if (Context->References++ != 0) @@ -193,6 +204,8 @@ EvtchnTwoLevelRelease( PXENBUS_EVTCHN_TWO_LEVEL_CONTEXT Context = (PVOID)_Context; KIRQL Irql; + ASSERT3U(Context->Magic, ==, XENBUS_EVTCHN_TWO_LEVEL_TAG); + KeAcquireSpinLock(&Context->Lock, &Irql); if (--Context->References > 0) @@ -246,6 +259,7 @@ EvtchnTwoLevelInitialize( KeInitializeSpinLock(&Context->Lock); + Context->Magic = XENBUS_EVTCHN_TWO_LEVEL_TAG; Context->Fdo = Fdo; *_Context = (PVOID)Context; @@ -280,6 +294,7 @@ EvtchnTwoLevelTeardown( Trace("====>\n"); Context->Fdo = NULL; + Context->Magic = 0; RtlZeroMemory(&Context->Lock, sizeof (KSPIN_LOCK)); diff --git a/src/xenbus/evtchn_2l.h b/src/xenbus/evtchn_2l.h index c69c555..375051d 100644 --- a/src/xenbus/evtchn_2l.h +++ b/src/xenbus/evtchn_2l.h @@ -55,5 +55,5 @@ EvtchnTwoLevelTeardown( IN PXENBUS_EVTCHN_ABI_CONTEXT Context ); -#endif // _XENBUS_EVTCHN_H +#endif // _XENBUS_EVTCHN_2L_H diff --git a/src/xenbus/evtchn_fifo.c b/src/xenbus/evtchn_fifo.c new file mode 100644 index 0000000..bbb6dd1 --- /dev/null +++ b/src/xenbus/evtchn_fifo.c @@ -0,0 +1,714 @@ +/* Copyright (c) Citrix Systems Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, + * with or without modification, are permitted provided + * that the following conditions are met: + * + * * Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other + * materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include + +#include "evtchn_fifo.h" +#include "shared_info.h" +#include "fdo.h" +#include "dbg_print.h" +#include "assert.h" + +#define MAX_HVM_VCPUS 128 + +typedef struct _XENBUS_EVTCHN_FIFO_CONTEXT { + ULONG Magic; + PXENBUS_FDO Fdo; + KSPIN_LOCK Lock; + LONG References; + PMDL ControlBlockMdl[MAX_HVM_VCPUS]; + PMDL *EventPageMdl; + ULONG EventPageCount; + ULONG Head[EVTCHN_FIFO_MAX_QUEUES]; +} XENBUS_EVTCHN_FIFO_CONTEXT, *PXENBUS_EVTCHN_FIFO_CONTEXT; + +#define EVENT_WORDS_PER_PAGE (PAGE_SIZE / sizeof (event_word_t)) + +#define XENBUS_EVTCHN_FIFO_TAG 'OFIF' + +static FORCEINLINE PVOID +__EvtchnFifoAllocate( + IN ULONG Length + ) +{ + return __AllocatePoolWithTag(NonPagedPool, Length, XENBUS_EVTCHN_FIFO_TAG); +} + +static FORCEINLINE VOID +__EvtchnFifoFree( + IN PVOID Buffer + ) +{ + ExFreePoolWithTag(Buffer, XENBUS_EVTCHN_FIFO_TAG); +} + +static event_word_t * +EvtchnFifoEventWord( + IN PXENBUS_EVTCHN_FIFO_CONTEXT Context, + IN ULONG Port + ) +{ + ULONG Index; + PMDL Mdl; + event_word_t *EventWord; + + Index = Port / EVENT_WORDS_PER_PAGE; + ASSERT3U(Index, <, Context->EventPageCount); + + Mdl = Context->EventPageMdl[Index]; + + EventWord = MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority); + ASSERT(EventWord != NULL); + + ASSERT3U(Port, >=, Index * EVENT_WORDS_PER_PAGE); + Port -= Index * EVENT_WORDS_PER_PAGE; + + return &EventWord[Port]; +} + +static FORCEINLINE BOOLEAN +__EvtchnFifoTestFlag( + IN event_word_t *EventWord, + IN ULONG Flag + ) +{ + KeMemoryBarrier(); + + return !!(*EventWord & (1 << Flag)); +} + +static FORCEINLINE BOOLEAN +__EvtchnFifoTestAndSetFlag( + IN event_word_t *EventWord, + IN ULONG Flag + ) +{ + KeMemoryBarrier(); + + return !!InterlockedBitTestAndSet((LONG *)EventWord, Flag); +} + +static FORCEINLINE BOOLEAN +__EvtchnFifoTestAndClearFlag( + IN event_word_t *EventWord, + IN ULONG Flag + ) +{ + KeMemoryBarrier(); + + return !!InterlockedBitTestAndReset((LONG *)EventWord, Flag); +} + +static FORCEINLINE VOID +__EvtchnFifoSetFlag( + IN event_word_t *EventWord, + IN ULONG Flag + ) +{ + *EventWord |= (1 << Flag); + KeMemoryBarrier(); +} + +static FORCEINLINE VOID +__EvtchnFifoClearFlag( + IN event_word_t *EventWord, + IN ULONG Flag + ) +{ + *EventWord &= ~(1 << Flag); + KeMemoryBarrier(); +} + +static FORCEINLINE ULONG +__EvtchnFifoUnlink( + IN event_word_t *EventWord + ) +{ + LONG Old; + LONG New; + + do { + Old = *EventWord; + + // Clear linked bit and link value + New = Old & ~((1 << EVTCHN_FIFO_LINKED) | EVTCHN_FIFO_LINK_MASK); + } while (InterlockedCompareExchange((LONG *)EventWord, New, Old) != Old); + + return Old & EVTCHN_FIFO_LINK_MASK; +} + +static NTSTATUS +EvtchnFifoExpand( + IN PXENBUS_EVTCHN_FIFO_CONTEXT Context, + IN ULONG Port + ) +{ + LONG Index; + ULONG EventPageCount; + PMDL *EventPageMdl; + PMDL Mdl; + ULONG Start; + ULONG End; + NTSTATUS status; + + Index = Port / EVENT_WORDS_PER_PAGE; + ASSERT3U(Index, >=, (LONG)Context->EventPageCount); + + EventPageCount = Index + 1; + EventPageMdl = __EvtchnFifoAllocate(sizeof (PMDL) * EventPageCount); + + status = STATUS_NO_MEMORY; + if (EventPageMdl == NULL) + goto fail1; + + for (Index = 0; Index < (LONG)Context->EventPageCount; Index++) + EventPageMdl[Index] = Context->EventPageMdl[Index]; + + Index = Context->EventPageCount; + while (Index < (LONG)EventPageCount) { + event_word_t *EventWord; + PFN_NUMBER Pfn; + PHYSICAL_ADDRESS Address; + + Mdl = __AllocatePage(); + + status = STATUS_NO_MEMORY; + if (Mdl == NULL) + goto fail2; + + EventWord = 
MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority); + ASSERT(EventWord != NULL); + + for (Port = 0; Port < EVENT_WORDS_PER_PAGE; Port++) + __EvtchnFifoSetFlag(&EventWord[Port], EVTCHN_FIFO_MASKED); + + Pfn = MmGetMdlPfnArray(Mdl)[0]; + + status = EventChannelExpandArray(Pfn); + if (!NT_SUCCESS(status)) + goto fail3; + + Address.QuadPart = (ULONGLONG)Pfn << PAGE_SHIFT; + + LogPrintf(LOG_LEVEL_INFO, + "EVTCHN_FIFO: EVENTARRAY[%u] @ %08x.%08x\n", + Index, + Address.HighPart, + Address.LowPart); + + EventPageMdl[Index++] = Mdl; + } + + Start = Context->EventPageCount * EVENT_WORDS_PER_PAGE; + End = (EventPageCount * EVENT_WORDS_PER_PAGE) - 1; + + Info("added ports [%08x - %08x]\n", Start, End); + + if (Context->EventPageMdl != NULL) + __EvtchnFifoFree(Context->EventPageMdl); + + Context->EventPageMdl = EventPageMdl; + Context->EventPageCount = EventPageCount; + + return STATUS_SUCCESS; + +fail3: + Error("fail3\n"); + + __FreePage(Mdl); + +fail2: + Error("fail2\n"); + + while (--Index >= (LONG)Context->EventPageCount) { + Mdl = EventPageMdl[Index]; + + __FreePage(Mdl); + } + + __EvtchnFifoFree(EventPageMdl); + +fail1: + Error("fail1 (%08x)\n", status); + + return status; +} + +static VOID +EvtchnFifoContract( + IN PXENBUS_EVTCHN_FIFO_CONTEXT Context + ) +{ + LONG Index; + + Index = Context->EventPageCount; + while (--Index >= 0) { + PMDL Mdl; + + Mdl = Context->EventPageMdl[Index]; + + __FreePage(Mdl); + } + + __EvtchnFifoFree(Context->EventPageMdl); + + Context->EventPageMdl = NULL; + Context->EventPageCount = 0; +} + +static BOOLEAN +EvtchnFifoPollPriority( + IN PXENBUS_EVTCHN_FIFO_CONTEXT Context, + IN evtchn_fifo_control_block_t *ControlBlock, + IN ULONG Priority, + IN PULONG Ready, + IN XENBUS_EVTCHN_ABI_EVENT Event, + IN PVOID Argument + ) +{ + ULONG Head; + ULONG Port; + event_word_t *EventWord; + BOOLEAN DoneSomething; + + Head = Context->Head[Priority]; + + if (Head == 0) { + KeMemoryBarrier(); + Head = ControlBlock->head[Priority]; + } + + Port = Head; + EventWord = EvtchnFifoEventWord(Context, Port); + + Head = __EvtchnFifoUnlink(EventWord); + + if (Head == 0) + *Ready &= ~(1ull << Priority); + + DoneSomething = FALSE; + + if (!__EvtchnFifoTestFlag(EventWord, EVTCHN_FIFO_MASKED) && + __EvtchnFifoTestFlag(EventWord, EVTCHN_FIFO_PENDING)) + DoneSomething = Event(Argument, Port); + + Context->Head[Priority] = Head; + + return DoneSomething; +} + +static BOOLEAN +EvtchnFifoPoll( + IN PXENBUS_EVTCHN_ABI_CONTEXT _Context, + IN unsigned int vcpu_id, + IN XENBUS_EVTCHN_ABI_EVENT Event, + IN PVOID Argument + ) +{ + PXENBUS_EVTCHN_FIFO_CONTEXT Context = (PVOID)_Context; + PMDL Mdl; + evtchn_fifo_control_block_t *ControlBlock; + ULONG Ready; + ULONG Priority; + BOOLEAN DoneSomething; + + ASSERT3U(Context->Magic, ==, XENBUS_EVTCHN_FIFO_TAG); + + Mdl = Context->ControlBlockMdl[vcpu_id]; + + ControlBlock = MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority); + ASSERT(ControlBlock != NULL); + + Ready = InterlockedExchange((LONG *)&ControlBlock->ready, 0); + DoneSomething = FALSE; + + while (_BitScanReverse(&Priority, Ready)) { + DoneSomething |= EvtchnFifoPollPriority(Context, + ControlBlock, + Priority, + &Ready, + Event, + Argument); + Ready |= InterlockedExchange((LONG *)&ControlBlock->ready, 0); + } + + return DoneSomething; +} + +static NTSTATUS +EvtchnFifoPortEnable( + IN PXENBUS_EVTCHN_ABI_CONTEXT _Context, + IN ULONG Port + ) +{ + PXENBUS_EVTCHN_FIFO_CONTEXT Context = (PVOID)_Context; + KIRQL Irql; + NTSTATUS status; + + ASSERT3U(Context->Magic, ==, XENBUS_EVTCHN_FIFO_TAG); + + 
KeAcquireSpinLock(&Context->Lock, &Irql); + + if (Port / EVENT_WORDS_PER_PAGE >= Context->EventPageCount) { + status = EvtchnFifoExpand(Context, Port); + + if (!NT_SUCCESS(status)) + goto fail1; + } + + KeReleaseSpinLock(&Context->Lock, Irql); + + return STATUS_SUCCESS; + +fail1: + Error("fail1 (%08x)\n", status); + + KeReleaseSpinLock(&Context->Lock, Irql); + + return status; +} + +static VOID +EvtchnFifoPortAck( + IN PXENBUS_EVTCHN_ABI_CONTEXT _Context, + IN ULONG Port + ) +{ + PXENBUS_EVTCHN_FIFO_CONTEXT Context = (PVOID)_Context; + event_word_t *EventWord; + + ASSERT3U(Context->Magic, ==, XENBUS_EVTCHN_FIFO_TAG); + + EventWord = EvtchnFifoEventWord(Context, Port); + __EvtchnFifoClearFlag(&EventWord[Port], EVTCHN_FIFO_PENDING); +} + +static VOID +EvtchnFifoPortMask( + IN PXENBUS_EVTCHN_ABI_CONTEXT _Context, + IN ULONG Port + ) +{ + PXENBUS_EVTCHN_FIFO_CONTEXT Context = (PVOID)_Context; + event_word_t *EventWord; + + ASSERT3U(Context->Magic, ==, XENBUS_EVTCHN_FIFO_TAG); + + EventWord = EvtchnFifoEventWord(Context, Port); + __EvtchnFifoSetFlag(&EventWord[Port], EVTCHN_FIFO_MASKED); +} + +static BOOLEAN +EvtchnFifoPortUnmask( + IN PXENBUS_EVTCHN_ABI_CONTEXT _Context, + IN ULONG Port + ) +{ + PXENBUS_EVTCHN_FIFO_CONTEXT Context = (PVOID)_Context; + event_word_t *EventWord; + LONG Old; + LONG New; + + ASSERT3U(Context->Magic, ==, XENBUS_EVTCHN_FIFO_TAG); + + EventWord = EvtchnFifoEventWord(Context, Port); + + // Clear masked bit, spinning if busy + do { + Old = *EventWord & ~(1 << EVTCHN_FIFO_BUSY); + New = Old & ~(1 << EVTCHN_FIFO_MASKED); + } while (InterlockedCompareExchange((LONG *)EventWord, New, Old) != Old); + + // Check whether the port was masked + if (~Old & (1 << EVTCHN_FIFO_MASKED)) + return FALSE; + + // If we cleared the mask then check whether something is pending + if (!__EvtchnFifoTestAndClearFlag(EventWord, EVTCHN_FIFO_PENDING)) + return FALSE; + + // Something is pending, so re-mask the port + __EvtchnFifoSetFlag(EventWord, EVTCHN_FIFO_MASKED); + + return TRUE; +} + +static VOID +EvtchnFifoPortDisable( + IN PXENBUS_EVTCHN_ABI_CONTEXT _Context, + IN ULONG Port + ) +{ + EvtchnFifoPortMask(_Context, Port); +} + +static VOID +EvtchnFifoReset( + IN PXENBUS_EVTCHN_FIFO_CONTEXT Context + ) +{ + ULONG LocalPort; + ULONG RemotePort; + USHORT RemoteDomain; + NTSTATUS status; + + UNREFERENCED_PARAMETER(Context); + + LocalPort = HvmGetStoreEvtchn(); + + // + // When we reset the event channel ABI we will lose our + // binding to the STORE event channel, which was set up + // by the toolstack during domain build. + // We need to get the binding back, so we must query the + // remote domain and port, and then re-bind after the + // reset. 
+ // + + status = EventChannelQueryInterDomain(LocalPort, + &RemoteDomain, + &RemotePort); + ASSERT(NT_SUCCESS(status)); + + LogPrintf(LOG_LEVEL_INFO, "EVTCHN_FIFO: RESET\n"); + (VOID) EventChannelReset(); + + status = EventChannelBindInterDomain(RemoteDomain, + RemotePort, + &LocalPort); + ASSERT(NT_SUCCESS(status)); + + HvmSetStoreEvtchn(LocalPort); +} + +static NTSTATUS +EvtchnFifoAcquire( + IN PXENBUS_EVTCHN_ABI_CONTEXT _Context + ) +{ + PXENBUS_EVTCHN_FIFO_CONTEXT Context = (PVOID)_Context; + KIRQL Irql; + LONG Cpu; + PMDL Mdl; + NTSTATUS status; + + ASSERT3U(Context->Magic, ==, XENBUS_EVTCHN_FIFO_TAG); + + KeAcquireSpinLock(&Context->Lock, &Irql); + + if (Context->References++ != 0) + goto done; + + Trace("====>\n"); + + Cpu = 0; + while (Cpu < KeNumberProcessors) { + unsigned int vcpu_id; + PFN_NUMBER Pfn; + PHYSICAL_ADDRESS Address; + + Mdl = __AllocatePage(); + + status = STATUS_NO_MEMORY; + if (Mdl == NULL) + goto fail1; + + vcpu_id = SystemVirtualCpuIndex(Cpu); + Pfn = MmGetMdlPfnArray(Mdl)[0]; + + status = EventChannelInitControl(Pfn, vcpu_id); + if (!NT_SUCCESS(status)) + goto fail2; + + Address.QuadPart = (ULONGLONG)Pfn << PAGE_SHIFT; + + LogPrintf(LOG_LEVEL_INFO, + "EVTCHN_FIFO: CONTROLBLOCK[%u] @ %08x.%08x\n", + vcpu_id, + Address.HighPart, + Address.LowPart); + + Context->ControlBlockMdl[vcpu_id] = Mdl; + Cpu++; + } + + Trace("<====\n"); + +done: + KeReleaseSpinLock(&Context->Lock, Irql); + + return STATUS_SUCCESS; + +fail2: + __FreePage(Mdl); + +fail1: + Error("fail1 (%08x)\n", status); + + (VOID) EventChannelReset(); + + while (--Cpu >= 0) { + unsigned int vcpu_id; + + vcpu_id = SystemVirtualCpuIndex(Cpu); + + Mdl = Context->ControlBlockMdl[vcpu_id]; + Context->ControlBlockMdl[vcpu_id] = NULL; + + __FreePage(Mdl); + } + + --Context->References; + ASSERT3U(Context->References, ==, 0); + KeReleaseSpinLock(&Context->Lock, Irql); + + return status; +} + +VOID +EvtchnFifoRelease( + IN PXENBUS_EVTCHN_ABI_CONTEXT _Context + ) +{ + PXENBUS_EVTCHN_FIFO_CONTEXT Context = (PVOID)_Context; + KIRQL Irql; + LONG Cpu; + + ASSERT3U(Context->Magic, ==, XENBUS_EVTCHN_FIFO_TAG); + + KeAcquireSpinLock(&Context->Lock, &Irql); + + if (--Context->References > 0) + goto done; + + Trace("====>\n"); + + EvtchnFifoReset(Context); + + EvtchnFifoContract(Context); + + Cpu = KeNumberProcessors; + while (--Cpu >= 0) { + unsigned int vcpu_id; + PMDL Mdl; + + vcpu_id = SystemVirtualCpuIndex(Cpu); + + Mdl = Context->ControlBlockMdl[vcpu_id]; + Context->ControlBlockMdl[vcpu_id] = NULL; + + __FreePage(Mdl); + } + + Trace("<====\n"); + +done: + KeReleaseSpinLock(&Context->Lock, Irql); +} + +static XENBUS_EVTCHN_ABI EvtchnAbiFifo = { + NULL, + EvtchnFifoAcquire, + EvtchnFifoRelease, + EvtchnFifoPoll, + EvtchnFifoPortEnable, + EvtchnFifoPortDisable, + EvtchnFifoPortAck, + EvtchnFifoPortMask, + EvtchnFifoPortUnmask +}; + +NTSTATUS +EvtchnFifoInitialize( + IN PXENBUS_FDO Fdo, + OUT PXENBUS_EVTCHN_ABI_CONTEXT *_Context + ) +{ + PXENBUS_EVTCHN_FIFO_CONTEXT Context; + NTSTATUS status; + + Trace("====>\n"); + + Context = __EvtchnFifoAllocate(sizeof (XENBUS_EVTCHN_FIFO_CONTEXT)); + + status = STATUS_NO_MEMORY; + if (Context == NULL) + goto fail1; + + KeInitializeSpinLock(&Context->Lock); + + Context->Fdo = Fdo; + Context->Magic = XENBUS_EVTCHN_FIFO_TAG; + + *_Context = (PVOID)Context; + + Trace("<====\n"); + + return STATUS_SUCCESS; + +fail1: + Error("fail1 (%08x)\n", status); + + return status; +} + +VOID +EvtchnFifoGetAbi( + IN PXENBUS_EVTCHN_ABI_CONTEXT _Context, + OUT PXENBUS_EVTCHN_ABI Abi) +{ + *Abi = 
EvtchnAbiFifo; + + Abi->Context = (PVOID)_Context; +} + +VOID +EvtchnFifoTeardown( + IN PXENBUS_EVTCHN_ABI_CONTEXT _Context + ) +{ + PXENBUS_EVTCHN_FIFO_CONTEXT Context = (PVOID)_Context; + + Trace("====>\n"); + + Context->Fdo = NULL; + Context->Magic = 0; + + RtlZeroMemory(&Context->Lock, sizeof (KSPIN_LOCK)); + + ASSERT(IsZeroMemory(Context, sizeof (XENBUS_EVTCHN_FIFO_CONTEXT))); + __EvtchnFifoFree(Context); + + Trace("<====\n"); +} diff --git a/src/xenbus/evtchn_fifo.h b/src/xenbus/evtchn_fifo.h new file mode 100644 index 0000000..bf96b19 --- /dev/null +++ b/src/xenbus/evtchn_fifo.h @@ -0,0 +1,59 @@ +/* Copyright (c) Citrix Systems Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, + * with or without modification, are permitted provided + * that the following conditions are met: + * + * * Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other + * materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef _XENBUS_EVTCHN_FIFO_H +#define _XENBUS_EVTCHN_FIFO_H + +#include +#include + +#include "evtchn_abi.h" +#include "fdo.h" + +extern NTSTATUS +EvtchnFifoInitialize( + IN PXENBUS_FDO Fdo, + OUT PXENBUS_EVTCHN_ABI_CONTEXT *Context + ); + +extern VOID +EvtchnFifoGetAbi( + IN PXENBUS_EVTCHN_ABI_CONTEXT Context, + OUT PXENBUS_EVTCHN_ABI Abi + ); + +extern VOID +EvtchnFifoTeardown( + IN PXENBUS_EVTCHN_ABI_CONTEXT Context + ); + +#endif // _XENBUS_EVTCHN_FIFO_H + diff --git a/src/xenbus/fdo.c b/src/xenbus/fdo.c index ebc410e..23a8fea 100644 --- a/src/xenbus/fdo.c +++ b/src/xenbus/fdo.c @@ -2630,7 +2630,10 @@ FdoS4ToS3( ASSERT3U(KeGetCurrentIrql(), ==, PASSIVE_LEVEL); ASSERT3U(__FdoGetSystemPowerState(Fdo), ==, PowerSystemHibernate); - KeRaiseIrql(DISPATCH_LEVEL, &Irql); + KeRaiseIrql(HIGH_LEVEL, &Irql); + + // Clear the event channel so that the HVM_PARAM is re-read + HvmSetStoreEvtchn(0); if (Fdo->UnplugInterface.Interface.Context != NULL) XENFILT_UNPLUG(Replay, &Fdo->UnplugInterface); diff --git a/src/xenbus/store.c b/src/xenbus/store.c index 4d664d0..df2b4c2 100644 --- a/src/xenbus/store.c +++ b/src/xenbus/store.c @@ -1825,12 +1825,14 @@ StoreEnable( IN PXENBUS_STORE_CONTEXT Context ) { - ULONGLONG Port; + ULONG Port; BOOLEAN Pending; - NTSTATUS status; - status = HvmGetParam(HVM_PARAM_STORE_EVTCHN, &Port); - ASSERT(NT_SUCCESS(status)); + Port = HvmGetStoreEvtchn(); + + LogPrintf(LOG_LEVEL_INFO, + "STORE: EVTCHN %u\n", + Port); Context->Channel = XENBUS_EVTCHN(Open, &Context->EvtchnInterface, diff --git a/src/xenbus/suspend.c b/src/xenbus/suspend.c index 6ab2bd2..ba4fca5 100644 --- a/src/xenbus/suspend.c +++ b/src/xenbus/suspend.c @@ -173,6 +173,9 @@ SuspendTrigger( Context->Count++; + // Clear the event channel so that the HVM_PARAM is re-read + HvmSetStoreEvtchn(0); + if (Context->UnplugInterface.Interface.Context != NULL) XENFILT_UNPLUG(Replay, &Context->UnplugInterface); diff --git a/vs2012/xenbus/xenbus.vcxproj b/vs2012/xenbus/xenbus.vcxproj index 8916cd8..1bd4c01 100644 --- a/vs2012/xenbus/xenbus.vcxproj +++ b/vs2012/xenbus/xenbus.vcxproj @@ -90,6 +90,7 @@ + diff --git a/vs2013/xenbus/xenbus.vcxproj b/vs2013/xenbus/xenbus.vcxproj index 922cf6a..3b274a9 100644 --- a/vs2013/xenbus/xenbus.vcxproj +++ b/vs2013/xenbus/xenbus.vcxproj @@ -131,6 +131,7 @@ +
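
For reference (not part of the patch): the FIFO ABI is selected by issuing EVTCHNOP_init_control for each VCPU's control block and falling back to the 2-level ABI if that hypercall fails, which is what EvtchnAbiAcquire()/EvtchnFifoAcquire() above implement. A minimal sketch of that probe follows, under stated assumptions: event_channel_op() and gfn_of() are hypothetical stand-ins for this driver's EventChannelOp() and MDL/PFN plumbing, and the include path simply mirrors this repository's include/xen/ layout.

    /*
     * Sketch only: probe for FIFO event channel support on one VCPU.
     * A negative return (e.g. -ENOSYS from an older hypervisor) means
     * the caller should fall back to the 2-level ABI.
     */
    #include <stdint.h>

    #include "xen/event_channel.h"

    extern int event_channel_op(int cmd, void *arg);    /* assumed hypercall wrapper */
    extern uint64_t gfn_of(void *page);                 /* assumed page -> gfn helper */

    static int probe_fifo_abi(void *control_page, uint32_t vcpu)
    {
        struct evtchn_init_control init = { 0 };
        int rc;

        /* Register this VCPU's control block with the hypervisor. */
        init.control_gfn = gfn_of(control_page);
        init.offset = 0;
        init.vcpu = vcpu;

        rc = event_channel_op(EVTCHNOP_init_control, &init);
        if (rc < 0)
            return rc;              /* FIFO ABI unavailable: use 2-level */

        /* FIFO ABI is now live; link_bits reports the link field width. */
        return init.link_bits;
    }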