By default the sve (suppress #VE) bits are not set.

This patch adds a new hypercall, HVMOP_altp2m_set_suppress_ve_multi,
together with its libxc wrapper, xc_altp2m_set_supress_ve_multi(), to
set the sve bits for a range of gfns.

The core function, p2m_set_suppress_ve_multi(), does not stop on the
first error; it makes a best effort to set the bits across the whole
given range. A continuation check provides preemption on large ranges.

The gfn of the first error is stored in
xen_hvm_altp2m_suppress_ve_multi.first_error_gfn and the error code in
xen_hvm_altp2m_suppress_ve_multi.first_error. If no error occurred,
both values are 0.
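
As a usage sketch (hypothetical caller; the view id, gfn range, and
helper name are made-up example values), a toolstack could drive the
new wrapper as follows. Preemption is handled inside the hypervisor
via continuations, so a single call covers the whole range:

    #include <stdbool.h>
    #include <stdio.h>
    #include <xenctrl.h>

    /* Hypothetical helper; view 1 and gfns 0x1000-0x1fff are examples. */
    static int set_sve_range(xc_interface *xch, uint32_t domid)
    {
        xen_pfn_t error_gfn = 0;   /* Only written if an error occurred. */
        int32_t error_code = 0;
        int rc = xc_altp2m_set_supress_ve_multi(xch, domid, 1 /* view */,
                                                0x1000, 0x1fff, true,
                                                &error_gfn, &error_code);

        if ( rc < 0 )
            fprintf(stderr, "hypercall failed: %d\n", rc);
        else if ( error_code )
            /* Best effort: gfns after the failing one were still tried. */
            fprintf(stderr, "first failure at gfn %#lx: %d\n",
                    (unsigned long)error_gfn, error_code);

        return rc;
    }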
Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Petre Pircalabu <ppircalabu@bitdefender.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
uint16_t view_id);
int xc_altp2m_set_suppress_ve(xc_interface *handle, uint32_t domid,
uint16_t view_id, xen_pfn_t gfn, bool sve);
+int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, xen_pfn_t first_gfn,
+ xen_pfn_t last_gfn, bool sve,
+ xen_pfn_t *error_gfn, int32_t *error_code);
int xc_altp2m_get_suppress_ve(xc_interface *handle, uint32_t domid,
uint16_t view_id, xen_pfn_t gfn, bool *sve);
int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
return rc;
}
+int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, xen_pfn_t first_gfn,
+ xen_pfn_t last_gfn, bool sve,
+ xen_pfn_t *error_gfn, int32_t *error_code)
+{
+ int rc;
+ DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+ arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+ if ( arg == NULL )
+ return -1;
+
+ arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+ arg->cmd = HVMOP_altp2m_set_suppress_ve_multi;
+ arg->domain = domid;
+ arg->u.suppress_ve_multi.view = view_id;
+ arg->u.suppress_ve_multi.first_gfn = first_gfn;
+ arg->u.suppress_ve_multi.last_gfn = last_gfn;
+ arg->u.suppress_ve_multi.suppress_ve = sve;
+
+ rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+ HYPERCALL_BUFFER_AS_ARG(arg));
+
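+    /* The outputs are only written when some gfn in the range failed. */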
+ if ( arg->u.suppress_ve_multi.first_error )
+ {
+ *error_gfn = arg->u.suppress_ve_multi.first_error_gfn;
+ *error_code = arg->u.suppress_ve_multi.first_error;
+ }
+
+ xc_hypercall_buffer_free(handle, arg);
+ return rc;
+}
+
int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
uint16_t view_id, xen_pfn_t gfn,
xenmem_access_t access)
case HVMOP_altp2m_destroy_p2m:
case HVMOP_altp2m_switch_p2m:
case HVMOP_altp2m_set_suppress_ve:
+ case HVMOP_altp2m_set_suppress_ve_multi:
case HVMOP_altp2m_get_suppress_ve:
case HVMOP_altp2m_set_mem_access:
case HVMOP_altp2m_set_mem_access_multi:
}
break;
+ case HVMOP_altp2m_set_suppress_ve_multi:
+ {
+ uint64_t max_phys_addr = (1UL << d->arch.cpuid->extd.maxphysaddr) - 1;
+
+ a.u.suppress_ve_multi.last_gfn = min(a.u.suppress_ve_multi.last_gfn,
+ max_phys_addr);
+
+ if ( a.u.suppress_ve_multi.pad1 ||
+ a.u.suppress_ve_multi.first_gfn > a.u.suppress_ve_multi.last_gfn )
+ rc = -EINVAL;
+ else
+ {
+ rc = p2m_set_suppress_ve_multi(d, &a.u.suppress_ve_multi);
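+            /*
+             * Copy the results back even on -ERESTART: ->first_gfn was
+             * updated, so the continuation resumes where it left off.
+             */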
+ if ( (!rc || rc == -ERESTART) && __copy_to_guest(arg, &a, 1) )
+ rc = -EFAULT;
+ }
+ break;
+ }
+
case HVMOP_altp2m_get_suppress_ve:
if ( a.u.suppress_ve.pad1 || a.u.suppress_ve.pad2 )
rc = -EINVAL;
*/
int p2m_set_suppress_ve(struct domain *d, gfn_t gfn, bool suppress_ve,
unsigned int altp2m_idx)
+{
+ int rc;
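+    /*
+     * Field order: view, suppress_ve, pad1, first_error, first_gfn,
+     * last_gfn, first_error_gfn.
+     */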
+ struct xen_hvm_altp2m_suppress_ve_multi sve = {
+ altp2m_idx, suppress_ve, 0, 0, gfn_x(gfn), gfn_x(gfn), 0
+ };
+
+ if ( !(rc = p2m_set_suppress_ve_multi(d, &sve)) )
+ rc = sve.first_error;
+
+ return rc;
+}
+
+/*
+ * Set/clear the #VE suppress bit for multiple pages. Only available on VMX.
+ */
+int p2m_set_suppress_ve_multi(struct domain *d,
+ struct xen_hvm_altp2m_suppress_ve_multi *sve)
{
struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
struct p2m_domain *ap2m = NULL;
- struct p2m_domain *p2m;
- mfn_t mfn;
- p2m_access_t a;
- p2m_type_t t;
- int rc;
+ struct p2m_domain *p2m = host_p2m;
+ uint64_t start = sve->first_gfn;
+ int rc = 0;
- if ( altp2m_idx > 0 )
+ if ( sve->view > 0 )
{
- if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
- d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
+ if ( sve->view >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
+ d->arch.altp2m_eptp[array_index_nospec(sve->view, MAX_EPTP)] ==
mfn_x(INVALID_MFN) )
return -EINVAL;
- p2m = ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
+ p2m = ap2m = array_access_nospec(d->arch.altp2m_p2m, sve->view);
}
- else
- p2m = host_p2m;
- gfn_lock(host_p2m, gfn, 0);
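+    /* Lock the whole host p2m: the operation may touch many gfns. */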
+ p2m_lock(host_p2m);
if ( ap2m )
p2m_lock(ap2m);
- rc = altp2m_get_effective_entry(p2m, gfn, &mfn, &t, &a, AP2MGET_query);
+ while ( sve->last_gfn >= start )
+ {
+ p2m_access_t a;
+ p2m_type_t t;
+ mfn_t mfn;
+ int err = 0;
- if ( rc )
- goto out;
+ if ( (err = altp2m_get_effective_entry(p2m, _gfn(start), &mfn, &t, &a,
+ AP2MGET_query)) &&
+ !sve->first_error )
+ {
+ sve->first_error_gfn = start; /* Save the gfn of the first error */
+ sve->first_error = err; /* Save the first error code */
+ }
- rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a, suppress_ve);
+ if ( !err && (err = p2m->set_entry(p2m, _gfn(start), mfn,
+ PAGE_ORDER_4K, t, a,
+ sve->suppress_ve)) &&
+ !sve->first_error )
+ {
+ sve->first_error_gfn = start; /* Save the gfn of the first error */
+ sve->first_error = err; /* Save the first error code */
+ }
+
+ /* Check for continuation if it's not the last iteration. */
+ if ( sve->last_gfn >= ++start && hypercall_preempt_check() )
+ {
+ rc = -ERESTART;
+ break;
+ }
+ }
+
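+    /* Note how far we got; a continuation will restart from here. */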
+ sve->first_gfn = start;
-out:
if ( ap2m )
p2m_unlock(ap2m);
- gfn_unlock(host_p2m, gfn, 0);
+ p2m_unlock(host_p2m);
return rc;
}
uint64_t gfn;
};
+struct xen_hvm_altp2m_suppress_ve_multi {
+ uint16_t view;
+ uint8_t suppress_ve; /* Boolean type. */
+ uint8_t pad1;
+ int32_t first_error; /* Should be set to 0. */
+ uint64_t first_gfn; /* Value may be updated. */
+ uint64_t last_gfn;
+ uint64_t first_error_gfn; /* Gfn of the first error. */
+};
+
#if __XEN_INTERFACE_VERSION__ < 0x00040900
/* Set the logical level of one of a domain's PCI INTx wires. */
#define HVMOP_altp2m_vcpu_disable_notify 13
/* Get the active vcpu p2m index */
#define HVMOP_altp2m_get_p2m_idx 14
+/* Set the "Supress #VE" bit for a range of pages */
+#define HVMOP_altp2m_set_suppress_ve_multi 15
domid_t domain;
uint16_t pad1;
uint32_t pad2;
struct xen_hvm_altp2m_change_gfn change_gfn;
struct xen_hvm_altp2m_set_mem_access_multi set_mem_access_multi;
struct xen_hvm_altp2m_suppress_ve suppress_ve;
+ struct xen_hvm_altp2m_suppress_ve_multi suppress_ve_multi;
struct xen_hvm_altp2m_vcpu_disable_notify disable_notify;
struct xen_hvm_altp2m_get_vcpu_p2m_idx get_vcpu_p2m_idx;
uint8_t pad[64];
int p2m_set_suppress_ve(struct domain *d, gfn_t gfn, bool suppress_ve,
unsigned int altp2m_idx);
+int p2m_set_suppress_ve_multi(struct domain *d,
+ struct xen_hvm_altp2m_suppress_ve_multi *suppress_ve);
+
int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve,
unsigned int altp2m_idx);