to disable the HPET in order to improve compatibility with guest
Operating Systems (X86 only)
+=item B<altp2m=MODE>
+
+Specifies the access mode to the alternate-p2m capability. Alternate-p2m
+allows a guest to manage multiple p2m guest physical "memory views" (as
+opposed to a single p2m). This option is disabled by default and is
+available only to x86 HVM domains. You may want this option if you wish to
+control or isolate access to specific guest physical memory pages accessed
+by the guest, e.g. for domain memory introspection or for
+isolation/access-control of memory between components within a single guest
+domain.
+
+The valid values are as follows (see the example following the list):
+
+=over 4
+
+=item B<"disabled">
+
+Altp2m is disabled for the domain (default).
+
+=item B<"mixed">
+
+Allows access to the altp2m interface by both in-guest and external tools.
+
+=item B<"external">
+
+Allows access to the alternate-p2m capability by external privileged tools
+only.
+
+=item B<"limited">
+
+Allows limited access to the alternate-p2m capability: the guest may only
+enable or disable the VMFUNC and #VE features.
+
+=back
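+
+For example, a guest configuration that restricts altp2m control to
+external privileged tools would contain:
+
+    altp2m = "external"
+
+The mode may also be given numerically, using the values listed above,
+e.g. C<altp2m=2> for external.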
+
=item B<altp2mhvm=BOOLEAN>
Enables or disables hvm guest access to alternate-p2m capability.
Alternate-p2m allows a guest to manage multiple p2m guest physical
"memory views" (as opposed to a single p2m).
You may want this option if you want to access-control/isolate
access to specific guest physical memory pages accessed by
the guest, e.g. for HVM domain memory introspection or
for isolation/access-control of memory between components within
-a single guest hvm domain.
+a single guest hvm domain. This option is deprecated; use the option
+B<altp2m> instead.
+
+Note: While the B<altp2mhvm> option is deprecated, legacy applications for
+x86 systems will continue to work when using it.
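+
+As a sketch of the compatibility behaviour (implemented in libxl below), a
+legacy configuration containing
+
+    altp2mhvm = 1
+
+behaves like C<altp2m="mixed">, since the boolean is forwarded as the value
+1 of B<HVM_PARAM_ALTP2M>.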
=item B<nestedhvm=BOOLEAN>
if (d_config->c_info.type == LIBXL_DOMAIN_TYPE_HVM &&
(libxl_defbool_val(d_config->b_info.u.hvm.nested_hvm) &&
- libxl_defbool_val(d_config->b_info.u.hvm.altp2m))) {
+ (libxl_defbool_val(d_config->b_info.u.hvm.altp2m) ||
+ (d_config->b_info.altp2m != LIBXL_ALTP2M_MODE_DISABLED)))) {
ret = ERROR_INVAL;
LOGD(ERROR, domid, "nestedhvm and altp2mhvm cannot be used together");
goto error_out;
}
if (d_config->c_info.type == LIBXL_DOMAIN_TYPE_HVM &&
- libxl_defbool_val(d_config->b_info.u.hvm.altp2m) &&
+ (libxl_defbool_val(d_config->b_info.u.hvm.altp2m) ||
+ (d_config->b_info.altp2m != LIBXL_ALTP2M_MODE_DISABLED)) &&
pod_enabled) {
ret = ERROR_INVAL;
LOGD(ERROR, domid, "Cannot enable PoD and ALTP2M at the same time");
libxl_defbool_val(info->u.hvm.vpt_align));
xc_hvm_param_set(handle, domid, HVM_PARAM_NESTEDHVM,
libxl_defbool_val(info->u.hvm.nested_hvm));
- xc_hvm_param_set(handle, domid, HVM_PARAM_ALTP2M,
- libxl_defbool_val(info->u.hvm.altp2m));
}
int libxl__build_pre(libxl__gc *gc, uint32_t domid,
#endif
}
+ /* Alternate p2m support on x86 is available only for HVM guests. */
+ if (info->type == LIBXL_DOMAIN_TYPE_HVM) {
+ /* The config parameter "altp2m" replaces the parameter "altp2mhvm". For
+ * legacy reasons, both parameters are accepted on x86 HVM guests.
+ *
+ * If the legacy field info->u.hvm.altp2m is set while the new field
+ * info->altp2m is disabled, activate altp2m from the legacy field.
+ * Otherwise set altp2m based on the field info->altp2m. */
+ if (info->altp2m == LIBXL_ALTP2M_MODE_DISABLED &&
+ libxl_defbool_val(info->u.hvm.altp2m))
+ xc_hvm_param_set(ctx->xch, domid, HVM_PARAM_ALTP2M,
+ libxl_defbool_val(info->u.hvm.altp2m));
+ else
+ xc_hvm_param_set(ctx->xch, domid, HVM_PARAM_ALTP2M,
+ info->altp2m);
+ }
+
rc = libxl__arch_domain_create(gc, d_config, domid);
return rc;
("policy", libxl_rdm_reserve_policy),
])
+# Consistent with the values defined for HVM_PARAM_ALTP2M
+libxl_altp2m_mode = Enumeration("altp2m_mode", [
+ (0, "disabled"),
+ (1, "mixed"),
+ (2, "external"),
+ (3, "limited"),
+ ], init_val = "LIBXL_ALTP2M_MODE_DISABLED")
+
libxl_domain_build_info = Struct("domain_build_info",[
("max_vcpus", integer),
("avail_vcpus", libxl_bitmap),
("mmio_hole_memkb", MemKB),
("timer_mode", libxl_timer_mode),
("nested_hvm", libxl_defbool),
+ # The u.hvm.altp2m field is used solely for x86 HVM guests and is
+ # maintained for legacy purposes.
("altp2m", libxl_defbool),
("system_firmware", string),
("smbios_firmware", string),
("arch_arm", Struct(None, [("gic_version", libxl_gic_version),
])),
+ # Alternate p2m is not bound to any architecture or guest type; it is
+ # currently supported on x86 HVM, and ARM support is planned.
+ ("altp2m", libxl_altp2m_mode),
], dir=DIR_IN
)
xlu_cfg_get_defbool(config, "nestedhvm", &b_info->u.hvm.nested_hvm, 0);
- xlu_cfg_get_defbool(config, "altp2mhvm", &b_info->u.hvm.altp2m, 0);
+ if (!xlu_cfg_get_defbool(config, "altp2mhvm", &b_info->u.hvm.altp2m, 0))
+ fprintf(stderr, "WARNING: Specifying \"altp2mhvm\" is deprecated. "
+ "Please use \"altp2m\" instead.\n");
xlu_cfg_replace_string(config, "smbios_firmware",
&b_info->u.hvm.smbios_firmware, 0);
abort();
}
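+ /* "altp2m" may be given either as a number or as one of the mode names
+ * defined by libxl_altp2m_mode ("disabled", "mixed", "external",
+ * "limited"). */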
+ if (!xlu_cfg_get_long(config, "altp2m", &l, 1)) {
+ if (l < LIBXL_ALTP2M_MODE_DISABLED ||
+ l > LIBXL_ALTP2M_MODE_LIMITED) {
+ fprintf(stderr, "ERROR: invalid value %ld for \"altp2m\"\n", l);
+ exit(1);
+ }
+
+ b_info->altp2m = l;
+ } else if (!xlu_cfg_get_string(config, "altp2m", &buf, 0)) {
+ if (libxl_altp2m_mode_from_string(buf, &b_info->altp2m)) {
+ fprintf(stderr, "ERROR: invalid value \"%s\" for \"altp2m\"\n",
+ buf);
+ exit(1);
+ }
+ }
+
if (!xlu_cfg_get_list(config, "ioports", &ioports, &num_ioports, 0)) {
b_info->num_ioports = num_ioports;
b_info->ioports = calloc(num_ioports, sizeof(*b_info->ioports));
rc = xsm_hvm_param_altp2mhvm(XSM_PRIV, d);
if ( rc )
break;
- if ( a.value > 1 )
+ if ( a.value > XEN_ALTP2M_limited )
rc = -EINVAL;
if ( a.value &&
d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
struct xen_hvm_altp2m_op a;
struct domain *d = NULL;
int rc = 0;
+ uint64_t mode;
if ( !hvm_altp2m_supported() )
return -EOPNOTSUPP;
goto out;
}
- if ( (rc = xsm_hvm_altp2mhvm_op(XSM_TARGET, d)) )
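+ /* All altp2m operations are gated on the access mode configured for the
+ * domain; the mode also determines the XSM default action used for the
+ * permission check below. */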
+ mode = d->arch.hvm_domain.params[HVM_PARAM_ALTP2M];
+
+ if ( XEN_ALTP2M_disabled == mode )
+ {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ( (rc = xsm_hvm_altp2mhvm_op(XSM_OTHER, d, mode, a.cmd)) )
goto out;
switch ( a.cmd )
{
case HVMOP_altp2m_get_domain_state:
- if ( !d->arch.hvm_domain.params[HVM_PARAM_ALTP2M] )
- {
- rc = -EINVAL;
- break;
- }
-
a.u.domain_state.state = altp2m_active(d);
rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
break;
struct vcpu *v;
bool_t ostate;
- if ( !d->arch.hvm_domain.params[HVM_PARAM_ALTP2M] ||
- nestedhvm_enabled(d) )
+ if ( nestedhvm_enabled(d) )
{
rc = -EINVAL;
break;
/* Location of the VM Generation ID in guest physical address space. */
#define HVM_PARAM_VM_GENERATION_ID_ADDR 34
-/* Boolean: Enable altp2m */
+/*
+ * Set mode for altp2m:
+ * disabled: don't activate altp2m (default)
+ * mixed: allow access to all altp2m ops for both in-guest and external tools
+ * external: allow access to external privileged tools only
+ * limited: guest only has limited access (i.e. can control VMFUNC and #VE)
+ */
#define HVM_PARAM_ALTP2M 35
+#define XEN_ALTP2M_disabled 0
+#define XEN_ALTP2M_mixed 1
+#define XEN_ALTP2M_external 2
+#define XEN_ALTP2M_limited 3
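+
+/*
+ * Illustrative sketch: a privileged toolstack selects the mode through the
+ * usual HVM param interface, e.g. via libxenctrl:
+ *
+ * xc_hvm_param_set(xch, domid, HVM_PARAM_ALTP2M, XEN_ALTP2M_external);
+ */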
/*
* Size of the x87 FPU FIP/FDP registers that the hypervisor needs to
return xsm_default_action(action, current->domain, d);
}
-static XSM_INLINE int xsm_hvm_altp2mhvm_op(XSM_DEFAULT_ARG struct domain *d)
+static XSM_INLINE int xsm_hvm_altp2mhvm_op(XSM_DEFAULT_ARG struct domain *d, uint64_t mode, uint32_t op)
{
- XSM_ASSERT_ACTION(XSM_TARGET);
- return xsm_default_action(action, current->domain, d);
+ xsm_default_t a;
+ XSM_ASSERT_ACTION(XSM_OTHER);
+
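+ /* Map the configured altp2m access mode onto a default XSM action:
+ * "mixed" allows the guest to act on itself, "external" requires device
+ * model privilege, and "limited" allows the guest itself only the #VE
+ * notify operation. */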
+ switch ( mode )
+ {
+ case XEN_ALTP2M_mixed:
+ a = XSM_TARGET;
+ break;
+ case XEN_ALTP2M_external:
+ a = XSM_DM_PRIV;
+ break;
+ case XEN_ALTP2M_limited:
+ a = (HVMOP_altp2m_vcpu_enable_notify == op) ? XSM_TARGET : XSM_DM_PRIV;
+ break;
+ default:
+ return -EPERM;
+ };
+
+ return xsm_default_action(a, current->domain, d);
}
static XSM_INLINE int xsm_vm_event_control(XSM_DEFAULT_ARG struct domain *d, int mode, int op)
int (*hvm_control) (struct domain *d, unsigned long op);
int (*hvm_param_nested) (struct domain *d);
int (*hvm_param_altp2mhvm) (struct domain *d);
- int (*hvm_altp2mhvm_op) (struct domain *d);
+ int (*hvm_altp2mhvm_op) (struct domain *d, uint64_t mode, uint32_t op);
int (*get_vnumainfo) (struct domain *d);
int (*vm_event_control) (struct domain *d, int mode, int op);
return xsm_ops->hvm_param_altp2mhvm(d);
}
-static inline int xsm_hvm_altp2mhvm_op (xsm_default_t def, struct domain *d)
+static inline int xsm_hvm_altp2mhvm_op (xsm_default_t def, struct domain *d, uint64_t mode, uint32_t op)
{
- return xsm_ops->hvm_altp2mhvm_op(d);
+ return xsm_ops->hvm_altp2mhvm_op(d, mode, op);
}
static inline int xsm_get_vnumainfo (xsm_default_t def, struct domain *d)
return current_has_perm(d, SECCLASS_HVM, HVM__ALTP2MHVM);
}
-static int flask_hvm_altp2mhvm_op(struct domain *d)
+static int flask_hvm_altp2mhvm_op(struct domain *d, uint64_t mode, uint32_t op)
{
+ /*
+ * Require both mode and XSM to allow the operation. Assume XSM rules
+ * are written with the XSM_TARGET policy in mind, so add restrictions
+ * on the domain acting on itself when forbidden by the mode.
+ */
+ switch ( mode )
+ {
+ case XEN_ALTP2M_mixed:
+ break;
+ case XEN_ALTP2M_limited:
+ if ( HVMOP_altp2m_vcpu_enable_notify == op )
+ break;
+ /* fall-through */
+ case XEN_ALTP2M_external:
+ if ( d == current->domain )
+ return -EPERM;
+ break;
+ };
+
return current_has_perm(d, SECCLASS_HVM, HVM__ALTP2MHVM_OP);
}