if ( a.nr > GB(1) >> PAGE_SHIFT )
goto param_fail2;
- rc = xsm_hvm_param(XSM_TARGET, d, op);
+ rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
if ( rc )
goto param_fail2;
if ( !is_hvm_domain(d) )
goto param_fail3;
- rc = xsm_hvm_param(XSM_TARGET, d, op);
+ rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
if ( rc )
goto param_fail3;
if ( !is_hvm_domain(d) )
goto param_fail4;
- rc = xsm_hvm_param(XSM_TARGET, d, op);
+ rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
if ( rc )
goto param_fail4;
if ( !is_hvm_domain(d) )
goto param_fail8;
- rc = xsm_hvm_param(XSM_TARGET, d, op);
+ rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
if ( rc )
goto param_fail8;
if ( !is_hvm_domain(d) )
goto out;
- rc = xsm_mem_event_op(XSM_TARGET, d, XENMEM_access_op);
+ rc = xsm_mem_event_op(XSM_DM_PRIV, d, XENMEM_access_op);
if ( rc )
goto out;
if ( ret )
return ret;
- ret = xsm_mem_event_op(XSM_TARGET, d, op);
+ ret = xsm_mem_event_op(XSM_DM_PRIV, d, op);
if ( ret )
goto out;
if ( rc )
return rc;
- rc = xsm_mem_sharing_op(XSM_TARGET, d, cd, mec->op);
+ rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mec->op);
if ( rc )
{
rcu_unlock_domain(cd);
if ( rc )
return rc;
- rc = xsm_mem_sharing_op(XSM_TARGET, d, cd, mec->op);
+ rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mec->op);
if ( rc )
{
rcu_unlock_domain(cd);
return xsm_default_action(action, current->domain, d);
}
+static XSM_INLINE int xsm_hvm_control(XSM_DEFAULT_ARG struct domain *d, unsigned long op)
+{
+ XSM_ASSERT_ACTION(XSM_DM_PRIV);
+ return xsm_default_action(action, current->domain, d);
+}
+
static XSM_INLINE int xsm_hvm_param_nested(XSM_DEFAULT_ARG struct domain *d)
{
XSM_ASSERT_ACTION(XSM_PRIV);
static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d, int op)
{
- XSM_ASSERT_ACTION(XSM_TARGET);
+ XSM_ASSERT_ACTION(XSM_DM_PRIV);
return xsm_default_action(action, current->domain, d);
}
static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d, struct domain *cd, int op)
{
- XSM_ASSERT_ACTION(XSM_TARGET);
+ XSM_ASSERT_ACTION(XSM_DM_PRIV);
return xsm_default_action(action, current->domain, cd);
}
#endif
int (*hvm_param) (struct domain *d, unsigned long op);
+ int (*hvm_control) (struct domain *d, unsigned long op);
int (*hvm_param_nested) (struct domain *d);
#ifdef CONFIG_X86
return xsm_ops->hvm_param(d, op);
}
+static inline int xsm_hvm_control(xsm_default_t def, struct domain *d, unsigned long op)
+{
+ return xsm_ops->hvm_control(d, op);
+}
+
static inline int xsm_hvm_param_nested (xsm_default_t def, struct domain *d)
{
return xsm_ops->hvm_param_nested(d);
set_to_dummy_if_null(ops, tmem_op);
set_to_dummy_if_null(ops, tmem_control);
set_to_dummy_if_null(ops, hvm_param);
+ set_to_dummy_if_null(ops, hvm_control);
set_to_dummy_if_null(ops, hvm_param_nested);
set_to_dummy_if_null(ops, do_xsm_op);
.tmem_op = flask_tmem_op,
.tmem_control = flask_tmem_control,
.hvm_param = flask_hvm_param,
+ .hvm_control = flask_hvm_param,
.hvm_param_nested = flask_hvm_param_nested,
.do_xsm_op = do_flask_op,