In particular, changes to guest privilege level require the multicall
sequence to be aborted, as hypercalls are permitted from kernel mode
only. While likely not very useful in a multicall, also properly handle
the return value in the HYPERVISOR_iret case (which should be the
guest-specified value).
This is XSA-213.
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Julien Grall <julien.grall@arm.com>
master commit: 22c096c99d8c05833c3c19870e36efb2dd4e8013
master date: 2017-05-02 14:45:02 +0200
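For illustration only, here is a minimal, self-contained sketch (plain C,
compilable outside Xen) of the control flow this patch introduces: the
batch advances only while each call reports mc_continue, stops and
propagates the guest-specified value on mc_exit (the HYPERVISOR_iret
case), and breaks off for later continuation on mc_preempt (privilege
level no longer kernel mode). The types and the fake_call() helper below
are simplified stand-ins, not the hypervisor's real definitions.

#include <stdio.h>

enum mc_disposition { mc_continue, mc_exit, mc_preempt };

struct multicall_entry { unsigned long op, result; };

/*
 * Stand-in for do_multicall_call(): op 1 models HYPERVISOR_iret,
 * op 2 models a call after which the vCPU is no longer in kernel mode,
 * anything else completes normally.
 */
static enum mc_disposition fake_call(struct multicall_entry *e)
{
    switch ( e->op )
    {
    case 1:  e->result = 42; return mc_exit;     /* guest-specified value */
    case 2:  e->result = 0;  return mc_preempt;  /* dropped to user mode */
    default: e->result = 0;  return mc_continue;
    }
}

int main(void)
{
    struct multicall_entry batch[] = { { 0 }, { 2 }, { 0 } };
    enum mc_disposition disp = mc_continue;
    unsigned int i, nr = sizeof(batch) / sizeof(batch[0]);
    long rc = 0;

    /* Mirror the gated loop: stop on error, exit, or preemption. */
    for ( i = 0; !rc && disp == mc_continue && i < nr; i++ )
    {
        disp = fake_call(&batch[i]);
        if ( disp == mc_exit )
            rc = batch[i].result;   /* propagate iret's return value */
    }

    if ( disp == mc_preempt && i < nr )
        printf("preempted after %u of %u calls\n", i, nr);
    else
        printf("finished, rc=%ld\n", rc);
    return 0;
}

With the second entry modelling a privilege-level change, the sketch
reports preemption after one completed call rather than running the
remainder of the batch, which is the behaviour the real loop below gains.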
return true;
}
-void do_multicall_call(struct multicall_entry *multi)
+enum mc_disposition do_multicall_call(struct multicall_entry *multi)
{
arm_hypercall_fn_t call = NULL;
if ( multi->op >= ARRAY_SIZE(arm_hypercall_table) )
{
multi->result = -ENOSYS;
- return;
+ return mc_continue;
}
call = arm_hypercall_table[multi->op].fn;
if ( call == NULL )
{
multi->result = -ENOSYS;
- return;
+ return mc_continue;
}
if ( is_32bit_domain(current->domain) &&
!check_multicall_32bit_clean(multi) )
- return;
+ return mc_continue;
multi->result = call(multi->args[0], multi->args[1],
multi->args[2], multi->args[3],
multi->args[4]);
+
+ return likely(!psr_mode_is_user(guest_cpu_user_regs()))
+ ? mc_continue : mc_preempt;
}
/*
struct mc_state *mcs = &current->mc_state;
uint32_t i;
int rc = 0;
+ enum mc_disposition disp = mc_continue;
if ( unlikely(__test_and_set_bit(_MCSF_in_multicall, &mcs->flags)) )
{
if ( unlikely(!guest_handle_okay(call_list, nr_calls)) )
rc = -EFAULT;
- for ( i = 0; !rc && i < nr_calls; i++ )
+ for ( i = 0; !rc && disp == mc_continue && i < nr_calls; i++ )
{
if ( i && hypercall_preempt_check() )
goto preempted;
trace_multicall_call(&mcs->call);
- do_multicall_call(&mcs->call);
+ disp = do_multicall_call(&mcs->call);
#ifndef NDEBUG
{
}
#endif
- if ( unlikely(__copy_field_to_guest(call_list, &mcs->call, result)) )
+ if ( unlikely(disp == mc_exit) )
+ {
+ if ( __copy_field_to_guest(call_list, &mcs->call, result) )
+ /* nothing, best effort only */;
+ rc = mcs->call.result;
+ }
+ else if ( unlikely(__copy_field_to_guest(call_list, &mcs->call,
+ result)) )
rc = -EFAULT;
else if ( test_bit(_MCSF_call_preempted, &mcs->flags) )
{
guest_handle_add_offset(call_list, 1);
}
+ if ( unlikely(disp == mc_preempt) && i < nr_calls )
+ goto preempted;
+
perfc_incr(calls_to_multicall);
perfc_add(calls_from_multicall, i);
mcs->flags = 0;
#ifndef __ASM_ARM_MULTICALL_H__
#define __ASM_ARM_MULTICALL_H__
-extern void do_multicall_call(struct multicall_entry *call);
+extern enum mc_disposition {
+ mc_continue,
+ mc_exit,
+ mc_preempt,
+} do_multicall_call(struct multicall_entry *call);
#endif /* __ASM_ARM_MULTICALL_H__ */
/*
#include <xen/errno.h>
+enum mc_disposition {
+ mc_continue,
+ mc_exit,
+ mc_preempt,
+};
+
+#define multicall_ret(call) \
+ (unlikely((call)->op == __HYPERVISOR_iret) \
+ ? mc_exit \
+ : likely(guest_kernel_mode(current, \
+ guest_cpu_user_regs())) \
+ ? mc_continue : mc_preempt)
+
#define do_multicall_call(_call) \
- do { \
+ ({ \
__asm__ __volatile__ ( \
" movq %c1(%0),%%rax; " \
" leaq hypercall_table(%%rip),%%rdi; " \
/* all the caller-saves registers */ \
: "rax", "rcx", "rdx", "rsi", "rdi", \
"r8", "r9", "r10", "r11" ); \
- } while ( 0 )
+ multicall_ret(_call); \
+ })
#define compat_multicall_call(_call) \
+ ({ \
__asm__ __volatile__ ( \
" movl %c1(%0),%%eax; " \
" leaq compat_hypercall_table(%%rip),%%rdi; "\
"i" (-ENOSYS) \
/* all the caller-saves registers */ \
: "rax", "rcx", "rdx", "rsi", "rdi", \
- "r8", "r9", "r10", "r11" ) \
+ "r8", "r9", "r10", "r11" ); \
+ multicall_ret(_call); \
+ })
#endif /* __ASM_X86_MULTICALL_H__ */