        regs->rax = op;

-       if ( is_pv_vcpu(curr) ?
-            !is_pv_32bit_vcpu(curr) :
-            curr->arch.hvm_vcpu.hcall_64bit )
+       if ( !curr->hcall_compat )
        {
            for ( i = 0; *p != '\0'; i++ )
            {
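
The hunk above appears to come from hypercall_create_continuation(): the old test for a 64-bit caller differed by guest type (is_pv_32bit_vcpu() for PV, arch.hvm_vcpu.hcall_64bit for HVM), and the patch collapses both into one common flag with the opposite sense, true exactly while the vCPU is inside a compat (32-bit ABI) hypercall. A cut-down, compilable sketch of where the generalised field is assumed to live; the struct layout here is illustrative, not taken from the patch:

#include <stdbool.h>

/* Illustrative stand-in for Xen's struct vcpu, assuming the new flag
 * sits in the common structure next to hcall_preempted. */
struct vcpu {
    bool hcall_preempted;   /* current hypercall asked to be restarted? */
    bool hcall_compat;      /* inside a compat (32-bit ABI) hypercall? */
};
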
{
    int rc;

-   if ( !current->arch.hvm_vcpu.hcall_64bit &&
-        is_compat_arg_xlat_range(to, len) )
+   if ( current->hcall_compat && is_compat_arg_xlat_range(to, len) )
    {
        memcpy(to, from, len);
        return 0;

{
    int rc;

-   if ( !current->arch.hvm_vcpu.hcall_64bit &&
-        is_compat_arg_xlat_range(to, len) )
+   if ( current->hcall_compat && is_compat_arg_xlat_range(to, len) )
    {
        memset(to, 0x00, len);
        return 0;

{
    int rc;

-   if ( !current->arch.hvm_vcpu.hcall_64bit &&
-        is_compat_arg_xlat_range(from, len) )
+   if ( current->hcall_compat && is_compat_arg_xlat_range(from, len) )
    {
        memcpy(to, from, len);
        return 0;
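
These three near-identical hunks look like the HVM guest-access helpers (copy_to_user_hvm(), clear_user_hvm() and copy_from_user_hvm()). Each takes a fast path, a plain memcpy()/memset(), when the "guest" pointer actually refers to the compat argument-translation area, the hypervisor buffer that 32-bit guest arguments are staged into; since that situation can only arise inside a compat hypercall, testing the new flag directly replaces the old !hcall_64bit test. A rough standalone sketch of what a range check of that shape does; every name here (xlat_buf, XLAT_SIZE, in_xlat_range) is invented for illustration:

#include <stdbool.h>
#include <stddef.h>

#define XLAT_SIZE 4096          /* illustrative size only */
static char xlat_buf[XLAT_SIZE];

/* True iff [p, p + len) lies entirely inside the translation buffer. */
static bool in_xlat_range(const void *p, size_t len)
{
    const char *c = p;

    return len <= XLAT_SIZE &&
           c >= xlat_buf &&
           c <= xlat_buf + XLAT_SIZE - len;
}
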
        return -ENOSYS;
    }

-   if ( curr->arch.hvm_vcpu.hcall_64bit )
+   if ( !curr->hcall_compat )
        rc = do_memory_op(cmd, arg);
    else
        rc = compat_memory_op(cmd, arg);

        return -ENOSYS;
    }

-   if ( current->arch.hvm_vcpu.hcall_64bit )
+   if ( !current->hcall_compat )
        return do_grant_table_op(cmd, uop, count);
    else
        return compat_grant_table_op(cmd, uop, count);

        break;
    }

-   if ( curr->arch.hvm_vcpu.hcall_64bit )
+   if ( !curr->hcall_compat )
        return do_physdev_op(cmd, arg);
    else
        return compat_physdev_op(cmd, arg);
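
The same inversion repeats in what appear to be the HVM wrappers for the memory, grant-table and physdev hypercalls: the native do_* handler runs unless the vCPU is in a compat hypercall, in which case the compat_* translation layer runs instead. A toy, self-contained rendering of that dispatch pattern; the handler names and types are invented, not Xen's:

#include <stdbool.h>

/* Stand-ins for a do_xxx / compat_xxx handler pair. */
static long native_handler(unsigned int cmd, void *arg) { return 0; }
static long compat_handler(unsigned int cmd, void *arg) { return 0; }

struct vcpu { bool hcall_compat; };

static long dispatch(const struct vcpu *curr, unsigned int cmd, void *arg)
{
    /* One common flag selects the ABI, replacing the hcall_64bit test. */
    return curr->hcall_compat ? compat_handler(cmd, arg)
                              : native_handler(cmd, arg);
}
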
        }
#endif

-       curr->arch.hvm_vcpu.hcall_64bit = 1;
        regs->rax = hvm_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8,
                                                    r9);
-       curr->arch.hvm_vcpu.hcall_64bit = 0;
-
#ifndef NDEBUG
        if ( !curr->hcall_preempted )
        {
        }
#endif

+       curr->hcall_compat = true;
        regs->rax = hvm_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi,
                                                    ebp);
+       curr->hcall_compat = false;
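
In what looks like hvm_do_hypercall(), the 64-bit path no longer needs to touch any flag at all: hcall_compat defaults to false, so only the compat path sets it, and it is cleared again as soon as the handler returns so that nothing outside the hypercall window observes it. A contrived standalone sketch of that set/call/clear bracketing; all names are invented for illustration:

#include <stdbool.h>
#include <stdint.h>

struct vcpu { bool hcall_compat; };

typedef uint64_t compat_fn_t(uint32_t ebx, uint32_t ecx, uint32_t edx,
                             uint32_t esi, uint32_t edi, uint32_t ebp);

/* Bracket a compat handler so helpers it calls (e.g. the xlat-area
 * fast path above) can key off curr->hcall_compat. */
static uint64_t run_compat_hypercall(struct vcpu *curr, compat_fn_t *fn,
                                     const uint32_t a[6])
{
    uint64_t ret;

    curr->hcall_compat = true;
    ret = fn(a[0], a[1], a[2], a[3], a[4], a[5]);
    curr->hcall_compat = false;

    return ret;
}
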
#ifndef NDEBUG
        if ( !curr->hcall_preempted )
            __trace_hypercall(TRC_PV_HYPERCALL_V2, eax, args);
    }

+       curr->hcall_compat = true;
        regs->_eax = pv_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi, ebp);
+       curr->hcall_compat = false;

#ifndef NDEBUG
        if ( !curr->hcall_preempted )