There's been effectively no use of the vgc_flags field for HVM.
Also shrink the field to unsigned int, even if this doesn't immediately
yield any space benefit for the structure itself. The resulting 32-bit
padding slot can eventually be used for some other field. The change in
size makes accesses slightly more efficient though, as no REX.W prefix
is going to be needed anymore on the respective insns.
Mirror the HVM side change here (no longer setting the field to
VGCF_online) also to Arm, on the assumption that the code was cloned
like this originally. VGCF_online really should simply and consistently
be the guest view of the inverse of VPF_down, and hence needs
representing only in the get/set vCPU context interfaces.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
if ( ((c(ldt_base) & (PAGE_SIZE - 1)) != 0) ||
(c(ldt_ents) > 8192) )
return -EINVAL;
+
+ v->arch.pv.vgc_flags = flags;
}
v->arch.flags |= TF_kernel_mode;
!is_hvm_domain(d) && !is_pv_32bit_domain(d) )
v->arch.flags &= ~TF_kernel_mode;
- v->arch.vgc_flags = flags;
-
vcpu_setup_fpu(v, v->arch.xsave_area,
flags & VGCF_I387_VALID ? &c.nat->fpu_ctxt : NULL,
FCW_DEFAULT);
domain_crash(n->domain);
}
- if ( n->arch.vgc_flags & VGCF_failsafe_disables_events )
+ if ( n->arch.pv.vgc_flags & VGCF_failsafe_disables_events )
vcpu_info(n, evtchn_upcall_mask) = 1;
regs->entry_vector |= TRAP_syscall;
domain_crash(n->domain);
}
- if ( n->arch.vgc_flags & VGCF_failsafe_disables_events )
+ if ( n->arch.pv.vgc_flags & VGCF_failsafe_disables_events )
vcpu_info(n, evtchn_upcall_mask) = 1;
regs->entry_vector |= TRAP_syscall;
#define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))
memcpy(&c.nat->fpu_ctxt, v->arch.fpu_ctxt, sizeof(c.nat->fpu_ctxt));
- c(flags = v->arch.vgc_flags & ~(VGCF_i387_valid|VGCF_in_kernel));
+ if ( is_pv_domain(d) )
+ c(flags = v->arch.pv.vgc_flags & ~(VGCF_i387_valid|VGCF_in_kernel));
+ else
+ c(flags = 0);
if ( v->fpu_initialised )
c(flags |= VGCF_i387_valid);
if ( !(v->pause_flags & VPF_down) )
v->arch.dr6 = ctxt.dr6;
v->arch.dr7 = ctxt.dr7;
- v->arch.vgc_flags = VGCF_online;
-
/* Auxiliary processors should be woken immediately. */
v->is_initialised = 1;
clear_bit(_VPF_down, &v->pause_flags);
v->arch.xsave_area->xsave_hdr.xstate_bv = 0;
vcpu_setup_fpu(v, v->arch.xsave_area, NULL, FCW_RESET);
- v->arch.vgc_flags = VGCF_online;
-
arch_vcpu_regs_init(v);
v->arch.user_regs.rip = ip;
case CALLBACKTYPE_failsafe:
curr->arch.pv.failsafe_callback_eip = reg->address;
if ( reg->flags & CALLBACKF_mask_events )
- curr->arch.vgc_flags |= VGCF_failsafe_disables_events;
+ curr->arch.pv.vgc_flags |= VGCF_failsafe_disables_events;
else
- curr->arch.vgc_flags &= ~VGCF_failsafe_disables_events;
+ curr->arch.pv.vgc_flags &= ~VGCF_failsafe_disables_events;
break;
case CALLBACKTYPE_syscall:
curr->arch.pv.syscall_callback_eip = reg->address;
if ( reg->flags & CALLBACKF_mask_events )
- curr->arch.vgc_flags |= VGCF_syscall_disables_events;
+ curr->arch.pv.vgc_flags |= VGCF_syscall_disables_events;
else
- curr->arch.vgc_flags &= ~VGCF_syscall_disables_events;
+ curr->arch.pv.vgc_flags &= ~VGCF_syscall_disables_events;
break;
case CALLBACKTYPE_syscall32:
curr->arch.pv.failsafe_callback_cs = reg->address.cs;
curr->arch.pv.failsafe_callback_eip = reg->address.eip;
if ( reg->flags & CALLBACKF_mask_events )
- curr->arch.vgc_flags |= VGCF_failsafe_disables_events;
+ curr->arch.pv.vgc_flags |= VGCF_failsafe_disables_events;
else
- curr->arch.vgc_flags &= ~VGCF_failsafe_disables_events;
+ curr->arch.pv.vgc_flags &= ~VGCF_failsafe_disables_events;
break;
case CALLBACKTYPE_syscall32:
OFFSET(VCPU_kernel_sp, struct vcpu, arch.pv.kernel_sp);
OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv.kernel_ss);
OFFSET(VCPU_iopl, struct vcpu, arch.pv.iopl);
- OFFSET(VCPU_guest_context_flags, struct vcpu, arch.vgc_flags);
+ OFFSET(VCPU_guest_context_flags, struct vcpu, arch.pv.vgc_flags);
OFFSET(VCPU_cr3, struct vcpu, arch.cr3);
OFFSET(VCPU_arch_msrs, struct vcpu, arch.msrs);
OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
/* map_domain_page() mapping cache. */
struct mapcache_vcpu mapcache;
+ unsigned int vgc_flags;
+
struct trap_info *trap_ctxt;
unsigned long gdt_frames[FIRST_RESERVED_GDT_PAGE];
*/
void *fpu_ctxt;
- unsigned long vgc_flags;
struct cpu_user_regs user_regs;
/* Debug registers. */