: [_val] "rm" (val) )
#ifdef CONFIG_HVM
- if ( cpu_has_svm && !compat )
+ if ( cpu_has_svm && !compat && (uregs->fs | uregs->gs) <= 3 )
{
unsigned long gsb = n->arch.flags & TF_kernel_mode
? n->arch.pv.gs_base_kernel : n->arch.pv.gs_base_user;
unsigned long gss = n->arch.flags & TF_kernel_mode
? n->arch.pv.gs_base_user : n->arch.pv.gs_base_kernel;
fs_gs_done = svm_load_segs(n->arch.pv.ldt_ents, LDT_VIRT_START(n),
- uregs->fs, n->arch.pv.fs_base,
- uregs->gs, gsb, gss);
+ n->arch.pv.fs_base, gsb, gss);
}
#endif
if ( !fs_gs_done )
/* Prefetch the VMCB if we expect to use it later in the context switch */
if ( cpu_has_svm && is_pv_domain(nd) && !is_pv_32bit_domain(nd) &&
!is_idle_domain(nd) )
- svm_load_segs(0, 0, 0, 0, 0, 0, 0);
+ svm_load_segs(0, 0, 0, 0, 0);
#endif
if ( need_full_gdt(nd) && !per_cpu(full_gdt_loaded, cpu) )
#ifdef CONFIG_PV
bool svm_load_segs(unsigned int ldt_ents, unsigned long ldt_base,
- unsigned int fs_sel, unsigned long fs_base,
- unsigned int gs_sel, unsigned long gs_base,
+ unsigned long fs_base, unsigned long gs_base,
unsigned long gs_shadow)
{
unsigned int cpu = smp_processor_id();
vmcb->ldtr.base = ldt_base;
}
- ASSERT(!(fs_sel & ~3));
- vmcb->fs.sel = fs_sel;
+ vmcb->fs.sel = 0;
vmcb->fs.attr = 0;
vmcb->fs.limit = 0;
vmcb->fs.base = fs_base;
- ASSERT(!(gs_sel & ~3));
- vmcb->gs.sel = gs_sel;
+ vmcb->gs.sel = 0;
vmcb->gs.attr = 0;
vmcb->gs.limit = 0;
vmcb->gs.base = gs_base;
/*
* PV context switch helper. Calls with zero ldt_base request a prefetch of
* the VMCB area to be loaded from, instead of an actual load of state.
+ *
+ * Must only be used for NUL FS/GS, as the segment attributes/limits are not
+ * read from the GDT/LDT.
*/
bool svm_load_segs(unsigned int ldt_ents, unsigned long ldt_base,
- unsigned int fs_sel, unsigned long fs_base,
- unsigned int gs_sel, unsigned long gs_base,
+ unsigned long fs_base, unsigned long gs_base,
unsigned long gs_shadow);
extern u32 svm_feature_flags;