return 0;
}
+/*
+ * Implementation of the HYPERVISOR_stack_switch hypercall.
+ *
+ * Cache the guest kernel's stack selector (@ss) and stack pointer (@esp)
+ * in the vCPU's PV state, for use on subsequent user->kernel transitions.
+ * Always returns 0; bad selectors are adjusted, not rejected.
+ */
+long do_stack_switch(unsigned long ss, unsigned long esp)
+{
+ /* Sanitise the selector before caching it (helper defined elsewhere). */
+ fixup_guest_stack_selector(current->domain, ss);
+ current->arch.pv.kernel_ss = ss;
+ current->arch.pv.kernel_sp = esp;
+
+ return 0;
+}
+
+/*
+ * Implementation of the HYPERVISOR_set_segment_base hypercall.
+ *
+ * @which selects a SEGBASE_* operation; @base is the new base address,
+ * or for SEGBASE_GS_USER_SEL a segment selector in the low 16 bits.
+ * Only available to 64bit PV guests; 32bit callers get -ENOSYS.
+ * Returns 0 on success, -EINVAL for a non-canonical base or unknown op
+ * (but see below: SEGBASE_GS_USER_SEL cannot report selector faults).
+ */
+long do_set_segment_base(unsigned int which, unsigned long base)
+{
+ struct vcpu *v = current;
+ long ret = 0;
+
+ if ( is_pv_32bit_vcpu(v) )
+ return -ENOSYS; /* x86/64 only. */
+
+ switch ( which )
+ {
+ case SEGBASE_FS:
+ /* Load the new %fs base; reject non-canonical values. */
+ if ( is_canonical_address(base) )
+ wrfsbase(base);
+ else
+ ret = -EINVAL;
+ break;
+
+ case SEGBASE_GS_USER:
+ /*
+ * New user %gs base.  The user base is currently the inactive
+ * (shadow) one, so write the shadow MSR and refresh our cache.
+ */
+ if ( is_canonical_address(base) )
+ {
+ wrgsshadow(base);
+ v->arch.pv.gs_base_user = base;
+ }
+ else
+ ret = -EINVAL;
+ break;
+
+ case SEGBASE_GS_KERNEL:
+ /* New kernel %gs base — the base currently in context. */
+ if ( is_canonical_address(base) )
+ wrgsbase(base);
+ else
+ ret = -EINVAL;
+ break;
+
+ case SEGBASE_GS_USER_SEL:
+ {
+ unsigned int sel = (uint16_t)base;
+
+ /*
+ * We wish to update the user %gs from the GDT/LDT. Currently, the
+ * guest kernel's GS_BASE is in context.
+ */
+ asm volatile ( "swapgs" );
+
+ if ( sel > 3 )
+ /* Fix up RPL for non-NUL selectors. */
+ sel |= 3;
+ else if ( boot_cpu_data.x86_vendor &
+ (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+ /* Work around NUL segment behaviour on AMD hardware. */
+ asm volatile ( "mov %[sel], %%gs"
+ :: [sel] "r" (FLAT_USER_DS32) );
+
+ /*
+ * Load the chosen selector, with fault handling.
+ *
+ * Errors ought to fail the hypercall, but that was never built in
+ * originally, and Linux will BUG() if this call fails.
+ *
+ * NUL the selector in the case of an error. This too needs to deal
+ * with the AMD NUL segment behaviour, but it is already a slowpath in
+ * #GP context so perform the flat load unconditionally to avoid
+ * complicated logic.
+ *
+ * Anyone wanting to check for errors from this hypercall should
+ * re-read %gs and compare against the input.
+ */
+ asm volatile ( "1: mov %[sel], %%gs\n\t"
+ ".section .fixup, \"ax\", @progbits\n\t"
+ "2: mov %k[flat], %%gs\n\t"
+ " xor %[sel], %[sel]\n\t"
+ " jmp 1b\n\t"
+ ".previous\n\t"
+ _ASM_EXTABLE(1b, 2b)
+ : [sel] "+r" (sel)
+ : [flat] "r" (FLAT_USER_DS32) );
+
+ /* Update the cache of the inactive base, as read from the GDT/LDT. */
+ v->arch.pv.gs_base_user = rdgsbase();
+
+ /* Swap the guest kernel's GS_BASE back into context. */
+ asm volatile ( safe_swapgs );
+ break;
+ }
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
/*
* Local variables:
* mode: C
return rc;
}
-long do_stack_switch(unsigned long ss, unsigned long esp)
-{
- fixup_guest_stack_selector(current->domain, ss);
- current->arch.pv.kernel_ss = ss;
- current->arch.pv.kernel_sp = esp;
- return 0;
-}
-
-long do_set_segment_base(unsigned int which, unsigned long base)
-{
- struct vcpu *v = current;
- long ret = 0;
-
- if ( is_pv_32bit_vcpu(v) )
- return -ENOSYS; /* x86/64 only. */
-
- switch ( which )
- {
- case SEGBASE_FS:
- if ( is_canonical_address(base) )
- wrfsbase(base);
- else
- ret = -EINVAL;
- break;
-
- case SEGBASE_GS_USER:
- if ( is_canonical_address(base) )
- {
- wrgsshadow(base);
- v->arch.pv.gs_base_user = base;
- }
- else
- ret = -EINVAL;
- break;
-
- case SEGBASE_GS_KERNEL:
- if ( is_canonical_address(base) )
- wrgsbase(base);
- else
- ret = -EINVAL;
- break;
-
- case SEGBASE_GS_USER_SEL:
- {
- unsigned int sel = (uint16_t)base;
-
- /*
- * We wish to update the user %gs from the GDT/LDT. Currently, the
- * guest kernel's GS_BASE is in context.
- */
- asm volatile ( "swapgs" );
-
- if ( sel > 3 )
- /* Fix up RPL for non-NUL selectors. */
- sel |= 3;
- else if ( boot_cpu_data.x86_vendor &
- (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
- /* Work around NUL segment behaviour on AMD hardware. */
- asm volatile ( "mov %[sel], %%gs"
- :: [sel] "r" (FLAT_USER_DS32) );
-
- /*
- * Load the chosen selector, with fault handling.
- *
- * Errors ought to fail the hypercall, but that was never built in
- * originally, and Linux will BUG() if this call fails.
- *
- * NUL the selector in the case of an error. This too needs to deal
- * with the AMD NUL segment behaviour, but it is already a slowpath in
- * #GP context so perform the flat load unconditionally to avoid
- * complicated logic.
- *
- * Anyone wanting to check for errors from this hypercall should
- * re-read %gs and compare against the input.
- */
- asm volatile ( "1: mov %[sel], %%gs\n\t"
- ".section .fixup, \"ax\", @progbits\n\t"
- "2: mov %k[flat], %%gs\n\t"
- " xor %[sel], %[sel]\n\t"
- " jmp 1b\n\t"
- ".previous\n\t"
- _ASM_EXTABLE(1b, 2b)
- : [sel] "+r" (sel)
- : [flat] "r" (FLAT_USER_DS32) );
-
- /* Update the cache of the inactive base, as read from the GDT/LDT. */
- v->arch.pv.gs_base_user = rdgsbase();
-
- asm volatile ( safe_swapgs );
- break;
- }
-
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-
/* Returns TRUE if given descriptor is valid for GDT or LDT. */
int check_descriptor(const struct domain *dom, seg_desc_t *d)
{