}
#endif
-static __inline__ int HYPERVISOR_fpu_taskswitch(void)
+static __inline__ int
+HYPERVISOR_fpu_taskswitch(
+ int set)
{
int ret;
+ unsigned long ign;
+
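+ /* 'set' travels to Xen in %ebx; tying it to the output 'ign' tells
+ * GCC the register may be clobbered by the hypercall. */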
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_fpu_taskswitch), "1" (set)
+ : "memory" );
return ret;
}
asm volatile( "fnsave %0 ; fwait"
: "=m" (prev_p->thread.i387.fsave) );
prev_p->flags &= ~PF_USEDFPU;
- queue_multicall0(__HYPERVISOR_fpu_taskswitch);
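+ /* FPU state was just saved above, so ask Xen to set TS for the next task. */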
+ queue_multicall1(__HYPERVISOR_fpu_taskswitch, 1);
}
queue_multicall2(__HYPERVISOR_stack_switch, __KERNEL_DS, next->esp0);
/* NB. 'clts' is done for us by Xen during virtual trap. */
#define clts() ((void)0)
-#define stts() (HYPERVISOR_fpu_taskswitch())
+#define stts() (HYPERVISOR_fpu_taskswitch(1))
#endif /* __KERNEL__ */
*/
if (prev_p->thread_info->status & TS_USEDFPU) {
__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
- queue_multicall0(__HYPERVISOR_fpu_taskswitch);
+ queue_multicall1(__HYPERVISOR_fpu_taskswitch, 1);
}
/*
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = thread->task;
- /*
- * A trap in kernel mode can be ignored. It'll be the fast XOR or
- * copying libraries, which will correctly save/restore state and
- * reset the TS bit in CR0.
- */
- if ((regs.xcs & 2) == 0)
- return;
-
/* NB. 'clts' is done for us by Xen during virtual trap. */
if (!tsk->used_math)
init_fpu(tsk);
/*
* Clear and set 'TS' bit respectively
*/
-#define clts() __asm__ __volatile__ ("clts")
+#define clts() (HYPERVISOR_fpu_taskswitch(0))
#define read_cr0() \
BUG();
#define write_cr0(x) \
BUG();
-
#define read_cr4() \
BUG();
#define write_cr4(x) \
BUG();
-#define stts() (HYPERVISOR_fpu_taskswitch())
+#define stts() (HYPERVISOR_fpu_taskswitch(1))
#endif /* __KERNEL__ */
static inline int
HYPERVISOR_fpu_taskswitch(
- void)
+ int set)
{
int ret;
+ unsigned long ign;
+
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_fpu_taskswitch), "1" (set)
+ : "memory" );
return ret;
}
ltr(lwp0.l_md.md_tss_sel);
lldt(pcb->pcb_ldt_sel);
#else
- HYPERVISOR_fpu_taskswitch();
+ HYPERVISOR_fpu_taskswitch(1);
XENPRINTF(("lwp tss sp %p ss %04x/%04x\n",
(void *)pcb->pcb_tss.tss_esp0,
pcb->pcb_tss.tss_ss0, IDXSEL(pcb->pcb_tss.tss_ss0)));
ci = curcpu();
if (ci->ci_fpused) {
- HYPERVISOR_fpu_taskswitch();
+ HYPERVISOR_fpu_taskswitch(1);
ci->ci_fpused = 0;
}
}
static inline int
-HYPERVISOR_fpu_taskswitch(void)
+HYPERVISOR_fpu_taskswitch(int set)
{
int ret;
+ unsigned long ign;
+
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_fpu_taskswitch), "1" (set)
+ : "memory" );
return ret;
}
return 0;
}
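+/*
+ * Emulate a guest write of CR0.TS. If 'set' is non-zero, record that the
+ * guest wants TS set and force the real TS bit on (stts); otherwise clear
+ * the guest's TS and, if this domain has used the FPU, clear the real TS
+ * bit as well (clts).
+ */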
+long do_fpu_taskswitch(int set)
+{
+ struct exec_domain *ed = current;
+
+ if ( set )
+ {
+ set_bit(EDF_GUEST_STTS, &ed->ed_flags);
+ stts();
+ }
+ else
+ {
+ clear_bit(EDF_GUEST_STTS, &ed->ed_flags);
+ if ( test_bit(EDF_USEDFPU, &ed->ed_flags) )
+ clts();
+ }
+
+ return 0;
+}
+
static int emulate_privileged_op(struct xen_regs *regs)
{
extern void *decode_reg(struct xen_regs *regs, u8 b);
switch ( opcode )
{
case 0x06: /* CLTS */
- clear_bit(EDF_GUEST_STTS, &ed->ed_flags);
- if ( test_bit(EDF_USEDFPU, &ed->ed_flags) )
- clts();
+ (void)do_fpu_taskswitch(0);
break;
case 0x09: /* WBINVD */
switch ( (opcode >> 3) & 7 )
{
case 0: /* Write CR0 */
- if ( *reg & X86_CR0_TS )
- {
- set_bit(EDF_GUEST_STTS, &ed->ed_flags);
- stts();
- }
- else
- {
- clear_bit(EDF_GUEST_STTS, &ed->ed_flags);
- if ( test_bit(EDF_USEDFPU, &ed->ed_flags) )
- clts();
- }
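+ /* Let do_fpu_taskswitch() handle TS; '!!' normalises the bit to 0/1. */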
+ (void)do_fpu_taskswitch(!!(*reg & X86_CR0_TS));
break;
case 2: /* Write CR2 */
}
-long do_fpu_taskswitch(void)
-{
- set_bit(EDF_GUEST_STTS, &current->ed_flags);
- stts();
- return 0;
-}
-
-
#if defined(__i386__)
#define DB_VALID_ADDR(_a) \
((_a) <= (PAGE_OFFSET - 4))