ia64/xen-unstable

view xen/arch/x86/i387.c @ 11859:025e19453f1a

Define REX64_PREFIX to account for assembler syntax differences on SVR4 targets.

Signed-off-by: John Levon <john.levon@sun.com>
author kfraser@localhost.localdomain
date Tue Oct 17 18:50:08 2006 +0100 (2006-10-17)
parents 043a4aa24781
children b7ae31726aa6
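
The macro itself is introduced elsewhere in the changeset; this file only uses it. The issue: on SVR4 targets the assembler treats '/' as a comment delimiter, so the usual "rex64/" prefix separator cannot be assembled. A minimal sketch of the definition this changeset presumably adds, with the __sun__ guard being an assumption (the exact spelling lives in the diff, not on this page):

    #ifdef __sun__                  /* SVR4 as: '/' opens a comment, use '\' instead */
    #define REX64_PREFIX "rex64\\"
    #else                           /* GNU as elsewhere: '/' separates prefix from opcode */
    #define REX64_PREFIX "rex64/"
    #endif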
line source
/*
 *  linux/arch/i386/kernel/i387.c
 *
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <xen/config.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/hvm/support.h>
#include <asm/i387.h>
#include <asm/asm_defns.h>

void init_fpu(void)
{
    __asm__ __volatile__ ( "fninit" );
    if ( cpu_has_xmm )
        load_mxcsr(0x1f80); /* architectural reset default: all exceptions masked */
    set_bit(_VCPUF_fpu_initialised, &current->vcpu_flags);
}

void save_init_fpu(struct vcpu *v)
{
    unsigned long cr0 = read_cr0();
    char *fpu_ctxt = v->arch.guest_context.fpu_ctxt.x;

    /* This can happen if a paravirtualised guest OS has set its CR0.TS. */
    if ( cr0 & X86_CR0_TS )
        clts();

    if ( cpu_has_fxsr )
    {
#ifdef __i386__
        __asm__ __volatile__ (
            "fxsave %0"
            : "=m" (*fpu_ctxt) );
#else /* __x86_64__ */
        /*
         * The only way to force fxsaveq on a wide range of gas versions. On
         * older versions the rex64 prefix works only if we force an
         * addressing mode that doesn't require extended registers.
         */
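        /*
         * The "cdaSDb" constraint lets gcc choose any of the legacy
         * registers (rcx, rdx, rax, rsi, rdi, rbx) for %1. None of those
         * needs a REX.B bit to encode, so the hand-written rex64 prefix
         * contributes only REX.W and the instruction is a genuine 64-bit
         * fxsave.
         */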
        __asm__ __volatile__ (
            REX64_PREFIX "fxsave (%1)"
            : "=m" (*fpu_ctxt) : "cdaSDb" (fpu_ctxt) );
#endif

        /* Clear exception flags if FSW.ES is set (FSW is byte 2 of the image). */
        if ( unlikely(fpu_ctxt[2] & 0x80) )
            __asm__ __volatile__ ("fnclex");

        /*
         * AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
         * is pending. Clear the x87 state here by setting it to fixed
         * values. The hypervisor data segment can sometimes be 0 and
         * sometimes the new user value. Both should be OK. Use the FPU
         * saved data block as a safe address because it should be in L1.
         */
        if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
        {
            __asm__ __volatile__ (
                "emms\n\t"  /* clear stack tags */
                "fildl %0"  /* load to clear state */
                : : "m" (*fpu_ctxt) );
        }
    }
    else
    {
        __asm__ __volatile__ (
            "fnsave %0 ; fwait"
            : "=m" (*fpu_ctxt) );
    }

    clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
    write_cr0(cr0|X86_CR0_TS); /* set TS: next FPU use by this vcpu traps */
}
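
/*
 * Big picture (a summary, not from the original source): save_init_fpu()
 * and restore_fpu() are the two halves of lazy FPU context switching.
 * Saving leaves CR0.TS set, so the next FPU instruction executed for this
 * vcpu faults with #NM; the #NM handler (outside this file) is then
 * expected to clear TS and call restore_fpu() to reload the saved image.
 */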

void restore_fpu(struct vcpu *v)
{
    char *fpu_ctxt = v->arch.guest_context.fpu_ctxt.x;

    /*
     * FXRSTOR can fault if passed a corrupted data block. We handle this
     * possibility, which may occur if the block was passed to us by control
     * tools, by silently clearing the block.
     */
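    /*
     * Mechanically (summarising the asm below): the __ex_table entry maps
     * a fault at label 1 to label 2; the fixup code zeroes the whole save
     * area (%eax = 0, %ecx = size in 32-bit words, rep stosl) and jumps
     * back to retry the fxrstor on the now benign all-zero image.
     */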
    if ( cpu_has_fxsr )
    {
        __asm__ __volatile__ (
#ifdef __i386__
            "1: fxrstor %0            \n"
#else /* __x86_64__ */
            /* See above for why the operands/constraints are this way. */
            "1: " REX64_PREFIX "fxrstor (%2)\n"
#endif
            ".section .fixup,\"ax\"   \n"
            "2: push %%"__OP"ax       \n"
            "   push %%"__OP"cx       \n"
            "   push %%"__OP"di       \n"
            "   lea  %0,%%"__OP"di    \n"
            "   mov  %1,%%ecx         \n"
            "   xor  %%eax,%%eax      \n"
            "   rep ; stosl           \n"
            "   pop  %%"__OP"di       \n"
            "   pop  %%"__OP"cx       \n"
            "   pop  %%"__OP"ax       \n"
            "   jmp  1b               \n"
            ".previous                \n"
            ".section __ex_table,\"a\"\n"
            "   "__FIXUP_ALIGN"       \n"
            "   "__FIXUP_WORD" 1b,2b  \n"
            ".previous                \n"
            :
            : "m" (*fpu_ctxt),
              "i" (sizeof(v->arch.guest_context.fpu_ctxt)/4)
#ifdef __x86_64__
              ,"cdaSDb" (fpu_ctxt)
#endif
            );
    }
    else
    {
        __asm__ __volatile__ (
            "frstor %0"
            : : "m" (v->arch.guest_context.fpu_ctxt) );
    }
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */