ia64/xen-unstable

xen/arch/x86/i387.c @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use domain::max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.
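
For illustration, a minimal sketch of the substitution described above; the helper and the do_something() callee are hypothetical stand-ins, not code from this changeset:

    /* Iterate over the vcpus actually allocated to a domain. */
    static void walk_present_vcpus(struct domain *d)
    {
        unsigned int i;

        /* Before: for ( i = 0; i < MAX_VCPUS; i++ ) ... */
        for ( i = 0; i < d->max_vcpus; i++ )
            if ( d->vcpu[i] != NULL )
                do_something(d->vcpu[i]);   /* hypothetical callee */
    }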

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents d6e6ba8a72bf
/*
 *  linux/arch/i386/kernel/i387.c
 *
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <xen/config.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/hvm/support.h>
#include <asm/i387.h>
#include <asm/asm_defns.h>
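
/*
 * Initialise the FPU for the current vcpu: FNINIT resets the x87 state
 * and, when SSE is available, MXCSR is set to its architectural default
 * of 0x1f80 (all SSE exceptions masked, round-to-nearest).
 */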
void init_fpu(void)
{
    asm volatile ( "fninit" );
    if ( cpu_has_xmm )
        load_mxcsr(0x1f80);
    current->fpu_initialised = 1;
}
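
/*
 * Save the vcpu's FPU context into v->arch.guest_context.fpu_ctxt and
 * relinquish the FPU: CR0.TS is set on return, so the next FPU use
 * traps with a device-not-available fault (#NM).
 */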
void save_init_fpu(struct vcpu *v)
{
    unsigned long cr0 = read_cr0();
    char *fpu_ctxt = v->arch.guest_context.fpu_ctxt.x;

    /* This can happen if a paravirtualised guest OS has set its CR0.TS. */
    if ( cr0 & X86_CR0_TS )
        clts();

    if ( cpu_has_fxsr )
    {
#ifdef __i386__
        asm volatile (
            "fxsave %0"
            : "=m" (*fpu_ctxt) );
#else /* __x86_64__ */
        /*
         * The only way to force fxsaveq on a wide range of gas versions. On
         * older versions the rex64 prefix works only if we force an
         * addressing mode that doesn't require extended registers.
         */
        asm volatile (
            REX64_PREFIX "fxsave (%1)"
            : "=m" (*fpu_ctxt) : "cdaSDb" (fpu_ctxt) );
#endif

        /* Clear exception flags if FSW.ES is set. */
        if ( unlikely(fpu_ctxt[2] & 0x80) )
            asm volatile ( "fnclex" );

        /*
         * AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
         * is pending. Clear the x87 state here by setting it to fixed
         * values. The hypervisor data segment can be sometimes 0 and
         * sometimes the new user value. Both should be ok. Use the FPU
         * saved data block as a safe address because it should be in L1.
         */
        if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
        {
            asm volatile (
                "emms\n\t"  /* clear stack tags */
                "fildl %0"  /* load to clear state */
                : : "m" (*fpu_ctxt) );
        }
    }
    else
    {
        /* FWAIT is required to make FNSAVE synchronous. */
        asm volatile ( "fnsave %0 ; fwait" : "=m" (*fpu_ctxt) );
    }

    v->fpu_dirtied = 0;
    write_cr0(cr0|X86_CR0_TS);
}
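
/*
 * Reload the vcpu's saved FPU context into the hardware registers. If
 * the FXRSTOR image is corrupt (e.g. supplied by control tools), the
 * fixup path below zeroes the block and retries.
 */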
void restore_fpu(struct vcpu *v)
{
    char *fpu_ctxt = v->arch.guest_context.fpu_ctxt.x;

    /*
     * FXRSTOR can fault if passed a corrupted data block. We handle this
     * possibility, which may occur if the block was passed to us by control
     * tools, by silently clearing the block.
     */
    if ( cpu_has_fxsr )
    {
        asm volatile (
#ifdef __i386__
            "1: fxrstor %0            \n"
#else /* __x86_64__ */
            /* See above for why the operands/constraints are this way. */
            "1: " REX64_PREFIX "fxrstor (%2)\n"
#endif
            ".section .fixup,\"ax\"   \n"
            "2: push %%"__OP"ax       \n"
            "   push %%"__OP"cx       \n"
            "   push %%"__OP"di       \n"
            "   lea  %0,%%"__OP"di    \n"
            "   mov  %1,%%ecx         \n"
            "   xor  %%eax,%%eax      \n"
            "   rep ; stosl           \n"
            "   pop  %%"__OP"di       \n"
            "   pop  %%"__OP"cx       \n"
            "   pop  %%"__OP"ax       \n"
            "   jmp  1b               \n"
            ".previous                \n"
            ".section __ex_table,\"a\"\n"
            "   "__FIXUP_ALIGN"       \n"
            "   "__FIXUP_WORD" 1b,2b  \n"
            ".previous                \n"
            :
            : "m" (*fpu_ctxt),
              "i" (sizeof(v->arch.guest_context.fpu_ctxt)/4)
#ifdef __x86_64__
              ,"cdaSDb" (fpu_ctxt)
#endif
            );
    }
    else
    {
        asm volatile ( "frstor %0" : : "m" (v->arch.guest_context.fpu_ctxt) );
    }
}
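
/*
 * For context, a minimal sketch (not part of this file) of the lazy-FPU
 * pattern these routines support: save_init_fpu() leaves CR0.TS set, so
 * the next FPU instruction faults with #NM and a handler along these
 * lines reloads the state. The handle_nm() name and shape are
 * assumptions, not Xen's actual trap handler.
 *
 *     static void handle_nm(struct vcpu *v)
 *     {
 *         clts();                  // let the vcpu use the FPU again
 *         if ( v->fpu_initialised )
 *             restore_fpu(v);      // reload the saved context
 *         else
 *             init_fpu();          // first use: start from fresh state
 *         v->fpu_dirtied = 1;      // must be saved at the next switch
 *     }
 */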

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */