ia64/xen-unstable: xen/include/asm-x86/processor.h @ 15447:5eec9a8825d4

Fix VMX guests failing to boot after MCE is enabled.
Signed-off-by: Xin Li <xin.b.li@intel.com>

author   Keir Fraser <keir@xensource.com>
date     Wed Jun 27 20:08:21 2007 +0100 (2007-06-27)
parents  3cf5052ba5e5
children 87d34c8c2fe1

/* Portions are: Copyright (c) 1994 Linus Torvalds */

#ifndef __ASM_X86_PROCESSOR_H
#define __ASM_X86_PROCESSOR_H

#ifndef __ASSEMBLY__
#include <xen/config.h>
#include <xen/cache.h>
#include <xen/types.h>
#include <public/xen.h>
#include <asm/types.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#endif

/*
 * CPU vendor IDs
 */
#define X86_VENDOR_INTEL     0
#define X86_VENDOR_CYRIX     1
#define X86_VENDOR_AMD       2
#define X86_VENDOR_UMC       3
#define X86_VENDOR_NEXGEN    4
#define X86_VENDOR_CENTAUR   5
#define X86_VENDOR_RISE      6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC       8
#define X86_VENDOR_NUM       9
#define X86_VENDOR_UNKNOWN   0xff

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF   0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF   0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF   0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF   0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF   0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF   0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF   0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF   0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF   0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT   0x00004000 /* Nested Task */
#define X86_EFLAGS_RF   0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM   0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC   0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF  0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP  0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID   0x00200000 /* CPUID detection flag */

/*
 * Intel CPU flags in CR0
 */
#define X86_CR0_PE 0x00000001 /* Enable Protected Mode    (RW) */
#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor      (RW) */
#define X86_CR0_EM 0x00000004 /* Require FPU Emulation    (RO) */
#define X86_CR0_TS 0x00000008 /* Task Switched            (RW) */
#define X86_CR0_ET 0x00000010 /* Extension type           (RO) */
#define X86_CR0_NE 0x00000020 /* Numeric Error Reporting  (RW) */
#define X86_CR0_WP 0x00010000 /* Supervisor Write Protect (RW) */
#define X86_CR0_AM 0x00040000 /* Alignment Checking       (RW) */
#define X86_CR0_NW 0x20000000 /* Not Write-Through        (RW) */
#define X86_CR0_CD 0x40000000 /* Cache Disable            (RW) */
#define X86_CR0_PG 0x80000000 /* Paging                   (RW) */

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME        0x0001 /* enable vm86 extensions */
#define X86_CR4_PVI        0x0002 /* virtual interrupts flag enable */
#define X86_CR4_TSD        0x0004 /* disable time stamp at ipl 3 */
#define X86_CR4_DE         0x0008 /* enable debugging extensions */
#define X86_CR4_PSE        0x0010 /* enable page size extensions */
#define X86_CR4_PAE        0x0020 /* enable physical address extensions */
#define X86_CR4_MCE        0x0040 /* Machine check enable */
#define X86_CR4_PGE        0x0080 /* enable global pages */
#define X86_CR4_PCE        0x0100 /* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR     0x0200 /* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
#define X86_CR4_VMXE       0x2000 /* enable VMX */

#define X86_CR4_RESERVED_BITS                       \
    ~(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD |     \
      X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE |      \
      X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |     \
      X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE)
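
/*
 * Illustrative sketch, not part of this header: a mask like
 * X86_CR4_RESERVED_BITS is typically used to vet a guest-requested CR4
 * value before accepting it. Hypothetical handler logic:
 *
 *     if ( value & X86_CR4_RESERVED_BITS )
 *         goto gp_fault;       <- reject the write with #GP
 *     write_cr4(value);
 */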

/*
 * Trap/fault mnemonics.
 */
#define TRAP_divide_error     0
#define TRAP_debug            1
#define TRAP_nmi              2
#define TRAP_int3             3
#define TRAP_overflow         4
#define TRAP_bounds           5
#define TRAP_invalid_op       6
#define TRAP_no_device        7
#define TRAP_double_fault     8
#define TRAP_copro_seg        9
#define TRAP_invalid_tss     10
#define TRAP_no_segment      11
#define TRAP_stack_error     12
#define TRAP_gp_fault        13
#define TRAP_page_fault      14
#define TRAP_spurious_int    15
#define TRAP_copro_error     16
#define TRAP_alignment_check 17
#define TRAP_machine_check   18
#define TRAP_simd_error      19

/* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
/* NB. Same as VGCF_in_syscall. No bits in common with any other TRAP_ defn. */
#define TRAP_syscall        256

/*
 * Non-fatal fault/trap handlers return an error code to the caller. If the
 * code is non-zero, it means that either the exception was not due to a fault
 * (i.e., it was a trap) or that the fault has been fixed up so the instruction
 * replay ought to succeed.
 */
#define EXCRET_not_a_fault 1 /* It was a trap. No instruction replay needed. */
#define EXCRET_fault_fixed 1 /* It was a fault that we fixed: try a replay. */

/* 'trap_bounce' flags values */
#define TBF_EXCEPTION          1
#define TBF_EXCEPTION_ERRCODE  2
#define TBF_INTERRUPT          8
#define TBF_FAILSAFE          16

/* 'arch_vcpu' flags values */
#define _TF_kernel_mode        0
#define TF_kernel_mode         (1<<_TF_kernel_mode)

/* #PF error code values. */
#define PFEC_page_present  (1U<<0)
#define PFEC_write_access  (1U<<1)
#define PFEC_user_mode     (1U<<2)
#define PFEC_reserved_bit  (1U<<3)
#define PFEC_insn_fetch    (1U<<4)
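
/*
 * Illustrative sketch, not part of this header: a #PF handler decodes the
 * hardware error code with the PFEC_* bits above. Hypothetical logic:
 *
 *     if ( !(error_code & PFEC_page_present) )
 *         ... not-present fault: demand-map the page ...
 *     else if ( error_code & PFEC_reserved_bit )
 *         ... reserved bit set in a pagetable entry: tables are corrupt ...
 *     else if ( error_code & PFEC_write_access )
 *         ... present but read-only: possible copy-on-write ...
 */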

#ifndef __ASSEMBLY__

struct domain;
struct vcpu;

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#ifdef __x86_64__
#define current_text_addr() ({                          \
    void *pc;                                           \
    asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc));     \
    pc;                                                 \
})
#else
#define current_text_addr() \
    ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
#endif
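
/*
 * Example (illustrative only): current_text_addr() evaluates to the address
 * of the instruction following the inline asm, so
 *
 *     printk("executing near %p\n", current_text_addr());
 *
 * prints roughly where the caller is executing.
 */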

struct cpuinfo_x86 {
    __u8 x86;            /* CPU family */
    __u8 x86_vendor;     /* CPU vendor */
    __u8 x86_model;
    __u8 x86_mask;
    char wp_works_ok;    /* It doesn't on 386s */
    char hlt_works_ok;   /* Problems on some 486DX4s and old 386s */
    char hard_math;
    char rfu;
    int  cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
    unsigned int x86_capability[NCAPINTS];
    char x86_vendor_id[16];
    char x86_model_id[64];
    int  x86_cache_size;      /* in KB - valid for CPUs which support this call */
    int  x86_cache_alignment; /* in bytes */
    char fdiv_bug;
    char f00f_bug;
    char coma_bug;
    char pad0;
    int  x86_power;
    unsigned char x86_max_cores; /* cpuid returned max cores value */
    unsigned char booted_cores;  /* number of cores as seen by OS */
    unsigned char apicid;
} __cacheline_aligned;

/*
 * capabilities of CPUs
 */

extern struct cpuinfo_x86 boot_cpu_data;

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif

extern int phys_proc_id[NR_CPUS];
extern int cpu_core_id[NR_CPUS];

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void dodgy_tsc(void);

#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static always_inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif

/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
#define cpuid(_op,_eax,_ebx,_ecx,_edx)          \
    __asm__("cpuid"                             \
            : "=a" (*(int *)(_eax)),            \
              "=b" (*(int *)(_ebx)),            \
              "=c" (*(int *)(_ecx)),            \
              "=d" (*(int *)(_edx))             \
            : "0" (_op), "2" (0))

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(
    int op,
    int count,
    unsigned int *eax,
    unsigned int *ebx,
    unsigned int *ecx,
    unsigned int *edx)
{
    __asm__("cpuid"
            : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
            : "0" (op), "c" (count));
}

/*
 * CPUID functions returning a single datum
 */
static always_inline unsigned int cpuid_eax(unsigned int op)
{
    unsigned int eax;

    __asm__("cpuid"
            : "=a" (eax)
            : "0" (op)
            : "bx", "cx", "dx");
    return eax;
}

static always_inline unsigned int cpuid_ebx(unsigned int op)
{
    unsigned int eax, ebx;

    __asm__("cpuid"
            : "=a" (eax), "=b" (ebx)
            : "0" (op)
            : "cx", "dx" );
    return ebx;
}

static always_inline unsigned int cpuid_ecx(unsigned int op)
{
    unsigned int eax, ecx;

    __asm__("cpuid"
            : "=a" (eax), "=c" (ecx)
            : "0" (op)
            : "bx", "dx" );
    return ecx;
}

static always_inline unsigned int cpuid_edx(unsigned int op)
{
    unsigned int eax, edx;

    __asm__("cpuid"
            : "=a" (eax), "=d" (edx)
            : "0" (op)
            : "bx", "cx");
    return edx;
}
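
/*
 * Example (illustrative only): the single-datum helpers suit feature tests
 * where only one register matters, e.g. checking for VMX support (CPUID
 * leaf 1, ECX bit 5):
 *
 *     if ( cpuid_ecx(1) & (1u << 5) )
 *         ... VMX is available ...
 */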

static inline unsigned long read_cr0(void)
{
    unsigned long __cr0;
    __asm__("mov %%cr0,%0\n\t" :"=r" (__cr0));
    return __cr0;
}

static inline void write_cr0(unsigned long val)
{
    __asm__("mov %0,%%cr0": :"r" ((unsigned long)val));
}

static inline unsigned long read_cr2(void)
{
    unsigned long __cr2;
    __asm__("mov %%cr2,%0\n\t" :"=r" (__cr2));
    return __cr2;
}

static inline unsigned long read_cr4(void)
{
    unsigned long __cr4;
    __asm__("mov %%cr4,%0\n\t" :"=r" (__cr4));
    return __cr4;
}

static inline void write_cr4(unsigned long val)
{
    __asm__("mov %0,%%cr4": :"r" ((unsigned long)val));
}

/* Clear and set 'TS' bit respectively */
static inline void clts(void)
{
    __asm__ __volatile__ ("clts");
}

static inline void stts(void)
{
    write_cr0(X86_CR0_TS|read_cr0());
}
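
/*
 * Illustrative sketch, not part of this header: clts()/stts() support lazy
 * FPU context switching. stts() sets CR0.TS so the next FPU/SSE instruction
 * raises #NM (TRAP_no_device); the #NM handler can then run something like:
 *
 *     clts();                  <- permit FPU use again
 *     restore_fpu(current);    <- hypothetical restore of this vcpu's state
 */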

/*
 * Save the cr4 feature set we're using (i.e., Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us can
 * get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static always_inline void set_in_cr4(unsigned long mask)
{
    unsigned long dummy;
    mmu_cr4_features |= mask;
    __asm__ __volatile__ (
        "mov %%cr4,%0\n\t"
        "or %1,%0\n\t"
        "mov %0,%%cr4\n"
        : "=&r" (dummy) : "irg" (mask) );
}

static always_inline void clear_in_cr4(unsigned long mask)
{
    unsigned long dummy;
    mmu_cr4_features &= ~mask;
    __asm__ __volatile__ (
        "mov %%cr4,%0\n\t"
        "and %1,%0\n\t"
        "mov %0,%%cr4\n"
        : "=&r" (dummy) : "irg" (~mask) );
}
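
/*
 * Example (illustrative only): enabling machine-check exceptions this way
 * both sets CR4.MCE on the current CPU and records the bit in
 * mmu_cr4_features, so CPUs that boot later pick it up too:
 *
 *     set_in_cr4(X86_CR4_MCE);
 */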

/*
 * NSC/Cyrix CPU configuration register indexes
 */
#define CX86_PCR0     0x20
#define CX86_GCR      0xb8
#define CX86_CCR0     0xc0
#define CX86_CCR1     0xc1
#define CX86_CCR2     0xc2
#define CX86_CCR3     0xc3
#define CX86_CCR4     0xe8
#define CX86_CCR5     0xe9
#define CX86_CCR6     0xea
#define CX86_CCR7     0xeb
#define CX86_PCR1     0xf0
#define CX86_DIR0     0xfe
#define CX86_DIR1     0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc

/*
 * NSC/Cyrix CPU indexed register access macros
 */

#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
    outb((reg), 0x22);          \
    outb((data), 0x23);         \
} while (0)
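
/*
 * Illustrative sketch, not part of this header: most Cyrix configuration
 * registers are only reachable while MAPEN (the upper nibble of CCR3) is
 * set, so the usual access pattern is save/enable/access/restore:
 *
 *     unsigned char ccr3 = getCx86(CX86_CCR3);
 *     setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);    <- enable MAPEN
 *     ... access other CX86_* registers ...
 *     setCx86(CX86_CCR3, ccr3);                    <- restore CCR3
 */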

/* Stop speculative execution */
static inline void sync_core(void)
{
    int tmp;
    asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}

static always_inline void __monitor(const void *eax, unsigned long ecx,
                                    unsigned long edx)
{
    /* "monitor %eax,%ecx,%edx;" */
    asm volatile(
        ".byte 0x0f,0x01,0xc8;"
        : :"a" (eax), "c" (ecx), "d"(edx));
}

static always_inline void __mwait(unsigned long eax, unsigned long ecx)
{
    /* "mwait %eax,%ecx;" */
    asm volatile(
        ".byte 0x0f,0x01,0xc9;"
        : :"a" (eax), "c" (ecx));
}
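
/*
 * Illustrative sketch, not part of this header: the canonical MONITOR/MWAIT
 * pattern arms the monitor on the cache line of a flag ('wake_flag' here is
 * a hypothetical volatile variable), re-checks the condition to close the
 * race, then sleeps until the line is written or an interrupt arrives:
 *
 *     while ( !*wake_flag )
 *     {
 *         __monitor(wake_flag, 0, 0);
 *         if ( !*wake_flag )
 *             __mwait(0, 0);
 *     }
 */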

#define IOBMP_BYTES          8192
#define IOBMP_INVALID_OFFSET 0x8000

struct tss_struct {
    unsigned short back_link,__blh;
#ifdef __x86_64__
    union { u64 rsp0, esp0; };
    union { u64 rsp1, esp1; };
    union { u64 rsp2, esp2; };
    u64 reserved1;
    u64 ist[7];
    u64 reserved2;
    u16 reserved3;
#else
    u32 esp0;
    u16 ss0,__ss0h;
    u32 esp1;
    u16 ss1,__ss1h;
    u32 esp2;
    u16 ss2,__ss2h;
    u32 __cr3;
    u32 eip;
    u32 eflags;
    u32 eax,ecx,edx,ebx;
    u32 esp;
    u32 ebp;
    u32 esi;
    u32 edi;
    u16 es, __esh;
    u16 cs, __csh;
    u16 ss, __ssh;
    u16 ds, __dsh;
    u16 fs, __fsh;
    u16 gs, __gsh;
    u16 ldt, __ldth;
    u16 trace;
#endif
    u16 bitmap;
    /* Pads the TSS to be cacheline-aligned (total size is 0x80). */
    u8 __cacheline_filler[24];
} __cacheline_aligned __attribute__((packed));
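
/*
 * Example (illustrative only): 'bitmap' holds the I/O bitmap offset within
 * the TSS. An offset at or beyond the TSS limit disables the bitmap, so
 * pointing it at IOBMP_INVALID_OFFSET makes every I/O port access fault:
 *
 *     init_tss[cpu].bitmap = IOBMP_INVALID_OFFSET;
 *
 * (init_tss is declared below.)
 */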

#define IDT_ENTRIES 256
extern idt_entry_t idt_table[];
extern idt_entry_t *idt_tables[];

extern struct tss_struct init_tss[NR_CPUS];

extern void init_int80_direct_trap(struct vcpu *v);

#if defined(CONFIG_X86_32)

#define set_int80_direct_trap(_ed)                  \
    (memcpy(idt_tables[(_ed)->processor] + 0x80,    \
            &((_ed)->arch.int80_desc), 8))

#else

#define set_int80_direct_trap(_ed) ((void)0)

#endif

extern int gpf_emulate_4gb(struct cpu_user_regs *regs);

extern void write_ptbase(struct vcpu *v);

void destroy_gdt(struct vcpu *d);
long set_gdt(struct vcpu *d,
             unsigned long *frames,
             unsigned int entries);

long set_debugreg(struct vcpu *p, int reg, unsigned long value);

struct microcode_header {
    unsigned int hdrver;
    unsigned int rev;
    unsigned int date;
    unsigned int sig;
    unsigned int cksum;
    unsigned int ldrver;
    unsigned int pf;
    unsigned int datasize;
    unsigned int totalsize;
    unsigned int reserved[3];
};

struct microcode {
    struct microcode_header hdr;
    unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* The microcode format is extended from Prescott processors onwards. */
struct extended_signature {
    unsigned int sig;
    unsigned int pf;
    unsigned int cksum;
};

struct extended_sigtable {
    unsigned int count;
    unsigned int cksum;
    unsigned int reserved[3];
    struct extended_signature sigs[0];
};

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static always_inline void rep_nop(void)
{
    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
}

#define cpu_relax() rep_nop()
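
/*
 * Example (illustrative only): cpu_relax() belongs in the body of any
 * busy-wait, e.g. spinning on a hypothetical flag:
 *
 *     while ( !ready )
 *         cpu_relax();
 *
 * PAUSE hints to the pipeline that this is a spin loop, reducing the
 * penalty on loop exit and easing hyperthread and power pressure.
 */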

/* Prefetch instructions for Pentium III and AMD Athlon */
#ifdef CONFIG_MPENTIUMIII

#define ARCH_HAS_PREFETCH
extern always_inline void prefetch(const void *x)
{
    __asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
}

#elif CONFIG_X86_USE_3DNOW

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

extern always_inline void prefetch(const void *x)
{
    __asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
}

extern always_inline void prefetchw(const void *x)
{
    __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
}
#define spin_lock_prefetch(x) prefetchw(x)

#endif

void show_stack(struct cpu_user_regs *regs);
void show_stack_overflow(unsigned int cpu, unsigned long esp);
void show_registers(struct cpu_user_regs *regs);
void show_execution_state(struct cpu_user_regs *regs);
void show_page_walk(unsigned long addr);
asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs);

#ifdef CONFIG_COMPAT
void compat_show_guest_stack(struct cpu_user_regs *, int lines);
#else
#define compat_show_guest_stack(regs, lines) ((void)0)
#endif

extern void mtrr_ap_init(void);
extern void mtrr_bp_init(void);

void mcheck_init(struct cpuinfo_x86 *c);
asmlinkage void do_machine_check(struct cpu_user_regs *regs);

int cpuid_hypervisor_leaves(
    uint32_t idx, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
int rdmsr_hypervisor_regs(
    uint32_t idx, uint32_t *eax, uint32_t *edx);
int wrmsr_hypervisor_regs(
    uint32_t idx, uint32_t eax, uint32_t edx);

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_X86_PROCESSOR_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */