ia64/xen-unstable

xen/include/asm-x86/msr.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use domain::max_vcpus (i.e. d->max_vcpus) instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the IA64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 4034317507de
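To illustrate the pattern the patch applies (a hypothetical hunk, not taken from the actual changeset): loops bounded by the compile-time MAX_VCPUS constant become bounded by the per-domain limit.

    /* before */
    for ( i = 0; i < MAX_VCPUS; i++ )
        if ( (v = d->vcpu[i]) != NULL )
            vcpu_pause(v);

    /* after */
    for ( i = 0; i < d->max_vcpus; i++ )
        if ( (v = d->vcpu[i]) != NULL )
            vcpu_pause(v);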
#ifndef __ASM_MSR_H
#define __ASM_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <xen/smp.h>
#include <xen/percpu.h>
#define rdmsr(msr,val1,val2) \
     __asm__ __volatile__("rdmsr" \
                          : "=a" (val1), "=d" (val2) \
                          : "c" (msr))

#define rdmsrl(msr,val) do { unsigned long a__,b__; \
     __asm__ __volatile__("rdmsr" \
                          : "=a" (a__), "=d" (b__) \
                          : "c" (msr)); \
     val = a__ | ((u64)b__<<32); \
} while(0)
#define wrmsr(msr,val1,val2) \
     __asm__ __volatile__("wrmsr" \
                          : /* no outputs */ \
                          : "c" (msr), "a" (val1), "d" (val2))

static inline void wrmsrl(unsigned int msr, __u64 val)
{
    __u32 lo, hi;
    lo = (__u32)val;
    hi = (__u32)(val >> 32);
    wrmsr(msr, lo, hi);
}
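
/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * a read-modify-write of a 64-bit MSR via rdmsrl()/wrmsrl(). Assumes
 * MSR_IA32_APICBASE is available from msr-index.h; bit 11 is the APIC
 * global-enable bit.
 *
 *     u64 apic_base;
 *     rdmsrl(MSR_IA32_APICBASE, apic_base);
 *     wrmsrl(MSR_IA32_APICBASE, apic_base | (1UL << 11));
 */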
/* rdmsr with exception handling */
#define rdmsr_safe(msr,val1,val2) ({\
    int _rc; \
    __asm__ __volatile__( \
        "1: rdmsr\n2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3: movl %5,%2\n; jmp 2b\n" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n" \
        "   "__FIXUP_ALIGN"\n" \
        "   "__FIXUP_WORD" 1b,3b\n" \
        ".previous\n" \
        : "=a" (val1), "=d" (val2), "=&r" (_rc) \
        : "c" (msr), "2" (0), "i" (-EFAULT)); \
    _rc; })
/* wrmsr with exception handling */
#define wrmsr_safe(msr,val1,val2) ({\
    int _rc; \
    __asm__ __volatile__( \
        "1: wrmsr\n2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3: movl %5,%0\n; jmp 2b\n" \
        ".previous\n" \
        ".section __ex_table,\"a\"\n" \
        "   "__FIXUP_ALIGN"\n" \
        "   "__FIXUP_WORD" 1b,3b\n" \
        ".previous\n" \
        : "=&r" (_rc) \
        : "c" (msr), "a" (val1), "d" (val2), "0" (0), "i" (-EFAULT)); \
    _rc; })
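
/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * the _safe variants evaluate to non-zero (-EFAULT) if the access faults,
 * which makes them suitable for probing MSRs that may not exist. Assumes
 * MSR_IA32_PLATFORM_ID from msr-index.h.
 *
 *     u32 lo, hi;
 *     if ( rdmsr_safe(MSR_IA32_PLATFORM_ID, lo, hi) )
 *         printk("MSR not readable on this CPU\n");
 */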
#define rdtsc(low,high) \
     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#define rdtscl(low) \
     __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")

#if defined(__i386__)
#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))
#elif defined(__x86_64__)
#define rdtscll(val) do { \
     unsigned int a,d; \
     asm volatile("rdtsc" : "=a" (a), "=d" (d)); \
     (val) = ((unsigned long)a) | (((unsigned long)d)<<32); \
} while(0)
#endif
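
/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * cycle-counting a section of code with rdtscll(); do_work() is a
 * hypothetical placeholder.
 *
 *     u64 t1, t2;
 *     rdtscll(t1);
 *     do_work();
 *     rdtscll(t2);
 *     printk("elapsed: %"PRIu64" cycles\n", t2 - t1);
 */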
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) /* 0x10 == MSR_IA32_TSC */
#define rdpmc(counter,low,high) \
     __asm__ __volatile__("rdpmc" \
                          : "=a" (low), "=d" (high) \
                          : "c" (counter))
DECLARE_PER_CPU(u64, efer);

static inline u64 read_efer(void)
{
    return this_cpu(efer);
}

static inline void write_efer(u64 val)
{
    this_cpu(efer) = val;
    wrmsrl(MSR_EFER, val);
}
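
/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * setting a feature bit in EFER through the cached accessors, assuming
 * EFER_NX from msr-index.h.
 *
 *     write_efer(read_efer() | EFER_NX);
 */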
DECLARE_PER_CPU(u32, ler_msr);

static inline void ler_enable(void)
{
    u64 debugctl;

    if ( !this_cpu(ler_msr) )
        return;

    rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
    wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl | 1); /* bit 0: LBR (last branch record) enable */
}
#endif /* !__ASSEMBLY__ */

#endif /* __ASM_MSR_H */