ia64/xen-unstable

view xen/include/asm-ia64/domain.h @ 13905:2b3dd681dbce

[IA64] Fix I&D cache incoherency after vcpu migration

Windows on HVM occasionally crashes with a BSOD, especially at boot time.
I finally found out the cause is PAL_CACHE_FLUSH(cache_type=4).
The cache_type means an argument of PAL_CACHE_FLUSH and cache_type=4
makes local instruction caches coherent with the data caches.
See SDM vol2 11.10.3, PAL_CACHE_FLUSH.
FYI, Linux never uses cache_type=4.

Currently PAL_CACHE_FLUSH is called on only local cpu and caches on the
other cpus are still incoherent.

Attached patch does:
- When cache_type=1,2,3, which means flushing caches on the local cpu,
caches on the other cpus are now flushed as well.
It might be overkill and inefficient, but I think it's acceptable
since these cache types are seldom used.

- When cache_type=4, the actual PAL call to the other cpus is deferred
until the vcpu migration occurs or the cpu becomes idle.
This is because Windows uses cache_type=4 quite often, and many vcpus
in an SMP environment call PAL_CACHE_FLUSH simultaneously.

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author awilliam@xenbuild2.aw
date Thu Feb 15 10:25:33 2007 -0700 (2007-02-15)
parents d879bbaa3faa
children 3c8039aa5004
line source
1 #ifndef __ASM_DOMAIN_H__
2 #define __ASM_DOMAIN_H__
4 #include <linux/thread_info.h>
5 #include <asm/tlb.h>
6 #include <asm/vmx_vpd.h>
7 #include <asm/vmmu.h>
8 #include <asm/regionreg.h>
9 #include <public/xen.h>
10 #include <asm/vmx_platform.h>
11 #include <xen/list.h>
12 #include <xen/cpumask.h>
13 #include <asm/fpswa.h>
14 #include <xen/rangeset.h>
16 struct p2m_entry;
17 #ifdef CONFIG_XEN_IA64_TLB_TRACK
18 struct tlb_track;
19 #endif
/* Tear down a domain's architecture-specific resources.
   NOTE(review): exact teardown steps are defined in the .c file — confirm there. */
21 extern void domain_relinquish_resources(struct domain *);
22 struct vcpu;
/* Free per-vcpu resources (counterpart of vcpu_late_initialise below). */
23 extern void relinquish_vcpu_resources(struct vcpu *v);
/* Deferred vcpu initialisation; returns 0 on success, nonzero on error
   — presumably an -errno value, verify against the definition. */
24 extern int vcpu_late_initialise(struct vcpu *v);
26 /* given a current domain metaphysical address, return the physical address */
27 extern unsigned long translate_domain_mpaddr(unsigned long mpaddr,
28 struct p2m_entry* entry);
30 /* Set shared_info virtual address. */
31 extern unsigned long domain_set_shared_info_va (unsigned long va);
33 /* Flush cache of domain d.
34 If sync_only is true, only synchronize I&D caches,
35 if false, flush and invalidate caches. */
36 extern void domain_cache_flush (struct domain *d, int sync_only);
38 /* Control the shadow mode. */
39 extern int shadow_mode_control(struct domain *d, xen_domctl_shadow_op_t *sc);
41 /* Cleanly crash the current domain with a message. */
42 extern void panic_domain(struct pt_regs *, const char *, ...)
43 __attribute__ ((noreturn, format (printf, 2, 3)));
/* Minimal memory-management context for a domain: just the root of the
   page tables.  Unlike Linux's mm_struct, no user count is kept (see the
   commented-out mm_users below). */
45 struct mm_struct {
46 volatile pgd_t * pgd; /* page global directory; volatile — presumably read/updated concurrently, confirm */
47 // atomic_t mm_users; /* How many users with user space? */
48 };
/* Per-physical-cpu record identifying the last vcpu (of the owning domain)
   that ran there — see arch_domain.last_vcpu[NR_CPUS].
   Cacheline-aligned on SMP so each cpu's entry sits in its own line. */
50 struct last_vcpu {
51 #define INVALID_VCPU_ID INT_MAX
52 int vcpu_id; /* INVALID_VCPU_ID when no vcpu has run here */
53 #ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
54 u32 tlbflush_timestamp;
55 #endif
56 } ____cacheline_aligned_in_smp;
58 /* These are data in domain memory for SAL emulator. */
59 struct xen_sal_data {
60 /* OS boot rendez vous. */
61 unsigned long boot_rdv_ip; /* entry point the APs jump to */
62 unsigned long boot_rdv_r1; /* r1 (gp) value handed to the APs */
64 /* There are these for EFI_SET_VIRTUAL_ADDRESS_MAP emulation. */
65 int efi_virt_mode; /* phys : 0 , virt : 1 */
66 };
/* Architecture-specific (ia64) per-domain state. */
68 struct arch_domain {
69 struct mm_struct mm; /* domain page-table root (see struct mm_struct) */
71 /* Flags. */
72 union {
73 unsigned long flags;
74 struct {
75 unsigned int is_vti : 1; /* set for VT-i (fully virtualized / HVM) domains */
76 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
77 unsigned int has_pervcpu_vhpt : 1; /* each vcpu owns its own VHPT (see arch_vcpu) */
78 #endif
79 };
80 };
82 /* maximum metaphysical address of conventional memory */
83 u64 convmem_end;
85 /* Allowed accesses to io ports. */
86 struct rangeset *ioport_caps;
88 /* There are two ranges of RID for a domain:
89 one big range, used to virtualize domain RID,
90 one small range for internal Xen use (metaphysical). */
91 /* Big range. */
92 int starting_rid; /* first RID assigned to domain */
93 int ending_rid; /* one beyond highest RID assigned to domain */
94 /* Metaphysical range. */
95 int starting_mp_rid;
96 int ending_mp_rid;
97 /* RID for metaphysical mode. */
98 unsigned long metaphysical_rr0; /* region register 0 value for metaphysical mode */
99 unsigned long metaphysical_rr4; /* region register 4 value for metaphysical mode */
101 int rid_bits; /* number of virtual rid bits (default: 18) */
102 int breakimm; /* The imm value for hypercalls. */
104 struct virtual_platform_def vmx_platform;
105 #define hvm_domain vmx_platform /* platform defs are not vmx specific */
/* Virtual-address layout seen by the guest; presumably the Xen-reserved
   window is [xen_vastart, xen_vaend) — confirm against the setup code. */
107 u64 xen_vastart;
108 u64 xen_vaend;
109 u64 shared_info_va; /* guest VA of shared_info, set via domain_set_shared_info_va() */
111 /* Address of SAL emulator data */
112 struct xen_sal_data *sal_data;
114 /* Address of efi_runtime_services_t (placed in domain memory) */
115 void *efi_runtime;
116 /* Address of fpswa_interface_t (placed in domain memory) */
117 void *fpswa_inf;
119 /* Bitmap of shadow dirty bits.
120 Set iff shadow mode is enabled. */
121 u64 *shadow_bitmap;
122 /* Length (in bits!) of shadow bitmap. */
123 unsigned long shadow_bitmap_size;
124 /* Number of bits set in bitmap. */
125 atomic64_t shadow_dirty_count;
126 /* Number of faults. */
127 atomic64_t shadow_fault_count;
/* Indexed by physical cpu: the last vcpu of this domain to run there
   (used for per-cpu TLB/cache bookkeeping — see struct last_vcpu). */
129 struct last_vcpu last_vcpu[NR_CPUS];
131 #ifdef CONFIG_XEN_IA64_TLB_TRACK
132 struct tlb_track* tlb_track;
133 #endif
134 };
/* Byte offset, within the shared_info vcpu_info array, of vcpu v's
   evtchn_upcall_mask field (the per-vcpu event-delivery mask). */
135 #define INT_ENABLE_OFFSET(v) \
136 (sizeof(vcpu_info_t) * (v)->vcpu_id + \
137 offsetof(vcpu_info_t, evtchn_upcall_mask))
/* True iff domain d uses per-vcpu VHPTs; compiles to 0 when the
   feature is configured out so callers need no #ifdef. */
139 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
140 #define HAS_PERVCPU_VHPT(d) ((d)->arch.has_pervcpu_vhpt)
141 #else
142 #define HAS_PERVCPU_VHPT(d) (0)
143 #endif
/* Architecture-specific (ia64) per-vcpu state.
   NOTE(review): field order matters — privregs is deliberately first for
   fast access, and _thread must stay last (see comments below). */
146 struct arch_vcpu {
147 /* Save the state of vcpu.
148 This is the first entry to speed up accesses. */
149 mapped_regs_t *privregs;
151 /* TR and TC. */
152 TR_ENTRY itrs[NITRS]; /* instruction translation registers */
153 TR_ENTRY dtrs[NDTRS]; /* data translation registers */
154 TR_ENTRY itlb;
155 TR_ENTRY dtlb;
157 /* Bit is set if there is a tr/tc for the region. */
158 unsigned char itr_regions;
159 unsigned char dtr_regions;
160 unsigned char tc_regions;
162 unsigned long irr[4]; /* Interrupt request register. */
163 unsigned long insvc[4]; /* Interrupt in service. */
164 unsigned long iva; /* guest interruption vector address (cr.iva) — confirm */
165 unsigned long domain_itm; /* guest interval timer match value — confirm */
166 unsigned long domain_itm_last;
168 unsigned long event_callback_ip; // event callback handler
169 unsigned long failsafe_callback_ip; // Do we need it?
171 /* These fields are copied from arch_domain to make access easier/faster
172 in assembly code. */
173 unsigned long metaphysical_rr0; // from arch_domain (so is pinned)
174 unsigned long metaphysical_rr4; // from arch_domain (so is pinned)
175 unsigned long metaphysical_saved_rr0; // from arch_domain (so is pinned)
176 unsigned long metaphysical_saved_rr4; // from arch_domain (so is pinned)
177 unsigned long fp_psr; // used for lazy float register
178 int breakimm; // from arch_domain (so is pinned)
179 int starting_rid; /* first RID assigned to domain */
180 int ending_rid; /* one beyond highest RID assigned to domain */
182 struct thread_struct _thread; // this must be last
184 thash_cb_t vtlb; /* virtual TLB control block */
185 thash_cb_t vhpt; /* virtual hash page table control block */
186 char irq_new_pending;
187 char irq_new_condition; // vpsr.i/vtpr change, check for pending VHPI
188 char hypercall_continuation;
190 // for physical (metaphysical) mode emulation
191 int mode_flags;
192 fpswa_ret_t fpswa_ret; /* save return values of FPSWA emulation */
193 struct timer hlt_timer;
194 struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
196 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
197 PTA pta;
198 unsigned long vhpt_maddr; /* machine address of this vcpu's VHPT */
199 struct page_info* vhpt_page;
200 unsigned long vhpt_entries;
201 #endif
202 #define INVALID_PROCESSOR INT_MAX
203 int last_processor; /* physical cpu this vcpu last ran on; INVALID_PROCESSOR if never */
/* Cpus with caches still incoherent for this vcpu: PAL_CACHE_FLUSH with
   cache_type=4 is deferred on remote cpus until migration or idle
   (see the changelog at the top of this changeset). */
204 cpumask_t cache_coherent_map;
205 };
207 #include <asm/uaccess.h> /* for KERNEL_DS */
208 #include <asm/pgtable.h>
210 /* Guest physical address of IO ports space. */
211 #define IO_PORTS_PADDR 0x00000ffffc000000UL
212 #define IO_PORTS_SIZE 0x0000000004000000UL
/* Perfmon hypercall dispatcher; returns 0 on success — presumably an
   -errno value otherwise, confirm in the definition. */
214 int
215 do_perfmon_op(unsigned long cmd,
216 XEN_GUEST_HANDLE(void) arg1, unsigned long arg2);
218 #endif /* __ASM_DOMAIN_H__ */
220 /*
221 * Local variables:
222 * mode: C
223 * c-set-style: "BSD"
224 * c-basic-offset: 4
225 * tab-width: 4
226 * indent-tabs-mode: nil
227 * End:
228 */