ia64/xen-unstable

view xen/include/asm-ia64/vmx_vcpu.h @ 9157:a693ccb4d581

[IA64] VTI: fix Oops: time tick before it's due

1. The guest may set itm several times within a single execution of its
timer handler. The VMM needs to handle this situation.
2. The VMM does not need to stop the guest timer when switching the vCPU
out and reset it when switching back in; this only leaves room for corner
cases that I have not pinned down yet :-), so this logic is simply removed.
3. When the VMM emulates a write to itv, it cannot simply stop the timer
while the guest's timer vector is masked.
4. Operations such as reading/writing itv, itc and itm do not need to
disable interrupts, since there is no conflicting access.

After these modifications the VTI domain no longer complains "Oops: time
tick before it's due". I have not done a full test yet :-).

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Tue Mar 07 20:01:29 2006 -0700 (2006-03-07)
parents 18b0911d936d
children 1abf3783975d
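
Point 1 above is the core of the fix: each guest write to cr.itm must supersede the previously programmed deadline, otherwise a stale host timer set for an earlier value fires "before it's due". The actual change lives in the VTI timer code (vlsapic), not in this header; the fragment below is only a rough, hypothetical sketch of that idea. The struct vtimer type and the helpers guest_itc_now(), host_timer_arm() and host_timer_cancel() are invented for illustration and do not exist in the Xen source.

/* Hypothetical illustration only -- not the actual Xen vtm_set_itm() change. */

typedef unsigned long long u64;

struct vtimer {
    u64 vitm;        /* guest's virtual cr.itm (next deadline, in ITC units) */
    int armed;       /* is a host one-shot timer currently programmed?       */
};

/* Assumed helpers, invented for this sketch:
 *   guest_itc_now()       - current guest-visible ITC value
 *   host_timer_arm(t, d)  - (re)program a host one-shot timer for deadline d
 *   host_timer_cancel(t)  - cancel any pending host one-shot timer
 */
extern u64  guest_itc_now(void);
extern void host_timer_arm(struct vtimer *t, u64 deadline);
extern void host_timer_cancel(struct vtimer *t);

/* Emulated guest write to cr.itm.  Every write replaces the previously
 * programmed deadline, so if the guest rewrites itm several times inside
 * one pass of its timer handler, only the last value stays armed instead
 * of an earlier, stale deadline firing "before it's due". */
static void sketch_vtm_set_itm(struct vtimer *t, u64 new_itm)
{
    u64 now = guest_itc_now();

    t->vitm = new_itm;
    host_timer_cancel(t);                    /* drop any stale deadline */
    host_timer_arm(t, new_itm > now ? new_itm : now);
    t->armed = 1;
}

In the header below, vmx_vcpu_set_itm() simply forwards the emulated write to vtm_set_itm(), which is where logic of this shape would live.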
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.h:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */
#ifndef _XEN_IA64_VMX_VCPU_H
#define _XEN_IA64_VMX_VCPU_H

#include <xen/sched.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vpd.h>
#include <asm/ptrace.h>
#include <asm/regs.h>
#include <asm/regionreg.h>
#include <asm/types.h>
#include <asm/vcpu.h>

#define VRN_SHIFT   61
#define VRN0        0x0UL
#define VRN1        0x1UL
#define VRN2        0x2UL
#define VRN3        0x3UL
#define VRN4        0x4UL
#define VRN5        0x5UL
#define VRN6        0x6UL
#define VRN7        0x7UL

// for vlsapic
#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
//#define VMX_VPD(x,y)  ((x)->arch.arch_vmx.vpd->y)
#define VMX(x,y)  ((x)->arch.arch_vmx.y)

#define VMM_RR_SHIFT    20
#define VMM_RR_MASK     ((1UL<<VMM_RR_SHIFT)-1)

extern u64 indirect_reg_igfld_MASK(int type, int index, u64 value);
extern u64 cr_igfld_mask(int index, u64 value);
extern int check_indirect_reg_rsv_fields(int type, int index, u64 value);
extern u64 set_isr_ei_ni(VCPU *vcpu);
extern u64 set_isr_for_na_inst(VCPU *vcpu, int op);
/* next all for VTI domain APIs definition */
extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value);
extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
extern void vmx_vcpu_set_psr_sync_mpsr(VCPU *vcpu, UINT64 value);
extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
extern thash_cb_t *vmx_vcpu_get_vtlb(VCPU *vcpu);
extern thash_cb_t *vmx_vcpu_get_vhpt(VCPU *vcpu);
extern ia64_rr vmx_vcpu_rr(VCPU *vcpu, UINT64 vadr);
extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
#if 0
extern IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
#endif
extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
extern IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
extern IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
extern IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr);
extern IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
extern u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa);
extern IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
extern IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
extern IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
extern IA64FAULT vmx_vcpu_rfi(VCPU *vcpu);
extern UINT64 vmx_vcpu_get_psr(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val);
extern IA64FAULT vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val, int nat);
#if 0
extern IA64FAULT vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 *val);
extern IA64FAULT vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat);
#endif
extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
extern void vtm_init(VCPU *vcpu);
extern uint64_t vtm_get_itc(VCPU *vcpu);
extern void vtm_set_itc(VCPU *vcpu, uint64_t new_itc);
extern void vtm_set_itv(VCPU *vcpu, uint64_t val);
extern void vtm_set_itm(VCPU *vcpu, uint64_t val);
extern void vtm_interruption_update(VCPU *vcpu, vtime_t *vtm);
//extern void vtm_domain_out(VCPU *vcpu);
//extern void vtm_domain_in(VCPU *vcpu);
extern void vlsapic_reset(VCPU *vcpu);
extern int vmx_check_pending_irq(VCPU *vcpu);
extern void guest_write_eoi(VCPU *vcpu);
extern uint64_t guest_read_vivr(VCPU *vcpu);
extern void vmx_inject_vhpi(VCPU *vcpu, u8 vec);
extern int vmx_vcpu_pend_interrupt(VCPU *vcpu, uint8_t vector);
extern struct virtual_platform_def *vmx_vcpu_get_plat(VCPU *vcpu);
extern void memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
extern void vcpu_load_kernel_regs(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu);
extern void vmx_switch_rr7(unsigned long, shared_info_t *, void *, void *, void *);

extern void dtlb_fault(VCPU *vcpu, u64 vadr);
extern void nested_dtlb(VCPU *vcpu);
extern void alt_dtlb(VCPU *vcpu, u64 vadr);
extern void dvhpt_fault(VCPU *vcpu, u64 vadr);
extern void dnat_page_consumption(VCPU *vcpu, uint64_t vadr);
extern void page_not_present(VCPU *vcpu, u64 vadr);
/**************************************************************************
 VCPU control register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,dcr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,itm);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,iva);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,pta);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,lid);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
{
    *pval = guest_read_vivr(vcpu);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,tpr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
{
    *pval = 0L;  // reads of eoi always return 0
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,irr[0]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,irr[1]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,irr[2]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,irr[3]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,itv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,pmv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,cmcv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,lrr0);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,lrr1);
    return (IA64_NO_FAULT);
}
static inline
IA64FAULT
vmx_vcpu_set_dcr(VCPU *vcpu, u64 val)
{
    u64 mdcr, mask;
    VCPU(vcpu,dcr) = val;
    /* All vDCR bits will go to mDCR, except for be/pp bit */
    mdcr = ia64_get_dcr();
    mask = IA64_DCR_BE | IA64_DCR_PP;
    mdcr = (mdcr & mask) | (val & (~mask));
    ia64_set_dcr(mdcr);

    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_itm(VCPU *vcpu, u64 val)
{
    vtm_set_itm(vcpu, val);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iva(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,iva) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_pta(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,pta) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,lid) = val;
    return IA64_NO_FAULT;
}

extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);

static inline
IA64FAULT
vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
{
    guest_write_eoi(vcpu);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_itv(VCPU *vcpu, u64 val)
{
    vtm_set_itv(vcpu, val);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_pmv(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,pmv) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_cmcv(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,cmcv) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lrr0(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,lrr0) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,lrr1) = val;
    return IA64_NO_FAULT;
}
/**************************************************************************
 VCPU privileged application register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val)
{
    vtm_set_itc(vcpu, val);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu, UINT64 *val)
{
    *val = vtm_get_itc(vcpu);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    *pval = VMX(vcpu,vrr[reg>>61]);
    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU debug breakpoint register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented DBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    if (reg > 4) {
        panic("there are only five cpuid registers");
    }
    *pval = VCPU(vcpu,vcpuid[reg]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented DBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_dbr(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented IBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_ibr(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented DBRs return a reserved register fault
    UINT64 val = ia64_get_dbr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented IBRs return a reserved register fault
    UINT64 val = ia64_get_ibr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU performance monitor register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMC registers are discarded
    ia64_set_pmc(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMD registers are discarded
    ia64_set_pmd(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMC registers return zero
    UINT64 val = (UINT64)ia64_get_pmc(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMD registers return zero
    UINT64 val = (UINT64)ia64_get_pmd(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU banked general register access routines
**************************************************************************/
#if 0
static inline
IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
{
    VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
{
    VCPU(vcpu,vpsr) |= IA64_PSR_BN;
    return (IA64_NO_FAULT);
}
#endif

#if 0
/* Another hash performance algorithm */
#define redistribute_rid(rid) (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
#endif

static inline unsigned long
vmx_vrrtomrr(VCPU *v, unsigned long val)
{
    ia64_rr rr;

    rr.rrval = val;
    rr.rid = rr.rid + v->arch.starting_rid;
    rr.ps = PAGE_SHIFT;
    rr.ve = 1;
    return vmMangleRID(rr.rrval);
/* Disable this rid allocation algorithm for now */
#if 0
    rid = (((u64)vcpu->domain->domain_id) << DOMAIN_RID_SHIFT) + rr.rid;
    rr.rid = redistribute_rid(rid);
#endif
}

#define check_work_pending(v) \
    (event_pending((v)) || ((v)->arch.irq_new_pending))

#endif