/* xen/include/asm-ia64/vmx_vcpu.h @ 9765:7c7bcf173f8b ([IA64] cleanup vtlb code) */
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.h:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */

#ifndef _XEN_IA64_VMX_VCPU_H
#define _XEN_IA64_VMX_VCPU_H

#include <xen/sched.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vpd.h>
#include <asm/ptrace.h>
#include <asm/regs.h>
#include <asm/regionreg.h>
#include <asm/types.h>
#include <asm/vcpu.h>

#define VRN_SHIFT 61
#define VRN0 0x0UL
#define VRN1 0x1UL
#define VRN2 0x2UL
#define VRN3 0x3UL
#define VRN4 0x4UL
#define VRN5 0x5UL
#define VRN6 0x6UL
#define VRN7 0x7UL
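
/*
 * Illustrative sketch (not part of the original header): VRN_SHIFT is 61
 * because the virtual region number occupies the top three bits of a 64-bit
 * virtual address, so a hypothetical helper to recover the region number
 * (VRN0..VRN7) from an address could look like this.
 */
#if 0
static inline u64 example_vadr_to_vrn(u64 vadr)
{
    return vadr >> VRN_SHIFT;   /* yields 0x0UL..0x7UL, i.e. VRN0..VRN7 */
}
#endif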
// for vlsapic
#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
//#define VMX_VPD(x,y) ((x)->arch.arch_vmx.vpd->y)

#define VMX(x,y) ((x)->arch.arch_vmx.y)

#define VMM_RR_SHIFT 20
#define VMM_RR_MASK ((1UL<<VMM_RR_SHIFT)-1)

extern u64 indirect_reg_igfld_MASK(int type, int index, u64 value);
extern u64 cr_igfld_mask(int index, u64 value);
extern int check_indirect_reg_rsv_fields(int type, int index, u64 value);
extern u64 set_isr_ei_ni(VCPU *vcpu);
extern u64 set_isr_for_na_inst(VCPU *vcpu, int op);

/* next all for VTI domain APIs definition */
extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value);
extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
extern void vmx_vcpu_set_psr_sync_mpsr(VCPU *vcpu, UINT64 value);
extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
extern IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr);
extern IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
extern u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa);
extern IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
extern IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
extern IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
extern IA64FAULT vmx_vcpu_rfi(VCPU *vcpu);
extern UINT64 vmx_vcpu_get_psr(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val);
extern IA64FAULT vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val, int nat);
#if 0
extern IA64FAULT vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 *val);
extern IA64FAULT vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat);
#endif
extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
extern void vtm_init(VCPU *vcpu);
extern uint64_t vtm_get_itc(VCPU *vcpu);
extern void vtm_set_itc(VCPU *vcpu, uint64_t new_itc);
extern void vtm_set_itv(VCPU *vcpu, uint64_t val);
extern void vtm_set_itm(VCPU *vcpu, uint64_t val);
extern void vtm_interruption_update(VCPU *vcpu, vtime_t *vtm);
//extern void vtm_domain_out(VCPU *vcpu);
//extern void vtm_domain_in(VCPU *vcpu);
extern void vlsapic_reset(VCPU *vcpu);
extern int vmx_check_pending_irq(VCPU *vcpu);
extern void guest_write_eoi(VCPU *vcpu);
extern uint64_t guest_read_vivr(VCPU *vcpu);
extern void vmx_inject_vhpi(VCPU *vcpu, u8 vec);
extern int vmx_vcpu_pend_interrupt(VCPU *vcpu, uint8_t vector);
extern struct virtual_platform_def *vmx_vcpu_get_plat(VCPU *vcpu);
extern void memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
extern void vcpu_load_kernel_regs(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu);
extern void vmx_switch_rr7(unsigned long, shared_info_t*, void *, void *, void *);

extern void dtlb_fault(VCPU *vcpu, u64 vadr);
extern void nested_dtlb(VCPU *vcpu);
extern void alt_dtlb(VCPU *vcpu, u64 vadr);
extern void dvhpt_fault(VCPU *vcpu, u64 vadr);
extern void dnat_page_consumption(VCPU *vcpu, uint64_t vadr);
extern void page_not_present(VCPU *vcpu, u64 vadr);

/**************************************************************************
VCPU control register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,dcr);
    return (IA64_NO_FAULT);
}
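
/*
 * Illustrative sketch (not code from this tree): the getters below all
 * follow the same pattern as vmx_vcpu_get_dcr() above, so an emulation
 * path that needs a virtual control register would do something like:
 */
#if 0
    UINT64 dcr;
    if (vmx_vcpu_get_dcr(vcpu, &dcr) != IA64_NO_FAULT)
        /* handle the fault */;
#endif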
static inline
IA64FAULT vmx_vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,itm);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,iva);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,pta);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,lid);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
{
    *pval = guest_read_vivr(vcpu);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,tpr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
{
    *pval = 0L; // reads of eoi always return 0
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,irr[0]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,irr[1]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,irr[2]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,irr[3]);
    return (IA64_NO_FAULT);
}
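
/*
 * Note (added for clarity): irr[0]..irr[3] are the four 64-bit halves of the
 * architected 256-bit interrupt request register, one bit per external
 * interrupt vector (bit i of irr[n] corresponds to vector 64*n + i).
 */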
static inline
IA64FAULT vmx_vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,itv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,pmv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,cmcv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,lrr0);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
{
    *pval = VCPU(vcpu,lrr1);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT
vmx_vcpu_set_dcr(VCPU *vcpu, u64 val)
{
    u64 mdcr, mask;
    VCPU(vcpu,dcr) = val;
    /* All vDCR bits will go to mDCR, except for be/pp bit */
    mdcr = ia64_get_dcr();
    mask = IA64_DCR_BE | IA64_DCR_PP;
    mdcr = (mdcr & mask) | (val & (~mask));
    ia64_set_dcr(mdcr);

    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_itm(VCPU *vcpu, u64 val)
{
    vtm_set_itm(vcpu, val);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iva(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,iva) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_pta(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,pta) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,lid) = val;
    return IA64_NO_FAULT;
}

extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);

static inline
IA64FAULT
vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
{
    guest_write_eoi(vcpu);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_itv(VCPU *vcpu, u64 val)
{
    vtm_set_itv(vcpu, val);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_pmv(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,pmv) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_cmcv(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,cmcv) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lrr0(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,lrr0) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,lrr1) = val;
    return IA64_NO_FAULT;
}

/**************************************************************************
VCPU privileged application register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val)
{
    vtm_set_itc(vcpu, val);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu, UINT64 *val)
{
    *val = vtm_get_itc(vcpu);
    return IA64_NO_FAULT;
}
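
/*
 * Note (added for clarity): both ar.itc accessors above delegate to the
 * virtual timer module (vtm_set_itc/vtm_get_itc) rather than touching the
 * machine ITC directly, so each VTI domain keeps its own view of the
 * interval time counter.
 */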
/*
static inline
IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    *pval = VMX(vcpu,vrr[reg>>61]);
    return (IA64_NO_FAULT);
}
*/

/**************************************************************************
VCPU debug breakpoint register access routines
**************************************************************************/
static inline
IA64FAULT vmx_vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented CPUID registers should return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    if (reg > 4) {
        panic("there are only five cpuid registers");
    }
    *pval = VCPU(vcpu,vcpuid[reg]);
    return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented DBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_dbr(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented IBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_ibr(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented DBRs return a reserved register fault
    UINT64 val = ia64_get_dbr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented IBRs return a reserved register fault
    UINT64 val = ia64_get_ibr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

/**************************************************************************
VCPU performance monitor register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMC registers are discarded
    ia64_set_pmc(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMD registers are discarded
    ia64_set_pmd(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMC registers return zero
    UINT64 val = (UINT64)ia64_get_pmc(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMD registers return zero
    UINT64 val = (UINT64)ia64_get_pmd(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

/**************************************************************************
VCPU banked general register access routines
**************************************************************************/
#if 0
static inline
IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
{
    VCPU(vcpu,vpsr) &= ~IA64_PSR_BN;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
{
    VCPU(vcpu,vpsr) |= IA64_PSR_BN;
    return (IA64_NO_FAULT);
}
#endif

#if 0
/* Another hash performance algorithm */
#define redistribute_rid(rid) (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
#endif
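
/*
 * Note (added for clarity): vrrtomrr() turns a guest (virtual) region
 * register value into the value loaded on the machine: the guest's rid is
 * offset by the domain's starting_rid, the preferred page size is forced to
 * PAGE_SHIFT, VHPT walking is enabled (ve = 1), and the result is passed
 * through vmMangleRID().
 */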
static inline unsigned long
vrrtomrr(VCPU *v, unsigned long val)
{
    ia64_rr rr;

    rr.rrval = val;
    rr.rid = rr.rid + v->arch.starting_rid;
    rr.ps = PAGE_SHIFT;
    rr.ve = 1;
    return vmMangleRID(rr.rrval);
/* Disable this rid allocation algorithm for now */
#if 0
    rid = (((u64)vcpu->domain->domain_id)<<DOMAIN_RID_SHIFT) + rr.rid;
    rr.rid = redistribute_rid(rid);
#endif
}

static inline thash_cb_t *
vmx_vcpu_get_vtlb(VCPU *vcpu)
{
    return &vcpu->arch.vtlb;
}

static inline thash_cb_t *
vcpu_get_vhpt(VCPU *vcpu)
{
    return &vcpu->arch.vhpt;
}
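
/*
 * Note (added for clarity): a VCPU has work pending if either a generic
 * event is pending for it or a new virtual interrupt has been flagged via
 * irq_new_pending.
 */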
#define check_work_pending(v) \
    (event_pending((v)) || ((v)->arch.irq_new_pending))

#endif /* _XEN_IA64_VMX_VCPU_H */