ia64/xen-unstable: xen/include/asm-ia64/vmx_vcpu.h @ 6538:84ee014ebd41

Merge xen-vtx-unstable.hg
author: adsharma@los-vmm.sc.intel.com
date:   Wed Aug 17 12:34:38 2005 -0800
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.h:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */
#ifndef _XEN_IA64_VMX_VCPU_H
#define _XEN_IA64_VMX_VCPU_H

#include <xen/sched.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vpd.h>
#include <asm/ptrace.h>
#include <asm/regs.h>
#include <asm/regionreg.h>
#include <asm/types.h>
#include <asm/vcpu.h>
#define VRN_SHIFT 61
#define VRN0 0x0UL
#define VRN1 0x1UL
#define VRN2 0x2UL
#define VRN3 0x3UL
#define VRN4 0x4UL
#define VRN5 0x5UL
#define VRN6 0x6UL
#define VRN7 0x7UL
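/*
 * Bits 63:61 of an ia64 virtual address select its region, so shifting by
 * VRN_SHIFT yields one of VRN0..VRN7.  Illustrative only:
 *
 *   u64 vrn = vadr >> VRN_SHIFT;   // region number of vadr
 */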
// This definition of vcpu_regs does not work once a kernel stack is in use.
#define vcpu_regs(vcpu) (((struct pt_regs *) ((char *) (vcpu) + IA64_STK_OFFSET)) - 1)
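/*
 * Illustrative use (hypothetical caller; `current` denotes the running
 * vcpu as elsewhere in Xen):
 *
 *   struct pt_regs *regs = vcpu_regs(current);
 *
 * i.e. the pt_regs frame saved at the top of the vcpu stack area.
 */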
#define VMX_VPD(x,y) ((x)->arch.arch_vmx.vpd->y)

#define VMX(x,y) ((x)->arch.arch_vmx.y)

#define VPD_CR(x,y) (((cr_t*)VMX_VPD(x,vcr))->y)
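/*
 * For example, VPD_CR(vcpu, itm) expands to
 * ((cr_t *)(vcpu)->arch.arch_vmx.vpd->vcr)->itm, i.e. the virtual ITM
 * field in this vcpu's VPD control-register block.
 */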
#define VMM_RR_SHIFT 20
#define VMM_RR_MASK ((1UL<<VMM_RR_SHIFT)-1)
//#define VRID_2_MRID(vcpu,rid) (((rid) & VMM_RR_MASK) | \
//        ((vcpu->domain->domain_id) << VMM_RR_SHIFT))
extern u64 indirect_reg_igfld_MASK(int type, int index, u64 value);
extern u64 cr_igfld_mask(int index, u64 value);
extern int check_indirect_reg_rsv_fields(int type, int index, u64 value);
extern u64 set_isr_ei_ni(VCPU *vcpu);
extern u64 set_isr_for_na_inst(VCPU *vcpu, int op);
/* Declarations of the CONFIG_VTI APIs. */
extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value);
extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
extern void vmx_vcpu_set_psr_sync_mpsr(VCPU *vcpu, UINT64 value);
extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
extern thash_cb_t *vmx_vcpu_get_vtlb(VCPU *vcpu);
extern thash_cb_t *vmx_vcpu_get_vhpt(VCPU *vcpu);
extern ia64_rr vmx_vcpu_rr(VCPU *vcpu, UINT64 vadr);
extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
/* vmx_vcpu_get_rr is defined as a static inline below. */
extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
extern IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
extern IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
extern IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr);
extern IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
extern u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa);
extern IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
extern IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
extern IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
extern IA64FAULT vmx_vcpu_rfi(VCPU *vcpu);
extern UINT64 vmx_vcpu_get_psr(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val);
extern IA64FAULT vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val, int nat);
extern IA64FAULT vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 *val);
extern IA64FAULT vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat);
extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
extern void vtm_init(VCPU *vcpu);
extern uint64_t vtm_get_itc(VCPU *vcpu);
extern void vtm_set_itc(VCPU *vcpu, uint64_t new_itc);
extern void vtm_set_itv(VCPU *vcpu);
extern void vtm_interruption_update(VCPU *vcpu, vtime_t *vtm);
extern void vtm_domain_out(VCPU *vcpu);
extern void vtm_domain_in(VCPU *vcpu);
#ifdef V_IOSAPIC_READY
extern void vlapic_update_ext_irq(VCPU *vcpu);
extern void vlapic_update_shared_info(VCPU *vcpu);
#endif
extern void vlsapic_reset(VCPU *vcpu);
extern int vmx_check_pending_irq(VCPU *vcpu);
extern void guest_write_eoi(VCPU *vcpu);
extern uint64_t guest_read_vivr(VCPU *vcpu);
extern void vmx_inject_vhpi(VCPU *vcpu, u8 vec);
extern void vmx_vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector);
extern struct virutal_platform_def *vmx_vcpu_get_plat(VCPU *vcpu);
extern void memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
/**************************************************************************
 VCPU control register access routines
**************************************************************************/
static inline
IA64FAULT vmx_vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,dcr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,itm);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iva);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,pta);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,ipsr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,isr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iip);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,ifa);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,itir);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iipa);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,ifs);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iim);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iha);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,lid);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
{
    *pval = guest_read_vivr(vcpu);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,tpr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
{
    *pval = 0L;    // reads of eoi always return 0
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,irr[0]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,irr[1]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,irr[2]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,irr[3]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,itv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,pmv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,cmcv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,lrr0);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,lrr1);
    return (IA64_NO_FAULT);
}
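/*
 * All of the accessors above follow the same convention: the caller passes
 * a destination pointer and checks the returned IA64FAULT code.  A sketch
 * of a hypothetical caller (illustrative only):
 *
 *   UINT64 itm;
 *   if (vmx_vcpu_get_itm(vcpu, &itm) == IA64_NO_FAULT)
 *       ...  use the guest's virtual ITM value ...
 */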
static inline
IA64FAULT
vmx_vcpu_set_dcr(VCPU *vcpu, u64 val)
{
    u64 mdcr, mask;

    VPD_CR(vcpu,dcr) = val;
    /* All vDCR bits will go to mDCR, except for the be/pp bits. */
    mdcr = ia64_get_dcr();
    mask = IA64_DCR_BE | IA64_DCR_PP;
    mdcr = (mdcr & mask) | (val & (~mask));
    ia64_set_dcr(mdcr);

    return IA64_NO_FAULT;
}
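/*
 * Worked example: with mask = IA64_DCR_BE | IA64_DCR_PP, the merge
 * (mdcr & mask) | (val & ~mask) keeps the host's be/pp bits and takes
 * every other bit from the guest's val.
 */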
static inline
IA64FAULT
vmx_vcpu_set_itm(VCPU *vcpu, u64 val)
{
    vtime_t *vtm;

    vtm = &(vcpu->arch.arch_vmx.vtm);
    VPD_CR(vcpu,itm) = val;
    vtm_interruption_update(vcpu, vtm);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iva(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iva) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_pta(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,pta) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_ipsr(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,ipsr) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_isr(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,isr) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iip(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iip) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_ifa(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,ifa) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_itir(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,itir) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iipa(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iipa) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_ifs(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,ifs) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iim(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iim) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iha(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iha) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,lid) = val;
#ifdef V_IOSAPIC_READY
    vlapic_update_shared_info(vcpu);
#endif
    return IA64_NO_FAULT;
}

extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);

static inline
IA64FAULT
vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
{
    guest_write_eoi(vcpu);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_itv(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,itv) = val;
    vtm_set_itv(vcpu);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_pmv(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,pmv) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_cmcv(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,cmcv) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lrr0(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,lrr0) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,lrr1) = val;
    return IA64_NO_FAULT;
}
/**************************************************************************
 VCPU privileged application register access routines
**************************************************************************/
static inline
IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val)
{
    vtm_set_itc(vcpu, val);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu, UINT64 *val)
{
    *val = vtm_get_itc(vcpu);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    *pval = VMX(vcpu,vrr[reg>>VRN_SHIFT]);
    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU debug breakpoint register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented CPUID registers return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    if (reg > 4) {
        panic("there are only five cpuid registers");
    }
    *pval = VMX_VPD(vcpu,vcpuid[reg]);
    return (IA64_NO_FAULT);
}
static inline
IA64FAULT vmx_vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented DBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_dbr(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented IBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_ibr(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented DBRs return a reserved register fault
    UINT64 val = ia64_get_dbr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented IBRs return a reserved register fault
    UINT64 val = ia64_get_ibr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU performance monitor register access routines
**************************************************************************/
static inline
IA64FAULT vmx_vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMC registers are discarded
    ia64_set_pmc(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMD registers are discarded
    ia64_set_pmd(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMC registers return zero
    UINT64 val = (UINT64)ia64_get_pmc(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMD registers return zero
    UINT64 val = (UINT64)ia64_get_pmd(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU banked general register access routines
**************************************************************************/
static inline
IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
{
    /* Select register bank 0 by clearing the virtual PSR.bn bit. */
    VMX_VPD(vcpu,vpsr) &= ~IA64_PSR_BN;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
{
    /* Select register bank 1 by setting the virtual PSR.bn bit. */
    VMX_VPD(vcpu,vpsr) |= IA64_PSR_BN;
    return (IA64_NO_FAULT);
}
#define redistribute_rid(rid) (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))

static inline unsigned long
vmx_vrrtomrr(VCPU *vcpu, unsigned long val)
{
    ia64_rr rr;
    u64 rid;

    rr.rrval = val;
    rid = (((u64)vcpu->domain->domain_id) << DOMAIN_RID_SHIFT) + rr.rid;
    rr.rid = redistribute_rid(rid);
    rr.ve = 1;
    return rr.rrval;
}
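/*
 * redistribute_rid() swaps the two low bytes of the rid while preserving
 * the bits above them, presumably to spread consecutive rids more evenly.
 * Worked example (illustrative only): redistribute_rid(0x1234) == 0x3412.
 */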
#endif /* _XEN_IA64_VMX_VCPU_H */