xen/include/asm-ia64/vmx_vcpu.h (ia64/xen-unstable, changeset 5797:ca44d2dbb273)

Intel's pre-bk->hg transition patches
Signed-off-by: Eddie Dong <Eddie.dong@intel.com>
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
Signed-off-by: Kevin Tian <Kevin.tian@intel.com>
Author:   djm@kirby.fc.hp.com
Date:     Sat Jul 09 07:58:56 2005 -0700
Parent:   c91f74efda05
Children: a83ac0806d6b
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.h:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */

#ifndef _XEN_IA64_VMX_VCPU_H
#define _XEN_IA64_VMX_VCPU_H

#include <xen/sched.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vpd.h>
#include <asm/ptrace.h>
#include <asm/regs.h>
#include <asm/regionreg.h>
#include <asm/types.h>
#include <asm/vcpu.h>

#define VRN_SHIFT    61
#define VRN0         0x0UL
#define VRN1         0x1UL
#define VRN2         0x2UL
#define VRN3         0x3UL
#define VRN4         0x4UL
#define VRN5         0x5UL
#define VRN6         0x6UL
#define VRN7         0x7UL
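
/*
 * Note (added for clarity, not in the original header): VRN_SHIFT is 61
 * because the virtual region number occupies bits 63:61 of an ia64 virtual
 * address, so VRN0..VRN7 name the eight regions; e.g. (vadr >> VRN_SHIFT)
 * yields VRN7 for addresses at or above 0xe000000000000000.
 */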
// this def for vcpu_regs won't work if kernel stack is present
#define vcpu_regs(vcpu)   (((struct pt_regs *) ((char *) (vcpu) + IA64_STK_OFFSET)) - 1)
#define VMX_VPD(x,y)      ((x)->arch.arch_vmx.vpd->y)

#define VMX(x,y)          ((x)->arch.arch_vmx.y)

#define VPD_CR(x,y)       (((cr_t*)VMX_VPD(x,vcr))->y)
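
/*
 * Illustration (added for clarity, not in the original header): VPD_CR()
 * composes the two macros above, so for example
 *     VPD_CR(vcpu, dcr)
 * expands to
 *     (((cr_t*)((vcpu)->arch.arch_vmx.vpd->vcr))->dcr)
 * i.e. the virtual DCR slot in this vCPU's VPD control-register block.
 */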
#define VMM_RR_SHIFT 20
#define VMM_RR_MASK  ((1UL<<VMM_RR_SHIFT)-1)
//#define VRID_2_MRID(vcpu,rid)  ((rid) & VMM_RR_MASK) | \
//                               ((vcpu->domain->domain_id) << VMM_RR_SHIFT)
extern u64 indirect_reg_igfld_MASK ( int type, int index, u64 value);
extern u64 cr_igfld_mask (int index, u64 value);
extern int check_indirect_reg_rsv_fields ( int type, int index, u64 value );
extern u64 set_isr_ei_ni (VCPU *vcpu);
extern u64 set_isr_for_na_inst(VCPU *vcpu, int op);

/* next all for CONFIG_VTI APIs definition */
extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value);
extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
extern void vmx_vcpu_set_psr_sync_mpsr(VCPU *vcpu, UINT64 value);
extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
extern thash_cb_t *vmx_vcpu_get_vtlb(VCPU *vcpu);
extern thash_cb_t *vmx_vcpu_get_vhpt(VCPU *vcpu);
extern ia64_rr vmx_vcpu_rr(VCPU *vcpu, UINT64 vadr);
extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
extern IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
extern IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
extern IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
extern IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr);
extern IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
extern u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa);
extern IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
extern IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
extern IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
extern IA64FAULT vmx_vcpu_rfi(VCPU *vcpu);
extern UINT64 vmx_vcpu_get_psr(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val);
extern IA64FAULT vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val, int nat);
extern IA64FAULT vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 *val);
extern IA64FAULT vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat);
extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
extern void vtm_init(VCPU *vcpu);
extern uint64_t vtm_get_itc(VCPU *vcpu);
extern void vtm_set_itc(VCPU *vcpu, uint64_t new_itc);
extern void vtm_set_itv(VCPU *vcpu);
extern void vtm_interruption_update(VCPU *vcpu, vtime_t *vtm);
extern void vtm_domain_out(VCPU *vcpu);
extern void vtm_domain_in(VCPU *vcpu);
extern void vlsapic_reset(VCPU *vcpu);
extern int vmx_check_pending_irq(VCPU *vcpu);
extern void guest_write_eoi(VCPU *vcpu);
extern uint64_t guest_read_vivr(VCPU *vcpu);
extern void vmx_inject_vhpi(VCPU *vcpu, u8 vec);
extern void vmx_vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector);
extern struct virutal_platform_def *vmx_vcpu_get_plat(VCPU *vcpu);
extern void memread_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);

/**************************************************************************
 VCPU control register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,dcr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,itm);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iva);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,pta);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,ipsr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,isr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iip);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,ifa);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,itir);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iipa);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,ifs);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iim);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iha);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,lid);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
{
    *pval = guest_read_vivr(vcpu);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,tpr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
{
    *pval = 0L;  // reads of eoi always return 0
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,irr[0]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,irr[1]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,irr[2]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,irr[3]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,itv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,pmv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,cmcv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,lrr0);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,lrr1);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT
vmx_vcpu_set_dcr(VCPU *vcpu, u64 val)
{
    u64 mdcr, mask;
    VPD_CR(vcpu,dcr) = val;
    /* All vDCR bits will go to mDCR, except for be/pp bit */
    mdcr = ia64_get_dcr();
    mask = IA64_DCR_BE | IA64_DCR_PP;
    mdcr = (mdcr & mask) | (val & (~mask));
    ia64_set_dcr(mdcr);

    return IA64_NO_FAULT;
}
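
/*
 * Clarifying note (added, not in the original source): with
 * mask = IA64_DCR_BE | IA64_DCR_PP, the merge above keeps the machine's
 * current be/pp bits and takes every other DCR bit from the guest-supplied
 * value, which is what the "except for be/pp" comment describes.
 */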
static inline
IA64FAULT
vmx_vcpu_set_itm(VCPU *vcpu, u64 val)
{
    vtime_t *vtm;

    vtm = &(vcpu->arch.arch_vmx.vtm);
    VPD_CR(vcpu,itm) = val;
    vtm_interruption_update(vcpu, vtm);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iva(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iva) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_pta(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,pta) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_ipsr(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,ipsr) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_isr(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,isr) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iip(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iip) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_ifa(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,ifa) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_itir(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,itir) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iipa(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iipa) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_ifs(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,ifs) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iim(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iim) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iha(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iha) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,lid) = val;
    return IA64_NO_FAULT;
}

extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);

static inline
IA64FAULT
vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
{
    guest_write_eoi(vcpu);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_itv(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,itv) = val;
    vtm_set_itv(vcpu);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_pmv(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,pmv) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_cmcv(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,cmcv) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lrr0(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,lrr0) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,lrr1) = val;
    return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU privileged application register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val)
{
    vtm_set_itc(vcpu, val);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu, UINT64 *val)
{
    *val = vtm_get_itc(vcpu);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    *pval = VMX(vcpu,vrr[reg>>61]);
    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU debug breakpoint register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented DBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    if (reg > 4) {
        panic("there are only five cpuid registers");
    }
    *pval = VMX_VPD(vcpu,vcpuid[reg]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented DBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_dbr(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented IBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_ibr(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented DBRs return a reserved register fault
    UINT64 val = ia64_get_dbr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented IBRs return a reserved register fault
    UINT64 val = ia64_get_ibr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU performance monitor register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMC registers are discarded
    ia64_set_pmc(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMD registers are discarded
    ia64_set_pmd(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMC registers return zero
    UINT64 val = (UINT64)ia64_get_pmc(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMD registers return zero
    UINT64 val = (UINT64)ia64_get_pmd(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU banked general register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
{
    VMX_VPD(vcpu,vpsr) &= ~IA64_PSR_BN;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
{
    VMX_VPD(vcpu,vpsr) |= IA64_PSR_BN;
    return (IA64_NO_FAULT);
}

#define redistribute_rid(rid) (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))

static inline unsigned long
vmx_vrrtomrr(VCPU *vcpu, unsigned long val)
{
    ia64_rr rr;
    u64 rid;

    rr.rrval = val;
    rid = (((u64)vcpu->domain->domain_id) << DOMAIN_RID_SHIFT) + rr.rid;
    rr.rid = redistribute_rid(rid);
    rr.ve = 1;
    return rr.rrval;
}
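
/*
 * Illustration (added, not in the original source): redistribute_rid()
 * swaps the low two bytes of the rid while keeping the upper bits, e.g.
 *     redistribute_rid(0x12AB34) == 0x1234AB
 * vmx_vrrtomrr() therefore builds the machine rid from the domain id plus
 * the guest rid, byte-swaps its low 16 bits (presumably to spread
 * neighbouring guest rids apart), and forces the VHPT walker enable bit
 * (rr.ve = 1) in the resulting machine region register value.
 */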
#endif /* _XEN_IA64_VMX_VCPU_H */