ia64/xen-unstable

xen/include/asm-ia64/vmx_vcpu.h @ 4993:0fadb891522c

bitkeeper revision 1.1389.23.1 (428b9f5bAkrt96p_iquJGyvXJzCz7A)

First VT-i code drop
author adsharma@linux-t08.sc.intel.com
date Wed May 18 20:02:35 2005 +0000 (2005-05-18)
parents (none)
children 541012edd6e5
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.h:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */

#ifndef _XEN_IA64_VMX_VCPU_H
#define _XEN_IA64_VMX_VCPU_H

#include <xen/sched.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vpd.h>
#include <asm/ptrace.h>
#include <asm/regs.h>
#include <asm/regionreg.h>
#include <asm/types.h>
#include <asm/vcpu.h>

#define VRN_SHIFT   61
#define VRN0        0x0UL
#define VRN1        0x1UL
#define VRN2        0x2UL
#define VRN3        0x3UL
#define VRN4        0x4UL
#define VRN5        0x5UL
#define VRN6        0x6UL
#define VRN7        0x7UL
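
/*
 * Illustrative note (editor's sketch, not part of the original drop):
 * the region number of an ia64 virtual address is simply its top three
 * bits, so for any address va,
 *
 *     (va >> VRN_SHIFT)        // yields one of VRN0 .. VRN7
 *
 * identifies the region register that translates it.
 */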

// this def for vcpu_regs won't work if kernel stack is present
#define vcpu_regs(vcpu) (((struct pt_regs *) ((char *) (vcpu) + IA64_STK_OFFSET)) - 1)

#define VMX_VPD(x,y)    ((x)->arch.arch_vmx.vpd->y)
#define VMX(x,y)        ((x)->arch.arch_vmx.y)
#define VPD_CR(x,y)     (((cr_t*)VMX_VPD(x,vcr))->y)
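
/*
 * Reading guide (editor's sketch): VMX(x,y) names a field of the vcpu's
 * arch_vmx struct directly, VMX_VPD(x,y) reaches through its vpd pointer,
 * and VPD_CR(x,y) views the VPD's vcr area as a cr_t to pick out one
 * virtual control register.  For example,
 *
 *     VPD_CR(vcpu, itm)
 * expands to
 *     ((cr_t*)((vcpu)->arch.arch_vmx.vpd->vcr))->itm
 */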

#define VMM_RR_SHIFT    20
#define VMM_RR_MASK     ((1UL<<VMM_RR_SHIFT)-1)
/* Parenthesized as a whole so the macro expands safely inside larger expressions. */
#define VRID_2_MRID(vcpu,rid)   (((rid) & VMM_RR_MASK) | \
                                 (((vcpu)->domain->id) << VMM_RR_SHIFT))
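
/*
 * Worked example (illustrative): with VMM_RR_SHIFT == 20, a guest rid of
 * 0x12345 owned by domain 3 maps to
 *
 *     VRID_2_MRID(vcpu, 0x12345) == (0x12345 & 0xfffff) | (3 << 20)
 *                                == 0x312345
 *
 * i.e. the low 20 bits carry the guest rid and the domain id sits above.
 */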

extern u64 indirect_reg_igfld_MASK(int type, int index, u64 value);
extern u64 cr_igfld_mask(int index, u64 value);
extern int check_indirect_reg_rsv_fields(int type, int index, u64 value);
extern u64 set_isr_ei_ni(VCPU *vcpu);
extern u64 set_isr_for_na_inst(VCPU *vcpu, int op);

/* next all for CONFIG_VTI APIs definition */
extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value);
extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
extern void vmx_vcpu_set_psr_sync_mpsr(VCPU *vcpu, UINT64 value);
extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
extern thash_cb_t *vmx_vcpu_get_vtlb(VCPU *vcpu);
extern thash_cb_t *vmx_vcpu_get_vhpt(VCPU *vcpu);
extern ia64_rr vmx_vcpu_rr(VCPU *vcpu, UINT64 vadr);
extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
/* vmx_vcpu_get_rr is defined as a static inline below, not declared extern here. */
extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
extern IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
extern IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
extern IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
extern IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
extern IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr);
extern IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, UINT64 vadr, UINT64 ps);
extern IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
extern u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa);
extern IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
extern IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
extern IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
extern IA64FAULT vmx_vcpu_rfi(VCPU *vcpu);
extern UINT64 vmx_vcpu_get_psr(VCPU *vcpu);
extern IA64FAULT vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val);
extern IA64FAULT vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val, int nat);
extern IA64FAULT vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 *val);
extern IA64FAULT vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat);
extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24);
extern IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
extern void vtm_init(VCPU *vcpu);
extern uint64_t vtm_get_itc(VCPU *vcpu);
extern void vtm_set_itc(VCPU *vcpu, uint64_t new_itc);
extern void vtm_set_itv(VCPU *vcpu);
extern void vtm_interruption_update(VCPU *vcpu, vtime_t *vtm);
extern void vtm_domain_out(VCPU *vcpu);
extern void vtm_domain_in(VCPU *vcpu);
extern void vlsapic_reset(VCPU *vcpu);
extern int vmx_check_pending_irq(VCPU *vcpu);
extern void guest_write_eoi(VCPU *vcpu);
extern uint64_t guest_read_vivr(VCPU *vcpu);
extern void vmx_inject_vhpi(VCPU *vcpu, u8 vec);
extern void vmx_vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector);
extern struct virutal_platform_def *vmx_vcpu_get_plat(VCPU *vcpu);
extern void memread_p(VCPU *vcpu, void *src, void *dest, size_t s);
extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s);
extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s);
extern void memwrite_p(VCPU *vcpu, void *src, void *dest, size_t s);

/**************************************************************************
 VCPU control register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,dcr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,itm);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iva);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,pta);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,ipsr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,isr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iip);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,ifa);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,itir);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iipa);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,ifs);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iim);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,iha);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,lid);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
{
    *pval = guest_read_vivr(vcpu);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,tpr);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
{
    *pval = 0L;  // reads of eoi always return 0
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,irr[0]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,irr[1]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,irr[2]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,irr[3]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,itv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,pmv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,cmcv);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,lrr0);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
{
    *pval = VPD_CR(vcpu,lrr1);
    return (IA64_NO_FAULT);
}
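
/*
 * Usage sketch (editor's illustration, assuming a cr-read emulation path
 * and a hypothetical target register index tgt_reg): the getters above all
 * share one shape, so a mov-from-cr handler can forward the value straight
 * to the guest's destination register:
 *
 *     UINT64 val;
 *     if (vmx_vcpu_get_itm(vcpu, &val) == IA64_NO_FAULT)
 *         vmx_vcpu_set_gr(vcpu, tgt_reg, val, 0);
 */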

static inline
IA64FAULT
vmx_vcpu_set_dcr(VCPU *vcpu, u64 val)
{
    u64 mdcr, mask;
    VPD_CR(vcpu,dcr) = val;
    /* All vDCR bits will go to mDCR, except for the be/pp bits */
    mdcr = ia64_get_dcr();
    mask = IA64_DCR_BE | IA64_DCR_PP;
    mdcr = (mdcr & mask) | (val & ~mask);
    ia64_set_dcr(mdcr);

    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_itm(VCPU *vcpu, u64 val)
{
    vtime_t *vtm;

    vtm = &(vcpu->arch.arch_vmx.vtm);
    VPD_CR(vcpu,itm) = val;
    vtm_interruption_update(vcpu, vtm);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iva(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iva) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_pta(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,pta) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_ipsr(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,ipsr) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_isr(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,isr) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iip(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iip) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_ifa(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,ifa) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_itir(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,itir) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iipa(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iipa) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_ifs(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,ifs) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iim(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iim) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_iha(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,iha) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,lid) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,tpr) = val;
    //TODO
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
{
    guest_write_eoi(vcpu);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_itv(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,itv) = val;
    vtm_set_itv(vcpu);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_pmv(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,pmv) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_cmcv(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,cmcv) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lrr0(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,lrr0) = val;
    return IA64_NO_FAULT;
}

static inline
IA64FAULT
vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu,lrr1) = val;
    return IA64_NO_FAULT;
}

/**************************************************************************
 VCPU privileged application register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val)
{
    vtm_set_itc(vcpu, val);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu, UINT64 *val)
{
    *val = vtm_get_itc(vcpu);
    return IA64_NO_FAULT;
}

static inline
IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    *pval = VMX(vcpu,vrr[reg>>61]);
    return (IA64_NO_FAULT);
}
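
/*
 * Illustrative note (editor's): vrr[] is indexed by region number, so
 * reg>>61 (i.e. reg >> VRN_SHIFT) selects the virtual rr covering the
 * region that the address in 'reg' falls in.
 */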

/**************************************************************************
 VCPU debug breakpoint register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented cpuid registers should return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    if (reg > 4) {
        panic("there are only five cpuid registers");
    }
    *pval = VMX_VPD(vcpu,vcpuid[reg]);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented DBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_dbr(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: unimplemented IBRs return a reserved register fault
    // TODO: Should set Logical CPU state, not just physical
    ia64_set_ibr(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented DBRs return a reserved register fault
    UINT64 val = ia64_get_dbr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // TODO: unimplemented IBRs return a reserved register fault
    UINT64 val = ia64_get_ibr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU performance monitor register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMC registers are discarded
    ia64_set_pmc(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    // TODO: Should set Logical CPU state, not just physical
    // NOTE: Writes to unimplemented PMD registers are discarded
    ia64_set_pmd(reg, val);
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMC registers return zero
    UINT64 val = (UINT64)ia64_get_pmc(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    // NOTE: Reads from unimplemented PMD registers return zero
    UINT64 val = (UINT64)ia64_get_pmd(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU banked general register access routines
**************************************************************************/

static inline
IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
{
    VMX_VPD(vcpu,vpsr) &= ~IA64_PSR_BN;
    return (IA64_NO_FAULT);
}

static inline
IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
{
    VMX_VPD(vcpu,vpsr) |= IA64_PSR_BN;
    return (IA64_NO_FAULT);
}

#define redistribute_rid(rid)   (((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))

static inline unsigned long
vmx_vrrtomrr(VCPU *vcpu, unsigned long val)
{
    ia64_rr rr;
    u64     rid;

    rr.rrval = val;
    rid = (((u64)vcpu->domain->id) << DOMAIN_RID_SHIFT) + rr.rid;
    rr.rid = redistribute_rid(rid);
    rr.ve = 1;
    return rr.rrval;
}
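
/*
 * Worked example (illustrative): redistribute_rid() swaps the low two
 * bytes of the rid, spreading adjacent domain rids across the machine
 * rid space:
 *
 *     redistribute_rid(0x312345)
 *       == (0x312345 & ~0xffff) | ((0x312345 << 8) & 0xff00)
 *                               | ((0x312345 >> 8) & 0xff)
 *       == 0x310000 | 0x4500 | 0x23
 *       == 0x314523
 */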

#endif /* _XEN_IA64_VMX_VCPU_H */