ia64/xen-unstable

diff xen/include/asm-ia64/vmx_vcpu.h @ 4993:0fadb891522c

bitkeeper revision 1.1389.23.1 (428b9f5bAkrt96p_iquJGyvXJzCz7A)

First VT-i code drop
author adsharma@linux-t08.sc.intel.com
date Wed May 18 20:02:35 2005 +0000 (2005-05-18)
parents
children 541012edd6e5
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Wed May 18 20:02:35 2005 +0000
     1.3 @@ -0,0 +1,598 @@
     1.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     1.5 +/*
     1.6 + * vmx_vcpu.h:
     1.7 + * Copyright (c) 2005, Intel Corporation.
     1.8 + *
     1.9 + * This program is free software; you can redistribute it and/or modify it
    1.10 + * under the terms and conditions of the GNU General Public License,
    1.11 + * version 2, as published by the Free Software Foundation.
    1.12 + *
    1.13 + * This program is distributed in the hope it will be useful, but WITHOUT
    1.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    1.16 + * more details.
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License along with
    1.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    1.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    1.21 + *
    1.22 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
    1.23 + *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
    1.24 + */
    1.25 +
    1.26 +#ifndef _XEN_IA64_VMX_VCPU_H
    1.27 +#define _XEN_IA64_VMX_VCPU_H
    1.28 +
    1.29 +
    1.30 +#include <xen/sched.h>
    1.31 +#include <asm/ia64_int.h>
    1.32 +#include <asm/vmx_vpd.h>
    1.33 +#include <asm/ptrace.h>
    1.34 +#include <asm/regs.h>
    1.35 +#include <asm/regionreg.h>
    1.36 +#include <asm/types.h>
    1.37 +#include <asm/vcpu.h>
    1.38 +
    1.39 +#define VRN_SHIFT    61
    1.40 +#define VRN0    0x0UL
    1.41 +#define VRN1    0x1UL
    1.42 +#define VRN2    0x2UL
    1.43 +#define VRN3    0x3UL
    1.44 +#define VRN4    0x4UL
    1.45 +#define VRN5    0x5UL
    1.46 +#define VRN6    0x6UL
    1.47 +#define VRN7    0x7UL
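These VRN constants name the eight virtual region numbers carried in bits 63:61 of a virtual address. A minimal sketch of that relationship (hypothetical helper names, not part of this changeset):

    /* Hypothetical helpers: the VRN is the top three bits of a virtual
     * address, e.g. vrn_base(VRN7) == 0xe000000000000000. */
    static inline u64 vrn_of_vadr(u64 vadr) { return vadr >> VRN_SHIFT; }
    static inline u64 vrn_base(u64 vrn)     { return vrn << VRN_SHIFT; }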
    1.48 +
     1.49 +// This definition of vcpu_regs does not work if a kernel stack is present
    1.50 +#define	vcpu_regs(vcpu) (((struct pt_regs *) ((char *) (vcpu) + IA64_STK_OFFSET)) - 1)
    1.51 +#define	VMX_VPD(x,y)	((x)->arch.arch_vmx.vpd->y)
    1.52 +
    1.53 +#define VMX(x,y)  ((x)->arch.arch_vmx.y)
    1.54 +
    1.55 +#define VPD_CR(x,y) (((cr_t*)VMX_VPD(x,vcr))->y)
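Expanding the accessor macros for one field shows how they nest; this is a mechanical expansion of the definitions above, with dcr used only as an example field:

    /* VPD_CR(vcpu,dcr)
     *   == ((cr_t*)VMX_VPD(vcpu,vcr))->dcr
     *   == ((cr_t*)((vcpu)->arch.arch_vmx.vpd->vcr))->dcr
     */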
    1.56 +
    1.57 +#define VMM_RR_SHIFT    20
    1.58 +#define VMM_RR_MASK     ((1UL<<VMM_RR_SHIFT)-1)
     1.59 +#define VRID_2_MRID(vcpu,rid)  (((rid) & VMM_RR_MASK) | \
     1.60 +                (((vcpu)->domain->id) << VMM_RR_SHIFT))
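A worked example of the rid packing (hypothetical values): the macro keeps the low VMM_RR_SHIFT (20) bits of the virtual rid and stacks the domain id above them.

    /* Hypothetical: domain id 3, virtual rid 0x12345:
     *   (0x12345 & 0xfffff) | (3 << 20) == 0x312345
     */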
    1.61 +extern u64 indirect_reg_igfld_MASK ( int type, int index, u64 value);
    1.62 +extern u64 cr_igfld_mask (int index, u64 value);
    1.63 +extern int check_indirect_reg_rsv_fields ( int type, int index, u64 value );
    1.64 +extern u64 set_isr_ei_ni (VCPU *vcpu);
    1.65 +extern u64 set_isr_for_na_inst(VCPU *vcpu, int op);
    1.66 +
    1.67 +
     1.68 +/* The declarations below make up the CONFIG_VTI API */
    1.69 +extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value);
    1.70 +extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
    1.71 +extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, UINT64 value);
    1.72 +extern IA64FAULT vmx_vcpu_cover(VCPU *vcpu);
    1.73 +extern thash_cb_t *vmx_vcpu_get_vtlb(VCPU *vcpu);
    1.74 +extern thash_cb_t *vmx_vcpu_get_vhpt(VCPU *vcpu);
    1.75 +ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
    1.76 +extern IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val);
    1.78 +extern IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval);
    1.79 +IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
    1.80 +extern IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
    1.81 +extern IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa);
    1.82 +extern IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
    1.83 +extern IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx);
    1.84 +extern IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 ps);
    1.85 +extern IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 ps);
    1.86 +extern IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps);
    1.87 +extern IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UINT64 vadr);
    1.88 +extern IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 vadr, UINT64 ps);
    1.89 +extern IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 vadr,UINT64 ps);
    1.90 +extern IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
    1.91 +extern u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa);
    1.92 +extern IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, UINT64 vadr, UINT64 *pval);
    1.93 +extern IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT64 vadr, UINT64 *padr);
    1.94 +extern IA64FAULT vmx_vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
    1.95 +extern IA64FAULT vmx_vcpu_rfi(VCPU *vcpu);
    1.96 +extern UINT64 vmx_vcpu_get_psr(VCPU *vcpu);
    1.97 +extern IA64FAULT vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val);
    1.98 +extern IA64FAULT vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat);
    1.99 +extern IA64FAULT vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val);
   1.100 +extern IA64FAULT vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat);
   1.101 +extern IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24);
   1.102 +extern IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24);
   1.103 +extern IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val);
   1.104 +extern void vtm_init(VCPU *vcpu);
   1.105 +extern uint64_t vtm_get_itc(VCPU *vcpu);
   1.106 +extern void vtm_set_itc(VCPU *vcpu, uint64_t new_itc);
   1.107 +extern void vtm_set_itv(VCPU *vcpu);
   1.108 +extern void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm);
   1.109 +extern void vtm_domain_out(VCPU *vcpu);
   1.110 +extern void vtm_domain_in(VCPU *vcpu);
   1.111 +extern void vlsapic_reset(VCPU *vcpu);
   1.112 +extern int vmx_check_pending_irq(VCPU *vcpu);
   1.113 +extern void guest_write_eoi(VCPU *vcpu);
   1.114 +extern uint64_t guest_read_vivr(VCPU *vcpu);
   1.115 +extern void vmx_inject_vhpi(VCPU *vcpu, u8 vec);
   1.116 +extern void vmx_vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector);
   1.117 +extern struct virutal_platform_def *vmx_vcpu_get_plat(VCPU *vcpu);
   1.118 +extern void memread_p(VCPU *vcpu, void *src, void *dest, size_t s);
   1.119 +extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s);
   1.120 +extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, void *src, void *dest, size_t s);
   1.121 +extern void memwrite_p(VCPU *vcpu, void *src, void *dest, size_t s);
   1.122 +
   1.123 +
   1.124 +/**************************************************************************
   1.125 + VCPU control register access routines
   1.126 +**************************************************************************/
   1.127 +
   1.128 +static inline
   1.129 +IA64FAULT vmx_vcpu_get_dcr(VCPU *vcpu, UINT64 *pval)
   1.130 +{
   1.131 +    *pval = VPD_CR(vcpu,dcr);
   1.132 +    return (IA64_NO_FAULT);
   1.133 +}
   1.134 +
   1.135 +static inline
   1.136 +IA64FAULT vmx_vcpu_get_itm(VCPU *vcpu, UINT64 *pval)
   1.137 +{
   1.138 +    *pval = VPD_CR(vcpu,itm);
   1.139 +    return (IA64_NO_FAULT);
   1.140 +}
   1.141 +
   1.142 +static inline
   1.143 +IA64FAULT vmx_vcpu_get_iva(VCPU *vcpu, UINT64 *pval)
   1.144 +{
   1.145 +    *pval = VPD_CR(vcpu,iva);
   1.146 +    return (IA64_NO_FAULT);
   1.147 +}
   1.148 +static inline
   1.149 +IA64FAULT vmx_vcpu_get_pta(VCPU *vcpu, UINT64 *pval)
   1.150 +{
   1.151 +    *pval = VPD_CR(vcpu,pta);
   1.152 +    return (IA64_NO_FAULT);
   1.153 +}
   1.154 +static inline
   1.155 +IA64FAULT vmx_vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval)
   1.156 +{
   1.157 +    *pval = VPD_CR(vcpu,ipsr);
   1.158 +    return (IA64_NO_FAULT);
   1.159 +}
   1.160 +
   1.161 +static inline
   1.162 +IA64FAULT vmx_vcpu_get_isr(VCPU *vcpu, UINT64 *pval)
   1.163 +{
   1.164 +    *pval = VPD_CR(vcpu,isr);
   1.165 +    return (IA64_NO_FAULT);
   1.166 +}
   1.167 +static inline
   1.168 +IA64FAULT vmx_vcpu_get_iip(VCPU *vcpu, UINT64 *pval)
   1.169 +{
   1.170 +    *pval = VPD_CR(vcpu,iip);
   1.171 +    return (IA64_NO_FAULT);
   1.172 +}
   1.173 +static inline
   1.174 +IA64FAULT vmx_vcpu_get_ifa(VCPU *vcpu, UINT64 *pval)
   1.175 +{
   1.176 +    *pval = VPD_CR(vcpu,ifa);
   1.177 +    return (IA64_NO_FAULT);
   1.178 +}
   1.179 +
   1.180 +static inline
   1.181 +IA64FAULT vmx_vcpu_get_itir(VCPU *vcpu, UINT64 *pval)
   1.182 +{
   1.183 +    *pval = VPD_CR(vcpu,itir);
   1.184 +    return (IA64_NO_FAULT);
   1.185 +}
   1.186 +static inline
   1.187 +IA64FAULT vmx_vcpu_get_iipa(VCPU *vcpu, UINT64 *pval)
   1.188 +{
   1.189 +    *pval = VPD_CR(vcpu,iipa);
   1.190 +    return (IA64_NO_FAULT);
   1.191 +}
   1.192 +static inline
   1.193 +IA64FAULT vmx_vcpu_get_ifs(VCPU *vcpu, UINT64 *pval)
   1.194 +{
   1.195 +    *pval = VPD_CR(vcpu,ifs);
   1.196 +    return (IA64_NO_FAULT);
   1.197 +}
   1.198 +static inline
   1.199 +IA64FAULT vmx_vcpu_get_iim(VCPU *vcpu, UINT64 *pval)
   1.200 +{
   1.201 +    *pval = VPD_CR(vcpu,iim);
   1.202 +    return (IA64_NO_FAULT);
   1.203 +}
   1.204 +static inline
   1.205 +IA64FAULT vmx_vcpu_get_iha(VCPU *vcpu, UINT64 *pval)
   1.206 +{
   1.207 +    *pval = VPD_CR(vcpu,iha);
   1.208 +    return (IA64_NO_FAULT);
   1.209 +}
   1.210 +static inline
   1.211 +IA64FAULT vmx_vcpu_get_lid(VCPU *vcpu, UINT64 *pval)
   1.212 +{
   1.213 +    *pval = VPD_CR(vcpu,lid);
   1.214 +    return (IA64_NO_FAULT);
   1.215 +}
   1.216 +static inline
   1.217 +IA64FAULT vmx_vcpu_get_ivr(VCPU *vcpu, UINT64 *pval)
   1.218 +{
   1.219 +    *pval = guest_read_vivr(vcpu);
   1.220 +    return (IA64_NO_FAULT);
   1.221 +}
   1.222 +static inline
   1.223 +IA64FAULT vmx_vcpu_get_tpr(VCPU *vcpu, UINT64 *pval)
   1.224 +{
   1.225 +    *pval = VPD_CR(vcpu,tpr);
   1.226 +    return (IA64_NO_FAULT);
   1.227 +}
   1.228 +static inline
   1.229 +IA64FAULT vmx_vcpu_get_eoi(VCPU *vcpu, UINT64 *pval)
   1.230 +{
   1.231 +    *pval = 0L;  // reads of eoi always return 0
   1.232 +    return (IA64_NO_FAULT);
   1.233 +}
   1.234 +static inline
   1.235 +IA64FAULT vmx_vcpu_get_irr0(VCPU *vcpu, UINT64 *pval)
   1.236 +{
   1.237 +    *pval = VPD_CR(vcpu,irr[0]);
   1.238 +    return (IA64_NO_FAULT);
   1.239 +}
   1.240 +static inline
   1.241 +IA64FAULT vmx_vcpu_get_irr1(VCPU *vcpu, UINT64 *pval)
   1.242 +{
   1.243 +    *pval = VPD_CR(vcpu,irr[1]);
   1.244 +    return (IA64_NO_FAULT);
   1.245 +}
   1.246 +static inline
   1.247 +IA64FAULT vmx_vcpu_get_irr2(VCPU *vcpu, UINT64 *pval)
   1.248 +{
   1.249 +    *pval = VPD_CR(vcpu,irr[2]);
   1.250 +    return (IA64_NO_FAULT);
   1.251 +}
   1.252 +static inline
   1.253 +IA64FAULT vmx_vcpu_get_irr3(VCPU *vcpu, UINT64 *pval)
   1.254 +{
   1.255 +    *pval = VPD_CR(vcpu,irr[3]);
   1.256 +    return (IA64_NO_FAULT);
   1.257 +}
   1.258 +static inline
   1.259 +IA64FAULT vmx_vcpu_get_itv(VCPU *vcpu, UINT64 *pval)
   1.260 +{
   1.261 +    *pval = VPD_CR(vcpu,itv);
   1.262 +    return (IA64_NO_FAULT);
   1.263 +}
   1.264 +static inline
   1.265 +IA64FAULT vmx_vcpu_get_pmv(VCPU *vcpu, UINT64 *pval)
   1.266 +{
   1.267 +    *pval = VPD_CR(vcpu,pmv);
   1.268 +    return (IA64_NO_FAULT);
   1.269 +}
   1.270 +static inline
   1.271 +IA64FAULT vmx_vcpu_get_cmcv(VCPU *vcpu, UINT64 *pval)
   1.272 +{
   1.273 +    *pval = VPD_CR(vcpu,cmcv);
   1.274 +    return (IA64_NO_FAULT);
   1.275 +}
   1.276 +static inline
   1.277 +IA64FAULT vmx_vcpu_get_lrr0(VCPU *vcpu, UINT64 *pval)
   1.278 +{
   1.279 +    *pval = VPD_CR(vcpu,lrr0);
   1.280 +    return (IA64_NO_FAULT);
   1.281 +}
   1.282 +static inline
   1.283 +IA64FAULT vmx_vcpu_get_lrr1(VCPU *vcpu, UINT64 *pval)
    1.284 +{
    1.284 +    *pval = VPD_CR(vcpu,lrr1);
   1.285 +    return (IA64_NO_FAULT);
   1.286 +}
   1.287 +static inline
   1.288 +IA64FAULT
   1.289 +vmx_vcpu_set_dcr(VCPU *vcpu, u64 val)
   1.290 +{
   1.291 +    u64 mdcr, mask;
   1.292 +    VPD_CR(vcpu,dcr)=val;
    1.293 +    /* All vDCR bits go to mDCR, except for the be/pp bits */
   1.294 +    mdcr = ia64_get_dcr();
   1.295 +    mask = IA64_DCR_BE | IA64_DCR_PP;
   1.296 +    mdcr = ( mdcr & mask ) | ( val & (~mask) );
   1.297 +    ia64_set_dcr( mdcr);
   1.298 +
   1.299 +    return IA64_NO_FAULT;
   1.300 +}
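As a worked example of the masking above (hypothetical values, assuming be/pp occupy bits 0-1 so mask == 0x3): with mdcr == 0x5 and a guest write of val == 0xa, (0x5 & 0x3) | (0xa & ~0x3) == 0x9, i.e. the machine DCR keeps its own be/pp bits while every other bit follows the guest.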
   1.301 +
   1.302 +static inline
   1.303 +IA64FAULT
   1.304 +vmx_vcpu_set_itm(VCPU *vcpu, u64 val)
   1.305 +{
   1.306 +    vtime_t     *vtm;
   1.307 +    
   1.308 +    vtm=&(vcpu->arch.arch_vmx.vtm);
   1.309 +    VPD_CR(vcpu,itm)=val;
   1.310 +    vtm_interruption_update(vcpu, vtm);
   1.311 +    return IA64_NO_FAULT;
   1.312 +}
   1.313 +static inline
   1.314 +IA64FAULT
   1.315 +vmx_vcpu_set_iva(VCPU *vcpu, u64 val)
   1.316 +{
   1.317 +    VPD_CR(vcpu,iva)=val;
   1.318 +    return IA64_NO_FAULT;
   1.319 +}
   1.320 +
   1.321 +static inline
   1.322 +IA64FAULT
   1.323 +vmx_vcpu_set_pta(VCPU *vcpu, u64 val)
   1.324 +{
   1.325 +    VPD_CR(vcpu,pta)=val;
   1.326 +    return IA64_NO_FAULT;
   1.327 +}
   1.328 +
   1.329 +static inline
   1.330 +IA64FAULT
   1.331 +vmx_vcpu_set_ipsr(VCPU *vcpu, u64 val)
   1.332 +{
   1.333 +    VPD_CR(vcpu,ipsr)=val;
   1.334 +    return IA64_NO_FAULT;
   1.335 +}
   1.336 +
   1.337 +static inline
   1.338 +IA64FAULT
   1.339 +vmx_vcpu_set_isr(VCPU *vcpu, u64 val)
   1.340 +{
   1.341 +    VPD_CR(vcpu,isr)=val;
   1.342 +    return IA64_NO_FAULT;
   1.343 +}
   1.344 +
   1.345 +static inline
   1.346 +IA64FAULT
   1.347 +vmx_vcpu_set_iip(VCPU *vcpu, u64 val)
   1.348 +{
   1.349 +    VPD_CR(vcpu,iip)=val;
   1.350 +    return IA64_NO_FAULT;
   1.351 +}
   1.352 +
   1.353 +static inline
   1.354 +IA64FAULT
   1.355 +vmx_vcpu_set_ifa(VCPU *vcpu, u64 val)
   1.356 +{
   1.357 +    VPD_CR(vcpu,ifa)=val;
   1.358 +    return IA64_NO_FAULT;
   1.359 +}
   1.360 +
   1.361 +static inline
   1.362 +IA64FAULT
   1.363 +vmx_vcpu_set_itir(VCPU *vcpu, u64 val)
   1.364 +{
   1.365 +    VPD_CR(vcpu,itir)=val;
   1.366 +    return IA64_NO_FAULT;
   1.367 +}
   1.368 +
   1.369 +static inline
   1.370 +IA64FAULT
   1.371 +vmx_vcpu_set_iipa(VCPU *vcpu, u64 val)
   1.372 +{
   1.373 +    VPD_CR(vcpu,iipa)=val;
   1.374 +    return IA64_NO_FAULT;
   1.375 +}
   1.376 +
   1.377 +static inline
   1.378 +IA64FAULT
   1.379 +vmx_vcpu_set_ifs(VCPU *vcpu, u64 val)
   1.380 +{
   1.381 +    VPD_CR(vcpu,ifs)=val;
   1.382 +    return IA64_NO_FAULT;
   1.383 +}
   1.384 +static inline
   1.385 +IA64FAULT
   1.386 +vmx_vcpu_set_iim(VCPU *vcpu, u64 val)
   1.387 +{
   1.388 +    VPD_CR(vcpu,iim)=val;
   1.389 +    return IA64_NO_FAULT;
   1.390 +}
   1.391 +
   1.392 +static inline
   1.393 +IA64FAULT
   1.394 +vmx_vcpu_set_iha(VCPU *vcpu, u64 val)
   1.395 +{
   1.396 +    VPD_CR(vcpu,iha)=val;
   1.397 +    return IA64_NO_FAULT;
   1.398 +}
   1.399 +
   1.400 +static inline
   1.401 +IA64FAULT
   1.402 +vmx_vcpu_set_lid(VCPU *vcpu, u64 val)
   1.403 +{
   1.404 +    VPD_CR(vcpu,lid)=val;
   1.405 +    return IA64_NO_FAULT;
   1.406 +}
   1.407 +static inline
   1.408 +IA64FAULT
   1.409 +vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
   1.410 +{
   1.411 +    VPD_CR(vcpu,tpr)=val;
   1.412 +    //TODO
   1.413 +    return IA64_NO_FAULT;
   1.414 +}
   1.415 +static inline
   1.416 +IA64FAULT
   1.417 +vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
   1.418 +{
   1.419 +    guest_write_eoi(vcpu);
   1.420 +    return IA64_NO_FAULT;
   1.421 +}
   1.422 +
   1.423 +static inline
   1.424 +IA64FAULT
   1.425 +vmx_vcpu_set_itv(VCPU *vcpu, u64 val)
   1.426 +{
   1.427 +
   1.428 +    VPD_CR(vcpu,itv)=val;
   1.429 +    vtm_set_itv(vcpu);
   1.430 +    return IA64_NO_FAULT;
   1.431 +}
   1.432 +static inline
   1.433 +IA64FAULT
   1.434 +vmx_vcpu_set_pmv(VCPU *vcpu, u64 val)
   1.435 +{
   1.436 +    VPD_CR(vcpu,pmv)=val;
   1.437 +    return IA64_NO_FAULT;
   1.438 +}
   1.439 +static inline
   1.440 +IA64FAULT
   1.441 +vmx_vcpu_set_cmcv(VCPU *vcpu, u64 val)
   1.442 +{
   1.443 +    VPD_CR(vcpu,cmcv)=val;
   1.444 +    return IA64_NO_FAULT;
   1.445 +}
   1.446 +static inline
   1.447 +IA64FAULT
   1.448 +vmx_vcpu_set_lrr0(VCPU *vcpu, u64 val)
   1.449 +{
   1.450 +    VPD_CR(vcpu,lrr0)=val;
   1.451 +    return IA64_NO_FAULT;
   1.452 +}
   1.453 +static inline
   1.454 +IA64FAULT
   1.455 +vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val)
   1.456 +{
   1.457 +    VPD_CR(vcpu,lrr1)=val;
   1.458 +    return IA64_NO_FAULT;
   1.459 +}
   1.460 +
   1.464 +/**************************************************************************
   1.465 + VCPU privileged application register access routines
   1.466 +**************************************************************************/
   1.467 +static inline
   1.468 +IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val)
   1.469 +{
   1.470 +    vtm_set_itc(vcpu, val);
   1.471 +    return  IA64_NO_FAULT;
   1.472 +}
   1.473 +static inline
   1.474 +IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu,UINT64 *val)
   1.475 +{
   1.476 +    *val = vtm_get_itc(vcpu);
   1.477 +    return  IA64_NO_FAULT;
   1.478 +}
   1.479 +static inline
   1.480 +IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
   1.481 +{
    1.482 +    *pval = VMX(vcpu,vrr[reg>>VRN_SHIFT]);
   1.483 +    return (IA64_NO_FAULT);
   1.484 +}
   1.485 +/**************************************************************************
   1.486 + VCPU debug breakpoint register access routines
   1.487 +**************************************************************************/
   1.488 +
   1.489 +static inline
   1.490 +IA64FAULT vmx_vcpu_get_cpuid(VCPU *vcpu, UINT64 reg, UINT64 *pval)
   1.491 +{
   1.492 +    // TODO: unimplemented DBRs return a reserved register fault
   1.493 +    // TODO: Should set Logical CPU state, not just physical
   1.494 +    if(reg > 4){
   1.495 +        panic("there are only five cpuid registers");
   1.496 +    }
   1.497 +    *pval=VMX_VPD(vcpu,vcpuid[reg]);
   1.498 +    return (IA64_NO_FAULT);
   1.499 +}
   1.500 +
   1.501 +
   1.502 +static inline
   1.503 +IA64FAULT vmx_vcpu_set_dbr(VCPU *vcpu, UINT64 reg, UINT64 val)
   1.504 +{
   1.505 +    // TODO: unimplemented DBRs return a reserved register fault
   1.506 +    // TODO: Should set Logical CPU state, not just physical
   1.507 +    ia64_set_dbr(reg,val);
   1.508 +    return (IA64_NO_FAULT);
   1.509 +}
   1.510 +static inline
   1.511 +IA64FAULT vmx_vcpu_set_ibr(VCPU *vcpu, UINT64 reg, UINT64 val)
   1.512 +{
   1.513 +    // TODO: unimplemented IBRs return a reserved register fault
   1.514 +    // TODO: Should set Logical CPU state, not just physical
   1.515 +    ia64_set_ibr(reg,val);
   1.516 +    return (IA64_NO_FAULT);
   1.517 +}
   1.518 +static inline
   1.519 +IA64FAULT vmx_vcpu_get_dbr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
   1.520 +{
   1.521 +    // TODO: unimplemented DBRs return a reserved register fault
   1.522 +    UINT64 val = ia64_get_dbr(reg);
   1.523 +    *pval = val;
   1.524 +    return (IA64_NO_FAULT);
   1.525 +}
   1.526 +static inline
   1.527 +IA64FAULT vmx_vcpu_get_ibr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
   1.528 +{
   1.529 +    // TODO: unimplemented IBRs return a reserved register fault
   1.530 +    UINT64 val = ia64_get_ibr(reg);
   1.531 +    *pval = val;
   1.532 +    return (IA64_NO_FAULT);
   1.533 +}
   1.534 +
   1.535 +/**************************************************************************
   1.536 + VCPU performance monitor register access routines
   1.537 +**************************************************************************/
   1.538 +static inline
   1.539 +IA64FAULT vmx_vcpu_set_pmc(VCPU *vcpu, UINT64 reg, UINT64 val)
   1.540 +{
   1.541 +    // TODO: Should set Logical CPU state, not just physical
   1.542 +    // NOTE: Writes to unimplemented PMC registers are discarded
   1.543 +    ia64_set_pmc(reg,val);
   1.544 +    return (IA64_NO_FAULT);
   1.545 +}
   1.546 +static inline
   1.547 +IA64FAULT vmx_vcpu_set_pmd(VCPU *vcpu, UINT64 reg, UINT64 val)
   1.548 +{
   1.549 +    // TODO: Should set Logical CPU state, not just physical
   1.550 +    // NOTE: Writes to unimplemented PMD registers are discarded
   1.551 +    ia64_set_pmd(reg,val);
   1.552 +    return (IA64_NO_FAULT);
   1.553 +}
   1.554 +static inline
   1.555 +IA64FAULT vmx_vcpu_get_pmc(VCPU *vcpu, UINT64 reg, UINT64 *pval)
   1.556 +{
   1.557 +    // NOTE: Reads from unimplemented PMC registers return zero
   1.558 +    UINT64 val = (UINT64)ia64_get_pmc(reg);
   1.559 +    *pval = val;
   1.560 +    return (IA64_NO_FAULT);
   1.561 +}
   1.562 +static inline
   1.563 +IA64FAULT vmx_vcpu_get_pmd(VCPU *vcpu, UINT64 reg, UINT64 *pval)
   1.564 +{
   1.565 +    // NOTE: Reads from unimplemented PMD registers return zero
   1.566 +    UINT64 val = (UINT64)ia64_get_pmd(reg);
   1.567 +    *pval = val;
   1.568 +    return (IA64_NO_FAULT);
   1.569 +}
   1.570 +
   1.571 +/**************************************************************************
   1.572 + VCPU banked general register access routines
   1.573 +**************************************************************************/
   1.574 +static inline
   1.575 +IA64FAULT vmx_vcpu_bsw0(VCPU *vcpu)
   1.576 +{
   1.577 +
   1.578 +    VMX_VPD(vcpu,vpsr) &= ~IA64_PSR_BN;
   1.579 +    return (IA64_NO_FAULT);
   1.580 +}
   1.581 +static inline
   1.582 +IA64FAULT vmx_vcpu_bsw1(VCPU *vcpu)
   1.583 +{
   1.584 +
   1.585 +    VMX_VPD(vcpu,vpsr) |= IA64_PSR_BN;
   1.586 +    return (IA64_NO_FAULT);
   1.587 +}
   1.588 +
   1.589 +#define redistribute_rid(rid)	(((rid) & ~0xffff) | (((rid) << 8) & 0xff00) | (((rid) >> 8) & 0xff))
   1.590 +static inline unsigned long
   1.591 +vmx_vrrtomrr(VCPU *vcpu,unsigned long val)
   1.592 +{
   1.593 +    ia64_rr rr;
   1.594 +    u64	  rid;
   1.595 +    rr.rrval=val;
   1.596 +    rid=(((u64)vcpu->domain->id)<<DOMAIN_RID_SHIFT) + rr.rid;
   1.597 +    rr.rid = redistribute_rid(rid);
   1.598 +    rr.ve=1;
   1.599 +    return rr.rrval;
   1.600 +}
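For reference, redistribute_rid swaps the two low bytes of the rid and leaves the upper bits alone; a worked example:

    /* rid == 0x123456:
     *   (0x123456 & ~0xffff)       == 0x120000
     *   ((0x123456 << 8) & 0xff00) == 0x5600
     *   ((0x123456 >> 8) & 0xff)   == 0x34
     * result: 0x125634
     */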
   1.601 +#endif