ia64/xen-unstable

changeset 15887:0f16d41ebb0b

[IA64] cleanup vmx_virt.c
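
Pure coding-style cleanup; no functional change is intended in the
hunks shown below.  The patch aligns indentation and closing braces to
four spaces, adds a space after commas, inserts blank lines after
declarations and after #endif blocks, and drops redundant parentheses
around return expressions.

As a representative example (taken verbatim from vmx_emul_itr_d() in
the diff), a condition such as

    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){

becomes

    if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &slot)
        || vcpu_get_gr_nat(vcpu, inst.M45.r2, &pte)) {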

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author: Alex Williamson <alex.williamson@hp.com>
date: Mon Sep 17 11:04:29 2007 -0600
parents: 487df63c4ae9
children: 0902e4aae810
files: xen/arch/ia64/vmx/vmx_virt.c
     1.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Mon Sep 17 10:59:27 2007 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Mon Sep 17 11:04:29 2007 -0600
     1.3 @@ -202,6 +202,7 @@ static IA64FAULT vmx_emul_rfi(VCPU *vcpu
     1.4          return IA64_FAULT;
     1.5      }
     1.6  #endif // CHECK_FAULT
     1.7 +
     1.8      regs=vcpu_regs(vcpu);
     1.9      vpsr.val=regs->cr_ipsr;
    1.10      if ( vpsr.is == 1 ) {
    1.11 @@ -275,8 +276,9 @@ static IA64FAULT vmx_emul_ptc_l(VCPU *vc
    1.12          vcpu_set_isr(vcpu, isr.val);
    1.13          unimpl_daddr(vcpu);
    1.14          return IA64_FAULT;
    1.15 -   }
    1.16 +    }
    1.17  #endif // VMAL_NO_FAULT_CHECK
    1.18 +
    1.19      return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
    1.20  }
    1.21  
    1.22 @@ -333,8 +335,9 @@ static IA64FAULT vmx_emul_ptc_g(VCPU *vc
    1.23          vcpu_set_isr(vcpu, isr.val);
    1.24          unimpl_daddr(vcpu);
    1.25          return IA64_FAULT;
    1.26 -   }
    1.27 +    }
    1.28  #endif // VMAL_NO_FAULT_CHECK
    1.29 +
    1.30      return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
    1.31  }
    1.32  
    1.33 @@ -366,8 +369,9 @@ static IA64FAULT vmx_emul_ptc_ga(VCPU *v
    1.34          vcpu_set_isr(vcpu, isr.val);
    1.35          unimpl_daddr(vcpu);
    1.36          return IA64_FAULT;
    1.37 -   }
    1.38 +    }
    1.39  #endif // VMAL_NO_FAULT_CHECK
    1.40 +
    1.41      return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
    1.42  }
    1.43  
    1.44 @@ -568,40 +572,43 @@ static IA64FAULT vmx_emul_itr_d(VCPU *vc
    1.45  {
    1.46      u64 itir, ifa, pte, slot;
    1.47      ISR isr;
    1.48 +
    1.49  #ifdef  VMAL_NO_FAULT_CHECK
    1.50      IA64_PSR  vpsr;
    1.51 -    vpsr.val=vmx_vcpu_get_psr(vcpu);
    1.52 -    if ( vpsr.ic ) {
    1.53 +
    1.54 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
    1.55 +    if (vpsr.ic) {
    1.56          set_illegal_op_isr(vcpu);
    1.57          illegal_op(vcpu);
    1.58          return IA64_FAULT;
    1.59      }
    1.60 -    if ( vpsr.cpl != 0) {
    1.61 +    if (vpsr.cpl != 0) {
    1.62          /* Inject Privileged Operation fault into guest */
    1.63 -        set_privileged_operation_isr (vcpu, 0);
    1.64 +        set_privileged_operation_isr(vcpu, 0);
    1.65          privilege_op (vcpu);
    1.66          return IA64_FAULT;
    1.67      }
    1.68  #endif // VMAL_NO_FAULT_CHECK
    1.69 -    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
    1.70 +    if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &slot)
    1.71 +        || vcpu_get_gr_nat(vcpu, inst.M45.r2, &pte)) {
    1.72  #ifdef  VMAL_NO_FAULT_CHECK
    1.73 -        set_isr_reg_nat_consumption(vcpu,0,0);
    1.74 +        set_isr_reg_nat_consumption(vcpu, 0, 0);
    1.75          rnat_comsumption(vcpu);
    1.76          return IA64_FAULT;
    1.77  #endif // VMAL_NO_FAULT_CHECK
    1.78      }
    1.79  #ifdef  VMAL_NO_FAULT_CHECK
    1.80 -    if(is_reserved_rr_register(vcpu, slot)){
    1.81 +    if (is_reserved_rr_register(vcpu, slot)) {
    1.82          set_illegal_op_isr(vcpu);
    1.83          illegal_op(vcpu);
    1.84          return IA64_FAULT;
    1.85      }
    1.86  #endif // VMAL_NO_FAULT_CHECK
    1.87  
    1.88 -    if (vcpu_get_itir(vcpu,&itir)){
     1.89 +    if (vcpu_get_itir(vcpu, &itir)) {
    1.90          return(IA64_FAULT);
    1.91      }
    1.92 -    if (vcpu_get_ifa(vcpu,&ifa)){
    1.93 +    if (vcpu_get_ifa(vcpu, &ifa)) {
    1.94          return(IA64_FAULT);
    1.95      }
    1.96  #ifdef  VMAL_NO_FAULT_CHECK
    1.97 @@ -609,7 +616,7 @@ static IA64FAULT vmx_emul_itr_d(VCPU *vc
    1.98      	// TODO
    1.99      	return IA64_FAULT;
   1.100      }
   1.101 -    if (unimplemented_gva(vcpu,ifa) ) {
   1.102 +    if (unimplemented_gva(vcpu, ifa)) {
   1.103          isr.val = set_isr_ei_ni(vcpu);
   1.104          isr.code = IA64_RESERVED_REG_FAULT;
   1.105          vcpu_set_isr(vcpu, isr.val);
   1.106 @@ -626,7 +633,7 @@ static IA64FAULT vmx_emul_itr_d(VCPU *vc
   1.107          return IA64_FAULT;
   1.108      }
   1.109  
   1.110 -    return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
    1.111 +    return vmx_vcpu_itr_d(vcpu, slot, pte, itir, ifa);
   1.112  }
   1.113  
   1.114  static IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
   1.115 @@ -635,52 +642,53 @@ static IA64FAULT vmx_emul_itr_i(VCPU *vc
   1.116      ISR isr;
   1.117  #ifdef  VMAL_NO_FAULT_CHECK
   1.118      IA64_PSR  vpsr;
   1.119 -    vpsr.val=vmx_vcpu_get_psr(vcpu);
   1.120 -    if ( vpsr.ic ) {
   1.121 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
   1.122 +    if (vpsr.ic) {
   1.123          set_illegal_op_isr(vcpu);
   1.124          illegal_op(vcpu);
   1.125          return IA64_FAULT;
   1.126      }
   1.127 -    if ( vpsr.cpl != 0) {
   1.128 +    if (vpsr.cpl != 0) {
   1.129          /* Inject Privileged Operation fault into guest */
   1.130 -        set_privileged_operation_isr (vcpu, 0);
   1.131 -        privilege_op (vcpu);
   1.132 +        set_privileged_operation_isr(vcpu, 0);
   1.133 +        privilege_op(vcpu);
   1.134          return IA64_FAULT;
   1.135      }
   1.136  #endif // VMAL_NO_FAULT_CHECK
   1.137 -    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
   1.138 +    if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &slot)
   1.139 +        || vcpu_get_gr_nat(vcpu, inst.M45.r2, &pte)) {
   1.140  #ifdef  VMAL_NO_FAULT_CHECK
   1.141 -        set_isr_reg_nat_consumption(vcpu,0,0);
   1.142 +        set_isr_reg_nat_consumption(vcpu, 0, 0);
   1.143          rnat_comsumption(vcpu);
   1.144          return IA64_FAULT;
   1.145  #endif // VMAL_NO_FAULT_CHECK
   1.146      }
   1.147  #ifdef  VMAL_NO_FAULT_CHECK
   1.148 -    if(is_reserved_rr_register(vcpu, slot)){
   1.149 +    if (is_reserved_rr_register(vcpu, slot)) {
   1.150          set_illegal_op_isr(vcpu);
   1.151          illegal_op(vcpu);
   1.152          return IA64_FAULT;
   1.153      }
   1.154  #endif // VMAL_NO_FAULT_CHECK
   1.155  
   1.156 -    if (vcpu_get_itir(vcpu,&itir)){
   1.157 -        return(IA64_FAULT);
   1.158 +    if (vcpu_get_itir(vcpu, &itir)) {
   1.159 +        return IA64_FAULT;
   1.160      }
   1.161 -    if (vcpu_get_ifa(vcpu,&ifa)){
   1.162 -        return(IA64_FAULT);
   1.163 +    if (vcpu_get_ifa(vcpu, &ifa)) {
   1.164 +        return IA64_FAULT;
   1.165      }
   1.166  #ifdef  VMAL_NO_FAULT_CHECK
   1.167      if (is_reserved_itir_field(vcpu, itir)) {
   1.168      	// TODO
   1.169      	return IA64_FAULT;
   1.170      }
   1.171 -    if (unimplemented_gva(vcpu,ifa) ) {
   1.172 +    if (unimplemented_gva(vcpu, ifa)) {
   1.173          isr.val = set_isr_ei_ni(vcpu);
   1.174          isr.code = IA64_RESERVED_REG_FAULT;
   1.175          vcpu_set_isr(vcpu, isr.val);
   1.176          unimpl_daddr(vcpu);
   1.177          return IA64_FAULT;
   1.178 -   }
   1.179 +    }
   1.180  #endif // VMAL_NO_FAULT_CHECK
   1.181  
   1.182      if (slot >= NITRS) {
   1.183 @@ -690,8 +698,8 @@ static IA64FAULT vmx_emul_itr_i(VCPU *vc
   1.184          rsv_reg_field(vcpu);
   1.185          return IA64_FAULT;
   1.186      }
   1.187 - 
   1.188 -   return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
   1.189 +
   1.190 +    return vmx_vcpu_itr_i(vcpu, slot, pte, itir, ifa);
   1.191  }
   1.192  
   1.193  static IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst,
   1.194 @@ -701,8 +709,8 @@ static IA64FAULT itc_fault_check(VCPU *v
   1.195  
   1.196  #ifdef  VMAL_NO_FAULT_CHECK
   1.197      IA64_PSR  vpsr;
   1.198 -    vpsr.val=vmx_vcpu_get_psr(vcpu);
   1.199 -    if ( vpsr.ic ) {
   1.200 +    vpsr.val = vmx_vcpu_get_psr(vcpu);
   1.201 +    if (vpsr.ic) {
   1.202          set_illegal_op_isr(vcpu);
   1.203          illegal_op(vcpu);
   1.204          return IA64_FAULT;
   1.205 @@ -710,27 +718,27 @@ static IA64FAULT itc_fault_check(VCPU *v
   1.206  
   1.207      u64 fault;
   1.208      ISR isr;
   1.209 -    if ( vpsr.cpl != 0) {
   1.210 +    if (vpsr.cpl != 0) {
   1.211          /* Inject Privileged Operation fault into guest */
   1.212 -        set_privileged_operation_isr (vcpu, 0);
   1.213 -        privilege_op (vcpu);
   1.214 +        set_privileged_operation_isr(vcpu, 0);
   1.215 +        privilege_op(vcpu);
   1.216          return IA64_FAULT;
   1.217      }
   1.218  #endif // VMAL_NO_FAULT_CHECK
   1.219 -    ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pte);
    1.220 +    ret1 = vcpu_get_gr_nat(vcpu, inst.M45.r2, pte);
   1.221  #ifdef  VMAL_NO_FAULT_CHECK
   1.222 -    if( ret1 != IA64_NO_FAULT ){
   1.223 -        set_isr_reg_nat_consumption(vcpu,0,0);
   1.224 +    if (ret1 != IA64_NO_FAULT) {
   1.225 +        set_isr_reg_nat_consumption(vcpu, 0, 0);
   1.226          rnat_comsumption(vcpu);
   1.227          return IA64_FAULT;
   1.228      }
   1.229  #endif // VMAL_NO_FAULT_CHECK
   1.230  
   1.231 -    if (vcpu_get_itir(vcpu,itir)){
   1.232 -        return(IA64_FAULT);
   1.233 +    if (vcpu_get_itir(vcpu, itir)) {
   1.234 +        return IA64_FAULT;
   1.235      }
   1.236 -    if (vcpu_get_ifa(vcpu,ifa)){
   1.237 -        return(IA64_FAULT);
   1.238 +    if (vcpu_get_ifa(vcpu, ifa)) {
   1.239 +        return IA64_FAULT;
   1.240      }
   1.241  #ifdef  VMAL_NO_FAULT_CHECK
   1.242      if (unimplemented_gva(vcpu,ifa) ) {
   1.243 @@ -739,9 +747,9 @@ static IA64FAULT itc_fault_check(VCPU *v
   1.244          vcpu_set_isr(vcpu, isr.val);
   1.245          unimpl_daddr(vcpu);
   1.246          return IA64_FAULT;
   1.247 -   }
   1.248 +    }
   1.249  #endif // VMAL_NO_FAULT_CHECK
   1.250 -   return IA64_NO_FAULT;
   1.251 +    return IA64_NO_FAULT;
   1.252  }
   1.253  
   1.254  static IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
   1.255 @@ -752,7 +760,7 @@ static IA64FAULT vmx_emul_itc_d(VCPU *vc
   1.256      	return IA64_FAULT;
   1.257      }
   1.258  
   1.259 -   return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
   1.260 +    return vmx_vcpu_itc_d(vcpu, pte, itir, ifa);
   1.261  }
   1.262  
   1.263  static IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
   1.264 @@ -763,8 +771,7 @@ static IA64FAULT vmx_emul_itc_i(VCPU *vc
   1.265      	return IA64_FAULT;
   1.266      }
   1.267  
   1.268 -   return (vmx_vcpu_itc_i(vcpu,pte,itir,ifa));
   1.269 -
   1.270 +    return vmx_vcpu_itc_i(vcpu, pte, itir, ifa);
   1.271  }
   1.272  
   1.273  /*************************************