ia64/xen-unstable

changeset 13963:54247a642f5e

[IA64] Cleanup in vmx

Cleanup: add static qualifiers to file-local functions, #ifdef out unused code, fix comment typos and whitespace.
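
As illustration only (hypothetical names, not code from this patch), the
pattern applied throughout is: functions with no callers outside their own
file gain internal linkage, and code reachable only under an unset build
option is compiled out rather than deleted. The VCPU/INST64/IA64FAULT types
come from the vmx headers these files already include.

    /* Kept for reference but compiled only when the option is defined. */
    #ifdef BYPASS_VMAL_OPCODE
    static void
    bypass_only_helper(void)
    {
        /* ... */
    }
    #endif

    /* static keeps the symbol out of the global namespace and lets the
       compiler warn if the function ever becomes unused. */
    static IA64FAULT
    emul_example(VCPU *vcpu, INST64 inst)
    {
        return IA64_NO_FAULT;
    }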

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author awilliam@xenbuild2.aw
date Sun Feb 18 16:06:24 2007 -0700 (2007-02-18)
parents 5abf33a383cf
children 38f7330d4807
files xen/arch/ia64/vmx/vmx_interrupt.c xen/arch/ia64/vmx/vmx_support.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/vmx/vmx_virt.c
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmx_interrupt.c	Sun Feb 18 16:00:52 2007 -0700
     1.2 +++ b/xen/arch/ia64/vmx/vmx_interrupt.c	Sun Feb 18 16:06:24 2007 -0700
     1.3 @@ -20,15 +20,15 @@
     1.4   *  Xiaoyan Feng (Fleming Feng)  <fleming.feng@intel.com>
     1.5   *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
     1.6   */
     1.7 -
     1.8 -
     1.9  #include <xen/types.h>
    1.10  #include <asm/vmx_vcpu.h>
    1.11  #include <asm/vmx_mm_def.h>
    1.12  #include <asm/vmx_pal_vsa.h>
    1.13 +
    1.14  /* SDM vol2 5.5 - IVA based interruption handling */
    1.15  #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
    1.16 -void
    1.17 +
    1.18 +static void
    1.19  collect_interruption(VCPU *vcpu)
    1.20  {
    1.21      u64 ipsr;
    1.22 @@ -92,14 +92,19 @@ inject_guest_interruption(VCPU *vcpu, u6
    1.23      u64 viva;
    1.24      REGS *regs;
    1.25      ISR pt_isr;
    1.26 +
    1.27      perfc_incra(vmx_inject_guest_interruption, vec >> 8);
    1.28 -    regs=vcpu_regs(vcpu);
    1.29 -    // clear cr.isr.ri 
    1.30 +
    1.31 +    regs = vcpu_regs(vcpu);
    1.32 +
    1.33 +    // clear cr.isr.ir (incomplete register frame)
    1.34      pt_isr.val = VMX(vcpu,cr_isr);
    1.35      pt_isr.ir = 0;
    1.36      VMX(vcpu,cr_isr) = pt_isr.val;
    1.37 +
    1.38      collect_interruption(vcpu);
    1.39      vmx_ia64_set_dcr(vcpu);
    1.40 +
    1.41      vmx_vcpu_get_iva(vcpu,&viva);
    1.42      regs->cr_iip = viva + vec;
    1.43  }
     2.1 --- a/xen/arch/ia64/vmx/vmx_support.c	Sun Feb 18 16:00:52 2007 -0700
     2.2 +++ b/xen/arch/ia64/vmx/vmx_support.c	Sun Feb 18 16:06:24 2007 -0700
     2.3 @@ -44,7 +44,8 @@ void vmx_io_assist(struct vcpu *v)
     2.4       */
     2.5      vio = get_vio(v->domain, v->vcpu_id);
     2.6      if (!vio)
     2.7 -	panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n", (unsigned long)vio);
     2.8 +        panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n",
     2.9 +                     (unsigned long)vio);
    2.10  
    2.11      p = &vio->vp_ioreq;
    2.12  
    2.13 @@ -98,9 +99,3 @@ void vmx_send_assist_req(struct vcpu *v)
    2.14      /* the code under this line is completer phase... */
    2.15      vmx_io_assist(v);
    2.16  }
    2.17 -
    2.18 -/* Wake up a vcpu whihc is waiting for interrupts to come in */
    2.19 -void vmx_prod_vcpu(struct vcpu *v)
    2.20 -{
    2.21 -    vcpu_unblock(v);
    2.22 -}
     3.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Sun Feb 18 16:00:52 2007 -0700
     3.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Sun Feb 18 16:06:24 2007 -0700
     3.3 @@ -22,7 +22,6 @@
     3.4   *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
     3.5   *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
     3.6   */
     3.7 -
     3.8  #include <xen/sched.h>
     3.9  #include <public/xen.h>
    3.10  #include <asm/ia64_int.h>
    3.11 @@ -36,29 +35,6 @@
    3.12  #include <asm/vmx_mm_def.h>
    3.13  #include <asm/vmx.h>
    3.14  #include <asm/vmx_phy_mode.h>
    3.15 -//u64  fire_itc;
    3.16 -//u64  fire_itc2;
    3.17 -//u64  fire_itm;
    3.18 -//u64  fire_itm2;
    3.19 -/*
    3.20 - * Copyright (c) 2005 Intel Corporation.
    3.21 - *    Anthony Xu (anthony.xu@intel.com)
    3.22 - *    Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
    3.23 - *
    3.24 - * This program is free software; you can redistribute it and/or modify it
    3.25 - * under the terms and conditions of the GNU General Public License,
    3.26 - * version 2, as published by the Free Software Foundation.
    3.27 - *
    3.28 - * This program is distributed in the hope it will be useful, but WITHOUT
    3.29 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    3.30 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    3.31 - * more details.
    3.32 - *
    3.33 - * You should have received a copy of the GNU General Public License along with
    3.34 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    3.35 - * Place - Suite 330, Boston, MA 02111-1307 USA.
    3.36 - *
    3.37 - */
    3.38  
    3.39  /**************************************************************************
    3.40   VCPU general register access routines
     4.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Sun Feb 18 16:00:52 2007 -0700
     4.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Sun Feb 18 16:06:24 2007 -0700
     4.3 @@ -31,7 +31,8 @@
     4.4  #include <asm/virt_event.h>
     4.5  #include <asm/vmx_phy_mode.h>
     4.6  
     4.7 -void
     4.8 +#ifdef BYPASS_VMAL_OPCODE
     4.9 +static void
    4.10  ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, u64 * cause)
    4.11  {
    4.12      *cause=0;
    4.13 @@ -141,20 +142,21 @@ ia64_priv_decoder(IA64_SLOT_TYPE slot_ty
    4.14          break;
    4.15      }
    4.16  }
    4.17 +#endif
    4.18  
    4.19 -IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
    4.20 +static IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
    4.21  {
    4.22      u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
    4.23      return vmx_vcpu_reset_psr_sm(vcpu,imm24);
    4.24  }
    4.25  
    4.26 -IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
    4.27 +static IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
    4.28  {
    4.29      u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
    4.30      return vmx_vcpu_set_psr_sm(vcpu,imm24);
    4.31  }
    4.32  
    4.33 -IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
    4.34 +static IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
    4.35  {
    4.36      u64 tgt = inst.M33.r1;
    4.37      u64 val;
    4.38 @@ -172,7 +174,7 @@ IA64FAULT vmx_emul_mov_from_psr(VCPU *vc
    4.39  /**
    4.40   * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
    4.41   */
    4.42 -IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
    4.43 +static IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
    4.44  {
    4.45      u64 val;
    4.46  
    4.47 @@ -187,7 +189,7 @@ IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu
    4.48  Privileged operation emulation routines
    4.49  **************************************************************************/
    4.50  
    4.51 -IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
    4.52 +static IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
    4.53  {
    4.54      IA64_PSR  vpsr;
    4.55      REGS *regs;
    4.56 @@ -209,7 +211,7 @@ IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST6
    4.57      return vmx_vcpu_rfi(vcpu);
    4.58  }
    4.59  
    4.60 -IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
    4.61 +static IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
    4.62  {
    4.63  #ifdef  CHECK_FAULT
    4.64      IA64_PSR  vpsr;
    4.65 @@ -224,7 +226,7 @@ IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST
    4.66     return vcpu_bsw0(vcpu);
    4.67  }
    4.68  
    4.69 -IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
    4.70 +static IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
    4.71  {
    4.72  #ifdef  CHECK_FAULT
    4.73      IA64_PSR  vpsr;
    4.74 @@ -239,12 +241,12 @@ IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST
    4.75      return vcpu_bsw1(vcpu);
    4.76  }
    4.77  
    4.78 -IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
    4.79 +static IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
    4.80  {
    4.81      return vmx_vcpu_cover(vcpu);
    4.82  }
    4.83  
    4.84 -IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
    4.85 +static IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
    4.86  {
    4.87      u64 r2,r3;
    4.88  #ifdef  VMAL_NO_FAULT_CHECK
    4.89 @@ -278,7 +280,7 @@ IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INS
    4.90      return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
    4.91  }
    4.92  
    4.93 -IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
    4.94 +static IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
    4.95  {
    4.96      u64 r3;
    4.97  #ifdef  VMAL_NO_FAULT_CHECK
    4.98 @@ -303,7 +305,7 @@ IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INS
    4.99      return vmx_vcpu_ptc_e(vcpu,r3);
   4.100  }
   4.101  
   4.102 -IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
   4.103 +static IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
   4.104  {
   4.105      u64 r2,r3;
   4.106  #ifdef  VMAL_NO_FAULT_CHECK    
   4.107 @@ -336,7 +338,7 @@ IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INS
   4.108      return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
   4.109  }
   4.110  
   4.111 -IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
   4.112 +static IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
   4.113  {
   4.114      u64 r2,r3;
   4.115  #ifdef  VMAL_NO_FAULT_CHECK    
   4.116 @@ -369,7 +371,7 @@ IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, IN
   4.117      return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
   4.118  }
   4.119  
   4.120 -IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
   4.121 +static IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
   4.122  {
   4.123      IA64FAULT	ret1, ret2;
   4.124  
   4.125 @@ -403,7 +405,7 @@ IA64FAULT ptr_fault_check(VCPU *vcpu, IN
   4.126     return IA64_NO_FAULT;
   4.127  }
   4.128  
   4.129 -IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
   4.130 +static IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
   4.131  {
   4.132      u64 r2,r3;
   4.133      if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
   4.134 @@ -411,7 +413,7 @@ IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INS
   4.135      return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
   4.136  }
   4.137  
   4.138 -IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
   4.139 +static IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
   4.140  {
   4.141      u64 r2,r3;
   4.142      if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
   4.143 @@ -420,7 +422,7 @@ IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INS
   4.144  }
   4.145  
   4.146  
   4.147 -IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
   4.148 +static IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
   4.149  {
   4.150      u64 r1,r3;
   4.151  #ifdef  CHECK_FAULT
   4.152 @@ -450,7 +452,7 @@ IA64FAULT vmx_emul_thash(VCPU *vcpu, INS
   4.153  }
   4.154  
   4.155  
   4.156 -IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
   4.157 +static IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
   4.158  {
   4.159      u64 r1,r3;
   4.160  #ifdef  CHECK_FAULT
   4.161 @@ -482,7 +484,7 @@ IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST
   4.162  }
   4.163  
   4.164  
   4.165 -IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
   4.166 +static IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
   4.167  {
   4.168      u64 r1,r3;
   4.169  #ifdef  CHECK_FAULT
   4.170 @@ -526,7 +528,7 @@ IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST6
   4.171      return(IA64_NO_FAULT);
   4.172  }
   4.173  
   4.174 -IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
   4.175 +static IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
   4.176  {
   4.177      u64 r1,r3;
   4.178  #ifdef  CHECK_FAULT
   4.179 @@ -564,7 +566,7 @@ IA64FAULT vmx_emul_tak(VCPU *vcpu, INST6
   4.180   * Insert translation register/cache
   4.181  ************************************/
   4.182  
   4.183 -IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
   4.184 +static IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
   4.185  {
   4.186      u64 itir, ifa, pte, slot;
   4.187  #ifdef  VMAL_NO_FAULT_CHECK
   4.188 @@ -621,7 +623,7 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS
   4.189      return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
   4.190  }
   4.191  
   4.192 -IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
   4.193 +static IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
   4.194  {
   4.195      u64 itir, ifa, pte, slot;
   4.196  #ifdef  VMAL_NO_FAULT_CHECK
   4.197 @@ -678,7 +680,8 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
   4.198     return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
   4.199  }
   4.200  
   4.201 -IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
   4.202 +static IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst,
   4.203 +                                 u64 *itir, u64 *ifa, u64 *pte)
   4.204  {
   4.205      IA64FAULT	ret1;
   4.206  
   4.207 @@ -727,7 +730,7 @@ IA64FAULT itc_fault_check(VCPU *vcpu, IN
   4.208     return IA64_NO_FAULT;
   4.209  }
   4.210  
   4.211 -IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
   4.212 +static IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
   4.213  {
   4.214      u64 itir, ifa, pte;
   4.215  
   4.216 @@ -738,7 +741,7 @@ IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INS
   4.217     return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
   4.218  }
   4.219  
   4.220 -IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
   4.221 +static IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
   4.222  {
   4.223      u64 itir, ifa, pte;
   4.224  
   4.225 @@ -754,7 +757,7 @@ IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INS
   4.226   * Moves to semi-privileged registers
   4.227  *************************************/
   4.228  
   4.229 -IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
   4.230 +static IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
   4.231  {
   4.232      // I27 and M30 are identical for these fields
   4.233      u64 imm;
   4.234 @@ -780,7 +783,7 @@ IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *v
   4.235      return (vmx_vcpu_set_itc(vcpu, imm));
   4.236  }
   4.237  
   4.238 -IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
   4.239 +static IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
   4.240  {
   4.241      // I26 and M29 are identical for these fields
   4.242      u64 r2;
   4.243 @@ -808,7 +811,7 @@ IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *v
   4.244  }
   4.245  
   4.246  
   4.247 -IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
   4.248 +static IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
   4.249  {
   4.250      // I27 and M30 are identical for these fields
   4.251      u64 r1;
   4.252 @@ -840,7 +843,7 @@ IA64FAULT vmx_emul_mov_from_ar_reg(VCPU 
   4.253   * Moves to privileged registers
   4.254  ********************************/
   4.255  
   4.256 -IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
   4.257 +static IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
   4.258  {
   4.259      u64 r3,r2;
   4.260  #ifdef  CHECK_FAULT
   4.261 @@ -863,7 +866,7 @@ IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu
   4.262      return (vmx_vcpu_set_pkr(vcpu,r3,r2));
   4.263  }
   4.264  
   4.265 -IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
   4.266 +static IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
   4.267  {
   4.268      u64 r3,r2;
   4.269  #ifdef  CHECK_FAULT
   4.270 @@ -886,7 +889,7 @@ IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu,
   4.271      return (vmx_vcpu_set_rr(vcpu,r3,r2));
   4.272  }
   4.273  
   4.274 -IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
   4.275 +static IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
   4.276  {
   4.277      u64 r3,r2;
   4.278      return IA64_NO_FAULT;
   4.279 @@ -910,7 +913,7 @@ IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu
   4.280      return (vmx_vcpu_set_dbr(vcpu,r3,r2));
   4.281  }
   4.282  
   4.283 -IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
   4.284 +static IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
   4.285  {
   4.286      u64 r3,r2;
   4.287      return IA64_NO_FAULT;
   4.288 @@ -934,7 +937,7 @@ IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu
   4.289      return (vmx_vcpu_set_ibr(vcpu,r3,r2));
   4.290  }
   4.291  
   4.292 -IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
   4.293 +static IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
   4.294  {
   4.295      u64 r3,r2;
   4.296  #ifdef  CHECK_FAULT
   4.297 @@ -957,7 +960,7 @@ IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu
   4.298      return (vmx_vcpu_set_pmc(vcpu,r3,r2));
   4.299  }
   4.300  
   4.301 -IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
   4.302 +static IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
   4.303  {
   4.304      u64 r3,r2;
   4.305  #ifdef  CHECK_FAULT
   4.306 @@ -985,7 +988,7 @@ IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu
   4.307   * Moves from privileged registers
   4.308   **********************************/
   4.309  
   4.310 -IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
   4.311 +static IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
   4.312  {
   4.313      u64 r3,r1;
   4.314  #ifdef  CHECK_FAULT
   4.315 @@ -1021,7 +1024,7 @@ IA64FAULT vmx_emul_mov_from_rr(VCPU *vcp
   4.316      return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
   4.317  }
   4.318  
   4.319 -IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
   4.320 +static IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
   4.321  {
   4.322      u64 r3,r1;
   4.323  #ifdef  CHECK_FAULT
   4.324 @@ -1058,7 +1061,7 @@ IA64FAULT vmx_emul_mov_from_pkr(VCPU *vc
   4.325      return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
   4.326  }
   4.327  
   4.328 -IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
   4.329 +static IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
   4.330  {
   4.331      u64 r3,r1;
   4.332  #ifdef  CHECK_FAULT
   4.333 @@ -1095,7 +1098,7 @@ IA64FAULT vmx_emul_mov_from_dbr(VCPU *vc
   4.334      return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
   4.335  }
   4.336  
   4.337 -IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
   4.338 +static IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
   4.339  {
   4.340      u64 r3,r1;
   4.341  #ifdef  CHECK_FAULT
   4.342 @@ -1132,7 +1135,7 @@ IA64FAULT vmx_emul_mov_from_ibr(VCPU *vc
   4.343      return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
   4.344  }
   4.345  
   4.346 -IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
   4.347 +static IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
   4.348  {
   4.349      u64 r3,r1;
   4.350  #ifdef  CHECK_FAULT
   4.351 @@ -1169,7 +1172,7 @@ IA64FAULT vmx_emul_mov_from_pmc(VCPU *vc
   4.352      return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
   4.353  }
   4.354  
   4.355 -IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
   4.356 +static IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
   4.357  {
   4.358      u64 r3,r1;
   4.359  #ifdef  CHECK_FAULT
   4.360 @@ -1197,7 +1200,7 @@ IA64FAULT vmx_emul_mov_from_cpuid(VCPU *
   4.361      return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
   4.362  }
   4.363  
   4.364 -IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
   4.365 +static IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
   4.366  {
   4.367      u64 r2;
   4.368      extern u64 cr_igfld_mask(int index, u64 value);
   4.369 @@ -1275,7 +1278,7 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
   4.370      ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
   4.371          vcpu_set_gr(vcpu, tgt, val,0):fault;
   4.372  
   4.373 -IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
   4.374 +static IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
   4.375  {
   4.376      u64 tgt = inst.M33.r1;
   4.377      u64 val;