ia64/xen-unstable

changeset 8917:67ea7868089b

[IA64] Clean up warnings related to VTi code. (header files)

This patch adds prototype declarations for some functions in the corresponding
header files and removes issues such as the redefinition of some macros.
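
For illustration only (this sketch is not part of the patch, and the names are
hypothetical): declaring a function's prototype in a shared header lets the
compiler type-check calls from other translation units and silences the
"implicit declaration of function" warnings this patch cleans up.

    /* vmx_example.h -- hypothetical header: one prototype visible to all callers */
    #ifndef VMX_EXAMPLE_H
    #define VMX_EXAMPLE_H
    extern void vmx_handle_vector(int vec);    /* declaration only */
    #endif

    /* caller.c -- without including the header, the call below would raise an
     * "implicit declaration of function" warning at compile time */
    #include "vmx_example.h"

    void dispatch(void)
    {
        vmx_handle_vector(3);    /* now checked against the prototype */
    }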

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
author awilliam@xenbuild.aw
date Fri Feb 24 13:30:39 2006 -0700 (2006-02-24)
parents 0f59ace5442c
children c18c63f87b7d
files xen/include/asm-ia64/vmx.h xen/include/asm-ia64/vmx_mm_def.h xen/include/asm-ia64/vmx_pal.h xen/include/asm-ia64/vmx_phy_mode.h xen/include/asm-ia64/vmx_vcpu.h
line diff
     1.1 --- a/xen/include/asm-ia64/vmx.h	Fri Feb 24 13:29:18 2006 -0700
     1.2 +++ b/xen/include/asm-ia64/vmx.h	Fri Feb 24 13:30:39 2006 -0700
     1.3 @@ -34,7 +34,24 @@ extern void vmx_load_state(struct vcpu *
     1.4  extern void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c);
     1.5  extern void vmx_wait_io(void);
     1.6  extern void vmx_io_assist(struct vcpu *v);
     1.7 -
     1.8 +extern void vmx_load_all_rr(struct vcpu *vcpu);
     1.9 +extern void panic_domain(struct pt_regs *regs, const char *fmt, ...);
    1.10 +extern int ia64_hypercall (struct pt_regs *regs);
    1.11 +extern void vmx_save_state(struct vcpu *v);
    1.12 +extern void vmx_load_state(struct vcpu *v);
    1.13 +extern void show_registers(struct pt_regs *regs);
    1.14 +extern int vmx_alloc_contig_pages(struct domain *d);
    1.15 +extern unsigned long __gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
    1.16 +extern void sync_split_caches(void);
    1.17 +extern void vmx_virq_line_assist(struct vcpu *v);
    1.18 +extern void set_privileged_operation_isr (struct vcpu *vcpu,int inst);
    1.19 +extern void privilege_op (struct vcpu *vcpu);
    1.20 +extern void set_ifa_itir_iha (struct vcpu *vcpu, u64 vadr,
    1.21 +          int set_ifa, int set_itir, int set_iha);
    1.22 +extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
    1.23 +extern void vmx_intr_assist(struct vcpu *v);
    1.24 +extern void set_illegal_op_isr (struct vcpu *vcpu);
    1.25 +extern  void illegal_op (struct vcpu *vcpu);
    1.26  static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
    1.27  {
    1.28      return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
     2.1 --- a/xen/include/asm-ia64/vmx_mm_def.h	Fri Feb 24 13:29:18 2006 -0700
     2.2 +++ b/xen/include/asm-ia64/vmx_mm_def.h	Fri Feb 24 13:30:39 2006 -0700
     2.3 @@ -27,8 +27,6 @@
     2.4  //#define VHPT_SIZE   (1 << VHPT_SIZE_PS)
     2.5  #define ARCH_PAGE_SHIFT   12
     2.6  #define ARCH_PAGE_SIZE    PSIZE(ARCH_PAGE_SHIFT)
     2.7 -#define INVALID_MFN	(-1)
     2.8 -
     2.9  #define MAX_PHYS_ADDR_BITS  50
    2.10  #define PMASK(size)         (~((size) - 1))
    2.11  #define PSIZE(size)         (1UL<<(size))
    2.12 @@ -87,9 +85,6 @@
    2.13  #define STLB_TC         0
    2.14  #define STLB_TR         1
    2.15  
    2.16 -#define VMM_RR_MASK     0xfffff
    2.17 -#define VMM_RR_SHIFT        20
    2.18 -
    2.19  #define IA64_RR_SHIFT       61
    2.20  
    2.21  #define PHYS_PAGE_SHIFT     PPN_SHIFT
    2.22 @@ -109,7 +104,6 @@
    2.23  
    2.24  #define VRN_MASK        0xe000000000000000L
    2.25  #define PTA_BASE_MASK       0x3fffffffffffL
    2.26 -#define PTA_BASE_SHIFT      15
    2.27  #define VHPT_OFFSET_MASK    0x7fff
    2.28  
    2.29  #define BITS_SHIFT_256MB    28
    2.30 @@ -145,6 +139,7 @@ bits_v(uint64_t v, uint32_t bs, uint32_t
    2.31      uint64_t    result;
    2.32      __asm __volatile("shl %0=%1, %2;; shr.u %0=%0, %3;;"
    2.33          : "=r" (result): "r"(v), "r"(63-be), "r" (bs+63-be) );
    2.34 +    return result;
    2.35  }
    2.36  
    2.37  #define bits(val, bs, be)                                         \
     3.1 --- a/xen/include/asm-ia64/vmx_pal.h	Fri Feb 24 13:29:18 2006 -0700
     3.2 +++ b/xen/include/asm-ia64/vmx_pal.h	Fri Feb 24 13:30:39 2006 -0700
     3.3 @@ -114,7 +114,7 @@ ia64_pal_vp_save (u64 *vpd, u64 pal_proc
     3.4  	PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0);
     3.5  	return iprv.status;
     3.6  }
     3.7 -
     3.8 +extern void pal_emul(struct vcpu *vcpu);
     3.9  #define PAL_PROC_VM_BIT		(1UL << 40)
    3.10  #define PAL_PROC_VMSW_BIT	(1UL << 54)
    3.11  #endif /* _ASM_IA64_VT_PAL_H */
     4.1 --- a/xen/include/asm-ia64/vmx_phy_mode.h	Fri Feb 24 13:29:18 2006 -0700
     4.2 +++ b/xen/include/asm-ia64/vmx_phy_mode.h	Fri Feb 24 13:30:39 2006 -0700
     4.3 @@ -96,6 +96,8 @@ extern void prepare_if_physical_mode(VCP
     4.4  extern void recover_if_physical_mode(VCPU *vcpu);
     4.5  extern void vmx_init_all_rr(VCPU *vcpu);
     4.6  extern void vmx_load_all_rr(VCPU *vcpu);
     4.7 +extern void physical_itlb_miss(VCPU *vcpu, u64 vadr);
     4.8 +extern void physical_dtlb_miss(VCPU *vcpu, u64 vadr);
     4.9  /*
    4.10   * No sanity check here, since all psr changes have been
    4.11   * checked in switch_mm_mode().
     5.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Fri Feb 24 13:29:18 2006 -0700
     5.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Fri Feb 24 13:30:39 2006 -0700
     5.3 @@ -51,8 +51,7 @@
     5.4  
     5.5  #define VMM_RR_SHIFT    20
     5.6  #define VMM_RR_MASK     ((1UL<<VMM_RR_SHIFT)-1)
     5.7 -//#define VRID_2_MRID(vcpu,rid)  ((rid) & VMM_RR_MASK) | \
     5.8 -                ((vcpu->domain->domain_id) << VMM_RR_SHIFT)
     5.9 +
    5.10  extern u64 indirect_reg_igfld_MASK ( int type, int index, u64 value);
    5.11  extern u64 cr_igfld_mask (int index, u64 value);
    5.12  extern int check_indirect_reg_rsv_fields ( int type, int index, u64 value );
    5.13 @@ -118,8 +117,9 @@ extern void memread_p(VCPU *vcpu, u64 *s
    5.14  extern void memread_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
    5.15  extern void memwrite_v(VCPU *vcpu, thash_data_t *vtlb, u64 *src, u64 *dest, size_t s);
    5.16  extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
    5.17 -
    5.18 -
    5.19 +extern void vcpu_load_kernel_regs(VCPU *vcpu);
    5.20 +extern IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu);
    5.21 +extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
    5.22  /**************************************************************************
    5.23   VCPU control register access routines
    5.24  **************************************************************************/
    5.25 @@ -461,7 +461,6 @@ static inline unsigned long
    5.26  vmx_vrrtomrr(VCPU *v, unsigned long val)
    5.27  {
    5.28      ia64_rr rr;
    5.29 -    u64	  rid;
    5.30  
    5.31      rr.rrval=val;
    5.32      rr.rid = rr.rid + v->arch.starting_rid;