ia64/xen-unstable

changeset 16247:2d238ca6d51a

x86: Fix MTRR/PAT virtualisation on PAE

Various 'long's should be 'u64's: on a 32-bit (PAE) build, unsigned long is only 32 bits wide, so guest physical addresses and MSR values above 4GiB were silently truncated.

Signed-off-by: Disheng Su <disheng.su@intel.com>
author Keir Fraser <keir@xensource.com>
date Fri Oct 26 10:00:10 2007 +0100 (2007-10-26)
parents fd09283562e2
children cae485f682aa
files tools/libxc/xc_domain.c tools/libxc/xenctrl.h xen/arch/x86/hvm/mtrr.c xen/include/asm-x86/hvm/cacheattr.h
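
Why the type change matters: under PAE a 32-bit x86 build can address guest-physical memory above 4GiB, but C's unsigned long is only 32 bits there, so ranges and MSR values carried in longs lose their top bits. A minimal standalone illustration (not Xen code; the address is made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t gpa = 0x123456789ULL;                 /* >4GiB guest-physical address, valid under PAE */
        unsigned long truncated = (unsigned long)gpa;  /* becomes 0x23456789 where long is 32 bits */

        printf("gpa=%#llx truncated=%#lx\n", (unsigned long long)gpa, truncated);
        return 0;
    }

Hence the widening below of libxc's xc_domain_pin_memory_cacheattr() arguments and of the hypervisor's MTRR/PAT bookkeeping to fixed-width uint64_t/uint32_t/uint8_t.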
line diff
     1.1 --- a/tools/libxc/xc_domain.c	Fri Oct 26 09:58:43 2007 +0100
     1.2 +++ b/tools/libxc/xc_domain.c	Fri Oct 26 10:00:10 2007 +0100
     1.3 @@ -378,9 +378,9 @@ int xc_domain_setmaxmem(int xc_handle,
     1.4  
     1.5  int xc_domain_pin_memory_cacheattr(int xc_handle,
     1.6                                     uint32_t domid,
     1.7 -                                   unsigned long start,
     1.8 -                                   unsigned long end,
     1.9 -                                   unsigned int type)
    1.10 +                                   uint64_t start,
    1.11 +                                   uint64_t end,
    1.12 +                                   uint32_t type)
    1.13  {
    1.14      DECLARE_DOMCTL;
    1.15      domctl.cmd = XEN_DOMCTL_pin_mem_cacheattr;
     2.1 --- a/tools/libxc/xenctrl.h	Fri Oct 26 09:58:43 2007 +0100
     2.2 +++ b/tools/libxc/xenctrl.h	Fri Oct 26 10:00:10 2007 +0100
     2.3 @@ -616,9 +616,9 @@ int xc_domain_iomem_permission(int xc_ha
     2.4  
     2.5  int xc_domain_pin_memory_cacheattr(int xc_handle,
     2.6                                     uint32_t domid,
     2.7 -                                   unsigned long start,
     2.8 -                                   unsigned long end,
     2.9 -                                   unsigned int type);
    2.10 +                                   uint64_t start,
    2.11 +                                   uint64_t end,
    2.12 +                                   uint32_t type);
    2.13  
    2.14  unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
    2.15                                      unsigned long mfn);
     3.1 --- a/xen/arch/x86/hvm/mtrr.c	Fri Oct 26 09:58:43 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/mtrr.c	Fri Oct 26 10:00:10 2007 +0100
     3.3 @@ -30,25 +30,25 @@
     3.4  /* Xen holds the native MTRR MSRs */
     3.5  extern struct mtrr_state mtrr_state;
     3.6  
     3.7 -static u64 phys_base_msr_mask;
     3.8 -static u64 phys_mask_msr_mask;
     3.9 -static u32 size_or_mask;
    3.10 -static u32 size_and_mask;
    3.11 +static uint64_t phys_base_msr_mask;
    3.12 +static uint64_t phys_mask_msr_mask;
    3.13 +static uint32_t size_or_mask;
    3.14 +static uint32_t size_and_mask;
    3.15  
    3.16 -static void init_pat_entry_tbl(u64 pat);
    3.17 +static void init_pat_entry_tbl(uint64_t pat);
    3.18  static void init_mtrr_epat_tbl(void);
    3.19 -static unsigned char get_mtrr_type(struct mtrr_state *m, paddr_t pa);
    3.20 +static uint8_t get_mtrr_type(struct mtrr_state *m, paddr_t pa);
    3.21  /* get page attribute fields (PAn) from PAT MSR */
    3.22 -#define pat_cr_2_paf(pat_cr,n)  ((((u64)pat_cr) >> ((n)<<3)) & 0xff)
    3.23 +#define pat_cr_2_paf(pat_cr,n)  ((((uint64_t)pat_cr) >> ((n)<<3)) & 0xff)
    3.24  /* pat entry to PTE flags (PAT, PCD, PWT bits) */
    3.25 -static unsigned char pat_entry_2_pte_flags[8] = {
    3.26 +static uint8_t pat_entry_2_pte_flags[8] = {
    3.27      0,           _PAGE_PWT,
    3.28      _PAGE_PCD,   _PAGE_PCD | _PAGE_PWT,
    3.29      _PAGE_PAT,   _PAGE_PAT | _PAGE_PWT,
    3.30      _PAGE_PAT | _PAGE_PCD, _PAGE_PAT | _PAGE_PCD | _PAGE_PWT };
    3.31  
    3.32  /* effective mm type lookup table, according to MTRR and PAT */
    3.33 -static u8 mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = {
    3.34 +static uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = {
    3.35  /********PAT(UC,WC,RS,RS,WT,WP,WB,UC-)*/
    3.36  /* RS means reserved type(2,3), and type is hardcoded here */
    3.37   /*MTRR(UC):(UC,WC,RS,RS,UC,UC,UC,UC)*/
    3.38 @@ -70,10 +70,10 @@ static u8 mm_type_tbl[MTRR_NUM_TYPES][PA
    3.39  /* reverse lookup table, to find a pat type according to MTRR and effective
    3.40   * memory type. This table is dynamically generated
    3.41   */
    3.42 -static u8 mtrr_epat_tbl[MTRR_NUM_TYPES][MEMORY_NUM_TYPES];
    3.43 +static uint8_t mtrr_epat_tbl[MTRR_NUM_TYPES][MEMORY_NUM_TYPES];
    3.44  
    3.45  /* lookup table for PAT entry of a given PAT value in host pat */
    3.46 -static u8 pat_entry_tbl[PAT_TYPE_NUMS];
    3.47 +static uint8_t pat_entry_tbl[PAT_TYPE_NUMS];
    3.48  
    3.49  static void get_mtrr_range(uint64_t base_msr, uint64_t mask_msr,
    3.50                             uint64_t *base, uint64_t *end)
    3.51 @@ -104,23 +104,23 @@ static void get_mtrr_range(uint64_t base
    3.52  
    3.53  bool_t is_var_mtrr_overlapped(struct mtrr_state *m)
    3.54  {
    3.55 -    int seg, i;
    3.56 +    int32_t seg, i;
    3.57      uint64_t phys_base, phys_mask, phys_base_pre, phys_mask_pre;
    3.58      uint64_t base_pre, end_pre, base, end;
    3.59 -    uint8_t num_var_ranges = (u8)m->mtrr_cap;
    3.60 +    uint8_t num_var_ranges = (uint8_t)m->mtrr_cap;
    3.61  
    3.62      for ( i = 0; i < num_var_ranges; i++ )
    3.63      {
    3.64 -        phys_base_pre = ((u64*)m->var_ranges)[i*2];
    3.65 -        phys_mask_pre = ((u64*)m->var_ranges)[i*2 + 1];
    3.66 +        phys_base_pre = ((uint64_t*)m->var_ranges)[i*2];
    3.67 +        phys_mask_pre = ((uint64_t*)m->var_ranges)[i*2 + 1];
    3.68  
    3.69          get_mtrr_range(phys_base_pre, phys_mask_pre,
    3.70                          &base_pre, &end_pre);
    3.71  
    3.72          for ( seg = i + 1; seg < num_var_ranges; seg ++ )
    3.73          {
    3.74 -            phys_base = ((u64*)m->var_ranges)[seg*2];
    3.75 -            phys_mask = ((u64*)m->var_ranges)[seg*2 + 1];
    3.76 +            phys_base = ((uint64_t*)m->var_ranges)[seg*2];
    3.77 +            phys_mask = ((uint64_t*)m->var_ranges)[seg*2 + 1];
    3.78  
    3.79              get_mtrr_range(phys_base, phys_mask,
    3.80                              &base, &end);
    3.81 @@ -143,12 +143,14 @@ bool_t is_var_mtrr_overlapped(struct mtr
    3.82  #define RESERVED_MTRR 2
    3.83  #define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
    3.84  #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
    3.85 -bool_t mtrr_var_range_msr_set(struct mtrr_state *m, u32 msr, u64 msr_content);
    3.86 -bool_t mtrr_def_type_msr_set(struct mtrr_state *m, u64 msr_content);
    3.87 -bool_t mtrr_fix_range_msr_set(struct mtrr_state *m, int row, u64 msr_content);
    3.88 -static void set_var_mtrr(unsigned int reg, struct mtrr_state *m,
    3.89 -                    unsigned int base, unsigned int size,
    3.90 -                    unsigned int type)
    3.91 +bool_t mtrr_var_range_msr_set(struct mtrr_state *m, uint32_t msr,
    3.92 +                              uint64_t msr_content);
    3.93 +bool_t mtrr_def_type_msr_set(struct mtrr_state *m, uint64_t msr_content);
    3.94 +bool_t mtrr_fix_range_msr_set(struct mtrr_state *m, uint32_t row,
    3.95 +                              uint64_t msr_content);
    3.96 +static void set_var_mtrr(uint32_t reg, struct mtrr_state *m,
    3.97 +                         uint32_t base, uint32_t size,
    3.98 +                         uint32_t type)
    3.99  {
   3.100      struct mtrr_var_range *vr;
   3.101  
   3.102 @@ -168,9 +170,9 @@ static void set_var_mtrr(unsigned int re
   3.103          vr->mask_lo = -size << PAGE_SHIFT | 0x800;
   3.104          vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
   3.105  
   3.106 -        mtrr_var_range_msr_set(m, MTRRphysBase_MSR(reg), *(unsigned long *)vr);
   3.107 +        mtrr_var_range_msr_set(m, MTRRphysBase_MSR(reg), *(uint64_t *)vr);
   3.108          mtrr_var_range_msr_set(m, MTRRphysMask_MSR(reg),
   3.109 -                               *((unsigned long *)vr + 1));
   3.110 +                               *((uint64_t *)vr + 1));
   3.111      }
   3.112  }
   3.113  /* From Intel Vol. III Section 10.11.4, the Range Size and Base Alignment has
   3.114 @@ -179,18 +181,19 @@ static void set_var_mtrr(unsigned int re
   3.115   * 2. The base address must be 2^N aligned, where the N here is equal to
   3.116   * the N in previous requirement. So a 8K range must be 8K aligned not 4K aligned.
   3.117   */
   3.118 -static unsigned int range_to_mtrr(unsigned int reg, struct mtrr_state *m,
   3.119 -    unsigned int range_startk, unsigned int range_sizek, unsigned char type)
   3.120 +static uint32_t range_to_mtrr(uint32_t reg, struct mtrr_state *m,
   3.121 +                              uint32_t range_startk, uint32_t range_sizek,
   3.122 +                              uint8_t type)
   3.123  {
   3.124      if ( !range_sizek || (reg >= ((m->mtrr_cap & 0xff) - RESERVED_MTRR)) )
   3.125          return reg;
   3.126  
   3.127      while ( range_sizek )
   3.128      {
   3.129 -        unsigned int max_align, align, sizek;
   3.130 +        uint32_t max_align, align, sizek;
   3.131  
   3.132          max_align = (range_startk == 0) ? 32 : ffs(range_startk);
   3.133 -        align = min_t(unsigned int, fls(range_sizek), max_align);
   3.134 +        align = min_t(uint32_t, fls(range_sizek), max_align);
   3.135          sizek = 1 << (align - 1);
   3.136  
   3.137          set_var_mtrr(reg++, m, range_startk, sizek, type);
   3.138 @@ -208,7 +211,7 @@ static unsigned int range_to_mtrr(unsign
   3.139  static void setup_fixed_mtrrs(struct vcpu *v)
   3.140  {
   3.141      uint64_t content;
   3.142 -    int i;
   3.143 +    int32_t i;
   3.144      struct mtrr_state *m = &v->arch.hvm_vcpu.mtrr;
   3.145  
   3.146      /* 1. Map (0~A0000) as WB */
   3.147 @@ -226,21 +229,21 @@ static void setup_fixed_mtrrs(struct vcp
   3.148  static void setup_var_mtrrs(struct vcpu *v)
   3.149  {
   3.150      p2m_type_t p2m;
   3.151 -    unsigned long e820_mfn;
   3.152 -    char *p = NULL;
   3.153 -    unsigned char nr = 0;
   3.154 -    int i;
   3.155 -    unsigned int reg = 0;
   3.156 -    unsigned long size = 0;
   3.157 -    unsigned long addr = 0;
   3.158 +    uint64_t e820_mfn;
   3.159 +    int8_t *p = NULL;
   3.160 +    uint8_t nr = 0;
   3.161 +    int32_t i;
   3.162 +    uint32_t reg = 0;
   3.163 +    uint64_t size = 0;
   3.164 +    uint64_t addr = 0;
   3.165      struct e820entry *e820_table;
   3.166  
   3.167      e820_mfn = mfn_x(gfn_to_mfn(v->domain,
   3.168                      HVM_E820_PAGE >> PAGE_SHIFT, &p2m));
   3.169  
   3.170 -    p = (char *)map_domain_page(e820_mfn);
   3.171 +    p = (int8_t *)map_domain_page(e820_mfn);
   3.172  
   3.173 -    nr = *(unsigned char*)(p + HVM_E820_NR_OFFSET);
   3.174 +    nr = *(uint8_t*)(p + HVM_E820_NR_OFFSET);
   3.175      e820_table = (struct e820entry*)(p + HVM_E820_OFFSET);
   3.176      /* search E820 table, set MTRR for RAM */
   3.177      for ( i = 0; i < nr; i++)
   3.178 @@ -283,7 +286,7 @@ void init_mtrr_in_hyper(struct vcpu *v)
   3.179      v->arch.hvm_vcpu.mtrr.is_initialized = 1;
   3.180  }
   3.181  
   3.182 -static int reset_mtrr(struct mtrr_state *m)
   3.183 +static int32_t reset_mtrr(struct mtrr_state *m)
   3.184  {
   3.185      m->var_ranges = xmalloc_array(struct mtrr_var_range, MTRR_VCNT);
   3.186      if ( m->var_ranges == NULL )
   3.187 @@ -300,8 +303,8 @@ static int reset_mtrr(struct mtrr_state 
   3.188  /* init global variables for MTRR and PAT */
   3.189  void global_init_mtrr_pat(void)
   3.190  {
   3.191 -    extern u64 host_pat;
   3.192 -    u32 phys_addr;
   3.193 +    extern uint64_t host_pat;
   3.194 +    uint32_t phys_addr;
   3.195  
   3.196      init_mtrr_epat_tbl();
   3.197      init_pat_entry_tbl(host_pat);
   3.198 @@ -311,16 +314,16 @@ void global_init_mtrr_pat(void)
   3.199      else
   3.200          phys_addr = cpuid_eax(0x80000008);
   3.201  
   3.202 -    phys_base_msr_mask = ~((((u64)1) << phys_addr) - 1) | 0xf00UL;
   3.203 -    phys_mask_msr_mask = ~((((u64)1) << phys_addr) - 1) | 0x7ffUL;
   3.204 +    phys_base_msr_mask = ~((((uint64_t)1) << phys_addr) - 1) | 0xf00UL;
   3.205 +    phys_mask_msr_mask = ~((((uint64_t)1) << phys_addr) - 1) | 0x7ffUL;
   3.206  
   3.207      size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
   3.208      size_and_mask = ~size_or_mask & 0xfff00000;
   3.209  }
   3.210  
   3.211 -static void init_pat_entry_tbl(u64 pat)
   3.212 +static void init_pat_entry_tbl(uint64_t pat)
   3.213  {
   3.214 -    int i, j;
   3.215 +    int32_t i, j;
   3.216  
   3.217      memset(&pat_entry_tbl, INVALID_MEM_TYPE,
   3.218             PAT_TYPE_NUMS * sizeof(pat_entry_tbl[0]));
   3.219 @@ -338,9 +341,9 @@ static void init_pat_entry_tbl(u64 pat)
   3.220      }
   3.221  }
   3.222  
   3.223 -unsigned char pat_type_2_pte_flags(unsigned char pat_type)
   3.224 +uint8_t pat_type_2_pte_flags(uint8_t pat_type)
   3.225  {
   3.226 -    int pat_entry = pat_entry_tbl[pat_type];
   3.227 +    int32_t pat_entry = pat_entry_tbl[pat_type];
   3.228  
   3.229      /* INVALID_MEM_TYPE, means doesn't find the pat_entry in host pat for
   3.230       * a given pat_type. If host pat covers all the pat types,
   3.231 @@ -352,22 +355,22 @@ unsigned char pat_type_2_pte_flags(unsig
   3.232      return pat_entry_2_pte_flags[pat_entry_tbl[PAT_TYPE_UNCACHABLE]];
   3.233  }
   3.234  
   3.235 -int reset_vmsr(struct mtrr_state *m, u64 *pat_ptr)
   3.236 +int32_t reset_vmsr(struct mtrr_state *m, uint64_t *pat_ptr)
   3.237  {
   3.238 -    int rc;
   3.239 +    int32_t rc;
   3.240  
   3.241      rc = reset_mtrr(m);
   3.242      if ( rc != 0 )
   3.243          return rc;
   3.244  
   3.245 -    *pat_ptr = ( (u64)PAT_TYPE_WRBACK) |                /* PAT0: WB */
   3.246 -        ( (u64)PAT_TYPE_WRTHROUGH << 8 ) |              /* PAT1: WT */
   3.247 -        ( (u64)PAT_TYPE_UC_MINUS << 16 ) |              /* PAT2: UC- */
   3.248 -        ( (u64)PAT_TYPE_UNCACHABLE << 24 ) |            /* PAT3: UC */
   3.249 -        ( (u64)PAT_TYPE_WRBACK << 32 ) |                /* PAT4: WB */
   3.250 -        ( (u64)PAT_TYPE_WRTHROUGH << 40 ) |             /* PAT5: WT */
   3.251 -        ( (u64)PAT_TYPE_UC_MINUS << 48 ) |              /* PAT6: UC- */
   3.252 -        ( (u64)PAT_TYPE_UNCACHABLE << 56 );             /* PAT7: UC */
   3.253 +    *pat_ptr = ((uint64_t)PAT_TYPE_WRBACK) |               /* PAT0: WB */
   3.254 +               ((uint64_t)PAT_TYPE_WRTHROUGH << 8) |       /* PAT1: WT */
   3.255 +               ((uint64_t)PAT_TYPE_UC_MINUS << 16) |       /* PAT2: UC- */
   3.256 +               ((uint64_t)PAT_TYPE_UNCACHABLE << 24) |     /* PAT3: UC */
   3.257 +               ((uint64_t)PAT_TYPE_WRBACK << 32) |         /* PAT4: WB */
   3.258 +               ((uint64_t)PAT_TYPE_WRTHROUGH << 40) |      /* PAT5: WT */
   3.259 +               ((uint64_t)PAT_TYPE_UC_MINUS << 48) |       /* PAT6: UC- */
   3.260 +               ((uint64_t)PAT_TYPE_UNCACHABLE << 56);      /* PAT7: UC */
   3.261  
   3.262      return 0;
   3.263  }
   3.264 @@ -375,14 +378,14 @@ int reset_vmsr(struct mtrr_state *m, u64
   3.265  /*
   3.266   * Get MTRR memory type for physical address pa.
   3.267   */
   3.268 -static unsigned char get_mtrr_type(struct mtrr_state *m, paddr_t pa)
   3.269 +static uint8_t get_mtrr_type(struct mtrr_state *m, paddr_t pa)
   3.270  {
   3.271 -   int    addr, seg, index;
   3.272 -   u8     overlap_mtrr = 0;
   3.273 -   u8     overlap_mtrr_pos = 0;
   3.274 -   u64    phys_base;
   3.275 -   u64    phys_mask;
   3.276 -   u8     num_var_ranges = m->mtrr_cap & 0xff;
   3.277 +   int32_t     addr, seg, index;
   3.278 +   uint8_t     overlap_mtrr = 0;
   3.279 +   uint8_t     overlap_mtrr_pos = 0;
   3.280 +   uint64_t    phys_base;
   3.281 +   uint64_t    phys_mask;
   3.282 +   uint8_t     num_var_ranges = m->mtrr_cap & 0xff;
   3.283  
   3.284     if ( unlikely(!(m->enabled & 0x2)) )
   3.285         return MTRR_TYPE_UNCACHABLE;
   3.286 @@ -390,7 +393,7 @@ static unsigned char get_mtrr_type(struc
   3.287     if ( (pa < 0x100000) && (m->enabled & 1) )
   3.288     {
   3.289         /* Fixed range MTRR takes effective */
   3.290 -       addr = (unsigned int) pa;
   3.291 +       addr = (uint32_t) pa;
   3.292         if ( addr < 0x80000 )
   3.293         {
   3.294             seg = (addr >> 16);
   3.295 @@ -416,11 +419,11 @@ static unsigned char get_mtrr_type(struc
   3.296     /* Match with variable MTRRs. */
   3.297     for ( seg = 0; seg < num_var_ranges; seg++ )
   3.298     {
   3.299 -       phys_base = ((u64*)m->var_ranges)[seg*2];
   3.300 -       phys_mask = ((u64*)m->var_ranges)[seg*2 + 1];
   3.301 +       phys_base = ((uint64_t*)m->var_ranges)[seg*2];
   3.302 +       phys_mask = ((uint64_t*)m->var_ranges)[seg*2 + 1];
   3.303         if ( phys_mask & (1 << MTRR_PHYSMASK_VALID_BIT) )
   3.304         {
   3.305 -           if ( ((u64) pa & phys_mask) >> MTRR_PHYSMASK_SHIFT ==
   3.306 +           if ( ((uint64_t) pa & phys_mask) >> MTRR_PHYSMASK_SHIFT ==
   3.307                  (phys_base & phys_mask) >> MTRR_PHYSMASK_SHIFT )
   3.308             {
   3.309                 if ( unlikely(m->overlapped) )
   3.310 @@ -441,7 +444,7 @@ static unsigned char get_mtrr_type(struc
   3.311     if ( unlikely(overlap_mtrr == 0) )
   3.312         return m->def_type;
   3.313  
   3.314 -   if ( likely(!(overlap_mtrr & ~( ((u8)1) << overlap_mtrr_pos ))) )
   3.315 +   if ( likely(!(overlap_mtrr & ~( ((uint8_t)1) << overlap_mtrr_pos ))) )
   3.316         /* Covers both one variable memory range matches and
   3.317          * two or more identical match.
   3.318          */
   3.319 @@ -450,7 +453,7 @@ static unsigned char get_mtrr_type(struc
   3.320     if ( overlap_mtrr & 0x1 )
   3.321         /* Two or more match, one is UC. */
   3.322         return MTRR_TYPE_UNCACHABLE;
   3.323 -   
   3.324 +
   3.325     if ( !(overlap_mtrr & 0xaf) )
   3.326         /* Two or more match, WT and WB. */
   3.327         return MTRR_TYPE_WRTHROUGH;
   3.328 @@ -464,9 +467,9 @@ static unsigned char get_mtrr_type(struc
   3.329   * NOTE: valid only when paging is enabled.
   3.330   *       Only 4K page PTE is supported now.
   3.331   */
   3.332 -static unsigned char page_pat_type(u64 pat_cr, unsigned long pte_flags)
   3.333 +static uint8_t page_pat_type(uint64_t pat_cr, uint32_t pte_flags)
   3.334  {
   3.335 -    int pat_entry;
   3.336 +    int32_t pat_entry;
   3.337  
   3.338      /* PCD/PWT -> bit 1/0 of PAT entry */
   3.339      pat_entry = ( pte_flags >> 3 ) & 0x3;
   3.340 @@ -474,19 +477,18 @@ static unsigned char page_pat_type(u64 p
   3.341      if ( pte_flags & _PAGE_PAT )
   3.342          pat_entry |= 4;
   3.343  
   3.344 -    return (unsigned char)pat_cr_2_paf(pat_cr, pat_entry);
   3.345 +    return (uint8_t)pat_cr_2_paf(pat_cr, pat_entry);
   3.346  }
   3.347  
   3.348  /*
   3.349   * Effective memory type for leaf page.
   3.350   */
   3.351 -static u8 effective_mm_type(
   3.352 -        struct mtrr_state *m,
   3.353 -        u64 pat,
   3.354 -        paddr_t gpa,
   3.355 -        unsigned long pte_flags)
   3.356 +static uint8_t effective_mm_type(struct mtrr_state *m,
   3.357 +                                 uint64_t pat,
   3.358 +                                 paddr_t gpa,
   3.359 +                                 uint32_t pte_flags)
   3.360  {
   3.361 -    unsigned char mtrr_mtype, pat_value, effective;
   3.362 +    uint8_t mtrr_mtype, pat_value, effective;
   3.363  
   3.364      mtrr_mtype = get_mtrr_type(m, gpa);
   3.365  
   3.366 @@ -499,7 +501,7 @@ static u8 effective_mm_type(
   3.367  
   3.368  static void init_mtrr_epat_tbl(void)
   3.369  {
   3.370 -    int i, j;
   3.371 +    int32_t i, j;
   3.372      /* set default value to an invalid type, just for checking conflict */
   3.373      memset(&mtrr_epat_tbl, INVALID_MEM_TYPE, sizeof(mtrr_epat_tbl));
   3.374  
   3.375 @@ -507,22 +509,22 @@ static void init_mtrr_epat_tbl(void)
   3.376      {
   3.377          for ( j = 0; j < PAT_TYPE_NUMS; j++ )
   3.378          {
   3.379 -            int tmp = mm_type_tbl[i][j];
   3.380 +            int32_t tmp = mm_type_tbl[i][j];
   3.381              if ( (tmp >= 0) && (tmp < MEMORY_NUM_TYPES) )
   3.382                  mtrr_epat_tbl[i][tmp] = j;
   3.383          }
   3.384      }
   3.385  }
   3.386  
   3.387 -u32 get_pat_flags(struct vcpu *v,
   3.388 -                  u32 gl1e_flags,
   3.389 -                  paddr_t gpaddr,
   3.390 -                  paddr_t spaddr)
   3.391 +uint32_t get_pat_flags(struct vcpu *v,
   3.392 +                       uint32_t gl1e_flags,
   3.393 +                       paddr_t gpaddr,
   3.394 +                       paddr_t spaddr)
   3.395  {
   3.396 -    u8 guest_eff_mm_type;
   3.397 -    u8 shadow_mtrr_type;
   3.398 -    u8 pat_entry_value;
   3.399 -    u64 pat = v->arch.hvm_vcpu.pat_cr;
   3.400 +    uint8_t guest_eff_mm_type;
   3.401 +    uint8_t shadow_mtrr_type;
   3.402 +    uint8_t pat_entry_value;
   3.403 +    uint64_t pat = v->arch.hvm_vcpu.pat_cr;
   3.404      struct mtrr_state *g = &v->arch.hvm_vcpu.mtrr;
   3.405  
   3.406      /* 1. Get the effective memory type of guest physical address,
   3.407 @@ -555,10 +557,10 @@ u32 get_pat_flags(struct vcpu *v,
   3.408  }
   3.409  
   3.410  /* Helper funtions for seting mtrr/pat */
   3.411 -bool_t pat_msr_set(u64 *pat, u64 msr_content)
   3.412 +bool_t pat_msr_set(uint64_t *pat, uint64_t msr_content)
   3.413  {
   3.414 -    u8 *value = (u8*)&msr_content;
   3.415 -    int i;
   3.416 +    uint8_t *value = (uint8_t*)&msr_content;
   3.417 +    int32_t i;
   3.418  
   3.419      if ( *pat != msr_content )
   3.420      {
   3.421 @@ -574,10 +576,10 @@ bool_t pat_msr_set(u64 *pat, u64 msr_con
   3.422      return 1;
   3.423  }
   3.424  
   3.425 -bool_t mtrr_def_type_msr_set(struct mtrr_state *m, u64 msr_content)
   3.426 +bool_t mtrr_def_type_msr_set(struct mtrr_state *m, uint64_t msr_content)
   3.427  {
   3.428 -    u8 def_type = msr_content & 0xff;
   3.429 -    u8 enabled = (msr_content >> 10) & 0x3;
   3.430 +    uint8_t def_type = msr_content & 0xff;
   3.431 +    uint8_t enabled = (msr_content >> 10) & 0x3;
   3.432  
   3.433      if ( unlikely(!(def_type == 0 || def_type == 1 || def_type == 4 ||
   3.434                      def_type == 5 || def_type == 6)) )
   3.435 @@ -599,14 +601,15 @@ bool_t mtrr_def_type_msr_set(struct mtrr
   3.436      return 1;
   3.437  }
   3.438  
   3.439 -bool_t mtrr_fix_range_msr_set(struct mtrr_state *m, int row, u64 msr_content)
   3.440 +bool_t mtrr_fix_range_msr_set(struct mtrr_state *m, uint32_t row,
   3.441 +                              uint64_t msr_content)
   3.442  {
   3.443 -    u64 *fixed_range_base = (u64 *)m->fixed_ranges;
   3.444 +    uint64_t *fixed_range_base = (uint64_t *)m->fixed_ranges;
   3.445  
   3.446      if ( fixed_range_base[row] != msr_content )
   3.447      {
   3.448 -        u8 *range = (u8*)&msr_content;
   3.449 -        int i, type;
   3.450 +        uint8_t *range = (uint8_t*)&msr_content;
   3.451 +        int32_t i, type;
   3.452  
   3.453          for ( i = 0; i < 8; i++ )
   3.454          {
   3.455 @@ -622,17 +625,18 @@ bool_t mtrr_fix_range_msr_set(struct mtr
   3.456      return 1;
   3.457  }
   3.458  
   3.459 -bool_t mtrr_var_range_msr_set(struct mtrr_state *m, u32 msr, u64 msr_content)
   3.460 +bool_t mtrr_var_range_msr_set(struct mtrr_state *m, uint32_t msr,
   3.461 +                              uint64_t msr_content)
   3.462  {
   3.463 -    u32 index;
   3.464 -    u64 msr_mask;
   3.465 -    u64 *var_range_base = (u64*)m->var_ranges;
   3.466 +    uint32_t index;
   3.467 +    uint64_t msr_mask;
   3.468 +    uint64_t *var_range_base = (uint64_t*)m->var_ranges;
   3.469  
   3.470      index = msr - MSR_IA32_MTRR_PHYSBASE0;
   3.471  
   3.472      if ( var_range_base[index] != msr_content )
   3.473      {
   3.474 -        u32 type = msr_content & 0xff;
   3.475 +        uint32_t type = msr_content & 0xff;
   3.476  
   3.477          msr_mask = (index & 1) ? phys_mask_msr_mask : phys_base_msr_mask;
   3.478  
   3.479 @@ -659,8 +663,8 @@ bool_t mtrr_pat_not_equal(struct vcpu *v
   3.480  {
   3.481      struct mtrr_state *md = &vd->arch.hvm_vcpu.mtrr;
   3.482      struct mtrr_state *ms = &vs->arch.hvm_vcpu.mtrr;
   3.483 -    int res;
   3.484 -    u8 num_var_ranges = (u8)md->mtrr_cap;
   3.485 +    int32_t res;
   3.486 +    uint8_t num_var_ranges = (uint8_t)md->mtrr_cap;
   3.487  
   3.488      /* Test fixed ranges. */
   3.489      res = memcmp(md->fixed_ranges, ms->fixed_ranges,
   3.490 @@ -708,10 +712,10 @@ void hvm_destroy_cacheattr_region_list(
   3.491      }
   3.492  }
   3.493  
   3.494 -int hvm_get_mem_pinned_cacheattr(
   3.495 +int32_t hvm_get_mem_pinned_cacheattr(
   3.496      struct domain *d,
   3.497 -    unsigned long guest_fn,
   3.498 -    unsigned int *type)
   3.499 +    uint64_t guest_fn,
   3.500 +    uint32_t *type)
   3.501  {
   3.502      struct hvm_mem_pinned_cacheattr_range *range;
   3.503  
   3.504 @@ -734,11 +738,11 @@ int hvm_get_mem_pinned_cacheattr(
   3.505      return 0;
   3.506  }
   3.507  
   3.508 -int hvm_set_mem_pinned_cacheattr(
   3.509 +int32_t hvm_set_mem_pinned_cacheattr(
   3.510      struct domain *d,
   3.511 -    unsigned long gfn_start,
   3.512 -    unsigned long gfn_end,
   3.513 -    unsigned int  type)
   3.514 +    uint64_t gfn_start,
   3.515 +    uint64_t gfn_end,
   3.516 +    uint32_t  type)
   3.517  {
   3.518      struct hvm_mem_pinned_cacheattr_range *range;
   3.519  
     4.1 --- a/xen/include/asm-x86/hvm/cacheattr.h	Fri Oct 26 09:58:43 2007 +0100
     4.2 +++ b/xen/include/asm-x86/hvm/cacheattr.h	Fri Oct 26 10:00:10 2007 +0100
     4.3 @@ -4,7 +4,7 @@
     4.4  struct hvm_mem_pinned_cacheattr_range {
     4.5      struct list_head list;
     4.6      uint64_t start, end;
     4.7 -    unsigned int type;
     4.8 +    uint32_t type;
     4.9  };
    4.10  
    4.11  void hvm_init_cacheattr_region_list(
    4.12 @@ -17,17 +17,17 @@ void hvm_destroy_cacheattr_region_list(
    4.13   * if yes, return 1, and set type to value in this range
    4.14   * if no,  return 0, and set type to 0
    4.15   */
    4.16 -int hvm_get_mem_pinned_cacheattr(
    4.17 +int32_t hvm_get_mem_pinned_cacheattr(
    4.18      struct domain *d,
    4.19 -    unsigned long guest_fn,
    4.20 -    unsigned int *type);
    4.21 +    uint64_t guest_fn,
    4.22 +    uint32_t *type);
    4.23  
    4.24  
    4.25  /* Set pinned caching type for a domain. */
    4.26 -int hvm_set_mem_pinned_cacheattr(
    4.27 +int32_t hvm_set_mem_pinned_cacheattr(
    4.28      struct domain *d,
    4.29 -    unsigned long gfn_start,
    4.30 -    unsigned long gfn_end,
    4.31 -    unsigned int  type);
    4.32 +    uint64_t gfn_start,
    4.33 +    uint64_t gfn_end,
    4.34 +    uint32_t  type);
    4.35  
    4.36  #endif /* __HVM_CACHEATTR_H__ */
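
A note on range_to_mtrr() above: per the Intel SDM requirement quoted in the diff, each variable MTRR must cover a power-of-2-sized range aligned to its own size, so the loop greedily peels off the largest chunk satisfying both constraints. A minimal sketch of that logic, using __builtin_ffs()/__builtin_clz() as stand-ins for Xen's ffs()/fls():

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t fls32(uint32_t x) { return x ? 32 - __builtin_clz(x) : 0; }

    int main(void)
    {
        uint32_t startk = 8, sizek = 24;  /* 24K at 8K: too big/misaligned for one MTRR */

        while ( sizek )
        {
            uint32_t max_align = (startk == 0) ? 32 : (uint32_t)__builtin_ffs(startk);
            uint32_t align = fls32(sizek) < max_align ? fls32(sizek) : max_align;
            uint32_t chunk = 1u << (align - 1);

            printf("chunk: base %uK size %uK\n", startk, chunk);  /* 8K@8K, then 16K@16K */
            startk += chunk;
            sizek  -= chunk;
        }
        return 0;
    }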
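
The PAT value that reset_vmsr() assembles byte-by-byte (WB, WT, UC-, UC, repeated) is the architectural power-on default. A quick check, assuming the usual PAT type encodings UC=0, WT=4, WB=6, UC-=7:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t pat = (uint64_t)6 |          /* PAT0: WB */
                       ((uint64_t)4 << 8) |   /* PAT1: WT */
                       ((uint64_t)7 << 16) |  /* PAT2: UC- */
                       ((uint64_t)0 << 24) |  /* PAT3: UC */
                       ((uint64_t)6 << 32) |  /* PAT4: WB */
                       ((uint64_t)4 << 40) |  /* PAT5: WT */
                       ((uint64_t)7 << 48) |  /* PAT6: UC- */
                       ((uint64_t)0 << 56);   /* PAT7: UC */

        printf("%#018llx\n", (unsigned long long)pat);  /* 0x0007040600070406 */
        return 0;
    }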
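
Finally, a hypothetical toolstack caller of the widened libxc entry point (the handle, domid, and frame range are made up; the range is taken to be guest frame numbers, matching hvm_set_mem_pinned_cacheattr() above, and type 0 is assumed to be uncachable per the MTRR type encoding):

    #include <stdint.h>
    #include "xenctrl.h"

    int pin_range_uc(int xc_handle, uint32_t domid)
    {
        uint64_t start = 0x100000ULL;  /* first pinned guest frame (illustrative) */
        uint64_t end   = 0x100400ULL;  /* last pinned guest frame (illustrative) */
        uint32_t type  = 0;            /* assumed: UC in the MTRR type encoding */

        /* With uint64_t arguments, no truncation even for frames above 4GiB. */
        return xc_domain_pin_memory_cacheattr(xc_handle, domid, start, end, type);
    }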