direct-io.hg

changeset 7598:c31edd72086d

Just some small code cleanup to shadow.c; no logic changed.

Signed-off-by: Xin Li <xin.b.li@intel.com>
author    kaf24@firebug.cl.cam.ac.uk
date      Wed Nov 02 11:18:51 2005 +0100 (2005-11-02)
parents   5ed53e973b83
children  03d51c0b0546
files     xen/arch/x86/shadow.c
line diff
     1.1 --- a/xen/arch/x86/shadow.c	Wed Nov 02 00:08:01 2005 +0100
     1.2 +++ b/xen/arch/x86/shadow.c	Wed Nov 02 11:18:51 2005 +0100
     1.3 @@ -1,19 +1,19 @@
     1.4  /******************************************************************************
     1.5 - * arch/x86/shadow_64.c
     1.6 - * 
     1.7 + * arch/x86/shadow.c
     1.8 + *
     1.9   * Copyright (c) 2005 Michael A Fetterman
    1.10   * Based on an earlier implementation by Ian Pratt et al
    1.11 - * 
    1.12 + *
    1.13   * This program is free software; you can redistribute it and/or modify
    1.14   * it under the terms of the GNU General Public License as published by
    1.15   * the Free Software Foundation; either version 2 of the License, or
    1.16   * (at your option) any later version.
    1.17 - * 
    1.18 + *
    1.19   * This program is distributed in the hope that it will be useful,
    1.20   * but WITHOUT ANY WARRANTY; without even the implied warranty of
    1.21   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    1.22   * GNU General Public License for more details.
    1.23 - * 
    1.24 + *
    1.25   * You should have received a copy of the GNU General Public License
    1.26   * along with this program; if not, write to the Free Software
    1.27   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    1.28 @@ -55,7 +55,6 @@ static void shadow_map_into_current(stru
    1.29      unsigned long va, unsigned int from, unsigned int to);
    1.30  static inline void validate_bl2e_change( struct domain *d,
    1.31      guest_root_pgentry_t *new_gle_p, pgentry_64_t *shadow_l3, int index);
    1.32 -
    1.33  #endif
    1.34  
    1.35  /********
    1.36 @@ -102,7 +101,6 @@ shadow_promote(struct domain *d, unsigne
    1.37          return 1;
    1.38  #endif
    1.39          return 0;
    1.40 -        
    1.41      }
    1.42  
    1.43      // To convert this page to use as a page table, the writable count
    1.44 @@ -490,12 +488,12 @@ static unsigned long shadow_l2_table(
    1.45           * We could proactively fill in PDEs for pages that are already
    1.46           * shadowed *and* where the guest PDE has _PAGE_ACCESSED set
    1.47           * (restriction required for coherence of the accessed bit). However,
    1.48 -         * we tried it and it didn't help performance. This is simpler. 
    1.49 +         * we tried it and it didn't help performance. This is simpler.
    1.50           */
    1.51          memset(spl2e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t));
    1.52  
    1.53          /* Install hypervisor and 2x linear p.t. mapings. */
    1.54 -        memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
    1.55 +        memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
    1.56                 &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
    1.57                 HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
    1.58  
    1.59 @@ -522,7 +520,7 @@ static unsigned long shadow_l2_table(
    1.60              //
    1.61              if ( !get_shadow_ref(hl2mfn) )
    1.62                  BUG();
    1.63 -            
    1.64 +
    1.65              spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
    1.66                  l2e_from_pfn(hl2mfn, __PAGE_HYPERVISOR);
    1.67          }
    1.68 @@ -532,7 +530,7 @@ static unsigned long shadow_l2_table(
    1.69      }
    1.70      else
    1.71      {
    1.72 -        memset(spl2e, 0, L2_PAGETABLE_ENTRIES*sizeof(l2_pgentry_t));        
    1.73 +        memset(spl2e, 0, L2_PAGETABLE_ENTRIES*sizeof(l2_pgentry_t));
    1.74      }
    1.75  
    1.76      unmap_domain_page(spl2e);
    1.77 @@ -543,7 +541,7 @@ static unsigned long shadow_l2_table(
    1.78  #endif
    1.79  
    1.80  static void shadow_map_l1_into_current_l2(unsigned long va)
    1.81 -{ 
    1.82 +{
    1.83      struct vcpu *v = current;
    1.84      struct domain *d = v->domain;
    1.85      l1_pgentry_t *spl1e;
    1.86 @@ -596,7 +594,7 @@ static void shadow_map_l1_into_current_l
    1.87  #if CONFIG_PAGING_LEVELS >=4
    1.88      if (d->arch.ops->guest_paging_levels == PAGING_L2)
    1.89      {
    1.90 -        /* for 32-bit VMX guest on 64-bit host, 
    1.91 +        /* for 32-bit VMX guest on 64-bit host,
    1.92           * need update two L2 entries each time
    1.93           */
    1.94          if ( !get_shadow_ref(sl1mfn))
    1.95 @@ -624,7 +622,7 @@ static void shadow_map_l1_into_current_l
    1.96          l1_pgentry_t sl1e;
    1.97          int index = guest_l1_table_offset(va);
    1.98          int min = 1, max = 0;
    1.99 -        
   1.100 +
   1.101          unsigned long entries, pt_va;
   1.102          l1_pgentry_t tmp_sl1e;
   1.103          guest_l1_pgentry_t tmp_gl1e;//Prepare for double compile
   1.104 @@ -790,7 +788,7 @@ shadow_alloc_oos_entry(struct domain *d)
   1.105  
   1.106          /* Record the allocation block so it can be correctly freed later. */
   1.107          d->arch.out_of_sync_extras_count++;
   1.108 -        *((struct out_of_sync_entry **)&extra[out_of_sync_extra_size]) = 
   1.109 +        *((struct out_of_sync_entry **)&extra[out_of_sync_extra_size]) =
   1.110              d->arch.out_of_sync_extras;
   1.111          d->arch.out_of_sync_extras = &extra[0];
   1.112  
   1.113 @@ -1020,7 +1018,7 @@ static int is_out_of_sync(struct vcpu *v
   1.114  {
   1.115      struct domain *d = v->domain;
   1.116  #if defined (__x86_64__)
   1.117 -    unsigned long l2mfn = ((v->arch.flags & TF_kernel_mode)? 
   1.118 +    unsigned long l2mfn = ((v->arch.flags & TF_kernel_mode)?
   1.119                            pagetable_get_pfn(v->arch.guest_table) :
   1.120                            pagetable_get_pfn(v->arch.guest_table_user));
   1.121  #else
   1.122 @@ -1082,7 +1080,7 @@ static int is_out_of_sync(struct vcpu *v
   1.123          return 1;
   1.124  
   1.125      __guest_get_l2e(v, va, &l2e);
   1.126 -    if ( !(guest_l2e_get_flags(l2e) & _PAGE_PRESENT) || 
   1.127 +    if ( !(guest_l2e_get_flags(l2e) & _PAGE_PRESENT) ||
   1.128           (guest_l2e_get_flags(l2e) & _PAGE_PSE))
   1.129          return 0;
   1.130  
   1.131 @@ -1155,7 +1153,7 @@ decrease_writable_pte_prediction(struct 
   1.132  }
   1.133  
   1.134  static int fix_entry(
   1.135 -    struct domain *d, 
   1.136 +    struct domain *d,
   1.137      l1_pgentry_t *pt, u32 *found, int is_l1_shadow, u32 max_refs_to_find)
   1.138  {
   1.139      l1_pgentry_t old = *pt;
   1.140 @@ -1194,19 +1192,19 @@ static u32 remove_all_write_access_in_pt
   1.141      match = l1e_from_pfn(readonly_gmfn, flags);
   1.142  
   1.143      if ( shadow_mode_external(d) ) {
   1.144 -        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask) 
   1.145 +        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask)
   1.146              >> PGT_va_shift;
   1.147  
   1.148          if ( (i >= 0 && i <= L1_PAGETABLE_ENTRIES) &&
   1.149 -             !l1e_has_changed(pt[i], match, flags) && 
   1.150 +             !l1e_has_changed(pt[i], match, flags) &&
   1.151               fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) &&
   1.152               !prediction )
   1.153              goto out;
   1.154      }
   1.155 - 
   1.156 +
   1.157      for (i = 0; i < GUEST_L1_PAGETABLE_ENTRIES; i++)
   1.158      {
   1.159 -        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && 
   1.160 +        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) &&
   1.161               fix_entry(d, &pt[i], &found, is_l1_shadow, max_refs_to_find) )
   1.162              break;
   1.163      }
   1.164 @@ -1255,7 +1253,7 @@ static int remove_all_write_access(
   1.165      }
   1.166  
   1.167      if ( shadow_mode_external(d) ) {
   1.168 -        if (write_refs-- == 0) 
   1.169 +        if (write_refs-- == 0)
   1.170              return 0;
   1.171  
   1.172           // Use the back pointer to locate the shadow page that can contain
   1.173 @@ -1275,7 +1273,7 @@ static int remove_all_write_access(
   1.174          a = &d->arch.shadow_ht[i];
   1.175          while ( a && a->gpfn_and_flags )
   1.176          {
   1.177 -            if ( (a->gpfn_and_flags & PGT_type_mask) == PGT_l1_shadow 
   1.178 +            if ( (a->gpfn_and_flags & PGT_type_mask) == PGT_l1_shadow
   1.179  #if CONFIG_PAGING_LEVELS >= 4
   1.180                || (a->gpfn_and_flags & PGT_type_mask) == PGT_fl1_shadow
   1.181  #endif
   1.182 @@ -1384,10 +1382,10 @@ static int resync_all(struct domain *d, 
   1.183                  if ( (i < min_snapshot) || (i > max_snapshot) ||
   1.184                       guest_l1e_has_changed(guest1[i], snapshot1[i], PAGE_FLAG_MASK) )
   1.185                  {
   1.186 -                    int error; 
   1.187 +                    int error;
   1.188  
   1.189                      error = validate_pte_change(d, guest1[i], &shadow1[i]);
   1.190 -                    if ( error ==  -1 ) 
   1.191 +                    if ( error ==  -1 )
   1.192                          unshadow_l1 = 1;
   1.193                      else {
   1.194                          need_flush |= error;
   1.195 @@ -1474,7 +1472,7 @@ static int resync_all(struct domain *d, 
   1.196              l2_pgentry_t *guest2 = guest;
   1.197              l2_pgentry_t *snapshot2 = snapshot;
   1.198              l1_pgentry_t *shadow2 = shadow;
   1.199 -            
   1.200 +
   1.201              ASSERT(shadow_mode_write_all(d));
   1.202              BUG_ON(!shadow_mode_refcounts(d)); // not yet implemented
   1.203  
   1.204 @@ -1634,7 +1632,7 @@ static void sync_all(struct domain *d)
   1.205               !shadow_get_page_from_l1e(npte, d) )
   1.206              BUG();
   1.207          *ppte = npte;
   1.208 -        set_guest_back_ptr(d, npte, (entry->writable_pl1e) >> PAGE_SHIFT, 
   1.209 +        set_guest_back_ptr(d, npte, (entry->writable_pl1e) >> PAGE_SHIFT,
   1.210                             (entry->writable_pl1e & ~PAGE_MASK)/sizeof(l1_pgentry_t));
   1.211          shadow_put_page_from_l1e(opte, d);
   1.212  
   1.213 @@ -1719,7 +1717,7 @@ static inline int l1pte_write_fault(
   1.214  
   1.215  static inline int l1pte_read_fault(
   1.216      struct domain *d, guest_l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p)
   1.217 -{ 
   1.218 +{
   1.219      guest_l1_pgentry_t gpte = *gpte_p;
   1.220      l1_pgentry_t spte = *spte_p;
   1.221      unsigned long pfn = l1e_get_pfn(gpte);
   1.222 @@ -1761,7 +1759,7 @@ static int shadow_fault_32(unsigned long
   1.223      SH_VVLOG("shadow_fault( va=%lx, code=%lu )",
   1.224               va, (unsigned long)regs->error_code);
   1.225      perfc_incrc(shadow_fault_calls);
   1.226 -    
   1.227 +
   1.228      check_pagetable(v, "pre-sf");
   1.229  
   1.230      /*
   1.231 @@ -1804,7 +1802,7 @@ static int shadow_fault_32(unsigned long
   1.232      }
   1.233  
   1.234      /* Write fault? */
   1.235 -    if ( regs->error_code & 2 )  
   1.236 +    if ( regs->error_code & 2 )
   1.237      {
   1.238          int allow_writes = 0;
   1.239  
   1.240 @@ -1818,7 +1816,7 @@ static int shadow_fault_32(unsigned long
   1.241              else
   1.242              {
   1.243                  /* Write fault on a read-only mapping. */
   1.244 -                SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%" PRIpte ")", 
   1.245 +                SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%" PRIpte ")",
   1.246                           l1e_get_intpte(gpte));
   1.247                  perfc_incrc(shadow_fault_bail_ro_mapping);
   1.248                  goto fail;
   1.249 @@ -1878,7 +1876,7 @@ static int shadow_fault_32(unsigned long
   1.250      check_pagetable(v, "post-sf");
   1.251      return EXCRET_fault_fixed;
   1.252  
   1.253 - fail:
   1.254 +fail:
   1.255      shadow_unlock(d);
   1.256      return 0;
   1.257  }
   1.258 @@ -1895,7 +1893,7 @@ static int do_update_va_mapping(unsigned
   1.259      shadow_lock(d);
   1.260  
   1.261      //printk("%s(va=%p, val=%p)\n", __func__, (void *)va, (void *)l1e_get_intpte(val));
   1.262 -        
   1.263 +
   1.264      // This is actually overkill - we don't need to sync the L1 itself,
   1.265      // just everything involved in getting to this L1 (i.e. we need
   1.266      // linear_pg_table[l1_linear_offset(va)] to be in sync)...
   1.267 @@ -1925,7 +1923,7 @@ static int do_update_va_mapping(unsigned
   1.268   * and what it uses to get/maintain that mapping.
   1.269   *
   1.270   * SHADOW MODE:      none         enable         translate         external
   1.271 - * 
   1.272 + *
   1.273   * 4KB things:
   1.274   * guest_vtable    lin_l2     mapped per gl2   lin_l2 via hl2   mapped per gl2
   1.275   * shadow_vtable     n/a         sh_lin_l2       sh_lin_l2      mapped per gl2
   1.276 @@ -1950,7 +1948,7 @@ static void shadow_update_pagetables(str
   1.277  {
   1.278      struct domain *d = v->domain;
   1.279  #if defined (__x86_64__)
   1.280 -    unsigned long gmfn = ((v->arch.flags & TF_kernel_mode)? 
   1.281 +    unsigned long gmfn = ((v->arch.flags & TF_kernel_mode)?
   1.282                            pagetable_get_pfn(v->arch.guest_table) :
   1.283                            pagetable_get_pfn(v->arch.guest_table_user));
   1.284  #else
   1.285 @@ -2006,7 +2004,7 @@ static void shadow_update_pagetables(str
   1.286      /*
   1.287       * arch.shadow_vtable
   1.288       */
   1.289 -    if ( max_mode == SHM_external 
   1.290 +    if ( max_mode == SHM_external
   1.291  #if CONFIG_PAGING_LEVELS >=4
   1.292           || max_mode & SHM_enable
   1.293  #endif
   1.294 @@ -2241,7 +2239,7 @@ static int check_pte(
   1.295                 page_table_page);
   1.296          FAIL("RW2 coherence");
   1.297      }
   1.298 - 
   1.299 +
   1.300      if ( eff_guest_mfn == shadow_mfn )
   1.301      {
   1.302          if ( level > 1 )
   1.303 @@ -2291,7 +2289,7 @@ static int check_l1_table(
   1.304          errors += check_pte(v, p_guest+i, p_shadow+i,
   1.305                              p_snapshot ? p_snapshot+i : NULL,
   1.306                              1, l2_idx, i);
   1.307 - 
   1.308 +
   1.309      unmap_domain_page(p_shadow);
   1.310      unmap_domain_page(p_guest);
   1.311      if ( p_snapshot )
   1.312 @@ -2327,11 +2325,11 @@ static int check_l2_table(
   1.313  
   1.314  #if 0
   1.315      if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
   1.316 -                &gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
   1.317 +                &gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
   1.318                  ((SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT) -
   1.319                   DOMAIN_ENTRIES_PER_L2_PAGETABLE) * sizeof(l2_pgentry_t)) )
   1.320      {
   1.321 -        for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE; 
   1.322 +        for ( i = DOMAIN_ENTRIES_PER_L2_PAGETABLE;
   1.323                i < (SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT);
   1.324                i++ )
   1.325              printk("+++ (%d) %lx %lx\n",i,
   1.326 @@ -2339,7 +2337,7 @@ static int check_l2_table(
   1.327          FAILPT("hypervisor entries inconsistent");
   1.328      }
   1.329  
   1.330 -    if ( (l2_pgentry_val(spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]) != 
   1.331 +    if ( (l2_pgentry_val(spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
   1.332            l2_pgentry_val(gpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT])) )
   1.333          FAILPT("hypervisor linear map inconsistent");
   1.334  #endif
   1.335 @@ -2399,7 +2397,7 @@ static int _check_pagetable(struct vcpu 
   1.336  {
   1.337      struct domain *d = v->domain;
   1.338  #if defined (__x86_64__)
   1.339 -    pagetable_t pt = ((v->arch.flags & TF_kernel_mode)? 
   1.340 +    pagetable_t pt = ((v->arch.flags & TF_kernel_mode)?
   1.341                        pagetable_get_pfn(v->arch.guest_table) :
   1.342                        pagetable_get_pfn(v->arch.guest_table_user));
   1.343  #else
   1.344 @@ -2434,7 +2432,7 @@ static int _check_pagetable(struct vcpu 
   1.345          oos_pdes = 1;
   1.346          ASSERT(ptbase_mfn);
   1.347      }
   1.348 - 
   1.349 +
   1.350      errors += check_l2_table(v, ptbase_mfn, smfn, oos_pdes);
   1.351  
   1.352      gpl2e = (l2_pgentry_t *) map_domain_page(ptbase_mfn);
   1.353 @@ -2565,7 +2563,6 @@ static unsigned long gva_to_gpa_pae(unsi
   1.354   * The code is for 32-bit VMX gues on 64-bit host.
   1.355   * To sync guest L2.
   1.356   */
   1.357 -
   1.358  static inline void
   1.359  validate_bl2e_change(
   1.360    struct domain *d,
   1.361 @@ -2596,7 +2593,6 @@ validate_bl2e_change(
   1.362              entry_from_pfn(sl1mfn + 1, entry_get_flags(sl2_p[sl2_idx]));
   1.363      }
   1.364      unmap_domain_page(sl2_p);
   1.365 -
   1.366  }
   1.367  
   1.368  /*
   1.369 @@ -2629,9 +2625,8 @@ static inline unsigned long init_bl2(l4_
   1.370      }
   1.371  
   1.372      unmap_domain_page(spl4e);
   1.373 +
   1.374      return smfn;
   1.375 -
   1.376 -
   1.377  }
   1.378  
   1.379  static unsigned long shadow_l4_table(
   1.380 @@ -2664,7 +2659,7 @@ static unsigned long shadow_l4_table(
   1.381           * We could proactively fill in PDEs for pages that are already
   1.382           * shadowed *and* where the guest PDE has _PAGE_ACCESSED set
   1.383           * (restriction required for coherence of the accessed bit). However,
   1.384 -         * we tried it and it didn't help performance. This is simpler. 
   1.385 +         * we tried it and it didn't help performance. This is simpler.
   1.386           */
   1.387          memset(spl4e, 0, L4_PAGETABLE_ENTRIES*sizeof(l4_pgentry_t));
   1.388  
   1.389 @@ -2757,7 +2752,7 @@ static int get_shadow_mfn(struct domain 
   1.390      }
   1.391  }
   1.392  
   1.393 -static void shadow_map_into_current(struct vcpu *v, 
   1.394 +static void shadow_map_into_current(struct vcpu *v,
   1.395    unsigned long va, unsigned int from, unsigned int to)
   1.396  {
   1.397      pgentry_64_t gle, sle;
   1.398 @@ -2768,7 +2763,7 @@ static void shadow_map_into_current(stru
   1.399          return;
   1.400      }
   1.401  
   1.402 -    __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | to); 
   1.403 +    __rw_entry(v, va, &gle, GUEST_ENTRY | GET_ENTRY | to);
   1.404      ASSERT(entry_get_flags(gle) & _PAGE_PRESENT);
   1.405      gpfn = entry_get_pfn(gle);
   1.406  
   1.407 @@ -2784,7 +2779,7 @@ static void shadow_map_into_current(stru
   1.408  /*
   1.409   * shadow_set_lxe should be put in shadow.h
   1.410   */
   1.411 -static void shadow_set_l2e_64(unsigned long va, l2_pgentry_t sl2e, 
   1.412 +static void shadow_set_l2e_64(unsigned long va, l2_pgentry_t sl2e,
   1.413    int create_l2_shadow, int put_ref_check)
   1.414  {
   1.415      struct vcpu *v = current;
   1.416 @@ -2934,11 +2929,11 @@ static inline int l2e_rw_fault(
   1.417          sl2e = l2e_empty();
   1.418  
   1.419      l1_mfn = ___shadow_status(d, start_gpfn | nx, PGT_fl1_shadow);
   1.420 -    
   1.421 +
   1.422      /* Check the corresponding l2e */
   1.423      if (l1_mfn) {
   1.424          /* Why it is PRESENT?*/
   1.425 -        if ((l2e_get_flags(sl2e) & _PAGE_PRESENT) && 
   1.426 +        if ((l2e_get_flags(sl2e) & _PAGE_PRESENT) &&
   1.427                  l2e_get_pfn(sl2e) == l1_mfn) {
   1.428              ESH_LOG("sl2e PRSENT bit is set: %lx, l1_mfn = %lx\n", l2e_get_pfn(sl2e), l1_mfn);
   1.429          } else {
   1.430 @@ -2985,7 +2980,7 @@ static inline int l2e_rw_fault(
   1.431          sl1e = l1e_from_pfn(mfn, l2e_get_flags(tmp_l2e));
   1.432  
   1.433          if (!rw) {
   1.434 -            if ( shadow_mode_log_dirty(d) || 
   1.435 +            if ( shadow_mode_log_dirty(d) ||
   1.436                !(l2e_get_flags(gl2e) & _PAGE_DIRTY) || mfn_is_page_table(mfn) )
   1.437              {
   1.438                  l1e_remove_flags(sl1e, _PAGE_RW);
   1.439 @@ -3034,7 +3029,7 @@ static inline int l2e_rw_fault(
   1.440   */
   1.441  #if defined( GUEST_PGENTRY_32 )
   1.442  static inline int guest_page_fault(struct vcpu *v,
   1.443 -  unsigned long va, unsigned int error_code, 
   1.444 +  unsigned long va, unsigned int error_code,
   1.445    guest_l2_pgentry_t *gpl2e, guest_l1_pgentry_t *gpl1e)
   1.446  {
   1.447      /* The following check for 32-bit guest on 64-bit host */
   1.448 @@ -3076,7 +3071,7 @@ static inline int guest_page_fault(struc
   1.449  }
   1.450  #else
   1.451  static inline int guest_page_fault(struct vcpu *v,
   1.452 -  unsigned long va, unsigned int error_code, 
   1.453 +  unsigned long va, unsigned int error_code,
   1.454    guest_l2_pgentry_t *gpl2e, guest_l1_pgentry_t *gpl1e)
   1.455  {
   1.456      struct domain *d = v->domain;
   1.457 @@ -3144,7 +3139,7 @@ static int shadow_fault_64(unsigned long
   1.458  
   1.459      perfc_incrc(shadow_fault_calls);
   1.460  
   1.461 -    ESH_LOG("<shadow_fault_64> va=%lx,  rip = %lx, error code = %x\n", 
   1.462 +    ESH_LOG("<shadow_fault_64> va=%lx,  rip = %lx, error code = %x\n",
   1.463              va, regs->eip, regs->error_code);
   1.464  
   1.465      /*
   1.466 @@ -3166,12 +3161,12 @@ static int shadow_fault_64(unsigned long
   1.467              v, va, regs->error_code, &gl2e, &gl1e) ) {
   1.468          goto fail;
   1.469      }
   1.470 -    
   1.471 +
   1.472      if ( unlikely(!(guest_l2e_get_flags(gl2e) & _PAGE_PSE)) ) {
   1.473          /*
   1.474           * Handle 4K pages here
   1.475           */
   1.476 -        
   1.477 +
   1.478          /* Write fault? */
   1.479          if ( regs->error_code & 2 ) {
   1.480              if ( !l1pte_write_fault(v, &gl1e, &sl1e, va) ) {
   1.481 @@ -3194,7 +3189,7 @@ static int shadow_fault_64(unsigned long
   1.482           */
   1.483           if ( unlikely(shadow_mode_log_dirty(d)) )
   1.484              __mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gl2e)));
   1.485 - 
   1.486 +
   1.487      } else {
   1.488          /*
   1.489           * Handle 2M pages here
   1.490 @@ -3262,7 +3257,7 @@ static unsigned long gva_to_gpa_64(unsig
   1.491  
   1.492      if (guest_page_fault(v, gva, 0, &gl2e, &gl1e))
   1.493          return 0;
   1.494 -    
   1.495 +
   1.496      if (guest_l2e_get_flags(gl2e) & _PAGE_PSE)
   1.497          gpa = guest_l2e_get_paddr(gl2e) + (gva & ((1 << GUEST_L2_PAGETABLE_SHIFT) - 1));
   1.498      else
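
Nearly every hunk above strips trailing whitespace or drops a stray blank line. As an illustrative aside, not part of the changeset, here is a minimal C sketch of that kind of cleanup: it reads one file and prints it to stdout with trailing spaces and tabs removed (the 4096-byte line limit is an assumption for the sketch).

/*
 * Illustrative sketch only, not part of the changeset: strip trailing
 * spaces and tabs from each line of a file and print the result to
 * stdout.  Assumes no line is longer than 4096 bytes.
 */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
    char buf[4096];
    FILE *in;

    if ( argc != 2 )
    {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }
    if ( (in = fopen(argv[1], "r")) == NULL )
    {
        perror(argv[1]);
        return 1;
    }
    while ( fgets(buf, sizeof(buf), in) != NULL )
    {
        size_t len = strlen(buf);
        /* Drop the newline plus any trailing spaces or tabs... */
        while ( len > 0 && (buf[len-1] == '\n' || buf[len-1] == ' ' ||
                            buf[len-1] == '\t') )
            len--;
        buf[len] = '\0';
        puts(buf); /* ...and puts() re-appends a single newline. */
    }
    fclose(in);
    return 0;
}

Redirecting its output to a temporary file and renaming over the original would reproduce the bulk of this diff; the handful of deleted blank lines were evidently removed by hand.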