ia64/xen-unstable

changeset 15723:7953164cebb6

xen: Clean up some paging files: remove tabs and trailing spaces.
Signed-off-by: Xin Li <xin.b.li@intel.com>
author kfraser@localhost.localdomain
date Tue Aug 07 09:07:29 2007 +0100 (2007-08-07)
parents ff2dae3ebb1d
children cd51fa91956b 0f541efbb6d6 123ad31e9c3b
files xen/arch/x86/mm/hap/guest_walk.c xen/arch/x86/mm/hap/hap.c xen/arch/x86/mm/p2m.c xen/arch/x86/mm/paging.c xen/include/asm-x86/domain.h xen/include/asm-x86/hap.h
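
All six files receive the same purely mechanical treatment: trailing spaces and tabs are dropped, and a few tab-indented blocks in p2m.c and paging.c are re-indented with spaces. As a rough sketch (not part of this changeset, which was prepared by hand), a minimal C filter along the following lines can catch the trailing-whitespace half of such cleanups before commit; the file name strip-ws.c and the fixed line buffer are arbitrary assumptions here:

    /* strip-ws.c: drop trailing spaces and tabs from each input line.
     * Hypothetical helper for illustration only; note it does not
     * re-indent tab-indented lines, which this changeset also does. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[4096];

        while ( fgets(line, sizeof(line), stdin) != NULL )
        {
            size_t len = strlen(line);
            int had_nl = (len > 0 && line[len - 1] == '\n');

            if ( had_nl )
                line[--len] = '\0';

            /* Trim trailing spaces and tabs. */
            while ( len > 0 &&
                    (line[len - 1] == ' ' || line[len - 1] == '\t') )
                line[--len] = '\0';

            fputs(line, stdout);
            if ( had_nl )
                fputc('\n', stdout);
        }

        return 0;
    }

Typical use against a working copy: cc -o strip-ws strip-ws.c && ./strip-ws < hap.c > hap.c.new, then diff the result before committing.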
line diff
     1.1 --- a/xen/arch/x86/mm/hap/guest_walk.c	Tue Aug 07 09:06:38 2007 +0100
     1.2 +++ b/xen/arch/x86/mm/hap/guest_walk.c	Tue Aug 07 09:07:29 2007 +0100
     1.3 @@ -84,7 +84,7 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
     1.4          mfn = get_mfn_from_gpfn(gpfn);
     1.5          if ( mfn == INVALID_MFN )
     1.6          {
     1.7 -            HAP_PRINTK("bad pfn=0x%lx from gva=0x%lx at lev%d\n", gpfn, gva, 
     1.8 +            HAP_PRINTK("bad pfn=0x%lx from gva=0x%lx at lev%d\n", gpfn, gva,
     1.9                         lev);
    1.10              success = 0;
    1.11              break;
     2.1 --- a/xen/arch/x86/mm/hap/hap.c	Tue Aug 07 09:06:38 2007 +0100
     2.2 +++ b/xen/arch/x86/mm/hap/hap.c	Tue Aug 07 09:07:29 2007 +0100
     2.3 @@ -73,7 +73,7 @@ int hap_disable_log_dirty(struct domain 
     2.4      hap_unlock(d);
     2.5  
     2.6      /* set l1e entries of P2M table with normal mode */
     2.7 -    p2m_set_flags_global(d, __PAGE_HYPERVISOR|_PAGE_USER);    
     2.8 +    p2m_set_flags_global(d, __PAGE_HYPERVISOR|_PAGE_USER);
     2.9      return 0;
    2.10  }
    2.11  
    2.12 @@ -111,7 +111,7 @@ static struct page_info *hap_alloc(struc
    2.13  
    2.14  static void hap_free(struct domain *d, mfn_t mfn)
    2.15  {
    2.16 -    struct page_info *pg = mfn_to_page(mfn); 
    2.17 +    struct page_info *pg = mfn_to_page(mfn);
    2.18  
    2.19      ASSERT(hap_locked_by_me(d));
    2.20  
    2.21 @@ -128,7 +128,7 @@ static struct page_info *hap_alloc_p2m_p
    2.22  
    2.23  #if CONFIG_PAGING_LEVELS == 3
    2.24      /* Under PAE mode, top-level P2M table should be allocated below 4GB space
    2.25 -     * because the size of h_cr3 is only 32-bit. We use alloc_domheap_pages to 
    2.26 +     * because the size of h_cr3 is only 32-bit. We use alloc_domheap_pages to
    2.27       * force this requirement, and exchange the guaranteed 32-bit-clean
    2.28       * page for the one we just hap_alloc()ed. */
    2.29      if ( d->arch.paging.hap.p2m_pages == 0
    2.30 @@ -166,9 +166,9 @@ void hap_free_p2m_page(struct domain *d,
    2.31          HAP_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
    2.32                    pg->count_info, pg->u.inuse.type_info);
    2.33      pg->count_info = 0;
    2.34 -    /* Free should not decrement domain's total allocation, since 
    2.35 +    /* Free should not decrement domain's total allocation, since
    2.36       * these pages were allocated without an owner. */
    2.37 -    page_set_owner(pg, NULL); 
    2.38 +    page_set_owner(pg, NULL);
    2.39      free_domheap_page(pg);
    2.40      d->arch.paging.hap.p2m_pages--;
    2.41      ASSERT(d->arch.paging.hap.p2m_pages >= 0);
    2.42 @@ -221,7 +221,7 @@ hap_set_allocation(struct domain *d, uns
    2.43              pg->count_info = 0;
    2.44              free_domheap_page(pg);
    2.45          }
    2.46 -        
    2.47 +
    2.48          /* Check to see if we need to yield and try again */
    2.49          if ( preempted && hypercall_preempt_check() )
    2.50          {
    2.51 @@ -275,7 +275,7 @@ static void hap_install_xen_entries_in_l
    2.52  
    2.53      l2e = hap_map_domain_page(l2hmfn);
    2.54      ASSERT(l2e != NULL);
    2.55 -    
    2.56 +
    2.57      /* Copy the common Xen mappings from the idle domain */
    2.58      memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
    2.59             &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
    2.60 @@ -318,7 +318,7 @@ static void hap_install_xen_entries_in_l
    2.61  
    2.62      l2e = hap_map_domain_page(l2mfn);
    2.63      ASSERT(l2e != NULL);
    2.64 -    
    2.65 +
    2.66      /* Copy the common Xen mappings from the idle domain */
    2.67      memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT],
    2.68             &idle_pg_table[L2_PAGETABLE_FIRST_XEN_SLOT],
    2.69 @@ -362,7 +362,7 @@ static mfn_t hap_make_monitor_table(stru
    2.70      }
    2.71  #elif CONFIG_PAGING_LEVELS == 3
    2.72      {
    2.73 -        mfn_t m3mfn, m2mfn; 
    2.74 +        mfn_t m3mfn, m2mfn;
    2.75          l3_pgentry_t *l3e;
    2.76          l2_pgentry_t *l2e;
    2.77          int i;
    2.78 @@ -384,8 +384,8 @@ static mfn_t hap_make_monitor_table(stru
    2.79          l2e = hap_map_domain_page(m2mfn);
    2.80          for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
    2.81              l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
    2.82 -                (l3e_get_flags(l3e[i]) & _PAGE_PRESENT) 
    2.83 -                ? l2e_from_pfn(l3e_get_pfn(l3e[i]), __PAGE_HYPERVISOR) 
    2.84 +                (l3e_get_flags(l3e[i]) & _PAGE_PRESENT)
    2.85 +                ? l2e_from_pfn(l3e_get_pfn(l3e[i]), __PAGE_HYPERVISOR)
    2.86                  : l2e_empty();
    2.87          hap_unmap_domain_page(l2e);
    2.88          hap_unmap_domain_page(l3e);
    2.89 @@ -536,7 +536,7 @@ void hap_teardown(struct domain *d)
    2.90                        d->arch.paging.hap.p2m_pages);
    2.91          ASSERT(d->arch.paging.hap.total_pages == 0);
    2.92      }
    2.93 -    
    2.94 +
    2.95      d->arch.paging.mode &= ~PG_log_dirty;
    2.96  
    2.97      hap_unlock(d);
    2.98 @@ -555,7 +555,7 @@ int hap_domctl(struct domain *d, xen_dom
    2.99          hap_unlock(d);
   2.100          if ( preempted )
   2.101              /* Not finished.  Set up to re-run the call. */
   2.102 -            rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h", 
   2.103 +            rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h",
   2.104                                                 u_domctl);
   2.105          else
   2.106              /* Finished.  Return the new allocation */
   2.107 @@ -578,11 +578,11 @@ void hap_vcpu_init(struct vcpu *v)
   2.108  /************************************************/
   2.109  /*          HAP PAGING MODE FUNCTIONS           */
   2.110  /************************************************/
   2.111 -/* 
   2.112 +/*
   2.113   * HAP guests can handle page faults (in the guest page tables) without
   2.114   * needing any action from Xen, so we should not be intercepting them.
   2.115   */
   2.116 -static int hap_page_fault(struct vcpu *v, unsigned long va, 
   2.117 +static int hap_page_fault(struct vcpu *v, unsigned long va,
   2.118                            struct cpu_user_regs *regs)
   2.119  {
   2.120      HAP_ERROR("Intercepted a guest #PF (%u:%u) with HAP enabled.\n",
   2.121 @@ -591,9 +591,9 @@ static int hap_page_fault(struct vcpu *v
   2.122      return 0;
   2.123  }
   2.124  
   2.125 -/* 
   2.126 +/*
   2.127   * HAP guests can handle invlpg without needing any action from Xen, so
   2.128 - * should not be intercepting it. 
   2.129 + * should not be intercepting it.
   2.130   */
   2.131  static int hap_invlpg(struct vcpu *v, unsigned long va)
   2.132  {
   2.133 @@ -649,7 +649,7 @@ static void hap_update_paging_modes(stru
   2.134  }
   2.135  
   2.136  #if CONFIG_PAGING_LEVELS == 3
   2.137 -static void p2m_install_entry_in_monitors(struct domain *d, l3_pgentry_t *l3e) 
   2.138 +static void p2m_install_entry_in_monitors(struct domain *d, l3_pgentry_t *l3e)
   2.139  /* Special case, only used for PAE hosts: update the mapping of the p2m
   2.140   * table.  This is trivial in other paging modes (one top-level entry
   2.141   * points to the top-level p2m, no maintenance needed), but PAE makes
   2.142 @@ -660,13 +660,13 @@ static void p2m_install_entry_in_monitor
   2.143      l2_pgentry_t *ml2e;
   2.144      struct vcpu *v;
   2.145      unsigned int index;
   2.146 -    
   2.147 +
   2.148      index = ((unsigned long)l3e & ~PAGE_MASK) / sizeof(l3_pgentry_t);
   2.149      ASSERT(index < MACHPHYS_MBYTES>>1);
   2.150 -    
   2.151 +
   2.152      for_each_vcpu ( d, v )
   2.153      {
   2.154 -        if ( pagetable_get_pfn(v->arch.monitor_table) == 0 ) 
   2.155 +        if ( pagetable_get_pfn(v->arch.monitor_table) == 0 )
   2.156              continue;
   2.157  
   2.158          ASSERT(paging_mode_external(v->domain));
   2.159 @@ -689,7 +689,7 @@ static void p2m_install_entry_in_monitor
   2.160  }
   2.161  #endif
   2.162  
   2.163 -static void 
   2.164 +static void
   2.165  hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
   2.166                      mfn_t table_mfn, l1_pgentry_t new, unsigned int level)
   2.167  {
   2.168 @@ -698,12 +698,12 @@ hap_write_p2m_entry(struct vcpu *v, unsi
   2.169      safe_write_pte(p, new);
   2.170  #if CONFIG_PAGING_LEVELS == 3
   2.171      /* install P2M in monitor table for PAE Xen */
   2.172 -    if ( level == 3 ) 
   2.173 +    if ( level == 3 )
   2.174          /* We have written to the p2m l3: need to sync the per-vcpu
   2.175           * copies of it in the monitor tables */
   2.176          p2m_install_entry_in_monitors(v->domain, (l3_pgentry_t *)p);
   2.177  #endif
   2.178 -    
   2.179 +
   2.180      hap_unlock(v->domain);
   2.181  }
   2.182  
   2.183 @@ -715,7 +715,7 @@ static unsigned long hap_gva_to_gfn_real
   2.184  
   2.185  /* Entry points into this mode of the hap code. */
   2.186  struct paging_mode hap_paging_real_mode = {
   2.187 -    .page_fault             = hap_page_fault, 
   2.188 +    .page_fault             = hap_page_fault,
   2.189      .invlpg                 = hap_invlpg,
   2.190      .gva_to_gfn             = hap_gva_to_gfn_real_mode,
   2.191      .update_cr3             = hap_update_cr3,
   2.192 @@ -725,7 +725,7 @@ struct paging_mode hap_paging_real_mode 
   2.193  };
   2.194  
   2.195  struct paging_mode hap_paging_protected_mode = {
   2.196 -    .page_fault             = hap_page_fault, 
   2.197 +    .page_fault             = hap_page_fault,
   2.198      .invlpg                 = hap_invlpg,
   2.199      .gva_to_gfn             = hap_gva_to_gfn_2level,
   2.200      .update_cr3             = hap_update_cr3,
   2.201 @@ -735,7 +735,7 @@ struct paging_mode hap_paging_protected_
   2.202  };
   2.203  
   2.204  struct paging_mode hap_paging_pae_mode = {
   2.205 -    .page_fault             = hap_page_fault, 
   2.206 +    .page_fault             = hap_page_fault,
   2.207      .invlpg                 = hap_invlpg,
   2.208      .gva_to_gfn             = hap_gva_to_gfn_3level,
   2.209      .update_cr3             = hap_update_cr3,
   2.210 @@ -745,7 +745,7 @@ struct paging_mode hap_paging_pae_mode =
   2.211  };
   2.212  
   2.213  struct paging_mode hap_paging_long_mode = {
   2.214 -    .page_fault             = hap_page_fault, 
   2.215 +    .page_fault             = hap_page_fault,
   2.216      .invlpg                 = hap_invlpg,
   2.217      .gva_to_gfn             = hap_gva_to_gfn_4level,
   2.218      .update_cr3             = hap_update_cr3,
     3.1 --- a/xen/arch/x86/mm/p2m.c	Tue Aug 07 09:06:38 2007 +0100
     3.2 +++ b/xen/arch/x86/mm/p2m.c	Tue Aug 07 09:07:29 2007 +0100
     3.3 @@ -2,12 +2,12 @@
     3.4   * arch/x86/mm/p2m.c
     3.5   *
     3.6   * physical-to-machine mappings for automatically-translated domains.
     3.7 - * 
     3.8 + *
     3.9   * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
    3.10   * Parts of this code are Copyright (c) 2006 by XenSource Inc.
    3.11   * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
    3.12   * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
    3.13 - * 
    3.14 + *
    3.15   * This program is free software; you can redistribute it and/or modify
    3.16   * it under the terms of the GNU General Public License as published by
    3.17   * the Free Software Foundation; either version 2 of the License, or
    3.18 @@ -34,7 +34,7 @@
    3.19  
    3.20  /*
    3.21   * The P2M lock.  This protects all updates to the p2m table.
    3.22 - * Updates are expected to be safe against concurrent reads, 
    3.23 + * Updates are expected to be safe against concurrent reads,
    3.24   * which do *not* require the lock.
    3.25   *
    3.26   * Locking discipline: always acquire this lock before the shadow or HAP one
    3.27 @@ -80,7 +80,7 @@
    3.28  #define P2M_DEBUG(_f, _a...)                                 \
    3.29      debugtrace_printk("p2mdebug: %s(): " _f, __func__, ##_a)
    3.30  #else
    3.31 -#define P2M_DEBUG(_f, _a...) do { (void)(_f); } while(0) 
    3.32 +#define P2M_DEBUG(_f, _a...) do { (void)(_f); } while(0)
    3.33  #endif
    3.34  
    3.35  
    3.36 @@ -119,8 +119,8 @@ p2m_find_entry(void *table, unsigned lon
    3.37  // Returns 0 on error.
    3.38  //
    3.39  static int
    3.40 -p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table, 
    3.41 -               unsigned long *gfn_remainder, unsigned long gfn, u32 shift, 
    3.42 +p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table,
    3.43 +               unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
    3.44                 u32 max, unsigned long type)
    3.45  {
    3.46      l1_pgentry_t *p2m_entry;
    3.47 @@ -146,7 +146,7 @@ p2m_next_level(struct domain *d, mfn_t *
    3.48  
    3.49          switch ( type ) {
    3.50          case PGT_l3_page_table:
    3.51 -            paging_write_p2m_entry(d, gfn, 
    3.52 +            paging_write_p2m_entry(d, gfn,
    3.53                                     p2m_entry, *table_mfn, new_entry, 4);
    3.54              break;
    3.55          case PGT_l2_page_table:
    3.56 @@ -154,11 +154,11 @@ p2m_next_level(struct domain *d, mfn_t *
    3.57              /* for PAE mode, PDPE only has PCD/PWT/P bits available */
    3.58              new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), _PAGE_PRESENT);
    3.59  #endif
    3.60 -            paging_write_p2m_entry(d, gfn, 
    3.61 +            paging_write_p2m_entry(d, gfn,
    3.62                                     p2m_entry, *table_mfn, new_entry, 3);
    3.63              break;
    3.64          case PGT_l1_page_table:
    3.65 -            paging_write_p2m_entry(d, gfn, 
    3.66 +            paging_write_p2m_entry(d, gfn,
    3.67                                     p2m_entry, *table_mfn, new_entry, 2);
    3.68              break;
    3.69          default:
    3.70 @@ -216,7 +216,7 @@ set_p2m_entry(struct domain *d, unsigned
    3.71      ASSERT(p2m_entry);
    3.72  
    3.73      /* Track the highest gfn for which we have ever had a valid mapping */
    3.74 -    if ( mfn_valid(mfn) && (gfn > d->arch.p2m.max_mapped_pfn) ) 
    3.75 +    if ( mfn_valid(mfn) && (gfn > d->arch.p2m.max_mapped_pfn) )
    3.76          d->arch.p2m.max_mapped_pfn = gfn;
    3.77  
    3.78      if ( mfn_valid(mfn) )
    3.79 @@ -229,7 +229,7 @@ set_p2m_entry(struct domain *d, unsigned
    3.80  
    3.81      /* Success */
    3.82      rv = 1;
    3.83 - 
    3.84 +
    3.85   out:
    3.86      unmap_domain_page(table);
    3.87      return rv;
    3.88 @@ -250,7 +250,7 @@ void p2m_init(struct domain *d)
    3.89  // controlled by CONFIG_PAGING_LEVELS).
    3.90  //
    3.91  // The alloc_page and free_page functions will be used to get memory to
    3.92 -// build the p2m, and to release it again at the end of day. 
    3.93 +// build the p2m, and to release it again at the end of day.
    3.94  //
    3.95  // Returns 0 for success or -errno.
    3.96  //
    3.97 @@ -264,7 +264,7 @@ int p2m_alloc_table(struct domain *d,
    3.98      struct page_info *page, *p2m_top;
    3.99      unsigned int page_count = 0;
   3.100      unsigned long gfn;
   3.101 -    
   3.102 +
   3.103      p2m_lock(d);
   3.104  
   3.105      if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
   3.106 @@ -288,7 +288,7 @@ int p2m_alloc_table(struct domain *d,
   3.107      list_add_tail(&p2m_top->list, &d->arch.p2m.pages);
   3.108  
   3.109      p2m_top->count_info = 1;
   3.110 -    p2m_top->u.inuse.type_info = 
   3.111 +    p2m_top->u.inuse.type_info =
   3.112  #if CONFIG_PAGING_LEVELS == 4
   3.113          PGT_l4_page_table
   3.114  #elif CONFIG_PAGING_LEVELS == 3
   3.115 @@ -301,7 +301,7 @@ int p2m_alloc_table(struct domain *d,
   3.116      d->arch.phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
   3.117  
   3.118      P2M_PRINTK("populating p2m table\n");
   3.119 - 
   3.120 +
   3.121      /* Initialise physmap tables for slot zero. Other code assumes this. */
   3.122      gfn = 0;
   3.123      mfn = _mfn(INVALID_MFN);
   3.124 @@ -365,17 +365,17 @@ gfn_to_mfn_foreign(struct domain *d, uns
   3.125      paddr_t addr = ((paddr_t)gpfn) << PAGE_SHIFT;
   3.126      l2_pgentry_t *l2e;
   3.127      l1_pgentry_t *l1e;
   3.128 -    
   3.129 +
   3.130      ASSERT(paging_mode_translate(d));
   3.131      mfn = pagetable_get_mfn(d->arch.phys_table);
   3.132  
   3.133  
   3.134 -    if ( gpfn > d->arch.p2m.max_mapped_pfn ) 
   3.135 +    if ( gpfn > d->arch.p2m.max_mapped_pfn )
   3.136          /* This pfn is higher than the highest the p2m map currently holds */
   3.137          return _mfn(INVALID_MFN);
   3.138  
   3.139  #if CONFIG_PAGING_LEVELS >= 4
   3.140 -    { 
   3.141 +    {
   3.142          l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
   3.143          l4e += l4_table_offset(addr);
   3.144          if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
   3.145 @@ -398,7 +398,7 @@ gfn_to_mfn_foreign(struct domain *d, uns
   3.146           * the bounds of the p2m. */
   3.147          l3e += (addr >> L3_PAGETABLE_SHIFT);
   3.148  #else
   3.149 -        l3e += l3_table_offset(addr);        
   3.150 +        l3e += l3_table_offset(addr);
   3.151  #endif
   3.152          if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
   3.153          {
   3.154 @@ -443,18 +443,18 @@ static void audit_p2m(struct domain *d)
   3.155      mfn_t p2mfn;
   3.156      unsigned long orphans_d = 0, orphans_i = 0, mpbad = 0, pmbad = 0;
   3.157      int test_linear;
   3.158 -    
   3.159 +
   3.160      if ( !paging_mode_translate(d) )
   3.161          return;
   3.162  
   3.163      //P2M_PRINTK("p2m audit starts\n");
   3.164  
   3.165 -    test_linear = ( (d == current->domain) 
   3.166 +    test_linear = ( (d == current->domain)
   3.167                      && !pagetable_is_null(current->arch.monitor_table) );
   3.168      if ( test_linear )
   3.169 -        local_flush_tlb(); 
   3.170 +        local_flush_tlb();
   3.171  
   3.172 -    /* Audit part one: walk the domain's page allocation list, checking 
   3.173 +    /* Audit part one: walk the domain's page allocation list, checking
   3.174       * the m2p entries. */
   3.175      for ( entry = d->page_list.next;
   3.176            entry != &d->page_list;
   3.177 @@ -463,11 +463,11 @@ static void audit_p2m(struct domain *d)
   3.178          page = list_entry(entry, struct page_info, list);
   3.179          mfn = mfn_x(page_to_mfn(page));
   3.180  
   3.181 -        // P2M_PRINTK("auditing guest page, mfn=%#lx\n", mfn); 
   3.182 +        // P2M_PRINTK("auditing guest page, mfn=%#lx\n", mfn);
   3.183  
   3.184          od = page_get_owner(page);
   3.185  
   3.186 -        if ( od != d ) 
   3.187 +        if ( od != d )
   3.188          {
   3.189              P2M_PRINTK("wrong owner %#lx -> %p(%u) != %p(%u)\n",
   3.190                         mfn, od, (od?od->domain_id:-1), d, d->domain_id);
   3.191 @@ -475,19 +475,19 @@ static void audit_p2m(struct domain *d)
   3.192          }
   3.193  
   3.194          gfn = get_gpfn_from_mfn(mfn);
   3.195 -        if ( gfn == INVALID_M2P_ENTRY ) 
   3.196 +        if ( gfn == INVALID_M2P_ENTRY )
   3.197          {
   3.198              orphans_i++;
   3.199              //P2M_PRINTK("orphaned guest page: mfn=%#lx has invalid gfn\n",
   3.200 -            //               mfn); 
   3.201 +            //               mfn);
   3.202              continue;
   3.203          }
   3.204  
   3.205 -        if ( gfn == 0x55555555 ) 
   3.206 +        if ( gfn == 0x55555555 )
   3.207          {
   3.208              orphans_d++;
   3.209 -            //P2M_PRINTK("orphaned guest page: mfn=%#lx has debug gfn\n", 
   3.210 -            //               mfn); 
   3.211 +            //P2M_PRINTK("orphaned guest page: mfn=%#lx has debug gfn\n",
   3.212 +            //               mfn);
   3.213              continue;
   3.214          }
   3.215  
   3.216 @@ -503,7 +503,7 @@ static void audit_p2m(struct domain *d)
   3.217                          : -1u));
   3.218              /* This m2p entry is stale: the domain has another frame in
   3.219               * this physical slot.  No great disaster, but for neatness,
   3.220 -             * blow away the m2p entry. */ 
   3.221 +             * blow away the m2p entry. */
   3.222              set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY, __PAGE_HYPERVISOR|_PAGE_USER);
   3.223          }
   3.224  
   3.225 @@ -517,9 +517,9 @@ static void audit_p2m(struct domain *d)
   3.226              }
   3.227          }
   3.228  
   3.229 -        // P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx, lp2mfn=%#lx\n", 
   3.230 -        //                mfn, gfn, p2mfn, lp2mfn); 
   3.231 -    }   
   3.232 +        // P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx, lp2mfn=%#lx\n",
   3.233 +        //                mfn, gfn, p2mfn, lp2mfn);
   3.234 +    }
   3.235  
   3.236      /* Audit part two: walk the domain's p2m table, checking the entries. */
   3.237      if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
   3.238 @@ -527,7 +527,7 @@ static void audit_p2m(struct domain *d)
   3.239          l2_pgentry_t *l2e;
   3.240          l1_pgentry_t *l1e;
   3.241          int i1, i2;
   3.242 -        
   3.243 +
   3.244  #if CONFIG_PAGING_LEVELS == 4
   3.245          l4_pgentry_t *l4e;
   3.246          l3_pgentry_t *l3e;
   3.247 @@ -553,8 +553,8 @@ static void audit_p2m(struct domain *d)
   3.248              }
   3.249              l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
   3.250  #endif /* now at levels 3 or 4... */
   3.251 -            for ( i3 = 0; 
   3.252 -                  i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8); 
   3.253 +            for ( i3 = 0;
   3.254 +                  i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
   3.255                    i3++ )
   3.256              {
   3.257                  if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
   3.258 @@ -572,7 +572,7 @@ static void audit_p2m(struct domain *d)
   3.259                          continue;
   3.260                      }
   3.261                      l1e = map_domain_page(mfn_x(_mfn(l2e_get_pfn(l2e[i2]))));
   3.262 -                    
   3.263 +
   3.264                      for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
   3.265                      {
   3.266                          if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
   3.267 @@ -610,14 +610,14 @@ static void audit_p2m(struct domain *d)
   3.268      }
   3.269  
   3.270      //P2M_PRINTK("p2m audit complete\n");
   3.271 -    //if ( orphans_i | orphans_d | mpbad | pmbad ) 
   3.272 +    //if ( orphans_i | orphans_d | mpbad | pmbad )
   3.273      //    P2M_PRINTK("p2m audit found %lu orphans (%lu inval %lu debug)\n",
   3.274      //                   orphans_i + orphans_d, orphans_i, orphans_d,
   3.275 -    if ( mpbad | pmbad ) 
   3.276 +    if ( mpbad | pmbad )
   3.277          P2M_PRINTK("p2m audit found %lu odd p2m, %lu bad m2p entries\n",
   3.278                     pmbad, mpbad);
   3.279  }
   3.280 -#else 
   3.281 +#else
   3.282  #define audit_p2m(_d) do { (void)(_d); } while(0)
   3.283  #endif /* P2M_AUDIT */
   3.284  
   3.285 @@ -645,7 +645,7 @@ guest_physmap_remove_page(struct domain 
   3.286      audit_p2m(d);
   3.287      p2m_remove_page(d, gfn, mfn);
   3.288      audit_p2m(d);
   3.289 -    p2m_unlock(d);    
   3.290 +    p2m_unlock(d);
   3.291  }
   3.292  
   3.293  void
   3.294 @@ -683,11 +683,11 @@ guest_physmap_add_page(struct domain *d,
   3.295          /* This machine frame is already mapped at another physical address */
   3.296          P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
   3.297                    mfn, ogfn, gfn);
   3.298 -        if ( mfn_valid(omfn = gfn_to_mfn(d, ogfn)) ) 
   3.299 +        if ( mfn_valid(omfn = gfn_to_mfn(d, ogfn)) )
   3.300          {
   3.301 -            P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n", 
   3.302 +            P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
   3.303                        ogfn , mfn_x(omfn));
   3.304 -            if ( mfn_x(omfn) == mfn ) 
   3.305 +            if ( mfn_x(omfn) == mfn )
   3.306                  p2m_remove_page(d, ogfn, mfn);
   3.307          }
   3.308      }
   3.309 @@ -720,15 +720,15 @@ void p2m_set_flags_global(struct domain 
   3.310      int i4;
   3.311  #endif /* CONFIG_PAGING_LEVELS == 4 */
   3.312  #endif /* CONFIG_PAGING_LEVELS >= 3 */
   3.313 -    
   3.314 +
   3.315      if ( !paging_mode_translate(d) )
   3.316          return;
   3.317 - 
   3.318 +
   3.319      if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
   3.320          return;
   3.321  
   3.322      p2m_lock(d);
   3.323 -        
   3.324 +
   3.325  #if CONFIG_PAGING_LEVELS == 4
   3.326      l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
   3.327  #elif CONFIG_PAGING_LEVELS == 3
   3.328 @@ -739,52 +739,52 @@ void p2m_set_flags_global(struct domain 
   3.329  
   3.330  #if CONFIG_PAGING_LEVELS >= 3
   3.331  #if CONFIG_PAGING_LEVELS >= 4
   3.332 -    for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ ) 
   3.333 +    for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
   3.334      {
   3.335 -	if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
   3.336 -	{
   3.337 -	    continue;
   3.338 -	}
   3.339 -	l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
   3.340 +        if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
   3.341 +        {
   3.342 +            continue;
   3.343 +        }
   3.344 +        l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
   3.345  #endif /* now at levels 3 or 4... */
   3.346 -	for ( i3 = 0; 
   3.347 -	      i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8); 
   3.348 -	      i3++ )
   3.349 -	{
   3.350 -	    if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
   3.351 -	    {
   3.352 -		continue;
   3.353 -	    }
   3.354 -	    l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
   3.355 +        for ( i3 = 0;
   3.356 +              i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
   3.357 +              i3++ )
   3.358 +        {
   3.359 +            if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
   3.360 +            {
   3.361 +                continue;
   3.362 +            }
   3.363 +            l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
   3.364  #endif /* all levels... */
   3.365 -	    for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
   3.366 -	    {
   3.367 -		if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
   3.368 -		{
   3.369 -		    continue;
   3.370 -		}
   3.371 +            for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
   3.372 +            {
   3.373 +                if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
   3.374 +                {
   3.375 +                    continue;
   3.376 +                }
   3.377  
   3.378                  l1mfn = _mfn(l2e_get_pfn(l2e[i2]));
   3.379 -		l1e = map_domain_page(mfn_x(l1mfn));
   3.380 -		
   3.381 -		for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
   3.382 -		{
   3.383 -		    if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
   3.384 -			continue;
   3.385 -		    mfn = l1e_get_pfn(l1e[i1]);
   3.386 -		    gfn = get_gpfn_from_mfn(mfn);
   3.387 -		    /* create a new 1le entry using l1e_flags */
   3.388 -		    l1e_content = l1e_from_pfn(mfn, l1e_flags);
   3.389 -		    paging_write_p2m_entry(d, gfn, &l1e[i1], 
   3.390 +                l1e = map_domain_page(mfn_x(l1mfn));
   3.391 +
   3.392 +                for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
   3.393 +                {
   3.394 +                    if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
   3.395 +                        continue;
   3.396 +                    mfn = l1e_get_pfn(l1e[i1]);
   3.397 +                    gfn = get_gpfn_from_mfn(mfn);
   3.398 +                    /* create a new 1le entry using l1e_flags */
   3.399 +                    l1e_content = l1e_from_pfn(mfn, l1e_flags);
   3.400 +                    paging_write_p2m_entry(d, gfn, &l1e[i1],
   3.401                                             l1mfn, l1e_content, 1);
   3.402 -		}
   3.403 -		unmap_domain_page(l1e);
   3.404 -	    }
   3.405 +                }
   3.406 +                unmap_domain_page(l1e);
   3.407 +            }
   3.408  #if CONFIG_PAGING_LEVELS >= 3
   3.409 -	    unmap_domain_page(l2e);
   3.410 -	}
   3.411 +            unmap_domain_page(l2e);
   3.412 +        }
   3.413  #if CONFIG_PAGING_LEVELS >= 4
   3.414 -	unmap_domain_page(l3e);
   3.415 +        unmap_domain_page(l3e);
   3.416      }
   3.417  #endif
   3.418  #endif
   3.419 @@ -814,7 +814,7 @@ int p2m_set_flags(struct domain *d, padd
   3.420      mfn = gfn_to_mfn(d, gfn);
   3.421      if ( mfn_valid(mfn) )
   3.422          set_p2m_entry(d, gfn, mfn, l1e_flags);
   3.423 -    
   3.424 +
   3.425      p2m_unlock(d);
   3.426  
   3.427      return 1;
     4.1 --- a/xen/arch/x86/mm/paging.c	Tue Aug 07 09:06:38 2007 +0100
     4.2 +++ b/xen/arch/x86/mm/paging.c	Tue Aug 07 09:07:29 2007 +0100
     4.3 @@ -54,10 +54,10 @@ boolean_param("hap", opt_hap_enabled);
     4.4  #define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
     4.5  
     4.6  /* The log-dirty lock.  This protects the log-dirty bitmap from
     4.7 - * concurrent accesses (and teardowns, etc). 
     4.8 - * 
     4.9 + * concurrent accesses (and teardowns, etc).
    4.10 + *
    4.11   * Locking discipline: always acquire shadow or HAP lock before this one.
    4.12 - * 
    4.13 + *
    4.14   * Because mark_dirty is called from a lot of places, the log-dirty lock
    4.15   * may be acquired with the shadow or HAP locks already held.  When the
    4.16   * log-dirty code makes callbacks into HAP or shadow code to reset
    4.17 @@ -105,7 +105,7 @@ int paging_alloc_log_dirty_bitmap(struct
    4.18  
    4.19      d->arch.paging.log_dirty.bitmap_size =
    4.20          (domain_get_maximum_gpfn(d) + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
    4.21 -    d->arch.paging.log_dirty.bitmap = 
    4.22 +    d->arch.paging.log_dirty.bitmap =
    4.23          xmalloc_array(unsigned long,
    4.24                        d->arch.paging.log_dirty.bitmap_size / BITS_PER_LONG);
    4.25      if ( d->arch.paging.log_dirty.bitmap == NULL )
    4.26 @@ -152,8 +152,8 @@ int paging_log_dirty_enable(struct domai
    4.27  
    4.28      log_dirty_unlock(d);
    4.29  
    4.30 -    /* Safe because the domain is paused. */    
    4.31 -    ret = d->arch.paging.log_dirty.enable_log_dirty(d);    
    4.32 +    /* Safe because the domain is paused. */
    4.33 +    ret = d->arch.paging.log_dirty.enable_log_dirty(d);
    4.34  
    4.35      /* Possibility of leaving the bitmap allocated here but it'll be
    4.36       * tidied on domain teardown. */
    4.37 @@ -202,7 +202,7 @@ void paging_mark_dirty(struct domain *d,
    4.38      pfn = get_gpfn_from_mfn(mfn_x(gmfn));
    4.39  
    4.40      /*
    4.41 -     * Values with the MSB set denote MFNs that aren't really part of the 
    4.42 +     * Values with the MSB set denote MFNs that aren't really part of the
    4.43       * domain's pseudo-physical memory map (e.g., the shared info frame).
    4.44       * Nothing to do here...
    4.45       */
    4.46 @@ -212,11 +212,11 @@ void paging_mark_dirty(struct domain *d,
    4.47          return;
    4.48      }
    4.49  
    4.50 -    if ( likely(pfn < d->arch.paging.log_dirty.bitmap_size) ) 
    4.51 -    { 
    4.52 +    if ( likely(pfn < d->arch.paging.log_dirty.bitmap_size) )
    4.53 +    {
    4.54          if ( !__test_and_set_bit(pfn, d->arch.paging.log_dirty.bitmap) )
    4.55          {
    4.56 -            PAGING_DEBUG(LOGDIRTY, 
    4.57 +            PAGING_DEBUG(LOGDIRTY,
    4.58                           "marked mfn %" PRI_mfn " (pfn=%lx), dom %d\n",
    4.59                           mfn_x(gmfn), pfn, d->domain_id);
    4.60              d->arch.paging.log_dirty.dirty_count++;
    4.61 @@ -227,21 +227,21 @@ void paging_mark_dirty(struct domain *d,
    4.62          PAGING_PRINTK("mark_dirty OOR! "
    4.63                        "mfn=%" PRI_mfn " pfn=%lx max=%x (dom %d)\n"
    4.64                        "owner=%d c=%08x t=%" PRtype_info "\n",
    4.65 -                      mfn_x(gmfn), 
    4.66 -                      pfn, 
    4.67 +                      mfn_x(gmfn),
    4.68 +                      pfn,
    4.69                        d->arch.paging.log_dirty.bitmap_size,
    4.70                        d->domain_id,
    4.71                        (page_get_owner(mfn_to_page(gmfn))
    4.72                         ? page_get_owner(mfn_to_page(gmfn))->domain_id
    4.73                         : -1),
    4.74 -                      mfn_to_page(gmfn)->count_info, 
    4.75 +                      mfn_to_page(gmfn)->count_info,
    4.76                        mfn_to_page(gmfn)->u.inuse.type_info);
    4.77      }
    4.78 -    
    4.79 +
    4.80      log_dirty_unlock(d);
    4.81  }
    4.82  
    4.83 -/* Read a domain's log-dirty bitmap and stats.  If the operation is a CLEAN, 
    4.84 +/* Read a domain's log-dirty bitmap and stats.  If the operation is a CLEAN,
    4.85   * clear the bitmap and stats as well. */
    4.86  int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
    4.87  {
    4.88 @@ -252,15 +252,15 @@ int paging_log_dirty_op(struct domain *d
    4.89  
    4.90      clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN);
    4.91  
    4.92 -    PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n", 
    4.93 +    PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n",
    4.94                   (clean) ? "clean" : "peek",
    4.95                   d->domain_id,
    4.96 -                 d->arch.paging.log_dirty.fault_count, 
    4.97 +                 d->arch.paging.log_dirty.fault_count,
    4.98                   d->arch.paging.log_dirty.dirty_count);
    4.99  
   4.100      sc->stats.fault_count = d->arch.paging.log_dirty.fault_count;
   4.101      sc->stats.dirty_count = d->arch.paging.log_dirty.dirty_count;
   4.102 -    
   4.103 +
   4.104      if ( clean )
   4.105      {
   4.106          d->arch.paging.log_dirty.fault_count = 0;
   4.107 @@ -276,7 +276,7 @@ int paging_log_dirty_op(struct domain *d
   4.108          rv = -EINVAL; /* perhaps should be ENOMEM? */
   4.109          goto out;
   4.110      }
   4.111 - 
   4.112 +
   4.113      if ( sc->pages > d->arch.paging.log_dirty.bitmap_size )
   4.114          sc->pages = d->arch.paging.log_dirty.bitmap_size;
   4.115  
   4.116 @@ -322,11 +322,11 @@ int paging_log_dirty_op(struct domain *d
   4.117  
   4.118  
   4.119  /* Note that this function takes three function pointers. Callers must supply
   4.120 - * these functions for log dirty code to call. This function usually is 
   4.121 - * invoked when paging is enabled. Check shadow_enable() and hap_enable() for 
   4.122 + * these functions for log dirty code to call. This function usually is
   4.123 + * invoked when paging is enabled. Check shadow_enable() and hap_enable() for
   4.124   * reference.
   4.125   *
   4.126 - * These function pointers must not be followed with the log-dirty lock held. 
   4.127 + * These function pointers must not be followed with the log-dirty lock held.
   4.128   */
   4.129  void paging_log_dirty_init(struct domain *d,
   4.130                             int    (*enable_log_dirty)(struct domain *d),
   4.131 @@ -335,7 +335,7 @@ void paging_log_dirty_init(struct domain
   4.132  {
   4.133      /* We initialize log dirty lock first */
   4.134      log_dirty_lock_init(d);
   4.135 -    
   4.136 +
   4.137      d->arch.paging.log_dirty.enable_log_dirty = enable_log_dirty;
   4.138      d->arch.paging.log_dirty.disable_log_dirty = disable_log_dirty;
   4.139      d->arch.paging.log_dirty.clean_dirty_bitmap = clean_dirty_bitmap;
   4.140 @@ -387,7 +387,7 @@ int paging_domctl(struct domain *d, xen_
   4.141                   d->domain_id);
   4.142          return -EINVAL;
   4.143      }
   4.144 -    
   4.145 +
   4.146      if ( unlikely(d->is_dying) )
   4.147      {
   4.148          gdprintk(XENLOG_INFO, "Ignoring paging op on dying domain %u\n",
   4.149 @@ -401,38 +401,38 @@ int paging_domctl(struct domain *d, xen_
   4.150                       d->domain_id);
   4.151          return -EINVAL;
   4.152      }
   4.153 -    
   4.154 +
   4.155      /* Code to handle log-dirty. Note that some log dirty operations
   4.156 -     * piggy-back on shadow operations. For example, when 
   4.157 +     * piggy-back on shadow operations. For example, when
   4.158       * XEN_DOMCTL_SHADOW_OP_OFF is called, it first checks whether log dirty
   4.159 -     * mode is enabled. If does, we disables log dirty and continues with 
   4.160 -     * shadow code. For this reason, we need to further dispatch domctl 
   4.161 +     * mode is enabled. If does, we disables log dirty and continues with
   4.162 +     * shadow code. For this reason, we need to further dispatch domctl
   4.163       * to next-level paging code (shadow or hap).
   4.164       */
   4.165      switch ( sc->op )
   4.166      {
   4.167      case XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
   4.168 -        return paging_log_dirty_enable(d);	
   4.169 -	
   4.170 -    case XEN_DOMCTL_SHADOW_OP_ENABLE:	
   4.171 +        return paging_log_dirty_enable(d);
   4.172 +
   4.173 +    case XEN_DOMCTL_SHADOW_OP_ENABLE:
   4.174          if ( sc->mode & XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY )
   4.175              return paging_log_dirty_enable(d);
   4.176  
   4.177      case XEN_DOMCTL_SHADOW_OP_OFF:
   4.178          if ( paging_mode_log_dirty(d) )
   4.179 -            if ( (rc = paging_log_dirty_disable(d)) != 0 ) 
   4.180 +            if ( (rc = paging_log_dirty_disable(d)) != 0 )
   4.181                  return rc;
   4.182  
   4.183      case XEN_DOMCTL_SHADOW_OP_CLEAN:
   4.184      case XEN_DOMCTL_SHADOW_OP_PEEK:
   4.185 -	return paging_log_dirty_op(d, sc);
   4.186 +        return paging_log_dirty_op(d, sc);
   4.187      }
   4.188 -	
   4.189 +
   4.190      /* Here, dispatch domctl to the appropriate paging code */
   4.191      if ( opt_hap_enabled && is_hvm_domain(d) )
   4.192 -	return hap_domctl(d, sc, u_domctl);
   4.193 +        return hap_domctl(d, sc, u_domctl);
   4.194      else
   4.195 -	return shadow_domctl(d, sc, u_domctl);
   4.196 +        return shadow_domctl(d, sc, u_domctl);
   4.197  }
   4.198  
   4.199  /* Call when destroying a domain */
   4.200 @@ -492,7 +492,7 @@ void paging_dump_vcpu_info(struct vcpu *
   4.201  {
   4.202      if ( paging_mode_enabled(v->domain) )
   4.203      {
   4.204 -        printk("    paging assistance: ");        
   4.205 +        printk("    paging assistance: ");
   4.206          if ( paging_mode_shadow(v->domain) )
   4.207          {
   4.208              if ( v->arch.paging.mode )
   4.209 @@ -504,7 +504,7 @@ void paging_dump_vcpu_info(struct vcpu *
   4.210                  printk("not shadowed\n");
   4.211          }
   4.212          else if ( paging_mode_hap(v->domain) && v->arch.paging.mode )
   4.213 -            printk("hap, %u levels\n", 
   4.214 +            printk("hap, %u levels\n",
   4.215                     v->arch.paging.mode->guest_levels);
   4.216          else
   4.217              printk("none\n");
     5.1 --- a/xen/include/asm-x86/domain.h	Tue Aug 07 09:06:38 2007 +0100
     5.2 +++ b/xen/include/asm-x86/domain.h	Tue Aug 07 09:07:29 2007 +0100
     5.3 @@ -77,10 +77,10 @@ struct shadow_domain {
     5.4      int               locker; /* processor which holds the lock */
     5.5      const char       *locker_function; /* Func that took it */
     5.6      unsigned int      opt_flags;    /* runtime tunable optimizations on/off */
     5.7 -    struct list_head  pinned_shadows; 
     5.8 +    struct list_head  pinned_shadows;
     5.9  
    5.10      /* Memory allocation */
    5.11 -    struct list_head  freelists[SHADOW_MAX_ORDER + 1]; 
    5.12 +    struct list_head  freelists[SHADOW_MAX_ORDER + 1];
    5.13      struct list_head  p2m_freelist;
    5.14      unsigned int      total_pages;  /* number of pages allocated */
    5.15      unsigned int      free_pages;   /* number of pages on freelists */
    5.16 @@ -116,7 +116,7 @@ struct hap_domain {
    5.17      spinlock_t        lock;
    5.18      int               locker;
    5.19      const char       *locker_function;
    5.20 -    
    5.21 +
    5.22      struct list_head  freelist;
    5.23      unsigned int      total_pages;  /* number of pages allocated */
    5.24      unsigned int      free_pages;   /* number of pages on freelists */
    5.25 @@ -131,13 +131,13 @@ struct p2m_domain {
    5.26      spinlock_t         lock;
    5.27      int                locker;   /* processor which holds the lock */
    5.28      const char        *locker_function; /* Func that took it */
    5.29 -    
    5.30 +
    5.31      /* Pages used to construct the p2m */
    5.32      struct list_head   pages;
    5.33  
    5.34      /* Functions to call to get or free pages for the p2m */
    5.35      struct page_info * (*alloc_page  )(struct domain *d);
    5.36 -    void               (*free_page   )(struct domain *d, 
    5.37 +    void               (*free_page   )(struct domain *d,
    5.38                                         struct page_info *pg);
    5.39  
    5.40      /* Highest guest frame that's ever been mapped in the p2m */
    5.41 @@ -177,6 +177,7 @@ struct paging_domain {
    5.42      /* log dirty support */
    5.43      struct log_dirty_domain log_dirty;
    5.44  };
    5.45 +
    5.46  struct paging_vcpu {
    5.47      /* Pointers to mode-specific entry points. */
    5.48      struct paging_mode *mode;
    5.49 @@ -184,9 +185,9 @@ struct paging_vcpu {
    5.50      unsigned int translate_enabled:1;
    5.51      /* HVM guest: last emulate was to a pagetable */
    5.52      unsigned int last_write_was_pt:1;
    5.53 -    /* Translated guest: virtual TLB */    
    5.54 +    /* Translated guest: virtual TLB */
    5.55      struct shadow_vtlb *vtlb;
    5.56 -    spinlock_t          vtlb_lock; 
    5.57 +    spinlock_t          vtlb_lock;
    5.58  
    5.59      /* paging support extension */
    5.60      struct shadow_vcpu shadow;
    5.61 @@ -303,7 +304,7 @@ struct arch_vcpu
    5.62       * shadow refcounts are in use */
    5.63      pagetable_t shadow_table[4];        /* (MFN) shadow(s) of guest */
    5.64      pagetable_t monitor_table;          /* (MFN) hypervisor PT (for HVM) */
    5.65 -    unsigned long cr3;           	    /* (MA) value to install in HW CR3 */
    5.66 +    unsigned long cr3;                  /* (MA) value to install in HW CR3 */
    5.67  
    5.68      /* Current LDT details. */
    5.69      unsigned long shadow_ldt_mapcnt;
     6.1 --- a/xen/include/asm-x86/hap.h	Tue Aug 07 09:06:38 2007 +0100
     6.2 +++ b/xen/include/asm-x86/hap.h	Tue Aug 07 09:07:29 2007 +0100
     6.3 @@ -3,7 +3,7 @@
     6.4   *
     6.5   * hardware-assisted paging
     6.6   * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
     6.7 - * 
     6.8 + *
     6.9   * Parts of this code are Copyright (c) 2006 by XenSource Inc.
    6.10   * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
    6.11   * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.