ia64/xen-unstable

changeset 10824:b42b80403ddd

map_p2m_entry() needs only gpfn and gmfn as input parameters: the
virtual address of a p2m slot is fully determined by gpfn. The current
map_p2m_entry() also takes a redundant 'va' parameter; this patch
removes it and computes the address internally.

Signed-off-by: Xin Li <xin.b.li@intel.com>
author kfraser@localhost.localdomain
date Thu Jul 27 13:17:58 2006 +0100 (2006-07-27)
parents 5fa2cd68d059
children d60da1c0664d
files xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c
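
Context for the diff below: the phys-to-machine (p2m) table is a flat
array of machine frame numbers mapped read-only at RO_MPT_VIRT_START,
so the virtual address of the slot for a given gpfn is a pure function
of gpfn. A minimal sketch of that derivation, assuming the layout just
described (the helper name and the base-address value are illustrative,
not taken from the Xen source):

    /* Hypothetical helper: address of the p2m slot for 'gpfn'.
     * The RO_MPT_VIRT_START value here is a placeholder. */
    #define RO_MPT_VIRT_START 0xFC000000UL

    static inline unsigned long p2m_entry_va(unsigned long gpfn)
    {
        /* slot i lives at base + i * sizeof(unsigned long) */
        return RO_MPT_VIRT_START + (gpfn * sizeof(unsigned long));
    }

This is exactly the expression the patch moves inside map_p2m_entry(),
which is why callers no longer need to pass 'va'.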
line diff
--- a/xen/arch/x86/shadow32.c	Thu Jul 27 13:17:17 2006 +0100
+++ b/xen/arch/x86/shadow32.c	Thu Jul 27 13:17:58 2006 +0100
@@ -835,12 +835,12 @@ void free_monitor_pagetable(struct vcpu 
 }
 
 static int
-map_p2m_entry(l1_pgentry_t *l1tab, unsigned long va,
-              unsigned long gpa, unsigned long mfn)
+map_p2m_entry(l1_pgentry_t *l1tab, unsigned long gpfn, unsigned long mfn)
 {
     unsigned long *l0tab = NULL;
     l1_pgentry_t l1e = { 0 };
     struct page_info *page;
+    unsigned long va = RO_MPT_VIRT_START + (gpfn * sizeof(mfn));
 
     l1e = l1tab[l1_table_offset(va)];
     if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
@@ -858,7 +858,7 @@ map_p2m_entry(l1_pgentry_t *l1tab, unsig
     else
         l0tab = map_domain_page(l1e_get_pfn(l1e));
 
-    l0tab[gpa & ((PAGE_SIZE / sizeof(mfn)) - 1)] = mfn;
+    l0tab[gpfn & ((PAGE_SIZE / sizeof(mfn)) - 1)] = mfn;
 
     unmap_domain_page(l0tab);
 
@@ -877,15 +877,9 @@ set_p2m_entry(struct domain *d, unsigned
     unsigned long va = pfn << PAGE_SHIFT;
 
     if ( shadow_mode_external(d) )
-    {
         tabpfn = pagetable_get_pfn(d->vcpu[0]->arch.monitor_table);
-        va = RO_MPT_VIRT_START + (pfn * sizeof (unsigned long));
-    }
     else
-    {
         tabpfn = pagetable_get_pfn(d->arch.phys_table);
-        va = pfn << PAGE_SHIFT;
-    }
 
     ASSERT(tabpfn != 0);
     ASSERT(shadow_lock_is_acquired(d));
@@ -902,12 +896,12 @@ set_p2m_entry(struct domain *d, unsigned
         l1_pgentry_t *l1tab = NULL;
         l2_pgentry_t l2e;
 
-        l2e = l2[l2_table_offset(va)];
+        l2e = l2[l2_table_offset(RO_MPT_VIRT_START)];
 
         ASSERT( l2e_get_flags(l2e) & _PAGE_PRESENT );
 
         l1tab = map_domain_page(l2e_get_pfn(l2e));
-        if ( !(error = map_p2m_entry(l1tab, va, pfn, mfn)) )
+        if ( !(error = map_p2m_entry(l1tab, pfn, mfn)) )
             domain_crash(d);
 
         unmap_domain_page(l1tab);
@@ -952,7 +946,6 @@ static int
 alloc_p2m_table(struct domain *d)
 {
     struct list_head *list_ent;
-    unsigned long va = RO_MPT_VIRT_START;   /* phys_to_machine_mapping */
 
     l2_pgentry_t *l2tab = NULL;
     l1_pgentry_t *l1tab = NULL;
@@ -965,14 +958,14 @@ alloc_p2m_table(struct domain *d)
     {
         l2tab = map_domain_page(
             pagetable_get_pfn(d->vcpu[0]->arch.monitor_table));
-        l2e = l2tab[l2_table_offset(va)];
+        l2e = l2tab[l2_table_offset(RO_MPT_VIRT_START)];
         if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
         {
             page = alloc_domheap_page(NULL);
 
             l1tab = map_domain_page(page_to_mfn(page));
             memset(l1tab, 0, PAGE_SIZE);
-            l2e = l2tab[l2_table_offset(va)] =
+            l2e = l2tab[l2_table_offset(RO_MPT_VIRT_START)] =
                 l2e_from_page(page, __PAGE_HYPERVISOR);
         }
         else
@@ -1002,14 +995,13 @@ alloc_p2m_table(struct domain *d)
         page = list_entry(list_ent, struct page_info, list);
         mfn = page_to_mfn(page);
 
-        if ( !(error = map_p2m_entry(l1tab, va, gpfn, mfn)) )
+        if ( !(error = map_p2m_entry(l1tab, gpfn, mfn)) )
         {
             domain_crash(d);
             break;
         }
 
         list_ent = frame_table[mfn].list.next;
-        va += sizeof(mfn);
     }
 
     unmap_domain_page(l1tab);
--- a/xen/arch/x86/shadow_public.c	Thu Jul 27 13:17:17 2006 +0100
+++ b/xen/arch/x86/shadow_public.c	Thu Jul 27 13:17:58 2006 +0100
@@ -1471,8 +1471,7 @@ int _shadow_mode_refcounts(struct domain
 }
 
 static int
-map_p2m_entry(pgentry_64_t *top_tab, unsigned long va,
-              unsigned long gpfn, unsigned long mfn)
+map_p2m_entry(pgentry_64_t *top_tab, unsigned long gpfn, unsigned long mfn)
 {
 #if CONFIG_PAGING_LEVELS >= 4
     pgentry_64_t l4e = { 0 };
@@ -1487,6 +1486,7 @@ map_p2m_entry(pgentry_64_t *top_tab, uns
     l2_pgentry_t l2e = { 0 };
     l1_pgentry_t l1e = { 0 };
     struct page_info *page;
+    unsigned long va = RO_MPT_VIRT_START + (gpfn * sizeof(mfn));
 
 #if CONFIG_PAGING_LEVELS >= 4
     l4e = top_tab[l4_table_offset(va)];
@@ -1568,7 +1568,7 @@ map_p2m_entry(pgentry_64_t *top_tab, uns
 
     unmap_domain_page(l1tab);
 
-    l0tab[gpfn & ((PAGE_SIZE / sizeof (mfn)) - 1) ] = mfn;
+    l0tab[gpfn & ((PAGE_SIZE / sizeof(mfn)) - 1)] = mfn;
 
     unmap_domain_page(l0tab);
 
@@ -1584,7 +1584,6 @@ set_p2m_entry(struct domain *d, unsigned
               struct domain_mmap_cache *l1cache)
 {
     unsigned long tabmfn = pagetable_get_pfn(d->vcpu[0]->arch.monitor_table);
-    unsigned long va = RO_MPT_VIRT_START + (gpfn * sizeof(unsigned long));
     pgentry_64_t *top_tab;
     int error;
 
@@ -1593,7 +1592,7 @@ set_p2m_entry(struct domain *d, unsigned
 
     top_tab = map_domain_page_with_cache(tabmfn, l2cache);
 
-    if ( !(error = map_p2m_entry(top_tab, va, gpfn, mfn)) )
+    if ( !(error = map_p2m_entry(top_tab, gpfn, mfn)) )
         domain_crash(d);
 
     unmap_domain_page_with_cache(top_tab, l2cache);
@@ -1605,10 +1604,9 @@ static int
 alloc_p2m_table(struct domain *d)
 {
     struct list_head *list_ent;
-    unsigned long va = RO_MPT_VIRT_START; /*  phys_to_machine_mapping */
     pgentry_64_t *top_tab = NULL;
-    unsigned long mfn;
-    int gpfn, error = 0;
+    unsigned long gpfn, mfn;
+    int error = 0;
 
     ASSERT( pagetable_get_pfn(d->vcpu[0]->arch.monitor_table) );
 
@@ -1624,14 +1622,13 @@ alloc_p2m_table(struct domain *d)
         page = list_entry(list_ent, struct page_info, list);
         mfn = page_to_mfn(page);
 
-        if ( !(error = map_p2m_entry(top_tab, va, gpfn, mfn)) )
+        if ( !(error = map_p2m_entry(top_tab, gpfn, mfn)) )
        {
             domain_crash(d);
             break;
         }
 
         list_ent = frame_table[mfn].list.next;
-        va += sizeof(mfn);
     }
 
     unmap_domain_page(top_tab);
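
A side note on the leaf-level indexing both files share: each l0 page
holds PAGE_SIZE / sizeof(mfn) slots, so the low bits of gpfn select the
slot within a leaf page while the upper bits walk the higher page-table
levels. A standalone illustration, assuming a 4 KiB page; only
sizeof(mfn) matters for the arithmetic:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL                  /* assumed x86 page size */

    int main(void)
    {
        unsigned long mfn = 0;                /* only its size matters here */
        unsigned long slots = PAGE_SIZE / sizeof(mfn); /* 1024 on 32-bit,
                                                          512 on 64-bit */
        unsigned long gpfn = 0x12345;

        /* slot selection, as in l0tab[gpfn & ((PAGE_SIZE / sizeof(mfn)) - 1)] */
        printf("gpfn 0x%lx -> slot %lu of %lu\n",
               gpfn, gpfn & (slots - 1), slots);
        return 0;
    }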