ia64/xen-unstable

changeset 11235:36220033c67f

[XEN] Fix maddr_from_mapped_domain_page(): handle VAs obtained from
map_domain_page_global() (ioremap range) as well as per-VCPU mapcache
VAs, and rename pfn to mfn where a machine frame number is meant.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Mon Aug 21 11:39:27 2006 +0100 (2006-08-21)
parents 069ac1bb7866
children 4a722773e38d
files xen/arch/x86/shadow2.c xen/arch/x86/x86_32/domain_page.c xen/include/asm-x86/domain.h xen/include/xen/domain_page.h
line diff
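
This changeset renames mapped_domain_page_to_maddr() to maddr_from_mapped_domain_page() and moves it below the global-mapping code so it can handle both kinds of mapped domain page: per-VCPU mapcache VAs (MAPCACHE_VIRT_START .. MAPCACHE_VIRT_END) and global mappings in the ioremap range (IOREMAP_VIRT_START .. IOREMAP_VIRT_END). The old implementation only understood the mapcache range, so a VA obtained from map_domain_page_global() tripped its range ASSERT on debug builds or indexed the wrong L1 table otherwise. As a minimal sketch (illustrative, not from the patch), both mapping paths must now satisfy the same round-trip property for any frame mfn the caller may map:

    void *v1 = map_domain_page(mfn);         /* per-VCPU mapcache VA    */
    void *v2 = map_domain_page_global(mfn);  /* global ioremap-range VA */
    ASSERT(maddr_from_mapped_domain_page(v1) == ((paddr_t)mfn << PAGE_SHIFT));
    ASSERT(maddr_from_mapped_domain_page(v2) == ((paddr_t)mfn << PAGE_SHIFT));
    unmap_domain_page(v1);
    unmap_domain_page_global(v2);
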
     1.1 --- a/xen/arch/x86/shadow2.c	Mon Aug 21 10:28:02 2006 +0100
     1.2 +++ b/xen/arch/x86/shadow2.c	Mon Aug 21 11:39:27 2006 +0100
     1.3 @@ -2293,7 +2293,7 @@ static void sh2_destroy_l3_subshadow(str
     1.4      for ( i = 0; i < GUEST_L3_PAGETABLE_ENTRIES; i++ ) 
     1.5          if ( shadow_l3e_get_flags(sl3e[i]) & _PAGE_PRESENT ) 
     1.6              sh2_put_ref(v, shadow_l3e_get_mfn(sl3e[i]),
     1.7 -                        mapped_domain_page_to_maddr(sl3e));
     1.8 +                        maddr_from_mapped_domain_page(sl3e));
     1.9  }
    1.10  #endif
    1.11  
     2.1 --- a/xen/arch/x86/x86_32/domain_page.c	Mon Aug 21 10:28:02 2006 +0100
     2.2 +++ b/xen/arch/x86/x86_32/domain_page.c	Mon Aug 21 11:39:27 2006 +0100
     2.3 @@ -41,7 +41,7 @@ static inline struct vcpu *mapcache_curr
     2.4      return v;
     2.5  }
     2.6  
     2.7 -void *map_domain_page(unsigned long pfn)
     2.8 +void *map_domain_page(unsigned long mfn)
     2.9  {
    2.10      unsigned long va;
    2.11      unsigned int idx, i, vcpu;
    2.12 @@ -58,13 +58,14 @@ void *map_domain_page(unsigned long pfn)
    2.13      vcpu  = v->vcpu_id;
    2.14      cache = &v->domain->arch.mapcache;
    2.15  
    2.16 -    hashent = &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(pfn)];
    2.17 -    if ( hashent->pfn == pfn && (idx = hashent->idx) != MAPHASHENT_NOTINUSE )
    2.18 +    hashent = &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(mfn)];
    2.19 +    if ( hashent->mfn == mfn )
    2.20      {
    2.21 +        idx = hashent->idx;
    2.22          hashent->refcnt++;
    2.23          ASSERT(idx < MAPCACHE_ENTRIES);
    2.24          ASSERT(hashent->refcnt != 0);
    2.25 -        ASSERT(l1e_get_pfn(cache->l1tab[idx]) == pfn);
    2.26 +        ASSERT(l1e_get_pfn(cache->l1tab[idx]) == mfn);
    2.27          goto out;
    2.28      }
    2.29  
    2.30 @@ -106,7 +107,7 @@ void *map_domain_page(unsigned long pfn)
    2.31  
    2.32      spin_unlock(&cache->lock);
    2.33  
    2.34 -    cache->l1tab[idx] = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);
    2.35 +    cache->l1tab[idx] = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
    2.36  
    2.37   out:
    2.38      va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
    2.39 @@ -118,7 +119,7 @@ void unmap_domain_page(void *va)
    2.40      unsigned int idx;
    2.41      struct vcpu *v;
    2.42      struct mapcache *cache;
    2.43 -    unsigned long pfn;
    2.44 +    unsigned long mfn;
    2.45      struct vcpu_maphash_entry *hashent;
    2.46  
    2.47      ASSERT(!in_irq());
    2.48 @@ -131,12 +132,12 @@ void unmap_domain_page(void *va)
    2.49      cache = &v->domain->arch.mapcache;
    2.50  
    2.51      idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
    2.52 -    pfn = l1e_get_pfn(cache->l1tab[idx]);
    2.53 -    hashent = &cache->vcpu_maphash[v->vcpu_id].hash[MAPHASH_HASHFN(pfn)];
    2.54 +    mfn = l1e_get_pfn(cache->l1tab[idx]);
    2.55 +    hashent = &cache->vcpu_maphash[v->vcpu_id].hash[MAPHASH_HASHFN(mfn)];
    2.56  
    2.57      if ( hashent->idx == idx )
    2.58      {
    2.59 -        ASSERT(hashent->pfn == pfn);
    2.60 +        ASSERT(hashent->mfn == mfn);
    2.61          ASSERT(hashent->refcnt != 0);
    2.62          hashent->refcnt--;
    2.63      }
    2.64 @@ -145,14 +146,14 @@ void unmap_domain_page(void *va)
    2.65          if ( hashent->idx != MAPHASHENT_NOTINUSE )
    2.66          {
    2.67              /* /First/, zap the PTE. */
    2.68 -            ASSERT(l1e_get_pfn(cache->l1tab[hashent->idx]) == hashent->pfn);
    2.69 +            ASSERT(l1e_get_pfn(cache->l1tab[hashent->idx]) == hashent->mfn);
    2.70              cache->l1tab[hashent->idx] = l1e_empty();
    2.71              /* /Second/, mark as garbage. */
    2.72              set_bit(hashent->idx, cache->garbage);
    2.73          }
    2.74  
    2.75          /* Add newly-freed mapping to the maphash. */
    2.76 -        hashent->pfn = pfn;
    2.77 +        hashent->mfn = mfn;
    2.78          hashent->idx = idx;
    2.79      }
    2.80      else
    2.81 @@ -167,6 +168,7 @@ void unmap_domain_page(void *va)
    2.82  void mapcache_init(struct domain *d)
    2.83  {
    2.84      unsigned int i, j;
    2.85 +    struct vcpu_maphash_entry *hashent;
    2.86  
    2.87      d->arch.mapcache.l1tab = d->arch.mm_perdomain_pt +
    2.88          (GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
    2.89 @@ -174,33 +176,14 @@ void mapcache_init(struct domain *d)
    2.90  
    2.91      /* Mark all maphash entries as not in use. */
    2.92      for ( i = 0; i < MAX_VIRT_CPUS; i++ )
    2.93 +    {
    2.94          for ( j = 0; j < MAPHASH_ENTRIES; j++ )
    2.95 -            d->arch.mapcache.vcpu_maphash[i].hash[j].idx =
    2.96 -                MAPHASHENT_NOTINUSE;
    2.97 -}
    2.98 -
    2.99 -paddr_t mapped_domain_page_to_maddr(void *va) 
   2.100 -/* Convert a pointer in a mapped domain page to a machine address. 
   2.101 - * Takes any pointer that's valid for use in unmap_domain_page() */
   2.102 -{
   2.103 -    unsigned int idx;
   2.104 -    struct vcpu *v;
   2.105 -    struct mapcache *cache;
   2.106 -    unsigned long pfn;
   2.107 -
   2.108 -    ASSERT(!in_irq());
   2.109 -
   2.110 -    ASSERT((void *)MAPCACHE_VIRT_START <= va);
   2.111 -    ASSERT(va < (void *)MAPCACHE_VIRT_END);
   2.112 -
   2.113 -    v = mapcache_current_vcpu();
   2.114 -
   2.115 -    cache = &v->domain->arch.mapcache;
   2.116 -
   2.117 -    idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
   2.118 -    pfn = l1e_get_pfn(cache->l1tab[idx]);
   2.119 -    return ((paddr_t) pfn << PAGE_SHIFT 
   2.120 -            | ((unsigned long) va & ~PAGE_MASK));
   2.121 +        {
   2.122 +            hashent = &d->arch.mapcache.vcpu_maphash[i].hash[j];
   2.123 +            hashent->mfn = ~0UL; /* never valid to map */
   2.124 +            hashent->idx = MAPHASHENT_NOTINUSE;
   2.125 +        }
   2.126 +    }
   2.127  }
   2.128  
   2.129  #define GLOBALMAP_BITS (IOREMAP_MBYTES << (20 - PAGE_SHIFT))
   2.130 @@ -209,7 +192,7 @@ static unsigned long garbage[BITS_TO_LON
   2.131  static unsigned int inuse_cursor;
   2.132  static DEFINE_SPINLOCK(globalmap_lock);
   2.133  
   2.134 -void *map_domain_page_global(unsigned long pfn)
   2.135 +void *map_domain_page_global(unsigned long mfn)
   2.136  {
   2.137      l2_pgentry_t *pl2e;
   2.138      l1_pgentry_t *pl1e;
   2.139 @@ -246,7 +229,7 @@ void *map_domain_page_global(unsigned lo
   2.140  
   2.141      pl2e = virt_to_xen_l2e(va);
   2.142      pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
   2.143 -    *pl1e = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);
   2.144 +    *pl1e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
   2.145  
   2.146      return (void *)va;
   2.147  }
   2.148 @@ -258,7 +241,7 @@ void unmap_domain_page_global(void *va)
   2.149      l1_pgentry_t *pl1e;
   2.150      unsigned int idx;
   2.151  
   2.152 -    ASSERT((__va >= IOREMAP_VIRT_START) && (__va <= (IOREMAP_VIRT_END - 1)));
   2.153 +    ASSERT((__va >= IOREMAP_VIRT_START) && (__va < IOREMAP_VIRT_END));
   2.154  
   2.155      /* /First/, we zap the PTE. */
   2.156      pl2e = virt_to_xen_l2e(__va);
   2.157 @@ -269,3 +252,29 @@ void unmap_domain_page_global(void *va)
   2.158      idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
   2.159      set_bit(idx, garbage);
   2.160  }
   2.161 +
   2.162 +paddr_t maddr_from_mapped_domain_page(void *va) 
   2.163 +{
   2.164 +    unsigned long __va = (unsigned long)va;
   2.165 +    l2_pgentry_t *pl2e;
   2.166 +    l1_pgentry_t *pl1e;
   2.167 +    unsigned int idx;
   2.168 +    struct mapcache *cache;
   2.169 +    unsigned long mfn;
   2.170 +
   2.171 +    if ( (__va >= MAPCACHE_VIRT_START) && (__va < MAPCACHE_VIRT_END) )
   2.172 +    {
   2.173 +        cache = &mapcache_current_vcpu()->domain->arch.mapcache;
   2.174 +        idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
   2.175 +        mfn = l1e_get_pfn(cache->l1tab[idx]);
   2.176 +    }
   2.177 +    else
   2.178 +    {
   2.179 +        ASSERT((__va >= IOREMAP_VIRT_START) && (__va < IOREMAP_VIRT_END));
   2.180 +        pl2e = virt_to_xen_l2e(__va);
   2.181 +        pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
   2.182 +        mfn = l1e_get_pfn(*pl1e);
   2.183 +    }
   2.184 +    
   2.185 +    return ((paddr_t)mfn << PAGE_SHIFT) | ((unsigned long)va & ~PAGE_MASK);
   2.186 +}
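
A note on the map_domain_page() hunk above: the hash-hit test drops the explicit idx != MAPHASHENT_NOTINUSE check. That is safe only because mapcache_init() now seeds every slot with mfn = ~0UL, a value no mappable frame can carry, so an unused slot can never produce a false hit. A self-contained sketch of the resulting fast path (maphash_lookup() is a hypothetical helper for illustration; the types and MAPHASH_HASHFN are as used in the patch):

    static void *maphash_lookup(struct mapcache *cache, unsigned int vcpu,
                                unsigned long mfn)
    {
        struct vcpu_maphash_entry *hashent =
            &cache->vcpu_maphash[vcpu].hash[MAPHASH_HASHFN(mfn)];

        /* Unused slots hold mfn == ~0UL, so one comparison suffices. */
        if ( hashent->mfn != mfn )
            return NULL;              /* miss: fall back to the slow path */

        hashent->refcnt++;
        ASSERT(hashent->idx < MAPCACHE_ENTRIES);
        return (void *)(MAPCACHE_VIRT_START +
                        ((unsigned long)hashent->idx << PAGE_SHIFT));
    }
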
     3.1 --- a/xen/include/asm-x86/domain.h	Mon Aug 21 10:28:02 2006 +0100
     3.2 +++ b/xen/include/asm-x86/domain.h	Mon Aug 21 11:39:27 2006 +0100
     3.3 @@ -18,7 +18,7 @@ struct trap_bounce {
     3.4  #define MAPHASHENT_NOTINUSE ((u16)~0U)
     3.5  struct vcpu_maphash {
     3.6      struct vcpu_maphash_entry {
     3.7 -        unsigned long pfn;
     3.8 +        unsigned long mfn;
     3.9          uint16_t      idx;
    3.10          uint16_t      refcnt;
    3.11      } hash[MAPHASH_ENTRIES];
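
The entry now carries two independent sentinels: mfn == ~0UL keeps the hash probe in map_domain_page() from matching an unused slot, while idx == MAPHASHENT_NOTINUSE tells unmap_domain_page() there is no stale PTE to zap before the slot is recycled. A sketch of resetting a single slot, mirroring the mapcache_init() hunk above (maphash_reset() is a hypothetical helper):

    static void maphash_reset(struct vcpu_maphash_entry *hashent)
    {
        hashent->mfn    = ~0UL;                 /* never valid to map */
        hashent->idx    = MAPHASHENT_NOTINUSE;  /* nothing to zap     */
        hashent->refcnt = 0;
    }
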
     4.1 --- a/xen/include/xen/domain_page.h	Mon Aug 21 10:28:02 2006 +0100
     4.2 +++ b/xen/include/xen/domain_page.h	Mon Aug 21 11:39:27 2006 +0100
     4.3 @@ -18,34 +18,34 @@
     4.4   * Map a given page frame, returning the mapped virtual address. The page is
     4.5   * then accessible within the current VCPU until a corresponding unmap call.
     4.6   */
     4.7 -extern void *map_domain_page(unsigned long pfn);
     4.8 +void *map_domain_page(unsigned long mfn);
     4.9  
    4.10  /*
    4.11   * Pass a VA within a page previously mapped in the context of the
    4.12 - * currently-executing VCPU via a call to map_domain_pages().
    4.13 + * currently-executing VCPU via a call to map_domain_page().
    4.14   */
    4.15 -extern void unmap_domain_page(void *va);
    4.16 -
    4.17 -/* 
    4.18 - * Convert a VA (within a page previously mapped in the context of the
    4.19 - * currently-executing VCPU via a call to map_domain_pages()) to a machine 
    4.20 - * address 
    4.21 - */
    4.22 -extern paddr_t mapped_domain_page_to_maddr(void *va);
    4.23 +void unmap_domain_page(void *va);
    4.24  
    4.25  /*
    4.26   * Similar to the above calls, except the mapping is accessible in all
    4.27   * address spaces (not just within the VCPU that created the mapping). Global
    4.28   * mappings can also be unmapped from any context.
    4.29   */
    4.30 -extern void *map_domain_page_global(unsigned long pfn);
    4.31 -extern void unmap_domain_page_global(void *va);
    4.32 +void *map_domain_page_global(unsigned long mfn);
    4.33 +void unmap_domain_page_global(void *va);
    4.34 +
    4.35 +/* 
    4.36 + * Convert a VA (within a page previously mapped in the context of the
    4.37 + * currently-executing VCPU via a call to map_domain_page(), or via a
    4.38 + * previous call to map_domain_page_global()) to the mapped machine address.
    4.39 + */
    4.40 +paddr_t maddr_from_mapped_domain_page(void *va);
    4.41  
    4.42  #define DMCACHE_ENTRY_VALID 1U
    4.43  #define DMCACHE_ENTRY_HELD  2U
    4.44  
    4.45  struct domain_mmap_cache {
    4.46 -    unsigned long pfn;
    4.47 +    unsigned long mfn;
    4.48      void         *va;
    4.49      unsigned int  flags;
    4.50  };
    4.51 @@ -55,12 +55,12 @@ domain_mmap_cache_init(struct domain_mma
    4.52  {
    4.53      ASSERT(cache != NULL);
    4.54      cache->flags = 0;
    4.55 -    cache->pfn = 0;
    4.56 +    cache->mfn = 0;
    4.57      cache->va = NULL;
    4.58  }
    4.59  
    4.60  static inline void *
    4.61 -map_domain_page_with_cache(unsigned long pfn, struct domain_mmap_cache *cache)
    4.62 +map_domain_page_with_cache(unsigned long mfn, struct domain_mmap_cache *cache)
    4.63  {
    4.64      ASSERT(cache != NULL);
    4.65      BUG_ON(cache->flags & DMCACHE_ENTRY_HELD);
    4.66 @@ -68,13 +68,13 @@ map_domain_page_with_cache(unsigned long
    4.67      if ( likely(cache->flags & DMCACHE_ENTRY_VALID) )
    4.68      {
    4.69          cache->flags |= DMCACHE_ENTRY_HELD;
    4.70 -        if ( likely(pfn == cache->pfn) )
    4.71 +        if ( likely(mfn == cache->mfn) )
    4.72              goto done;
    4.73          unmap_domain_page(cache->va);
    4.74      }
    4.75  
    4.76 -    cache->pfn   = pfn;
    4.77 -    cache->va    = map_domain_page(pfn);
    4.78 +    cache->mfn   = mfn;
    4.79 +    cache->va    = map_domain_page(mfn);
    4.80      cache->flags = DMCACHE_ENTRY_HELD | DMCACHE_ENTRY_VALID;
    4.81  
    4.82   done:
    4.83 @@ -103,26 +103,22 @@ domain_mmap_cache_destroy(struct domain_
    4.84  
    4.85  #else /* !CONFIG_DOMAIN_PAGE */
    4.86  
    4.87 -#define map_domain_page(pfn)                maddr_to_virt((pfn)<<PAGE_SHIFT)
    4.88 +#define map_domain_page(mfn)                maddr_to_virt((mfn)<<PAGE_SHIFT)
    4.89  #define unmap_domain_page(va)               ((void)(va))
    4.90 -#define mapped_domain_page_to_maddr(va)     (virt_to_maddr(va))
    4.91  
    4.92 -#define map_domain_page_global(pfn)         maddr_to_virt((pfn)<<PAGE_SHIFT)
    4.93 +#define map_domain_page_global(mfn)         maddr_to_virt((mfn)<<PAGE_SHIFT)
    4.94  #define unmap_domain_page_global(va)        ((void)(va))
    4.95  
    4.96 +#define maddr_from_mapped_domain_page(va)   (virt_to_maddr(va))
    4.97 +
    4.98  struct domain_mmap_cache { 
    4.99  };
   4.100  
   4.101  #define domain_mmap_cache_init(c)           ((void)(c))
   4.102 -#define map_domain_page_with_cache(pfn,c)   (map_domain_page(pfn))
   4.103 +#define map_domain_page_with_cache(mfn,c)   (map_domain_page(mfn))
   4.104  #define unmap_domain_page_with_cache(va,c)  ((void)(va))
   4.105  #define domain_mmap_cache_destroy(c)        ((void)(c))
   4.106  
   4.107  #endif /* !CONFIG_DOMAIN_PAGE */
   4.108  
   4.109 -#define HERE_I_AM \
   4.110 -do { \
   4.111 -    printk("HERE I AM: %s %s %d\n", __func__, __FILE__, __LINE__); \
   4.112 -} while (0)
   4.113 -
   4.114  #endif /* __XEN_DOMAIN_PAGE_H__ */
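
For context, the domain_mmap_cache helpers above amortise repeated mappings: a call with the same mfn as the cached entry reuses the existing VA instead of remapping. A hedged usage sketch (nr_frames and mfns[] are illustrative, not from the patch):

    struct domain_mmap_cache cache;
    unsigned int i;
    void *p;

    domain_mmap_cache_init(&cache);
    for ( i = 0; i < nr_frames; i++ )
    {
        /* Remaps only when mfns[i] differs from the cached frame. */
        p = map_domain_page_with_cache(mfns[i], &cache);
        /* ... access the frame through p ... */
        unmap_domain_page_with_cache(p, &cache);
    }
    domain_mmap_cache_destroy(&cache);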