ia64/xen-unstable

changeset 8550:542cb7acb21a

Add a new map_domain_page_global() interface to allow mappings
that are accessible in all contexts and address spaces.
It is used by the shadow code and the VMX code.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Jan 10 18:25:45 2006 +0100 (2006-01-10)
parents f5f703ec5223
children ed7888c838ad
files xen/arch/x86/shadow.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/arch/x86/vmx.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/domain_page.c xen/include/xen/domain_page.h
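
Before the per-file diff, a minimal usage sketch of the new interface (not part
of the changeset). The two extern declarations match the additions to
xen/include/xen/domain_page.h below; the vtable_state structure and the
remap_vtable() helper are hypothetical stand-ins for the guest_vtable handling
seen in the shadow code.

    #include <stddef.h>

    /* Declarations as added to xen/include/xen/domain_page.h by this changeset. */
    extern void *map_domain_page_global(unsigned long pfn);
    extern void unmap_domain_page_global(void *va);

    /* Hypothetical per-vcpu state; mirrors the guest_vtable pattern below. */
    struct vtable_state {
        void *vtable;   /* valid in every address space, not just the mapper's */
    };

    /* Replace the currently mapped frame with a new one. */
    static void remap_vtable(struct vtable_state *s, unsigned long gmfn)
    {
        if ( s->vtable != NULL )
            unmap_domain_page_global(s->vtable);  /* release the old mapping */
        s->vtable = map_domain_page_global(gmfn); /* map the new frame       */
    }
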
line diff
     1.1 --- a/xen/arch/x86/shadow.c	Tue Jan 10 17:16:30 2006 +0000
     1.2 +++ b/xen/arch/x86/shadow.c	Tue Jan 10 18:25:45 2006 +0100
     1.3 @@ -2150,8 +2150,8 @@ static void shadow_update_pagetables(str
     1.4      if ( max_mode & (SHM_enable | SHM_external) )
     1.5      {
     1.6          if ( likely(v->arch.guest_vtable != NULL) )
     1.7 -            unmap_domain_page(v->arch.guest_vtable);
     1.8 -        v->arch.guest_vtable = map_domain_page(gmfn);
     1.9 +            unmap_domain_page_global(v->arch.guest_vtable);
    1.10 +        v->arch.guest_vtable = map_domain_page_global(gmfn);
    1.11      }
    1.12  
    1.13      /*
    1.14 @@ -2187,8 +2187,8 @@ static void shadow_update_pagetables(str
    1.15          )
    1.16      {
    1.17          if ( v->arch.shadow_vtable )
    1.18 -            unmap_domain_page(v->arch.shadow_vtable);
    1.19 -        v->arch.shadow_vtable = map_domain_page(smfn);
    1.20 +            unmap_domain_page_global(v->arch.shadow_vtable);
    1.21 +        v->arch.shadow_vtable = map_domain_page_global(smfn);
    1.22      }
    1.23  
    1.24  #if CONFIG_PAGING_LEVELS == 2
    1.25 @@ -2204,8 +2204,8 @@ static void shadow_update_pagetables(str
    1.26          if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
    1.27              hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn);
    1.28          if ( v->arch.hl2_vtable )
    1.29 -            unmap_domain_page(v->arch.hl2_vtable);
    1.30 -        v->arch.hl2_vtable = map_domain_page(hl2mfn);
    1.31 +            unmap_domain_page_global(v->arch.hl2_vtable);
    1.32 +        v->arch.hl2_vtable = map_domain_page_global(hl2mfn);
    1.33      }
    1.34  
    1.35      /*
     2.1 --- a/xen/arch/x86/shadow32.c	Tue Jan 10 17:16:30 2006 +0000
     2.2 +++ b/xen/arch/x86/shadow32.c	Tue Jan 10 18:25:45 2006 +0100
     2.3 @@ -733,7 +733,7 @@ static void alloc_monitor_pagetable(stru
     2.4      ASSERT(mmfn_info != NULL);
     2.5  
     2.6      mmfn = page_to_pfn(mmfn_info);
     2.7 -    mpl2e = (l2_pgentry_t *)map_domain_page(mmfn);
     2.8 +    mpl2e = (l2_pgentry_t *)map_domain_page_global(mmfn);
     2.9      memset(mpl2e, 0, PAGE_SIZE);
    2.10  
    2.11      memcpy(&mpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
    2.12 @@ -794,7 +794,7 @@ void free_monitor_pagetable(struct vcpu 
    2.13       * Then free monitor_table.
    2.14       */
    2.15      mfn = pagetable_get_pfn(v->arch.monitor_table);
    2.16 -    unmap_domain_page(v->arch.monitor_vtable);
    2.17 +    unmap_domain_page_global(v->arch.monitor_vtable);
    2.18      free_domheap_page(pfn_to_page(mfn));
    2.19  
    2.20      v->arch.monitor_table = mk_pagetable(0);
    2.21 @@ -929,7 +929,7 @@ int __shadow_mode_enable(struct domain *
    2.22          if ( v->arch.guest_vtable &&
    2.23               (v->arch.guest_vtable != __linear_l2_table) )
    2.24          {
    2.25 -            unmap_domain_page(v->arch.guest_vtable);
    2.26 +            unmap_domain_page_global(v->arch.guest_vtable);
    2.27          }
    2.28          if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
    2.29              v->arch.guest_vtable = __linear_l2_table;
    2.30 @@ -942,7 +942,7 @@ int __shadow_mode_enable(struct domain *
    2.31          if ( v->arch.shadow_vtable &&
    2.32               (v->arch.shadow_vtable != __shadow_linear_l2_table) )
    2.33          {
    2.34 -            unmap_domain_page(v->arch.shadow_vtable);
    2.35 +            unmap_domain_page_global(v->arch.shadow_vtable);
    2.36          }
    2.37          if ( !(mode & SHM_external) )
    2.38              v->arch.shadow_vtable = __shadow_linear_l2_table;
    2.39 @@ -955,7 +955,7 @@ int __shadow_mode_enable(struct domain *
    2.40          if ( v->arch.hl2_vtable &&
    2.41               (v->arch.hl2_vtable != __linear_hl2_table) )
    2.42          {
    2.43 -            unmap_domain_page(v->arch.hl2_vtable);
    2.44 +            unmap_domain_page_global(v->arch.hl2_vtable);
    2.45          }
    2.46          if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
    2.47              v->arch.hl2_vtable = __linear_hl2_table;
    2.48 @@ -2906,8 +2906,8 @@ void __update_pagetables(struct vcpu *v)
    2.49      if ( max_mode & (SHM_enable | SHM_external) )
    2.50      {
    2.51          if ( likely(v->arch.guest_vtable != NULL) )
    2.52 -            unmap_domain_page(v->arch.guest_vtable);
    2.53 -        v->arch.guest_vtable = map_domain_page(gmfn);
    2.54 +            unmap_domain_page_global(v->arch.guest_vtable);
    2.55 +        v->arch.guest_vtable = map_domain_page_global(gmfn);
    2.56      }
    2.57  
    2.58      /*
    2.59 @@ -2932,8 +2932,8 @@ void __update_pagetables(struct vcpu *v)
    2.60      if ( max_mode == SHM_external )
    2.61      {
    2.62          if ( v->arch.shadow_vtable )
    2.63 -            unmap_domain_page(v->arch.shadow_vtable);
    2.64 -        v->arch.shadow_vtable = map_domain_page(smfn);
    2.65 +            unmap_domain_page_global(v->arch.shadow_vtable);
    2.66 +        v->arch.shadow_vtable = map_domain_page_global(smfn);
    2.67      }
    2.68  
    2.69      /*
    2.70 @@ -2948,8 +2948,8 @@ void __update_pagetables(struct vcpu *v)
    2.71          if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
    2.72              hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn);
    2.73          if ( v->arch.hl2_vtable )
    2.74 -            unmap_domain_page(v->arch.hl2_vtable);
    2.75 -        v->arch.hl2_vtable = map_domain_page(hl2mfn);
    2.76 +            unmap_domain_page_global(v->arch.hl2_vtable);
    2.77 +        v->arch.hl2_vtable = map_domain_page_global(hl2mfn);
    2.78      }
    2.79  
    2.80      /*
     3.1 --- a/xen/arch/x86/shadow_public.c	Tue Jan 10 17:16:30 2006 +0000
     3.2 +++ b/xen/arch/x86/shadow_public.c	Tue Jan 10 18:25:45 2006 +0100
     3.3 @@ -151,6 +151,8 @@ free_shadow_fl1_table(struct domain *d, 
     3.4  
     3.5      for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
     3.6          put_page_from_l1e(pl1e[i], d);
     3.7 +
     3.8 +    unmap_domain_page(pl1e);
     3.9  }
    3.10  
    3.11  /*
    3.12 @@ -254,6 +256,7 @@ static pagetable_t page_table_convert(st
    3.13      pae_l3 = map_domain_page(pagetable_get_pfn(d->arch.phys_table));
    3.14      for (i = 0; i < PDP_ENTRIES; i++)
    3.15          l3[i] = l3e_from_pfn(l3e_get_pfn(pae_l3[i]), __PAGE_HYPERVISOR);
    3.16 +    unmap_domain_page(pae_l3);
    3.17  
    3.18      unmap_domain_page(l4);
    3.19      unmap_domain_page(l3);
    3.20 @@ -275,7 +278,7 @@ static void alloc_monitor_pagetable(stru
    3.21      ASSERT( mmfn_info );
    3.22  
    3.23      mmfn = page_to_pfn(mmfn_info);
    3.24 -    mpl4e = (l4_pgentry_t *) map_domain_page(mmfn);
    3.25 +    mpl4e = (l4_pgentry_t *) map_domain_page_global(mmfn);
    3.26      memcpy(mpl4e, &idle_pg_table[0], PAGE_SIZE);
    3.27      mpl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
    3.28          l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
    3.29 @@ -298,7 +301,7 @@ void free_monitor_pagetable(struct vcpu 
    3.30       * free monitor_table.
    3.31       */
    3.32      mfn = pagetable_get_pfn(v->arch.monitor_table);
    3.33 -    unmap_domain_page(v->arch.monitor_vtable);
    3.34 +    unmap_domain_page_global(v->arch.monitor_vtable);
    3.35      free_domheap_page(pfn_to_page(mfn));
    3.36  
    3.37      v->arch.monitor_table = mk_pagetable(0);
    3.38 @@ -332,7 +335,7 @@ static void alloc_monitor_pagetable(stru
    3.39      ASSERT(mmfn_info != NULL);
    3.40  
    3.41      mmfn = page_to_pfn(mmfn_info);
    3.42 -    mpl2e = (l2_pgentry_t *)map_domain_page(mmfn);
    3.43 +    mpl2e = (l2_pgentry_t *)map_domain_page_global(mmfn);
    3.44      memset(mpl2e, 0, PAGE_SIZE);
    3.45  
    3.46      memcpy(&mpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
    3.47 @@ -393,7 +396,7 @@ void free_monitor_pagetable(struct vcpu 
    3.48       * Then free monitor_table.
    3.49       */
    3.50      mfn = pagetable_get_pfn(v->arch.monitor_table);
    3.51 -    unmap_domain_page(v->arch.monitor_vtable);
    3.52 +    unmap_domain_page_global(v->arch.monitor_vtable);
    3.53      free_domheap_page(pfn_to_page(mfn));
    3.54  
    3.55      v->arch.monitor_table = mk_pagetable(0);
    3.56 @@ -977,7 +980,7 @@ int __shadow_mode_enable(struct domain *
    3.57          if ( v->arch.guest_vtable &&
    3.58               (v->arch.guest_vtable != __linear_l2_table) )
    3.59          {
    3.60 -            unmap_domain_page(v->arch.guest_vtable);
    3.61 +            unmap_domain_page_global(v->arch.guest_vtable);
    3.62          }
    3.63          if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
    3.64              v->arch.guest_vtable = __linear_l2_table;
    3.65 @@ -990,7 +993,7 @@ int __shadow_mode_enable(struct domain *
    3.66          if ( v->arch.shadow_vtable &&
    3.67               (v->arch.shadow_vtable != __shadow_linear_l2_table) )
    3.68          {
    3.69 -            unmap_domain_page(v->arch.shadow_vtable);
    3.70 +            unmap_domain_page_global(v->arch.shadow_vtable);
    3.71          }
    3.72          if ( !(mode & SHM_external) && d->arch.ops->guest_paging_levels == 2)
    3.73              v->arch.shadow_vtable = __shadow_linear_l2_table;
    3.74 @@ -1004,7 +1007,7 @@ int __shadow_mode_enable(struct domain *
    3.75          if ( v->arch.hl2_vtable &&
    3.76               (v->arch.hl2_vtable != __linear_hl2_table) )
    3.77          {
    3.78 -            unmap_domain_page(v->arch.hl2_vtable);
    3.79 +            unmap_domain_page_global(v->arch.hl2_vtable);
    3.80          }
    3.81          if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
    3.82              v->arch.hl2_vtable = __linear_hl2_table;
     4.1 --- a/xen/arch/x86/vmx.c	Tue Jan 10 17:16:30 2006 +0000
     4.2 +++ b/xen/arch/x86/vmx.c	Tue Jan 10 18:25:45 2006 +0100
     4.3 @@ -98,7 +98,8 @@ void vmx_relinquish_resources(struct vcp
     4.4          /* unmap IO shared page */
     4.5          struct domain *d = v->domain;
     4.6          if ( d->arch.vmx_platform.shared_page_va )
     4.7 -            unmap_domain_page((void *)d->arch.vmx_platform.shared_page_va);
     4.8 +            unmap_domain_page_global(
     4.9 +                (void *)d->arch.vmx_platform.shared_page_va);
    4.10      }
    4.11  
    4.12      destroy_vmcs(&v->arch.arch_vmx);
     5.1 --- a/xen/arch/x86/vmx_vmcs.c	Tue Jan 10 17:16:30 2006 +0000
     5.2 +++ b/xen/arch/x86/vmx_vmcs.c	Tue Jan 10 18:25:45 2006 +0100
     5.3 @@ -193,7 +193,7 @@ static void vmx_map_io_shared_page(struc
     5.4          domain_crash_synchronous();
     5.5      }
     5.6  
     5.7 -    p = map_domain_page(mpfn);
     5.8 +    p = map_domain_page_global(mpfn);
     5.9      if (p == NULL) {
    5.10          printk("Can not map io request shared page for VMX domain.\n");
    5.11          domain_crash_synchronous();
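
The two VMX hunks above form a pair: the io-request shared page is mapped once
when the VMCS is built (vmx_map_io_shared_page) and torn down in
vmx_relinquish_resources(). A hedged sketch of that lifecycle follows, assuming
the same header declarations; the platform_state structure and the simple
return-code error handling are illustrative simplifications, not the actual
d->arch.vmx_platform code.

    #include <stddef.h>

    extern void *map_domain_page_global(unsigned long pfn);
    extern void unmap_domain_page_global(void *va);

    /* Hypothetical mirror of d->arch.vmx_platform.shared_page_va. */
    struct platform_state {
        unsigned long shared_page_va;
    };

    /* Setup: map the guest frame so every vcpu and context can reach it later. */
    static int shared_page_setup(struct platform_state *p, unsigned long mpfn)
    {
        void *va = map_domain_page_global(mpfn);
        if ( va == NULL )
            return -1;                            /* caller decides how to fail */
        p->shared_page_va = (unsigned long)va;
        return 0;
    }

    /* Teardown: drop the mapping when the domain's resources are relinquished. */
    static void shared_page_teardown(struct platform_state *p)
    {
        if ( p->shared_page_va )
            unmap_domain_page_global((void *)p->shared_page_va);
        p->shared_page_va = 0;
    }
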
     6.1 --- a/xen/arch/x86/x86_32/domain_page.c	Tue Jan 10 17:16:30 2006 +0000
     6.2 +++ b/xen/arch/x86/x86_32/domain_page.c	Tue Jan 10 18:25:45 2006 +0100
     6.3 @@ -101,3 +101,71 @@ void unmap_domain_pages(void *va, unsign
     6.4      for ( i = 0; i < (1U << order); i++ )
     6.5          l1e_add_flags(cache->l1tab[idx+i], READY_FOR_TLB_FLUSH);
     6.6  }
     6.7 +
     6.8 +#define GLOBALMAP_BITS (IOREMAP_MBYTES << (20 - PAGE_SHIFT))
     6.9 +static unsigned long inuse[BITS_TO_LONGS(GLOBALMAP_BITS)];
    6.10 +static unsigned long garbage[BITS_TO_LONGS(GLOBALMAP_BITS)];
    6.11 +static unsigned int inuse_cursor;
    6.12 +static spinlock_t globalmap_lock = SPIN_LOCK_UNLOCKED;
    6.13 +
    6.14 +void *map_domain_page_global(unsigned long pfn)
    6.15 +{
    6.16 +    l2_pgentry_t *pl2e;
    6.17 +    l1_pgentry_t *pl1e;
    6.18 +    unsigned int idx, i;
    6.19 +    unsigned long va;
    6.20 +
    6.21 +    ASSERT(!in_irq() && local_irq_is_enabled());
    6.22 +
    6.23 +    spin_lock(&globalmap_lock);
    6.24 +
    6.25 +    for ( ; ; )
    6.26 +    {
    6.27 +        idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
    6.28 +        va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
    6.29 +
    6.30 +        /* End of round? If not then we're done in this loop. */
    6.31 +        if ( va < FIXADDR_START )
    6.32 +            break;
    6.33 +
    6.34 +        /* /First/, clean the garbage map and update the inuse list. */
    6.35 +        for ( i = 0; i < ARRAY_SIZE(garbage); i++ )
    6.36 +        {
    6.37 +            unsigned long x = xchg(&garbage[i], 0);
    6.38 +            inuse[i] &= ~x;
    6.39 +        }
    6.40 +
    6.41 +        /* /Second/, flush all TLBs to get rid of stale garbage mappings. */
    6.42 +        flush_tlb_all();
    6.43 +
    6.44 +        inuse_cursor = 0;
    6.45 +    }
    6.46 +
    6.47 +    set_bit(idx, inuse);
    6.48 +    inuse_cursor = idx + 1;
    6.49 +
    6.50 +    spin_unlock(&globalmap_lock);
    6.51 +
    6.52 +    pl2e = virt_to_xen_l2e(va);
    6.53 +    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
    6.54 +    *pl1e = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);
    6.55 +
    6.56 +    return (void *)va;
    6.57 +}
    6.58 +
    6.59 +void unmap_domain_page_global(void *va)
    6.60 +{
    6.61 +    unsigned long __va = (unsigned long)va;
    6.62 +    l2_pgentry_t *pl2e;
    6.63 +    l1_pgentry_t *pl1e;
    6.64 +    unsigned int idx;
    6.65 +
    6.66 +    /* /First/, we zap the PTE. */
    6.67 +    pl2e = virt_to_xen_l2e(__va);
    6.68 +    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
    6.69 +    *pl1e = l1e_empty();
    6.70 +
    6.71 +    /* /Second/, we add to the garbage map. */
    6.72 +    idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
    6.73 +    set_bit(idx, garbage);
    6.74 +}
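
The allocator added above hands out one virtual-address slot per global mapping
from a bitmap covering the ioremap area (GLOBALMAP_BITS = IOREMAP_MBYTES <<
(20 - PAGE_SHIFT), i.e. one bit per page; 4 MB of space would give 1024 slots).
Freed slots go into a garbage bitmap and are only recycled, after a single
flush_tlb_all(), when the allocation cursor wraps around, so unmaps stay cheap.
Below is a standalone model of that reclaim policy; the 64-slot size, the bit
helpers and the stubbed flush_tlb_all() are assumptions for illustration, not
the Xen implementation (which also takes globalmap_lock and writes the actual
page-table entries).

    #include <assert.h>
    #include <limits.h>
    #include <stdio.h>

    #define GLOBALMAP_BITS   64               /* assumed small size for the model */
    #define BITS_PER_LONG    (CHAR_BIT * sizeof(unsigned long))
    #define BITS_TO_LONGS(b) (((b) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static unsigned long inuse[BITS_TO_LONGS(GLOBALMAP_BITS)];
    static unsigned long garbage[BITS_TO_LONGS(GLOBALMAP_BITS)];
    static unsigned int  inuse_cursor;

    static void flush_tlb_all(void) { puts("flush_tlb_all()"); }   /* stub */

    static int test_bit(unsigned int idx, const unsigned long *map)
    {
        return (map[idx / BITS_PER_LONG] >> (idx % BITS_PER_LONG)) & 1UL;
    }

    static void set_bit(unsigned int idx, unsigned long *map)
    {
        map[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);
    }

    static unsigned int find_next_zero_bit(const unsigned long *map,
                                           unsigned int size, unsigned int start)
    {
        unsigned int i;
        for ( i = start; i < size; i++ )
            if ( !test_bit(i, map) )
                return i;
        return size;                          /* nothing free past the cursor */
    }

    /* Allocate one slot, reclaiming garbage (and flushing TLBs) only on wrap. */
    static unsigned int alloc_slot(void)
    {
        unsigned int idx, i;

        for ( ; ; )
        {
            idx = find_next_zero_bit(inuse, GLOBALMAP_BITS, inuse_cursor);
            if ( idx < GLOBALMAP_BITS )
                break;

            /* End of round: fold the garbage map into inuse, then flush. */
            for ( i = 0; i < BITS_TO_LONGS(GLOBALMAP_BITS); i++ )
            {
                unsigned long x = garbage[i];
                garbage[i] = 0;
                inuse[i] &= ~x;
            }
            flush_tlb_all();
            inuse_cursor = 0;
        }

        set_bit(idx, inuse);
        inuse_cursor = idx + 1;
        return idx;
    }

    /* Free a slot lazily: it becomes reusable only after the next wrap+flush. */
    static void free_slot(unsigned int idx)
    {
        set_bit(idx, garbage);
    }

    int main(void)
    {
        unsigned int i, slot;

        for ( i = 0; i < GLOBALMAP_BITS; i++ )   /* exhaust one full round */
            free_slot(alloc_slot());

        slot = alloc_slot();                     /* forces reclaim + flush  */
        assert(slot == 0);
        printf("reused slot %u after wrap\n", slot);
        return 0;
    }
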
     7.1 --- a/xen/include/xen/domain_page.h	Tue Jan 10 17:16:30 2006 +0000
     7.2 +++ b/xen/include/xen/domain_page.h	Tue Jan 10 18:25:45 2006 +0100
     7.3 @@ -17,16 +17,26 @@
     7.4  
     7.5  /*
     7.6   * Maps a given range of page frames, returning the mapped virtual address. The
     7.7 - * pages are now accessible until a corresponding call to unmap_domain_page().
     7.8 + * pages are now accessible within the current domain until a corresponding
     7.9 + * call to unmap_domain_page().
    7.10   */
    7.11  extern void *map_domain_pages(unsigned long pfn, unsigned int order);
    7.12  
    7.13  /*
    7.14 - * Pass a VA within the first page of a range previously mapped with
    7.15 - * map_omain_pages(). Those pages will then be removed from the mapping lists.
    7.16 + * Pass a VA within the first page of a range previously mapped in the context
    7.17 + * of the currently-executing domain via a call to map_domain_pages(). Those
    7.18 + * pages will then be removed from the mapping lists.
    7.19   */
    7.20  extern void unmap_domain_pages(void *va, unsigned int order);
    7.21  
    7.22 +/*
    7.23 + * Similar to the above calls, except the mapping is accessible in all
    7.24 + * address spaces (not just within the domain that created the mapping). Global
    7.25 + * mappings can also be unmapped from any context.
    7.26 + */
    7.27 +extern void *map_domain_page_global(unsigned long pfn);
    7.28 +extern void unmap_domain_page_global(void *va);
    7.29 +
    7.30  #define DMCACHE_ENTRY_VALID 1U
    7.31  #define DMCACHE_ENTRY_HELD  2U
    7.32  
    7.33 @@ -90,6 +100,9 @@ domain_mmap_cache_destroy(struct domain_
    7.34  #define map_domain_pages(pfn,order)         phys_to_virt((pfn)<<PAGE_SHIFT)
    7.35  #define unmap_domain_pages(va,order)        ((void)((void)(va),(void)(order)))
    7.36  
    7.37 +#define map_domain_page_global(pfn)         phys_to_virt((pfn)<<PAGE_SHIFT)
    7.38 +#define unmap_domain_page_global(va)        ((void)(va))
    7.39 +
    7.40  struct domain_mmap_cache { 
    7.41  };
    7.42
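
On builds that define map_domain_page() straight onto the direct map (the
fallback branch this last hunk extends), the new global calls are likewise just
phys_to_virt() and a no-op, so callers can use the global interface
unconditionally. A small self-contained illustration follows; the direct-map
base address, PAGE_SHIFT value and phys_to_virt() stand-in are made up purely
to show the arithmetic.

    #include <stdio.h>

    /* Hypothetical stand-ins: on such builds every frame is already mapped 1:1. */
    #define PAGE_SHIFT           12
    #define DIRECTMAP_VIRT_START 0xFF000000UL
    #define phys_to_virt(pa)     ((void *)(DIRECTMAP_VIRT_START + (unsigned long)(pa)))

    /* The fallback definitions from the header: map is a lookup, unmap a no-op. */
    #define map_domain_page_global(pfn)  phys_to_virt((pfn) << PAGE_SHIFT)
    #define unmap_domain_page_global(va) ((void)(va))

    int main(void)
    {
        unsigned long pfn = 0x42;
        void *va = map_domain_page_global(pfn);
        printf("pfn %#lx maps to va %p\n", pfn, va);
        unmap_domain_page_global(va);
        return 0;
    }
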