direct-io.hg

changeset 8694:ce057aa33cad

Clean up shadow-translate xen patches. Add abstractions
for adding/removing pages from a translated guest's
physmap. Define dummy functions so other architectures
will continue to build without errors.
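
For reference, a minimal sketch of how a common-code caller uses the new
abstraction after this change (example_give_extent is an invented name;
its body mirrors the populate_physmap() hunk below). For a translated
guest the helper updates the p2m and m2p under the shadow lock; for a
non-translated guest only the m2p entry is recorded, and on other
architectures the call compiles away to a no-op via the dummy
definitions in xen/include/xen/shadow.h:

    /* Illustrative only; not part of this patch. */
    static void example_give_extent(struct domain *d, unsigned long gpfn,
                                    unsigned long mfn, unsigned int order)
    {
        unsigned long j;

        if ( shadow_mode_translate(d) )
        {
            /* Translated guest: update its p2m (and the m2p) via the helper. */
            for ( j = 0; j < (1UL << order); j++ )
                guest_physmap_add_page(d, gpfn + j, mfn + j);
        }
        else
        {
            /* Non-translated guest: just record the m2p mapping. */
            for ( j = 0; j < (1UL << order); j++ )
                set_pfn_from_mfn(mfn + j, gpfn + j);
        }
    }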

Remove setting of XENFEAT_writable_mmu_structures. This
should be set only if the hypervisor supports writable
mappings of all MMU structures (all page tables and
descriptor tables). If we want a mode where only PTEs
can be writable, we should add a feature flag for that
(but I don't think this is a useful mode to support).
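
Purely as a sketch of the alternative mentioned above: a PTE-only
writable mode would advertise its own flag from the XENVER_get_features
handler in xen/common/kernel.c rather than overloading
XENFEAT_writable_mmu_structures. XENFEAT_writable_pte_entries is an
invented name, and the direct submap assignment simply follows the
removed code's convention:

        case 0:
            /* Hypothetical flag; not defined in public/features.h. */
            if ( shadow_mode_wr_pt_pte(current->domain) )
                fi.submap = XENFEAT_writable_pte_entries;
            else
                fi.submap = 0;
            break;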

TODO: The call to get the pfn hole should be a
XENMEM_ function, not another MMUEXT_OP (hopefully the
latter hypercall is not going to grow any more as it's
gross enough already).
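
One possible shape for that XENMEM_ call, sketched only to make the
TODO concrete; XENMEM_get_pfn_hole and xen_get_pfn_hole_t are invented
names and not part of the public interface:

    #define XENMEM_get_pfn_hole    7    /* invented subop number */
    typedef struct xen_get_pfn_hole {
        /* IN: domain whose physmap hole is being queried. */
        domid_t       domid;
        /* OUT: first pfn of the hole and its length in pfns. */
        unsigned long first_pfn;
        unsigned long nr_pfns;
    } xen_get_pfn_hole_t;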

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Jan 28 13:01:19 2006 +0100 (2006-01-28)
parents 491a8798945e
children 1db05e589fa0
files xen/arch/x86/domain.c xen/common/grant_table.c xen/common/kernel.c xen/common/memory.c xen/include/asm-x86/shadow.h xen/include/xen/shadow.h
line diff
     1.1 --- a/xen/arch/x86/domain.c	Sat Jan 28 12:09:45 2006 +0100
     1.2 +++ b/xen/arch/x86/domain.c	Sat Jan 28 13:01:19 2006 +0100
     1.3 @@ -348,7 +348,6 @@ int arch_set_info_guest(
     1.4      struct domain *d = v->domain;
     1.5      unsigned long phys_basetab;
     1.6      int i, rc;
     1.7 -    unsigned got_basetab_type;
     1.8  
     1.9      /*
    1.10       * This is sufficient! If the descriptor DPL differs from CS RPL then we'll
    1.11 @@ -408,27 +407,25 @@ int arch_set_info_guest(
    1.12  
    1.13      v->arch.guest_table = mk_pagetable(phys_basetab);
    1.14  
    1.15 -    got_basetab_type = 0;
    1.16 +    if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
    1.17 +        return rc;
    1.18 +
    1.19      if ( shadow_mode_refcounts(d) )
    1.20      {
    1.21          if ( !get_page(pfn_to_page(phys_basetab>>PAGE_SHIFT), d) )
    1.22 +        {
    1.23 +            destroy_gdt(v);
    1.24              return -EINVAL;
    1.25 +        }
    1.26      }
    1.27      else if ( !(c->flags & VGCF_VMX_GUEST) )
    1.28      {
    1.29          if ( !get_page_and_type(pfn_to_page(phys_basetab>>PAGE_SHIFT), d,
    1.30                                  PGT_base_page_table) )
    1.31 +        {
    1.32 +            destroy_gdt(v);
    1.33              return -EINVAL;
    1.34 -        got_basetab_type = 1;
    1.35 -    }
    1.36 -
    1.37 -    if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
    1.38 -    {
    1.39 -        if (got_basetab_type)
    1.40 -            put_page_and_type(pfn_to_page(phys_basetab>>PAGE_SHIFT));
    1.41 -        else
    1.42 -            put_page(pfn_to_page(phys_basetab>>PAGE_SHIFT));
    1.43 -        return rc;
    1.44 +        }
    1.45      }
    1.46  
    1.47      if ( c->flags & VGCF_VMX_GUEST )
     2.1 --- a/xen/common/grant_table.c	Sat Jan 28 12:09:45 2006 +0100
     2.2 +++ b/xen/common/grant_table.c	Sat Jan 28 13:01:19 2006 +0100
     2.3 @@ -521,7 +521,8 @@ gnttab_setup_table(
     2.4      {
     2.5          ASSERT(d->grant_table != NULL);
     2.6          (void)put_user(GNTST_okay, &uop->status);
     2.7 -        for ( i = 0; i < op.nr_frames; i++ ) {
     2.8 +        for ( i = 0; i < op.nr_frames; i++ )
     2.9 +        {
    2.10              mfn = __mfn_to_gpfn(d, gnttab_shared_mfn(d, d->grant_table, i));
    2.11              (void)put_user(mfn, &op.frame_list[i]);
    2.12          }
    2.13 @@ -709,7 +710,7 @@ gnttab_transfer(
    2.14      int i;
    2.15      grant_entry_t *sha;
    2.16      gnttab_transfer_t gop;
    2.17 -    unsigned long real_mfn;
    2.18 +    unsigned long mfn;
    2.19  
    2.20      for ( i = 0; i < count; i++ )
    2.21      {
    2.22 @@ -730,8 +731,8 @@ gnttab_transfer(
    2.23              continue;
    2.24          }
    2.25  
    2.26 -        real_mfn = __gpfn_to_mfn(d, gop.mfn);
    2.27 -        page = pfn_to_page(real_mfn);
    2.28 +        mfn = __gpfn_to_mfn(d, gop.mfn);
    2.29 +        page = pfn_to_page(mfn);
    2.30          if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
    2.31          { 
    2.32              DPRINTK("gnttab_transfer: xen frame %lx\n",
    2.33 @@ -792,21 +793,8 @@ gnttab_transfer(
    2.34  
    2.35          /* Tell the guest about its new page frame. */
    2.36          sha = &e->grant_table->shared[gop.ref];
    2.37 -        if (shadow_mode_translate(e)) {
    2.38 -            struct domain_mmap_cache c1, c2;
    2.39 -            unsigned long pfn = sha->frame;
    2.40 -            domain_mmap_cache_init(&c1);
    2.41 -            domain_mmap_cache_init(&c2);
    2.42 -            shadow_lock(e);
    2.43 -            shadow_sync_and_drop_references(e, page);
    2.44 -            set_p2m_entry(e, pfn, real_mfn, &c1, &c2);
    2.45 -            set_pfn_from_mfn(real_mfn, pfn);
    2.46 -            shadow_unlock(e);
    2.47 -            domain_mmap_cache_destroy(&c1);
    2.48 -            domain_mmap_cache_destroy(&c2);
    2.49 -        } else {
    2.50 -            sha->frame = real_mfn;
    2.51 -        }
    2.52 +        guest_physmap_add_page(e, sha->frame, mfn);
    2.53 +        sha->frame = mfn;
    2.54          wmb();
    2.55          sha->flags |= GTF_transfer_completed;
    2.56  
     3.1 --- a/xen/common/kernel.c	Sat Jan 28 12:09:45 2006 +0100
     3.2 +++ b/xen/common/kernel.c	Sat Jan 28 13:01:19 2006 +0100
     3.3 @@ -13,7 +13,6 @@
     3.4  #include <asm/current.h>
     3.5  #include <public/nmi.h>
     3.6  #include <public/version.h>
     3.7 -#include <asm/shadow.h>
     3.8  
     3.9  void cmdline_parse(char *cmdline)
    3.10  {
    3.11 @@ -156,10 +155,7 @@ long do_xen_version(int cmd, void *arg)
    3.12          switch ( fi.submap_idx )
    3.13          {
    3.14          case 0:
    3.15 -            if (shadow_mode_wr_pt_pte(current->domain))
    3.16 -                fi.submap = XENFEAT_writable_mmu_structures;
    3.17 -            else
    3.18 -                fi.submap = 0;
    3.19 +            fi.submap = 0;
    3.20              break;
    3.21          default:
    3.22              return -EINVAL;
     4.1 --- a/xen/common/memory.c	Sat Jan 28 12:09:45 2006 +0100
     4.2 +++ b/xen/common/memory.c	Sat Jan 28 13:01:19 2006 +0100
     4.3 @@ -75,9 +75,8 @@ populate_physmap(
     4.4      unsigned int   flags,
     4.5      int           *preempted)
     4.6  {
     4.7 -    struct pfn_info         *page;
     4.8 -    unsigned long            i, j, pfn, mfn;
     4.9 -    struct domain_mmap_cache cache1, cache2;
    4.10 +    struct pfn_info *page;
    4.11 +    unsigned long    i, j, pfn, mfn;
    4.12  
    4.13      if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
    4.14          return 0;
    4.15 @@ -86,12 +85,6 @@ populate_physmap(
    4.16           !multipage_allocation_permitted(current->domain) )
    4.17          return 0;
    4.18  
    4.19 -    if (shadow_mode_translate(d)) {
    4.20 -        domain_mmap_cache_init(&cache1);
    4.21 -        domain_mmap_cache_init(&cache2);
    4.22 -        shadow_lock(d);
    4.23 -    }
    4.24 -
    4.25      for ( i = 0; i < nr_extents; i++ )
    4.26      {
    4.27          if ( hypercall_preempt_check() )
    4.28 @@ -114,13 +107,16 @@ populate_physmap(
    4.29          if ( unlikely(__get_user(pfn, &extent_list[i]) != 0) )
    4.30              goto out;
    4.31  
    4.32 -        for ( j = 0; j < (1 << extent_order); j++ ) {
    4.33 -            if (shadow_mode_translate(d))
    4.34 -                set_p2m_entry(d, pfn + j, mfn + j, &cache1, &cache2);
    4.35 -            set_pfn_from_mfn(mfn + j, pfn + j);
    4.36 +        if ( unlikely(shadow_mode_translate(d)) )
    4.37 +        {
    4.38 +            for ( j = 0; j < (1 << extent_order); j++ )
    4.39 +                guest_physmap_add_page(d, pfn + j, mfn + j);
    4.40          }
    4.41 +        else
    4.42 +        {
    4.43 +            for ( j = 0; j < (1 << extent_order); j++ )
    4.44 +                set_pfn_from_mfn(mfn + j, pfn + j);
    4.45  
    4.46 -        if (!shadow_mode_translate(d)) {
    4.47              /* Inform the domain of the new page's machine address. */ 
    4.48              if ( __put_user(mfn, &extent_list[i]) != 0 )
    4.49                  goto out;
    4.50 @@ -128,12 +124,6 @@ populate_physmap(
    4.51      }
    4.52  
    4.53   out:
    4.54 -    if (shadow_mode_translate(d)) {
    4.55 -        shadow_unlock(d);
    4.56 -        domain_mmap_cache_destroy(&cache1);
    4.57 -        domain_mmap_cache_destroy(&cache2);
    4.58 -    }
    4.59 -
    4.60      return i;
    4.61  }
    4.62      
    4.63 @@ -168,8 +158,8 @@ decrease_reservation(
    4.64              mfn = __gpfn_to_mfn(d, gpfn + j);
    4.65              if ( unlikely(mfn >= max_page) )
    4.66              {
    4.67 -                DPRINTK("Domain %u page number out of range (%lx(%lx) >= %lx)\n", 
    4.68 -                        d->domain_id, mfn, gpfn, max_page);
    4.69 +                DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
    4.70 +                        d->domain_id, mfn, max_page);
    4.71                  return i;
    4.72              }
    4.73              
    4.74 @@ -186,18 +176,8 @@ decrease_reservation(
    4.75              if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
    4.76                  put_page(page);
    4.77  
    4.78 -            if (shadow_mode_translate(d)) {
    4.79 -                struct domain_mmap_cache c1, c2;
    4.80 -                domain_mmap_cache_init(&c1);
    4.81 -                domain_mmap_cache_init(&c2);
    4.82 -                shadow_lock(d);
    4.83 -                shadow_sync_and_drop_references(d, page);
    4.84 -                set_p2m_entry(d, gpfn + j, -1, &c1, &c2);
    4.85 -                set_pfn_from_mfn(mfn + j, INVALID_M2P_ENTRY);
    4.86 -                shadow_unlock(d);
    4.87 -                domain_mmap_cache_destroy(&c1);
    4.88 -                domain_mmap_cache_destroy(&c2);
    4.89 -            }
    4.90 +            guest_physmap_remove_page(d, gpfn + j, mfn);
    4.91 +
    4.92              put_page(page);
    4.93          }
    4.94      }
     5.1 --- a/xen/include/asm-x86/shadow.h	Sat Jan 28 12:09:45 2006 +0100
     5.2 +++ b/xen/include/asm-x86/shadow.h	Sat Jan 28 13:01:19 2006 +0100
     5.3 @@ -636,6 +636,44 @@ static inline void shadow_sync_and_drop_
     5.4  }
     5.5  #endif
     5.6  
     5.7 +static inline void guest_physmap_add_page(
     5.8 +    struct domain *d, unsigned long gpfn, unsigned long mfn)
     5.9 +{
    5.10 +    struct domain_mmap_cache c1, c2;
    5.11 +
    5.12 +    if ( likely(!shadow_mode_translate(d)) )
    5.13 +        return;
    5.14 +
    5.15 +    domain_mmap_cache_init(&c1);
    5.16 +    domain_mmap_cache_init(&c2);
    5.17 +    shadow_lock(d);
    5.18 +    shadow_sync_and_drop_references(d, pfn_to_page(mfn));
    5.19 +    set_p2m_entry(d, gpfn, mfn, &c1, &c2);
    5.20 +    set_pfn_from_mfn(mfn, gpfn);
    5.21 +    shadow_unlock(d);
    5.22 +    domain_mmap_cache_destroy(&c1);
    5.23 +    domain_mmap_cache_destroy(&c2);
    5.24 +}
    5.25 +
    5.26 +static inline void guest_physmap_remove_page(
    5.27 +    struct domain *d, unsigned long gpfn, unsigned long mfn)
    5.28 +{
    5.29 +    struct domain_mmap_cache c1, c2;
    5.30 +
    5.31 +    if ( likely(!shadow_mode_translate(d)) )
    5.32 +        return;
    5.33 +
    5.34 +    domain_mmap_cache_init(&c1);
    5.35 +    domain_mmap_cache_init(&c2);
    5.36 +    shadow_lock(d);
    5.37 +    shadow_sync_and_drop_references(d, pfn_to_page(mfn));
    5.38 +    set_p2m_entry(d, gpfn, -1, &c1, &c2);
    5.39 +    set_pfn_from_mfn(mfn, INVALID_M2P_ENTRY);
    5.40 +    shadow_unlock(d);
    5.41 +    domain_mmap_cache_destroy(&c1);
    5.42 +    domain_mmap_cache_destroy(&c2);
    5.43 +}
    5.44 +
    5.45  /************************************************************************/
    5.46  
    5.47  /*
     6.1 --- a/xen/include/xen/shadow.h	Sat Jan 28 12:09:45 2006 +0100
     6.2 +++ b/xen/include/xen/shadow.h	Sat Jan 28 13:01:19 2006 +0100
     6.3 @@ -10,8 +10,15 @@
     6.4  
     6.5  #else
     6.6  
     6.7 -#define shadow_drop_references(_d, _p)          ((void)0)
     6.8 -#define shadow_sync_and_drop_references(_d, _p) ((void)0)
     6.9 +#define shadow_drop_references(d, p)          ((void)0)
    6.10 +#define shadow_sync_and_drop_references(d, p) ((void)0)
    6.11 +
    6.12 +#define shadow_mode_translate(d)              (0)
    6.13 +
    6.14 +#define __gpfn_to_mfn(d, p)                   (p)
    6.15 +#define __mfn_to_gpfn(d, p)                   (p)
    6.16 +#define guest_physmap_add_page(d, p, m)       ((void)0)
    6.17 +#define guest_physmap_remove_page(d, p, m)    ((void)0)
    6.18  
    6.19  #endif
    6.20