ia64/xen-unstable

changeset 9520:4c2d101a3228

Re-arrange code for a follow-up patch, and remove an extra shadow_lock from a function that is only called with the lock already held.

Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author cl349@firebug.cl.cam.ac.uk
date Wed Mar 29 16:47:46 2006 +0100 (2006-03-29)
parents 50778f42f2dd
children d102a30417a7
files xen/include/asm-x86/shadow.h
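
Note on the locking change: shadow_sync_and_drop_references() is only invoked by callers that already hold the domain's shadow lock (see guest_physmap_add_page()/guest_physmap_remove_page() in the diff below, which wrap the call in shadow_lock()/shadow_unlock()), so its own shadow_lock()/shadow_unlock() pair was redundant. Assuming shadow_lock() wraps a non-recursive spinlock, a second acquisition by the same CPU would deadlock or trip an assertion, so the old nesting was a latent hazard. The following is a minimal, self-contained sketch of that hazard, not Xen code: every name in it (the struct domain stub, sync_and_drop_old/new, physmap_op) is invented for illustration, and the shadow lock is modelled as an error-checking pthread mutex so the bad acquisition is reported rather than hanging.

/*
 * Sketch of the double-locking hazard this patch removes.
 * All names are illustrative stand-ins, not the real Xen definitions.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct domain { pthread_mutex_t shadow_lock; };

/* Old shape: the callee acquired the lock itself. */
static void sync_and_drop_old(struct domain *d)
{
    /* Second acquisition by the lock's owner: EDEADLK here. */
    if (pthread_mutex_lock(&d->shadow_lock) == EDEADLK)
        puts("old callee: deadlock on nested shadow_lock");
    else
        pthread_mutex_unlock(&d->shadow_lock);
}

/* New shape: the callee assumes the caller already holds the lock. */
static void sync_and_drop_new(struct domain *d)
{
    (void)d;   /* caller must hold d->shadow_lock */
    puts("new callee: runs under the caller's lock");
}

static void physmap_op(struct domain *d, int old_layout)
{
    pthread_mutex_lock(&d->shadow_lock);   /* caller takes the lock once */
    if (old_layout)
        sync_and_drop_old(d);              /* nested acquisition: bug */
    else
        sync_and_drop_new(d);              /* lock taken exactly once */
    pthread_mutex_unlock(&d->shadow_lock);
}

int main(void)
{
    struct domain d;
    pthread_mutexattr_t a;
    pthread_mutexattr_init(&a);
    pthread_mutexattr_settype(&a, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&d.shadow_lock, &a);

    physmap_op(&d, 1);   /* old layout: nested acquisition fails   */
    physmap_op(&d, 0);   /* new layout: single acquisition, no bug */
    return 0;
}

Removing the inner lock and relying on the caller-holds-lock convention, as this patch does for shadow_sync_and_drop_references(), is the standard fix for this pattern.
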
line diff
--- a/xen/include/asm-x86/shadow.h	Wed Mar 29 16:02:40 2006 +0100
+++ b/xen/include/asm-x86/shadow.h	Wed Mar 29 16:47:46 2006 +0100
@@ -135,6 +135,8 @@ extern int set_p2m_entry(
     struct domain_mmap_cache *l1cache);
 extern void remove_shadow(struct domain *d, unsigned long gpfn, u32 stype);
 
+extern void free_shadow_page(unsigned long smfn);
+
 extern void shadow_l1_normal_pt_update(struct domain *d,
                                        paddr_t pa, l1_pgentry_t l1e,
                                        struct domain_mmap_cache *cache);
@@ -660,55 +662,13 @@ static inline void shadow_sync_and_drop_
     if ( likely(!shadow_mode_refcounts(d)) )
         return;
 
-    shadow_lock(d);
-
     if ( page_out_of_sync(page) )
         __shadow_sync_mfn(d, page_to_mfn(page));
 
     shadow_remove_all_access(d, page_to_mfn(page));
-
-    shadow_unlock(d);
 }
 #endif
 
-static inline void guest_physmap_add_page(
-    struct domain *d, unsigned long gpfn, unsigned long mfn)
-{
-    struct domain_mmap_cache c1, c2;
-
-    if ( likely(!shadow_mode_translate(d)) )
-        return;
-
-    domain_mmap_cache_init(&c1);
-    domain_mmap_cache_init(&c2);
-    shadow_lock(d);
-    shadow_sync_and_drop_references(d, mfn_to_page(mfn));
-    set_p2m_entry(d, gpfn, mfn, &c1, &c2);
-    set_gpfn_from_mfn(mfn, gpfn);
-    shadow_unlock(d);
-    domain_mmap_cache_destroy(&c1);
-    domain_mmap_cache_destroy(&c2);
-}
-
-static inline void guest_physmap_remove_page(
-    struct domain *d, unsigned long gpfn, unsigned long mfn)
-{
-    struct domain_mmap_cache c1, c2;
-
-    if ( likely(!shadow_mode_translate(d)) )
-        return;
-
-    domain_mmap_cache_init(&c1);
-    domain_mmap_cache_init(&c2);
-    shadow_lock(d);
-    shadow_sync_and_drop_references(d, mfn_to_page(mfn));
-    set_p2m_entry(d, gpfn, -1, &c1, &c2);
-    set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
-    shadow_unlock(d);
-    domain_mmap_cache_destroy(&c1);
-    domain_mmap_cache_destroy(&c2);
-}
-
 /************************************************************************/
 
 /*
@@ -739,8 +699,6 @@ get_shadow_ref(unsigned long smfn)
     return 1;
 }
 
-extern void free_shadow_page(unsigned long smfn);
-
 /*
 * Drop a shadow reference to smfn.
 */
@@ -1525,6 +1483,46 @@ static inline void set_shadow_status(
 
 /************************************************************************/
 
+static inline void guest_physmap_add_page(
+    struct domain *d, unsigned long gpfn, unsigned long mfn)
+{
+    struct domain_mmap_cache c1, c2;
+
+    if ( likely(!shadow_mode_translate(d)) )
+        return;
+
+    domain_mmap_cache_init(&c1);
+    domain_mmap_cache_init(&c2);
+    shadow_lock(d);
+    shadow_sync_and_drop_references(d, mfn_to_page(mfn));
+    set_p2m_entry(d, gpfn, mfn, &c1, &c2);
+    set_gpfn_from_mfn(mfn, gpfn);
+    shadow_unlock(d);
+    domain_mmap_cache_destroy(&c1);
+    domain_mmap_cache_destroy(&c2);
+}
+
+static inline void guest_physmap_remove_page(
+    struct domain *d, unsigned long gpfn, unsigned long mfn)
+{
+    struct domain_mmap_cache c1, c2;
+
+    if ( likely(!shadow_mode_translate(d)) )
+        return;
+
+    domain_mmap_cache_init(&c1);
+    domain_mmap_cache_init(&c2);
+    shadow_lock(d);
+    shadow_sync_and_drop_references(d, mfn_to_page(mfn));
+    set_p2m_entry(d, gpfn, -1, &c1, &c2);
+    set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
+    shadow_unlock(d);
+    domain_mmap_cache_destroy(&c1);
+    domain_mmap_cache_destroy(&c2);
+}
+
+/************************************************************************/
+
 void static inline
 shadow_update_min_max(unsigned long smfn, int index)
 {
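
The rest of the patch is pure code motion in preparation for the follow-up: the extern declaration of free_shadow_page() moves up with the other declarations, and guest_physmap_add_page()/guest_physmap_remove_page() move, unchanged, below set_shadow_status(). A plausible reason, not stated in the commit message, is C's declaration-order rule for header-defined inline functions: a static inline body can only call what has already been declared above it, so relocating the guest_physmap_*() definitions lets a later patch add calls into the shadow helpers defined in between without forward declarations. A tiny hypothetical sketch, with invented names:

/* Declaration order governs what a static inline body may call.
 * All names here are invented for illustration. */

static inline int lookup_entry(int key)   /* defined first */
{
    return key * 2;
}

/* Because add_entry() is defined after lookup_entry(), it may call it
 * directly. Defined the other way round, the call would be an implicit
 * declaration, an error in C99 and later. Moving guest_physmap_*()
 * below set_shadow_status() gives them the same freedom with respect
 * to the helpers defined above their new location. */
static inline int add_entry(int key)
{
    return lookup_entry(key) + 1;
}
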