ia64/xen-unstable

changeset 7933:5c7d103efb99

Fix shadow log-dirty mode for x86_64 xenlinux. Tested with
PAE xenlinux as well.
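
In brief: do_update_va_mapping(), which sits on the log-dirty update
path, previously always installed the propagated guest PTE via the
2-level shadow_set_l1e(). The patch compiles shadow_set_l1e() only
when CONFIG_PAGING_LEVELS == 2, hoists shadow_set_l1e_64() ahead of
do_update_va_mapping() for 3- and 4-level builds, and makes the call
site dispatch on the paging level (condensed from the diff below):

        l1pte_propagate_from_guest(d, *(guest_l1_pgentry_t *)&val, &spte);
    #if CONFIG_PAGING_LEVELS == 2
        shadow_set_l1e(va, spte, 0);
    #elif CONFIG_PAGING_LEVELS >= 3
        shadow_set_l1e_64(va, (pgentry_64_t *) &spte, 0);
    #endif

The relocated shadow_set_l1e_64() also drops the old printk()/BUG()
when the shadow-table walk fails and installs l1e_empty() instead:
this call site passes create_l1_shadow == 0, so a missing intermediate
shadow table is now a reachable case rather than a bug.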

Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Xiaohui Xin <xiaohui.xin@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Nov 19 10:28:24 2005 +0100 (2005-11-19)
parents 317e5a7092e2
children f5dafee503ba
files xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c	Sat Nov 19 04:35:37 2005 +0100
+++ b/xen/arch/x86/shadow.c	Sat Nov 19 10:28:24 2005 +0100
@@ -47,13 +47,14 @@ static void mark_shadows_as_reflecting_s
 #if CONFIG_PAGING_LEVELS == 3
 static unsigned long shadow_l3_table(
     struct domain *d, unsigned long gpfn, unsigned long gmfn);
-static inline void validate_bl2e_change( struct domain *d,
-    guest_root_pgentry_t *new_gle_p, pgentry_64_t *shadow_l3, int index);
 #endif
 
 #if CONFIG_PAGING_LEVELS == 4
 static unsigned long shadow_l4_table(
     struct domain *d, unsigned long gpfn, unsigned long gmfn);
+#endif
+
+#if CONFIG_PAGING_LEVELS >= 3
 static void shadow_map_into_current(struct vcpu *v,
     unsigned long va, unsigned int from, unsigned int to);
 static inline void validate_bl2e_change( struct domain *d,
@@ -669,6 +670,7 @@ static void shadow_map_l1_into_current_l
     }
 }
 
+#if CONFIG_PAGING_LEVELS == 2
 static void
 shadow_set_l1e(unsigned long va, l1_pgentry_t new_spte, int create_l1_shadow)
 {
@@ -750,7 +752,6 @@ shadow_set_l1e(unsigned long va, l1_pgen
     shadow_update_min_max(l2e_get_pfn(sl2e), l1_table_offset(va));
 }
 
-#if CONFIG_PAGING_LEVELS == 2
 static void shadow_invlpg_32(struct vcpu *v, unsigned long va)
 {
     struct domain *d = v->domain;
@@ -781,6 +782,73 @@ static void shadow_invlpg_32(struct vcpu
 }
 #endif /* CONFIG_PAGING_LEVELS == 2 */
 
+#if CONFIG_PAGING_LEVELS >= 3
+static void shadow_set_l1e_64(
+    unsigned long va, pgentry_64_t *sl1e_p,
+    int create_l1_shadow)
+{
+    struct vcpu *v = current;
+    struct domain *d = v->domain;
+    pgentry_64_t sle;
+    pgentry_64_t sle_up = {0};
+    l1_pgentry_t old_spte;
+    l1_pgentry_t sl1e = *(l1_pgentry_t *)sl1e_p;
+    int i;
+    unsigned long orig_va = 0;
+
+    if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
+    {
+        /* This is for 32-bit VMX guest on 64-bit host */
+        orig_va = va;
+        va = va & (~((1<<L2_PAGETABLE_SHIFT_32)-1));
+    }
+
+    for ( i = PAGING_L4; i >= PAGING_L2; i-- )
+    {
+        if ( !__rw_entry(v, va, &sle, SHADOW_ENTRY | GET_ENTRY | i) )
+        {
+            sl1e = l1e_empty();
+            goto out;
+        }
+        if ( !(entry_get_flags(sle) & _PAGE_PRESENT) )
+        {
+            if ( create_l1_shadow )
+            {
+                perfc_incrc(shadow_set_l3e_force_map);
+                shadow_map_into_current(v, va, i-1, i);
+                __rw_entry(v, va, &sle, SHADOW_ENTRY | GET_ENTRY | i);
+            }
+        }
+        if ( i < PAGING_L4 )
+            shadow_update_min_max(entry_get_pfn(sle_up), table_offset_64(va, i));
+        sle_up = sle;
+    }
+
+    if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
+    {
+        va = orig_va;
+    }
+
+    if ( shadow_mode_refcounts(d) )
+    {
+        __shadow_get_l1e(v, va, &old_spte);
+        if ( l1e_has_changed(old_spte, sl1e, _PAGE_RW | _PAGE_PRESENT) )
+        {
+            if ( (l1e_get_flags(sl1e) & _PAGE_PRESENT) &&
+                 !shadow_get_page_from_l1e(sl1e, d) )
+                sl1e = l1e_empty();
+            if ( l1e_get_flags(old_spte) & _PAGE_PRESENT )
+                put_page_from_l1e(old_spte, d);
+        }
+    }
+
+out:
+    __shadow_set_l1e(v, va, &sl1e);
+
+    shadow_update_min_max(entry_get_pfn(sle_up), guest_l1_table_offset(va));
+}
+#endif /* CONFIG_PAGING_LEVELS >= 3 */
+
 static struct out_of_sync_entry *
 shadow_alloc_oos_entry(struct domain *d)
 {
@@ -1996,8 +2064,11 @@ static int do_update_va_mapping(unsigned
     __shadow_sync_va(v, va);
 
     l1pte_propagate_from_guest(d, *(guest_l1_pgentry_t *)&val, &spte);
+#if CONFIG_PAGING_LEVELS == 2
     shadow_set_l1e(va, spte, 0);
-
+#elif CONFIG_PAGING_LEVELS >= 3
+    shadow_set_l1e_64(va, (pgentry_64_t *) &spte, 0);
+#endif
     /*
      * If we're in log-dirty mode then we need to note that we've updated
      * the PTE in the PT-holding page. We need the machine frame number
@@ -3012,71 +3083,6 @@ static void shadow_set_l2e_64(unsigned l
 }
 
 
-static void shadow_set_l1e_64(
-    unsigned long va, pgentry_64_t *sl1e_p,
-    int create_l1_shadow)
-{
-    struct vcpu *v = current;
-    struct domain *d = v->domain;
-    pgentry_64_t sle;
-    pgentry_64_t sle_up = {0};
-    l1_pgentry_t old_spte;
-    l1_pgentry_t sl1e = *(l1_pgentry_t *)sl1e_p;
-    int i;
-    unsigned long orig_va = 0;
-
-    if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
-    {
-        /* This is for 32-bit VMX guest on 64-bit host */
-        orig_va = va;
-        va = va & (~((1<<L2_PAGETABLE_SHIFT_32)-1));
-    }
-
-    for (i = PAGING_L4; i >= PAGING_L2; i--)
-    {
-        if (!__rw_entry(v, va, &sle, SHADOW_ENTRY | GET_ENTRY | i)) {
-            printk("<%s> i = %d\n", __func__, i);
-            BUG();
-        }
-        if ( !(entry_get_flags(sle) & _PAGE_PRESENT) ) {
-            if ( create_l1_shadow ) {
-                perfc_incrc(shadow_set_l3e_force_map);
-                shadow_map_into_current(v, va, i-1, i);
-                __rw_entry(v, va, &sle, SHADOW_ENTRY | GET_ENTRY | i);
-            } else {
-#if 0
-                printk("For non VMX shadow, create_l1_shadow:%d\n", create_l1_shadow);
-#endif
-            }
-        }
-        if( i < PAGING_L4 )
-            shadow_update_min_max(entry_get_pfn(sle_up), table_offset_64(va, i));
-        sle_up = sle;
-    }
-
-    if ( d->arch.ops->guest_paging_levels == PAGING_L2 ) {
-        va = orig_va;
-    }
-
-    if ( shadow_mode_refcounts(d) )
-    {
-        __shadow_get_l1e(v, va, &old_spte);
-        ESH_LOG("old_sl1e: %lx, new_sl1e: %lx\n", l1e_get_intpte(old_spte), l1e_get_intpte(sl1e));
-        if ( l1e_has_changed(old_spte, sl1e, _PAGE_RW | _PAGE_PRESENT) )
-            {
-                if ( (l1e_get_flags(sl1e) & _PAGE_PRESENT) &&
-                     !shadow_get_page_from_l1e(sl1e, d) )
-                    sl1e = l1e_empty();
-                if ( l1e_get_flags(old_spte) & _PAGE_PRESENT )
-                    put_page_from_l1e(old_spte, d);
-            }
-    }
-
-    __shadow_set_l1e(v, va, &sl1e);
-
-    shadow_update_min_max(entry_get_pfn(sle_up), guest_l1_table_offset(va));
-}
-
 /* As 32-bit guest don't support 4M page yet,
  * we don't concern double compile for this function
  */