ia64/xen-unstable

changeset 8214:d234a8cfc4ca

Fixes unmodified 32-bit guest support on x86_64 Xen.

Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Xiaohui Xin <xiaohui.xin@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Dec 03 17:41:16 2005 +0100 (2005-12-03)
parents 1c515c707296
children b8ba1bbba882
files xen/arch/x86/shadow.c
line diff
     1.1 --- a/xen/arch/x86/shadow.c	Sat Dec 03 13:36:22 2005 +0100
     1.2 +++ b/xen/arch/x86/shadow.c	Sat Dec 03 17:41:16 2005 +0100
     1.3 @@ -637,6 +637,11 @@ static void shadow_map_l1_into_current_l
     1.4          gpl1e = (guest_l1_pgentry_t *) map_domain_page(tmp_gmfn);
     1.5  
     1.6          /* If the PGT_l1_shadow has two continual pages */
     1.7 +#if CONFIG_PAGING_LEVELS >=3
     1.8 +        if (d->arch.ops->guest_paging_levels == PAGING_L2)
     1.9 +            __shadow_get_l2e(v,  va & ~((1<<L2_PAGETABLE_SHIFT_32) - 1), &tmp_sl2e);
    1.10 +        else
    1.11 +#endif
    1.12          __shadow_get_l2e(v, va, &tmp_sl2e);
    1.13          spl1e = (l1_pgentry_t *) map_domain_page(l2e_get_pfn(tmp_sl2e));
    1.14  
    1.15 @@ -1809,9 +1814,12 @@ static void sync_all(struct domain *d)
    1.16      }
    1.17  #endif
    1.18  
    1.19 -    need_flush |= resync_all(d, PGT_l2_shadow);
    1.20 -
    1.21  #if CONFIG_PAGING_LEVELS >= 3
    1.22 +    if (d->arch.ops->guest_paging_levels == PAGING_L2)
    1.23 +        need_flush |= resync_all(d, PGT_l4_shadow);
    1.24 +    else
    1.25 +        need_flush |= resync_all(d, PGT_l2_shadow);
    1.26 +
    1.27      if (d->arch.ops->guest_paging_levels >= PAGING_L3) 
    1.28      {
    1.29          need_flush |= resync_all(d, PGT_l3_shadow);
    1.30 @@ -2943,6 +2951,8 @@ validate_bl2e_change(
    1.31         sl2_p[sl2_idx + 1] =
    1.32              entry_from_pfn(sl1mfn + 1, entry_get_flags(sl2_p[sl2_idx]));
    1.33      }
    1.34 +    else
    1.35 +        sl2_p[sl2_idx + 1] = (pgentry_64_t){0};
    1.36      unmap_domain_page(sl2_p);
    1.37  
    1.38  }
    1.39 @@ -3528,9 +3538,9 @@ static void shadow_invlpg_64(struct vcpu
    1.40  
    1.41      __shadow_sync_va(v, va);
    1.42  
    1.43 -    if ( __shadow_get_l1e(v, va, &old_sl1e) )
    1.44 +    if ( shadow_mode_external(d) && __shadow_get_l1e(v, va, &old_sl1e) )
    1.45          if ( l1e_get_flags(old_sl1e) & _PAGE_PRESENT )
    1.46 -            shadow_put_page_from_l1e(old_sl1e, d);
    1.47 +            put_page_from_l1e(old_sl1e, d);
    1.48  
    1.49      sl1e = l1e_empty();
    1.50      __shadow_set_l1e(v, va, &sl1e);