ia64/xen-unstable

changeset 11394:2017f6e92bf8

[XEN] Fix shadow linear-mapping recopy code for SMP PAE HVM on 64bit Xen.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author tdeegan@york.uk.xensource.com
date Thu Aug 31 14:46:28 2006 +0100 (2006-08-31)
parents 1c3455182cee
children 6f41473da163
files xen/arch/x86/mm/shadow/multi.c
line diff
--- a/xen/arch/x86/mm/shadow/multi.c	Thu Aug 31 12:07:46 2006 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c	Thu Aug 31 14:46:28 2006 +0100
@@ -2861,11 +2861,11 @@ static int sh_page_fault(struct vcpu *v,
     //      bunch of 4K maps.
     //
 
+    shadow_lock(d);
+
     SHADOW_PRINTK("d:v=%u:%u va=%#lx err=%u\n",
                   v->domain->domain_id, v->vcpu_id, va, regs->error_code);
 
-    shadow_lock(d);
-
     shadow_audit_tables(v);
 
     if ( guest_walk_tables(v, va, &gw, 1) != 0 )
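
The hunk above takes shadow_lock(d) before the diagnostic print and the shadow_audit_tables() call rather than after them, presumably so that, on SMP, everything sh_page_fault does from the first diagnostic onward runs under the per-domain lock while other vcpus may be rewriting the shadow tables. A minimal sketch of the pattern, assuming illustrative names (handle_fault() and inspect_shadow_state() are not from the Xen source; shadow_lock()/shadow_unlock() are the real primitives):

    /* Sketch: serialise with other vcpus before inspecting shadow state. */
    static int handle_fault(struct vcpu *v, struct domain *d,
                            unsigned long va)
    {
        shadow_lock(d);               /* take the lock first...           */
        inspect_shadow_state(v, va);  /* ...then audit/log under the lock */
        /* ... walk the guest tables and repair the shadows ... */
        shadow_unlock(d);
        return 0;
    }
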
@@ -3291,12 +3291,6 @@ sh_update_linear_entries(struct vcpu *v)
         {
             ml3e = __linear_l3_table;
             l3mfn = _mfn(l4e_get_pfn(__linear_l4_table[0]));
-#if GUEST_PAGING_LEVELS == 2
-            /* Shadow l3 tables are made up by update_cr3 */
-            sl3e = v->arch.hvm_vcpu.hvm_lowmem_l3tab;
-#else
-            sl3e = v->arch.shadow_vtable;
-#endif
         }
         else
         {
@@ -3306,13 +3300,15 @@ sh_update_linear_entries(struct vcpu *v)
             l3mfn = _mfn(l4e_get_pfn(ml4e[0]));
             ml3e = sh_map_domain_page(l3mfn);
             sh_unmap_domain_page(ml4e);
+        }
+
 #if GUEST_PAGING_LEVELS == 2
-            /* Shadow l3 tables are made up by update_cr3 */
-            sl3e = v->arch.hvm_vcpu.hvm_lowmem_l3tab;
+        /* Shadow l3 tables are made up by update_cr3 */
+        sl3e = v->arch.hvm_vcpu.hvm_lowmem_l3tab;
 #else
-            sl3e = sh_map_domain_page(pagetable_get_mfn(v->arch.shadow_table));
+        /* Always safe to use shadow_vtable, because it's globally mapped */
+        sl3e = v->arch.shadow_vtable;
 #endif
-        }
 
         for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
         {
@@ -3324,12 +3320,7 @@ sh_update_linear_entries(struct vcpu *v)
         }
 
         if ( v != current )
-        {
             sh_unmap_domain_page(ml3e);
-#if GUEST_PAGING_LEVELS != 2
-            sh_unmap_domain_page(sl3e);
-#endif
-        }
     }
 
 #elif CONFIG_PAGING_LEVELS == 3
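
The hunks above collapse the v == current / v != current split for choosing sl3e: however the monitor l3 (ml3e) was reached, the shadow l3 is now always read through the globally mapped v->arch.shadow_vtable (or, for 2-level guests, the hvm_lowmem_l3tab that update_cr3 builds), so the transient sh_map_domain_page() of the shadow table and its matching unmap can go away. The loop whose header is visible above then recopies the PAE l3 entries into the monitor table; roughly, a sketch of such a recopy (the accessor names below are assumptions in the style of the shadow code, not quoted from multi.c):

    /* Sketch: propagate each shadow l3 entry into the monitor l3.
     * Accessor names here are illustrative assumptions. */
    static void recopy_l3(l3_pgentry_t *ml3e, shadow_l3e_t *sl3e)
    {
        int i;
        for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )  /* 4 in PAE */
            ml3e[i] = (shadow_l3e_get_flags(sl3e[i]) & _PAGE_PRESENT)
                ? l3e_from_pfn(mfn_x(shadow_l3e_get_mfn(sl3e[i])),
                               __PAGE_HYPERVISOR)
                : l3e_empty();
    }
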
@@ -3361,31 +3352,10 @@ sh_update_linear_entries(struct vcpu *v)
 
 #else /* GUEST_PAGING_LEVELS == 3 */
 
-        /* Use local vcpu's mappings if we can; otherwise make new mappings */
-        if ( v == current )
-        {
-            shadow_l3e = v->arch.shadow_vtable;
-            if ( !shadow_mode_external(d) )
-                guest_l3e = v->arch.guest_vtable;
-        }
-        else
-        {
-            mfn_t smfn;
-            int idx;
-
-            /* Map the shadow l3 */
-            smfn = pagetable_get_mfn(v->arch.shadow_table);
-            idx = shadow_l3_index(&smfn, guest_index(v->arch.shadow_vtable));
-            shadow_l3e = sh_map_domain_page(smfn);
-            shadow_l3e += idx;
-            if ( !shadow_mode_external(d) )
-            {
-                /* Also the guest l3 */
-                mfn_t gmfn = pagetable_get_mfn(v->arch.guest_table);
-                guest_l3e = sh_map_domain_page(gmfn);
-                guest_l3e += guest_index(v->arch.guest_vtable);
-            }
-        }
+        /* Always safe to use *_vtable, because they're globally mapped */
+        shadow_l3e = v->arch.shadow_vtable;
+        guest_l3e = v->arch.guest_vtable;
+
 #endif /* GUEST_PAGING_LEVELS */
 
         /* Choose where to write the entries, using linear maps if possible */
@@ -3443,14 +3413,6 @@ sh_update_linear_entries(struct vcpu *v)
         if ( v != current || !shadow_mode_external(d) )
             sh_unmap_domain_page(l2e);
 
-#if GUEST_PAGING_LEVELS == 3
-        if ( v != current)
-        {
-            sh_unmap_domain_page(shadow_l3e);
-            if ( !shadow_mode_external(d) )
-                sh_unmap_domain_page(guest_l3e);
-        }
-#endif
     }
 
 #elif CONFIG_PAGING_LEVELS == 2
@@ -3601,7 +3563,7 @@ sh_detach_old_tables(struct vcpu *v)
          v->arch.shadow_vtable )
     {
         // Q: why does this need to use (un)map_domain_page_*global* ?
-        //
+        /* A: so sh_update_linear_entries can operate on other vcpus */
        sh_unmap_domain_page_global(v->arch.shadow_vtable);
         v->arch.shadow_vtable = NULL;
     }
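
The final hunk answers the question that had been left open in sh_detach_old_tables(), and the answer is what makes the simplifications above safe: shadow_vtable (and guest_vtable) are created with the _global mapping variants, so the pointer stays valid in every address space and on every CPU until it is explicitly torn down, whereas a plain sh_map_domain_page() mapping is transient and only usable on the CPU that created it. A hedged sketch of that usage contract (sh_map_domain_page_global() is assumed here as the counterpart of the sh_unmap_domain_page_global() call visible in the hunk):

    void *table;

    /* Transient mapping: use briefly, on this CPU only, then unmap. */
    table = sh_map_domain_page(mfn);
    /* ... read or write the page table through 'table' here ... */
    sh_unmap_domain_page(table);

    /* Global mapping: installed in every address space, so another vcpu
     * (e.g. one running sh_update_linear_entries() on this vcpu's tables)
     * may safely follow the pointer until it is detached. */
    v->arch.shadow_vtable = sh_map_domain_page_global(mfn);
    /* ... later, in sh_detach_old_tables(): */
    sh_unmap_domain_page_global(v->arch.shadow_vtable);
    v->arch.shadow_vtable = NULL;
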