direct-io.hg

changeset 7864:c640c0c7f821

The perdomain mapping area in Xen space requires more than
one level-1 pagetable page on PAE and x86/64. Generalise
the existing code to allocate and map an appropriate number
of level-1 pagetable pages.

Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Nov 17 12:52:42 2005 +0100 (2005-11-17)
parents 9da47130ff42
children 80e393599413 1a1c0f9b242e
files xen/arch/x86/domain.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/include/asm-x86/config.h
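
As context for the diff below, here is a small standalone sketch of the
sizing arithmetic that the new PDPT_L1_ENTRIES and PDPT_L2_ENTRIES macros
encode. It is illustrative only: MAX_VIRT_CPUS, PAGE_SHIFT, PAGETABLE_ORDER
and the 8-byte l1_pgentry_t size are assumed PAE/x86-64 values for this era
rather than quoted from the tree, and order_from_bytes() merely mimics Xen's
get_order_from_bytes(). With these numbers the per-domain L1 table needs
8 KB, i.e. two level-1 pages and two level-2 slots, which is why the old
single-page allocation had to be generalised.

    /* Illustrative sketch only -- the constants below are assumed
     * PAE/x86-64 values, not copied from the Xen tree. */
    #include <stdio.h>

    #define PAGE_SHIFT        12
    #define PAGE_SIZE         (1UL << PAGE_SHIFT)
    #define PAGETABLE_ORDER   9    /* 512 entries per PAE/x86-64 pagetable */
    #define MAX_VIRT_CPUS     32   /* assumed value for this era */
    #define PDPT_VCPU_SHIFT   5    /* 2^5 = 32 perdomain L1 slots per vcpu */

    #define PDPT_L1_ENTRIES   (MAX_VIRT_CPUS << PDPT_VCPU_SHIFT)
    #define PDPT_L2_ENTRIES   \
        ((PDPT_L1_ENTRIES + (1 << PAGETABLE_ORDER) - 1) >> PAGETABLE_ORDER)

    /* Mimics get_order_from_bytes(): smallest order such that
     * (PAGE_SIZE << order) covers the requested size. */
    static int order_from_bytes(unsigned long bytes)
    {
        int order = 0;
        while ((PAGE_SIZE << order) < bytes)
            order++;
        return order;
    }

    int main(void)
    {
        /* l1_pgentry_t is 8 bytes on PAE and x86/64 (assumed). */
        unsigned long bytes = PDPT_L1_ENTRIES * 8UL;

        printf("PDPT_L1_ENTRIES   = %d\n", PDPT_L1_ENTRIES);         /* 1024 */
        printf("perdomain_pt size = %lu bytes\n", bytes);            /* 8192 */
        printf("allocation order  = %d\n", order_from_bytes(bytes)); /* 1, i.e. two pages */
        printf("PDPT_L2_ENTRIES   = %d\n", PDPT_L2_ENTRIES);         /* 2 L2 slots */
        return 0;
    }

Built as an ordinary user-space program this prints 1024 entries, 8192
bytes, order 1 and 2 L2 slots; with non-PAE x86-32 parameters (4-byte
entries, 1024-entry tables) the same arithmetic yields a single page,
matching the behaviour of the code being replaced.
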
line diff
     1.1 --- a/xen/arch/x86/domain.c	Thu Nov 17 12:12:57 2005 +0100
     1.2 +++ b/xen/arch/x86/domain.c	Thu Nov 17 12:52:42 2005 +0100
     1.3 @@ -240,7 +240,10 @@ void free_vcpu_struct(struct vcpu *v)
     1.4  
     1.5  void free_perdomain_pt(struct domain *d)
     1.6  {
     1.7 -    free_xenheap_page(d->arch.mm_perdomain_pt);
     1.8 +    free_xenheap_pages(
     1.9 +        d->arch.mm_perdomain_pt,
    1.10 +        get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t)));
    1.11 +
    1.12  #ifdef __x86_64__
    1.13      free_xenheap_page(d->arch.mm_perdomain_l2);
    1.14      free_xenheap_page(d->arch.mm_perdomain_l3);
    1.15 @@ -251,7 +254,7 @@ void arch_do_createdomain(struct vcpu *v
    1.16  {
    1.17      struct domain *d = v->domain;
    1.18      l1_pgentry_t gdt_l1e;
    1.19 -    int vcpuid;
    1.20 +    int vcpuid, pdpt_order;
    1.21  
    1.22      if ( is_idle_task(d) )
    1.23          return;
    1.24 @@ -263,13 +266,10 @@ void arch_do_createdomain(struct vcpu *v
    1.25      v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
    1.26      v->cpumap = CPUMAP_RUNANYWHERE;
    1.27      SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
    1.28 -    set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT,
    1.29 -            INVALID_M2P_ENTRY);
    1.30  
    1.31 -    d->arch.mm_perdomain_pt = alloc_xenheap_page();
    1.32 -    memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
    1.33 -    set_pfn_from_mfn(virt_to_phys(d->arch.mm_perdomain_pt) >> PAGE_SHIFT,
    1.34 -            INVALID_M2P_ENTRY);
    1.35 +    pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
    1.36 +    d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
    1.37 +    memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE << pdpt_order);
    1.38      v->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
    1.39  
    1.40      /*
    1.41 @@ -293,9 +293,11 @@ void arch_do_createdomain(struct vcpu *v
    1.42  
    1.43      d->arch.mm_perdomain_l2 = alloc_xenheap_page();
    1.44      memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
    1.45 -    d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] =
    1.46 -        l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt),
    1.47 -                        __PAGE_HYPERVISOR);
    1.48 +    for ( i = 0; i < (1 << pdpt_order); i++ )
    1.49 +        d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)+i] =
    1.50 +            l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
    1.51 +                          __PAGE_HYPERVISOR);
    1.52 +
    1.53      d->arch.mm_perdomain_l3 = alloc_xenheap_page();
    1.54      memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
    1.55      d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
     2.1 --- a/xen/arch/x86/mm.c	Thu Nov 17 12:12:57 2005 +0100
     2.2 +++ b/xen/arch/x86/mm.c	Thu Nov 17 12:52:42 2005 +0100
     2.3 @@ -738,7 +738,7 @@ static int create_pae_xen_mappings(l3_pg
     2.4      memcpy(&pl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
     2.5             &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
     2.6             L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
     2.7 -    for ( i = 0; i < (PERDOMAIN_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
     2.8 +    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
     2.9          pl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
    2.10              l2e_from_page(
    2.11                  virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt) + i,
     3.1 --- a/xen/arch/x86/shadow.c	Thu Nov 17 12:12:57 2005 +0100
     3.2 +++ b/xen/arch/x86/shadow.c	Thu Nov 17 12:52:42 2005 +0100
     3.3 @@ -2710,7 +2710,7 @@ static unsigned long shadow_l3_table(
     3.4             &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
     3.5             L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));       
     3.6  
     3.7 -        for ( i = 0; i < (PERDOMAIN_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
     3.8 +        for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
     3.9              spl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
    3.10                  l2e_from_page(
    3.11                      virt_to_page(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt) + i, 
     4.1 --- a/xen/include/asm-x86/config.h	Thu Nov 17 12:12:57 2005 +0100
     4.2 +++ b/xen/include/asm-x86/config.h	Thu Nov 17 12:52:42 2005 +0100
     4.3 @@ -291,6 +291,9 @@ extern unsigned long xenheap_phys_end; /
     4.4  
     4.5  #define PDPT_VCPU_SHIFT       5
     4.6  #define PDPT_VCPU_VA_SHIFT    (PDPT_VCPU_SHIFT + PAGE_SHIFT)
     4.7 +#define PDPT_L1_ENTRIES       (MAX_VIRT_CPUS << PDPT_VCPU_SHIFT)
     4.8 +#define PDPT_L2_ENTRIES       \
     4.9 +    ((PDPT_L1_ENTRIES + (1 << PAGETABLE_ORDER) - 1) >> PAGETABLE_ORDER)
    4.10  
    4.11  #if defined(__x86_64__)
    4.12  #define ELFSIZE 64