ia64/xen-unstable

changeset 5068:43d7cfe99358

bitkeeper revision 1.1498 (428f04afH2pmbU4ynzpdraamIHiD-Q)

Fix mapping of MPT for x86/64 guests. Remove a few more uses of
l?e_create_phys().
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat May 21 09:51:43 2005 +0000 (2005-05-21)
parents 6640eb3cb41d
children 5ee1dd151ac2
files xen/arch/x86/domain.c xen/arch/x86/mm.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/mm.c
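
The change is mechanical in most places: every entry that used to be built from a physical address with l?e_create_phys(__pa(v), flags) is now built from the frame's struct pfn_info with l?e_create_page(virt_to_page(v), flags). A minimal sketch of how the two constructors relate, using illustrative sketch_* stand-ins rather than the real Xen definitions:

    /*
     * Sketch only: simplified stand-ins for the entry constructors this
     * changeset switches between.  Names, types and the flag layout are
     * assumptions for illustration, not Xen's actual definitions.
     */
    struct sketch_pfn_info { unsigned long info; };  /* stand-in frame descriptor */
    typedef struct { unsigned long l2; } sketch_l2e_t;

    #define SKETCH_PAGE_SHIFT 12
    #define SKETCH_PAGE_MASK  (~((1UL << SKETCH_PAGE_SHIFT) - 1))

    /* Assumed helper: machine frame number backing a frame descriptor. */
    unsigned long sketch_page_to_pfn(const struct sketch_pfn_info *pg);

    /* Old style: the caller hands over a physical address. */
    static inline sketch_l2e_t sketch_l2e_create_phys(unsigned long pa,
                                                      unsigned long flags)
    {
        sketch_l2e_t e = { (pa & SKETCH_PAGE_MASK) | flags };
        return e;
    }

    /* New style: the caller hands over the frame descriptor itself. */
    static inline sketch_l2e_t sketch_l2e_create_page(
        const struct sketch_pfn_info *pg, unsigned long flags)
    {
        return sketch_l2e_create_phys(
            sketch_page_to_pfn(pg) << SKETCH_PAGE_SHIFT, flags);
    }

For memory in Xen's direct mapping the two spellings pick out the same frame, since page_to_pfn(virt_to_page(v)) equals __pa(v) >> PAGE_SHIFT there; working in terms of struct pfn_info just removes the open-coded physical-address arithmetic from the callers.
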
line diff
     1.1 --- a/xen/arch/x86/domain.c	Fri May 20 23:17:26 2005 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Sat May 21 09:51:43 2005 +0000
     1.3 @@ -275,12 +275,12 @@ void arch_do_createdomain(struct exec_do
     1.4      d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
     1.5      memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
     1.6      d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] = 
     1.7 -        l2e_create_phys(__pa(d->arch.mm_perdomain_pt),
     1.8 +        l2e_create_page(virt_to_page(d->arch.mm_perdomain_pt),
     1.9                          __PAGE_HYPERVISOR);
    1.10      d->arch.mm_perdomain_l3 = (l3_pgentry_t *)alloc_xenheap_page();
    1.11      memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
    1.12      d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] = 
    1.13 -        l3e_create_phys(__pa(d->arch.mm_perdomain_l2),
    1.14 +        l3e_create_page(virt_to_page(d->arch.mm_perdomain_l2),
    1.15                              __PAGE_HYPERVISOR);
    1.16  #endif
    1.17      
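
In this hunk (and the mm.c ones below) the argument to the new constructor is always xenheap or otherwise directly-mapped memory, so the substitution is a pure re-spelling of the same frame reference. A sketch of the identity being relied on, reusing the sketch_* stand-ins introduced above:

    /*
     * Sketch only: why virt_to_page(v) and __pa(v) name the same frame for
     * direct-mapped allocations.  The globals are stand-ins, not Xen's.
     */
    extern struct sketch_pfn_info sketch_frame_table[]; /* one entry per machine frame */
    extern unsigned long sketch_directmap_virt_start;   /* base of the 1:1 mapping */

    static inline unsigned long sketch_pa(const void *v)    /* analogue of __pa() */
    {
        return (unsigned long)v - sketch_directmap_virt_start;
    }

    static inline struct sketch_pfn_info *sketch_virt_to_page(const void *v)
    {
        return &sketch_frame_table[sketch_pa(v) >> SKETCH_PAGE_SHIFT];
    }

With these definitions, page_to_pfn(virt_to_page(v)) is exactly the pfn that the old l?e_create_phys(__pa(v), ...) call encoded.
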
     2.1 --- a/xen/arch/x86/mm.c	Fri May 20 23:17:26 2005 +0000
     2.2 +++ b/xen/arch/x86/mm.c	Sat May 21 09:51:43 2005 +0000
     2.3 @@ -700,8 +700,9 @@ static int alloc_l2_table(struct pfn_inf
     2.4      pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
     2.5          l2e_create_pfn(pfn, __PAGE_HYPERVISOR);
     2.6      pl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
     2.7 -        l2e_create_phys(__pa(page_get_owner(page)->arch.mm_perdomain_pt),
     2.8 -                        __PAGE_HYPERVISOR);
     2.9 +        l2e_create_page(
    2.10 +            virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt),
    2.11 +            __PAGE_HYPERVISOR);
    2.12  #endif
    2.13  
    2.14      unmap_domain_mem(pl2e);
    2.15 @@ -770,8 +771,9 @@ static int alloc_l4_table(struct pfn_inf
    2.16      pl4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
    2.17          l4e_create_pfn(pfn, __PAGE_HYPERVISOR);
    2.18      pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
    2.19 -        l4e_create_phys(__pa(page_get_owner(page)->arch.mm_perdomain_l3),
    2.20 -                        __PAGE_HYPERVISOR);
    2.21 +        l4e_create_page(
    2.22 +            virt_to_page(page_get_owner(page)->arch.mm_perdomain_l3),
    2.23 +            __PAGE_HYPERVISOR);
    2.24  
    2.25      return 1;
    2.26  
    2.27 @@ -2880,7 +2882,7 @@ int map_pages_to_xen(
    2.28              {
    2.29                  pl1e = page_to_virt(alloc_xen_pagetable());
    2.30                  clear_page(pl1e);
    2.31 -                *pl2e = l2e_create_phys(__pa(pl1e), __PAGE_HYPERVISOR);
    2.32 +                *pl2e = l2e_create_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
    2.33              }
    2.34              else if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
    2.35              {
    2.36 @@ -2889,7 +2891,7 @@ int map_pages_to_xen(
    2.37                      pl1e[i] = l1e_create_pfn(
    2.38                          l2e_get_pfn(*pl2e) + i,
    2.39                          l2e_get_flags(*pl2e) & ~_PAGE_PSE);
    2.40 -                *pl2e = l2e_create_phys(__pa(pl1e), __PAGE_HYPERVISOR);
    2.41 +                *pl2e = l2e_create_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
    2.42                  local_flush_tlb_pge();
    2.43              }
    2.44  
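
The second mm.c hunk is inside map_pages_to_xen(): when a 4kB-granularity request hits an existing PSE superpage, the superpage is shattered into a freshly allocated L1 table and the L2 slot is repointed at that table, now via l2e_create_page(). A rough sketch of that shattering step, again with sketch_* stand-ins (the entry count and flag values are assumptions):

    /* Sketch only: shattering one PSE superpage into an L1 table. */
    #define SKETCH_L1_ENTRIES   512      /* 512 per table on x86/64; 1024 on non-PAE x86/32 */
    #define SKETCH_PAGE_PSE     0x080UL  /* hardware PSE bit */
    #define SKETCH_PAGE_HV      0x063UL  /* assumed __PAGE_HYPERVISOR: P|RW|A|D */

    typedef struct { unsigned long l1; } sketch_l1e_t;

    static void sketch_shatter_superpage(sketch_l2e_t *pl2e,
                                         sketch_l1e_t *new_l1,
                                         unsigned long superpage_pfn,
                                         unsigned long superpage_flags,
                                         const struct sketch_pfn_info *l1_frame)
    {
        unsigned int i;

        /* Each L1 slot maps one 4kB frame of the old superpage, PSE cleared. */
        for ( i = 0; i < SKETCH_L1_ENTRIES; i++ )
        {
            sketch_l1e_t e = { ((superpage_pfn + i) << SKETCH_PAGE_SHIFT) |
                               (superpage_flags & ~SKETCH_PAGE_PSE) };
            new_l1[i] = e;
        }

        /* Repoint the L2 slot at the new table: the l2e_create_page() call above. */
        *pl2e = sketch_l2e_create_page(l1_frame, SKETCH_PAGE_HV);
    }

The local_flush_tlb_pge() that follows in the hunk presumably exists to flush any stale (possibly global) translations of the old superpage.
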
     3.1 --- a/xen/arch/x86/x86_32/mm.c	Fri May 20 23:17:26 2005 +0000
     3.2 +++ b/xen/arch/x86/x86_32/mm.c	Sat May 21 09:51:43 2005 +0000
     3.3 @@ -58,13 +58,13 @@ void __init paging_init(void)
     3.4  {
     3.5      void *ioremap_pt;
     3.6      unsigned long v;
     3.7 -    struct pfn_info *pg;
     3.8 +    struct pfn_info *m2p_pg;
     3.9  
    3.10      /* Allocate and map the machine-to-phys table. */
    3.11 -    if ( (pg = alloc_domheap_pages(NULL, 10)) == NULL )
    3.12 +    if ( (m2p_pg = alloc_domheap_pages(NULL, 10)) == NULL )
    3.13          panic("Not enough memory to bootstrap Xen.\n");
    3.14      idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)] =
    3.15 -        l2e_create_phys(page_to_phys(pg), __PAGE_HYPERVISOR | _PAGE_PSE);
    3.16 +        l2e_create_page(m2p_pg, __PAGE_HYPERVISOR | _PAGE_PSE);
    3.17      memset((void *)RDWR_MPT_VIRT_START, 0x55, 4UL << 20);
    3.18  
    3.19      /* Xen 4MB mappings can all be GLOBAL. */
    3.20 @@ -82,27 +82,25 @@ void __init paging_init(void)
    3.21      ioremap_pt = (void *)alloc_xenheap_page();
    3.22      clear_page(ioremap_pt);
    3.23      idle_pg_table[l2_table_offset(IOREMAP_VIRT_START)] =
    3.24 -        l2e_create_phys(__pa(ioremap_pt), __PAGE_HYPERVISOR);
    3.25 +        l2e_create_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR);
    3.26  
    3.27 -    /* Create read-only mapping of MPT for guest-OS use.
    3.28 +    /*
    3.29 +     * Create read-only mapping of MPT for guest-OS use.
    3.30       * NB. Remove the global bit so that shadow_mode_translate()==true domains
     3.31       *     can reuse this address space for their phys-to-machine mapping.
    3.32       */
    3.33      idle_pg_table[l2_table_offset(RO_MPT_VIRT_START)] =
    3.34 -        l2e_create_pfn(
    3.35 -            l2e_get_pfn(idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]),
    3.36 -            l2e_get_flags(idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)])
    3.37 -            & ~(_PAGE_RW | _PAGE_GLOBAL));
    3.38 +        l2e_create_page(m2p_pg, (__PAGE_HYPERVISOR | _PAGE_PSE) & ~_PAGE_RW);
    3.39  
    3.40      /* Set up mapping cache for domain pages. */
    3.41      mapcache = (l1_pgentry_t *)alloc_xenheap_page();
    3.42      clear_page(mapcache);
    3.43      idle_pg_table[l2_table_offset(MAPCACHE_VIRT_START)] =
    3.44 -        l2e_create_phys(__pa(mapcache), __PAGE_HYPERVISOR);
    3.45 +        l2e_create_page(virt_to_page(mapcache), __PAGE_HYPERVISOR);
    3.46  
    3.47      /* Set up linear page table mapping. */
    3.48      idle_pg_table[l2_table_offset(LINEAR_PT_VIRT_START)] =
    3.49 -        l2e_create_phys(__pa(idle_pg_table), __PAGE_HYPERVISOR);
    3.50 +        l2e_create_page(virt_to_page(idle_pg_table), __PAGE_HYPERVISOR);
    3.51  }
    3.52  
    3.53  void __init zap_low_mappings(void)
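
On x86/32 the guest-visible alias of the M2P is no longer derived by copying the read-write entry and masking bits out of it; it is rebuilt directly from m2p_pg with (__PAGE_HYPERVISOR | _PAGE_PSE) & ~_PAGE_RW, i.e. a present, non-writable, non-global superpage entry, which is what the NB. comment asks for. A small worked check of that flags arithmetic, using the standard x86 PTE bit positions and an assumed value for __PAGE_HYPERVISOR:

    /*
     * Sketch only: hardware x86 PTE bits plus an assumed Xen flag bundle.
     * __PAGE_HYPERVISOR is taken as P|RW|A|D here; the real definition may
     * carry additional bits.
     */
    #define S_PRESENT   0x001UL
    #define S_RW        0x002UL
    #define S_USER      0x004UL
    #define S_ACCESSED  0x020UL
    #define S_DIRTY     0x040UL
    #define S_PSE       0x080UL
    #define S_GLOBAL    0x100UL
    #define S_PAGE_HYPERVISOR (S_PRESENT | S_RW | S_ACCESSED | S_DIRTY)   /* 0x063 */

    /* Flags used for the read-only guest alias in the hunk above. */
    enum { S_RO_MPT_FLAGS = (S_PAGE_HYPERVISOR | S_PSE) & ~S_RW };        /* 0x0e1 */

    /* Compile-time checks: present + PSE, but neither writable nor global. */
    typedef char s_ro_mpt_is_present[(S_RO_MPT_FLAGS & S_PRESENT) ? 1 : -1];
    typedef char s_ro_mpt_not_rw[(S_RO_MPT_FLAGS & S_RW) ? -1 : 1];
    typedef char s_ro_mpt_not_global[(S_RO_MPT_FLAGS & S_GLOBAL) ? -1 : 1];

The old code got there by reading the RDWR_MPT entry back out of idle_pg_table and masking off _PAGE_RW and _PAGE_GLOBAL; composing the flags directly expresses the same intent without the extra read, which is presumably why the allocation is kept in the more descriptively named m2p_pg and reused further down.
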
     4.1 --- a/xen/arch/x86/x86_64/mm.c	Fri May 20 23:17:26 2005 +0000
     4.2 +++ b/xen/arch/x86/x86_64/mm.c	Sat May 21 09:51:43 2005 +0000
     4.3 @@ -75,49 +75,43 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l
     4.4  void __init paging_init(void)
     4.5  {
     4.6      unsigned long i;
     4.7 -    l3_pgentry_t *l3rw, *l3ro;
     4.8 +    l3_pgentry_t *l3_ro_mpt;
     4.9 +    l2_pgentry_t *l2_ro_mpt;
    4.10      struct pfn_info *pg;
    4.11  
    4.12 +    /* Create user-accessible L2 directory to map the MPT for guests. */
    4.13 +    l3_ro_mpt = (l3_pgentry_t *)alloc_xenheap_page();
    4.14 +    clear_page(l3_ro_mpt);
    4.15 +    idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
    4.16 +        l4e_create_page(
    4.17 +            virt_to_page(l3_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
    4.18 +    l2_ro_mpt = (l2_pgentry_t *)alloc_xenheap_page();
    4.19 +    clear_page(l2_ro_mpt);
    4.20 +    l3_ro_mpt[l3_table_offset(RO_MPT_VIRT_START)] =
    4.21 +        l3e_create_page(
    4.22 +            virt_to_page(l2_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
    4.23 +    l2_ro_mpt += l2_table_offset(RO_MPT_VIRT_START);
    4.24 +
    4.25      /*
    4.26       * Allocate and map the machine-to-phys table.
    4.27 -     * This also ensures L3 is present for ioremap().
    4.28 +     * This also ensures L3 is present for fixmaps.
    4.29       */
    4.30      for ( i = 0; i < max_page; i += ((1UL << L2_PAGETABLE_SHIFT) / 8) )
    4.31      {
    4.32 -        pg = alloc_domheap_pages(
    4.33 -            NULL, L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT);
    4.34 +        pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER);
    4.35          if ( pg == NULL )
    4.36              panic("Not enough memory for m2p table\n");
    4.37          map_pages_to_xen(
    4.38              RDWR_MPT_VIRT_START + i*8, page_to_pfn(pg), 
    4.39 -            1UL << (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT),
    4.40 -            PAGE_HYPERVISOR | _PAGE_USER);
    4.41 +            1UL << PAGETABLE_ORDER,
    4.42 +            PAGE_HYPERVISOR);
    4.43          memset((void *)(RDWR_MPT_VIRT_START + i*8), 0x55,
    4.44                 1UL << L2_PAGETABLE_SHIFT);
    4.45 +        *l2_ro_mpt++ = l2e_create_page(
    4.46 +            pg, _PAGE_GLOBAL|_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT);
    4.47 +        BUG_ON(((unsigned long)l2_ro_mpt & ~PAGE_MASK) == 0);
    4.48      }
    4.49  
    4.50 -    /*
    4.51 -     * Above we mapped the M2P table as user-accessible and read-writable.
    4.52 -     * Fix security by denying user access at the top level of the page table.
    4.53 -     */
    4.54 -    l4e_remove_flags(&idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)],
    4.55 -                     _PAGE_USER);
    4.56 -
    4.57 -    /* Create read-only mapping of MPT for guest-OS use. */
    4.58 -    l3ro = (l3_pgentry_t *)alloc_xenheap_page();
    4.59 -    clear_page(l3ro);
    4.60 -    idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
    4.61 -        l4e_create_phys(__pa(l3ro),
    4.62 -                        (__PAGE_HYPERVISOR | _PAGE_USER) & ~_PAGE_RW);
    4.63 -
    4.64 -    /* Copy the L3 mappings from the RDWR_MPT area. */
    4.65 -    l3rw = l4e_to_l3e(
    4.66 -        idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)]);
    4.67 -    l3rw += l3_table_offset(RDWR_MPT_VIRT_START);
    4.68 -    l3ro += l3_table_offset(RO_MPT_VIRT_START);
    4.69 -    memcpy(l3ro, l3rw,
    4.70 -           (RDWR_MPT_VIRT_END - RDWR_MPT_VIRT_START) >> L3_PAGETABLE_SHIFT);
    4.71 -
    4.72      /* Set up linear page table mapping. */
    4.73      idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
    4.74          l4e_create_phys(__pa(idle_pg_table), __PAGE_HYPERVISOR);
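
The x86/64 rework is the substantive part of the fix. Instead of mapping the read-write M2P with _PAGE_USER, stripping _PAGE_USER back off at the top level, and then copying L3 entries into a read-only alias, paging_init() now builds a dedicated user-accessible L3/L2 pair under RO_MPT_VIRT_START and installs one read-only 2MB PSE entry per chunk of the table as that chunk is allocated. The loop stride is the part worth spelling out; a sketch of the per-iteration bookkeeping, with the constants assumed to have their usual x86/64 values:

    /*
     * Sketch only: the stride arithmetic behind the allocation loop above.
     * The constants are the usual x86/64 values and are assumptions here.
     */
    #define S_L1_PAGETABLE_SHIFT  12
    #define S_L2_PAGETABLE_SHIFT  21
    #define S_PAGETABLE_ORDER     (S_L2_PAGETABLE_SHIFT - S_L1_PAGETABLE_SHIFT) /* 9 */
    #define S_M2P_ENTRY_SIZE      8   /* one 8-byte m2p entry per machine frame */

    enum {
        /* Frames handed back by alloc_domheap_pages(NULL, PAGETABLE_ORDER). */
        s_frames_per_iter  = 1 << S_PAGETABLE_ORDER,                         /* 512 */
        /* m2p entries that fit in that 2MB extent, i.e. the step added to i. */
        s_entries_per_iter = (1 << S_L2_PAGETABLE_SHIFT) / S_M2P_ENTRY_SIZE, /* 262144 */
        /* Virtual-address advance per iteration: entries * 8 bytes = 2MB. */
        s_bytes_per_iter   = s_entries_per_iter * S_M2P_ENTRY_SIZE,          /* 1 << 21 */
    };

Each iteration therefore covers exactly one L2 slot's worth of virtual address space, which is why a single *l2_ro_mpt++ assignment per iteration is enough to expose the same 2MB extent to guests. Those guest-visible entries carry _PAGE_GLOBAL|_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT but not _PAGE_RW, while the hypervisor's writable mapping installed by map_pages_to_xen() now omits _PAGE_USER, so the old post-hoc l4e_remove_flags(..., _PAGE_USER) fixup is no longer needed.
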