ia64/xen-unstable

changeset 10799:9b7e1ea4c4d2

[HVM] Sync p2m table across all vcpus on x86_32p xen.
We found that VGA acceleration cannot work on SMP VMX guests on x86_32p
xen. This is caused by the way we construct the p2m table today: only the 1st
l2 page table slot that maps p2m table pages is copied to the non-vcpu0
monitor page tables when the VMX guest is created. But VGA acceleration will
create some p2m table entries beyond the 1st l2 page table slot after the HVM
guest is created, so only vcpu0 can get these p2m entries, and the other
vcpus cannot do VGA acceleration.

Signed-off-by: Xin Li <xin.b.li@intel.com>
author kfraser@localhost.localdomain
date Wed Jul 26 11:34:12 2006 +0100 (2006-07-26)
parents 7137825805c7
children 14642f36a201
files xen/arch/x86/shadow_public.c
line diff
     1.1 --- a/xen/arch/x86/shadow_public.c	Wed Jul 26 11:10:26 2006 +0100
     1.2 +++ b/xen/arch/x86/shadow_public.c	Wed Jul 26 11:34:12 2006 +0100
     1.3 @@ -398,7 +398,7 @@ static void alloc_monitor_pagetable(stru
     1.4      unsigned long m2mfn, m3mfn;
     1.5      l2_pgentry_t *mpl2e;
     1.6      l3_pgentry_t *mpl3e;
     1.7 -    struct page_info *m2mfn_info, *m3mfn_info, *page;
     1.8 +    struct page_info *m2mfn_info, *m3mfn_info;
     1.9      struct domain *d = v->domain;
    1.10      int i;
    1.11  
    1.12 @@ -411,40 +411,62 @@ static void alloc_monitor_pagetable(stru
    1.13      mpl3e = (l3_pgentry_t *) map_domain_page_global(m3mfn);
    1.14      memset(mpl3e, 0, L3_PAGETABLE_ENTRIES * sizeof(l3_pgentry_t));
    1.15  
    1.16 +    v->arch.monitor_table = pagetable_from_pfn(m3mfn);
    1.17 +    v->arch.monitor_vtable = (l2_pgentry_t *) mpl3e;
    1.18 +
    1.19      m2mfn_info = alloc_domheap_page(NULL);
    1.20      ASSERT( m2mfn_info );
    1.21  
    1.22      m2mfn = page_to_mfn(m2mfn_info);
    1.23      mpl2e = (l2_pgentry_t *) map_domain_page(m2mfn);
    1.24 -    memset(mpl2e, 0, L2_PAGETABLE_ENTRIES * sizeof(l2_pgentry_t));
    1.25 +    memset(mpl2e, 0, PAGE_SIZE);
    1.26 +
    1.27 +    /* Map L2 page into L3 */
    1.28 +    mpl3e[L3_PAGETABLE_ENTRIES - 1] = l3e_from_pfn(m2mfn, _PAGE_PRESENT);
    1.29  
    1.30      memcpy(&mpl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
    1.31             &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
    1.32             L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
    1.33 -    /*
    1.34 -     * Map L2 page into L3
    1.35 -     */
    1.36 -    mpl3e[L3_PAGETABLE_ENTRIES - 1] = l3e_from_pfn(m2mfn, _PAGE_PRESENT);
    1.37 -    page = l3e_get_page(mpl3e[L3_PAGETABLE_ENTRIES - 1]);
    1.38  
    1.39      for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
    1.40          mpl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
    1.41              l2e_from_page(
    1.42 -                virt_to_page(d->arch.mm_perdomain_pt) + i, 
    1.43 +                virt_to_page(d->arch.mm_perdomain_pt) + i,
    1.44                  __PAGE_HYPERVISOR);
    1.45      for ( i = 0; i < (LINEARPT_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
    1.46          mpl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
    1.47              (l3e_get_flags(mpl3e[i]) & _PAGE_PRESENT) ?
    1.48              l2e_from_pfn(l3e_get_pfn(mpl3e[i]), __PAGE_HYPERVISOR) :
    1.49              l2e_empty();
    1.50 -    for ( i = 0; i < (MACHPHYS_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
    1.51 -        mpl2e[l2_table_offset(RO_MPT_VIRT_START) + i] = l2e_empty();
    1.52 -
    1.53 -    v->arch.monitor_table = pagetable_from_pfn(m3mfn);
    1.54 -    v->arch.monitor_vtable = (l2_pgentry_t *) mpl3e;
    1.55  
    1.56      if ( v->vcpu_id == 0 )
    1.57 +    {
    1.58 +        unsigned long m1mfn;
    1.59 +        l1_pgentry_t *mpl1e;
    1.60 +        struct page_info *m1mfn_info;
    1.61 +
    1.62 +        /*
    1.63 +         * 2 l2 slots are allocated here, so that 4M for p2m table,
    1.64 +         * with this we can guarantee PCI MMIO p2m entries, especially
    1.65 +         * Cirrus VGA, can be seen by all other vcpus.
    1.66 +         */
    1.67 +        for ( i = 0; i < 2; i++ )
    1.68 +        {
    1.69 +            m1mfn_info = alloc_domheap_page(NULL);
    1.70 +            ASSERT( m1mfn_info );
    1.71 +
    1.72 +            m1mfn = page_to_mfn(m1mfn_info);
    1.73 +            mpl1e = (l1_pgentry_t *) map_domain_page(m1mfn);
    1.74 +            memset(mpl1e, 0, PAGE_SIZE);
    1.75 +            unmap_domain_page(mpl1e);
    1.76 +
    1.77 +            /* Map L1 page into L2 */
    1.78 +            mpl2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
    1.79 +                l2e_from_pfn(m1mfn, __PAGE_HYPERVISOR);
    1.80 +        }
    1.81 +
    1.82          alloc_p2m_table(d);
    1.83 +    }
    1.84      else
    1.85      {
    1.86          unsigned long mfn;
    1.87 @@ -468,14 +490,9 @@ static void alloc_monitor_pagetable(stru
    1.88  
    1.89              l2tab = map_domain_page(l3e_get_pfn(l3e));
    1.90  
    1.91 -            /*
    1.92 -             * Just one l2 slot is used here, so at most 2M for p2m table:
    1.93 -             *      ((4K * 512)/sizeof(unsigned long)) * 4K = 2G
    1.94 -             * should be OK on PAE xen, since Qemu DM can only map 1.5G VMX
    1.95 -             * guest memory.
    1.96 -             */
    1.97 -            mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
    1.98 -                l2tab[l2_table_offset(RO_MPT_VIRT_START)];
    1.99 +            for ( i = 0; i < (MACHPHYS_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
   1.100 +                mpl2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
   1.101 +                    l2tab[l2_table_offset(RO_MPT_VIRT_START) + i];
   1.102  
   1.103              unmap_domain_page(l2tab);
   1.104              unmap_domain_page(l3tab);