ia64/xen-unstable

changeset 12524:b39844e292f6

[IA64] fix p2m table destruction

Introduce delayed p2m table destruction
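The point of the delay: gmfn_to_mfn_foreign() may still be reached through __gnttab_copy() while the domain is being destroyed (VNIF copy receiver), so the p2m page-table pages have to stay walkable after the translations are gone. mm_teardown() now only clears the ptes and drops the page references during domain_relinquish_resources(); the page-table pages and the shadow bitmap are freed later, from arch_domain_destroy() via mm_final_teardown()/mm_p2m_teardown(). The stand-alone C sketch below illustrates that two-phase split only; the toy one-level table and the teardown_translations()/teardown_tables() helpers are invented for the illustration and are not hypervisor code.

/*
 * Minimal stand-alone sketch of the two-phase teardown this change
 * introduces (illustration only, not hypervisor code).  The toy
 * one-level "p2m", ENTRIES, and the helpers below are invented for
 * the example; the real code walks pgd/pud/pmd/pte levels in
 * xen/arch/ia64/xen/mm.c.
 */
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 4

struct toy_p2m {
    unsigned long *leaf[ENTRIES];   /* leaf pages holding gpfn -> mfn entries */
};

/*
 * Phase 1: mm_teardown(), called from domain_relinquish_resources().
 * Clear every translation and drop whatever reference it held, but
 * keep the page-table pages so a late lookup against the dying
 * domain still walks a valid, empty table.
 */
static void teardown_translations(struct toy_p2m *m)
{
    for (int i = 0; i < ENTRIES; i++) {
        if (m->leaf[i] == NULL)
            continue;
        for (int j = 0; j < ENTRIES; j++)
            m->leaf[i][j] = 0;      /* stands in for ptep_get_and_clear() + put_page() */
    }
}

/*
 * Phase 2: mm_final_teardown() -> mm_p2m_teardown(), called from
 * arch_domain_destroy().  No lookups can happen any more, so the
 * table pages themselves are finally freed.
 */
static void teardown_tables(struct toy_p2m *m)
{
    for (int i = 0; i < ENTRIES; i++) {
        free(m->leaf[i]);           /* stands in for pte_free_kernel()/pmd_free()/pud_free() */
        m->leaf[i] = NULL;
    }
}

int main(void)
{
    struct toy_p2m m = { { NULL } };

    m.leaf[0] = calloc(ENTRIES, sizeof(unsigned long));
    m.leaf[0][1] = 0x1234;          /* pretend gpfn 1 maps mfn 0x1234 */

    teardown_translations(&m);      /* phase 1: entries gone, table still walkable */
    printf("lookup after phase 1: %#lx\n", m.leaf[0][1]);

    teardown_tables(&m);            /* phase 2: table pages freed */
    return 0;
}

In the real change the entries are cleared with ptep_get_and_clear() (acquire semantics), and the pgd == NULL BUG() check in gmfn_to_mfn_foreign() is dropped because the pgd now survives until arch_domain_destroy().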

Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
Signed-off-by: Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Mon Nov 27 09:26:15 2006 -0700 (2006-11-27)
parents 0114b372dfae
children 7e7846ea4ab3
files xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/mm.c xen/include/asm-ia64/mm.h
line diff
     1.1 --- a/xen/arch/ia64/xen/domain.c	Wed Nov 22 10:13:31 2006 -0700
     1.2 +++ b/xen/arch/ia64/xen/domain.c	Mon Nov 27 09:26:15 2006 -0700
     1.3 @@ -462,11 +462,10 @@ fail_nomem1:
     1.4  
     1.5  void arch_domain_destroy(struct domain *d)
     1.6  {
     1.7 -	BUG_ON(d->arch.mm.pgd != NULL);
     1.8 +	mm_final_teardown(d);
     1.9 +
    1.10  	if (d->shared_info != NULL)
    1.11  	    free_xenheap_pages(d->shared_info, get_order_from_shift(XSI_SHIFT));
    1.12 -	if (d->arch.shadow_bitmap != NULL)
    1.13 -		xfree(d->arch.shadow_bitmap);
    1.14  
    1.15  	tlb_track_destroy(d);
    1.16  
    1.17 @@ -613,14 +612,14 @@ static void relinquish_memory(struct dom
    1.18  
    1.19  void domain_relinquish_resources(struct domain *d)
    1.20  {
    1.21 -    /* Relinquish every page of memory. */
    1.22 -
    1.23 -    // relase page traversing d->arch.mm.
    1.24 -    relinquish_mm(d);
    1.25 -
    1.26 +    /* Relinquish guest resources for VT-i domain. */
    1.27      if (d->vcpu[0] && VMX_DOMAIN(d->vcpu[0]))
    1.28  	    vmx_relinquish_guest_resources(d);
    1.29  
    1.30 +    /* Tear down shadow mode stuff. */
    1.31 +    mm_teardown(d);
    1.32 +
    1.33 +    /* Relinquish every page of memory. */
    1.34      relinquish_memory(d, &d->xenpage_list);
    1.35      relinquish_memory(d, &d->page_list);
    1.36  
     2.1 --- a/xen/arch/ia64/xen/mm.c	Wed Nov 22 10:13:31 2006 -0700
     2.2 +++ b/xen/arch/ia64/xen/mm.c	Mon Nov 27 09:26:15 2006 -0700
     2.3 @@ -249,17 +249,21 @@ try_to_clear_PGC_allocate(struct domain*
     2.4  }
     2.5  
     2.6  static void
     2.7 -relinquish_pte(struct domain* d, pte_t* pte)
     2.8 +mm_teardown_pte(struct domain* d, pte_t* pte, unsigned long offset)
     2.9  {
    2.10 -    unsigned long mfn = pte_pfn(*pte);
    2.11 +    pte_t old_pte;
    2.12 +    unsigned long mfn;
    2.13      struct page_info* page;
    2.14  
    2.15 +    old_pte = ptep_get_and_clear(&d->arch.mm, offset, pte);// acquire semantics
    2.16 +    
    2.17      // vmx domain use bit[58:56] to distinguish io region from memory.
    2.18      // see vmx_build_physmap_table() in vmx_init.c
    2.19 -    if (!pte_mem(*pte))
    2.20 +    if (!pte_mem(old_pte))
    2.21          return;
    2.22  
    2.23      // domain might map IO space or acpi table pages. check it.
    2.24 +    mfn = pte_pfn(old_pte);
    2.25      if (!mfn_valid(mfn))
    2.26          return;
    2.27      page = mfn_to_page(mfn);
    2.28 @@ -272,17 +276,17 @@ relinquish_pte(struct domain* d, pte_t* 
    2.29          return;
    2.30      }
    2.31  
    2.32 -    if (page_get_owner(page) == d) {
    2.33 +    if (pte_pgc_allocated(old_pte)) {
    2.34 +        BUG_ON(page_get_owner(page) != d);
    2.35          BUG_ON(get_gpfn_from_mfn(mfn) == INVALID_M2P_ENTRY);
    2.36          set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
    2.37 +        try_to_clear_PGC_allocate(d, page);
    2.38      }
    2.39 -
    2.40 -    try_to_clear_PGC_allocate(d, page);
    2.41      put_page(page);
    2.42  }
    2.43  
    2.44  static void
    2.45 -relinquish_pmd(struct domain* d, pmd_t* pmd, unsigned long offset)
    2.46 +mm_teardown_pmd(struct domain* d, pmd_t* pmd, unsigned long offset)
    2.47  {
    2.48      unsigned long i;
    2.49      pte_t* pte = pte_offset_map(pmd, offset);
    2.50 @@ -290,14 +294,12 @@ relinquish_pmd(struct domain* d, pmd_t* 
    2.51      for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
    2.52          if (!pte_present(*pte))
    2.53              continue;
    2.54 -
    2.55 -        relinquish_pte(d, pte);
    2.56 +        mm_teardown_pte(d, pte, offset + (i << PAGE_SHIFT));
    2.57      }
    2.58 -    pte_free_kernel(pte_offset_map(pmd, offset));
    2.59  }
    2.60  
    2.61  static void
    2.62 -relinquish_pud(struct domain* d, pud_t *pud, unsigned long offset)
    2.63 +mm_teardown_pud(struct domain* d, pud_t *pud, unsigned long offset)
    2.64  {
    2.65      unsigned long i;
    2.66      pmd_t *pmd = pmd_offset(pud, offset);
    2.67 @@ -305,14 +307,12 @@ relinquish_pud(struct domain* d, pud_t *
    2.68      for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
    2.69          if (!pmd_present(*pmd))
    2.70              continue;
    2.71 -
    2.72 -        relinquish_pmd(d, pmd, offset + (i << PMD_SHIFT));
    2.73 +        mm_teardown_pmd(d, pmd, offset + (i << PMD_SHIFT));
    2.74      }
    2.75 -    pmd_free(pmd_offset(pud, offset));
    2.76  }
    2.77  
    2.78  static void
    2.79 -relinquish_pgd(struct domain* d, pgd_t *pgd, unsigned long offset)
    2.80 +mm_teardown_pgd(struct domain* d, pgd_t *pgd, unsigned long offset)
    2.81  {
    2.82      unsigned long i;
    2.83      pud_t *pud = pud_offset(pgd, offset);
    2.84 @@ -320,14 +320,12 @@ relinquish_pgd(struct domain* d, pgd_t *
    2.85      for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
    2.86          if (!pud_present(*pud))
    2.87              continue;
    2.88 -
    2.89 -        relinquish_pud(d, pud, offset + (i << PUD_SHIFT));
    2.90 +        mm_teardown_pud(d, pud, offset + (i << PUD_SHIFT));
    2.91      }
    2.92 -    pud_free(pud_offset(pgd, offset));
    2.93  }
    2.94  
    2.95  void
    2.96 -relinquish_mm(struct domain* d)
    2.97 +mm_teardown(struct domain* d)
    2.98  {
    2.99      struct mm_struct* mm = &d->arch.mm;
   2.100      unsigned long i;
   2.101 @@ -340,13 +338,72 @@ relinquish_mm(struct domain* d)
   2.102      for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
   2.103          if (!pgd_present(*pgd))
   2.104              continue;
   2.105 +        mm_teardown_pgd(d, pgd, i << PGDIR_SHIFT);
   2.106 +    }
   2.107 +}
   2.108  
   2.109 -        relinquish_pgd(d, pgd, i << PGDIR_SHIFT);
   2.110 +static void
   2.111 +mm_p2m_teardown_pmd(struct domain* d, pmd_t* pmd, unsigned long offset)
   2.112 +{
   2.113 +    pte_free_kernel(pte_offset_map(pmd, offset));
   2.114 +}
   2.115 +
   2.116 +static void
   2.117 +mm_p2m_teardown_pud(struct domain* d, pud_t *pud, unsigned long offset)
   2.118 +{
   2.119 +    unsigned long i;
   2.120 +    pmd_t *pmd = pmd_offset(pud, offset);
   2.121 +
   2.122 +    for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
   2.123 +        if (!pmd_present(*pmd))
   2.124 +            continue;
   2.125 +        mm_p2m_teardown_pmd(d, pmd, offset + (i << PMD_SHIFT));
   2.126 +    }
   2.127 +    pmd_free(pmd_offset(pud, offset));
   2.128 +}
   2.129 +
   2.130 +static void
   2.131 +mm_p2m_teardown_pgd(struct domain* d, pgd_t *pgd, unsigned long offset)
   2.132 +{
   2.133 +    unsigned long i;
   2.134 +    pud_t *pud = pud_offset(pgd, offset);
   2.135 +
   2.136 +    for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
   2.137 +        if (!pud_present(*pud))
   2.138 +            continue;
   2.139 +        mm_p2m_teardown_pud(d, pud, offset + (i << PUD_SHIFT));
   2.140 +    }
   2.141 +    pud_free(pud_offset(pgd, offset));
   2.142 +}
   2.143 +
   2.144 +static void
   2.145 +mm_p2m_teardown(struct domain* d)
   2.146 +{
   2.147 +    struct mm_struct* mm = &d->arch.mm;
   2.148 +    unsigned long i;
   2.149 +    pgd_t* pgd;
   2.150 +
   2.151 +    BUG_ON(mm->pgd == NULL);
   2.152 +    pgd = pgd_offset(mm, 0);
   2.153 +    for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
   2.154 +        if (!pgd_present(*pgd))
   2.155 +            continue;
   2.156 +        mm_p2m_teardown_pgd(d, pgd, i << PGDIR_SHIFT);
   2.157      }
   2.158      pgd_free(mm->pgd);
   2.159      mm->pgd = NULL;
   2.160  }
   2.161  
   2.162 +void
   2.163 +mm_final_teardown(struct domain* d)
   2.164 +{
   2.165 +    if (d->arch.shadow_bitmap != NULL) {
   2.166 +        xfree(d->arch.shadow_bitmap);
   2.167 +        d->arch.shadow_bitmap = NULL;
   2.168 +    }
   2.169 +    mm_p2m_teardown(d);
   2.170 +}
   2.171 +
   2.172  // stolen from share_xen_page_with_guest() in xen/arch/x86/mm.c
   2.173  void
   2.174  share_xen_page_with_guest(struct page_info *page,
   2.175 @@ -400,13 +457,6 @@ gmfn_to_mfn_foreign(struct domain *d, un
   2.176  {
   2.177  	unsigned long pte;
   2.178  
   2.179 -	// This function may be called from __gnttab_copy()
   2.180 -	// during domain destruction with VNIF copy receiver.
   2.181 -	// ** FIXME: This is not SMP-safe yet about p2m table. **
   2.182 -	if (unlikely(d->arch.mm.pgd == NULL)) {
   2.183 -		BUG();
   2.184 -		return INVALID_MFN;
   2.185 -	}
   2.186  	pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT, NULL);
   2.187  	if (!pte) {
   2.188  		panic("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
   2.189 @@ -1308,7 +1358,7 @@ expose_p2m_page(struct domain* d, unsign
   2.190      // pte page is allocated form xen heap.(see pte_alloc_one_kernel().)
   2.191      // so that the page has NULL page owner and it's reference count
   2.192      // is useless.
   2.193 -    // see also relinquish_pte()'s page_get_owner() == NULL check.
   2.194 +    // see also mm_teardown_pte()'s page_get_owner() == NULL check.
   2.195      BUG_ON(page_get_owner(page) != NULL);
   2.196  
   2.197      return __assign_domain_page(d, mpaddr, page_to_maddr(page),
     3.1 --- a/xen/include/asm-ia64/mm.h	Wed Nov 22 10:13:31 2006 -0700
     3.2 +++ b/xen/include/asm-ia64/mm.h	Mon Nov 27 09:26:15 2006 -0700
     3.3 @@ -422,7 +422,8 @@ extern unsigned long totalram_pages;
     3.4  extern int nr_swap_pages;
     3.5  
     3.6  extern void alloc_dom_xen_and_dom_io(void);
     3.7 -extern void relinquish_mm(struct domain* d);
     3.8 +extern void mm_teardown(struct domain* d);
     3.9 +extern void mm_final_teardown(struct domain* d);
    3.10  extern struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
    3.11  extern void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
    3.12  extern int __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags);