ia64/xen-unstable

changeset 10004:666bc6079577

[IA64] xen: implemented domain destruction again

Implemented domain destruction: the new relinquish_mm() tears down the
domain's page tables by traversing d->arch.mm, releasing every page still
mapped and invalidating its M2P entry, before the remaining page lists
are relinquished.
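
The walk covers all four levels (pgd -> pud -> pmd -> pte): present leaf
entries have the reference on the mapped page dropped, and each table page
is then freed on the way back up. A minimal, self-contained model of that
leaf-to-root teardown order (illustrative names only, not the Xen API):

    /* Toy model of relinquish_mm()'s traversal: release whatever an
     * entry maps before freeing the table page that holds the entry. */
    #include <stdio.h>
    #include <stdlib.h>

    #define ENTRIES 4

    struct table {
        struct table *slot[ENTRIES];    /* NULL means "entry not present" */
    };

    static void teardown(struct table *t, int level)
    {
        if (level > 0) {                /* interior table: recurse first */
            for (int i = 0; i < ENTRIES; i++)
                if (t->slot[i] != NULL)
                    teardown(t->slot[i], level - 1);
        }
        printf("freeing level-%d table\n", level);
        free(t);                        /* the table page itself goes last */
    }

    int main(void)
    {
        struct table *pgd = calloc(1, sizeof(*pgd));
        pgd->slot[1] = calloc(1, sizeof(*pgd));           /* one "pud" */
        pgd->slot[1]->slot[2] = calloc(1, sizeof(*pgd));  /* one "pmd" */
        teardown(pgd, 2);               /* prints levels 0, 1, 2 in order */
        return 0;
    }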

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Fri May 12 08:58:56 2006 -0600 (2006-05-12)
parents 8b2295822e0d
children 304b3d9011c3
files xen/arch/ia64/xen/domain.c xen/include/asm-ia64/linux-xen/asm/pgalloc.h
line diff
     1.1 --- a/xen/arch/ia64/xen/domain.c	Fri May 12 08:55:50 2006 -0600
     1.2 +++ b/xen/arch/ia64/xen/domain.c	Fri May 12 08:58:56 2006 -0600
     1.3 @@ -77,12 +77,16 @@ extern void serial_input_init(void);
     1.4  static void init_switch_stack(struct vcpu *v);
     1.5  void build_physmap_table(struct domain *d);
     1.6  
     1.7 +static void try_to_clear_PGC_allocate(struct domain* d,
     1.8 +                                      struct page_info* page);
     1.9 +
    1.10  /* this belongs in include/asm, but there doesn't seem to be a suitable place */
    1.11  void arch_domain_destroy(struct domain *d)
    1.12  {
    1.13  	struct page_info *page;
    1.14  	struct list_head *ent, *prev;
    1.15  
    1.16 +	BUG_ON(d->arch.mm->pgd != NULL);
    1.17  	if (d->arch.mm->pgd != NULL)
    1.18  	{
    1.19  		list_for_each ( ent, &d->arch.mm->pt_list )
    1.20 @@ -395,22 +399,126 @@ static void relinquish_memory(struct dom
    1.21          /* Follow the list chain and /then/ potentially free the page. */
    1.22          ent = ent->next;
    1.23  #ifdef CONFIG_XEN_IA64_DOM0_VP
    1.24 +#if 1
    1.25 +        BUG_ON(get_gpfn_from_mfn(page_to_mfn(page)) != INVALID_M2P_ENTRY);
    1.26 +#else
    1.27          //XXX this should be done at traversing the P2M table.
    1.28 -        //BUG_ON(get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
    1.29          if (page_get_owner(page) == d)
    1.30              set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);
    1.31  #endif
    1.32 +#endif
    1.33          put_page(page);
    1.34      }
    1.35  
    1.36      spin_unlock_recursive(&d->page_alloc_lock);
    1.37  }
    1.38  
    1.39 +static void
    1.40 +relinquish_pte(struct domain* d, pte_t* pte)
    1.41 +{
    1.42 +    unsigned long mfn = pte_pfn(*pte);
    1.43 +    struct page_info* page;
    1.44 +
     1.45 +    // A vmx domain uses bits [58:56] to distinguish I/O regions from memory;
     1.46 +    // see vmx_build_physmap_table() in vmx_init.c.
    1.47 +    if (((mfn << PAGE_SHIFT) & GPFN_IO_MASK) != GPFN_MEM)
    1.48 +        return;
    1.49 +
     1.50 +    // The domain might map I/O space or ACPI table pages, so check for that.
    1.51 +    if (!mfn_valid(mfn))
    1.52 +        return;
    1.53 +    page = mfn_to_page(mfn);
     1.54 +    // A struct page_info for this mfn may or may not exist, depending
     1.55 +    // on CONFIG_VIRTUAL_FRAME_TABLE.
     1.56 +    // This check is too simplistic; the right way would be to check
     1.57 +    // whether the page belongs to an I/O area or to the ACPI tables.
    1.58 +    if (page_get_owner(page) == NULL) {
    1.59 +        BUG_ON(page->count_info != 0);
    1.60 +        return;
    1.61 +    }
    1.62 +
    1.63 +#ifdef CONFIG_XEN_IA64_DOM0_VP
    1.64 +    if (page_get_owner(page) == d) {
    1.65 +        BUG_ON(get_gpfn_from_mfn(mfn) == INVALID_M2P_ENTRY);
    1.66 +        set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
    1.67 +    }
    1.68 +#endif
    1.69 +    try_to_clear_PGC_allocate(d, page);
    1.70 +    put_page(page);
    1.71 +}
    1.72 +
    1.73 +static void
    1.74 +relinquish_pmd(struct domain* d, pmd_t* pmd, unsigned long offset)
    1.75 +{
    1.76 +    unsigned long i;
    1.77 +    pte_t* pte = pte_offset_map(pmd, offset);
    1.78 +
    1.79 +    for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
    1.80 +        if (!pte_present(*pte))
    1.81 +            continue;
    1.82 +        
    1.83 +        relinquish_pte(d, pte);
    1.84 +    }
    1.85 +    pte_free_kernel(pte_offset_map(pmd, offset));
    1.86 +}
    1.87 +
    1.88 +static void
    1.89 +relinquish_pud(struct domain* d, pud_t *pud, unsigned long offset)
    1.90 +{
    1.91 +    unsigned long i;
    1.92 +    pmd_t *pmd = pmd_offset(pud, offset);
    1.93 +    
    1.94 +    for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
    1.95 +        if (!pmd_present(*pmd))
    1.96 +            continue;
    1.97 +        
    1.98 +        relinquish_pmd(d, pmd, offset + (i << PMD_SHIFT));
    1.99 +    }
   1.100 +    pmd_free(pmd_offset(pud, offset));
   1.101 +}
   1.102 +
   1.103 +static void
   1.104 +relinquish_pgd(struct domain* d, pgd_t *pgd, unsigned long offset)
   1.105 +{
   1.106 +    unsigned long i;
   1.107 +    pud_t *pud = pud_offset(pgd, offset);
   1.108 +
   1.109 +    for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
   1.110 +        if (!pud_present(*pud))
   1.111 +            continue;
   1.112 +
   1.113 +        relinquish_pud(d, pud, offset + (i << PUD_SHIFT));
   1.114 +    }
   1.115 +    pud_free(pud_offset(pgd, offset));
   1.116 +}
   1.117 +
   1.118 +static void
   1.119 +relinquish_mm(struct domain* d)
   1.120 +{
   1.121 +    struct mm_struct* mm = d->arch.mm;
   1.122 +    unsigned long i;
   1.123 +    pgd_t* pgd;
   1.124 +
   1.125 +    if (mm->pgd == NULL)
   1.126 +        return;
   1.127 +
   1.128 +    pgd = pgd_offset(mm, 0);
   1.129 +    for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
   1.130 +        if (!pgd_present(*pgd))
   1.131 +            continue;
   1.132 +
   1.133 +        relinquish_pgd(d, pgd, i << PGDIR_SHIFT);
   1.134 +    }
   1.135 +    pgd_free(mm->pgd);
   1.136 +    mm->pgd = NULL;
   1.137 +}
   1.138 +
   1.139  void domain_relinquish_resources(struct domain *d)
   1.140  {
   1.141      /* Relinquish every page of memory. */
   1.142  
   1.143 -    //XXX relase page traversing d->arch.mm.
    1.144 +    // Release pages by traversing d->arch.mm.
   1.145 +    relinquish_mm(d);
   1.146  
   1.147      relinquish_memory(d, &d->xenpage_list);
   1.148      relinquish_memory(d, &d->page_list);
   1.149 @@ -746,14 +854,14 @@ assign_domain_page(struct domain *d,
   1.150      struct page_info* page = mfn_to_page(physaddr >> PAGE_SHIFT);
   1.151      int ret;
   1.152  
   1.153 +    BUG_ON((physaddr & GPFN_IO_MASK) != GPFN_MEM);
   1.154      ret = get_page(page, d);
   1.155      BUG_ON(ret == 0);
   1.156      __assign_domain_page(d, mpaddr, physaddr);
   1.157  
   1.158      //XXX CONFIG_XEN_IA64_DOM0_VP
   1.159      //    TODO racy
   1.160 -    if ((physaddr & GPFN_IO_MASK) == GPFN_MEM)
   1.161 -        set_gpfn_from_mfn(physaddr >> PAGE_SHIFT, mpaddr >> PAGE_SHIFT);
   1.162 +    set_gpfn_from_mfn(physaddr >> PAGE_SHIFT, mpaddr >> PAGE_SHIFT);
   1.163  }
   1.164  
   1.165  #ifdef CONFIG_XEN_IA64_DOM0_VP
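
The assign_domain_page() hunk above also tightens the machine-to-physical
(M2P) bookkeeping: I/O frames may no longer reach this path at all (the old
runtime check becomes a BUG_ON), so the mfn -> gpfn entry is recorded
unconditionally, and the teardown paths in turn assert that an entry is
still live before invalidating it. A toy model of that invariant pair,
with illustrative names rather than the Xen API:

    /* Toy M2P table: assignment records a live entry; teardown must
     * find it live and then mark it invalid. */
    #include <assert.h>

    #define NR_MFNS           16
    #define INVALID_M2P_ENTRY (~0UL)

    static unsigned long m2p[NR_MFNS];

    static void assign_frame(unsigned long mfn, unsigned long gpfn)
    {
        m2p[mfn] = gpfn;                       /* cf. set_gpfn_from_mfn() */
    }

    static void relinquish_frame(unsigned long mfn)
    {
        assert(m2p[mfn] != INVALID_M2P_ENTRY); /* cf. the BUG_ON in relinquish_pte() */
        m2p[mfn] = INVALID_M2P_ENTRY;
    }

    int main(void)
    {
        for (unsigned long i = 0; i < NR_MFNS; i++)
            m2p[i] = INVALID_M2P_ENTRY;        /* every frame starts invalid */
        assign_frame(3, 7);
        relinquish_frame(3);
        return 0;
    }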
     2.1 --- a/xen/include/asm-ia64/linux-xen/asm/pgalloc.h	Fri May 12 08:55:50 2006 -0600
     2.2 +++ b/xen/include/asm-ia64/linux-xen/asm/pgalloc.h	Fri May 12 08:58:56 2006 -0600
     2.3 @@ -139,12 +139,14 @@ static inline void pte_free(struct page 
     2.4  {
     2.5  	pgtable_quicklist_free(page_address(pte));
     2.6  }
     2.7 +#endif
     2.8  
     2.9  static inline void pte_free_kernel(pte_t * pte)
    2.10  {
    2.11  	pgtable_quicklist_free(pte);
    2.12  }
    2.13  
    2.14 +#ifndef XEN
    2.15  #define __pte_free_tlb(tlb, pte)	pte_free(pte)
    2.16  #endif
    2.17
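
The pgalloc.h hunk exists only to make pte_free_kernel() visible to Xen:
the Linux-only #ifndef XEN region is split so that pte_free_kernel() falls
outside it, letting the new relinquish_pmd() free leaf page tables, while
pte_free() and __pte_free_tlb() stay Linux-only. Condensed from the hunk
(the opening #ifndef XEN is pre-existing, further up in the header), the
region ends up as:

    #ifndef XEN                     /* guard opened further up in the header */
    static inline void pte_free(struct page *pte)
    {
            pgtable_quicklist_free(page_address(pte));
    }
    #endif

    /* now built for Xen as well as Linux */
    static inline void pte_free_kernel(pte_t *pte)
    {
            pgtable_quicklist_free(pte);
    }

    #ifndef XEN
    #define __pte_free_tlb(tlb, pte)        pte_free(pte)
    #endif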