ia64/xen-unstable
changeset 9163:551f7935f79a
[IA64] domU destroy
Signed-off-by: Akio Takebe <takebe_akio@jp.fujitsu.com>
Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
author      awilliam@xenbuild.aw
date        Fri Mar 10 08:25:54 2006 -0700
parents     c644eb4049ab
children    1abf3783975d
files       xen/arch/ia64/xen/domain.c
            xen/arch/ia64/xen/regionreg.c
            xen/include/asm-ia64/domain.h
            xen/include/asm-ia64/regionreg.h
--- a/xen/arch/ia64/xen/domain.c    Fri Mar 10 08:23:39 2006 -0700
+++ b/xen/arch/ia64/xen/domain.c    Fri Mar 10 08:25:54 2006 -0700
@@ -49,6 +49,8 @@
 #include <asm/pal.h>
 #include <asm/vhpt.h>
 #include <public/hvm/ioreq.h>
+#include <asm/tlbflush.h>
+#include <asm/regionreg.h>
 
 #define CONFIG_DOMAIN0_CONTIGUOUS
 unsigned long dom0_start = -1L;
@@ -69,10 +71,7 @@ extern unsigned long dom_fw_setup(struct
 /* FIXME: where these declarations should be there ? */
 extern void domain_pend_keyboard_interrupt(int);
 extern long platform_is_hp_ski(void);
-extern unsigned long allocate_metaphysical_rr(void);
-extern int allocate_rid_range(struct domain *, unsigned long);
 extern void sync_split_caches(void);
-extern void init_all_rr(struct vcpu *);
 extern void serial_input_init(void);
 
 static void init_switch_stack(struct vcpu *v);
@@ -80,9 +79,33 @@ static void init_switch_stack(struct vcp
 /* this belongs in include/asm, but there doesn't seem to be a suitable place */
 void arch_domain_destroy(struct domain *d)
 {
-    printf("arch_domain_destroy: not implemented\n");
-    //free_page((unsigned long)d->mm.perdomain_pt);
-    free_xenheap_page(d->shared_info);
+    struct page *page;
+    struct list_head *ent, *prev;
+
+    if (d->arch.mm->pgd != NULL)
+    {
+        list_for_each ( ent, &d->arch.mm->pt_list )
+        {
+            page = list_entry(ent, struct page, list);
+            prev = ent->prev;
+            list_del(ent);
+            free_xenheap_page(page_to_virt(page));
+            ent = prev;
+        }
+        pgd_free(d->arch.mm->pgd);
+    }
+    if (d->arch.mm != NULL)
+        xfree(d->arch.mm);
+    if (d->shared_info != NULL)
+        free_xenheap_page(d->shared_info);
+
+    deallocate_rid_range(d);
+
+    /* It is really good in this? */
+    flush_tlb_all();
+
+    /* It is really good in this? */
+    vhpt_flush();
 }
 
 static void default_idle(void)
@@ -187,6 +210,8 @@ struct vcpu *alloc_vcpu_struct(struct do
 
 void free_vcpu_struct(struct vcpu *v)
 {
+    if (v->arch.privregs != NULL)
+        free_xenheap_pages(v->arch.privregs, get_order(sizeof(mapped_regs_t)));
     free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
 }
 
@@ -239,6 +264,7 @@ int arch_domain_create(struct domain *d)
     if ((d->arch.mm = xmalloc(struct mm_struct)) == NULL)
         goto fail_nomem;
     memset(d->arch.mm, 0, sizeof(*d->arch.mm));
+    INIT_LIST_HEAD(&d->arch.mm->pt_list);
 
     if ((d->arch.mm->pgd = pgd_alloc(d->arch.mm)) == NULL)
         goto fail_nomem;
@@ -310,10 +336,74 @@ int arch_set_info_guest(struct vcpu *v, 
     return 0;
 }
 
+static void relinquish_memory(struct domain *d, struct list_head *list)
+{
+    struct list_head *ent;
+    struct page *page;
+#ifndef __ia64__
+    unsigned long x, y;
+#endif
+
+    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
+    spin_lock_recursive(&d->page_alloc_lock);
+    ent = list->next;
+    while ( ent != list )
+    {
+        page = list_entry(ent, struct page, list);
+        /* Grab a reference to the page so it won't disappear from under us. */
+        if ( unlikely(!get_page(page, d)) )
+        {
+            /* Couldn't get a reference -- someone is freeing this page. */
+            ent = ent->next;
+            continue;
+        }
+
+        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
+            put_page_and_type(page);
+
+        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+            put_page(page);
+
+#ifndef __ia64__
+        /*
+         * Forcibly invalidate base page tables at this point to break circular
+         * 'linear page table' references. This is okay because MMU structures
+         * are not shared across domains and this domain is now dead. Thus base
+         * tables are not in use so a non-zero count means circular reference.
+         */
+        y = page->u.inuse.type_info;
+        for ( ; ; )
+        {
+            x = y;
+            if ( likely((x & (PGT_type_mask|PGT_validated)) !=
+                        (PGT_base_page_table|PGT_validated)) )
+                break;
+
+            y = cmpxchg(&page->u.inuse.type_info, x, x & ~PGT_validated);
+            if ( likely(y == x) )
+            {
+                free_page_type(page, PGT_base_page_table);
+                break;
+            }
+        }
+#endif
+
+        /* Follow the list chain and /then/ potentially free the page. */
+        ent = ent->next;
+        put_page(page);
+    }
+
+    spin_unlock_recursive(&d->page_alloc_lock);
+}
+
 void domain_relinquish_resources(struct domain *d)
 {
-    /* FIXME */
-    printf("domain_relinquish_resources: not implemented\n");
+    /* Relinquish every page of memory. */
+
+    /* xenheap_list is not used in ia64. */
+    BUG_ON(!list_empty(&d->xenpage_list));
+
+    relinquish_memory(d, &d->page_list);
 }
 
 // heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
@@ -389,7 +479,7 @@ static struct page * assign_new_domain0_
 struct page * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
 {
     struct mm_struct *mm = d->arch.mm;
-    struct page *p = (struct page *)0;
+    struct page *pt, *p = (struct page *)0;
     pgd_t *pgd;
     pud_t *pud;
     pmd_t *pmd;
@@ -401,16 +491,28 @@ struct page * assign_new_domain_page(str
     }
     pgd = pgd_offset(mm,mpaddr);
     if (pgd_none(*pgd))
+    {
         pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
+        pt = maddr_to_page(pgd_val(*pgd));
+        list_add_tail(&pt->list, &d->arch.mm->pt_list);
+    }
 
     pud = pud_offset(pgd, mpaddr);
     if (pud_none(*pud))
+    {
         pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
+        pt = maddr_to_page(pud_val(*pud));
+        list_add_tail(&pt->list, &d->arch.mm->pt_list);
+    }
 
     pmd = pmd_offset(pud, mpaddr);
     if (pmd_none(*pmd))
+    {
         pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
 //      pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
+        pt = maddr_to_page(pmd_val(*pmd));
+        list_add_tail(&pt->list, &d->arch.mm->pt_list);
+    }
 
     pte = pte_offset_map(pmd, mpaddr);
     if (pte_none(*pte)) {
@@ -443,6 +545,7 @@ struct page * assign_new_domain_page(str
 void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
 {
     struct mm_struct *mm = d->arch.mm;
+    struct page *pt;
     pgd_t *pgd;
     pud_t *pud;
     pmd_t *pmd;
@@ -454,16 +557,28 @@ void assign_domain_page(struct domain *d
     }
     pgd = pgd_offset(mm,mpaddr);
     if (pgd_none(*pgd))
+    {
         pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
+        pt = maddr_to_page(pgd_val(*pgd));
+        list_add_tail(&pt->list, &d->arch.mm->pt_list);
+    }
 
     pud = pud_offset(pgd, mpaddr);
    if (pud_none(*pud))
+    {
         pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
+        pt = maddr_to_page(pud_val(*pud));
+        list_add_tail(&pt->list, &d->arch.mm->pt_list);
+    }
 
     pmd = pmd_offset(pud, mpaddr);
     if (pmd_none(*pmd))
+    {
         pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
 //      pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
+        pt = maddr_to_page(pmd_val(*pmd));
+        list_add_tail(&pt->list, &d->arch.mm->pt_list);
+    }
 
     pte = pte_offset_map(pmd, mpaddr);
     if (pte_none(*pte)) {
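The page-table bookkeeping added above is spread across several hunks: whenever assign_new_domain_page() or assign_domain_page() populates a pgd/pud/pmd level, the backing page is recorded on the new mm->pt_list, and arch_domain_destroy() later walks that list and frees every entry. The following is a rough, self-contained sketch of that idea only; the names (toy_mm, pt_page, alloc_pt_level, destroy_pt_pages) are invented stand-ins, not the patched Xen code.

    /* Sketch of the pt_list idea with a toy allocator instead of Xen's xenheap. */
    #include <stdio.h>
    #include <stdlib.h>

    struct pt_page {                /* stand-in for struct page on pt_list */
        void *va;                   /* the page-table page itself */
        struct pt_page *next;       /* stand-in for the list_head linkage */
    };

    struct toy_mm {                 /* stand-in for struct mm_struct */
        struct pt_page *pt_list;    /* every page-table page ever allocated */
    };

    /* Allocate one page-table level and remember it, mirroring the
     * list_add_tail() calls added after pgd/pud/pmd population. */
    static void *alloc_pt_level(struct toy_mm *mm)
    {
        struct pt_page *pt = malloc(sizeof(*pt));
        pt->va = calloc(1, 4096);
        pt->next = mm->pt_list;
        mm->pt_list = pt;
        return pt->va;
    }

    /* Free every recorded page-table page, mirroring the list walk and
     * free_xenheap_page() calls in the new arch_domain_destroy(). */
    static void destroy_pt_pages(struct toy_mm *mm)
    {
        while (mm->pt_list != NULL) {
            struct pt_page *pt = mm->pt_list;
            mm->pt_list = pt->next;
            free(pt->va);
            free(pt);
        }
    }

    int main(void)
    {
        struct toy_mm mm = { .pt_list = NULL };
        alloc_pt_level(&mm);        /* e.g. the pgd level */
        alloc_pt_level(&mm);        /* e.g. a pud level */
        destroy_pt_pages(&mm);      /* nothing leaks at "domain destroy" */
        printf("all page-table pages freed\n");
        return 0;
    }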
--- a/xen/arch/ia64/xen/regionreg.c    Fri Mar 10 08:23:39 2006 -0700
+++ b/xen/arch/ia64/xen/regionreg.c    Fri Mar 10 08:25:54 2006 -0700
@@ -157,7 +157,6 @@ int deallocate_rid_range(struct domain *
     int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
     int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;
 
-    return 1;    // KLUDGE ALERT
     //
     // not all domains will have allocated RIDs (physical mode loaders for instance)
     //
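With the early "return 1; // KLUDGE ALERT" gone, deallocate_rid_range() now actually runs its body and hands the domain's RID blocks back when the domain is destroyed. The rest of the function is not shown in this hunk, so the block-release loop below is only an assumption for illustration; ridblock_owner[], MAX_RID_BLOCKS, and the toy struct domain are invented stand-ins, not the Xen sources.

    struct domain { int domain_id; };        /* stand-in for Xen's struct domain */

    #define MAX_RID_BLOCKS 64                /* illustrative size only */
    static struct domain *ridblock_owner[MAX_RID_BLOCKS];

    /* Presumed shape of the now-active release path: clear ownership of every
     * RID block in [rid_block_start, rid_block_end]. */
    static int toy_deallocate_rid_range(struct domain *d,
                                        int rid_block_start, int rid_block_end)
    {
        int i;

        /* Not all domains allocated RIDs (physical mode loaders, for instance). */
        if (ridblock_owner[rid_block_start] != d)
            return 0;

        for (i = rid_block_start; i <= rid_block_end; i++)
            ridblock_owner[i] = NULL;

        return 1;
    }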
--- a/xen/include/asm-ia64/domain.h    Fri Mar 10 08:23:39 2006 -0700
+++ b/xen/include/asm-ia64/domain.h    Fri Mar 10 08:25:54 2006 -0700
@@ -102,6 +102,8 @@ struct mm_struct {
 #endif
     spinlock_t page_table_lock;    /* Protects task page tables and mm->rss */
 
+    struct list_head pt_list;    /* List of pagetable */
+
     struct list_head mmlist;    /* List of all active mm's.  These are globally strung
                                  * together off init_mm.mmlist, and are protected
                                  * by mmlist_lock
--- a/xen/include/asm-ia64/regionreg.h    Fri Mar 10 08:23:39 2006 -0700
+++ b/xen/include/asm-ia64/regionreg.h    Fri Mar 10 08:25:54 2006 -0700
@@ -64,4 +64,13 @@ vmMangleRID(unsigned long RIDVal)
 // since vmMangleRID is symmetric, use it for unmangling also
 #define vmUnmangleRID(x)    vmMangleRID(x)
 
+extern unsigned long allocate_metaphysical_rr(void);
+
+struct domain;
+extern int allocate_rid_range(struct domain *d, unsigned long ridbits);
+extern int deallocate_rid_range(struct domain *d);
+
+struct vcpu;
+extern void init_all_rr(struct vcpu *v);
+
 #endif /* !_REGIONREG_H_ */
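The prototypes exported here use bare forward declarations (struct domain; struct vcpu;) instead of pulling in the full definitions, which keeps regionreg.h lightweight for its users; domain.c can then drop its local extern declarations and simply include <asm/regionreg.h>, as the first hunk of this changeset does. A minimal, self-contained illustration of that header pattern, with invented names rather than Xen's:

    /* widget.h: pointer parameters only need an incomplete type. */
    struct widget;                              /* forward declaration only */
    extern int widget_reset(struct widget *w);  /* fine: w is just a pointer */

    /* widget.c: the complete type must be visible before dereferencing it. */
    struct widget { int state; };

    int widget_reset(struct widget *w)
    {
        w->state = 0;
        return 0;
    }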