ia64/xen-unstable
changeset 15826:7e79e7f01f3d
Implement ia64 continuable domain destroy.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author | kfraser@localhost.localdomain |
date | Fri Aug 31 15:46:37 2007 +0100 (2007-08-31) |
parents | bd59dd48e208 |
children | 230000d3ef32 |
files | xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/mm.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/mm.h |
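Before the per-file hunks, here is a condensed view of the mechanism this changeset adds: teardown remembers how far it has progressed in a per-domain offset, polls for pending preemption as it works, and returns -EAGAIN so the destroy operation can be re-issued and resume where it stopped. The sketch below is a deliberately flat, hypothetical rendering of that idea — only mm_teardown_offset, hypercall_preempt_check() and the -EAGAIN convention come from the patch; struct dom, preempt_pending() and teardown_one_page() are made-up stand-ins. The real patch applies the same pattern to the ia64 three-level page-table walk in mm.c.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 0x4000UL                      /* illustrative page size */

struct dom {
    unsigned long nr_pages;
    unsigned long mm_teardown_offset;           /* progress marker kept across calls */
};

/* Stand-in for hypercall_preempt_check(): pretend preemption becomes
 * pending after every fourth page of work. */
static bool preempt_pending(unsigned long done)
{
    return done != 0 && (done % 4) == 0;
}

static void teardown_one_page(struct dom *d, unsigned long offset)
{
    (void)d;
    printf("freeing page at 0x%lx\n", offset);
}

/* Returns 0 when everything is torn down, -EAGAIN when the caller should
 * invoke it again later; progress survives in d->mm_teardown_offset. */
static int mm_teardown_sketch(struct dom *d)
{
    unsigned long done = 0;

    for (unsigned long i = 0; i < d->nr_pages; i++) {
        unsigned long cur = i * PAGE_SIZE;
        /* Strict '>' against the end of the page's range: the page where we
         * were preempted gets revisited on resume; the real code tolerates
         * that because an already-cleared PTE fails pte_present(). */
        if (d->mm_teardown_offset > cur + PAGE_SIZE)
            continue;                           /* finished by an earlier call */
        d->mm_teardown_offset = cur;            /* record progress before the work */
        teardown_one_page(d, cur);
        if (preempt_pending(++done))
            return -EAGAIN;
    }
    return 0;
}

int main(void)
{
    struct dom d = { .nr_pages = 10, .mm_teardown_offset = 0 };
    while (mm_teardown_sketch(&d) == -EAGAIN)
        printf("preempted, continuing later\n");
    return 0;
}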
line diff
--- a/xen/arch/ia64/xen/domain.c	Fri Aug 31 15:44:38 2007 +0100
+++ b/xen/arch/ia64/xen/domain.c	Fri Aug 31 15:46:37 2007 +0100
@@ -563,6 +563,7 @@ int arch_domain_create(struct domain *d)
         goto fail_nomem;
 
     memset(&d->arch.mm, 0, sizeof(d->arch.mm));
+    d->arch.mm_teardown_offset = 0;
 
     if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
         goto fail_nomem;
@@ -938,12 +939,15 @@ static void relinquish_memory(struct dom
 
 int domain_relinquish_resources(struct domain *d)
 {
+    int ret;
     /* Relinquish guest resources for VT-i domain. */
     if (d->vcpu[0] && VMX_DOMAIN(d->vcpu[0]))
         vmx_relinquish_guest_resources(d);
 
     /* Tear down shadow mode stuff. */
-    mm_teardown(d);
+    ret = mm_teardown(d);
+    if (ret != 0)
+        return ret;
 
     /* Relinquish every page of memory. */
     relinquish_memory(d, &d->xenpage_list);
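The domain.c hunk above makes domain_relinquish_resources() propagate mm_teardown()'s -EAGAIN instead of swallowing it. The common destroy path that consumes this return value is not part of this patch; the fragment below is only a hypothetical illustration of how a caller can drive such an interface to completion. The stub domain_relinquish_resources() and the destroy_domain() driver are made up, and in Xen the retry is driven from the common destroy-domain path (for example by returning -EAGAIN to the toolstack or creating a hypercall continuation) rather than by a busy loop like this.

#include <errno.h>
#include <stdio.h>

struct domain { int teardown_calls_left; };

/* Stub standing in for the patched domain_relinquish_resources(): pretend
 * the first few invocations are preempted and only the last one finishes. */
static int domain_relinquish_resources(struct domain *d)
{
    if (d->teardown_calls_left-- > 1)
        return -EAGAIN;
    return 0;
}

/* Hypothetical driver: keep re-invoking the relinquish step until it
 * reports completion, treating -EAGAIN as "call me again later". */
static int destroy_domain(struct domain *d)
{
    int rc;

    do {
        rc = domain_relinquish_resources(d);
        if (rc == -EAGAIN)
            printf("relinquish preempted, continuing...\n");
        else if (rc != 0)
            return rc;                  /* unexpected error */
    } while (rc == -EAGAIN);

    printf("all resources relinquished\n");
    return 0;
}

int main(void)
{
    struct domain d = { .teardown_calls_left = 3 };
    return destroy_domain(&d);
}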
--- a/xen/arch/ia64/xen/mm.c	Fri Aug 31 15:44:38 2007 +0100
+++ b/xen/arch/ia64/xen/mm.c	Fri Aug 31 15:46:37 2007 +0100
@@ -215,6 +215,18 @@ alloc_dom_xen_and_dom_io(void)
     BUG_ON(dom_io == NULL);
 }
 
+static int
+mm_teardown_can_skip(struct domain* d, unsigned long offset)
+{
+    return d->arch.mm_teardown_offset > offset;
+}
+
+static void
+mm_teardown_update_offset(struct domain* d, unsigned long offset)
+{
+    d->arch.mm_teardown_offset = offset;
+}
+
 static void
 mm_teardown_pte(struct domain* d, volatile pte_t* pte, unsigned long offset)
 {
@@ -252,46 +264,73 @@ mm_teardown_pte(struct domain* d, volati
     }
 }
 
-static void
+static int
 mm_teardown_pmd(struct domain* d, volatile pmd_t* pmd, unsigned long offset)
 {
     unsigned long i;
     volatile pte_t* pte = pte_offset_map(pmd, offset);
 
     for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
-        if (!pte_present(*pte)) // acquire semantics
+        unsigned long cur_offset = offset + (i << PAGE_SHIFT);
+        if (mm_teardown_can_skip(d, cur_offset + PAGE_SIZE))
+            continue;
+        if (!pte_present(*pte)) { // acquire semantics
+            mm_teardown_update_offset(d, cur_offset);
             continue;
-        mm_teardown_pte(d, pte, offset + (i << PAGE_SHIFT));
+        }
+        mm_teardown_update_offset(d, cur_offset);
+        mm_teardown_pte(d, pte, cur_offset);
+        if (hypercall_preempt_check())
+            return -EAGAIN;
     }
+    return 0;
 }
 
-static void
+static int
 mm_teardown_pud(struct domain* d, volatile pud_t *pud, unsigned long offset)
 {
     unsigned long i;
     volatile pmd_t *pmd = pmd_offset(pud, offset);
 
     for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
-        if (!pmd_present(*pmd)) // acquire semantics
+        unsigned long cur_offset = offset + (i << PMD_SHIFT);
+        if (mm_teardown_can_skip(d, cur_offset + PMD_SIZE))
+            continue;
+        if (!pmd_present(*pmd)) { // acquire semantics
+            mm_teardown_update_offset(d, cur_offset);
             continue;
-        mm_teardown_pmd(d, pmd, offset + (i << PMD_SHIFT));
+        }
+        if (mm_teardown_pmd(d, pmd, cur_offset))
+            return -EAGAIN;
     }
+    return 0;
 }
 
-static void
+static int
 mm_teardown_pgd(struct domain* d, volatile pgd_t *pgd, unsigned long offset)
 {
     unsigned long i;
     volatile pud_t *pud = pud_offset(pgd, offset);
 
     for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
-        if (!pud_present(*pud)) // acquire semantics
+        unsigned long cur_offset = offset + (i << PUD_SHIFT);
+#ifndef __PAGETABLE_PUD_FOLDED
+        if (mm_teardown_can_skip(d, cur_offset + PUD_SIZE))
             continue;
-        mm_teardown_pud(d, pud, offset + (i << PUD_SHIFT));
+#endif
+        if (!pud_present(*pud)) { // acquire semantics
+#ifndef __PAGETABLE_PUD_FOLDED
+            mm_teardown_update_offset(d, cur_offset);
+#endif
+            continue;
+        }
+        if (mm_teardown_pud(d, pud, cur_offset))
+            return -EAGAIN;
     }
+    return 0;
 }
 
-void
+int
 mm_teardown(struct domain* d)
 {
     struct mm_struct* mm = &d->arch.mm;
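The interesting part of the mm.c hunk is how the resume logic composes across page-table levels: each level skips an entry outright when the saved offset is already past the end of the address range that entry covers (hence the cur_offset + PMD_SIZE / PAGE_SIZE arguments to mm_teardown_can_skip()), and a preemption detected at the leaf level bubbles up as -EAGAIN through every level. The toy two-level walk below mimics that structure under made-up sizes and names (struct dom, teardown_l1/teardown_l2, CHUNK_SIZE); it is not ia64 page-table code.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE   0x100UL                     /* toy sizes, not ia64's */
#define PTRS_PER_L1 4
#define CHUNK_SIZE  (PTRS_PER_L1 * PAGE_SIZE)   /* range covered by one L2 entry */
#define PTRS_PER_L2 4

struct dom { unsigned long teardown_offset; unsigned long pages_done; };

static bool can_skip(struct dom *d, unsigned long offset)
{
    return d->teardown_offset > offset;         /* offset = end of the range */
}

static bool preempt_pending(struct dom *d)
{
    return d->pages_done != 0 && (d->pages_done % 5) == 0;
}

/* Leaf level: analogous to mm_teardown_pmd() walking PTEs. */
static int teardown_l1(struct dom *d, unsigned long base)
{
    for (unsigned long i = 0; i < PTRS_PER_L1; i++) {
        unsigned long cur = base + i * PAGE_SIZE;
        if (can_skip(d, cur + PAGE_SIZE))
            continue;                           /* finished in an earlier pass */
        d->teardown_offset = cur;               /* record progress */
        printf("  free page 0x%lx\n", cur);     /* revisits are possible on resume */
        d->pages_done++;
        if (preempt_pending(d))
            return -EAGAIN;
    }
    return 0;
}

/* Upper level: analogous to mm_teardown_pud()/mm_teardown_pgd(). */
static int teardown_l2(struct dom *d)
{
    for (unsigned long i = 0; i < PTRS_PER_L2; i++) {
        unsigned long cur = i * CHUNK_SIZE;
        if (can_skip(d, cur + CHUNK_SIZE))
            continue;                           /* whole chunk already done */
        if (teardown_l1(d, cur))
            return -EAGAIN;                     /* propagate preemption upward */
    }
    return 0;
}

int main(void)
{
    struct dom d = { 0, 0 };
    while (teardown_l2(&d) == -EAGAIN)
        printf("preempted at offset 0x%lx, resuming\n", d.teardown_offset);
    printf("teardown finished\n");
    return 0;
}

Because the skip test is a strict '>' against the end of an entry's range, the entry where teardown was interrupted is walked again on resume; in the real code that revisit is presumably harmless, since already-cleared PTEs simply fail the pte_present() check on the second pass.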
--- a/xen/include/asm-ia64/domain.h	Fri Aug 31 15:44:38 2007 +0100
+++ b/xen/include/asm-ia64/domain.h	Fri Aug 31 15:46:37 2007 +0100
@@ -171,6 +171,9 @@ struct arch_domain {
 #ifdef CONFIG_XEN_IA64_TLB_TRACK
     struct tlb_track* tlb_track;
 #endif
+
+    /* for domctl_destroy_domain continuation */
+    unsigned long mm_teardown_offset;
 };
 #define INT_ENABLE_OFFSET(v)              \
     (sizeof(vcpu_info_t) * (v)->vcpu_id + \
--- a/xen/include/asm-ia64/mm.h	Fri Aug 31 15:44:38 2007 +0100
+++ b/xen/include/asm-ia64/mm.h	Fri Aug 31 15:46:37 2007 +0100
@@ -417,7 +417,7 @@ extern unsigned long totalram_pages;
 extern int nr_swap_pages;
 
 extern void alloc_dom_xen_and_dom_io(void);
-extern void mm_teardown(struct domain* d);
+extern int mm_teardown(struct domain* d);
 extern void mm_final_teardown(struct domain* d);
 extern struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
 extern void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);