ia64/xen-unstable
changeset 1941:a3385e3413dc
bitkeeper revision 1.1108.28.1 (4106e7efzfLYJJxhDUfFLjrg1-JKEw)
Finish merge of old domain allocation code with the buddy-allocator system.
Now just needs exporting to priv guest OSes so they can allocate multi-page
physmem chunks. Then to fix Linux's pci_alloc_consistent().
| author | kaf24@scramble.cl.cam.ac.uk |
| --- | --- |
| date | Tue Jul 27 23:40:31 2004 +0000 (2004-07-27) |
| parents | 1e13ae874e9c |
| children | d3c2a4bdc6b4 |
| files | xen/arch/x86/domain.c xen/arch/x86/shadow.c xen/common/dom_mem_ops.c xen/common/domain.c xen/common/page_alloc.c xen/include/asm-x86/mm.h xen/include/xen/mm.h |
line diff
--- a/xen/arch/x86/domain.c    Tue Jul 27 19:37:43 2004 +0000
+++ b/xen/arch/x86/domain.c    Tue Jul 27 23:40:31 2004 +0000
@@ -476,7 +476,7 @@ void domain_relinquish_memory(struct dom
         put_page(page);
 
     /* Relinquish all pages on the domain's allocation list. */
-    spin_lock_recursive(&d->page_alloc_lock); /* may enter free_domain_page */
+    spin_lock_recursive(&d->page_alloc_lock); /* may enter free_domheap_page */
     list_for_each_safe ( ent, tmp, &d->page_list )
     {
         page = list_entry(ent, struct pfn_info, list);
--- a/xen/arch/x86/shadow.c    Tue Jul 27 19:37:43 2004 +0000
+++ b/xen/arch/x86/shadow.c    Tue Jul 27 23:40:31 2004 +0000
@@ -512,7 +512,7 @@ int shadow_mode_control(struct domain *d
 static inline struct pfn_info *alloc_shadow_page(struct mm_struct *m)
 {
     m->shadow_page_count++;
-    return alloc_domheap_page();
+    return alloc_domheap_page(NULL);
 }
 
 void unshadow_table( unsigned long gpfn, unsigned int type )
--- a/xen/common/dom_mem_ops.c    Tue Jul 27 19:37:43 2004 +0000
+++ b/xen/common/dom_mem_ops.c    Tue Jul 27 23:40:31 2004 +0000
@@ -24,7 +24,7 @@ static long alloc_dom_mem(struct domain
 
     for ( i = 0; i < nr_pages; i++ )
     {
-        if ( unlikely((page = alloc_domain_page(d)) == NULL) )
+        if ( unlikely((page = alloc_domheap_page(d)) == NULL) )
         {
             DPRINTK("Could not allocate a frame\n");
             break;
--- a/xen/common/domain.c    Tue Jul 27 19:37:43 2004 +0000
+++ b/xen/common/domain.c    Tue Jul 27 23:40:31 2004 +0000
@@ -15,7 +15,6 @@
 #include <xen/console.h>
 #include <asm/shadow.h>
 #include <hypervisor-ifs/dom0_ops.h>
-#include <asm/hardirq.h>
 #include <asm/domain_page.h>
 
 /* Both these structures are protected by the tasklist_lock. */
@@ -194,97 +193,6 @@ void domain_shutdown(u8 reason)
     __enter_scheduler();
 }
 
-struct pfn_info *alloc_domain_page(struct domain *d)
-{
-    struct pfn_info *page = NULL;
-    unsigned long mask, pfn_stamp, cpu_stamp;
-    int i;
-
-    ASSERT(!in_irq());
-
-    page = alloc_domheap_page();
-    if ( unlikely(page == NULL) )
-        return NULL;
-
-    if ( (mask = page->u.free.cpu_mask) != 0 )
-    {
-        pfn_stamp = page->tlbflush_timestamp;
-        for ( i = 0; (mask != 0) && (i < smp_num_cpus); i++ )
-        {
-            if ( mask & (1<<i) )
-            {
-                cpu_stamp = tlbflush_time[i];
-                if ( !NEED_FLUSH(cpu_stamp, pfn_stamp) )
-                    mask &= ~(1<<i);
-            }
-        }
-
-        if ( unlikely(mask != 0) )
-        {
-            flush_tlb_mask(mask);
-            perfc_incrc(need_flush_tlb_flush);
-        }
-    }
-
-    page->u.inuse.domain = d;
-    page->u.inuse.type_info = 0;
-    if ( d != NULL )
-    {
-        wmb(); /* Domain pointer must be visible before updating refcnt. */
-        spin_lock(&d->page_alloc_lock);
-        if ( unlikely(d->tot_pages >= d->max_pages) )
-        {
-            DPRINTK("Over-allocation for domain %u: %u >= %u\n",
-                    d->domain, d->tot_pages, d->max_pages);
-            spin_unlock(&d->page_alloc_lock);
-            page->u.inuse.domain = NULL;
-            goto free_and_exit;
-        }
-        list_add_tail(&page->list, &d->page_list);
-        page->u.inuse.count_info = PGC_allocated | 1;
-        if ( unlikely(d->tot_pages++ == 0) )
-            get_domain(d);
-        spin_unlock(&d->page_alloc_lock);
-    }
-
-    return page;
-
- free_and_exit:
-    free_domheap_page(page);
-    return NULL;
-}
-
-void free_domain_page(struct pfn_info *page)
-{
-    int drop_dom_ref;
-    struct domain *d = page->u.inuse.domain;
-
-    if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
-    {
-        spin_lock_recursive(&d->page_alloc_lock);
-        drop_dom_ref = (--d->xenheap_pages == 0);
-        spin_unlock_recursive(&d->page_alloc_lock);
-    }
-    else
-    {
-        page->tlbflush_timestamp = tlbflush_clock;
-        page->u.free.cpu_mask = 1 << d->processor;
-
-        /* NB. May recursively lock from domain_relinquish_memory(). */
-        spin_lock_recursive(&d->page_alloc_lock);
-        list_del(&page->list);
-        drop_dom_ref = (--d->tot_pages == 0);
-        spin_unlock_recursive(&d->page_alloc_lock);
-
-        page->u.inuse.count_info = 0;
-
-        free_domheap_page(page);
-    }
-
-    if ( drop_dom_ref )
-        put_domain(d);
-}
-
 unsigned int alloc_new_dom_mem(struct domain *d, unsigned int kbytes)
 {
     unsigned int alloc_pfns, nr_pages;
@@ -296,7 +204,7 @@ unsigned int alloc_new_dom_mem(struct do
     /* Grow the allocation if necessary. */
     for ( alloc_pfns = d->tot_pages; alloc_pfns < nr_pages; alloc_pfns++ )
     {
-        if ( unlikely((page=alloc_domain_page(d)) == NULL) )
+        if ( unlikely((page = alloc_domheap_page(d)) == NULL) )
         {
             domain_relinquish_memory(d);
             return -ENOMEM;
--- a/xen/common/page_alloc.c    Tue Jul 27 19:37:43 2004 +0000
+++ b/xen/common/page_alloc.c    Tue Jul 27 23:40:31 2004 +0000
@@ -27,6 +27,7 @@
 #include <asm/page.h>
 #include <xen/spinlock.h>
 #include <xen/slab.h>
+#include <xen/irq.h>
 
 
 /*********************
@@ -198,6 +199,9 @@ struct pfn_info *alloc_heap_pages(int zo
     struct pfn_info *pg;
     unsigned long flags;
 
+    if ( unlikely(order < MIN_ORDER) || unlikely(order > MAX_ORDER) )
+        return NULL;
+
     spin_lock_irqsave(&heap_lock, flags);
 
     /* Find smallest order which can satisfy the request. */
@@ -331,18 +335,116 @@ void init_domheap_pages(unsigned long ps
     init_heap_pages(MEMZONE_DOM, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
 }
 
-struct pfn_info *alloc_domheap_pages(int order)
+struct pfn_info *alloc_domheap_pages(struct domain *d, int order)
 {
-    struct pfn_info *pg = alloc_heap_pages(MEMZONE_DOM, order);
+    struct pfn_info *pg;
+    unsigned long mask, flushed_mask, pfn_stamp, cpu_stamp;
+    int i;
+
+    ASSERT(!in_irq());
+
+    if ( unlikely((pg = alloc_heap_pages(MEMZONE_DOM, order)) == NULL) )
+        return NULL;
+
+    flushed_mask = 0;
+    for ( i = 0; i < (1 << order); i++ )
+    {
+        pg[i].u.inuse.domain = NULL;
+        pg[i].u.inuse.type_info = 0;
+
+        if ( (mask = (pg[i].u.free.cpu_mask & ~flushed_mask)) != 0 )
+        {
+            pfn_stamp = pg[i].tlbflush_timestamp;
+            for ( i = 0; (mask != 0) && (i < smp_num_cpus); i++ )
+            {
+                if ( mask & (1<<i) )
+                {
+                    cpu_stamp = tlbflush_time[i];
+                    if ( !NEED_FLUSH(cpu_stamp, pfn_stamp) )
+                        mask &= ~(1<<i);
+                }
+            }
+
+            if ( unlikely(mask != 0) )
+            {
+                flush_tlb_mask(mask);
+                perfc_incrc(need_flush_tlb_flush);
+                flushed_mask |= mask;
+            }
+        }
+    }
+
+    if ( d == NULL )
+        return pg;
+
+    spin_lock(&d->page_alloc_lock);
+
+    if ( unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
+    {
+        DPRINTK("Over-allocation for domain %u: %u > %u\n",
+                d->domain, d->tot_pages + (1 << order), d->max_pages);
+        spin_unlock(&d->page_alloc_lock);
+        free_heap_pages(MEMZONE_DOM, pg, order);
+        return NULL;
+    }
+
+    if ( unlikely(d->tot_pages == 0) )
+        get_domain(d);
+
+    d->tot_pages += 1 << order;
+
+    for ( i = 0; i < (1 << order); i++ )
+    {
+        pg[i].u.inuse.domain = d;
+        wmb(); /* Domain pointer must be visible before updating refcnt. */
+        pg->u.inuse.count_info = PGC_allocated | 1;
+        list_add_tail(&pg->list, &d->page_list);
+    }
+
+    spin_unlock(&d->page_alloc_lock);
+
     return pg;
 }
 
 void free_domheap_pages(struct pfn_info *pg, int order)
 {
-    free_heap_pages(MEMZONE_DOM, pg, order);
+    int i, drop_dom_ref;
+    struct domain *d = pg->u.inuse.domain;
+
+    if ( unlikely(IS_XEN_HEAP_FRAME(pg)) )
+    {
+        spin_lock_recursive(&d->page_alloc_lock);
+        d->xenheap_pages -= 1 << order;
+        drop_dom_ref = (d->xenheap_pages == 0);
+        spin_unlock_recursive(&d->page_alloc_lock);
+    }
+    else
+    {
+        /* NB. May recursively lock from domain_relinquish_memory(). */
+        spin_lock_recursive(&d->page_alloc_lock);
+
+        for ( i = 0; i < (1 << order); i++ )
+        {
+            pg[i].tlbflush_timestamp = tlbflush_clock;
+            pg[i].u.inuse.count_info = 0;
+            pg[i].u.free.cpu_mask = 1 << d->processor;
+            list_del(&pg[i].list);
+        }
+
+        d->tot_pages -= 1 << order;
+        drop_dom_ref = (d->tot_pages == 0);
+
+        spin_unlock_recursive(&d->page_alloc_lock);
+
+        free_heap_pages(MEMZONE_DOM, pg, order);
+    }
+
+    if ( drop_dom_ref )
+        put_domain(d);
 }
 
 unsigned long avail_domheap_pages(void)
 {
     return avail[MEMZONE_DOM];
 }
+
--- a/xen/include/asm-x86/mm.h    Tue Jul 27 19:37:43 2004 +0000
+++ b/xen/include/asm-x86/mm.h    Tue Jul 27 23:40:31 2004 +0000
@@ -114,9 +114,6 @@ extern unsigned long frame_table_size;
 extern unsigned long max_page;
 void init_frametable(void *frametable_vstart, unsigned long nr_pages);
 
-struct pfn_info *alloc_domain_page(struct domain *d);
-void free_domain_page(struct pfn_info *page);
-
 int alloc_page_type(struct pfn_info *page, unsigned int type);
 void free_page_type(struct pfn_info *page, unsigned int type);
 
@@ -131,7 +128,7 @@ static inline void put_page(struct pfn_i
     while ( unlikely((y = cmpxchg(&page->u.inuse.count_info, x, nx)) != x) );
 
     if ( unlikely((nx & PGC_count_mask) == 0) )
-        free_domain_page(page);
+        free_domheap_page(page);
 }
 
--- a/xen/include/xen/mm.h    Tue Jul 27 19:37:43 2004 +0000
+++ b/xen/include/xen/mm.h    Tue Jul 27 23:40:31 2004 +0000
@@ -2,7 +2,8 @@
 #ifndef __XEN_MM_H__
 #define __XEN_MM_H__
 
-#include <asm/mm.h>
+struct domain;
+struct pfn_info;
 
 /* Generic allocator */
 unsigned long init_heap_allocator(
@@ -20,10 +21,12 @@ void free_xenheap_pages(unsigned long p,
 
 /* Domain suballocator */
 void init_domheap_pages(unsigned long ps, unsigned long pe);
-struct pfn_info *alloc_domheap_pages(int order);
+struct pfn_info *alloc_domheap_pages(struct domain *d, int order);
 void free_domheap_pages(struct pfn_info *pg, int order);
 unsigned long avail_domheap_pages(void);
-#define alloc_domheap_page() (alloc_domheap_pages(0))
+#define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
 #define free_domheap_page(_p) (free_domheap_pages(_p,0))
 
+#include <asm/mm.h>
+
 #endif /* __XEN_MM_H__ */
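For reference, a minimal sketch of how a caller might use the reworked interface declared above to obtain a multi-page physmem chunk for a domain. Only the alloc_domheap_pages()/free_domheap_pages() prototypes and their NULL-on-failure behaviour come from this changeset; the helper name grow_domain_by_chunk and the surrounding error handling are hypothetical illustration.

/* Hypothetical illustration only: charge one contiguous 2^order-page chunk
 * from the domain heap to domain d, via the new interface in xen/include/xen/mm.h. */
static int grow_domain_by_chunk(struct domain *d, int order)
{
    struct pfn_info *pg;

    /* Returns NULL on a bad order or if d would exceed its max_pages limit. */
    if ( unlikely((pg = alloc_domheap_pages(d, order)) == NULL) )
        return -ENOMEM;

    /* ... map the 1 << order contiguous frames into the guest here ... */

    /* On a later error path the whole chunk is returned in one call:
     *     free_domheap_pages(pg, order);                              */
    return 0;
}

Existing single-page call sites keep working through the alloc_domheap_page(_d) and free_domheap_page(_p) macros, which simply pass order 0.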