mm: introduce arch_free_heap_page()
author     Andrew Cooper <andrew.cooper3@citrix.com>
           Wed, 9 Mar 2016 15:51:50 +0000 (16:51 +0100)
committer  Wei Liu <wei.liu2@citrix.com>
           Tue, 15 Mar 2016 16:32:35 +0000 (16:32 +0000)
common/page_alloc.c references d->arch.relmem_list, which only exists on x86.
This only compiles on ARM because page_list_del2() discards its second
argument.
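
For context: on configurations whose page lists are plain list_head lists (as on ARM), the page_list_del helpers reduce to straight list_del() calls, so the extra list arguments are never expanded and the nonexistent d->arch.relmem_list field is never compiled. A rough paraphrase of those helpers (an approximation, not the exact Xen definitions):

    /* Approximation of the non-compressed page-list helpers: both macros
     * ignore their list arguments, so a reference to a field that does not
     * exist on ARM is never expanded and never type-checked. */
    #define page_list_del(pg, l)        list_del(&(pg)->list)
    #define page_list_del2(pg, l1, l2)  list_del(&(pg)->list)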

Introduce a new common arch_free_heap_page() which only uses common lists in
struct domain, and allow an architecture to override this with a custom
alternative.  x86 then provides a custom arch_free_heap_page() which takes
care of managing d->arch.relmem_list.
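
As a result, the loop bodies in free_domheap_pages() expand differently per architecture. Roughly, paraphrasing the macro expansions from the hunks below for one page pg[i]:

    /* Common default (non-x86): drop the page from whichever per-domain
     * list it is on -- xenpage_list for Xen heap pages, page_list otherwise. */
    page_list_del(&pg[i], is_xen_heap_page(&pg[i]) ? &d->xenpage_list
                                                   : &d->page_list);

    /* x86 override: the same, but via page_list_del2() so that
     * d->arch.relmem_list is also kept consistent. */
    page_list_del2(&pg[i], is_xen_heap_page(&pg[i]) ? &d->xenpage_list
                                                    : &d->page_list,
                   &d->arch.relmem_list);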

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
xen/common/page_alloc.c
xen/include/asm-x86/mm.h
xen/include/xen/mm.h

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7179d677e7ec2ddeb39ece35e4ace770d588a285..22e8feb34c911f8875a168b7c65d6ec74839215c 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1837,7 +1837,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
         spin_lock_recursive(&d->page_alloc_lock);
 
         for ( i = 0; i < (1 << order); i++ )
-            page_list_del2(&pg[i], &d->xenpage_list, &d->arch.relmem_list);
+            arch_free_heap_page(d, &pg[i]);
 
         d->xenheap_pages -= 1 << order;
         drop_dom_ref = (d->xenheap_pages == 0);
@@ -1856,7 +1856,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
             for ( i = 0; i < (1 << order); i++ )
             {
                 BUG_ON((pg[i].u.inuse.type_info & PGT_count_mask) != 0);
-                page_list_del2(&pg[i], &d->page_list, &d->arch.relmem_list);
+                arch_free_heap_page(d, &pg[i]);
             }
 
             drop_dom_ref = !domain_adjust_tot_pages(d, -(1 << order));
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 23a40922202b8e0e74d03d763e756184629fa20b..b25942b1804f0006736997e69caa011133e84410 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -590,4 +590,9 @@ typedef struct mm_rwlock {
     const char        *locker_function; /* func that took it */
 } mm_rwlock_t;
 
+#define arch_free_heap_page(d, pg)                                      \
+    page_list_del2(pg, is_xen_heap_page(pg) ?                           \
+                       &(d)->xenpage_list : &(d)->page_list,            \
+                   &(d)->arch.relmem_list)
+
 #endif /* __ASM_X86_MM_H__ */
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index a795dd6001eff7c5dd942bbaf153e3efa5202318..e76de550bfc1e744204dcc6302ba5581fa68aa19 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -443,6 +443,12 @@ static inline unsigned int get_order_from_pages(unsigned long nr_pages)
 
 void scrub_one_page(struct page_info *);
 
+#ifndef arch_free_heap_page
+#define arch_free_heap_page(d, pg)                      \
+    page_list_del(pg, is_xen_heap_page(pg) ?            \
+                      &(d)->xenpage_list : &(d)->page_list)
+#endif
+
 int xenmem_add_to_physmap_one(struct domain *d, unsigned int space,
                               domid_t foreign_domid,
                               unsigned long idx, xen_pfn_t gpfn);