common/page_alloc.c references d->arch.relmem_list, which only exists on x86.
This code only compiles on ARM because ARM's implementation of
page_list_del2() discards its second argument.
Introduce a new common arch_free_heap_page() which only uses common lists in
struct domain, and allow an architecture to override this with a custom
alternative. x86 then provides a custom arch_free_heap_page() which takes
care of managing d->arch.relmem_list.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
spin_lock_recursive(&d->page_alloc_lock);
for ( i = 0; i < (1 << order); i++ )
- page_list_del2(&pg[i], &d->xenpage_list, &d->arch.relmem_list);
+ arch_free_heap_page(d, &pg[i]);
d->xenheap_pages -= 1 << order;
drop_dom_ref = (d->xenheap_pages == 0);
for ( i = 0; i < (1 << order); i++ )
{
BUG_ON((pg[i].u.inuse.type_info & PGT_count_mask) != 0);
- page_list_del2(&pg[i], &d->page_list, &d->arch.relmem_list);
+ arch_free_heap_page(d, &pg[i]);
}
drop_dom_ref = !domain_adjust_tot_pages(d, -(1 << order));
const char *locker_function; /* func that took it */
} mm_rwlock_t;
+/*
+ * x86 override of arch_free_heap_page(): unlink @pg from the domain's
+ * appropriate heap list (xenpage_list for Xen-heap pages, page_list
+ * otherwise) and also from d->arch.relmem_list, which exists only on
+ * x86 (NOTE(review): presumably the relinquish-memory bookkeeping list
+ * used during domain teardown -- confirm against relinquish_memory()).
+ */
+#define arch_free_heap_page(d, pg) \
+ page_list_del2(pg, is_xen_heap_page(pg) ? \
+ &(d)->xenpage_list : &(d)->page_list, \
+ &(d)->arch.relmem_list)
+
#endif /* __ASM_X86_MM_H__ */
void scrub_one_page(struct page_info *);
+/*
+ * Default arch_free_heap_page(): unlink @pg from the per-domain list it
+ * sits on -- xenpage_list for Xen-heap pages, page_list otherwise.
+ * An architecture can supply its own variant (e.g. x86, which must also
+ * maintain d->arch.relmem_list) by defining the macro before this point.
+ */
+#ifndef arch_free_heap_page
+#define arch_free_heap_page(d, pg) \
+ page_list_del(pg, is_xen_heap_page(pg) ? \
+ &(d)->xenpage_list : &(d)->page_list)
+#endif
+
int xenmem_add_to_physmap_one(struct domain *d, unsigned int space,
domid_t foreign_domid,
unsigned long idx, xen_pfn_t gpfn);