#include <arch_mm.h>
#include <mini-os/errno.h>
#include <mini-os/hypervisor.h>
+#include <mini-os/balloon.h>
#include <libfdt.h>
#include <lib.h>
}
device_tree = new_device_tree;
*max_pfn_p = to_phys(new_device_tree) >> PAGE_SHIFT;
+
+ balloon_set_nr_pages(*max_pfn_p, *max_pfn_p);
}
void arch_init_demand_mapping_area(void)
p2m_invalidate(l2_list, L2_P2M_IDX(max_pfn - 1) + 1);
p2m_invalidate(l1_list, L1_P2M_IDX(max_pfn - 1) + 1);
- if ( p2m_pages(nr_max_pages) <= p2m_pages(max_pfn) )
+ if ( p2m_pages(nr_max_pfn) <= p2m_pages(max_pfn) )
return;
- new_p2m = alloc_virt_kernel(p2m_pages(nr_max_pages));
+ new_p2m = alloc_virt_kernel(p2m_pages(nr_max_pfn));
for ( pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES )
{
map_frame_rw(new_p2m + PAGE_SIZE * (pfn / P2M_ENTRIES),
pt_base = (pgentry_t *)si->pt_base;
first_free_pfn = PFN_UP(to_phys(pt_base)) + si->nr_pt_frames;
last_free_pfn = si->nr_pages;
+ balloon_set_nr_pages(last_free_pfn, last_free_pfn);
}
#else
#include <mini-os/desc.h>
}
last_free_pfn = e820_get_maxpfn(ret);
+ balloon_set_nr_pages(ret, last_free_pfn);
}
#endif
#include <mini-os/os.h>
#include <mini-os/balloon.h>
+#include <mini-os/e820.h>
#include <mini-os/errno.h>
#include <mini-os/lib.h>
#include <mini-os/paravirt.h>
#include <xen/xen.h>
#include <xen/memory.h>
-unsigned long nr_max_pages;
-unsigned long nr_mem_pages;
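+/*
+ * nr_max_pfn / nr_max_pages: highest usable pfn and maximum number of pages
+ * the domain may grow to.
+ * nr_mem_pfn / nr_mem_pages: highest pfn currently backed by RAM and number
+ * of currently populated pages (they differ when the memory map has holes).
+ */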
+unsigned long nr_max_pfn;
+
+static unsigned long nr_max_pages;
+static unsigned long nr_mem_pfn;
+static unsigned long nr_mem_pages;
+
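+/*
+ * Called from the architecture specific memory setup to record the initial
+ * memory size: @pages populated RAM pages, with @pfn being the highest pfn
+ * backed by RAM.
+ */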
+void balloon_set_nr_pages(unsigned long pages, unsigned long pfn)
+{
+ nr_mem_pages = pages;
+ nr_mem_pfn = pfn;
+}
void get_max_pages(void)
{
nr_max_pages = ret;
printk("Maximum memory size: %ld pages\n", nr_max_pages);
+
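+ /* Translate the page count into the highest usable pfn via the memory map. */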
+ nr_max_pfn = e820_get_maxpfn(nr_max_pages);
}
void mm_alloc_bitmap_remap(void)
{
unsigned long i, new_bitmap;
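+ /* Nothing to do if the bitmap already covers all pfns up to nr_max_pfn. */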
- if ( mm_alloc_bitmap_size >= ((nr_max_pages + 1) >> 3) )
+ if ( mm_alloc_bitmap_size >= ((nr_max_pfn + 1) >> 3) )
return;
- new_bitmap = alloc_virt_kernel(PFN_UP((nr_max_pages + 1) >> 3));
+ new_bitmap = alloc_virt_kernel(PFN_UP((nr_max_pfn + 1) >> 3));
for ( i = 0; i < mm_alloc_bitmap_size; i += PAGE_SIZE )
{
map_frame_rw(new_bitmap + i,
int balloon_up(unsigned long n_pages)
{
- unsigned long page, pfn;
+ unsigned long page, pfn, start_pfn;
int rc;
struct xen_memory_reservation reservation = {
.domid = DOMID_SELF
if ( n_pages > N_BALLOON_FRAMES )
n_pages = N_BALLOON_FRAMES;
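+ /*
+ * Find the first RAM pfn which is not yet populated and limit the request
+ * to the contiguous RAM area starting at that pfn.
+ */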
+ start_pfn = e820_get_maxpfn(nr_mem_pages + 1) - 1;
+ n_pages = e820_get_max_contig_pages(start_pfn, n_pages);
+
/* Resize alloc_bitmap if necessary. */
- while ( mm_alloc_bitmap_size * 8 < nr_mem_pages + n_pages )
+ while ( mm_alloc_bitmap_size * 8 < start_pfn + n_pages )
{
page = alloc_page();
if ( !page )
mm_alloc_bitmap_size += PAGE_SIZE;
}
- rc = arch_expand_p2m(nr_mem_pages + n_pages);
+ rc = arch_expand_p2m(start_pfn + n_pages);
if ( rc )
return rc;
/* Get new memory from hypervisor. */
for ( pfn = 0; pfn < n_pages; pfn++ )
{
- balloon_frames[pfn] = nr_mem_pages + pfn;
+ balloon_frames[pfn] = start_pfn + pfn;
}
set_xen_guest_handle(reservation.extent_start, balloon_frames);
reservation.nr_extents = n_pages;
for ( pfn = 0; pfn < rc; pfn++ )
{
- arch_pfn_add(nr_mem_pages + pfn, balloon_frames[pfn]);
+ arch_pfn_add(start_pfn + pfn, balloon_frames[pfn]);
- free_page(pfn_to_virt(nr_mem_pages + pfn));
+ free_page(pfn_to_virt(start_pfn + pfn));
}
int i;
unsigned long pfns = 0, start = 0;
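+ /* The memory map needs to be read only once. */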
- e820_get_memmap();
+ if ( !e820_entries )
+ e820_get_memmap();
for ( i = 0; i < e820_entries; i++ )
{
return start + pfns;
}
+
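+/*
+ * Return the number of contiguous RAM pages starting at @pfn according to
+ * the memory map, limited to at most @pages. Returns 0 if @pfn is not in a
+ * RAM region.
+ */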
+unsigned long e820_get_max_contig_pages(unsigned long pfn, unsigned long pages)
+{
+ int i;
+ unsigned long end;
+
+ for ( i = 0; i < e820_entries && e820_map[i].addr <= (pfn << PAGE_SHIFT);
+ i++ )
+ {
+ end = (e820_map[i].addr + e820_map[i].size) >> PAGE_SHIFT;
+ if ( e820_map[i].type != E820_RAM || end <= pfn )
+ continue;
+
+ return ((end - pfn) > pages) ? pages : end - pfn;
+ }
+
+ return 0;
+}
*/
#define BALLOON_EMERGENCY_PAGES 64
-extern unsigned long nr_max_pages;
-extern unsigned long nr_mem_pages;
+extern unsigned long nr_max_pfn;
void get_max_pages(void);
int balloon_up(unsigned long n_pages);
+void balloon_set_nr_pages(unsigned long pages, unsigned long pfn);
void mm_alloc_bitmap_remap(void);
void arch_pfn_add(unsigned long pfn, unsigned long mfn);
{
return needed <= nr_free_pages;
}
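+/* Stub for builds without CONFIG_BALLOON. */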
+static inline void balloon_set_nr_pages(unsigned long pages, unsigned long pfn) { }
#endif /* CONFIG_BALLOON */
#endif /* _BALLOON_H_ */
extern unsigned e820_entries;
unsigned long e820_get_maxpfn(unsigned long pages);
+unsigned long e820_get_max_contig_pages(unsigned long pfn, unsigned long pages);
#endif /*__E820_HEADER*/
printk("MM: Init\n");
- get_max_pages();
arch_init_mm(&start_pfn, &max_pfn);
+ get_max_pages();
+
/*
* now we can initialise the page allocator
*/
arch_init_p2m(max_pfn);
arch_init_demand_mapping_area();
-
-#ifdef CONFIG_BALLOON
- nr_mem_pages = max_pfn;
-#endif
}
void fini_mm(void)