#ifdef CONFIG_BALLOON
-unsigned long virt_kernel_area_end; /* TODO: find a virtual area */
-
+/*
+ * arch_pfn_add() hook: intentionally empty on this arch/config —
+ * NOTE(review): presumably no per-frame setup is needed here (direct
+ * mapping); confirm against the other arch implementations.
+ */
void arch_pfn_add(unsigned long pfn, unsigned long mfn)
{
}
return to_virt(gnttab_table);
}
+
+/*
+ * map_frame_virt() — direct-map variant: translate machine frame @mfn
+ * straight to its virtual address via mfn_to_virt().  Unlike the
+ * demand-mapping variant, this cannot fail (never returns 0).
+ * NOTE(review): assumes this configuration direct-maps all machine
+ * frames — confirm.
+ */
+unsigned long map_frame_virt(unsigned long mfn)
+{
+ return mfn_to_virt(mfn);
+}
#include <mini-os/paravirt.h>
#ifdef CONFIG_BALLOON
-
-unsigned long virt_kernel_area_end = VIRT_KERNEL_AREA;
-
#ifdef CONFIG_PARAVIRT
static void p2m_invalidate(unsigned long *list, unsigned long start_idx)
{
+/*
+ * Remap the p2m list into a freshly reserved range of kernel virtual
+ * address space large enough for nr_max_pages entries, so the list can
+ * grow when memory is ballooned in.  No-op when the existing mapping
+ * already covers nr_max_pages.
+ */
void arch_remap_p2m(unsigned long max_pfn)
{
- unsigned long pfn;
+ unsigned long pfn, new_p2m;
unsigned long *l3_list, *l2_list, *l1_list;
l3_list = p2m_l3list();
if ( p2m_pages(nr_max_pages) <= p2m_pages(max_pfn) )
return;
+ /* Reserve virtual space for the maximum possible p2m size up front. */
+ new_p2m = alloc_virt_kernel(p2m_pages(nr_max_pages));
for ( pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES )
{
+ /* Map each existing p2m page into the new virtual range. */
- map_frame_rw(virt_kernel_area_end + PAGE_SIZE * (pfn / P2M_ENTRIES),
+ map_frame_rw(new_p2m + PAGE_SIZE * (pfn / P2M_ENTRIES),
virt_to_mfn(phys_to_machine_mapping + pfn));
}
- phys_to_machine_mapping = (unsigned long *)virt_kernel_area_end;
+ phys_to_machine_mapping = (unsigned long *)new_p2m;
printk("remapped p2m list to %p\n", phys_to_machine_mapping);
-
- virt_kernel_area_end += PAGE_SIZE * p2m_pages(nr_max_pages);
- ASSERT(virt_kernel_area_end <= VIRT_DEMAND_AREA);
}
int arch_expand_p2m(unsigned long max_pfn)
pgentry_t *pt_base;
static unsigned long first_free_pfn;
static unsigned long last_free_pfn;
+static unsigned long virt_kernel_area_end = VIRT_KERNEL_AREA;
extern char stack[];
extern void page_walk(unsigned long va);
HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
return map_frames(frames, nr_grant_frames);
}
+
+/*
+ * Reserve @n_pages consecutive pages of kernel virtual address space.
+ *
+ * Simple bump allocator advancing virt_kernel_area_end from
+ * VIRT_KERNEL_AREA toward VIRT_DEMAND_AREA; there is no free path here —
+ * callers keep the returned range for the lifetime of the system.
+ * Exhaustion is caught by the ASSERT below.
+ *
+ * @n_pages: number of pages of virtual space to reserve
+ * @return:  virtual address of the start of the reserved range
+ */
+unsigned long alloc_virt_kernel(unsigned n_pages)
+{
+ unsigned long addr;
+
+ addr = virt_kernel_area_end;
+ virt_kernel_area_end += PAGE_SIZE * n_pages;
+ ASSERT(virt_kernel_area_end <= VIRT_DEMAND_AREA);
+
+ return addr;
+}
+
+/*
+ * Map machine frame @mfn read/write at a newly reserved kernel virtual
+ * page.
+ *
+ * @mfn:    machine frame number to map
+ * @return: virtual address of the mapping, or 0 if map_frame_rw() fails.
+ *
+ * NOTE(review): on failure the page reserved via alloc_virt_kernel() is
+ * not handed back (the allocator is bump-only), so that virtual page is
+ * leaked — acceptable only if failures are rare/fatal; confirm.
+ */
+unsigned long map_frame_virt(unsigned long mfn)
+{
+ unsigned long addr;
+
+ addr = alloc_virt_kernel(1);
+ if ( map_frame_rw(addr, mfn) )
+ return 0;
+
+ return addr;
+}
+/*
+ * Move the page-allocator bitmap into freshly reserved virtual space
+ * sized for nr_max_pages, so it can later grow when memory is ballooned
+ * in.  No-op when the current bitmap already covers nr_max_pages.  Only
+ * the currently populated mm_alloc_bitmap_size bytes are remapped here.
+ */
void mm_alloc_bitmap_remap(void)
{
- unsigned long i;
+ unsigned long i, new_bitmap;
if ( mm_alloc_bitmap_size >= ((nr_max_pages + 1) >> 3) )
return;
+ /* One bit per page => (nr_max_pages + 1) / 8 bytes, rounded up to pages. */
+ new_bitmap = alloc_virt_kernel(PFN_UP((nr_max_pages + 1) >> 3));
for ( i = 0; i < mm_alloc_bitmap_size; i += PAGE_SIZE )
{
+ /* Alias the existing bitmap pages at their new virtual location. */
- map_frame_rw(virt_kernel_area_end + i,
+ map_frame_rw(new_bitmap + i,
virt_to_mfn((unsigned long)(mm_alloc_bitmap) + i));
}
- mm_alloc_bitmap = (unsigned long *)virt_kernel_area_end;
- virt_kernel_area_end += round_pgup((nr_max_pages + 1) >> 3);
- ASSERT(virt_kernel_area_end <= VIRT_DEMAND_AREA);
+ mm_alloc_bitmap = (unsigned long *)new_bitmap;
}
#define N_BALLOON_FRAMES 64
#define BALLOON_EMERGENCY_PAGES 64
extern unsigned long nr_max_pages;
-extern unsigned long virt_kernel_area_end;
extern unsigned long nr_mem_pages;
void get_max_pages(void);
unsigned long increment, domid_t id, int *err, unsigned long prot);
int unmap_frames(unsigned long va, unsigned long num_frames);
int map_frame_rw(unsigned long addr, unsigned long mfn);
+unsigned long map_frame_virt(unsigned long mfn);
#ifdef HAVE_LIBC
extern unsigned long heap, brk, heap_mapped, heap_end;
#endif
pgentry_t *need_pgt(unsigned long addr);
void arch_mm_preinit(void *p);
+unsigned long alloc_virt_kernel(unsigned n_pages);
#endif