#include <mini-os/os.h>
#include <mini-os/balloon.h>
+#include <mini-os/errno.h>
#include <mini-os/lib.h>
#include <mini-os/mm.h>
unsigned long virt_kernel_area_end = VIRT_KERNEL_AREA;
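+/* Mark all p2m entries from start_idx onwards as invalid. */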
+static void p2m_invalidate(unsigned long *list, unsigned long start_idx)
+{
+    unsigned long idx;
+
+    for ( idx = start_idx; idx < P2M_ENTRIES; idx++ )
+        list[idx] = INVALID_P2M_ENTRY;
+}
+
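+/* Return the virtual address of the top level (L3) frame of the p2m list. */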
+static inline unsigned long *p2m_l3list(void)
+{
+    return mfn_to_virt(HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list);
+}
+
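+/* Translate a p2m entry (an mfn) into a virtual address, or NULL if invalid. */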
+static inline unsigned long *p2m_to_virt(unsigned long p2m)
+{
+    return ( p2m == INVALID_P2M_ENTRY ) ? NULL : mfn_to_virt(p2m);
+}
+
void arch_remap_p2m(unsigned long max_pfn)
{
    unsigned long pfn;
+    unsigned long *l3_list, *l2_list, *l1_list;
+
+    l3_list = p2m_l3list();
+    l2_list = p2m_to_virt(l3_list[L3_P2M_IDX(max_pfn - 1)]);
+    l1_list = p2m_to_virt(l2_list[L2_P2M_IDX(max_pfn - 1)]);
+
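+    /* Invalidate the p2m entries above the last populated pfn (max_pfn - 1). */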
+    p2m_invalidate(l3_list, L3_P2M_IDX(max_pfn - 1) + 1);
+    p2m_invalidate(l2_list, L2_P2M_IDX(max_pfn - 1) + 1);
+    p2m_invalidate(l1_list, L1_P2M_IDX(max_pfn - 1) + 1);
    if ( p2m_pages(nr_max_pages) <= p2m_pages(max_pfn) )
        return;
    ASSERT(virt_kernel_area_end <= VIRT_DEMAND_AREA);
}
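+/*
+ * Expand the p2m list to cover max_pfn pages, allocating any missing
+ * intermediate (L2) and leaf (L1) frames of the p2m tree.
+ */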
+int arch_expand_p2m(unsigned long max_pfn)
+{
+    unsigned long pfn;
+    unsigned long *l1_list, *l2_list, *l3_list;
+
+    p2m_chk_pfn(max_pfn - 1);
+    l3_list = p2m_l3list();
+
+    /* Add missing p2m frames, one L1 page (P2M_ENTRIES pfns) per iteration. */
+    for ( pfn = (HYPERVISOR_shared_info->arch.max_pfn + P2M_MASK) & ~P2M_MASK;
+          pfn < max_pfn; pfn += P2M_ENTRIES )
+    {
+        l2_list = p2m_to_virt(l3_list[L3_P2M_IDX(pfn)]);
+        if ( !l2_list )
+        {
+            /* Allocate a missing L2 frame and hook it into the L3 list. */
+            l2_list = (unsigned long*)alloc_page();
+            if ( !l2_list )
+                return -ENOMEM;
+            p2m_invalidate(l2_list, 0);
+            l3_list[L3_P2M_IDX(pfn)] = virt_to_mfn(l2_list);
+        }
+        l1_list = p2m_to_virt(l2_list[L2_P2M_IDX(pfn)]);
+        if ( !l1_list )
+        {
+            /* Allocate a missing L1 frame, hook it into the L2 list and map
+             * it into the virtual p2m area. */
+            l1_list = (unsigned long*)alloc_page();
+            if ( !l1_list )
+                return -ENOMEM;
+            p2m_invalidate(l1_list, 0);
+            l2_list[L2_P2M_IDX(pfn)] = virt_to_mfn(l1_list);
+
+            if ( map_frame_rw((unsigned long)(phys_to_machine_mapping + pfn),
+                              l2_list[L2_P2M_IDX(pfn)]) )
+                return -ENOMEM;
+        }
+    }
+
+    HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
+
+    /* Make sure the new last page can be mapped. */
+    if ( !need_pgt((unsigned long)pfn_to_virt(max_pfn - 1)) )
+        return -ENOMEM;
+
+    return 0;
+}
+
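+/*
+ * Add a newly populated page: enter the mfn into the p2m list and map the
+ * page at its pfn_to_virt() address.
+ */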
+void arch_pfn_add(unsigned long pfn, unsigned long mfn)
+{
+    mmu_update_t mmu_updates[1];
+    pgentry_t *pgt;
+    int rc;
+
+    phys_to_machine_mapping[pfn] = mfn;
+
+    pgt = need_pgt((unsigned long)pfn_to_virt(pfn));
+    ASSERT(pgt);
+    mmu_updates[0].ptr = virt_to_mach(pgt) | MMU_NORMAL_PT_UPDATE;
+    mmu_updates[0].val = (pgentry_t)(mfn << PAGE_SHIFT) |
+                         _PAGE_PRESENT | _PAGE_RW;
+    rc = HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF);
+    if ( rc < 0 )
+    {
+ printk("ERROR: build_pagetable(): PTE could not be updated\n");
+ printk(" mmu_update failed with rc=%d\n", rc);
+ do_exit();
+ }
+}
+
#endif
#include <mini-os/os.h>
#include <mini-os/balloon.h>
+#include <mini-os/errno.h>
#include <mini-os/lib.h>
#include <xen/xen.h>
#include <xen/memory.h>
unsigned long nr_max_pages;
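+/* Current number of populated memory pages of the domain. */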
+unsigned long nr_mem_pages;
void get_max_pages(void)
{
    virt_kernel_area_end += round_pgup((nr_max_pages + 1) >> (PAGE_SHIFT + 3));
    ASSERT(virt_kernel_area_end <= VIRT_DEMAND_AREA);
}
+
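+/* Maximum number of pages added to the domain per balloon_up() call. */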
+#define N_BALLOON_FRAMES 64
+static unsigned long balloon_frames[N_BALLOON_FRAMES];
+
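+/*
+ * Balloon up by at most n_pages pages. Returns the number of pages actually
+ * added, 0 if none could be added, or a negative error code.
+ */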
+int balloon_up(unsigned long n_pages)
+{
+    unsigned long page, pfn;
+    int rc;
+    struct xen_memory_reservation reservation = {
+        .address_bits = 0,
+        .extent_order = 0,
+        .domid = DOMID_SELF
+    };
+
+    if ( n_pages > nr_max_pages - nr_mem_pages )
+        n_pages = nr_max_pages - nr_mem_pages;
+    if ( n_pages > N_BALLOON_FRAMES )
+        n_pages = N_BALLOON_FRAMES;
+
+    /* Resize alloc_bitmap if necessary. */
+    while ( mm_alloc_bitmap_size * 8 < nr_mem_pages + n_pages )
+    {
+        page = alloc_page();
+        if ( !page )
+            return -ENOMEM;
+
+        memset((void *)page, ~0, PAGE_SIZE);
+        if ( map_frame_rw((unsigned long)mm_alloc_bitmap + mm_alloc_bitmap_size,
+                          virt_to_mfn(page)) )
+        {
+            free_page((void *)page);
+            return -ENOMEM;
+        }
+
+        mm_alloc_bitmap_size += PAGE_SIZE;
+    }
+
+    /* Expand the p2m list to cover the new pages. */
+    rc = arch_expand_p2m(nr_mem_pages + n_pages);
+    if ( rc )
+        return rc;
+
+    /* Get new memory from hypervisor. */
+    for ( pfn = 0; pfn < n_pages; pfn++ )
+    {
+        balloon_frames[pfn] = nr_mem_pages + pfn;
+    }
+    set_xen_guest_handle(reservation.extent_start, balloon_frames);
+    reservation.nr_extents = n_pages;
+    rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+    if ( rc <= 0 )
+        return rc;
+
+    /* Enter the new pages into the p2m and hand them to the page allocator. */
+    for ( pfn = 0; pfn < rc; pfn++ )
+    {
+        arch_pfn_add(nr_mem_pages + pfn, balloon_frames[pfn]);
+        free_page(pfn_to_virt(nr_mem_pages + pfn));
+    }
+
+    nr_mem_pages += rc;
+
+    return rc;
+}