xenbits.xensource.com Git - unikraft/unikraft.git/commitdiff
plat: Generalize memory initialization
authorSergiu Moga <sergiu.moga@protonmail.com>
Mon, 15 May 2023 05:44:45 +0000 (08:44 +0300)
committerUnikraft <monkey@unikraft.io>
Fri, 11 Aug 2023 10:18:45 +0000 (10:18 +0000)
Re-define `mem_init` into `ukplat_mem_init` that does the same thing,
but does not assume what regions to keep or unmap. Instead, it relies
on the platform/architecture defined unmap memory region to do this.

Signed-off-by: Sergiu Moga <sergiu.moga@protonmail.com>
Reviewed-by: Michalis Pappas <michalis@unikraft.io>
Approved-by: Razvan Deaconescu <razvand@unikraft.io>
Tested-by: Unikraft CI <monkey@unikraft.io>
GitHub-Closes: #848

include/uk/plat/memory.h
plat/common/memory.c
plat/kvm/x86/setup.c

index c1b32411eb8720a0d9eec1cc4fb37e8be80855a3..1a11a665e1c548207a5e4f19fcef20a692958585 100644 (file)
@@ -237,6 +237,21 @@ struct uk_alloc *ukplat_memallocator_get(void);
  */
 void *ukplat_memregion_alloc(__sz size, int type, __u16 flags);
 
+/**
+ * Initializes the memory mapping based on the platform or architecture defined
+ * unmapping memory region descriptor (named `bpt_unmap_mrd`). Based on this
+ * descriptor, the function surrounds the kernel image with the unmappings,
+ * adding an unmapping region before and after the kernel. Therefore,
+ * `bpt_unmap_mrd`'s range must contain the kernel image range.
+ *
+ * @return
+ *   0 on success, not 0 otherwise.
+ */
+int ukplat_mem_init(void);
+
 #ifdef __cplusplus
 }
 #endif
index e92c3228b8ac35d66e801df98791e7790b203e2a..3d256eb2260ea45c0e4de7aabb5a9a02559dd908 100644 (file)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-
+#include <stdbool.h>
+#include <stddef.h>
 #include <uk/plat/common/sections.h>
 #include <uk/plat/common/bootinfo.h>
 #include <uk/asm/limits.h>
 #include <uk/alloc.h>
-#include <stddef.h>
-#include <stdbool.h>
+
+extern struct ukplat_memregion_desc bpt_unmap_mrd;
 
 static struct uk_alloc *plat_allocator;
 
@@ -353,3 +354,116 @@ int ukplat_memregion_get(int i, struct ukplat_memregion_desc **mrd)
        *mrd = &bi->mrds.mrds[i];
        return 0;
 }
+
+#ifdef CONFIG_HAVE_PAGING
+#include <uk/plat/paging.h>
+
+/* Insert two untyped UKPLAT_MEMRF_UNMAP regions into `bi`'s memory region
+ * list so that the range described by `bpt_unmap_mrd` gets unmapped both
+ * before and after the kernel image, while the image itself stays mapped.
+ * Returns 0 on success (or if `bpt_unmap_mrd` is empty), a negative value
+ * propagated from `ukplat_memregion_list_insert()` otherwise.
+ *
+ * NOTE(review): `unmap_end` is computed as ALIGN_DOWN(vbase) +
+ * ALIGN_DOWN(len), which differs from ALIGN_DOWN(vbase + len) when
+ * `bpt_unmap_mrd.vbase` is not page-aligned -- confirm callers always
+ * provide a page-aligned descriptor.
+ */
+static int ukplat_memregion_list_insert_unmaps(struct ukplat_bootinfo *bi)
+{
+       __vaddr_t unmap_start, unmap_end;
+       int rc;
+
+       /* Nothing to do if the platform defined no unmap region */
+       if (!bpt_unmap_mrd.len)
+               return 0;
+
+       /* Be PIE aware: split the unmap memory region so that we do not unmap
+        * the Kernel image.
+        */
+       unmap_start = ALIGN_DOWN(bpt_unmap_mrd.vbase, __PAGE_SIZE);
+       unmap_end = unmap_start + ALIGN_DOWN(bpt_unmap_mrd.len, __PAGE_SIZE);
+
+       /* After Kernel image: unmap [ALIGN_UP(__END), unmap_end) */
+       rc = ukplat_memregion_list_insert(&bi->mrds,
+                       &(struct ukplat_memregion_desc){
+                               .vbase = ALIGN_UP(__END, __PAGE_SIZE),
+                               .pbase = 0,
+                               .len   = unmap_end -
+                                        ALIGN_UP(__END, __PAGE_SIZE),
+                               .type  = 0,
+                               .flags = UKPLAT_MEMRF_UNMAP,
+                       });
+       if (unlikely(rc < 0))
+               return rc;
+
+       /* Before Kernel image: unmap [unmap_start, ALIGN_DOWN(__BASE_ADDR)) */
+       return ukplat_memregion_list_insert(&bi->mrds,
+                       &(struct ukplat_memregion_desc){
+                               .vbase = unmap_start,
+                               .pbase = 0,
+                               .len   = ALIGN_DOWN(__BASE_ADDR, __PAGE_SIZE) -
+                                        unmap_start,
+                               .type  = 0,
+                               .flags = UKPLAT_MEMRF_UNMAP,
+                       });
+}
+
+/* Paging variant: insert the temporary unmap helper regions, build the
+ * page tables via ukplat_paging_init(), then drop the helpers again.
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int ukplat_mem_init(void)
+{
+       struct ukplat_bootinfo *bi = ukplat_bootinfo_get();
+       int rc;
+
+       UK_ASSERT(bi);
+
+       rc = ukplat_memregion_list_insert_unmaps(bi);
+       if (unlikely(rc < 0))
+               return rc;
+
+       rc = ukplat_paging_init();
+       if (unlikely(rc < 0))
+               return rc;
+
+       /* Remove the two memory regions inserted by
+        * ukplat_memregion_list_insert_unmaps(). Since both were inserted
+        * with a zero `pbase` and we never add regions starting from the
+        * zero-page, they are guaranteed to be the first in the list.
+        */
+       ukplat_memregion_list_delete(&bi->mrds, 0);
+       ukplat_memregion_list_delete(&bi->mrds, 0);
+
+       return 0;
+}
+#else /* CONFIG_HAVE_PAGING */
+/* No-paging variant: the static boot page table maps only the range
+ * described by `bpt_unmap_mrd`, so trim or drop free memory regions
+ * above its end to keep them away from the heap. Always returns 0.
+ *
+ * NOTE(review): unlike the paging path, this reads `bpt_unmap_mrd`
+ * without checking `len`; an empty descriptor would yield an
+ * `unmap_end` near zero and delete all free regions -- confirm every
+ * platform selecting this path defines a non-empty `bpt_unmap_mrd`.
+ */
+int ukplat_mem_init(void)
+{
+       struct ukplat_bootinfo *bi = ukplat_bootinfo_get();
+       struct ukplat_memregion_desc *mrdp;
+       __vaddr_t unmap_end;
+       int i;
+
+       UK_ASSERT(bi);
+
+       /* First address past the statically mapped area, page-aligned down */
+       unmap_end = ALIGN_DOWN(bpt_unmap_mrd.vbase + bpt_unmap_mrd.len,
+                              __PAGE_SIZE);
+       /* Walk from the tail: regions are ordered by address, so we can stop
+        * at the first one that lies completely inside the mapped area.
+        */
+       for (i = (int)bi->mrds.count - 1; i >= 0; i--) {
+               ukplat_memregion_get(i, &mrdp);
+               if (mrdp->vbase >= unmap_end) {
+                       /* Region is outside the mapped area */
+                       uk_pr_info("Memory %012lx-%012lx outside mapped area\n",
+                                  mrdp->vbase, mrdp->vbase + mrdp->len);
+
+                       if (mrdp->type == UKPLAT_MEMRT_FREE)
+                               ukplat_memregion_list_delete(&bi->mrds, i);
+               } else if (mrdp->vbase + mrdp->len > unmap_end) {
+                       /* Region overlaps with unmapped area */
+                       uk_pr_info("Memory %012lx-%012lx outside mapped area\n",
+                                  unmap_end,
+                                  mrdp->vbase + mrdp->len);
+
+                       /* Shrink the region so it ends at unmap_end */
+                       if (mrdp->type == UKPLAT_MEMRT_FREE)
+                               mrdp->len -= (mrdp->vbase + mrdp->len) -
+                                            unmap_end;
+
+                       /* Since regions are non-overlapping and ordered, we
+                        * can stop here, as the next region would be fully
+                        * mapped anyways
+                        */
+                       break;
+               } else {
+                       /* Region is fully mapped */
+                       break;
+               }
+       }
+
+       return 0;
+}
+#endif /* !CONFIG_HAVE_PAGING */
index 0e23a06271b5b9c96bb4201e6a2241027302e61d..86d2af303a110cf7043fd0074fc0c31ce6c50a78 100644 (file)
 #include <uk/plat/common/sections.h>
 #include <uk/plat/common/bootinfo.h>
 
-#ifdef CONFIG_HAVE_PAGING
-#include <uk/plat/paging.h>
-#include <uk/falloc.h>
-#endif /* CONFIG_HAVE_PAGING */
-
-#define PLATFORM_MAX_MEM_ADDR 0x100000000 /* 4 GiB */
-
-#ifdef CONFIG_HAVE_PAGING
-static int mem_init(struct ukplat_bootinfo *bi)
-{
-       int rc;
-
-       /* TODO: Until we generate the boot page table at compile time, we
-        * manually add an untyped unmap region to the boot info to force an
-        * unmapping of the 1:1 mapping after the kernel image before mapping
-        * only the necessary parts.
-        */
-       rc = ukplat_memregion_list_insert(&bi->mrds,
-               &(struct ukplat_memregion_desc){
-                       .vbase = PAGE_ALIGN_UP(__END),
-                       .pbase = 0,
-                       .len   = PLATFORM_MAX_MEM_ADDR - PAGE_ALIGN_UP(__END),
-                       .type  = 0,
-                       .flags = UKPLAT_MEMRF_UNMAP,
-               });
-       if (unlikely(rc < 0))
-               return rc;
-
-       return ukplat_paging_init();
-}
-#else /* CONFIG_HAVE_PAGING */
-static int mem_init(struct ukplat_bootinfo *bi)
-{
-       struct ukplat_memregion_desc *mrdp;
-       int i;
-
-       /* The static boot page table maps only the first 4 GiB. Remove all
-        * free memory regions above this limit so we won't use them for the
-        * heap. Start from the tail as the memory list is ordered by address.
-        * We can stop at the first area that is completely in the mapped area.
-        */
-       for (i = (int)bi->mrds.count - 1; i >= 0; i--) {
-               ukplat_memregion_get(i, &mrdp);
-               if (mrdp->vbase >= PLATFORM_MAX_MEM_ADDR) {
-                       /* Region is outside the mapped area */
-                       uk_pr_info("Memory %012lx-%012lx outside mapped area\n",
-                                  mrdp->vbase, mrdp->vbase + mrdp->len);
-
-                       if (mrdp->type == UKPLAT_MEMRT_FREE)
-                               ukplat_memregion_list_delete(&bi->mrds, i);
-               } else if (mrdp->vbase + mrdp->len > PLATFORM_MAX_MEM_ADDR) {
-                       /* Region overlaps with unmapped area */
-                       uk_pr_info("Memory %012lx-%012lx outside mapped area\n",
-                                  PLATFORM_MAX_MEM_ADDR,
-                                  mrdp->vbase + mrdp->len);
-
-                       if (mrdp->type == UKPLAT_MEMRT_FREE)
-                               mrdp->len -= (mrdp->vbase + mrdp->len) -
-                                               PLATFORM_MAX_MEM_ADDR;
-
-                       /* Since regions are non-overlapping and ordered, we
-                        * can stop here, as the next region would be fully
-                        * mapped anyways
-                        */
-                       break;
-               } else {
-                       /* Region is fully mapped */
-                       break;
-               }
-       }
-
-       return 0;
-}
-#endif /* !CONFIG_HAVE_PAGING */
-
 static char *cmdline;
 static __sz cmdline_len;
 
@@ -179,7 +104,7 @@ void _ukplat_entry(struct lcpu *lcpu, struct ukplat_bootinfo *bi)
        bstack = (void *)((__uptr)bstack + __STACK_SIZE);
 
        /* Initialize memory */
-       rc = mem_init(bi);
+       rc = ukplat_mem_init();
        if (unlikely(rc))
                UK_CRASH("Mem init failed: %d\n", rc);