#endif /* CONFIG_UKPLAT_MEMRNAME */
} __packed __align(__SIZEOF_LONG__);
+/**
+ * Check whether the memory region descriptor overlaps with [pstart, pend) in
+ * the physical address space.
+ *
+ * The descriptor's physical range is [mrd->pbase, mrd->pbase + mrd->len).
+ * NOTE(review): exact boundary semantics come from the RANGE_OVERLAP macro
+ * (defined elsewhere); presumably both ranges are treated as half-open so
+ * that merely touching ranges do not count as overlapping — confirm against
+ * the macro definition.
+ *
+ * @param mrd
+ * Pointer to the memory region descriptor to check against
+ * @param pstart
+ * Start of the physical memory region
+ * @param pend
+ * End of the physical memory region
+ * @return
+ * Zero if the two specified regions have no overlap, a non-zero value
+ * otherwise
+ */
+static inline int
+ukplat_memregion_desc_overlap(const struct ukplat_memregion_desc *mrd,
+ __paddr_t pstart, __paddr_t pend)
+{
+ return RANGE_OVERLAP(mrd->pbase, mrd->len, pstart, pend - pstart);
+}
+
/**
* Returns the number of available memory regions
*/
return (int)i;
}
+/**
+ * Insert a new region into the memory region list. This extends
+ * ukplat_memregion_list_insert by carving out the parts of the new range that
+ * overlap pre-existing typed regions: only the non-overlapping sub-ranges are
+ * inserted. Inserted sub-ranges keep the virtual-to-physical offset of the
+ * original range.
+ * If there is not enough space for all resulting regions, the function will
+ * insert as many as possible and then return an error.
+ *
+ * @param list
+ * The memory region list to insert the range into
+ * @param mrd
+ * The memory range to insert
+ * @param min_size
+ * The minimum size an inserted region has to have. Regions smaller than this
+ * size won't be added. Setting this parameter to zero will disable this
+ * behavior.
+ * @return
+ * Zero if the operation was successful, a negative errno otherwise
+ */
+static inline int
+ukplat_memregion_list_insert_split_phys(struct ukplat_memregion_list *list,
+					const struct ukplat_memregion_desc *mrd,
+					const __sz min_size)
+{
+	struct ukplat_memregion_desc *mrdp;
+	struct ukplat_memregion_desc mrdc;
+	__paddr_t pstart, pend;
+	__vaddr_t voffset;
+	int i;
+	int rc;
+
+	/* Preserve the caller's virtual-to-physical offset for every piece */
+	voffset = mrd->vbase - mrd->pbase;
+	pstart = mrd->pbase;
+	pend = mrd->pbase + mrd->len;
+
+	mrdc = *mrd;
+
+	/* TODO: The following code does not make use of the tracked iteration
+	 * index to insert elements at the correct location and instead uses the
+	 * generic insertion routine. For large memory region lists this could
+	 * be potentially slow.
+	 */
+	for (i = 0; i < (int)list->count; i++) {
+		mrdp = &list->mrds[i];
+		if (!ukplat_memregion_desc_overlap(mrdp, pstart, pend))
+			continue;
+
+		/* Early exit for sorted lists: all following regions start at
+		 * or beyond the end of the remaining range
+		 */
+		if (pend <= mrdp->pbase)
+			break;
+
+		/* Untyped descriptors do not reserve memory; ignore them */
+		if (!mrdp->type)
+			continue;
+
+		if (pstart < mrdp->pbase) {
+			/* Some part of the inserted region is before the
+			 * overlapping region. Try to insert that part if it's
+			 * large enough.
+			 */
+			mrdc.pbase = pstart;
+			mrdc.vbase = pstart + voffset;
+			mrdc.len = mrdp->pbase - pstart;
+
+			if (mrdc.len >= min_size) {
+				rc = ukplat_memregion_list_insert(list, &mrdc);
+				if (unlikely(rc < 0))
+					return rc;
+			}
+		}
+
+		/* Continue behind the overlapping region. If that region
+		 * extends to or past the end of the new range, nothing is
+		 * left to insert. Without this check, pend - pstart would
+		 * wrap around below (__paddr_t is unsigned) and a huge bogus
+		 * region would be inserted.
+		 */
+		pstart = mrdp->pbase + mrdp->len;
+		if (pstart >= pend)
+			return 0;
+	}
+
+	if (pend - pstart < min_size)
+		return 0;
+
+	mrdc.pbase = pstart;
+	mrdc.vbase = pstart + voffset;
+	mrdc.len = pend - pstart;
+
+	/* Add the remaining region */
+	rc = ukplat_memregion_list_insert(list, &mrdc);
+	return rc < 0 ? rc : 0;
+}
+
/**
* Delete the specified region from the memory list.
*
void _ukplat_entry(struct lcpu *lcpu, struct ukplat_bootinfo *bi);
-static inline int mrd_overlap(__paddr_t pstart, __paddr_t pend,
- const struct ukplat_memregion_desc *mrd)
-{
- return ((pend > mrd->pbase) && (pstart < mrd->pbase + mrd->len));
-}
-
static inline void mrd_insert(struct ukplat_bootinfo *bi,
const struct ukplat_memregion_desc *mrd)
{
{
struct ukplat_bootinfo *bi;
struct ukplat_memregion_desc mrd = {0};
- struct ukplat_memregion_desc *mrdp;
multiboot_memory_map_t *m;
multiboot_module_t *mods;
__sz offset, cmdline_len;
__paddr_t start, end;
__u32 i;
+ int rc;
bi = ukplat_bootinfo_get();
if (unlikely(!bi))
if (end <= start)
continue;
+ mrd.pbase = start;
+ mrd.vbase = start; /* 1:1 mapping */
+ mrd.len = end - start;
+
if (m->type == MULTIBOOT_MEMORY_AVAILABLE) {
mrd.type = UKPLAT_MEMRT_FREE;
mrd.flags = UKPLAT_MEMRF_READ |
UKPLAT_MEMRF_WRITE;
- for (i = 0; i < bi->mrds.count; i++) {
- ukplat_memregion_get(i, &mrdp);
- if (!mrd_overlap(start, end, mrdp))
- continue;
-
- if (!mrdp->type)
- continue;
-
- if (start < mrdp->pbase) {
- mrd.pbase = start;
- mrd.vbase = start; /* 1:1 map */
- mrd.len = mrdp->pbase - start;
-
- if (mrd.len >= PAGE_SIZE)
- mrd_insert(bi, &mrd);
- }
-
- start = mrdp->pbase + mrdp->len;
- }
-
- if (end - start < PAGE_SIZE)
- continue;
+ rc = ukplat_memregion_list_insert_split_phys(
+ &bi->mrds, &mrd, __PAGE_SIZE);
+ if (unlikely(rc < 0))
+ multiboot_crash("Unable to add region",
+ rc);
} else {
mrd.type = UKPLAT_MEMRT_RESERVED;
mrd.flags = UKPLAT_MEMRF_READ |
/* We assume that reserved regions cannot
* overlap with loaded modules.
*/
+ mrd_insert(bi, &mrd);
}
-
- mrd.pbase = start;
- mrd.vbase = start; /* 1:1 mapping */
- mrd.len = end - start;
-
- mrd_insert(bi, &mrd);
}
}