default n
help
Provide helpers for allocators defining exclusively malloc and free
- config LIBUKALLOC_IFSTATS
- bool "Statistics interface"
- default n
- help
- Provide interfaces for querying allocator status
endif
(struct uk_alloc *a, void *ptr, unsigned long num_pages);
typedef int (*uk_alloc_addmem_func_t)
(struct uk_alloc *a, void *base, size_t size);
-#if CONFIG_LIBUKALLOC_IFSTATS
-typedef ssize_t (*uk_alloc_availmem_func_t)
+typedef ssize_t (*uk_alloc_getsize_func_t)
(struct uk_alloc *a);
-#endif
struct uk_alloc {
/* memory allocation */
/* page allocation interface */
uk_alloc_palloc_func_t palloc;
uk_alloc_pfree_func_t pfree;
-#if CONFIG_LIBUKALLOC_IFSTATS
- /* optional interface */
- uk_alloc_availmem_func_t availmem;
-#endif
+ /* optional but recommended interfaces */
+ uk_alloc_getsize_func_t maxalloc; /* biggest alloc req. (bytes) */
+ uk_alloc_getsize_func_t availmem; /* total memory available (bytes) */
/* optional interface */
uk_alloc_addmem_func_t addmem;
else
return -ENOTSUP;
}
-#if CONFIG_LIBUKALLOC_IFSTATS
+
+/* current biggest allocation request possible */
+static inline ssize_t uk_alloc_maxalloc(struct uk_alloc *a)
+{
+ UK_ASSERT(a);
+ if (!a->maxalloc)
+ return (ssize_t) -ENOTSUP;
+ return a->maxalloc(a);
+}
+
static inline ssize_t uk_alloc_availmem(struct uk_alloc *a)
{
UK_ASSERT(a);
if (!a->availmem)
	return (ssize_t) -ENOTSUP;
return a->availmem(a);
}
-#endif /* CONFIG_LIBUKALLOC_IFSTATS */
#ifdef __cplusplus
}
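
With the #if guards gone, callers can query an allocator's statistics unconditionally and only need to handle the -ENOTSUP fallback. A minimal sketch, assuming `a` was obtained from e.g. uk_alloc_get_default():

	ssize_t avail, maxreq;

	avail  = uk_alloc_availmem(a);
	maxreq = uk_alloc_maxalloc(a);
	if (avail < 0 || maxreq < 0)
		uk_pr_info("allocator does not report statistics\n");
	else
		uk_pr_info("%ld B available, largest request: %ld B\n",
			   (long) avail, (long) maxreq);
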
* palloc() or pfree()
*/
#define uk_alloc_init_malloc(a, malloc_f, calloc_f, realloc_f, free_f, \
- posix_memalign_f, memalign_f, addmem_f) \
+ posix_memalign_f, memalign_f, maxalloc_f, \
+ availmem_f, addmem_f) \
do { \
(a)->malloc = (malloc_f); \
(a)->calloc = (calloc_f); \
(a)->realloc = (realloc_f); \
(a)->posix_memalign = (posix_memalign_f); \
(a)->memalign = (memalign_f); \
(a)->free = (free_f); \
(a)->palloc = uk_palloc_compat; \
(a)->pfree = uk_pfree_compat; \
+ (a)->availmem = (availmem_f); \
+ (a)->maxalloc = (maxalloc_f); \
(a)->addmem = (addmem_f); \
\
uk_alloc_register((a)); \
} while (0)
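
An allocator built on this macro now hands its statistics callbacks over at registration time; a NULL callback keeps the corresponding accessor returning -ENOTSUP. Illustrative only, with hypothetical myalloc_* functions:

	uk_alloc_init_malloc(a, myalloc_malloc, myalloc_calloc, myalloc_realloc,
			     myalloc_free, myalloc_posix_memalign,
			     uk_memalign_compat, myalloc_maxalloc,
			     myalloc_availmem, NULL);
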
#if CONFIG_LIBUKALLOC_IFMALLOC
-#define uk_alloc_init_malloc_ifmalloc(a, malloc_f, free_f, addmem_f) \
+#define uk_alloc_init_malloc_ifmalloc(a, malloc_f, free_f, maxalloc_f, \
+ availmem_f, addmem_f) \
do { \
(a)->malloc = uk_malloc_ifmalloc; \
(a)->calloc = uk_calloc_compat; \
(a)->free = uk_free_ifmalloc; \
(a)->palloc = uk_palloc_compat; \
(a)->pfree = uk_pfree_compat; \
+ (a)->availmem = (availmem_f); \
+ (a)->maxalloc = (maxalloc_f); \
(a)->addmem = (addmem_f); \
\
uk_alloc_register((a)); \
b->nr_free_pages += nr_pages;
}
-#if CONFIG_LIBUKALLOC_IFSTATS
static ssize_t bbuddy_availmem(struct uk_alloc *a)
{
struct uk_bbpalloc *b;
b = (struct uk_bbpalloc *)&a->priv;
return (ssize_t) b->nr_free_pages << __PAGE_SHIFT;
}
-#endif
/* return log of the next power of two of passed number */
static inline unsigned long num_pages_to_order(unsigned long num_pages)
/* initialize and register allocator interface */
uk_alloc_init_palloc(a, bbuddy_palloc, bbuddy_pfree,
bbuddy_addmem);
-#if CONFIG_LIBUKALLOC_IFSTATS
a->availmem = bbuddy_availmem;
-#endif
if (max > min + metalen) {
/* add left memory - ignore return value */
_prepend_free_obj(p, obj[i]);
}
-#if CONFIG_LIBUKALLOC_IFSTATS
static ssize_t pool_availmem(struct uk_alloc *a)
{
struct uk_allocpool *p = ukalloc2pool(a);
- return (size_t) p->free_obj_count * p->obj_len;
+ return (ssize_t) (p->free_obj_count * p->obj_len);
+}
+
+static ssize_t pool_maxalloc(struct uk_alloc *a)
+{
+ struct uk_allocpool *p = ukalloc2pool(a);
+
+ return (ssize_t) p->obj_len;
}
-#endif
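
For the pool allocator the two statistics are straightforward: maxalloc reports the fixed object length, availmem the number of free objects times that length. Roughly, for a pool of 64 objects of 256 B each (assuming the pool handle is converted with uk_allocpool2ukalloc()):

	struct uk_alloc *a = uk_allocpool2ukalloc(p);

	uk_alloc_maxalloc(a); /* 256 */
	uk_alloc_availmem(a); /* 64 * 256 = 16384; shrinks by 256 per object taken */
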
size_t uk_allocpool_reqmem(unsigned int obj_count, size_t obj_len,
size_t obj_align)
pool_free,
pool_posix_memalign,
uk_memalign_compat,
+ pool_maxalloc,
+ pool_availmem,
NULL);
-#if CONFIG_LIBUKALLOC_IFSTATS
- p->self.availmem = pool_availmem;
-#endif
uk_pr_debug("%p: Pool created (%"__PRIsz" B): %u objs of %"__PRIsz" B, aligned to %"__PRIsz" B\n",
p, len, p->obj_count, p->obj_len, p->obj_align);
"ukallocregion\n", a);
}
+/* NOTE: `uk_allocregion_leftspace()` backs both `maxalloc` and `availmem`:
+ * for this region allocator the largest possible allocation is exactly the
+ * total memory still available in the region.
+ */
+static ssize_t uk_allocregion_leftspace(struct uk_alloc *a)
+{
+ struct uk_allocregion *b;
+
+ UK_ASSERT(a != NULL);
+
+ b = (struct uk_allocregion *) &a->priv;
+
+ UK_ASSERT(b != NULL);
+
+ return (ssize_t) ((uintptr_t) b->heap_top - (uintptr_t) b->heap_base);
+}
+
int uk_allocregion_addmem(struct uk_alloc *a __unused, void *base __unused,
size_t size __unused)
{
uk_alloc_init_malloc(a, uk_allocregion_malloc, uk_calloc_compat,
uk_realloc_compat, uk_allocregion_free,
uk_allocregion_posix_memalign,
- uk_memalign_compat, NULL);
+ uk_memalign_compat, uk_allocregion_leftspace,
+ uk_allocregion_leftspace,
+ uk_allocregion_addmem);
return a;
}
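
Since the region allocator only ever hands out memory from a single contiguous region, both statistics shrink together as allocations are made. A rough sanity check (exact numbers depend on internal metadata and alignment):

	ssize_t before = uk_alloc_availmem(a);
	void *p = uk_malloc(a, 1024);
	ssize_t after = uk_alloc_availmem(a);

	/* if p != NULL: after <= before - 1024, and
	 * uk_alloc_maxalloc(a) == after for this allocator
	 */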