if (!_uk_alloc_head) {
_uk_alloc_head = a;
- a->next = NULL;
+ a->next = __NULL;
return 0;
}
while (this && this->next)
this = this->next;
this->next = a;
- a->next = NULL;
+ a->next = __NULL;
return 0;
}
static struct metadata_ifpages *uk_get_metadata(const void *ptr)
{
- uintptr_t metadata;
+ __uptr metadata;
/* a ptr less than or equal to the page size would mean that the actual
 * allocated object started at 0x0, i.e. it was NULL; a ptr within
 * sizeof(struct metadata_ifpages) past a page boundary would likewise
 * imply that the actual allocated object started at 0x0, because
 * we need space before the object to store metadata.
 */
- UK_ASSERT((uintptr_t) ptr >= __PAGE_SIZE +
+ UK_ASSERT((__uptr) ptr >= __PAGE_SIZE +
sizeof(struct metadata_ifpages));
- metadata = ALIGN_DOWN((uintptr_t) ptr, (uintptr_t) __PAGE_SIZE);
- if (metadata == (uintptr_t) ptr) {
+ metadata = ALIGN_DOWN((__uptr) ptr, (__uptr) __PAGE_SIZE);
+ if (metadata == (__uptr) ptr) {
/* special case: the memory was page-aligned.
 * In this case the metadata lies at the start of the
 * previous page, with the rest of that page unused.
 */
return (struct metadata_ifpages *) metadata;
}
-static size_t uk_getmallocsize(const void *ptr)
+static __sz uk_getmallocsize(const void *ptr)
{
struct metadata_ifpages *metadata = uk_get_metadata(ptr);
- return (size_t)metadata->base + (size_t)(metadata->num_pages) *
- __PAGE_SIZE - (size_t)ptr;
+ return (__sz)metadata->base + (__sz)(metadata->num_pages) *
+ __PAGE_SIZE - (__sz)ptr;
}
/* This is a very simple, naive implementation of malloc. It does not
 * provide locking support yet. Eventually, this should probably be
 * replaced by something better.
 */
-void *uk_malloc_ifpages(struct uk_alloc *a, size_t size)
+void *uk_malloc_ifpages(struct uk_alloc *a, __sz size)
{
- uintptr_t intptr;
+ __uptr intptr;
unsigned long num_pages;
struct metadata_ifpages *metadata;
- size_t realsize = sizeof(*metadata) + size;
+ __sz realsize = sizeof(*metadata) + size;
UK_ASSERT(a);
/* check for invalid size and overflow */
if (!size || realsize < size)
- return NULL;
+ return __NULL;
num_pages = size_to_num_pages(realsize);
- intptr = (uintptr_t)uk_palloc(a, num_pages);
+ intptr = (__uptr)uk_palloc(a, num_pages);
if (!intptr)
- return NULL;
+ return __NULL;
metadata = (struct metadata_ifpages *) intptr;
metadata->num_pages = num_pages;
metadata = uk_get_metadata(ptr);
- UK_ASSERT(metadata->base != NULL);
+ UK_ASSERT(metadata->base != __NULL);
UK_ASSERT(metadata->num_pages != 0);
uk_pfree(a, metadata->base, metadata->num_pages);
}
-void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, size_t size)
+void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, __sz size)
{
void *retptr;
- size_t mallocsize;
+ __sz mallocsize;
UK_ASSERT(a);
if (!ptr)
if (ptr && !size) {
uk_free_ifpages(a, ptr);
- return NULL;
+ return __NULL;
}
retptr = uk_malloc_ifpages(a, size);
if (!retptr)
- return NULL;
+ return __NULL;
mallocsize = uk_getmallocsize(ptr);
}
int uk_posix_memalign_ifpages(struct uk_alloc *a,
- void **memptr, size_t align, size_t size)
+ void **memptr, __sz align, __sz size)
{
struct metadata_ifpages *metadata;
unsigned long num_pages;
- uintptr_t intptr;
- size_t realsize, padding;
+ __uptr intptr;
+ __sz realsize, padding;
UK_ASSERT(a);
if (((align - 1) & align) != 0
return EINVAL;
num_pages = size_to_num_pages(realsize);
- intptr = (uintptr_t) uk_palloc(a, num_pages);
+ intptr = (__uptr) uk_palloc(a, num_pages);
if (!intptr)
return ENOMEM;
*memptr = (void *) ALIGN_UP(intptr + sizeof(*metadata),
- (uintptr_t) align);
+ (__uptr) align);
metadata = uk_get_metadata(*memptr);
/* check for underflow (should not happen) */
- UK_ASSERT(intptr <= (uintptr_t) metadata);
+ UK_ASSERT(intptr <= (__uptr) metadata);
metadata->num_pages = num_pages;
metadata->base = (void *) intptr;
return 0;
}
-ssize_t uk_alloc_maxalloc_ifpages(struct uk_alloc *a)
+__ssz uk_alloc_maxalloc_ifpages(struct uk_alloc *a)
{
long num_pages;
- ssize_t maxalloc;
+ __ssz maxalloc;
UK_ASSERT(a);
num_pages = uk_alloc_pmaxalloc(a);
if (num_pages < 0) {
/* forward error code */
- return (ssize_t) num_pages;
+ return (__ssz) num_pages;
}
- maxalloc = ((ssize_t) num_pages) << __PAGE_SHIFT;
+ maxalloc = ((__ssz) num_pages) << __PAGE_SHIFT;
if (maxalloc <= METADATA_IFPAGES_SIZE_POW2)
return 0;
return maxalloc;
}
-ssize_t uk_alloc_availmem_ifpages(struct uk_alloc *a)
+__ssz uk_alloc_availmem_ifpages(struct uk_alloc *a)
{
long num_pages;
num_pages = uk_alloc_pavailmem(a);
if (num_pages < 0)
- return (ssize_t) num_pages;
+ return (__ssz) num_pages;
- return ((ssize_t) num_pages) << __PAGE_SHIFT;
+ return ((__ssz) num_pages) << __PAGE_SHIFT;
}
#if CONFIG_LIBUKALLOC_IFMALLOC
struct metadata_ifmalloc {
- size_t size;
+ __sz size;
void *base;
};
static struct metadata_ifmalloc *uk_get_metadata_ifmalloc(const void *ptr)
{
- return (struct metadata_ifmalloc *)((uintptr_t) ptr -
+ return (struct metadata_ifmalloc *)((__uptr) ptr -
METADATA_IFMALLOC_SIZE_POW2);
}
-static size_t uk_getmallocsize_ifmalloc(const void *ptr)
+static __sz uk_getmallocsize_ifmalloc(const void *ptr)
{
struct metadata_ifmalloc *metadata = uk_get_metadata_ifmalloc(ptr);
- return (size_t) ((uintptr_t) metadata->base + metadata->size -
- (uintptr_t) ptr);
+ return (__sz) ((__uptr) metadata->base + metadata->size -
+ (__uptr) ptr);
}
void uk_free_ifmalloc(struct uk_alloc *a, void *ptr)
a->free_backend(a, metadata->base);
}
-void *uk_malloc_ifmalloc(struct uk_alloc *a, size_t size)
+void *uk_malloc_ifmalloc(struct uk_alloc *a, __sz size)
{
struct metadata_ifmalloc *metadata;
- size_t realsize = size + METADATA_IFMALLOC_SIZE_POW2;
+ __sz realsize = size + METADATA_IFMALLOC_SIZE_POW2;
void *ptr;
UK_ASSERT(a);
/* check for overflow */
if (unlikely(realsize < size))
- return NULL;
+ return __NULL;
ptr = a->malloc_backend(a, realsize);
if (!ptr)
- return NULL;
+ return __NULL;
metadata = ptr;
metadata->size = realsize;
metadata->base = ptr;
- return (void *) ((uintptr_t) ptr + METADATA_IFMALLOC_SIZE_POW2);
+ return (void *) ((__uptr) ptr + METADATA_IFMALLOC_SIZE_POW2);
}
-void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, size_t size)
+void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, __sz size)
{
void *retptr;
- size_t mallocsize;
+ __sz mallocsize;
UK_ASSERT(a);
if (!ptr)
if (ptr && !size) {
uk_free_ifmalloc(a, ptr);
- return NULL;
+ return __NULL;
}
retptr = uk_malloc_ifmalloc(a, size);
if (!retptr)
- return NULL;
+ return __NULL;
mallocsize = uk_getmallocsize_ifmalloc(ptr);
}
int uk_posix_memalign_ifmalloc(struct uk_alloc *a,
- void **memptr, size_t align, size_t size)
+ void **memptr, __sz align, __sz size)
{
struct metadata_ifmalloc *metadata;
- size_t realsize, padding;
- uintptr_t intptr;
+ __sz realsize, padding;
+ __uptr intptr;
UK_ASSERT(a);
if (((align - 1) & align) != 0
if (unlikely(realsize < size))
return ENOMEM;
- intptr = (uintptr_t) a->malloc_backend(a, realsize);
+ intptr = (__uptr) a->malloc_backend(a, realsize);
if (!intptr)
return ENOMEM;
*memptr = (void *) ALIGN_UP(intptr + METADATA_IFMALLOC_SIZE_POW2,
- (uintptr_t) align);
+ (__uptr) align);
metadata = uk_get_metadata_ifmalloc(*memptr);
/* check for underflow */
- UK_ASSERT(intptr <= (uintptr_t) metadata);
+ UK_ASSERT(intptr <= (__uptr) metadata);
metadata->size = realsize;
metadata->base = (void *) intptr;
UK_ASSERT(a);
/* check for overflow */
- if (num_pages > (~(size_t)0)/__PAGE_SIZE)
- return NULL;
+ if (num_pages > (~(__sz)0)/__PAGE_SIZE)
+ return __NULL;
if (uk_posix_memalign(a, &ptr, __PAGE_SIZE, num_pages * __PAGE_SIZE))
- return NULL;
+ return __NULL;
return ptr;
}
-void *uk_realloc_compat(struct uk_alloc *a, void *ptr, size_t size)
+void *uk_realloc_compat(struct uk_alloc *a, void *ptr, __sz size)
{
void *retptr;
if (ptr && !size) {
uk_free(a, ptr);
- return NULL;
+ return __NULL;
}
retptr = uk_malloc(a, size);
if (!retptr)
- return NULL;
+ return __NULL;
memcpy(retptr, ptr, size);
return retptr;
}
-void *uk_calloc_compat(struct uk_alloc *a, size_t nmemb, size_t size)
+void *uk_calloc_compat(struct uk_alloc *a, __sz nmemb, __sz size)
{
void *ptr;
- size_t tlen = nmemb * size;
+ __sz tlen = nmemb * size;
/* check for overflow */
- if (nmemb > (~(size_t)0)/size)
- return NULL;
+ if (nmemb > (~(__sz)0)/size)
+ return __NULL;
UK_ASSERT(a);
ptr = uk_malloc(a, tlen);
if (!ptr)
- return NULL;
+ return __NULL;
memset(ptr, 0, tlen);
return ptr;
}
-void *uk_memalign_compat(struct uk_alloc *a, size_t align, size_t size)
+void *uk_memalign_compat(struct uk_alloc *a, __sz align, __sz size)
{
void *ptr;
UK_ASSERT(a);
if (uk_posix_memalign(a, &ptr, align, size) != 0)
- return NULL;
+ return __NULL;
return ptr;
}
long uk_alloc_pmaxalloc_compat(struct uk_alloc *a)
{
- ssize_t mem;
+ __ssz mem;
UK_ASSERT(a);
long uk_alloc_pavailmem_compat(struct uk_alloc *a)
{
- ssize_t mem;
+ __ssz mem;
UK_ASSERT(a);
return (long) (mem >> __PAGE_SHIFT);
}
-size_t uk_alloc_availmem_total(void)
+__sz uk_alloc_availmem_total(void)
{
struct uk_alloc *a;
- ssize_t availmem;
- size_t total;
+ __ssz availmem;
+ __sz total;
total = 0;
uk_alloc_foreach(a) {
#ifndef __UK_ALLOC_H__
#define __UK_ALLOC_H__
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <errno.h>
+#include <uk/arch/types.h>
#include <uk/config.h>
#include <uk/assert.h>
#include <uk/essentials.h>
+#include <errno.h>
#ifdef __cplusplus
extern "C" {
struct uk_alloc;
typedef void* (*uk_alloc_malloc_func_t)
- (struct uk_alloc *a, size_t size);
+ (struct uk_alloc *a, __sz size);
typedef void* (*uk_alloc_calloc_func_t)
- (struct uk_alloc *a, size_t nmemb, size_t size);
+ (struct uk_alloc *a, __sz nmemb, __sz size);
typedef int (*uk_alloc_posix_memalign_func_t)
- (struct uk_alloc *a, void **memptr, size_t align, size_t size);
+ (struct uk_alloc *a, void **memptr, __sz align, __sz size);
typedef void* (*uk_alloc_memalign_func_t)
- (struct uk_alloc *a, size_t align, size_t size);
+ (struct uk_alloc *a, __sz align, __sz size);
typedef void* (*uk_alloc_realloc_func_t)
- (struct uk_alloc *a, void *ptr, size_t size);
+ (struct uk_alloc *a, void *ptr, __sz size);
typedef void (*uk_alloc_free_func_t)
(struct uk_alloc *a, void *ptr);
typedef void* (*uk_alloc_palloc_func_t)
typedef void (*uk_alloc_pfree_func_t)
(struct uk_alloc *a, void *ptr, unsigned long num_pages);
typedef int (*uk_alloc_addmem_func_t)
- (struct uk_alloc *a, void *base, size_t size);
-typedef ssize_t (*uk_alloc_getsize_func_t)
+ (struct uk_alloc *a, void *base, __sz size);
+typedef __ssz (*uk_alloc_getsize_func_t)
(struct uk_alloc *a);
typedef long (*uk_alloc_getpsize_func_t)
(struct uk_alloc *a);
#if CONFIG_LIBUKALLOC_IFSTATS
struct uk_alloc_stats {
- size_t last_alloc_size; /* size of the last allocation */
- size_t max_alloc_size; /* biggest satisfied allocation size */
- size_t min_alloc_size; /* smallest satisfied allocation size */
+ __sz last_alloc_size; /* size of the last allocation */
+ __sz max_alloc_size; /* biggest satisfied allocation size */
+ __sz min_alloc_size; /* smallest satisfied allocation size */
- uint64_t tot_nb_allocs; /* total number of satisfied allocations */
- uint64_t tot_nb_frees; /* total number of satisfied free operations */
- int64_t cur_nb_allocs; /* current number of active allocations */
- int64_t max_nb_allocs; /* maximum number of active allocations */
+ __u64 tot_nb_allocs; /* total number of satisfied allocations */
+ __u64 tot_nb_frees; /* total number of satisfied free operations */
+ __s64 cur_nb_allocs; /* current number of active allocations */
+ __s64 max_nb_allocs; /* maximum number of active allocations */
- ssize_t cur_mem_use; /* current used memory by allocations */
- ssize_t max_mem_use; /* maximum amount of memory used by allocations */
+ __ssz cur_mem_use; /* current used memory by allocations */
+ __ssz max_mem_use; /* maximum amount of memory used by allocations */
- uint64_t nb_enomem; /* number of times failing allocation requests */
+ __u64 nb_enomem; /* number of times failing allocation requests */
};
#endif /* CONFIG_LIBUKALLOC_IFSTATS */
/* internal */
struct uk_alloc *next;
- int8_t priv[];
+ __u8 priv[];
};
extern struct uk_alloc *_uk_alloc_head;
/* Iterate over all registered allocators */
#define uk_alloc_foreach(iter) \
for (iter = _uk_alloc_head; \
- iter != NULL; \
+ iter != __NULL; \
iter = iter->next)
#if CONFIG_LIBUKALLOC_IFSTATS_PERLIB
#endif /* !CONFIG_LIBUKALLOC_IFSTATS_PERLIB */
/* wrapper functions */
-static inline void *uk_do_malloc(struct uk_alloc *a, size_t size)
+static inline void *uk_do_malloc(struct uk_alloc *a, __sz size)
{
UK_ASSERT(a);
return a->malloc(a, size);
}
-static inline void *uk_malloc(struct uk_alloc *a, size_t size)
+static inline void *uk_malloc(struct uk_alloc *a, __sz size)
{
if (unlikely(!a)) {
errno = ENOMEM;
- return NULL;
+ return __NULL;
}
return uk_do_malloc(a, size);
}
static inline void *uk_do_calloc(struct uk_alloc *a,
- size_t nmemb, size_t size)
+ __sz nmemb, __sz size)
{
UK_ASSERT(a);
return a->calloc(a, nmemb, size);
}
static inline void *uk_calloc(struct uk_alloc *a,
- size_t nmemb, size_t size)
+ __sz nmemb, __sz size)
{
if (unlikely(!a)) {
errno = ENOMEM;
- return NULL;
+ return __NULL;
}
return uk_do_calloc(a, nmemb, size);
}
#define uk_zalloc(a, size) uk_calloc((a), 1, (size))
static inline void *uk_do_realloc(struct uk_alloc *a,
- void *ptr, size_t size)
+ void *ptr, __sz size)
{
UK_ASSERT(a);
return a->realloc(a, ptr, size);
}
-static inline void *uk_realloc(struct uk_alloc *a, void *ptr, size_t size)
+static inline void *uk_realloc(struct uk_alloc *a, void *ptr, __sz size)
{
if (unlikely(!a)) {
errno = ENOMEM;
- return NULL;
+ return __NULL;
}
return uk_do_realloc(a, ptr, size);
}
static inline int uk_do_posix_memalign(struct uk_alloc *a, void **memptr,
- size_t align, size_t size)
+ __sz align, __sz size)
{
UK_ASSERT(a);
return a->posix_memalign(a, memptr, align, size);
}
static inline int uk_posix_memalign(struct uk_alloc *a, void **memptr,
- size_t align, size_t size)
+ __sz align, __sz size)
{
if (unlikely(!a)) {
- *memptr = NULL;
+ *memptr = __NULL;
return ENOMEM;
}
return uk_do_posix_memalign(a, memptr, align, size);
}
static inline void *uk_do_memalign(struct uk_alloc *a,
- size_t align, size_t size)
+ __sz align, __sz size)
{
UK_ASSERT(a);
return a->memalign(a, align, size);
}
static inline void *uk_memalign(struct uk_alloc *a,
- size_t align, size_t size)
+ __sz align, __sz size)
{
if (unlikely(!a))
- return NULL;
+ return __NULL;
return uk_do_memalign(a, align, size);
}
static inline void *uk_palloc(struct uk_alloc *a, unsigned long num_pages)
{
if (unlikely(!a || !a->palloc))
- return NULL;
+ return __NULL;
return uk_do_palloc(a, num_pages);
}
}
static inline int uk_alloc_addmem(struct uk_alloc *a, void *base,
- size_t size)
+ __sz size)
{
UK_ASSERT(a);
if (a->addmem)
}
/* current biggest allocation request possible */
-static inline ssize_t uk_alloc_maxalloc(struct uk_alloc *a)
+static inline __ssz uk_alloc_maxalloc(struct uk_alloc *a)
{
UK_ASSERT(a);
if (!a->maxalloc)
- return (ssize_t) -ENOTSUP;
+ return (__ssz) -ENOTSUP;
return a->maxalloc(a);
}
}
/* total free memory of the allocator */
-static inline ssize_t uk_alloc_availmem(struct uk_alloc *a)
+static inline __ssz uk_alloc_availmem(struct uk_alloc *a)
{
UK_ASSERT(a);
if (!a->availmem)
- return (ssize_t) -ENOTSUP;
+ return (__ssz) -ENOTSUP;
return a->availmem(a);
}
return a->pavailmem(a);
}
-size_t uk_alloc_availmem_total(void);
+__sz uk_alloc_availmem_total(void);
unsigned long uk_alloc_pavailmem_total(void);
/* Functions that can be used by allocators that implement palloc(),
* pfree() and potentially pavail(), pmaxalloc() only
*/
-void *uk_malloc_ifpages(struct uk_alloc *a, size_t size);
-void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, size_t size);
+void *uk_malloc_ifpages(struct uk_alloc *a, __sz size);
+void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, __sz size);
int uk_posix_memalign_ifpages(struct uk_alloc *a, void **memptr,
- size_t align, size_t size);
+ __sz align, __sz size);
void uk_free_ifpages(struct uk_alloc *a, void *ptr);
-ssize_t uk_alloc_availmem_ifpages(struct uk_alloc *a);
-ssize_t uk_alloc_maxalloc_ifpages(struct uk_alloc *a);
+__ssz uk_alloc_availmem_ifpages(struct uk_alloc *a);
+__ssz uk_alloc_maxalloc_ifpages(struct uk_alloc *a);
#if CONFIG_LIBUKALLOC_IFMALLOC
-void *uk_malloc_ifmalloc(struct uk_alloc *a, size_t size);
-void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, size_t size);
+void *uk_malloc_ifmalloc(struct uk_alloc *a, __sz size);
+void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, __sz size);
int uk_posix_memalign_ifmalloc(struct uk_alloc *a, void **memptr,
- size_t align, size_t size);
+ __sz align, __sz size);
void uk_free_ifmalloc(struct uk_alloc *a, void *ptr);
#endif
/* Functionality that is provided based on malloc() and posix_memalign() */
-void *uk_calloc_compat(struct uk_alloc *a, size_t num, size_t len);
-void *uk_realloc_compat(struct uk_alloc *a, void *ptr, size_t size);
-void *uk_memalign_compat(struct uk_alloc *a, size_t align, size_t len);
+void *uk_calloc_compat(struct uk_alloc *a, __sz num, __sz len);
+void *uk_realloc_compat(struct uk_alloc *a, void *ptr, __sz size);
+void *uk_memalign_compat(struct uk_alloc *a, __sz align, __sz len);
void *uk_palloc_compat(struct uk_alloc *a, unsigned long num_pages);
void uk_pfree_compat(struct uk_alloc *a, void *ptr, unsigned long num_pages);
long uk_alloc_pavailmem_compat(struct uk_alloc *a);
/* NOTE: Please do not use this function directly */
static inline void _uk_alloc_stats_count_alloc(struct uk_alloc_stats *stats,
- void *ptr, size_t size)
+ void *ptr, __sz size)
{
/* TODO: SMP safety */
uk_preempt_disable();
/* NOTE: Please do not use this function directly */
static inline void _uk_alloc_stats_count_free(struct uk_alloc_stats *stats,
- void *ptr, size_t size)
+ void *ptr, __sz size)
{
/* TODO: SMP safety */
uk_preempt_disable();
} while (0)
#define uk_alloc_stats_count_palloc(a, ptr, num_pages) \
uk_alloc_stats_count_alloc((a), (ptr), \
- ((size_t) (num_pages)) << __PAGE_SHIFT)
+ ((__sz) (num_pages)) << __PAGE_SHIFT)
#define uk_alloc_stats_count_enomem(a, size) \
uk_alloc_stats_count_alloc((a), NULL, (size))
#define uk_alloc_stats_count_penomem(a, num_pages) \
uk_alloc_stats_count_enomem((a), \
- ((size_t) (num_pages)) << __PAGE_SHIFT)
+ ((__sz) (num_pages)) << __PAGE_SHIFT)
/* Note: if ptr is NULL, nothing is counted */
#define uk_alloc_stats_count_free(a, ptr, freed_size) \
} while (0)
#define uk_alloc_stats_count_pfree(a, ptr, num_pages) \
uk_alloc_stats_count_free((a), (ptr), \
- ((size_t) (num_pages)) << __PAGE_SHIFT)
+ ((__sz) (num_pages)) << __PAGE_SHIFT)
#define uk_alloc_stats_reset(a) \
memset(&(a)->_stats, 0, sizeof((a)->_stats))
}
#define WATCH_STATS_START(p) \
- ssize_t _before_mem_use; \
- size_t _before_nb_allocs; \
- size_t _before_tot_nb_allocs; \
- size_t _before_nb_enomem; \
+ __ssz _before_mem_use; \
+ __sz _before_nb_allocs; \
+ __sz _before_tot_nb_allocs; \
+ __sz _before_nb_enomem; \
\
uk_preempt_disable(); \
_before_mem_use = (p)->_stats.cur_mem_use; \
#define WATCH_STATS_END(p, nb_allocs_diff, nb_enomem_diff, \
mem_use_diff, alloc_size) \
- size_t _nb_allocs = (p)->_stats.tot_nb_allocs \
- - _before_tot_nb_allocs; \
+ __sz _nb_allocs = (p)->_stats.tot_nb_allocs \
+ - _before_tot_nb_allocs; \
\
/* NOTE: We assume that an allocator call does at
* most one allocation. Otherwise we cannot currently
\
*(mem_use_diff) = (p)->_stats.cur_mem_use \
- _before_mem_use; \
- *(nb_allocs_diff) = (ssize_t) (p)->_stats.cur_nb_allocs \
+ *(nb_allocs_diff) = (__ssz) (p)->_stats.cur_nb_allocs \
- _before_nb_allocs; \
- *(nb_enomem_diff) = (ssize_t) (p)->_stats.nb_enomem \
+ *(nb_enomem_diff) = (__ssz) (p)->_stats.nb_enomem \
- _before_nb_enomem; \
if (_nb_allocs > 0) \
*(alloc_size) = (p)->_stats.last_alloc_size; \
uk_preempt_enable();
static inline void update_stats(struct uk_alloc_stats *stats,
- ssize_t nb_allocs_diff,
- ssize_t nb_enomem_diff,
- ssize_t mem_use_diff,
- size_t last_alloc_size)
+ __ssz nb_allocs_diff,
+ __ssz nb_enomem_diff,
+ __ssz mem_use_diff,
+ __sz last_alloc_size)
{
uk_preempt_disable();
if (nb_allocs_diff >= 0)
uk_preempt_enable();
}
-static void *wrapper_malloc(struct uk_alloc *a, size_t size)
+static void *wrapper_malloc(struct uk_alloc *a, __sz size)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
void *ret;
UK_ASSERT(p);
return ret;
}
-static void *wrapper_calloc(struct uk_alloc *a, size_t nmemb, size_t size)
+static void *wrapper_calloc(struct uk_alloc *a, __sz nmemb, __sz size)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
void *ret;
UK_ASSERT(p);
}
static int wrapper_posix_memalign(struct uk_alloc *a, void **memptr,
- size_t align, size_t size)
+ __sz align, __sz size)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
int ret;
UK_ASSERT(p);
return ret;
}
-static void *wrapper_memalign(struct uk_alloc *a, size_t align, size_t size)
+static void *wrapper_memalign(struct uk_alloc *a, __sz align, __sz size)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
void *ret;
UK_ASSERT(p);
return ret;
}
-static void *wrapper_realloc(struct uk_alloc *a, void *ptr, size_t size)
+static void *wrapper_realloc(struct uk_alloc *a, void *ptr, __sz size)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
void *ret;
UK_ASSERT(p);
static void wrapper_free(struct uk_alloc *a, void *ptr)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
UK_ASSERT(p);
static void *wrapper_palloc(struct uk_alloc *a, unsigned long num_pages)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
void *ret;
UK_ASSERT(p);
unsigned long num_pages)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
- ssize_t nb_allocs, mem_use, nb_enomem;
- size_t alloc_size;
+ __ssz nb_allocs, mem_use, nb_enomem;
+ __sz alloc_size;
UK_ASSERT(p);
/* The following interfaces do not change allocation statistics,
* this is why we just forward the calls
*/
-static int wrapper_addmem(struct uk_alloc *a __unused, void *base, size_t size)
+static int wrapper_addmem(struct uk_alloc *a __unused, void *base, __sz size)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
return uk_alloc_addmem(p, base, size);
}
-static size_t wrapper_maxalloc(struct uk_alloc *a __unused)
+static __ssz wrapper_maxalloc(struct uk_alloc *a __unused)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
return uk_alloc_maxalloc(p);
}
-static size_t wrapper_availmem(struct uk_alloc *a __unused)
+static __ssz wrapper_availmem(struct uk_alloc *a __unused)
{
struct uk_alloc *p = _uk_alloc_get_actual_default();
* an introduction to region-based memory management.
*/
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/types.h>
#include <uk/allocregion.h>
#include <uk/alloc_impl.h>
#include <uk/page.h> /* round_pgup() */
#ifndef __UKPLAT_COMMON_PCI_BUS_H__
#define __UKPLAT_COMMON_PCI_BUS_H__
+#include <stdint.h>
+#include <stddef.h>
#include <uk/bus.h>
#include <uk/alloc.h>
#include <uk/ctors.h>
#include <uk/plat/memory.h>
#include <uk/plat/common/memory.h>
+#include <uk/alloc.h>
+#include <stddef.h>
static struct uk_alloc *plat_allocator;
****************************************************************************
*/
#include <stdint.h>
+#include <stddef.h>
#ifdef DBGGNT
#include <string.h>
#endif
*/
#include <string.h>
+#include <stdint.h>
+#include <stddef.h>
#include <uk/plat/common/sections.h>
#include <common/gnttab.h>
/* Taken from Mini-OS */
#include <stdint.h>
+#include <stddef.h>
#include <uk/print.h>
#include <xen/xen.h>
#include <xen/grant_table.h>