From ed1ef9aef8fcca237335f3a05ab28e403afe7550 Mon Sep 17 00:00:00 2001
From: Simon Kuenzer
Date: Tue, 17 Nov 2020 17:27:19 +0100
Subject: [PATCH] lib/ukalloc: Use Unikraft internal types

Use Unikraft-internal types for the API headers in order to keep the
dependency on libc definitions minimal. This way, libcs can integrate
the ukalloc API headers directly with their own header files without
breaking the declaration set of the libc headers. This is done in
preparation for per-library statistics (see the following commit).

Signed-off-by: Simon Kuenzer
Reviewed-by: Cezar Craciunoiu
Tested-by: Unikraft CI
GitHub-Pull-Request: #229
---
 lib/ukalloc/alloc.c | 142 ++++++++++++++--------------
 lib/ukalloc/include/uk/alloc.h | 88 +++++++++--------
 lib/ukalloc/include/uk/alloc_impl.h | 32 +++----
 lib/ukalloc/libstats.c | 72 +++++++-------
 lib/ukallocregion/region.c | 3 +
 plat/common/include/pci/pci_bus.h | 2 +
 plat/common/memory.c | 2 +
 plat/xen/gnttab.c | 1 +
 plat/xen/memory.c | 2 +
 plat/xen/x86/gnttab.c | 1 +
 10 files changed, 177 insertions(+), 168 deletions(-)

diff --git a/lib/ukalloc/alloc.c b/lib/ukalloc/alloc.c index 7edf3ef52..720ead2ad 100644 --- a/lib/ukalloc/alloc.c +++ b/lib/ukalloc/alloc.c @@ -53,14 +53,14 @@ int uk_alloc_register(struct uk_alloc *a) if (!_uk_alloc_head) { _uk_alloc_head = a; - a->next = NULL; + a->next = __NULL; return 0; } while (this && this->next) this = this->next; this->next = a; - a->next = NULL; + a->next = __NULL; return 0; } @@ -81,7 +81,7 @@ UK_CTASSERT(!(sizeof(struct metadata_ifpages) > METADATA_IFPAGES_SIZE_POW2)); static struct metadata_ifpages *uk_get_metadata(const void *ptr) { - uintptr_t metadata; + __uptr metadata; /* a ptr less or equal to page size would mean that the actual allocated * object started at 0x0, so it was NULL. * @@ -89,11 +89,11 @@ static struct metadata_ifpages *uk_get_metadata(const void *ptr) * also imply that the actual allocated object started at 0x0 because * we need space to store metadata. */ - UK_ASSERT((uintptr_t) ptr >= __PAGE_SIZE + + UK_ASSERT((__uptr) ptr >= __PAGE_SIZE + sizeof(struct metadata_ifpages)); - metadata = ALIGN_DOWN((uintptr_t) ptr, (uintptr_t) __PAGE_SIZE); - if (metadata == (uintptr_t) ptr) { + metadata = ALIGN_DOWN((__uptr) ptr, (__uptr) __PAGE_SIZE); + if (metadata == (__uptr) ptr) { /* special case: the memory was page-aligned. * In this case the metadata lies at the start of the * previous page, with the rest of that page unused. @@ -104,12 +104,12 @@ return (struct metadata_ifpages *) metadata; } -static size_t uk_getmallocsize(const void *ptr) +static __sz uk_getmallocsize(const void *ptr) { struct metadata_ifpages *metadata = uk_get_metadata(ptr); - return (size_t)metadata->base + (size_t)(metadata->num_pages) * - __PAGE_SIZE - (size_t)ptr; + return (__sz)metadata->base + (__sz)(metadata->num_pages) * + __PAGE_SIZE - (__sz)ptr; } /* This is a very simple, naive implementation of malloc. @@ -124,23 +124,23 @@ * locking support yet. Eventually, this should probably be replaced by * something better.
*/ -void *uk_malloc_ifpages(struct uk_alloc *a, size_t size) +void *uk_malloc_ifpages(struct uk_alloc *a, __sz size) { - uintptr_t intptr; + __uptr intptr; unsigned long num_pages; struct metadata_ifpages *metadata; - size_t realsize = sizeof(*metadata) + size; + __sz realsize = sizeof(*metadata) + size; UK_ASSERT(a); /* check for invalid size and overflow */ if (!size || realsize < size) - return NULL; + return __NULL; num_pages = size_to_num_pages(realsize); - intptr = (uintptr_t)uk_palloc(a, num_pages); + intptr = (__uptr)uk_palloc(a, num_pages); if (!intptr) - return NULL; + return __NULL; metadata = (struct metadata_ifpages *) intptr; metadata->num_pages = num_pages; @@ -159,15 +159,15 @@ void uk_free_ifpages(struct uk_alloc *a, void *ptr) metadata = uk_get_metadata(ptr); - UK_ASSERT(metadata->base != NULL); + UK_ASSERT(metadata->base != __NULL); UK_ASSERT(metadata->num_pages != 0); uk_pfree(a, metadata->base, metadata->num_pages); } -void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, size_t size) +void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, __sz size) { void *retptr; - size_t mallocsize; + __sz mallocsize; UK_ASSERT(a); if (!ptr) @@ -175,12 +175,12 @@ void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, size_t size) if (ptr && !size) { uk_free_ifpages(a, ptr); - return NULL; + return __NULL; } retptr = uk_malloc_ifpages(a, size); if (!retptr) - return NULL; + return __NULL; mallocsize = uk_getmallocsize(ptr); @@ -194,12 +194,12 @@ void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, size_t size) } int uk_posix_memalign_ifpages(struct uk_alloc *a, - void **memptr, size_t align, size_t size) + void **memptr, __sz align, __sz size) { struct metadata_ifpages *metadata; unsigned long num_pages; - uintptr_t intptr; - size_t realsize, padding; + __uptr intptr; + __sz realsize, padding; UK_ASSERT(a); if (((align - 1) & align) != 0 @@ -252,18 +252,18 @@ int uk_posix_memalign_ifpages(struct uk_alloc *a, return EINVAL; num_pages = size_to_num_pages(realsize); - intptr = (uintptr_t) uk_palloc(a, num_pages); + intptr = (__uptr) uk_palloc(a, num_pages); if (!intptr) return ENOMEM; *memptr = (void *) ALIGN_UP(intptr + sizeof(*metadata), - (uintptr_t) align); + (__uptr) align); metadata = uk_get_metadata(*memptr); /* check for underflow (should not happen) */ - UK_ASSERT(intptr <= (uintptr_t) metadata); + UK_ASSERT(intptr <= (__uptr) metadata); metadata->num_pages = num_pages; metadata->base = (void *) intptr; @@ -271,20 +271,20 @@ int uk_posix_memalign_ifpages(struct uk_alloc *a, return 0; } -ssize_t uk_alloc_maxalloc_ifpages(struct uk_alloc *a) +__ssz uk_alloc_maxalloc_ifpages(struct uk_alloc *a) { long num_pages; - ssize_t maxalloc; + __ssz maxalloc; UK_ASSERT(a); num_pages = uk_alloc_pmaxalloc(a); if (num_pages < 0) { /* forward error code */ - return (ssize_t) num_pages; + return (__ssz) num_pages; } - maxalloc = ((ssize_t) num_pages) << __PAGE_SHIFT; + maxalloc = ((__ssz) num_pages) << __PAGE_SHIFT; if (maxalloc <= METADATA_IFPAGES_SIZE_POW2) return 0; @@ -293,7 +293,7 @@ ssize_t uk_alloc_maxalloc_ifpages(struct uk_alloc *a) return maxalloc; } -ssize_t uk_alloc_availmem_ifpages(struct uk_alloc *a) +__ssz uk_alloc_availmem_ifpages(struct uk_alloc *a) { long num_pages; @@ -301,15 +301,15 @@ ssize_t uk_alloc_availmem_ifpages(struct uk_alloc *a) num_pages = uk_alloc_pavailmem(a); if (num_pages < 0) - return (ssize_t) num_pages; + return (__ssz) num_pages; - return ((ssize_t) num_pages) << __PAGE_SHIFT; + return ((__ssz) num_pages) << __PAGE_SHIFT; } #if 
CONFIG_LIBUKALLOC_IFMALLOC struct metadata_ifmalloc { - size_t size; + __sz size; void *base; }; @@ -318,16 +318,16 @@ UK_CTASSERT(!(sizeof(struct metadata_ifmalloc) > METADATA_IFMALLOC_SIZE_POW2)); static struct metadata_ifmalloc *uk_get_metadata_ifmalloc(const void *ptr) { - return (struct metadata_ifmalloc *)((uintptr_t) ptr - + return (struct metadata_ifmalloc *)((__uptr) ptr - METADATA_IFMALLOC_SIZE_POW2); } -static size_t uk_getmallocsize_ifmalloc(const void *ptr) +static __sz uk_getmallocsize_ifmalloc(const void *ptr) { struct metadata_ifmalloc *metadata = uk_get_metadata_ifmalloc(ptr); - return (size_t) ((uintptr_t) metadata->base + metadata->size - - (uintptr_t) ptr); + return (__sz) ((__uptr) metadata->base + metadata->size - + (__uptr) ptr); } void uk_free_ifmalloc(struct uk_alloc *a, void *ptr) @@ -343,10 +343,10 @@ void uk_free_ifmalloc(struct uk_alloc *a, void *ptr) a->free_backend(a, metadata->base); } -void *uk_malloc_ifmalloc(struct uk_alloc *a, size_t size) +void *uk_malloc_ifmalloc(struct uk_alloc *a, __sz size) { struct metadata_ifmalloc *metadata; - size_t realsize = size + METADATA_IFMALLOC_SIZE_POW2; + __sz realsize = size + METADATA_IFMALLOC_SIZE_POW2; void *ptr; UK_ASSERT(a); @@ -354,23 +354,23 @@ void *uk_malloc_ifmalloc(struct uk_alloc *a, size_t size) /* check for overflow */ if (unlikely(realsize < size)) - return NULL; + return __NULL; ptr = a->malloc_backend(a, realsize); if (!ptr) - return NULL; + return __NULL; metadata = ptr; metadata->size = realsize; metadata->base = ptr; - return (void *) ((uintptr_t) ptr + METADATA_IFMALLOC_SIZE_POW2); + return (void *) ((__uptr) ptr + METADATA_IFMALLOC_SIZE_POW2); } -void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, size_t size) +void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, __sz size) { void *retptr; - size_t mallocsize; + __sz mallocsize; UK_ASSERT(a); if (!ptr) @@ -378,12 +378,12 @@ void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, size_t size) if (ptr && !size) { uk_free_ifmalloc(a, ptr); - return NULL; + return __NULL; } retptr = uk_malloc_ifmalloc(a, size); if (!retptr) - return NULL; + return __NULL; mallocsize = uk_getmallocsize_ifmalloc(ptr); @@ -394,11 +394,11 @@ void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, size_t size) } int uk_posix_memalign_ifmalloc(struct uk_alloc *a, - void **memptr, size_t align, size_t size) + void **memptr, __sz align, __sz size) { struct metadata_ifmalloc *metadata; - size_t realsize, padding; - uintptr_t intptr; + __sz realsize, padding; + __uptr intptr; UK_ASSERT(a); if (((align - 1) & align) != 0 @@ -426,18 +426,18 @@ int uk_posix_memalign_ifmalloc(struct uk_alloc *a, if (unlikely(realsize < size)) return ENOMEM; - intptr = (uintptr_t) a->malloc_backend(a, realsize); + intptr = (__uptr) a->malloc_backend(a, realsize); if (!intptr) return ENOMEM; *memptr = (void *) ALIGN_UP(intptr + METADATA_IFMALLOC_SIZE_POW2, - (uintptr_t) align); + (__uptr) align); metadata = uk_get_metadata_ifmalloc(*memptr); /* check for underflow */ - UK_ASSERT(intptr <= (uintptr_t) metadata); + UK_ASSERT(intptr <= (__uptr) metadata); metadata->size = realsize; metadata->base = (void *) intptr; @@ -465,16 +465,16 @@ void *uk_palloc_compat(struct uk_alloc *a, unsigned long num_pages) UK_ASSERT(a); /* check for overflow */ - if (num_pages > (~(size_t)0)/__PAGE_SIZE) - return NULL; + if (num_pages > (~(__sz)0)/__PAGE_SIZE) + return __NULL; if (uk_posix_memalign(a, &ptr, __PAGE_SIZE, num_pages * __PAGE_SIZE)) - return NULL; + return __NULL; return ptr; } -void 
*uk_realloc_compat(struct uk_alloc *a, void *ptr, size_t size) +void *uk_realloc_compat(struct uk_alloc *a, void *ptr, __sz size) { void *retptr; @@ -484,12 +484,12 @@ void *uk_realloc_compat(struct uk_alloc *a, void *ptr, size_t size) if (ptr && !size) { uk_free(a, ptr); - return NULL; + return __NULL; } retptr = uk_malloc(a, size); if (!retptr) - return NULL; + return __NULL; memcpy(retptr, ptr, size); @@ -497,38 +497,38 @@ void *uk_realloc_compat(struct uk_alloc *a, void *ptr, size_t size) return retptr; } -void *uk_calloc_compat(struct uk_alloc *a, size_t nmemb, size_t size) +void *uk_calloc_compat(struct uk_alloc *a, __sz nmemb, __sz size) { void *ptr; - size_t tlen = nmemb * size; + __sz tlen = nmemb * size; /* check for overflow */ - if (nmemb > (~(size_t)0)/size) - return NULL; + if (nmemb > (~(__sz)0)/size) + return __NULL; UK_ASSERT(a); ptr = uk_malloc(a, tlen); if (!ptr) - return NULL; + return __NULL; memset(ptr, 0, tlen); return ptr; } -void *uk_memalign_compat(struct uk_alloc *a, size_t align, size_t size) +void *uk_memalign_compat(struct uk_alloc *a, __sz align, __sz size) { void *ptr; UK_ASSERT(a); if (uk_posix_memalign(a, &ptr, align, size) != 0) - return NULL; + return __NULL; return ptr; } long uk_alloc_pmaxalloc_compat(struct uk_alloc *a) { - ssize_t mem; + __ssz mem; UK_ASSERT(a); @@ -541,7 +541,7 @@ long uk_alloc_pmaxalloc_compat(struct uk_alloc *a) long uk_alloc_pavailmem_compat(struct uk_alloc *a) { - ssize_t mem; + __ssz mem; UK_ASSERT(a); @@ -552,11 +552,11 @@ long uk_alloc_pavailmem_compat(struct uk_alloc *a) return (long) (mem >> __PAGE_SHIFT); } -size_t uk_alloc_availmem_total(void) +__sz uk_alloc_availmem_total(void) { struct uk_alloc *a; - ssize_t availmem; - size_t total; + __ssz availmem; + __sz total; total = 0; uk_alloc_foreach(a) { diff --git a/lib/ukalloc/include/uk/alloc.h b/lib/ukalloc/include/uk/alloc.h index 2f3e9ae45..24af1e810 100644 --- a/lib/ukalloc/include/uk/alloc.h +++ b/lib/ukalloc/include/uk/alloc.h @@ -34,13 +34,11 @@ #ifndef __UK_ALLOC_H__ #define __UK_ALLOC_H__ -#include -#include -#include -#include +#include #include #include #include +#include #ifdef __cplusplus extern "C" { @@ -49,15 +47,15 @@ extern "C" { struct uk_alloc; typedef void* (*uk_alloc_malloc_func_t) - (struct uk_alloc *a, size_t size); + (struct uk_alloc *a, __sz size); typedef void* (*uk_alloc_calloc_func_t) - (struct uk_alloc *a, size_t nmemb, size_t size); + (struct uk_alloc *a, __sz nmemb, __sz size); typedef int (*uk_alloc_posix_memalign_func_t) - (struct uk_alloc *a, void **memptr, size_t align, size_t size); + (struct uk_alloc *a, void **memptr, __sz align, __sz size); typedef void* (*uk_alloc_memalign_func_t) - (struct uk_alloc *a, size_t align, size_t size); + (struct uk_alloc *a, __sz align, __sz size); typedef void* (*uk_alloc_realloc_func_t) - (struct uk_alloc *a, void *ptr, size_t size); + (struct uk_alloc *a, void *ptr, __sz size); typedef void (*uk_alloc_free_func_t) (struct uk_alloc *a, void *ptr); typedef void* (*uk_alloc_palloc_func_t) @@ -65,27 +63,27 @@ typedef void* (*uk_alloc_palloc_func_t) typedef void (*uk_alloc_pfree_func_t) (struct uk_alloc *a, void *ptr, unsigned long num_pages); typedef int (*uk_alloc_addmem_func_t) - (struct uk_alloc *a, void *base, size_t size); -typedef ssize_t (*uk_alloc_getsize_func_t) + (struct uk_alloc *a, void *base, __sz size); +typedef __ssz (*uk_alloc_getsize_func_t) (struct uk_alloc *a); typedef long (*uk_alloc_getpsize_func_t) (struct uk_alloc *a); #if CONFIG_LIBUKALLOC_IFSTATS struct uk_alloc_stats { - size_t 
last_alloc_size; /* size of the last allocation */ - size_t max_alloc_size; /* biggest satisfied allocation size */ - size_t min_alloc_size; /* smallest satisfied allocation size */ + __sz last_alloc_size; /* size of the last allocation */ + __sz max_alloc_size; /* biggest satisfied allocation size */ + __sz min_alloc_size; /* smallest satisfied allocation size */ - uint64_t tot_nb_allocs; /* total number of satisfied allocations */ - uint64_t tot_nb_frees; /* total number of satisfied free operations */ - int64_t cur_nb_allocs; /* current number of active allocations */ - int64_t max_nb_allocs; /* maximum number of active allocations */ + __u64 tot_nb_allocs; /* total number of satisfied allocations */ + __u64 tot_nb_frees; /* total number of satisfied free operations */ + __s64 cur_nb_allocs; /* current number of active allocations */ + __s64 max_nb_allocs; /* maximum number of active allocations */ - ssize_t cur_mem_use; /* current used memory by allocations */ - ssize_t max_mem_use; /* maximum amount of memory used by allocations */ + __ssz cur_mem_use; /* current used memory by allocations */ + __ssz max_mem_use; /* maximum amount of memory used by allocations */ - uint64_t nb_enomem; /* number of times failing allocation requests */ + __u64 nb_enomem; /* number of times failing allocation requests */ }; #endif /* CONFIG_LIBUKALLOC_IFSTATS */ @@ -120,7 +118,7 @@ struct uk_alloc { /* internal */ struct uk_alloc *next; - int8_t priv[]; + __u8 priv[]; }; extern struct uk_alloc *_uk_alloc_head; @@ -128,7 +126,7 @@ extern struct uk_alloc *_uk_alloc_head; /* Iterate over all registered allocators */ #define uk_alloc_foreach(iter) \ for (iter = _uk_alloc_head; \ - iter != NULL; \ + iter != __NULL; \ iter = iter->next) #if CONFIG_LIBUKALLOC_IFSTATS_PERLIB @@ -141,34 +139,34 @@ static inline struct uk_alloc *uk_alloc_get_default(void) #endif /* !CONFIG_LIBUKALLOC_IFSTATS_PERLIB */ /* wrapper functions */ -static inline void *uk_do_malloc(struct uk_alloc *a, size_t size) +static inline void *uk_do_malloc(struct uk_alloc *a, __sz size) { UK_ASSERT(a); return a->malloc(a, size); } -static inline void *uk_malloc(struct uk_alloc *a, size_t size) +static inline void *uk_malloc(struct uk_alloc *a, __sz size) { if (unlikely(!a)) { errno = ENOMEM; - return NULL; + return __NULL; } return uk_do_malloc(a, size); } static inline void *uk_do_calloc(struct uk_alloc *a, - size_t nmemb, size_t size) + __sz nmemb, __sz size) { UK_ASSERT(a); return a->calloc(a, nmemb, size); } static inline void *uk_calloc(struct uk_alloc *a, - size_t nmemb, size_t size) + __sz nmemb, __sz size) { if (unlikely(!a)) { errno = ENOMEM; - return NULL; + return __NULL; } return uk_do_calloc(a, nmemb, size); } @@ -177,50 +175,50 @@ static inline void *uk_calloc(struct uk_alloc *a, #define uk_zalloc(a, size) uk_calloc((a), 1, (size)) static inline void *uk_do_realloc(struct uk_alloc *a, - void *ptr, size_t size) + void *ptr, __sz size) { UK_ASSERT(a); return a->realloc(a, ptr, size); } -static inline void *uk_realloc(struct uk_alloc *a, void *ptr, size_t size) +static inline void *uk_realloc(struct uk_alloc *a, void *ptr, __sz size) { if (unlikely(!a)) { errno = ENOMEM; - return NULL; + return __NULL; } return uk_do_realloc(a, ptr, size); } static inline int uk_do_posix_memalign(struct uk_alloc *a, void **memptr, - size_t align, size_t size) + __sz align, __sz size) { UK_ASSERT(a); return a->posix_memalign(a, memptr, align, size); } static inline int uk_posix_memalign(struct uk_alloc *a, void **memptr, - size_t align, size_t size) + 
__sz align, __sz size) { if (unlikely(!a)) { - *memptr = NULL; + *memptr = __NULL; return ENOMEM; } return uk_do_posix_memalign(a, memptr, align, size); } static inline void *uk_do_memalign(struct uk_alloc *a, - size_t align, size_t size) + __sz align, __sz size) { UK_ASSERT(a); return a->memalign(a, align, size); } static inline void *uk_memalign(struct uk_alloc *a, - size_t align, size_t size) + __sz align, __sz size) { if (unlikely(!a)) - return NULL; + return __NULL; return uk_do_memalign(a, align, size); } @@ -244,7 +242,7 @@ static inline void *uk_do_palloc(struct uk_alloc *a, unsigned long num_pages) static inline void *uk_palloc(struct uk_alloc *a, unsigned long num_pages) { if (unlikely(!a || !a->palloc)) - return NULL; + return __NULL; return uk_do_palloc(a, num_pages); } @@ -262,7 +260,7 @@ static inline void uk_pfree(struct uk_alloc *a, void *ptr, } static inline int uk_alloc_addmem(struct uk_alloc *a, void *base, - size_t size) + __sz size) { UK_ASSERT(a); if (a->addmem) @@ -272,11 +270,11 @@ static inline int uk_alloc_addmem(struct uk_alloc *a, void *base, } /* current biggest allocation request possible */ -static inline ssize_t uk_alloc_maxalloc(struct uk_alloc *a) +static inline __ssz uk_alloc_maxalloc(struct uk_alloc *a) { UK_ASSERT(a); if (!a->maxalloc) - return (ssize_t) -ENOTSUP; + return (__ssz) -ENOTSUP; return a->maxalloc(a); } @@ -289,11 +287,11 @@ static inline long uk_alloc_pmaxalloc(struct uk_alloc *a) } /* total free memory of the allocator */ -static inline ssize_t uk_alloc_availmem(struct uk_alloc *a) +static inline __ssz uk_alloc_availmem(struct uk_alloc *a) { UK_ASSERT(a); if (!a->availmem) - return (ssize_t) -ENOTSUP; + return (__ssz) -ENOTSUP; return a->availmem(a); } @@ -305,7 +303,7 @@ static inline long uk_alloc_pavailmem(struct uk_alloc *a) return a->pavailmem(a); } -size_t uk_alloc_availmem_total(void); +__sz uk_alloc_availmem_total(void); unsigned long uk_alloc_pavailmem_total(void); diff --git a/lib/ukalloc/include/uk/alloc_impl.h b/lib/ukalloc/include/uk/alloc_impl.h index 2da500599..e169b8edb 100644 --- a/lib/ukalloc/include/uk/alloc_impl.h +++ b/lib/ukalloc/include/uk/alloc_impl.h @@ -56,26 +56,26 @@ int uk_alloc_register(struct uk_alloc *a); /* Functions that can be used by allocators that implement palloc(), * pfree() and potentially pavail(), pmaxalloc() only */ -void *uk_malloc_ifpages(struct uk_alloc *a, size_t size); -void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, size_t size); +void *uk_malloc_ifpages(struct uk_alloc *a, __sz size); +void *uk_realloc_ifpages(struct uk_alloc *a, void *ptr, __sz size); int uk_posix_memalign_ifpages(struct uk_alloc *a, void **memptr, - size_t align, size_t size); + __sz align, __sz size); void uk_free_ifpages(struct uk_alloc *a, void *ptr); -ssize_t uk_alloc_availmem_ifpages(struct uk_alloc *a); -ssize_t uk_alloc_maxalloc_ifpages(struct uk_alloc *a); +__ssz uk_alloc_availmem_ifpages(struct uk_alloc *a); +__ssz uk_alloc_maxalloc_ifpages(struct uk_alloc *a); #if CONFIG_LIBUKALLOC_IFMALLOC -void *uk_malloc_ifmalloc(struct uk_alloc *a, size_t size); -void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, size_t size); +void *uk_malloc_ifmalloc(struct uk_alloc *a, __sz size); +void *uk_realloc_ifmalloc(struct uk_alloc *a, void *ptr, __sz size); int uk_posix_memalign_ifmalloc(struct uk_alloc *a, void **memptr, - size_t align, size_t size); + __sz align, __sz size); void uk_free_ifmalloc(struct uk_alloc *a, void *ptr); #endif /* Functionality that is provided based on malloc() and posix_memalign() */ 
-void *uk_calloc_compat(struct uk_alloc *a, size_t num, size_t len); -void *uk_realloc_compat(struct uk_alloc *a, void *ptr, size_t size); -void *uk_memalign_compat(struct uk_alloc *a, size_t align, size_t len); +void *uk_calloc_compat(struct uk_alloc *a, __sz num, __sz len); +void *uk_realloc_compat(struct uk_alloc *a, void *ptr, __sz size); +void *uk_memalign_compat(struct uk_alloc *a, __sz align, __sz len); void *uk_palloc_compat(struct uk_alloc *a, unsigned long num_pages); void uk_pfree_compat(struct uk_alloc *a, void *ptr, unsigned long num_pages); long uk_alloc_pavailmem_compat(struct uk_alloc *a); @@ -112,7 +112,7 @@ static inline void _uk_alloc_stats_refresh_minmax(struct uk_alloc_stats *stats) /* NOTE: Please do not use this function directly */ static inline void _uk_alloc_stats_count_alloc(struct uk_alloc_stats *stats, - void *ptr, size_t size) + void *ptr, __sz size) { /* TODO: SMP safety */ uk_preempt_disable(); @@ -131,7 +131,7 @@ static inline void _uk_alloc_stats_count_alloc(struct uk_alloc_stats *stats, /* NOTE: Please do not use this function directly */ static inline void _uk_alloc_stats_count_free(struct uk_alloc_stats *stats, - void *ptr, size_t size) + void *ptr, __sz size) { /* TODO: SMP safety */ uk_preempt_disable(); @@ -170,13 +170,13 @@ static inline void _uk_alloc_stats_count_free(struct uk_alloc_stats *stats, } while (0) #define uk_alloc_stats_count_palloc(a, ptr, num_pages) \ uk_alloc_stats_count_alloc((a), (ptr), \ - ((size_t) (num_pages)) << __PAGE_SHIFT) + ((__sz) (num_pages)) << __PAGE_SHIFT) #define uk_alloc_stats_count_enomem(a, size) \ uk_alloc_stats_count_alloc((a), NULL, (size)) #define uk_alloc_stats_count_penomem(a, num_pages) \ uk_alloc_stats_count_enomem((a), \ - ((size_t) (num_pages)) << __PAGE_SHIFT) + ((__sz) (num_pages)) << __PAGE_SHIFT) /* Note: if ptr is NULL, nothing is counted */ #define uk_alloc_stats_count_free(a, ptr, freed_size) \ @@ -187,7 +187,7 @@ static inline void _uk_alloc_stats_count_free(struct uk_alloc_stats *stats, } while (0) #define uk_alloc_stats_count_pfree(a, ptr, num_pages) \ uk_alloc_stats_count_free((a), (ptr), \ - ((size_t) (num_pages)) << __PAGE_SHIFT) + ((__sz) (num_pages)) << __PAGE_SHIFT) #define uk_alloc_stats_reset(a) \ memset(&(a)->_stats, 0, sizeof((a)->_stats)) diff --git a/lib/ukalloc/libstats.c b/lib/ukalloc/libstats.c index 14d91e8a4..c6864a906 100644 --- a/lib/ukalloc/libstats.c +++ b/lib/ukalloc/libstats.c @@ -53,10 +53,10 @@ static inline struct uk_alloc *_uk_alloc_get_actual_default(void) } #define WATCH_STATS_START(p) \ - ssize_t _before_mem_use; \ - size_t _before_nb_allocs; \ - size_t _before_tot_nb_allocs; \ - size_t _before_nb_enomem; \ + __ssz _before_mem_use; \ + __sz _before_nb_allocs; \ + __sz _before_tot_nb_allocs; \ + __sz _before_nb_enomem; \ \ uk_preempt_disable(); \ _before_mem_use = (p)->_stats.cur_mem_use; \ @@ -66,8 +66,8 @@ static inline struct uk_alloc *_uk_alloc_get_actual_default(void) #define WATCH_STATS_END(p, nb_allocs_diff, nb_enomem_diff, \ mem_use_diff, alloc_size) \ - size_t _nb_allocs = (p)->_stats.tot_nb_allocs \ - - _before_tot_nb_allocs; \ + __sz _nb_allocs = (p)->_stats.tot_nb_allocs \ + - _before_tot_nb_allocs; \ \ /* NOTE: We assume that an allocator call does at * most one allocation. 
Otherwise we cannot currently @@ -77,9 +77,9 @@ static inline struct uk_alloc *_uk_alloc_get_actual_default(void) \ *(mem_use_diff) = (p)->_stats.cur_mem_use \ - _before_mem_use; \ - *(nb_allocs_diff) = (ssize_t) (p)->_stats.cur_nb_allocs \ + *(nb_allocs_diff) = (__ssz) (p)->_stats.cur_nb_allocs \ - _before_nb_allocs; \ - *(nb_enomem_diff) = (ssize_t) (p)->_stats.nb_enomem \ + *(nb_enomem_diff) = (__ssz) (p)->_stats.nb_enomem \ - _before_nb_enomem; \ if (_nb_allocs > 0) \ *(alloc_size) = (p)->_stats.last_alloc_size; \ @@ -88,10 +88,10 @@ static inline struct uk_alloc *_uk_alloc_get_actual_default(void) uk_preempt_enable(); static inline void update_stats(struct uk_alloc_stats *stats, - ssize_t nb_allocs_diff, - ssize_t nb_enomem_diff, - ssize_t mem_use_diff, - size_t last_alloc_size) + __ssz nb_allocs_diff, + __ssz nb_enomem_diff, + __ssz mem_use_diff, + __sz last_alloc_size) { uk_preempt_disable(); if (nb_allocs_diff >= 0) @@ -113,11 +113,11 @@ static inline void update_stats(struct uk_alloc_stats *stats, uk_preempt_enable(); } -static void *wrapper_malloc(struct uk_alloc *a, size_t size) +static void *wrapper_malloc(struct uk_alloc *a, __sz size) { struct uk_alloc *p = _uk_alloc_get_actual_default(); - ssize_t nb_allocs, mem_use, nb_enomem; - size_t alloc_size; + __ssz nb_allocs, mem_use, nb_enomem; + __sz alloc_size; void *ret; UK_ASSERT(p); @@ -132,11 +132,11 @@ static void *wrapper_malloc(struct uk_alloc *a, size_t size) return ret; } -static void *wrapper_calloc(struct uk_alloc *a, size_t nmemb, size_t size) +static void *wrapper_calloc(struct uk_alloc *a, __sz nmemb, __sz size) { struct uk_alloc *p = _uk_alloc_get_actual_default(); - ssize_t nb_allocs, mem_use, nb_enomem; - size_t alloc_size; + __ssz nb_allocs, mem_use, nb_enomem; + __sz alloc_size; void *ret; UK_ASSERT(p); @@ -151,11 +151,11 @@ static void *wrapper_calloc(struct uk_alloc *a, size_t nmemb, size_t size) } static int wrapper_posix_memalign(struct uk_alloc *a, void **memptr, - size_t align, size_t size) + __sz align, __sz size) { struct uk_alloc *p = _uk_alloc_get_actual_default(); - ssize_t nb_allocs, mem_use, nb_enomem; - size_t alloc_size; + __ssz nb_allocs, mem_use, nb_enomem; + __sz alloc_size; int ret; UK_ASSERT(p); @@ -169,11 +169,11 @@ static int wrapper_posix_memalign(struct uk_alloc *a, void **memptr, return ret; } -static void *wrapper_memalign(struct uk_alloc *a, size_t align, size_t size) +static void *wrapper_memalign(struct uk_alloc *a, __sz align, __sz size) { struct uk_alloc *p = _uk_alloc_get_actual_default(); - ssize_t nb_allocs, mem_use, nb_enomem; - size_t alloc_size; + __ssz nb_allocs, mem_use, nb_enomem; + __sz alloc_size; void *ret; UK_ASSERT(p); @@ -187,11 +187,11 @@ static void *wrapper_memalign(struct uk_alloc *a, size_t align, size_t size) return ret; } -static void *wrapper_realloc(struct uk_alloc *a, void *ptr, size_t size) +static void *wrapper_realloc(struct uk_alloc *a, void *ptr, __sz size) { struct uk_alloc *p = _uk_alloc_get_actual_default(); - ssize_t nb_allocs, mem_use, nb_enomem; - size_t alloc_size; + __ssz nb_allocs, mem_use, nb_enomem; + __sz alloc_size; void *ret; UK_ASSERT(p); @@ -208,8 +208,8 @@ static void *wrapper_realloc(struct uk_alloc *a, void *ptr, size_t size) static void wrapper_free(struct uk_alloc *a, void *ptr) { struct uk_alloc *p = _uk_alloc_get_actual_default(); - ssize_t nb_allocs, mem_use, nb_enomem; - size_t alloc_size; + __ssz nb_allocs, mem_use, nb_enomem; + __sz alloc_size; UK_ASSERT(p); @@ -223,8 +223,8 @@ static void wrapper_free(struct uk_alloc *a, 
void *ptr) static void *wrapper_palloc(struct uk_alloc *a, unsigned long num_pages) { struct uk_alloc *p = _uk_alloc_get_actual_default(); - ssize_t nb_allocs, mem_use, nb_enomem; - size_t alloc_size; + __ssz nb_allocs, mem_use, nb_enomem; + __sz alloc_size; void *ret; UK_ASSERT(p); @@ -242,8 +242,8 @@ static void wrapper_pfree(struct uk_alloc *a, void *ptr, unsigned long num_pages) { struct uk_alloc *p = _uk_alloc_get_actual_default(); - ssize_t nb_allocs, mem_use, nb_enomem; - size_t alloc_size; + __ssz nb_allocs, mem_use, nb_enomem; + __sz alloc_size; UK_ASSERT(p); @@ -257,7 +257,7 @@ static void wrapper_pfree(struct uk_alloc *a, void *ptr, /* The following interfaces do not change allocation statistics, * this is why we just forward the calls */ -static int wrapper_addmem(struct uk_alloc *a __unused, void *base, size_t size) +static int wrapper_addmem(struct uk_alloc *a __unused, void *base, __sz size) { struct uk_alloc *p = _uk_alloc_get_actual_default(); @@ -265,7 +265,7 @@ static int wrapper_addmem(struct uk_alloc *a __unused, void *base, size_t size) return uk_alloc_addmem(p, base, size); } -static size_t wrapper_maxalloc(struct uk_alloc *a __unused) +static __ssz wrapper_maxalloc(struct uk_alloc *a __unused) { struct uk_alloc *p = _uk_alloc_get_actual_default(); @@ -273,7 +273,7 @@ static size_t wrapper_maxalloc(struct uk_alloc *a __unused) return uk_alloc_maxalloc(p); } -static size_t wrapper_availmem(struct uk_alloc *a __unused) +static __ssz wrapper_availmem(struct uk_alloc *a __unused) { struct uk_alloc *p = _uk_alloc_get_actual_default(); diff --git a/lib/ukallocregion/region.c b/lib/ukallocregion/region.c index 685c3993a..6207bd193 100644 --- a/lib/ukallocregion/region.c +++ b/lib/ukallocregion/region.c @@ -47,6 +47,9 @@ * an introduction to region-based memory management. */ +#include +#include +#include #include #include #include /* round_pgup() */ diff --git a/plat/common/include/pci/pci_bus.h b/plat/common/include/pci/pci_bus.h index 86cab11b8..485420f7e 100644 --- a/plat/common/include/pci/pci_bus.h +++ b/plat/common/include/pci/pci_bus.h @@ -64,6 +64,8 @@ #ifndef __UKPLAT_COMMON_PCI_BUS_H__ #define __UKPLAT_COMMON_PCI_BUS_H__ +#include +#include #include #include #include diff --git a/plat/common/memory.c b/plat/common/memory.c index ddcb7084d..4245c637d 100644 --- a/plat/common/memory.c +++ b/plat/common/memory.c @@ -32,6 +32,8 @@ #include #include +#include +#include static struct uk_alloc *plat_allocator; diff --git a/plat/xen/gnttab.c b/plat/xen/gnttab.c index 4f28df991..fd0565722 100644 --- a/plat/xen/gnttab.c +++ b/plat/xen/gnttab.c @@ -17,6 +17,7 @@ **************************************************************************** */ #include +#include #ifdef DBGGNT #include #endif diff --git a/plat/xen/memory.c b/plat/xen/memory.c index b390aab62..970465b99 100644 --- a/plat/xen/memory.c +++ b/plat/xen/memory.c @@ -32,6 +32,8 @@ */ #include +#include +#include #include #include diff --git a/plat/xen/x86/gnttab.c b/plat/xen/x86/gnttab.c index b09d52bf9..7a647481b 100644 --- a/plat/xen/x86/gnttab.c +++ b/plat/xen/x86/gnttab.c @@ -24,6 +24,7 @@ /* Taken from Mini-OS */ #include +#include #include #include #include -- 2.39.5
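
To make the intent of the type switch concrete, the following is a minimal, hypothetical sketch of libc-side glue that includes <uk/alloc.h> directly; it is not part of this patch. It relies only on wrappers visible in the hunks above (uk_alloc_get_default(), uk_malloc(), uk_free()) and on the __sz type from <uk/arch/types.h>, and it assumes that __sz and the libc's own size_t have the same width on the target.

/* Hypothetical libc-side malloc glue (illustration only).
 * <uk/alloc.h> now only needs Unikraft-internal type definitions, so
 * including it from libc code does not pull in a second set of
 * size_t/ssize_t/NULL declarations; the libc keeps control over its
 * own declaration set.
 */
#include <uk/alloc.h> /* uk_malloc()/uk_free(), declared with __sz */
#include <stddef.h>   /* the libc's own size_t */

void *malloc(size_t size)
{
	/* Assumed: __sz and size_t are the same unsigned word size here,
	 * so the cast merely marks the libc/Unikraft type boundary.
	 */
	return uk_malloc(uk_alloc_get_default(), (__sz)size);
}

void free(void *ptr)
{
	uk_free(uk_alloc_get_default(), ptr);
}

The design choice follows the commit message: with the API header expressed in Unikraft-internal types, a libc can wrap or re-export the ukalloc interface without conflicting type definitions, which is what the per-library statistics in the follow-up commit build on.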