From: Antti Kantee
Date: Sat, 18 Apr 2015 16:42:16 +0000 (+0000)
Subject: Cache thread stack sized allocations.
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=6468d469c16720d4e3222263598e1a41aca94003;p=people%2Fliuw%2Frumprun.git

Cache thread stack sized allocations.
---

diff --git a/platform/baremetal/kernel.c b/platform/baremetal/kernel.c
index 1944300..1ddc582 100644
--- a/platform/baremetal/kernel.c
+++ b/platform/baremetal/kernel.c
@@ -34,25 +34,43 @@
 #include <...>
 #include <...>
 #include <...>
+#include <...>
 #include <...>
 
 unsigned long bmk_membase;
 unsigned long bmk_memsize;
 
+LIST_HEAD(, stackcache) cacheofstacks = LIST_HEAD_INITIALIZER(cacheofstacks);
+struct stackcache {
+	void *sc_stack;
+	LIST_ENTRY(stackcache) sc_entries;
+};
+
 /*
- * we don't need freepg
- * (for the humour impaired: it was a joke, on the TODO ... but really,
+ * We don't need freepg.
+ *
+ * For the humour impaired: it was a joke, on the TODO ... but really,
  * it's not that urgent since the rump kernel uses its own caching
  * allocators, so once the backing pages are allocated, they tend to
- * never get freed)
+ * never get freed. The only thing that in practical terms gets
+ * deallocated is thread stacks, and for now we simply cache those
+ * as a special case. (nb. even that holds only for native thread stacks,
+ * not pthread stacks).
  */
 
 void *
 bmk_allocpg(size_t howmany)
 {
+	struct stackcache *sc;
 	static size_t current = 0;
 	unsigned long rv;
 
+	if (howmany == 1<<BMK_THREAD_STACK_PAGE_ORDER &&
+	    (sc = LIST_FIRST(&cacheofstacks)) != NULL) {
+		LIST_REMOVE(sc, sc_entries);
+		return sc;
+	}
+
 	rv = bmk_membase + PAGE_SIZE*current;
 	current += howmany;
 	if (current*PAGE_SIZE > bmk_memsize)
@@ -72,6 +90,13 @@
 void
 bmk_platform_freepg2(void *mem, int shift)
 {
 
+	if (shift == BMK_THREAD_STACK_PAGE_ORDER) {
+		struct stackcache *sc = mem;
+
+		LIST_INSERT_HEAD(&cacheofstacks, sc, sc_entries);
+		return;
+	}
+
 	bmk_printf("WARNING: freepg2 called! (%p, %d)\n", mem, shift);
 }
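
For illustration, the pattern the patch introduces is a single-size free cache:
a freed thread stack is pushed onto a LIST, and the next allocation of exactly
one stack's worth of pages is served by popping that list. The freed block
itself holds the list linkage, so the cache needs no bookkeeping memory of its
own, and since stacks are the only allocation that gets freed in practice, one
cached size is enough. The standalone sketch below reproduces the idea outside
the bmk kernel using the BSD <sys/queue.h> macros; the PAGE_SIZE and
THREAD_STACK_PAGE_ORDER values, the malloc() fallback, and the function names
are illustrative assumptions, not definitions taken from the rumprun source.

/*
 * Standalone sketch of the stack cache above (assumed values, not rumprun's).
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE		0x1000	/* assumed page size */
#define THREAD_STACK_PAGE_ORDER	4	/* assumed: 16-page thread stacks */

/* A freed stack block is reused to hold its own list linkage. */
struct stackcache {
	LIST_ENTRY(stackcache) sc_entries;
};
static LIST_HEAD(, stackcache) cacheofstacks =
    LIST_HEAD_INITIALIZER(cacheofstacks);

/* Stand-in for bmk_allocpg(): check the cache before the backing allocator. */
static void *
alloc_pages(size_t howmany)
{
	struct stackcache *sc;

	/* Fast path: a stack-sized request pops a previously freed stack. */
	if (howmany == 1UL<<THREAD_STACK_PAGE_ORDER &&
	    (sc = LIST_FIRST(&cacheofstacks)) != NULL) {
		LIST_REMOVE(sc, sc_entries);
		return sc;
	}
	return malloc(howmany * PAGE_SIZE);	/* stand-in for the bump allocator */
}

/* Stand-in for bmk_platform_freepg2(): only stack-sized frees are cached. */
static void
free_pages(void *mem, int shift)
{

	if (shift == THREAD_STACK_PAGE_ORDER) {
		struct stackcache *sc = mem;

		LIST_INSERT_HEAD(&cacheofstacks, sc, sc_entries);
		return;
	}
	printf("WARNING: freeing non-stack allocation (%p, %d)\n", mem, shift);
}

int
main(void)
{
	void *stack = alloc_pages(1UL<<THREAD_STACK_PAGE_ORDER);

	free_pages(stack, THREAD_STACK_PAGE_ORDER);
	/* The next stack-sized allocation is served from the cache. */
	printf("reused: %s\n",
	    alloc_pages(1UL<<THREAD_STACK_PAGE_ORDER) == stack ? "yes" : "no");
	return 0;
}

Running the sketch prints "reused: yes", showing that the second stack-sized
allocation comes straight from the cache rather than from the backing
allocator, which is exactly the behaviour the patch adds for native thread
stacks.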