From 516a646308cdb697c6bb148c13c98bbce4d34f44 Mon Sep 17 00:00:00 2001
From: Antti Kantee <[e-mail address lost in extraction]>
Date: Mon, 20 Apr 2015 19:49:26 +0000
Subject: [PATCH] Match baremetal "curlwp" with xen i.e. store thread pointer
 at the end of the stack.

This is done merely to minimize diffs for now, and will be fixed with tls.
---
 include/bmk-core/sched.h                 |  3 +-
 platform/baremetal/arch/i386/cpu_sched.c | 19 ++++++++-
 platform/baremetal/include/bmk/kernel.h  |  3 +-
 platform/baremetal/kernel.c              | 50 +++++++++++++++++++-----
 platform/baremetal/sched.c               | 18 ++++-----
 5 files changed, 71 insertions(+), 22 deletions(-)

diff --git a/include/bmk-core/sched.h b/include/bmk-core/sched.h
index edc127c..4193f75 100644
--- a/include/bmk-core/sched.h
+++ b/include/bmk-core/sched.h
@@ -61,7 +61,7 @@ int bmk_sched_nanosleep_abstime(bmk_time_t);
 void *bmk_sched_gettls(struct bmk_thread *, unsigned int);
 void bmk_sched_settls(struct bmk_thread *, unsigned int, void *);
 
-void bmk_cpu_sched_create(struct bmk_tcb *,
+void bmk_cpu_sched_create(struct bmk_thread *, struct bmk_tcb *,
 	void (*)(void *), void *, void *, unsigned long);
 void bmk_cpu_sched_switch(struct bmk_tcb *, struct bmk_tcb *);
 
@@ -70,6 +70,7 @@ void bmk_sched_set_hook(void (*)(void *, void *));
 struct bmk_thread *bmk_sched_init_mainlwp(void *);
 
 struct bmk_thread *bmk_sched_current(void);
+struct bmk_thread *bmk_cpu_sched_current(void);
 int *bmk_sched_geterrno(void);
 const char *bmk_sched_threadname(struct bmk_thread *);
 
diff --git a/platform/baremetal/arch/i386/cpu_sched.c b/platform/baremetal/arch/i386/cpu_sched.c
index 3481531..245cca3 100644
--- a/platform/baremetal/arch/i386/cpu_sched.c
+++ b/platform/baremetal/arch/i386/cpu_sched.c
@@ -38,8 +38,11 @@
  */
 
 #include <bmk/types.h>
+#include <bmk/kernel.h>
 
 #include <bmk-core/sched.h>
+#include <bmk-core/core.h>
+
 static void
 stack_push(void **stackp, unsigned long value)
 {
@@ -51,12 +54,17 @@ stack_push(void **stackp, unsigned long value)
 }
 
 void
-bmk_cpu_sched_create(struct bmk_tcb *tcb,
+bmk_cpu_sched_create(struct bmk_thread *thread, struct bmk_tcb *tcb,
 	void
 (*f)(void *), void *arg,
 	void *stack_base, unsigned long stack_size)
 {
 	void *stack_top = (char *)stack_base + stack_size;
 
+	bmk_assert(stack_size == BMK_THREAD_STACKSIZE);
+
+	/* Save pointer to the thread on the stack, used by current macro */
+	*(unsigned long *)stack_base = (unsigned long)thread;
+
 	/* these values are used by bmk_cpu_sched_bouncer() */
 	stack_push(&stack_top, (unsigned long)f);
 	stack_push(&stack_top, (unsigned long)arg);
@@ -64,3 +72,12 @@ bmk_cpu_sched_create(struct bmk_tcb *tcb,
 	tcb->btcb_sp = (unsigned long)stack_top;
 	tcb->btcb_ip = (unsigned long)bmk_cpu_sched_bouncer;
 }
+
+struct bmk_thread *
+bmk_cpu_sched_current(void)
+{
+	struct bmk_thread **current;
+
+	current = (void *)((unsigned long)&current & ~(bmk_stacksize-1));
+	return *current;
+};
diff --git a/platform/baremetal/include/bmk/kernel.h b/platform/baremetal/include/bmk/kernel.h
index dd0515f..26d9f7a 100644
--- a/platform/baremetal/include/bmk/kernel.h
+++ b/platform/baremetal/include/bmk/kernel.h
@@ -5,7 +5,8 @@
 #include <bmk/types.h>
 
 #define MEMSTART 0x100000
-#define PAGE_SIZE 0x1000
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1<<PAGE_SHIFT)
[... text lost in extraction: the remaining kernel.h context lines and the beginning of the platform/baremetal/kernel.c diff (including the head of the new bmk_allocpg()) were stripped with a "<...>" span; the patch resumes mid-function below ...]
+	if (currentpg*PAGE_SIZE > bmk_memsize)
+		return NULL;
+
+	return (void *)rv;
+}
+
+/*
+ * Allocate a 2^n chunk of pages, aligned at 2^n. This is currently
+ * for the benefit of thread stack allocation, and should be going
+ * away in some time when the migration to TLS is complete.
+ */
+static void *
+alignedpgalloc(int shift)
+{
+	struct stackcache *sc;
+	int align = 1<<shift;
[... lines lost in extraction: remaining local declarations, the stack-cache fast path, and removed lines from the tail of the old bmk_allocpg() ...]
-	if (current*PAGE_SIZE > bmk_memsize)
-		return NULL;
+	if (align > MAXPAGEALIGN)
+		align = MAXPAGEALIGN;
 
-	return (void *)rv;
+	/* need to leave this much space until the next aligned alloc */
+	alignedoff = (bmk_membase + currentpg*PAGE_SIZE) % (align*PAGE_SIZE);
+	if (alignedoff)
+		currentpg += align - (alignedoff>>PAGE_SHIFT);
+
+	rv = bmk_allocpg(1<<shift);
[... lines lost in extraction: end of alignedpgalloc(), any intervening kernel.c hunks, up to the memory-setup context below ...]
 	bmk_membase = mbm->addr + ossize;
 	bmk_memsize = memsize - ossize;
 
+	bmk_assert((bmk_membase & (PAGE_SIZE-1)) == 0);
+
 	return 0;
 }
diff --git a/platform/baremetal/sched.c b/platform/baremetal/sched.c
index fd46ed8..67159d2 100644
--- a/platform/baremetal/sched.c
+++ b/platform/baremetal/sched.c
@@ -108,14 +108,6 @@ static TAILQ_HEAD(, bmk_thread) threads = TAILQ_HEAD_INITIALIZER(threads);
 
 static void (*scheduler_hook)(void *, void *);
 
-static struct bmk_thread *current_thread = NULL;
-struct bmk_thread *
-bmk_sched_current(void)
-{
-
-	return current_thread;
-}
-
 static int
 is_runnable(struct bmk_thread *thread)
 {
@@ -166,10 +158,15 @@ sched_switch(struct bmk_thread *prev, struct bmk_thread *next)
 	if (scheduler_hook)
 		scheduler_hook(prev->bt_cookie, next->bt_cookie);
 
-	current_thread = next;
 	bmk_cpu_sched_switch(&prev->bt_tcb, &next->bt_tcb);
 }
 
+struct bmk_thread *
+bmk_sched_current(void)
+{
+
+	return bmk_cpu_sched_current();
+}
 void
 bmk_sched_dumpqueue(void)
 
@@ -313,7 +310,8 @@ bmk_sched_create(const char *name, void *cookie, int joinable,
 	if (joinable)
 		thread->bt_flags |= THREAD_MUSTJOIN;
 
-	bmk_cpu_sched_create(&thread->bt_tcb, f, data, stack_base, stack_size);
+	bmk_cpu_sched_create(thread, &thread->bt_tcb, f, data,
+	    stack_base, stack_size);
 
 	thread->bt_cookie = cookie;
-- 
2.39.5