void *bmk_sched_gettls(struct bmk_thread *, unsigned int);
void bmk_sched_settls(struct bmk_thread *, unsigned int, void *);
-void bmk_cpu_sched_create(struct bmk_tcb *,
+void bmk_cpu_sched_create(struct bmk_thread *, struct bmk_tcb *,
void (*)(void *), void *,
void *, unsigned long);
void bmk_cpu_sched_switch(struct bmk_tcb *, struct bmk_tcb *);
struct bmk_thread *bmk_sched_init_mainlwp(void *);
struct bmk_thread *bmk_sched_current(void);
+struct bmk_thread *bmk_cpu_sched_current(void);
int *bmk_sched_geterrno(void);
const char *bmk_sched_threadname(struct bmk_thread *);
*/
#include <bmk/types.h>
+#include <bmk/kernel.h>
#include <bmk/sched.h>
+#include <bmk-core/core.h>
+
static void
stack_push(void **stackp, unsigned long value)
{
}
void
-bmk_cpu_sched_create(struct bmk_tcb *tcb,
+bmk_cpu_sched_create(struct bmk_thread *thread, struct bmk_tcb *tcb,
void (*f)(void *), void *arg,
void *stack_base, unsigned long stack_size)
{
void *stack_top = (char *)stack_base + stack_size;
+ bmk_assert(stack_size == BMK_THREAD_STACKSIZE);
+
+ /* Save pointer to the thread on the stack, used by current macro */
+ *(unsigned long *)stack_base = (unsigned long)thread;
+
/* these values are used by bmk_cpu_sched_bouncer() */
stack_push(&stack_top, (unsigned long)f);
stack_push(&stack_top, (unsigned long)arg);
tcb->btcb_sp = (unsigned long)stack_top;
tcb->btcb_ip = (unsigned long)bmk_cpu_sched_bouncer;
}
+
+struct bmk_thread *
+bmk_cpu_sched_current(void)
+{
+ struct bmk_thread **current;
+
+	current = (void *)((unsigned long)&current & ~(bmk_stacksize-1));
+ return *current;
+}
#include <bmk/types.h>
#define MEMSTART 0x100000
-#define PAGE_SIZE 0x1000
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1<<PAGE_SHIFT)
#define STACK_SIZE 0x2000
#define round_page(x) (((x) + (PAGE_SIZE-1)) & ~(PAGE_SIZE-1))
* as a special case. (nb. even that holds only for native thread stacks,
* not pthread stacks).
*/
+static size_t currentpg;
+#define MAXPAGEALIGN (1<<BMK_THREAD_STACK_PAGE_ORDER)
void *
bmk_allocpg(size_t howmany)
{
- struct stackcache *sc;
- static size_t current = 0;
unsigned long rv;
- if (howmany == 1<<BMK_THREAD_STACK_PAGE_ORDER &&
+ rv = bmk_membase + PAGE_SIZE*currentpg;
+ currentpg += howmany;
+ if (currentpg*PAGE_SIZE > bmk_memsize)
+ return NULL;
+
+ return (void *)rv;
+}
+
+/*
+ * Allocate a 2^n chunk of pages, aligned at 2^n. This is currently
+ * for the benefit of thread stack allocation, and should be going
+ * away in some time when the migration to TLS is complete.
+ */
+static void *
+alignedpgalloc(int shift)
+{
+ struct stackcache *sc;
+ int align = 1<<shift;
+ size_t alignedoff;
+ void *rv;
+
+ if (shift == BMK_THREAD_STACK_PAGE_ORDER &&
(sc = LIST_FIRST(&cacheofstacks)) != NULL) {
LIST_REMOVE(sc, sc_entries);
return sc;
}
- rv = bmk_membase + PAGE_SIZE*current;
- current += howmany;
- if (current*PAGE_SIZE > bmk_memsize)
- return NULL;
+ if (align > MAXPAGEALIGN)
+ align = MAXPAGEALIGN;
- return (void *)rv;
+ /* need to leave this much space until the next aligned alloc */
+ alignedoff = (bmk_membase + currentpg*PAGE_SIZE) % (align*PAGE_SIZE);
+ if (alignedoff)
+ currentpg += align - (alignedoff>>PAGE_SHIFT);
+
+ rv = bmk_allocpg(1<<shift);
+ if (((unsigned long)rv & (align*PAGE_SIZE-1)) != 0) {
+ bmk_printf("wanted %d aligned, got memory at %p\n",
+ align, rv);
+ bmk_platform_halt("fail");
+ }
+ return rv;
}
void *
bmk_platform_allocpg2(int shift)
{
- return bmk_allocpg(1<<shift);
+ return alignedpgalloc(shift);
}
void
bmk_membase = mbm->addr + ossize;
bmk_memsize = memsize - ossize;
+ bmk_assert((bmk_membase & (PAGE_SIZE-1)) == 0);
+
return 0;
}
static void (*scheduler_hook)(void *, void *);
-static struct bmk_thread *current_thread = NULL;
-struct bmk_thread *
-bmk_sched_current(void)
-{
-
- return current_thread;
-}
-
static int
is_runnable(struct bmk_thread *thread)
{
if (scheduler_hook)
scheduler_hook(prev->bt_cookie, next->bt_cookie);
- current_thread = next;
bmk_cpu_sched_switch(&prev->bt_tcb, &next->bt_tcb);
}
+struct bmk_thread *
+bmk_sched_current(void)
+{
+
+ return bmk_cpu_sched_current();
+}
void
bmk_sched_dumpqueue(void)
if (joinable)
thread->bt_flags |= THREAD_MUSTJOIN;
- bmk_cpu_sched_create(&thread->bt_tcb, f, data, stack_base, stack_size);
+ bmk_cpu_sched_create(thread, &thread->bt_tcb, f, data,
+ stack_base, stack_size);
thread->bt_cookie = cookie;