2008-02-23 00:25:31 +01:00
|
|
|
#include <stdint.h>
|
2008-02-24 20:07:46 +01:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <string.h>
|
|
|
|
|
|
|
|
#include "atomic.h"
|
|
|
|
#include "memalloc.h"
|
|
|
|
|
|
|
|
#include "rtos/context.h"
|
|
|
|
#include "rtos/spinlock.h"
|
|
|
|
|
|
|
|
/* linked list of ready contexts, #1 is the running thread */
/* NOTE: referenced by name from SWI_Handler's inline asm — do not rename */
struct context *run_queue = NULL;

/* pointer to the running thread */
/* NOTE: loaded via "ldr r0, =current_context" in SWI_Handler — do not rename */
struct context *current_context = NULL;
|
|
|
|
|
|
|
|
/*
 * SWI_Handler:
 * Entry with SVC mode, IRQs are disabled, FIQ is enabled
 *
 * Performs the context switch: saves the user-mode register set of
 * current_context, selects the new head of run_queue, then restores
 * that thread's register set.  __restore_ctx is also used as the
 * entry point when the very first thread is started.
 */
__attribute__((naked)) void SWI_Handler(void)
{
	/* store register context to current_context */
	asm volatile (
		/* r0-1 -> svc_stack (freed up as scratch registers) */
		"stmdb sp!, {r0-r1} \n\t"

		/* get top of struct register_context
		 * (#68 = 17 words: r0-r15 + cpsr — verify against context.h) */
		"ldr r0, =current_context \n\t"
		"ldr r0, [r0] \n\t"
		"add r0, r0, #68 \n\t"

		/* save usermode cpsr & r2-14 ('^' selects the user-mode
		 * banked registers; the nop gives the required separation
		 * after an stm/ldm of user registers) */
		"mrs r1, spsr \n\t"
		"stmdb r0, {r1-r14}^ \n\t"
		"nop \n\t"
		/* step below the 14 words just stored (no writeback above) */
		"sub r0, r0, #56 \n\t"

		/* save r0-1 (parked on the svc stack) and svc_lr (= pc) */
		"ldmia sp!, {r1, r2} \n\t"
		"stmdb r0!, {r1, r2, r14} \n\t"
	);

	/* we're no longer #1 in run_queue, switch to new #1 */
	if (current_context != run_queue)
		current_context = run_queue;

	/* restore register context from current_context */
	asm volatile (
		/* label for first thread creation */
		".global __restore_ctx \n\t"
		"__restore_ctx: \n\t"

		/* get pointer to struct register_context */
		"ldr r0, =current_context \n\t"
		"ldr r0, [r0] \n\t"

		/* get values of r0-1 and pc (= svc_lr); park r0-1 on the
		 * svc stack until everything else is restored */
		"ldmia r0!, {r1-r2,r14} \n\t"
		"stmdb sp!, {r1-r2} \n\t"

		/* restore usermode cpsr & r2-14 (banked access, hence nop) */
		"ldmia r0, {r1-r14}^ \n\t"
		"nop \n\t"
		"msr spsr, r1 \n\t"

		/* get r0-1 from svc_stack, jump back
		 * (movs pc, lr also copies spsr back into cpsr) */
		"ldmia sp!, {r0, r1} \n\t"
		"movs pc, lr \n\t"
	);
}
|
|
|
|
|
|
|
|
|
|
|
|
/* inserts context into run_queue */
|
|
|
|
static void isr_context_ready(struct context *ctx)
|
|
|
|
{
|
|
|
|
struct context *q = run_queue;
|
|
|
|
struct context **qprev = &run_queue;
|
|
|
|
|
|
|
|
while (q && (q->priority <= ctx->priority)) {
|
|
|
|
qprev = &q->run_queue;
|
|
|
|
q = q->run_queue;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx->run_queue = q;
|
|
|
|
*qprev = ctx;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * isr_context_switch: yield the CPU via SWI.
 * SWI_Handler saves this thread's registers and resumes whatever
 * context is #1 in run_queue.  Execution continues here once this
 * thread is scheduled again; the return value is the state the
 * thread was woken with (set by isr_context_signal/interrupt).
 */
static uint8_t isr_context_switch(void)
{
	// TODO: when called from ISR.. this will go boom :)
	asm volatile ("swi");

	return current_context->state;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * __isr_context_wait: block the calling thread.
 *
 * Releases 'lock' for the duration of the sleep, unlinks the current
 * thread from the head of run_queue (the running thread is always #1,
 * see SWI_Handler), marks it with 'sleepstate' and yields via SWI.
 * After being woken it re-acquires 'lock' and returns the wakeup
 * state (CONTEXT_READY or CONTEXT_INTERRUPTED).
 *
 * Must be called with IRQs disabled and 'lock' held; the statement
 * order below is load-bearing.
 */
static uint8_t __isr_context_wait(struct spinlock *lock, uint8_t sleepstate)
{
	/* drop the lock so a signaller can proceed */
	isr_spinlock_unlock(lock);

	/* remove ourselves from the head of the run queue */
	run_queue = current_context->run_queue;
	current_context->state = sleepstate;

	/* switch away; returns only after we've been made ready again */
	uint8_t retval = isr_context_switch();

	/* re-acquire the lock on behalf of the caller */
	isr_spinlock_lock(lock);

	return retval;
}
|
|
|
|
|
|
|
|
uint8_t isr_context_wait(struct spinlock *lock)
|
|
|
|
{
|
|
|
|
return __isr_context_wait(lock, CONTEXT_SLEEP);
|
|
|
|
}
|
|
|
|
|
|
|
|
uint8_t context_wait(struct spinlock *lock)
|
|
|
|
{
|
|
|
|
disable_irqs();
|
|
|
|
uint8_t retval = __isr_context_wait(lock, CONTEXT_SLEEP);
|
|
|
|
restore_irqs();
|
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint8_t context_wait_queue(struct spinlock *lock, struct context **queue)
|
|
|
|
{
|
|
|
|
disable_irqs();
|
|
|
|
current_context->sleep_queue = *queue;
|
|
|
|
*queue = current_context;
|
|
|
|
|
|
|
|
uint8_t retval = __isr_context_wait(lock, CONTEXT_SLEEP_QUEUE);
|
|
|
|
restore_irqs();
|
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * context_wait_pri_queue: sleep on 'lock', inserted into *queue in
 * priority order rather than LIFO.  Returns the wakeup state.
 */
uint8_t context_wait_pri_queue(struct spinlock *lock, struct context **queue)
{
	disable_irqs();

	/* find the insertion point: skip waiters whose priority value is
	 * <= lock->priority_unlocked.
	 * NOTE(review): this compares against the lock's cached priority
	 * rather than current_context->priority — presumably the lock
	 * records the waiter's effective priority; verify against
	 * rtos/spinlock.h */
	struct context *q = *queue;
	while (q && (q->priority <= lock->priority_unlocked)) {
		queue = &q->sleep_queue;
		q = q->sleep_queue;
	}

	/* splice the current thread into the sleep queue at that point */
	current_context->sleep_queue = q;
	*queue = current_context;

	uint8_t retval = __isr_context_wait(lock, CONTEXT_SLEEP_QUEUE);
	restore_irqs();

	return retval;
}
|
|
|
|
|
|
|
|
void isr_context_signal(struct context *c)
|
|
|
|
{
|
|
|
|
if (c->state == CONTEXT_SLEEP) {
|
|
|
|
c->state = CONTEXT_READY;
|
|
|
|
isr_context_ready(c);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * context_signal: IRQ-safe wrapper around isr_context_signal().
 */
void context_signal(struct context *c)
{
	disable_irqs();

	isr_context_signal(c);

	restore_irqs();
}
|
|
|
|
|
|
|
|
uint32_t context_signal_queue(struct context **queue)
|
|
|
|
{
|
|
|
|
disable_irqs();
|
|
|
|
|
|
|
|
uint32_t retval = 0;
|
|
|
|
if (*queue) {
|
|
|
|
struct context *c = *queue;
|
|
|
|
*queue = c->sleep_queue;
|
|
|
|
c->state = CONTEXT_READY;
|
|
|
|
isr_context_ready(c);
|
|
|
|
retval = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
restore_irqs();
|
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
void isr_context_interrupt(struct context *c)
|
|
|
|
{
|
|
|
|
if (c->state == CONTEXT_SLEEP) {
|
|
|
|
c->state = CONTEXT_INTERRUPTED;
|
|
|
|
isr_context_ready(c);
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * context_interrupt: IRQ-safe wrapper around isr_context_interrupt().
 *
 * BUG FIX: this previously delegated to isr_context_signal(), which
 * wakes the thread with CONTEXT_READY — the interruption was silently
 * converted into a normal signal and the sleeper could never observe
 * CONTEXT_INTERRUPTED.  Delegate to isr_context_interrupt() to match
 * the isr/non-isr pairing used by context_signal().
 */
void context_interrupt(struct context *c)
{
	disable_irqs();
	isr_context_interrupt(c);
	restore_irqs();
}
|
|
|
|
|
|
|
|
uint32_t context_interrupt_queue(struct context *c, struct context **queue)
|
|
|
|
{
|
|
|
|
disable_irqs();
|
|
|
|
|
|
|
|
struct context *q = *queue;
|
|
|
|
while (q && (q != c)) {
|
|
|
|
queue = &q->sleep_queue;
|
|
|
|
q = q->sleep_queue;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint32_t retval = 0;
|
|
|
|
|
|
|
|
if (q) {
|
|
|
|
*queue = c->sleep_queue;
|
|
|
|
c->state = CONTEXT_INTERRUPTED;
|
|
|
|
isr_context_ready(c);
|
|
|
|
retval = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
restore_irqs();
|
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct context * create_ctx(uint32_t stacksize, uint8_t priority, void (* code)(void *arg), void *arg)
|
|
|
|
{
|
|
|
|
void *stack = static_alloc(sizeof(struct context) + stacksize);
|
|
|
|
|
|
|
|
memset(stack, 0, stacksize);
|
|
|
|
|
|
|
|
struct context *ctx = (struct context *)((uint8_t *)stack + stacksize);
|
|
|
|
|
|
|
|
ctx->regs.r0 = (uint32_t)arg;
|
|
|
|
ctx->regs.pc = (uint32_t)code;
|
|
|
|
ctx->regs.cpsr = 0x0000001F;
|
|
|
|
ctx->regs.sp = (uint32_t)ctx;
|
|
|
|
|
|
|
|
ctx->regs.r1 = 0x01010101;
|
|
|
|
ctx->regs.r2 = 0x02020202;
|
|
|
|
ctx->regs.r3 = 0x03030303;
|
|
|
|
ctx->regs.r4 = 0x04040404;
|
|
|
|
ctx->regs.r5 = 0x05050505;
|
|
|
|
ctx->regs.r6 = 0x06060606;
|
|
|
|
ctx->regs.r7 = 0x07070707;
|
|
|
|
ctx->regs.r8 = 0x08080808;
|
|
|
|
ctx->regs.r9 = 0x09090909;
|
|
|
|
ctx->regs.r10 = 0x10101010;
|
|
|
|
ctx->regs.r11 = 0x11111111;
|
|
|
|
ctx->regs.r12 = 0x12121212;
|
|
|
|
ctx->regs.r14 = 0x14141414;
|
|
|
|
|
|
|
|
ctx->stacksize = stacksize;
|
|
|
|
ctx->priority = priority;
|
|
|
|
|
|
|
|
// TODO: disable irqs?
|
|
|
|
isr_context_ready(ctx);
|
|
|
|
|
|
|
|
return ctx;
|
|
|
|
}
|
|
|
|
|