///////////////////////////////////////////////////////////////////////////////////
// File    : ctx_handler.c
// Date    : 01/04/2012
// Authors : alain greiner & joel porquet
// Copyright (c) UPMC-LIP6
///////////////////////////////////////////////////////////////////////////////////

#include
#include
#include
#include
#include
#include
#include
#include

extern void _task_switch(unsigned int *, unsigned int *);

/////////////////////////////////////////////////////////////////////////////////
// This function performs a context switch between the running task
// and another task, using a round-robin scheduling policy between all
// tasks allocated to a given processor (static allocation).
// It selects the next runnable task to resume execution.
// If the only runnable task is the current task, it returns without a context switch.
// If there is no runnable task, the scheduler switches to the default "idle" task.
/////////////////////////////////////////////////////////////////////////////////
// Implementation note:
// The return address contained in $31 is saved in the current task context
// (in the ctx[31] slot), and the function actually returns to the address
// contained in the ctx[31] slot of the next task context.
/////////////////////////////////////////////////////////////////////////////////
void _ctx_switch()
{
    unsigned int gpid       = _get_procid();
    unsigned int cluster_xy = gpid / NB_PROCS_MAX;
    unsigned int lpid       = gpid % NB_PROCS_MAX;

    // get scheduler address
    static_scheduler_t* psched = (static_scheduler_t*)_get_sched();

    // get number of tasks allocated to scheduler
    unsigned int tasks = psched->tasks;

    // get current task index
    unsigned int curr_task_id = psched->current;

    // select the next task using a round-robin policy
    unsigned int next_task_id;
    unsigned int tid;
    unsigned int found = 0;
    for (tid = curr_task_id + 1; tid < curr_task_id + 1 + tasks; tid++)
    {
        next_task_id = tid % tasks;
        // test if the task is runnable
        if ( psched->context[next_task_id][CTX_RUN_ID] )
        {
            found = 1;
            break;
        }
    }

    // launch the "idle" task if there is no runnable task
    if (found == 0)
    {
        next_task_id = IDLE_TASK_INDEX;
    }

#if GIET_DEBUG_SWITCH
    unsigned int x = cluster_xy >> Y_WIDTH;
    unsigned int y = cluster_xy & ((1<<Y_WIDTH) - 1);
    _printf("\n[GIET DEBUG SWITCH] task switch (%d) -> (%d) on processor[%d,%d,%d] at cycle %d\n",
            curr_task_id, next_task_id, x, y, lpid, _get_proctime() );
#endif

    if (curr_task_id != next_task_id)  // actual task switch required
    {
        unsigned int* curr_ctx_vaddr = &(psched->context[curr_task_id][0]);
        unsigned int* next_ctx_vaddr = &(psched->context[next_task_id][0]);

        // reset the timer counter: in each cluster,
        // the NB_PROCS_MAX timers are the system timers (TICK)
#if USE_XCU
        _xcu_timer_reset_cpt( cluster_xy, lpid );
#else
        _timer_reset_cpt( cluster_xy, lpid );
#endif

        // set current task index
        psched->current = next_task_id;

        // make the context switch
        _task_switch(curr_ctx_vaddr, next_ctx_vaddr);
    }
} // end _ctx_switch()
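/////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file): it only shows how a
// periodic TICK interrupt service routine could trigger the round-robin
// election implemented by _ctx_switch(). The routine name _isr_tick_sketch
// and its argument-less prototype are assumptions made for this example;
// interrupt acknowledgement and ISR registration are handled elsewhere.
/////////////////////////////////////////////////////////////////////////////////
void _isr_tick_sketch()
{
    // _ctx_switch() saves the current task context, elects the next runnable
    // task (or the "idle" task), resets the TICK counter, and resumes
    // execution in the elected task.
    _ctx_switch();
}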
/////////////////////////////////////////////////////////////////////////////////////
// This function is executed as the "idle" task when no other task can be executed.
/////////////////////////////////////////////////////////////////////////////////////
void _idle_task()
{
    while(1)
    {
        unsigned int count = GIET_IDLE_TASK_PERIOD;

        // countdown loop (busy waiting)
        asm volatile(
                "move $3, %0              \n"
                "_idle_task_loop:         \n"
                "addi $3, $3, -1          \n"
                "bnez $3, _idle_task_loop \n"
                "nop                      \n"
                :
                : "r"(count)
                : "$3" );

        // warning message
        unsigned int gpid       = _get_procid();
        unsigned int cluster_xy = gpid / NB_PROCS_MAX;
        unsigned int lpid       = gpid % NB_PROCS_MAX;
        unsigned int x          = cluster_xy >> Y_WIDTH;
        unsigned int y          = cluster_xy & ((1<<Y_WIDTH) - 1);
        _printf("\n[GIET WARNING] Processor[%d,%d,%d] still idle at cycle %d\n",
                x, y, lpid, _get_proctime() );
    }
} // end _idle_task()
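/////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file): it shows how a boot-time
// initialisation could prepare the "idle" task context so that the first
// _task_switch() toward it "returns" into _idle_task(), following the
// implementation note of _ctx_switch() (the resume address lives in the ctx[31]
// slot). The helper name, the stack-pointer slot index (29) and the stack_top
// argument are assumptions made for this example; the real initialisation is
// performed by the boot code.
/////////////////////////////////////////////////////////////////////////////////
void _idle_ctx_init_sketch( static_scheduler_t* psched,
                            unsigned int        stack_top )
{
    // slot 29 holds $29 (stack pointer), slot 31 holds $31 (resume address)
    psched->context[IDLE_TASK_INDEX][29] = stack_top;
    psched->context[IDLE_TASK_INDEX][31] = (unsigned int)&_idle_task;
}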