/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner (2016)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_switch.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <scheduler.h>

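///////////////////////////////////////////////////////////////////////////////////////////
// This scheduler implements, for each core, a two-level round-robin policy:
// kernel threads and user threads are registered in two separate lists, kernel threads
// have priority over user threads, and the idle thread is selected only when no other
// thread is runnable (see sched_select() below).
///////////////////////////////////////////////////////////////////////////////////////////
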
///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;            // allocated in kernel_init.c file
extern uint32_t             switch_save_sr[];     // allocated in kernel_init.c file

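///////////////////////////////////////////////////////////////////////////////////////////
// This function initialises the scheduler attached to the <core> argument: it resets
// the threads counters, the current / idle / last-scheduled pointers, the two threads
// lists roots, and the req_ack_pending flag.
///////////////////////////////////////////////////////////////////////////////////////////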
////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr   = 0;
    sched->k_threads_nr   = 0;

    sched->current        = CURRENT_THREAD;
    sched->idle           = NULL;             // initialized in kernel_init()
    sched->u_last         = NULL;             // initialized in sched_register_thread()
    sched->k_last         = NULL;             // initialized in sched_register_thread()

    // initialise threads lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

    sched->req_ack_pending = false;           // no pending request

}  // end sched_init()

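///////////////////////////////////////////////////////////////////////////////////////////
// This function registers a new thread in the kernel or user list of the scheduler
// attached to the <core> argument, depending on the thread type, and updates the
// corresponding threads counter. The scheduler lock protects the lists.
///////////////////////////////////////////////////////////////////////////////////////////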
////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    hal_fence();
    spinlock_unlock( &sched->lock );

}  // end sched_register_thread()

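///////////////////////////////////////////////////////////////////////////////////////////
// This function implements the core scheduling policy: it scans the kernel threads list
// in round-robin order, starting from the last scheduled kernel thread, then scans the
// user threads list in the same way, and returns the idle thread when no other thread
// is runnable (i.e. has a null blocking vector).
///////////////////////////////////////////////////////////////////////////////////////////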
//////////////////////////////////////////////
thread_t * sched_select( scheduler_t * sched )
{
    thread_t     * thread;
    list_entry_t * current;
    list_entry_t * last;
    list_entry_t * root;
    bool_t         done;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    // first : scan the kernel threads list if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        root    = &sched->k_root;
        last    = sched->k_last;
        current = last;
        done    = false;

        while( done == false )
        {
            // get next entry in kernel list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // select kernel thread if non blocked and non IDLE
            if( (thread->blocked == 0)  && (thread->type != THREAD_IDLE) )
            {
                spinlock_unlock( &sched->lock );
                return thread;
            }
        } // end loop on kernel threads
    } // end if kernel threads

    // second : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        root    = &sched->u_root;
        last    = sched->u_last;
        current = last;
        done    = false;

        while( done == false )
        {
            // get next entry in user list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if non blocked
            if( thread->blocked == 0 )
            {
                spinlock_unlock( &sched->lock );
                return thread;
            }
        } // end loop on user threads
    } // end if user threads

    // third : return idle thread if no other runnable thread
    spinlock_unlock( &sched->lock );
    return sched->idle;

}  // end sched_select()

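///////////////////////////////////////////////////////////////////////////////////////////
// This function is called by sched_yield(). It scans all user threads attached to the
// core scheduler and handles two pending requests:
// - REQ_ACK   : check that the target thread is blocked, decrement the response counter,
//               and reset the flag in the thread descriptor.
// - REQ_DELETE: unlink the thread from the scheduler, release its resources, and destroy
//               the owner process when this thread was its last one.
///////////////////////////////////////////////////////////////////////////////////////////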
///////////////////////////////////////////
void sched_handle_signals( core_t * core )
{

    list_entry_t * iter;
    list_entry_t * root;
    thread_t     * thread;
    process_t    * process;

    // get pointer on scheduler
    scheduler_t  * sched = &core->scheduler;

    // get pointer on user threads root
    root = &sched->u_root;

    // take lock protecting threads lists
    spinlock_lock( &sched->lock );

    // We use a while to scan the user threads, to control the iterator increment,
    // because some threads will be destroyed, and we cannot use a LIST_FOREACH()

    // initialise list iterator
    iter = root->next;

    // scan all user threads
    while( iter != root )
    {
        // get pointer on thread
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // increment iterator
        iter = iter->next;

        // handle REQ_ACK
        if( thread->flags & THREAD_FLAG_REQ_ACK )
        {
            // check thread blocked
            assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) ,
            __FUNCTION__ , "thread not blocked" );

            // decrement response counter
            hal_atomic_add( thread->ack_rsp_count , -1 );

            // reset REQ_ACK in thread descriptor
            thread_reset_req_ack( thread );
        }

        // handle REQ_DELETE
        if( thread->flags & THREAD_FLAG_REQ_DELETE )
        {
            // get thread process descriptor
            process = thread->process;

            // release FPU if required
            if( thread->core->fpu_owner == thread )  thread->core->fpu_owner = NULL;

            // remove thread from scheduler (scheduler lock already taken)
            uint32_t threads_nr = sched->u_threads_nr;

            assert( (threads_nr != 0) , __FUNCTION__ , "u_threads_nr cannot be 0\n" );

            sched->u_threads_nr = threads_nr - 1;
            list_unlink( &thread->sched_list );
            if( threads_nr == 1 ) sched->u_last = NULL;

            // delete thread
            thread_destroy( thread );

#if DEBUG_SCHED_HANDLE_SIGNALS
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[DBG] %s : thread %x in process %x (%x) deleted / cycle %d\n",
__FUNCTION__ , thread , process->pid , process , cycle );
#endif
            // destroy process descriptor if no more threads
            if( process->th_nr == 0 )
            {
                // delete process
                process_destroy( process );

#if DEBUG_SCHED_HANDLE_SIGNALS
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[DBG] %s : process %x has been deleted / cycle %d\n",
__FUNCTION__ , process->pid , cycle );
#endif

            }
        }
    }

    // release lock
    hal_fence();
    spinlock_unlock( &sched->lock );

} // end sched_handle_signals()

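///////////////////////////////////////////////////////////////////////////////////////////
// This function is the scheduler entry point, called by the current thread to release
// the core. It selects the next thread with sched_select(), performs the CPU context
// switch when the selected thread differs from the calling one, and finally handles the
// pending REQ_ACK / REQ_DELETE requests with sched_handle_signals() before restoring
// interrupts. The yield is simply delayed (THREAD_FLAG_SCHED is set) when the calling
// thread still holds locks. The <cause> argument is only used by the debug messages.
//
// Typical usage (illustrative sketch, not part of this file; it assumes a thread_block()
// helper that sets a bit in the blocking vector of the calling thread):
//
//     thread_block( CURRENT_THREAD , THREAD_BLOCKED_IO );  // hypothetical helper call
//     sched_yield( "blocked on IO" );                      // release the core
///////////////////////////////////////////////////////////////////////////////////////////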
////////////////////////////////
void sched_yield( char * cause )
{
    thread_t    * next;
    thread_t    * current = CURRENT_THREAD;
    core_t      * core    = current->core;
    scheduler_t * sched   = &core->scheduler;

#if (DEBUG_SCHED_YIELD & 0x1)
if( DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() )
sched_display( core->lid );
#endif

    // delay the yield if current thread has locks
    if( (current->local_locks != 0) || (current->remote_locks != 0) )
    {
        current->flags |= THREAD_FLAG_SCHED;
        return;
    }

    // enter critical section / save SR in current thread descriptor
    hal_disable_irq( &CURRENT_THREAD->save_sr );

    // loop on threads to select next thread
    next = sched_select( sched );

    // check next thread kernel_stack overflow
    assert( (next->signature == THREAD_SIGNATURE),
    __FUNCTION__ , "kernel stack overflow for thread %x\n", next );

    // check next thread attached to same core as the calling thread
    assert( (next->core == current->core),
    __FUNCTION__ , "next core %x != current core %x\n", next->core, current->core );

    // check next thread not blocked when type != IDLE
    assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) , __FUNCTION__ ,
    "next thread %x (%s) is blocked on core[%x,%d]\n",
    next->trdid , thread_type_str(next->type) , local_cxy , core->lid );

    // switch contexts and update scheduler state if next != current
    if( next != current )
    {

// temporary debug trace : report entry/exit of one specific thread descriptor (0xcc000)
// on core[0x1,1]
if( (local_cxy == 0x1) && (core->lid == 1) && ((uint32_t)current == 0xcc000) )
printk("\n@@@@@ cc000 exit at cycle %d\n", (uint32_t)hal_get_cycles() );

if( (local_cxy == 0x1) && (core->lid == 1) && ((uint32_t)next == 0xcc000) )
printk("\n@@@@@ cc000 enter at cycle %d\n", (uint32_t)hal_get_cycles() );

#if DEBUG_SCHED_YIELD
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_YIELD < cycle )
printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid,
next , thread_type_str(next->type) , next->process->pid , next->trdid , cycle );
#endif

        // update scheduler
        sched->current = next;
        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
        else                            sched->k_last = &next->sched_list;

        // handle FPU ownership
        if( next->type == THREAD_USER )
        {
            if( next == current->core->fpu_owner )  hal_fpu_enable();
            else                                    hal_fpu_disable();
        }

        // switch CPU from current thread context to new thread context
        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
    }
    else
    {

#if (DEBUG_SCHED_YIELD & 0x1)
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_YIELD < cycle )
printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) continue / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid, cycle );
#endif

    }

    // handle pending requests for all threads executing on this core.
    sched_handle_signals( core );

    // exit critical section / restore SR from current thread descriptor
    hal_restore_irq( CURRENT_THREAD->save_sr );

}  // end sched_yield()


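///////////////////////////////////////////////////////////////////////////////////////////
// This debug function displays on the TXT0 kernel terminal the state of all kernel and
// user threads registered in the scheduler of core <lid> in the local cluster. It takes
// the TXT0 lock in busy-waiting mode to make the display atomic.
///////////////////////////////////////////////////////////////////////////////////////////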
///////////////////////////////
void sched_display( lid_t lid )
{
    list_entry_t * iter;
    thread_t     * thread;
    uint32_t       save_sr;

    assert( (lid < LOCAL_CLUSTER->cores_nr), __FUNCTION__, "illegal core index %d\n", lid);

    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t  * sched   = &core->scheduler;

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
            local_cxy , core->lid, sched->current, (uint32_t)hal_get_cycles() );

    // display kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if (thread->type == THREAD_DEV)
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked, thread->flags, thread->chdev->name );
        }
        else
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked, thread->flags );
        }
    }

    // display user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
        thread_type_str( thread->type ), thread->process->pid, thread->trdid,
        thread, thread->blocked, thread->flags );
    }

    // release TXT0 lock
    remote_spinlock_unlock_busy( lock_xp , save_sr );

}  // end sched_display()
