source: trunk/kernel/kern/scheduler.c @ 690

Last change on this file since 690 was 683, checked in by alain, 4 years ago

All modifications required to support the <tcp_chat> application,
including error recovery in case of packet loss.

File size: 23.6 KB
/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner       (2016,2017,2018,2019,2020)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
23
#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_switch.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <rpc.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <scheduler.h>


///////////////////////////////////////////////////////////////////////////////////////////
//         global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
extern process_t            process_zero;       // allocated in kernel_init.c

///////////////////////////////////////////////////////////////////////////////////////////
//         private functions
///////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////////////////
// This static function does NOT modify the scheduler state.
// It just selects a thread in the list of attached threads, implementing the following
// three-step policy:
// 1) It scans the list of kernel threads, starting from the thread following the last
//    executed one, and returns the first runnable thread found: not IDLE, not blocked,
//    client queue not empty. It can be the current thread.
// 2) If no kernel thread is found, it scans the list of user threads, starting from the
//    thread following the last executed one, and returns the first runnable thread
//    found: not blocked. It can be the current thread.
// 3) If no runnable thread is found, it returns the idle thread.
////////////////////////////////////////////////////////////////////////////////////////////
// @ sched   : local pointer on scheduler.
// @ returns pointer on selected thread descriptor
////////////////////////////////////////////////////////////////////////////////////////////
static thread_t * __attribute__((__noinline__)) sched_select( scheduler_t * sched )
{
    thread_t     * thread;
    list_entry_t * current;
    list_entry_t * last;
    list_entry_t * root;
    bool_t         done;
    uint32_t       count;

    // first : scan the kernel threads list if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        root    = &sched->k_root;
        last    = sched->k_last;
        done    = false;
        count   = 0;
        current = last;

        while( done == false )
        {
            // get next entry in kernel list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;
            else                  count++;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // select kernel thread if not blocked and not THREAD_IDLE
            if( (thread->blocked == 0)  && (thread->type != THREAD_IDLE) ) return thread;

        } // end loop on kernel threads
    } // end kernel threads

    // second : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        root    = &sched->u_root;
        last    = sched->u_last;
        done    = false;
        count   = 0;
        current = last;

        while( done == false )
        {
            // get next entry in user list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;
            else                  count++;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // select thread if not blocked
            if( thread->blocked == 0 )  return thread;

        } // end loop on user threads
    } // end user threads

    // third : return idle thread if no other runnable thread
    return sched->idle;

}  // end sched_select()
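
////////////////////////////////////////////////////////////////////////////////////////////
// Illustration (not compiled): the two scans above are instances of the same round-robin
// pattern on a circular list with a sentinel root. A minimal sketch of that pattern,
// where is_runnable() stands for the blocked/type tests used by sched_select():
//
//     list_entry_t * e = last;                  // start after the last served entry
//     do {
//         e = e->next;                          // advance around the ring
//         if( e == root ) continue;             // skip the sentinel root
//         if( is_runnable( e ) ) return e;      // first runnable entry wins
//     } while( e != last );                     // full turn => nothing runnable
//
// Starting from <last> (rather than from the root) is what gives each thread a fair
// chance to run, since the search resumes after the last selected thread.
////////////////////////////////////////////////////////////////////////////////////////////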

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is the only function that can actually delete a thread
// (and the associated process descriptor if required).
// It is private, because it is only called by the sched_yield() public function.
// It scans all threads attached to a given scheduler, and executes the relevant
// actions for two types of pending requests:
//
// - REQ_ACK : it checks that the target thread is blocked, decrements the response
//   counter to acknowledge the client thread, and resets the pending request.
// - REQ_DELETE : it removes the target thread from the process th_tbl[], removes it
//   from the scheduler list, and releases the memory allocated to the thread descriptor.
//   For a user thread, it also destroys the process descriptor if the target thread is
//   the last thread in the local process descriptor.
//
// Implementation note:
// We use a while loop to scan the threads in the scheduler lists, because some threads
// can be destroyed during the scan, so we cannot use LIST_FOREACH().
////////////////////////////////////////////////////////////////////////////////////////////
// @ core    : local pointer on the core descriptor.
////////////////////////////////////////////////////////////////////////////////////////////
static void sched_handle_signals( core_t * core )
{

    list_entry_t * iter;
    list_entry_t * root;
    thread_t     * thread;
    process_t    * process;
    scheduler_t  * sched;
    uint32_t       threads_nr;   // number of threads in scheduler list
    ltid_t         ltid;         // thread local index
    uint32_t       count;        // number of threads in local process

    // get pointer on scheduler
    sched = &core->scheduler;

    ////////////////// scan user threads to handle ACK and DELETE requests
    root = &sched->u_root;
    iter = root->next;
    while( iter != root )
    {
        // get pointer on thread
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // increment iterator
        iter = iter->next;

        // handle REQ_ACK
        if( thread->flags & THREAD_FLAG_REQ_ACK )
        {

// check target thread blocked
assert( __FUNCTION__, (thread->blocked & THREAD_BLOCKED_GLOBAL) , "thread not blocked" );

            // decrement response counter
            hal_atomic_add( thread->ack_rsp_count , -1 );

            // reset REQ_ACK in thread descriptor
            thread_reset_req_ack( thread );
        }

        // handle REQ_DELETE only if target thread != calling thread
        if( thread->flags & THREAD_FLAG_REQ_DELETE )
        {

// check calling thread != target thread
assert( __FUNCTION__, (thread != CURRENT_THREAD) , "calling thread cannot delete itself" );

            // get thread process descriptor
            process = thread->process;

            // get thread ltid
            ltid = LTID_FROM_TRDID( thread->trdid );

            // take the lock protecting scheduler state
            busylock_acquire( &sched->lock );

            // update scheduler state
            threads_nr = sched->u_threads_nr;
            sched->u_threads_nr = threads_nr - 1;
            list_unlink( &thread->sched_list );
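            // if u_last pointed on the just unlinked entry, it must be repointed to a
            // still valid entry (or NULL when the list becomes empty), because
            // sched_select() uses it as the round-robin starting point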
            if( sched->u_last == &thread->sched_list )
            {
                if( threads_nr == 1 )
                {
                    sched->u_last = NULL;
                }
                else if( sched->u_root.next == &thread->sched_list )
                {
                    sched->u_last = sched->u_root.pred;
                }
                else
                {
                    sched->u_last = sched->u_root.next;
                }
            }

            // release the lock protecting scheduler state
            busylock_release( &sched->lock );

#if DEBUG_SCHED_HANDLE_SIGNALS
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
__FUNCTION__, process->pid, thread->trdid, local_cxy, thread->core->lid, cycle );
#endif

            // release memory allocated for thread
            // (the debug message above is printed first, because the thread
            // descriptor cannot be referenced after thread_destroy())
            count = thread_destroy( thread );

            hal_fence();

            // destroy process descriptor if last thread
            if( count == 1 )
            {
                // delete process
                process_destroy( process );

#if DEBUG_SCHED_HANDLE_SIGNALS
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[%s] process %x in cluster %x deleted / cycle %d\n",
__FUNCTION__ , process->pid , local_cxy , cycle );
#endif
            }
        }
    }  // end user threads

    ///////////// scan kernel threads for DELETE only
    root = &sched->k_root;
    iter = root->next;
    while( iter != root )
    {
        // get pointer on thread
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // increment iterator
        iter = iter->next;

        // handle REQ_DELETE only if target thread != calling thread
        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
        {

// check process descriptor is local kernel process
assert( __FUNCTION__, ( thread->process == &process_zero ) , "illegal process descriptor");

            // get thread ltid
            ltid = LTID_FROM_TRDID( thread->trdid );

            // take the lock protecting scheduler state
            busylock_acquire( &sched->lock );

            // update scheduler state
            threads_nr = sched->k_threads_nr;
            sched->k_threads_nr = threads_nr - 1;
            list_unlink( &thread->sched_list );
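            // as for the user list above, k_last must not be left pointing on the
            // unlinked entry, since sched_select() uses it as its starting point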
            if( sched->k_last == &thread->sched_list )
            {
                if( threads_nr == 1 )
                {
                    sched->k_last = NULL;
                }
                else if( sched->k_root.next == &thread->sched_list )
                {
                    sched->k_last = sched->k_root.pred;
                }
                else
                {
                    sched->k_last = sched->k_root.next;
                }
            }

            // release the lock protecting scheduler state
            busylock_release( &sched->lock );

            // get number of threads in local kernel process
            count = process_zero.th_nr;

// check th_nr value
assert( __FUNCTION__, (process_zero.th_nr > 0) , "kernel process th_nr cannot be 0" );

            // remove thread from process th_tbl[]
            process_zero.th_tbl[ltid] = NULL;
            hal_atomic_add( &process_zero.th_nr , -1 );

#if DEBUG_SCHED_HANDLE_SIGNALS
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
__FUNCTION__ , process_zero.pid , thread->trdid , local_cxy , thread->core->lid , cycle );
#endif

            // delete thread descriptor
            // (the debug message above is printed first, because the thread
            // descriptor cannot be referenced after thread_destroy())
            thread_destroy( thread );
        }
    }
} // end sched_handle_signals()
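
////////////////////////////////////////////////////////////////////////////////////////////
// Illustration (not compiled): a hedged sketch of the client side of the REQ_DELETE
// protocol handled above. A client posts the request on the target thread descriptor;
// the deletion itself is always performed by sched_handle_signals(), on the core that
// owns the target thread, at its next sched_yield():
//
//     hal_atomic_or( &target->flags , THREAD_FLAG_REQ_DELETE );   // post the request
//     // ... the request is honoured at the next sched_yield() on the target core
//
// The real kernel wraps this in dedicated thread.c functions, and the atomic-or
// primitive name is an assumption here; this sketch only shows why this function can
// assume the flag is already set when it runs.
////////////////////////////////////////////////////////////////////////////////////////////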

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the sched_yield() function when the RPC_FIFO
// associated to the core is not empty.
// It searches an idle RPC thread for this core, and unblocks it if found.
// It creates a new RPC thread if no idle RPC thread is found.
////////////////////////////////////////////////////////////////////////////////////////////
// @ sched   : local pointer on scheduler.
////////////////////////////////////////////////////////////////////////////////////////////
static void sched_rpc_activate( scheduler_t * sched )
{
    error_t         error;
    thread_t      * thread;
    list_entry_t  * iter;
    lid_t           lid = CURRENT_THREAD->core->lid;
    bool_t          found = false;

    // search one IDLE RPC thread associated to the selected core
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        if( (thread->type == THREAD_RPC) &&
            (thread->blocked == THREAD_BLOCKED_IDLE ) )
        {
            found = true;
            break;
        }
    }

    if( found == false )     // create new RPC thread
    {
        error = thread_kernel_create( &thread,
                                      THREAD_RPC,
                                      &rpc_server_func,
                                      NULL,
                                      lid );
        // check memory
        if ( error )
        {
            printk("\n[ERROR] in %s : no memory to create an RPC thread in cluster %x\n",
            __FUNCTION__, local_cxy );
        }
        else
        {
            // unblock created RPC thread
            thread->blocked = 0;

            // update RPC threads counter
            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[lid] , 1 );

#if DEBUG_SCHED_RPC_ACTIVATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
printk("\n[%s] new RPC thread %x created for core[%x,%d] / total %d / cycle %d\n",
__FUNCTION__, thread->trdid, local_cxy, lid, LOCAL_CLUSTER->rpc_threads[lid], cycle );
#endif
        }
    }
    else                 // RPC thread found => unblock it
    {
        // unblock found RPC thread
        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE );

#if DEBUG_SCHED_RPC_ACTIVATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
printk("\n[%s] idle RPC thread %x unblocked for core[%x,%d] / cycle %d\n",
__FUNCTION__, thread->trdid, local_cxy, lid, cycle );
#endif

    }

} // end sched_rpc_activate()
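
////////////////////////////////////////////////////////////////////////////////////////////
// Usage note: this function is only called from sched_yield(), when the local RPC FIFO
// is found non empty. The unblocked (or newly created) RPC thread is a kernel thread,
// so sched_select() will pick it before any user thread at the next scheduling point.
////////////////////////////////////////////////////////////////////////////////////////////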


///////////////////////////////////////////////////////////////////////////////////////////
//         public functions
///////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr   = 0;
    sched->k_threads_nr   = 0;

    sched->current        = CURRENT_THREAD;
    sched->idle           = NULL;               // initialized in kernel_init()
    sched->u_last         = NULL;               // initialized in sched_register_thread()
    sched->k_last         = NULL;               // initialized in sched_register_thread()

    // initialize the thread lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

    // init lock
    busylock_init( &sched->lock , LOCK_SCHED_STATE );

    sched->req_ack_pending = false;             // no pending request
    sched->trace           = false;             // context switches trace deactivated

}  // end sched_init()
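
////////////////////////////////////////////////////////////////////////////////////////////
// Illustration (not compiled): expected initialisation order, as implied by the comments
// above — <idle> is set later by kernel_init(), and the <u_last>/<k_last> pointers by the
// first sched_register_thread() call for each list:
//
//     sched_init( core );                        // empty scheduler, no idle thread yet
//     sched_register_thread( core , idle );      // attach the idle thread (kernel list)
//     core->scheduler.idle = idle;               // fallback returned by sched_select()
//
// The actual sequence lives in kernel_init.c; this sketch only shows the dependencies.
////////////////////////////////////////////////////////////////////////////////////////////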

////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler state
    busylock_acquire( &sched->lock );

    if( type == THREAD_USER )
    {
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    busylock_release( &sched->lock );

}  // end sched_register_thread()
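
////////////////////////////////////////////////////////////////////////////////////////////
// Illustration (not compiled): typical call site, assuming a freshly created thread
// whose <core> field has already been set by the thread creation path:
//
//     sched_register_thread( thread->core , thread );   // attach to the core scheduler
//
// Note that there is no matching sched_unregister_thread() in this file: detachment is
// done by sched_handle_signals(), as a side effect of the REQ_DELETE request.
////////////////////////////////////////////////////////////////////////////////////////////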

//////////////////////////////////////////////////////////////////
void sched_yield( const char * cause __attribute__((__unused__)) )
{
    thread_t      * next;
    thread_t      * current = CURRENT_THREAD;
    core_t        * core    = current->core;
    lid_t           lid     = core->lid;
    scheduler_t   * sched   = &core->scheduler;
    remote_fifo_t * fifo    = &LOCAL_CLUSTER->rpc_fifo[lid];

#if DEBUG_SCHED_YIELD
uint32_t cycle = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SCHED_YIELD & 0x1)
if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
sched_remote_display( local_cxy , lid );
#endif

// This assert should always be true, as this check has been
// done before, by any function that can possibly deschedule...
assert( __FUNCTION__, (current->busylocks == 0),
"current thread holds %d busylocks\n", current->busylocks );

    // activate or create an RPC thread if RPC_FIFO non empty
    if( remote_fifo_is_empty( fifo ) == false )  sched_rpc_activate( sched );

    // disable IRQs / save SR in current thread descriptor
    hal_disable_irq( &current->save_sr );

    // take lock protecting scheduler state
    busylock_acquire( &sched->lock );

    // select next thread
    next = sched_select( sched );

// check next thread kernel_stack overflow
assert( __FUNCTION__, (next->signature == THREAD_SIGNATURE),
"kernel stack overflow for thread %x on core[%x,%d]", next, local_cxy, lid );

// check next thread attached to same core as the current thread
assert( __FUNCTION__, (next->core == current->core),
"next_core_lid %d / current_core_lid %d", next->core->lid, current->core->lid );

// check next thread not blocked when type != IDLE
assert( __FUNCTION__, ((next->blocked == 0) || (next->type == THREAD_IDLE)) ,
"next thread %x (%s) is blocked on core[%x,%d]",
next->trdid , thread_type_str(next->type) , local_cxy , lid );

    // switch contexts and update scheduler state if next != current
    if( next != current )
    {
        // update scheduler
        sched->current = next;
        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
        else                            sched->k_last = &next->sched_list;

        // handle FPU ownership
        if( next->type == THREAD_USER )
        {
            if( next == current->core->fpu_owner )  hal_fpu_enable();
            else                                    hal_fpu_disable();
        }

        // release lock protecting scheduler state
        busylock_release( &sched->lock );

#if DEBUG_SCHED_YIELD
if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
printk("\n[%s] core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
__FUNCTION__, local_cxy, lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid, next,
thread_type_str(next->type) , next->process->pid , next->trdid , cycle );
#endif

        // switch CPU from current thread context to new thread context
        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
    }
    else
    {
        // release lock protecting scheduler state
        busylock_release( &sched->lock );

#if DEBUG_SCHED_YIELD
if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
printk("\n[%s] core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) continue / cycle %d\n",
__FUNCTION__, local_cxy, lid, cause, current, thread_type_str(current->type),
current->process->pid, current->trdid, (uint32_t)hal_get_cycles() );
#endif

    }

    // handle pending requests for all threads executing on this core.
    sched_handle_signals( core );

    // exit critical section / restore SR from current thread descriptor
    hal_restore_irq( CURRENT_THREAD->save_sr );

}  // end sched_yield()
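
////////////////////////////////////////////////////////////////////////////////////////////
// Illustration (not compiled): the <cause> argument only feeds the debug traces above.
// A typical descheduling point, assuming the thread_block() primitive from thread.h,
// blocks the calling thread first, then yields:
//
//     thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );
//     sched_yield( "blocked on IO" );
//
// The busylock count must be zero at this point, as enforced by the assert above.
////////////////////////////////////////////////////////////////////////////////////////////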


/////////////////////////////////////
void sched_remote_display( cxy_t cxy,
                           lid_t lid )
{
    thread_t     * thread;

    // get local pointer on target scheduler
    core_t      * core  = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t * sched = &core->scheduler;

    // get local pointer on current thread in target scheduler
    thread_t * current = hal_remote_lpt( XPTR( cxy, &sched->current ) );

    // get local pointers on the first kernel and user threads list_entry
    list_entry_t * k_entry = hal_remote_lpt( XPTR( cxy , &sched->k_root.next ) );
    list_entry_t * u_entry = hal_remote_lpt( XPTR( cxy , &sched->u_root.next ) );

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock
    remote_busylock_acquire( lock_xp );

    // get rpc_threads
    uint32_t rpcs = hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->rpc_threads[lid] ) );

    // display header
    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
    cxy , lid, current, rpcs, (uint32_t)hal_get_cycles() );
    nolock_printk("  type | pid        | trdid      | desc       | block      | flags      | func\n");

    // display kernel threads
    while( k_entry != &sched->k_root )
    {
        // get local pointer on kernel_thread
        thread = LIST_ELEMENT( k_entry , thread_t , sched_list );

        // get relevant thread info
        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
        process_t *   process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );
        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );

        // display thread info
        if (type == THREAD_DEV)
        {
            char      name[16];
            chdev_t * chdev = hal_remote_lpt( XPTR( cxy , &thread->chdev ) );
            hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( cxy , chdev->name ) );

            nolock_printk(" - %s | %X | %X | %X | %X | %X | %s\n",
            thread_type_str( type ), pid, trdid, thread, blocked, flags, name );
        }
        else
        {
            nolock_printk(" - %s | %X | %X | %X | %X | %X |\n",
            thread_type_str( type ), pid, trdid, thread, blocked, flags );
        }

        // get next remote kernel thread list_entry
        k_entry = hal_remote_lpt( XPTR( cxy , &k_entry->next ) );
    }

    // display user threads
    while( u_entry != &sched->u_root )
    {
        // get local pointer on user_thread
        thread = LIST_ELEMENT( u_entry , thread_t , sched_list );

        // get relevant thread info
        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
        process_t *   process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );
        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );
        void      *   func    = hal_remote_lpt ( XPTR( cxy , &thread->entry_func ) );

        nolock_printk(" - %s | %X | %X | %X | %X | %X | %x\n",
        thread_type_str( type ), pid, trdid, thread, blocked, flags, (uint32_t)func );

        // get next user thread list_entry
        u_entry = hal_remote_lpt( XPTR( cxy , &u_entry->next ) );
    }

    // release TXT0 lock
    remote_busylock_release( lock_xp );

}  // end sched_remote_display()
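
////////////////////////////////////////////////////////////////////////////////////////////
// Illustration (not compiled): this function only uses hal_remote_* accesses, so it can
// inspect any active cluster, e.g. to dump the scheduler of core 0 in the local cluster:
//
//     sched_remote_display( local_cxy , 0 );
//
////////////////////////////////////////////////////////////////////////////////////////////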