source: trunk/kernel/kern/cluster.c @ 547

Last change on this file since 547 was 530, checked in by nicolas.van.phan@…, 6 years ago

Hack to compile on both IOB and LETI for now

File size: 21.3 KB
Line 
1/*
2 * cluster.c - Cluster-Manager related operations
3 *
4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *         Mohamed Lamine Karaoui (2015)
6 *         Alain Greiner (2016,2017,2018)
7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH..
11 *
12 * ALMOS-MKH. is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH. is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26#include <kernel_config.h>
27#include <hal_kernel_types.h>
28#include <hal_atomic.h>
29#include <hal_special.h>
30#include <hal_ppm.h>
31#include <remote_fifo.h>
32#include <printk.h>
33#include <errno.h>
34#include <spinlock.h>
35#include <core.h>
36#include <chdev.h>
37#include <scheduler.h>
38#include <list.h>
39#include <cluster.h>
40#include <boot_info.h>
41#include <bits.h>
42#include <ppm.h>
43#include <thread.h>
44#include <kmem.h>
45#include <process.h>
46#include <dqdt.h>
47
48/////////////////////////////////////////////////////////////////////////////////////
49// Extern global variables
50/////////////////////////////////////////////////////////////////////////////////////
51
52extern process_t           process_zero;     // allocated in kernel_init.c file
53extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c file
54
/////////////////////////////////////////////////////////////////////////////////////
// This function initialises the local cluster manager from the boot_info:
// - cluster global parameters (mesh geometry / IO cluster identifier),
// - number of channels for each external peripheral functional type,
// - the DQDT and the embedded PPM / KHM / KCM memory allocators,
// - the core descriptors and the per-core RPC FIFOs,
// - the process manager (pref_tbl[], local list, copies lists).
// Returns 0 on success / returns ENOMEM if the PPM cannot be initialised.
/////////////////////////////////////////////////////////////////////////////////////
error_t cluster_init( struct boot_info_s * info )
{
    error_t         error;
    lpid_t          lpid;     // local process index
    lid_t           lid;      // local core index
    uint32_t        i;        // index in loop on external peripherals
    boot_device_t * dev;      // pointer on external peripheral
    uint32_t        func;     // external peripheral functional type

    cluster_t * cluster = LOCAL_CLUSTER;

    // initialize cluster global parameters
    cluster->paddr_width     = info->paddr_width;
    cluster->x_width         = info->x_width;
    cluster->y_width         = info->y_width;
    cluster->x_size          = info->x_size;
    cluster->y_size          = info->y_size;
    cluster->x_max           = info->x_max; // [FIXME]
    cluster->y_max           = info->y_max; // [FIXME]
    cluster->io_cxy          = info->io_cxy;

    // initialize external peripherals channels
    // (only the functional types listed below carry a channel count)
    for( i = 0 ; i < info->ext_dev_nr ; i++ )
    {
        dev  = &info->ext_dev[i];
        func = FUNC_FROM_TYPE( dev->type );   
        if( func == DEV_FUNC_TXT ) cluster->nb_txt_channels = dev->channels;
        if( func == DEV_FUNC_NIC ) cluster->nb_nic_channels = dev->channels;
        if( func == DEV_FUNC_IOC ) cluster->nb_ioc_channels = dev->channels;
        if( func == DEV_FUNC_FBF ) cluster->nb_fbf_channels = dev->channels;
    }

    // initialize cluster local parameters
    cluster->cores_nr        = info->cores_nr;

    // initialize the lock protecting the embedded kcm allocator
    spinlock_init( &cluster->kcm_lock );

#if DEBUG_CLUSTER_INIT
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    // initialises DQDT
    // (dqdt_init() returns the number of levels; root level is one less)
    cluster->dqdt_root_level = dqdt_init( info->x_max, // [FIXME]
                                          info->y_max, // [FIXME]
                                          info->y_width ) - 1;

    // initialises embedded PPM (physical pages allocator)
    error = hal_ppm_init( info );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
               __FUNCTION__ , local_cxy );
        return ENOMEM;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialises embedded KHM (kernel heap manager)
    khm_init( &cluster->khm );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialises embedded KCM (kernel cache manager)
    kcm_init( &cluster->kcm , KMEM_KCM );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialises all cores descriptors
    for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    {
        core_init( &cluster->core_tbl[lid],    // target core descriptor
                   lid,                        // local core index
                   info->core[lid].gid );      // gid from boot_info_t
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialises RPC FIFOs (one FIFO and one thread counter per core)
    for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    {
        local_fifo_init( &cluster->rpc_fifo[lid] );
        cluster->rpc_threads[lid] = 0;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : RPC fifo inialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialise pref_tbl[] in process manager
    // (slot 0 is reserved for the kernel process_zero)
    spinlock_init( &cluster->pmgr.pref_lock );
    cluster->pmgr.pref_nr = 0;
    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
    }

    // initialise local_list in process manager
    remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    cluster->pmgr.local_nr = 0;

    // initialise copies_lists in process manager (one list per lpid)
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
        cluster->pmgr.copies_nr[lpid] = 0;
        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
    }

#if DEBUG_CLUSTER_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s , thread %x exit for cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    // make all initialisations visible to other cores before returning
    hal_fence();

    return 0;
} // end cluster_init()
205
206////////////////////////////////////////
207bool_t cluster_is_undefined( cxy_t cxy )
208{
209    cluster_t * cluster = LOCAL_CLUSTER;
210
211    uint32_t y_width = cluster->y_width;
212
213    uint32_t x = cxy >> y_width;
214    uint32_t y = cxy & ((1<<y_width)-1);
215
216    if( x >= cluster->x_size ) return true;
217    if( y >= cluster->y_size ) return true;
218
219    return false;
220}
221
222////////////////////////////////////////////////////////////////////////////////////
223//  Cores related functions
224////////////////////////////////////////////////////////////////////////////////////
225
226/////////////////////////////////
227lid_t cluster_select_local_core( void )
228{
229    uint32_t      min = 1000;
230    lid_t         sel = 0;
231    uint32_t      nthreads;
232    lid_t         lid;
233    scheduler_t * sched;
234
235    cluster_t * cluster = LOCAL_CLUSTER;
236
237    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
238    {
239        sched    = &cluster->core_tbl[lid].scheduler;
240        nthreads = sched->u_threads_nr + sched->k_threads_nr;
241
242        if( nthreads < min )
243        {
244            min = nthreads;
245            sel = lid;
246        }
247    }
248    return sel;
249}
250
251////////////////////////////////////////////////////////////////////////////////////
252//  Process related functions
253////////////////////////////////////////////////////////////////////////////////////
254
255
//////////////////////////////////////////////////////
// Returns an extended pointer on the copy of process <pid> placed in
// cluster <cxy>, by scanning the copies list rooted in the owner cluster.
// Returns XPTR_NULL when cluster <cxy> contains no copy of this process.
xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy,
                                            pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid encoded in the PID
    cxy_t   owner_cxy = CXY_FROM_PID( pid );
    lpid_t  lpid      = LPID_FROM_PID( pid );

    // get lock & root of list of copies from owner cluster
    // NOTE(review): the local <cluster> pointer is combined with the remote
    // <owner_cxy> — this assumes the cluster descriptor has the same local
    // address in every cluster; TODO confirm this invariant.
    root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

    // take the lock protecting the list of processes
    remote_spinlock_lock( lock_xp );

    // scan list of processes
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );

        // a copy matches when its descriptor resides in the target cluster
        if( GET_CXY( current_xp ) == cxy )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes
    remote_spinlock_unlock( lock_xp );

    // return extended pointer on process descriptor in owner cluster
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_process_from_pid_in_cxy()
300
301
//////////////////////////////////////////////////////
// Returns an extended pointer on the process descriptor placed in the
// owner cluster of <pid>, by scanning the owner cluster local process list.
// Returns XPTR_NULL when no process with this PID is found there.
xptr_t cluster_get_owner_process_from_pid( pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    process_t * current_ptr;   // local pointer on current process
    pid_t       current_pid;   // current process identifier
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster encoded in the PID
    cxy_t  owner_cxy = CXY_FROM_PID( pid );

    // get lock & root of list of process in owner cluster
    // NOTE(review): the local <cluster> pointer is combined with the remote
    // <owner_cxy> — this assumes the cluster descriptor has the same local
    // address in every cluster; TODO confirm this invariant.
    root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );

    // take the lock protecting the list of processes
    remote_spinlock_lock( lock_xp );

    // scan list of processes in owner cluster
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        current_ptr = GET_PTR( current_xp );

        // the PID field must be read with a remote access
        current_pid = hal_remote_lw( XPTR( owner_cxy , &current_ptr->pid ) );

        if( current_pid == pid )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes
    remote_spinlock_unlock( lock_xp );

    // return extended pointer on process descriptor in owner cluster
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_owner_process_from_pid()
348
349
350//////////////////////////////////////////////////////////
351xptr_t cluster_get_reference_process_from_pid( pid_t pid )
352{
353    xptr_t ref_xp;   // extended pointer on reference process descriptor
354
355    cluster_t * cluster = LOCAL_CLUSTER;
356
357    // get owner cluster and lpid
358    cxy_t  owner_cxy = CXY_FROM_PID( pid );
359    lpid_t lpid      = LPID_FROM_PID( pid );
360
361    // Check valid PID
362    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;
363
364    if( local_cxy == owner_cxy )   // local cluster is owner cluster
365    {
366        ref_xp = cluster->pmgr.pref_tbl[lpid];
367    }
368    else                              // use a remote_lwd to access owner cluster
369    {
370        ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
371    }
372
373    return ref_xp;
374}
375
376///////////////////////////////////////////////
377error_t cluster_pid_alloc( process_t * process,
378                           pid_t     * pid )
379{
380    lpid_t      lpid;
381    bool_t      found;
382
383#if DEBUG_CLUSTER_PID_ALLOC
384uint32_t cycle = (uint32_t)hal_get_cycles();
385if( DEBUG_CLUSTER_PID_ALLOC < cycle )
386printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n",
387__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
388#endif
389
390    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;
391
392    // get the process manager lock
393    spinlock_lock( &pm->pref_lock );
394
395    // search an empty slot
396    found = false;
397    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
398    {
399        if( pm->pref_tbl[lpid] == XPTR_NULL )
400        {
401            found = true;
402            break;
403        }
404    }
405
406    if( found )
407    {
408        // register process in pref_tbl[]
409        pm->pref_tbl[lpid] = XPTR( local_cxy , process );
410        pm->pref_nr++;
411
412        // returns pid
413        *pid = PID( local_cxy , lpid );
414
415        // release the processs_manager lock
416        spinlock_unlock( &pm->pref_lock );
417
418        return 0;
419    }
420    else
421    {
422        // release the processs_manager lock
423        spinlock_unlock( &pm->pref_lock );
424
425        return -1;
426    }
427
428#if DEBUG_CLUSTER_PID_ALLOC
429cycle = (uint32_t)hal_get_cycles();
430if( DEBUG_CLUSTER_PID_ALLOC < cycle )
431printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n",
432__FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle );
433#endif
434
435} // end cluster_pid_alloc()
436
437/////////////////////////////////////
438void cluster_pid_release( pid_t pid )
439{
440
441#if DEBUG_CLUSTER_PID_RELEASE
442uint32_t cycle = (uint32_t)hal_get_cycles();
443if( DEBUG_CLUSTER_PID_RELEASE < cycle )
444printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n",
445__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle );
446#endif
447
448    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
449    lpid_t lpid       = LPID_FROM_PID( pid );
450
451    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;
452
453    // check lpid
454    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER),
455    "illegal LPID = %d" , lpid );
456
457    // check owner cluster
458    assert( (owner_cxy == local_cxy) ,
459    "local_cluster %x !=  owner_cluster %x" , local_cxy , owner_cxy );
460
461    // get the process manager lock
462    spinlock_lock( &pm->pref_lock );
463
464    // remove process from pref_tbl[]
465    pm->pref_tbl[lpid] = XPTR_NULL;
466    pm->pref_nr--;
467
468    // release the processs_manager lock
469    spinlock_unlock( &pm->pref_lock );
470
471#if DEBUG_CLUSTER_PID_RELEASE
472cycle = (uint32_t)hal_get_cycles();
473if( DEBUG_CLUSTER_PID_RELEASE < cycle )
474printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n",
475__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
476#endif
477
478} // end cluster_pid_release()
479
480///////////////////////////////////////////////////////////
481process_t * cluster_get_local_process_from_pid( pid_t pid )
482{
483    xptr_t         process_xp;
484    process_t    * process_ptr;
485    xptr_t         root_xp;
486    xptr_t         iter_xp;
487    bool_t         found;
488
489    found   = false;
490    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );
491
492    XLIST_FOREACH( root_xp , iter_xp )
493    {
494        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
495        process_ptr = (process_t *)GET_PTR( process_xp );
496        if( process_ptr->pid == pid )
497        {
498            found = true;
499            break;
500        }
501    }
502
503    if (found ) return process_ptr;
504    else        return NULL;
505
506}  // end cluster_get_local_process_from_pid()
507
//////////////////////////////////////////////////////
// Registers <process> in the list of all processes placed in the local
// cluster. The busy-waiting lock keeps IRQs masked for the (short)
// critical section.
void cluster_process_local_link( process_t * process )
{
    reg_t    save_sr;    // saved status register (restored at unlock)

    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get extended pointers on local process list root & lock
    xptr_t root_xp = XPTR( local_cxy , &pm->local_root );
    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );

    // get lock protecting the process manager local list
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    // register process in local list
    xlist_add_last( root_xp , XPTR( local_cxy , &process->local_list ) );
    pm->local_nr++;

    // release lock protecting the process manager local list
    remote_spinlock_unlock_busy( lock_xp , save_sr );
}
529
////////////////////////////////////////////////////////
// Removes <process> from the list of all processes placed in the local
// cluster. The busy-waiting lock keeps IRQs masked for the (short)
// critical section.
void cluster_process_local_unlink( process_t * process )
{
    reg_t save_sr;    // saved status register (restored at unlock)

    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get extended pointers on local process list lock
    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );

    // get lock protecting the process manager local list
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    // remove process from local list
    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
    pm->local_nr--;

    // release lock protecting the process manager local list
    remote_spinlock_unlock_busy( lock_xp , save_sr );
}
550
///////////////////////////////////////////////////////
// Registers the local <process> copy in the copies_list rooted in its
// owner cluster, and atomically increments the remote copies counter.
// The busy-waiting lock keeps IRQs masked for the critical section.
void cluster_process_copies_link( process_t * process )
{
    reg_t    irq_state;    // saved status register (restored at unlock)
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    // NOTE(review): the local <pm> pointer is combined with the remote
    // <owner_cxy> — this assumes the cluster descriptor has the same local
    // address in every cluster; TODO confirm this invariant.
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the copies_list[lpid] root
    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock_busy( copies_lock , &irq_state );

    // add copy to copies_list and update the remote counter atomically
    xlist_add_first( copies_root , copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock_busy( copies_lock , irq_state );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

}  // end cluster_process_copies_link()
596
/////////////////////////////////////////////////////////
// Removes the local <process> copy from the copies_list rooted in its
// owner cluster, and atomically decrements the remote copies counter.
// The busy-waiting lock keeps IRQs masked for the critical section.
void cluster_process_copies_unlink( process_t * process )
{
    uint32_t irq_state;    // saved status register (restored at unlock)
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    // NOTE(review): the local <pm> pointer is combined with the remote
    // <owner_cxy> — same same-address invariant as in copies_link().
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock_busy( copies_lock , &irq_state );

    // remove copy from copies_list and update the remote counter atomically
    xlist_unlink( copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock_busy( copies_lock , irq_state );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

}  // end cluster_process_copies_unlink()
639
///////////////////////////////////////////
// Displays on TXT0 all processes placed in cluster <cxy>.
// Takes the remote local-process-list lock first, then the TXT0 lock in
// busy-waiting mode, so the lines printed by process_display() are not
// interleaved with output from other cores.
void cluster_processes_display( cxy_t cxy )
{
    xptr_t        root_xp;          // root of remote local process list
    xptr_t        lock_xp;          // lock protecting that list
    xptr_t        iter_xp;          // list iterator
    xptr_t        process_xp;       // current process descriptor
    cxy_t         txt0_cxy;
    chdev_t     * txt0_ptr;
    xptr_t        txt0_xp;
    xptr_t        txt0_lock_xp;
    reg_t         txt0_save_sr;     // save SR to take TXT0 lock in busy mode     

    assert( (cluster_is_undefined( cxy ) == false),
    "illegal cluster index" );

    // get extended pointer on root and lock for local process list in cluster
    // NOTE(review): the LOCAL_CLUSTER address is combined with the remote
    // <cxy> — this assumes the cluster descriptor has the same local address
    // in every cluster; TODO confirm this invariant.
    root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root );
    lock_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_lock );

    // get pointers on TXT0 chdev
    txt0_xp  = chdev_dir.txt_tx[0];
    txt0_cxy = GET_CXY( txt0_xp );
    txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get lock on local process list
    remote_spinlock_lock( lock_xp );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );
     
    // display header
    nolock_printk("\n***** processes in cluster %x / cycle %d\n",
    cxy , (uint32_t)hal_get_cycles() );

    // loop on all processes in cluster cxy
    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_display( process_xp );
    }

    // release locks in reverse acquisition order
    // release TXT0 lock in busy waiting mode
    remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );

    // release lock on local process list
    remote_spinlock_unlock( lock_xp );

}  // end cluster_processes_display()
692
693
Note: See TracBrowser for help on using the repository browser.