source: trunk/kernel/kern/cluster.c @ 532

Last change on this file since 532 was 530, checked in by nicolas.van.phan@…, 6 years ago

Hack to compile on both IOB and LETI for now

File size: 21.3 KB
/*
 * cluster.c - Cluster-Manager related operations
 *
 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
 *         Mohamed Lamine Karaoui (2015)
 *         Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_atomic.h>
#include <hal_special.h>
#include <hal_ppm.h>
#include <remote_fifo.h>
#include <printk.h>
#include <errno.h>
#include <spinlock.h>
#include <core.h>
#include <chdev.h>
#include <scheduler.h>
#include <list.h>
#include <cluster.h>
#include <boot_info.h>
#include <bits.h>
#include <ppm.h>
#include <thread.h>
#include <kmem.h>
#include <process.h>
#include <dqdt.h>

/////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
/////////////////////////////////////////////////////////////////////////////////////

extern process_t           process_zero;     // allocated in kernel_init.c file
extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c file

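// This function initialises the local cluster manager from the boot_info structure:
// global parameters, external peripheral channels, the embedded memory allocators
// (PPM, KHM, KCM), the DQDT, the core descriptors, the RPC FIFOs, and the process
// manager (pref_tbl[], local process list, and copies lists).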
////////////////////////////////////////////////
error_t cluster_init( struct boot_info_s * info )
{
    error_t         error;
    lpid_t          lpid;     // local process_index
    lid_t           lid;      // local core index
    uint32_t        i;        // index in loop on external peripherals
    boot_device_t * dev;      // pointer on external peripheral
    uint32_t        func;     // external peripheral functional type

    cluster_t * cluster = LOCAL_CLUSTER;

    // initialize cluster global parameters
    cluster->paddr_width     = info->paddr_width;
    cluster->x_width         = info->x_width;
    cluster->y_width         = info->y_width;
    cluster->x_size          = info->x_size;
    cluster->y_size          = info->y_size;
    cluster->x_max           = info->x_max; // [FIXME]
    cluster->y_max           = info->y_max; // [FIXME]
    cluster->io_cxy          = info->io_cxy;

    // initialize external peripheral channels
    for( i = 0 ; i < info->ext_dev_nr ; i++ )
    {
        dev  = &info->ext_dev[i];
        func = FUNC_FROM_TYPE( dev->type );
        if( func == DEV_FUNC_TXT ) cluster->nb_txt_channels = dev->channels;
        if( func == DEV_FUNC_NIC ) cluster->nb_nic_channels = dev->channels;
        if( func == DEV_FUNC_IOC ) cluster->nb_ioc_channels = dev->channels;
        if( func == DEV_FUNC_FBF ) cluster->nb_fbf_channels = dev->channels;
    }

    // initialize cluster local parameters
    cluster->cores_nr        = info->cores_nr;

    // initialize the lock protecting the embedded kcm allocator
    spinlock_init( &cluster->kcm_lock );

#if DEBUG_CLUSTER_INIT
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    // initialises DQDT
    cluster->dqdt_root_level = dqdt_init( info->x_max,   // [FIXME]
                                          info->y_max,   // [FIXME]
                                          info->y_width ) - 1;

    // initialises embedded PPM
    error = hal_ppm_init( info );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
               __FUNCTION__ , local_cxy );
        return ENOMEM;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialises embedded KHM
    khm_init( &cluster->khm );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialises embedded KCM
    kcm_init( &cluster->kcm , KMEM_KCM );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialises all core descriptors
    for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    {
        core_init( &cluster->core_tbl[lid],    // target core descriptor
                   lid,                        // local core index
                   info->core[lid].gid );      // gid from boot_info_t
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialises RPC FIFOs
    for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    {
        local_fifo_init( &cluster->rpc_fifo[lid] );
        cluster->rpc_threads[lid] = 0;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : RPC fifo initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialise pref_tbl[] in process manager
    spinlock_init( &cluster->pmgr.pref_lock );
    cluster->pmgr.pref_nr = 0;
    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
    }

    // initialise local_list in process manager
    remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    cluster->pmgr.local_nr = 0;

    // initialise copies_lists in process manager
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        remote_spinlock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ) );
        cluster->pmgr.copies_nr[lpid] = 0;
        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
    }

#if DEBUG_CLUSTER_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : thread %x exit for cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    hal_fence();

    return 0;
} // end cluster_init()

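// This function checks the value of a cluster identifier: it returns true when the
// (x,y) coordinates extracted from <cxy> are outside the (x_size * y_size) mesh.
// Example: with y_width == 4, cxy 0x25 encodes (x,y) = (2,5).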
////////////////////////////////////////
bool_t cluster_is_undefined( cxy_t cxy )
{
    cluster_t * cluster = LOCAL_CLUSTER;

    uint32_t y_width = cluster->y_width;

    uint32_t x = cxy >> y_width;
    uint32_t y = cxy & ((1<<y_width)-1);

    if( x >= cluster->x_size ) return true;
    if( y >= cluster->y_size ) return true;

    return false;
}

////////////////////////////////////////////////////////////////////////////////////
//  Cores related functions
////////////////////////////////////////////////////////////////////////////////////

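// This function returns the index of the local core with the smallest number of
// attached threads (user + kernel), as reported by the core schedulers.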
/////////////////////////////////////////
lid_t cluster_select_local_core( void )
{
    uint32_t      min = 1000;
    lid_t         sel = 0;
    uint32_t      nthreads;
    lid_t         lid;
    scheduler_t * sched;

    cluster_t * cluster = LOCAL_CLUSTER;

    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        sched    = &cluster->core_tbl[lid].scheduler;
        nthreads = sched->u_threads_nr + sched->k_threads_nr;

        if( nthreads < min )
        {
            min = nthreads;
            sel = lid;
        }
    }
    return sel;
}

////////////////////////////////////////////////////////////////////////////////////
//  Process related functions
////////////////////////////////////////////////////////////////////////////////////

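// This function scans the copies list rooted in the owner cluster of <pid>, and
// returns an extended pointer on the copy of the process descriptor located in
// cluster <cxy>, or XPTR_NULL if no such copy is registered.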
//////////////////////////////////////////////////////
xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy,
                                            pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t   owner_cxy = CXY_FROM_PID( pid );
    lpid_t  lpid      = LPID_FROM_PID( pid );

    // get lock & root of list of copies from owner cluster
    root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

    // take the lock protecting the list of processes
    remote_spinlock_lock( lock_xp );

    // scan list of processes
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );

        if( GET_CXY( current_xp ) == cxy )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes
    remote_spinlock_unlock( lock_xp );

    // return extended pointer on process descriptor in cluster cxy
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_process_from_pid_in_cxy()


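// This function scans the local process list of the owner cluster of <pid>, and
// returns an extended pointer on the process descriptor registered in the owner
// cluster, or XPTR_NULL if not found.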
//////////////////////////////////////////////////////
xptr_t cluster_get_owner_process_from_pid( pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    process_t * current_ptr;   // local pointer on current process
    pid_t       current_pid;   // current process identifier
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster
    cxy_t  owner_cxy = CXY_FROM_PID( pid );

    // get lock & root of list of processes in owner cluster
    root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );

    // take the lock protecting the list of processes
    remote_spinlock_lock( lock_xp );

    // scan list of processes in owner cluster
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        current_ptr = GET_PTR( current_xp );
        current_pid = hal_remote_lw( XPTR( owner_cxy , &current_ptr->pid ) );

        if( current_pid == pid )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes
    remote_spinlock_unlock( lock_xp );

    // return extended pointer on process descriptor in owner cluster
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_owner_process_from_pid()


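// This function returns an extended pointer on the reference process descriptor,
// read from the pref_tbl[] array in the owner cluster, or XPTR_NULL for an
// illegal LPID.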
//////////////////////////////////////////////////////////
xptr_t cluster_get_reference_process_from_pid( pid_t pid )
{
    xptr_t ref_xp;   // extended pointer on reference process descriptor

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t  owner_cxy = CXY_FROM_PID( pid );
    lpid_t lpid      = LPID_FROM_PID( pid );

    // Check valid PID
    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;

    if( local_cxy == owner_cxy )   // local cluster is owner cluster
    {
        ref_xp = cluster->pmgr.pref_tbl[lpid];
    }
    else                           // use a remote_lwd to access owner cluster
    {
        ref_xp = (xptr_t)hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
    }

    return ref_xp;
}

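// This function allocates a new PID in the local cluster: it searches a free slot
// in the local pref_tbl[] array, registers the <process> descriptor in this slot,
// and builds the PID from the local cluster identifier and the slot index.
// It returns 0 on success, and -1 when no slot is available.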
///////////////////////////////////////////////
error_t cluster_pid_alloc( process_t * process,
                           pid_t     * pid )
{
    lpid_t      lpid;
    bool_t      found;

#if DEBUG_CLUSTER_PID_ALLOC
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_ALLOC < cycle )
printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // search an empty slot
    found = false;
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        if( pm->pref_tbl[lpid] == XPTR_NULL )
        {
            found = true;
            break;
        }
    }

    if( found )
    {
        // register process in pref_tbl[]
        pm->pref_tbl[lpid] = XPTR( local_cxy , process );
        pm->pref_nr++;

        // returns pid
        *pid = PID( local_cxy , lpid );

        // release the process_manager lock
        spinlock_unlock( &pm->pref_lock );

#if DEBUG_CLUSTER_PID_ALLOC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_ALLOC < cycle )
printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle );
#endif

        return 0;
    }
    else
    {
        // release the process_manager lock
        spinlock_unlock( &pm->pref_lock );

        return -1;
    }

} // end cluster_pid_alloc()

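// This function releases the pref_tbl[] slot associated to <pid>.
// It must be called in the owner cluster, as checked by the assert.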
/////////////////////////////////////
void cluster_pid_release( pid_t pid )
{

#if DEBUG_CLUSTER_PID_RELEASE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle );
#endif

    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    lpid_t lpid       = LPID_FROM_PID( pid );

    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;

    // check lpid
    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER),
    "illegal LPID = %d" , lpid );

    // check owner cluster
    assert( (owner_cxy == local_cxy) ,
    "local_cluster %x != owner_cluster %x" , local_cxy , owner_cxy );

    // get the process manager lock
    spinlock_lock( &pm->pref_lock );

    // remove process from pref_tbl[]
    pm->pref_tbl[lpid] = XPTR_NULL;
    pm->pref_nr--;

    // release the process_manager lock
    spinlock_unlock( &pm->pref_lock );

#if DEBUG_CLUSTER_PID_RELEASE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

} // end cluster_pid_release()

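// This function scans the local process list and returns a local pointer on the
// local copy of the process identified by <pid>, or NULL if not found.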
///////////////////////////////////////////////////////////
process_t * cluster_get_local_process_from_pid( pid_t pid )
{
    xptr_t         process_xp;
    process_t    * process_ptr;
    xptr_t         root_xp;
    xptr_t         iter_xp;
    bool_t         found;

    found   = false;
    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );

    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_ptr = (process_t *)GET_PTR( process_xp );
        if( process_ptr->pid == pid )
        {
            found = true;
            break;
        }
    }

    if( found ) return process_ptr;
    else        return NULL;

}  // end cluster_get_local_process_from_pid()

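// This function registers the <process> descriptor in the list of processes
// attached to the local cluster, protected by the remote spinlock.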
//////////////////////////////////////////////////////
void cluster_process_local_link( process_t * process )
{
    reg_t    save_sr;

    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get extended pointers on local process list root & lock
    xptr_t root_xp = XPTR( local_cxy , &pm->local_root );
    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );

    // get lock protecting the process manager local list
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    // register process in local list
    xlist_add_last( root_xp , XPTR( local_cxy , &process->local_list ) );
    pm->local_nr++;

    // release lock protecting the process manager local list
    remote_spinlock_unlock_busy( lock_xp , save_sr );
}

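// This function removes the <process> descriptor from the list of processes
// attached to the local cluster.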
////////////////////////////////////////////////////////
void cluster_process_local_unlink( process_t * process )
{
    reg_t save_sr;

    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get extended pointer on local process list lock
    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );

    // get lock protecting the process manager local list
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    // remove process from local list
    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
    pm->local_nr--;

    // release lock protecting the process manager local list
    remote_spinlock_unlock_busy( lock_xp , save_sr );
}

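// This function registers the local <process> descriptor in the copies list
// rooted in the owner cluster of the process PID.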
///////////////////////////////////////////////////////
void cluster_process_copies_link( process_t * process )
{
    reg_t    irq_state;
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the copies_list[lpid] root
    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock_busy( copies_lock , &irq_state );

    // add copy to copies_list
    xlist_add_first( copies_root , copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock_busy( copies_lock , irq_state );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

}  // end cluster_process_copies_link()

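// This function removes the local <process> descriptor from the copies list
// rooted in the owner cluster of the process PID.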
/////////////////////////////////////////////////////////
void cluster_process_copies_unlink( process_t * process )
{
    uint32_t irq_state;
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_spinlock_lock_busy( copies_lock , &irq_state );

    // remove copy from copies_list
    xlist_unlink( copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );

    // release lock protecting copies_list[lpid]
    remote_spinlock_unlock_busy( copies_lock , irq_state );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

}  // end cluster_process_copies_unlink()

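// This function displays on TXT0 all processes registered in cluster <cxy>,
// taking both the local process list lock and the TXT0 lock.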
///////////////////////////////////////////
void cluster_processes_display( cxy_t cxy )
{
    xptr_t        root_xp;
    xptr_t        lock_xp;
    xptr_t        iter_xp;
    xptr_t        process_xp;
    cxy_t         txt0_cxy;
    chdev_t     * txt0_ptr;
    xptr_t        txt0_xp;
    xptr_t        txt0_lock_xp;
    reg_t         txt0_save_sr;     // save SR to take TXT0 lock in busy mode

    assert( (cluster_is_undefined( cxy ) == false),
    "illegal cluster index" );

    // get extended pointers on root and lock for local process list in cluster
    root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root );
    lock_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_lock );

    // get pointers on TXT0 chdev
    txt0_xp  = chdev_dir.txt_tx[0];
    txt0_cxy = GET_CXY( txt0_xp );
    txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get lock on local process list
    remote_spinlock_lock( lock_xp );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( txt0_lock_xp , &txt0_save_sr );

    // display header
    nolock_printk("\n***** processes in cluster %x / cycle %d\n",
    cxy , (uint32_t)hal_get_cycles() );

    // loop on all processes in cluster cxy
    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_display( process_xp );
    }

    // release TXT0 lock in busy waiting mode
    remote_spinlock_unlock_busy( txt0_lock_xp , txt0_save_sr );

    // release lock on local process list
    remote_spinlock_unlock( lock_xp );

}  // end cluster_processes_display()
