/*
 * cluster.c - Cluster-Manager related operations
 *
 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
 *         Mohamed Lamine Karaoui (2015)
 *         Alain Greiner (2016,2017,2018,2019,2020)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_atomic.h>
#include <hal_special.h>
#include <hal_ppm.h>
#include <hal_macros.h>
#include <remote_fifo.h>
#include <printk.h>
#include <errno.h>
#include <queuelock.h>
#include <core.h>
#include <chdev.h>
#include <scheduler.h>
#include <list.h>
#include <cluster.h>
#include <boot_info.h>
#include <bits.h>
#include <ppm.h>
#include <thread.h>
#include <kmem.h>
#include <process.h>
#include <dqdt.h>

/////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
/////////////////////////////////////////////////////////////////////////////////////

extern process_t           process_zero;     // allocated in kernel_init.c
extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c


///////////////////////////////////////////////////
void cluster_info_init( struct boot_info_s * info )
{
    boot_device_t * dev;      // pointer on external peripheral
    uint32_t        func;     // external peripheral functional type
    uint32_t        x;
    uint32_t        y;
    uint32_t        i;

    cluster_t * cluster = LOCAL_CLUSTER;

    // initialize cluster global parameters
    cluster->paddr_width     = info->paddr_width;
    cluster->x_width         = info->x_width;
    cluster->y_width         = info->y_width;
    cluster->x_size          = info->x_size;
    cluster->y_size          = info->y_size;
    cluster->io_cxy          = info->io_cxy;

    // initialize the cluster_info[][] array
    for( x = 0 ; x < CONFIG_MAX_CLUSTERS_X ; x++ )
    {
        for( y = 0 ; y < CONFIG_MAX_CLUSTERS_Y ; y++ )
        {
            cluster->cluster_info[x][y] = info->cluster_info[x][y];
        }
    }

    // initialize external peripherals channels
    for( i = 0 ; i < info->ext_dev_nr ; i++ )
    {
        dev  = &info->ext_dev[i];
        func = FUNC_FROM_TYPE( dev->type );
        if( func == DEV_FUNC_TXT ) cluster->nb_txt_channels = dev->channels;
        if( func == DEV_FUNC_NIC ) cluster->nb_nic_channels = dev->channels;
        if( func == DEV_FUNC_IOC ) cluster->nb_ioc_channels = dev->channels;
        if( func == DEV_FUNC_FBF ) cluster->nb_fbf_channels = dev->channels;
    }

    // initialize number of local cores
    cluster->cores_nr  = info->cores_nr;

}  // end cluster_info_init()

//////////////////////////////////////
void cluster_info_display( cxy_t cxy )
{
    uint32_t  x;
    uint32_t  y;
    uint32_t  ncores;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get x_size & y_size from target cluster
    uint32_t  x_size = hal_remote_l32( XPTR( cxy , &cluster->x_size ) );
    uint32_t  y_size = hal_remote_l32( XPTR( cxy , &cluster->y_size ) );

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock
    remote_busylock_acquire( lock_xp );

    nolock_printk("\n***** cluster_info in cluster %x / x_size %d / y_size %d\n",
    cxy, x_size, y_size );

    for( x = 0 ; x < x_size ; x++ )
    {
        for( y = 0 ; y < y_size ; y++ )
        {
            ncores = (uint32_t)hal_remote_lb( XPTR( cxy , &cluster->cluster_info[x][y] ) );
            nolock_printk(" - ncores[%d][%d] = %d\n", x, y, ncores );
        }
    }

    // release TXT0 lock
    remote_busylock_release( lock_xp );

}  // end cluster_info_display()

/////////////////////////////////////////////////////////
error_t cluster_manager_init( struct boot_info_s * info )
{
    error_t         error;
    lpid_t          lpid;     // local process index
    lid_t           lid;      // local core index

    cluster_t * cluster = LOCAL_CLUSTER;

#if DEBUG_CLUSTER_INIT
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[%s] thread[%x,%x] enters for cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_cxy , cycle );
#endif

#if (DEBUG_CLUSTER_INIT & 1)
cluster_info_display( local_cxy );
#endif

    // initialises embedded PPM
    error = hal_ppm_init( info );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
               __FUNCTION__ , local_cxy );
        return ENOMEM;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[%s] PPM initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialises embedded KHM
    khm_init( &cluster->khm );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[%s] KHM initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialises embedded KCM
    uint32_t  i;
    for( i = 0 ; i < 6 ; i++ ) kcm_init( &cluster->kcm[i] , i+6 );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[%s] KCM[6:11] initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialises all cores descriptors
    for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    {
        core_init( &cluster->core_tbl[lid],    // target core descriptor
                   lid,                        // local core index
                   info->core[lid].gid );      // gid from boot_info_t
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[%s] cores initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialises RPC FIFOs
    for( lid = 0 ; lid < cluster->cores_nr; lid++ )
    {
        remote_fifo_init( &cluster->rpc_fifo[lid] );
        cluster->rpc_threads[lid] = 0;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[%s] RPC fifo initialized in cluster %x at cycle %d\n",
__FUNCTION__ , local_cxy , hal_get_cycles() );
#endif

    // initialise pref_tbl[] in process manager
    queuelock_init( &cluster->pmgr.pref_lock , LOCK_CLUSTER_PREFTBL );
    cluster->pmgr.pref_nr = 0;
    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )  // slot 0 is reserved for process_zero
    {
        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
    }

    // initialise local_list in process manager
    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    cluster->pmgr.local_nr = 0;
    remote_queuelock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) ,
                           LOCK_CLUSTER_LOCALS );

    // initialise copies_lists in process manager
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.copies_nr[lpid] = 0;
        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
        remote_queuelock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ),
                               LOCK_CLUSTER_COPIES );
    }

#if DEBUG_CLUSTER_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[%s] thread[%x,%x] exit for cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
#endif

    hal_fence();

    return 0;

} // end cluster_manager_init()

///////////////////////////////////
cxy_t cluster_random_select( void )
{
    uint32_t  index;
    uint32_t  x;
    uint32_t  y;
    cxy_t     cxy;

    uint32_t  x_size    = LOCAL_CLUSTER->x_size;
    uint32_t  y_size    = LOCAL_CLUSTER->y_size;

    do
    {
        index     = ( hal_get_cycles() + hal_get_gid() ) % (x_size * y_size);
        x         = index / y_size;
        y         = index % y_size;
        cxy       = HAL_CXY_FROM_XY( x , y );
    }
    while ( cluster_is_active( cxy ) == false );

    return ( cxy );
}
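
// Usage sketch (editorial illustration, not part of the original sources): a caller
// that needs a target cluster but has no load information can rely on
// cluster_random_select(), which only returns active clusters. The surrounding
// logic below is hypothetical.
//
//     cxy_t target_cxy = cluster_random_select();
//     if( target_cxy == local_cxy )
//     {
//         // allocate the resource locally
//     }
//     else
//     {
//         // allocate the resource remotely, e.g. through an RPC sent to target_cxy
//     }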

/////////////////////////////////////////////
inline bool_t cluster_is_active ( cxy_t cxy )
{
    uint32_t x = HAL_X_FROM_CXY( cxy );
    uint32_t y = HAL_Y_FROM_CXY( cxy );

    return ( LOCAL_CLUSTER->cluster_info[x][y] != 0 );
}

////////////////////////////////////////////////////////////////////////////////////
//  Cores related functions
////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////
lid_t cluster_select_local_core( cxy_t  cxy )
{
    uint32_t      min = 1000000;
    lid_t         sel = 0;
    uint32_t      nthreads;
    lid_t         lid;
    scheduler_t * sched;
    cluster_t   * cluster = LOCAL_CLUSTER;
    uint32_t      ncores = hal_remote_l32( XPTR( cxy , &cluster->cores_nr ) );

    for( lid = 0 ; lid < ncores ; lid++ )
    {
        sched  = &cluster->core_tbl[lid].scheduler;

        nthreads = hal_remote_l32( XPTR( cxy , &sched->u_threads_nr ) ) +
                   hal_remote_l32( XPTR( cxy , &sched->k_threads_nr ) );

        if( nthreads < min )
        {
            min = nthreads;
            sel = lid;
        }
    }
    return sel;
}
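
// Placement sketch (editorial illustration): code that creates a thread in a remote
// cluster can combine cluster_random_select() and cluster_select_local_core() to
// obtain a (cluster, core) pair; the least loaded core is the one with the smallest
// sum of user and kernel threads registered in its scheduler. The variable names
// below are hypothetical.
//
//     cxy_t target_cxy = cluster_random_select();                  // any active cluster
//     lid_t target_lid = cluster_select_local_core( target_cxy );  // least loaded core
//     // (target_cxy , target_lid) can then be passed to the thread creation path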

////////////////////////////////////////////////////////////////////////////////////
//  Process related functions
////////////////////////////////////////////////////////////////////////////////////

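// Reminder on the pmgr_t structures used by the functions below (editorial comment):
// - pref_tbl[lpid]    : in the owner cluster, extended pointer on the reference
//                       process descriptor for each locally allocated PID,
//                       protected by the pref_lock queuelock.
// - local_root        : root of the list of all process descriptors placed in this
//                       cluster, protected by the local_lock remote_queuelock.
// - copies_root[lpid] : in the owner cluster, root of the list of all copies of a
//                       given process across clusters, protected by the
//                       copies_lock[lpid] remote_queuelock.
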
//////////////////////////////////////////////////////
xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy,
                                            pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of list of copies in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t   owner_cxy = CXY_FROM_PID( pid );
    lpid_t  lpid      = LPID_FROM_PID( pid );

    // get lock & root of list of copies from owner cluster
    root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

    // take the lock protecting the list of processes
    remote_queuelock_acquire( lock_xp );

    // scan list of processes
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );

        if( GET_CXY( current_xp ) == cxy )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes
    remote_queuelock_release( lock_xp );

    // return extended pointer on process descriptor in cluster cxy
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_process_from_pid_in_cxy()


//////////////////////////////////////////////////////
xptr_t cluster_get_owner_process_from_pid( pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    process_t * current_ptr;   // local pointer on current process
    pid_t       current_pid;   // current process identifier
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster
    cxy_t  owner_cxy = CXY_FROM_PID( pid );

    // get lock & root of list of processes in owner cluster
    root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );

    // take the lock protecting the list of processes
    remote_queuelock_acquire( lock_xp );

    // scan list of processes in owner cluster
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        current_ptr = GET_PTR( current_xp );
        current_pid = hal_remote_l32( XPTR( owner_cxy , &current_ptr->pid ) );

        if( current_pid == pid )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes
    remote_queuelock_release( lock_xp );

    // return extended pointer on process descriptor in owner cluster
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_owner_process_from_pid()


//////////////////////////////////////////////////////////
xptr_t cluster_get_reference_process_from_pid( pid_t pid )
{
    xptr_t ref_xp;   // extended pointer on reference process descriptor

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t  owner_cxy = CXY_FROM_PID( pid );
    lpid_t lpid      = LPID_FROM_PID( pid );

    // Check valid PID
    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;

    if( local_cxy == owner_cxy )   // local cluster is owner cluster
    {
        ref_xp = cluster->pmgr.pref_tbl[lpid];
    }
    else                           // use a remote access to the owner cluster
    {
        ref_xp = (xptr_t)hal_remote_l64( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
    }

    return ref_xp;
}
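
// Usage sketch (editorial illustration): a caller holding a PID received from user
// space can locate the reference process and then read one of its fields with a
// remote access. The field read here is only an example.
//
//     xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
//     if( ref_xp != XPTR_NULL )
//     {
//         cxy_t       ref_cxy = GET_CXY( ref_xp );
//         process_t * ref_ptr = GET_PTR( ref_xp );
//         pid_t       ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) );
//     }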

///////////////////////////////////////////////
error_t cluster_pid_alloc( process_t * process,
                           pid_t     * pid )
{
    lpid_t      lpid;
    bool_t      found;
    error_t     error;

#if DEBUG_CLUSTER_PID_ALLOC
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_CLUSTER_PID_ALLOC < cycle )
printk("\n[%s] thread[%x,%x] enters in cluster %x / cycle %d\n",
__FUNCTION__ , this->process->pid , this->trdid , local_cxy , cycle );
#endif

    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;

    // get the lock protecting pref_tbl
    queuelock_acquire( &pm->pref_lock );

    // search an empty slot
    found = false;
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        if( pm->pref_tbl[lpid] == XPTR_NULL )
        {
            found = true;
            break;
        }
    }

    if( found )
    {
        // register process in pref_tbl[]
        pm->pref_tbl[lpid] = XPTR( local_cxy , process );
        pm->pref_nr++;

        // return pid to caller
        *pid = PID( local_cxy , lpid );

        error = 0;
    }
    else
    {
        error = 0xFFFFFFFF;
    }

    // release the process_manager lock
    queuelock_release( &pm->pref_lock );

#if DEBUG_CLUSTER_PID_ALLOC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_ALLOC < cycle )
printk("\n[%s] thread[%x,%x] exit in cluster %x / cycle %d\n",
__FUNCTION__ , this->process->pid , this->trdid , local_cxy , cycle );
#endif

    return error;

} // end cluster_pid_alloc()

/////////////////////////////////////
void cluster_pid_release( pid_t pid )
{

#if DEBUG_CLUSTER_PID_RELEASE
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[%s] thread[%x,%x] enters in cluster %x / pid %x / cycle %d\n",
__FUNCTION__ , this->process->pid , this->trdid , local_cxy , pid, cycle );
#endif

    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    lpid_t lpid       = LPID_FROM_PID( pid );

    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;

    // check lpid
    assert( __FUNCTION__, (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER),
    "illegal LPID = %d" , lpid );

    // check owner cluster
    assert( __FUNCTION__, (owner_cxy == local_cxy) ,
    "local_cluster %x !=  owner_cluster %x" , local_cxy , owner_cxy );

    // get the lock protecting pref_tbl
    queuelock_acquire( &pm->pref_lock );

    // remove process from pref_tbl[]
    pm->pref_tbl[lpid] = XPTR_NULL;
    pm->pref_nr--;

    // release the process_manager lock
    queuelock_release( &pm->pref_lock );

#if DEBUG_CLUSTER_PID_RELEASE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[%s] thread[%x,%x] exit in cluster %x / cycle %d\n",
__FUNCTION__ , this->process->pid , this->trdid , local_cxy , cycle );
#endif

} // end cluster_pid_release()
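
// Lifecycle sketch (editorial illustration): a PID is allocated in the owner cluster
// when the process is created there, and must be released in the same cluster when
// the process is destroyed. The error handling below is only an example.
//
//     pid_t pid;
//     if( cluster_pid_alloc( process , &pid ) )    // non-zero return : no free slot
//     {
//         // abort the process creation
//     }
//     else
//     {
//         // ... process lives with PID = (owner cluster , local index) ...
//         cluster_pid_release( pid );
//     }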

///////////////////////////////////////////////////////////
process_t * cluster_get_local_process_from_pid( pid_t pid )
{
    xptr_t         process_xp;
    process_t    * process_ptr;
    xptr_t         root_xp;
    xptr_t         iter_xp;
    bool_t         found;

    found   = false;
    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );

    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_ptr = (process_t *)GET_PTR( process_xp );
        if( process_ptr->pid == pid )
        {
            found = true;
            break;
        }
    }

    if( found ) return process_ptr;
    else        return NULL;

}  // end cluster_get_local_process_from_pid()

//////////////////////////////////////////////////////
void cluster_process_local_link( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get extended pointers on local process list root & lock
    xptr_t root_xp = XPTR( local_cxy , &pm->local_root );
    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );

    // get lock protecting the local list
    remote_queuelock_acquire( lock_xp );

    // register process in local list
    xlist_add_last( root_xp , XPTR( local_cxy , &process->local_list ) );
    pm->local_nr++;

    // release lock protecting the local list
    remote_queuelock_release( lock_xp );
}

////////////////////////////////////////////////////////
void cluster_process_local_unlink( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get extended pointer on local process list lock
    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );

    // get lock protecting the local list
    remote_queuelock_acquire( lock_xp );

    // remove process from local list
    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
    pm->local_nr--;

    // release lock protecting the local list
    remote_queuelock_release( lock_xp );
}

///////////////////////////////////////////////////////
void cluster_process_copies_link( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[%s] thread[%x,%x] enters for process %x / cycle %d\n",
__FUNCTION__ , this->process->pid , this->trdid , process->pid , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the copies_list[lpid] root
    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_queuelock_acquire( copies_lock );

    // add copy to copies_list
    xlist_add_first( copies_root , copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );

    // release lock protecting copies_list[lpid]
    remote_queuelock_release( copies_lock );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
__FUNCTION__ , this->process->pid , this->trdid , process->pid , cycle );
#endif

}  // end cluster_process_copies_link()

/////////////////////////////////////////////////////////
void cluster_process_copies_unlink( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[%s] thread[%x,%x] enters for process %x / cycle %d\n",
__FUNCTION__ , this->process->pid , this->trdid , process->pid , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_queuelock_acquire( copies_lock );

    // remove copy from copies_list
    xlist_unlink( copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );

    // release lock protecting copies_list[lpid]
    remote_queuelock_release( copies_lock );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
__FUNCTION__ , this->process->pid , this->trdid , process->pid , cycle );
#endif

}  // end cluster_process_copies_unlink()

////////////////////////////////////////////
void cluster_processes_display( cxy_t   cxy,
                                bool_t  owned )
{
    xptr_t        root_xp;
    xptr_t        lock_xp;
    xptr_t        iter_xp;
    xptr_t        process_xp;
    process_t   * process_ptr;
    cxy_t         process_cxy;
    pid_t         pid;
    cxy_t         txt0_cxy;
    chdev_t     * txt0_ptr;
    xptr_t        txt0_xp;
    xptr_t        txt0_lock_xp;
    uint32_t      pref_nr;       // number of owned processes in cluster cxy

assert( __FUNCTION__, (cluster_is_active( cxy ) ), "illegal cluster index" );

    // get extended pointer on root and lock for local process list in cluster
    root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root );
    lock_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_lock );

    // get number of owned processes in cluster cxy
    pref_nr = hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->pmgr.pref_nr ) );

    // display nothing if no user process in cluster cxy
    if( (owned != false) && (pref_nr < 2) ) return;

    // get pointers on TXT0 chdev
    txt0_xp  = chdev_dir.txt_tx[0];
    txt0_cxy = GET_CXY( txt0_xp );
    txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get lock on local process list
    remote_queuelock_acquire( lock_xp );

    // get TXT0 lock
    remote_busylock_acquire( txt0_lock_xp );

    nolock_printk("\n***** processes in cluster %x / cycle %d\n",
    cxy , (uint32_t)hal_get_cycles() );

    // loop on all processes in cluster cxy
    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_ptr = GET_PTR( process_xp );
        process_cxy = GET_CXY( process_xp );

        // get process PID
        pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );

        if( owned )  // display only user & owned processes
        {
            if( (CXY_FROM_PID( pid ) == cxy) && (LPID_FROM_PID( pid ) != 0) )
            {
                process_display( process_xp );
            }
        }
        else         // display all local processes
        {
            process_display( process_xp );
        }
    }

    // release TXT0 lock
    remote_busylock_release( txt0_lock_xp );

    // release lock on local process list
    remote_queuelock_release( lock_xp );

}  // end cluster_processes_display()
