/*
 * cluster.c - Cluster-Manager related operations
 *
 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
 *          Mohamed Lamine Karaoui (2015)
 *          Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH. is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH. is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_atomic.h>
#include <hal_special.h>
#include <hal_ppm.h>
#include <hal_macros.h>
#include <remote_fifo.h>
#include <printk.h>
#include <errno.h>
#include <queuelock.h>
#include <core.h>
#include <chdev.h>
#include <scheduler.h>
#include <list.h>
#include <cluster.h>
#include <boot_info.h>
#include <bits.h>
#include <ppm.h>
#include <thread.h>
#include <kmem.h>
#include <process.h>
#include <dqdt.h>

/////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
/////////////////////////////////////////////////////////////////////////////////////

extern process_t           process_zero;     // allocated in kernel_init.c
extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c

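////////////////////////////////////////////////////////////////////////////////////
// This function initializes the local cluster manager from the boot_info structure:
// it copies the global topology parameters (paddr_width, x_size, y_size, io_cxy...),
// the cluster_info[][] map of active clusters, the number of channels of the
// external peripherals (TXT, NIC, IOC, FBF), and the local number of cores.
////////////////////////////////////////////////////////////////////////////////////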
///////////////////////////////////////////////////
void cluster_info_init( struct boot_info_s * info )
{
    boot_device_t * dev;      // pointer on external peripheral
    uint32_t        func;     // external peripheral functional type
    uint32_t        x;
    uint32_t        y;
    uint32_t        i;

    cluster_t * cluster = LOCAL_CLUSTER;

    // initialize cluster global parameters
    cluster->paddr_width     = info->paddr_width;
    cluster->x_width         = info->x_width;
    cluster->y_width         = info->y_width;
    cluster->x_size          = info->x_size;
    cluster->y_size          = info->y_size;
    cluster->io_cxy          = info->io_cxy;

    // initialize the cluster_info[][] array
    for( x = 0 ; x < CONFIG_MAX_CLUSTERS_X ; x++ )
    {
        for( y = 0 ; y < CONFIG_MAX_CLUSTERS_Y ; y++ )
        {
            cluster->cluster_info[x][y] = info->cluster_info[x][y];
        }
    }

    // initialize external peripherals channels
    for( i = 0 ; i < info->ext_dev_nr ; i++ )
    {
        dev  = &info->ext_dev[i];
        func = FUNC_FROM_TYPE( dev->type );
        if( func == DEV_FUNC_TXT ) cluster->nb_txt_channels = dev->channels;
        if( func == DEV_FUNC_NIC ) cluster->nb_nic_channels = dev->channels;
        if( func == DEV_FUNC_IOC ) cluster->nb_ioc_channels = dev->channels;
        if( func == DEV_FUNC_FBF ) cluster->nb_fbf_channels = dev->channels;
    }

    // initialize number of cores
    cluster->cores_nr  = info->cores_nr;

}  // end cluster_info_init()

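////////////////////////////////////////////////////////////////////////////////////
// This function initializes the other local cluster manager structures: the DQDT,
// the embedded PPM, KHM, and KCM memory allocators, the core descriptors, the
// per-core RPC FIFOs, and the three process manager structures (pref_tbl, local
// process list, and copies lists). It returns 0 on success, or ENOMEM when the
// physical pages manager cannot be initialized.
////////////////////////////////////////////////////////////////////////////////////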
/////////////////////////////////////////////////////////
error_t cluster_manager_init( struct boot_info_s * info )
{
    error_t         error;
    lpid_t          lpid;     // local process index
    lid_t           lid;      // local core index

    cluster_t * cluster = LOCAL_CLUSTER;

    // initialize the lock protecting the embedded kcm allocator
    busylock_init( &cluster->kcm_lock , LOCK_CLUSTER_KCM );

#if DEBUG_CLUSTER_INIT
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : thread %x in process %x enters for cluster %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, local_cxy , cycle );
#endif

    // initialize DQDT
    cluster->dqdt_root_level = dqdt_init( info->x_size,
                                          info->y_size ) - 1;

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : DQDT initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialize embedded PPM
    error = hal_ppm_init( info );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize PPM in cluster %x\n",
        __FUNCTION__ , local_cxy );
        return ENOMEM;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialize embedded KHM
    khm_init( &cluster->khm );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KHM initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialize embedded KCM
    kcm_init( &cluster->kcm , KMEM_KCM );

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : KCM initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialize all core descriptors
    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        core_init( &cluster->core_tbl[lid],    // target core descriptor
                   lid,                        // local core index
                   info->core[lid].gid );      // gid from boot_info_t
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialize RPC FIFOs
    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        remote_fifo_init( &cluster->rpc_fifo[lid] );
        cluster->rpc_threads[lid] = 0;
    }

#if( DEBUG_CLUSTER_INIT & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : RPC fifos initialized in cluster %x / cycle %d\n",
__FUNCTION__ , local_cxy , cycle );
#endif

    // initialize pref_tbl[] in process manager
    queuelock_init( &cluster->pmgr.pref_lock , LOCK_CLUSTER_PREFTBL );
    cluster->pmgr.pref_nr = 0;
    cluster->pmgr.pref_tbl[0] = XPTR( local_cxy , &process_zero );
    for( lpid = 1 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.pref_tbl[lpid] = XPTR_NULL;
    }

    // initialize local_list in process manager
    xlist_root_init( XPTR( local_cxy , &cluster->pmgr.local_root ) );
    cluster->pmgr.local_nr = 0;
    remote_queuelock_init( XPTR( local_cxy , &cluster->pmgr.local_lock ) ,
                           LOCK_CLUSTER_LOCALS );

    // initialize copies_lists in process manager
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        cluster->pmgr.copies_nr[lpid] = 0;
        xlist_root_init( XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] ) );
        remote_queuelock_init( XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] ),
                               LOCK_CLUSTER_COPIES );
    }

#if DEBUG_CLUSTER_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_INIT < cycle )
printk("\n[DBG] %s : thread %x in process %x exit for cluster %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid , local_cxy , cycle );
#endif

    hal_fence();

    return 0;

}  // end cluster_manager_init()

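////////////////////////////////////////////////////////////////////////////////////
// This function returns the identifier of a randomly selected active cluster.
// The pseudo-random index is built from the local cycle counter and the calling
// core gid, and the selection is retried until the cluster_info[][] map reports
// an active cluster. Note: it assumes that at least one cluster is active,
// otherwise the loop would not terminate.
////////////////////////////////////////////////////////////////////////////////////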
///////////////////////////////////
cxy_t cluster_random_select( void )
{
    uint32_t  index;
    uint32_t  x;
    uint32_t  y;
    cxy_t     cxy;

    uint32_t  x_size    = LOCAL_CLUSTER->x_size;
    uint32_t  y_size    = LOCAL_CLUSTER->y_size;

    do
    {
        index     = ( hal_get_cycles() + hal_get_gid() ) % (x_size * y_size);
        x         = index / y_size;
        y         = index % y_size;
        cxy       = HAL_CXY_FROM_XY( x , y );
    }
    while ( cluster_is_active( cxy ) == false );

    return ( cxy );
}

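////////////////////////////////////////////////////////////////////////////////////
// The two predicates below decode a cluster identifier with HAL_X_FROM_CXY() and
// HAL_Y_FROM_CXY(): a cluster is "undefined" when its coordinates fall outside the
// (x_size , y_size) mesh, and "active" when it is registered in cluster_info[][].
// A minimal usage sketch (the target_cxy variable is hypothetical):
//     if( (cluster_is_undefined( target_cxy ) == false) &&
//         cluster_is_active( target_cxy ) )  { /* target_cxy can be used */ }
////////////////////////////////////////////////////////////////////////////////////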
////////////////////////////////////////
bool_t cluster_is_undefined( cxy_t cxy )
{
    uint32_t  x_size = LOCAL_CLUSTER->x_size;
    uint32_t  y_size = LOCAL_CLUSTER->y_size;

    uint32_t  x      = HAL_X_FROM_CXY( cxy );
    uint32_t  y      = HAL_Y_FROM_CXY( cxy );

    if( x >= x_size ) return true;
    if( y >= y_size ) return true;

    return false;
}

//////////////////////////////////////
bool_t cluster_is_active ( cxy_t cxy )
{
    uint32_t x = HAL_X_FROM_CXY( cxy );
    uint32_t y = HAL_Y_FROM_CXY( cxy );

    return ( LOCAL_CLUSTER->cluster_info[x][y] != 0 );
}

////////////////////////////////////////////////////////////////////////////////////
//  Cores related functions
////////////////////////////////////////////////////////////////////////////////////

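////////////////////////////////////////////////////////////////////////////////////
// This function returns the index of the least loaded local core: it scans all
// local schedulers and selects the core with the smallest total number of attached
// threads (user + kernel). The scan does not take the scheduler locks, so the
// result is only a placement hint, not an exact load measurement.
////////////////////////////////////////////////////////////////////////////////////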
///////////////////////////////////////
lid_t cluster_select_local_core( void )
{
    uint32_t      min = 1000;
    lid_t         sel = 0;
    uint32_t      nthreads;
    lid_t         lid;
    scheduler_t * sched;

    cluster_t * cluster = LOCAL_CLUSTER;

    for( lid = 0 ; lid < cluster->cores_nr ; lid++ )
    {
        sched    = &cluster->core_tbl[lid].scheduler;
        nthreads = sched->u_threads_nr + sched->k_threads_nr;

        if( nthreads < min )
        {
            min = nthreads;
            sel = lid;
        }
    }
    return sel;
}

////////////////////////////////////////////////////////////////////////////////////
//  Process related functions
////////////////////////////////////////////////////////////////////////////////////

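////////////////////////////////////////////////////////////////////////////////////
// A process identifier encodes both the owner cluster and a local index:
//     pid       = PID( owner_cxy , lpid );
//     owner_cxy = CXY_FROM_PID( pid );
//     lpid      = LPID_FROM_PID( pid );
// The process manager in each cluster maintains the three structures used below:
// - pref_tbl[lpid]    : extended pointers on the reference process descriptors,
// - local_root        : root of the list of processes placed in this cluster,
// - copies_root[lpid] : root (in the owner cluster) of the list of all copies
//                       of a given process, distributed over the clusters.
////////////////////////////////////////////////////////////////////////////////////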
//////////////////////////////////////////////////////
xptr_t cluster_get_process_from_pid_in_cxy( cxy_t cxy,
                                            pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of list of copies in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t   owner_cxy = CXY_FROM_PID( pid );
    lpid_t  lpid      = LPID_FROM_PID( pid );

    // get lock & root of list of copies from owner cluster
    root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

    // take the lock protecting the list of copies
    remote_queuelock_acquire( lock_xp );

    // scan list of copies
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );

        if( GET_CXY( current_xp ) == cxy )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of copies
    remote_queuelock_release( lock_xp );

    // return extended pointer on process descriptor in cluster cxy
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_process_from_pid_in_cxy()

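////////////////////////////////////////////////////////////////////////////////////
// This function searches, in the owner cluster only, the process descriptor copy
// placed in that cluster: it scans the owner cluster local process list and
// compares the PIDs. It differs from cluster_get_process_from_pid_in_cxy() above,
// which can return the copy placed in any cluster <cxy>.
////////////////////////////////////////////////////////////////////////////////////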
//////////////////////////////////////////////////////
xptr_t cluster_get_owner_process_from_pid( pid_t pid )
{
    xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
    xptr_t      lock_xp;       // xptr on lock protecting this list
    xptr_t      iter_xp;       // iterator
    xptr_t      current_xp;    // xptr on current process descriptor
    process_t * current_ptr;   // local pointer on current process
    pid_t       current_pid;   // current process identifier
    bool_t      found;

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster
    cxy_t  owner_cxy = CXY_FROM_PID( pid );

    // get lock & root of list of processes in owner cluster
    root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
    lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );

    // take the lock protecting the list of processes
    remote_queuelock_acquire( lock_xp );

    // scan list of processes in owner cluster
    found = false;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        current_ptr = GET_PTR( current_xp );
        current_pid = hal_remote_l32( XPTR( owner_cxy , &current_ptr->pid ) );

        if( current_pid == pid )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes
    remote_queuelock_release( lock_xp );

    // return extended pointer on process descriptor in owner cluster
    if( found ) return current_xp;
    else        return XPTR_NULL;

}  // end cluster_get_owner_process_from_pid()

//////////////////////////////////////////////////////////
xptr_t cluster_get_reference_process_from_pid( pid_t pid )
{
    xptr_t ref_xp;   // extended pointer on reference process descriptor

    cluster_t * cluster = LOCAL_CLUSTER;

    // get owner cluster and lpid
    cxy_t  owner_cxy = CXY_FROM_PID( pid );
    lpid_t lpid      = LPID_FROM_PID( pid );

    // check valid PID
    if( lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER )  return XPTR_NULL;

    if( local_cxy == owner_cxy )   // local cluster is owner cluster
    {
        ref_xp = cluster->pmgr.pref_tbl[lpid];
    }
    else                           // use a remote_l64 to access the owner cluster
    {
        ref_xp = (xptr_t)hal_remote_l64( XPTR( owner_cxy , &cluster->pmgr.pref_tbl[lpid] ) );
    }

    return ref_xp;
}

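////////////////////////////////////////////////////////////////////////////////////
// This function allocates a new PID in the local cluster: it searches a free slot
// in the local pref_tbl[], registers the process extended pointer in this slot,
// and builds the PID from the local cluster identifier and the slot index.
// It returns 0 on success, and a non-zero value when pref_tbl[] is full.
////////////////////////////////////////////////////////////////////////////////////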
///////////////////////////////////////////////
error_t cluster_pid_alloc( process_t * process,
                           pid_t     * pid )
{
    lpid_t      lpid;
    bool_t      found;

#if DEBUG_CLUSTER_PID_ALLOC
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_ALLOC < cycle )
printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

    pmgr_t    * pm         = &LOCAL_CLUSTER->pmgr;

    // get the lock protecting pref_tbl
    queuelock_acquire( &pm->pref_lock );

    // search an empty slot
    found = false;
    for( lpid = 0 ; lpid < CONFIG_MAX_PROCESS_PER_CLUSTER ; lpid++ )
    {
        if( pm->pref_tbl[lpid] == XPTR_NULL )
        {
            found = true;
            break;
        }
    }

    if( found )
    {
        // register process in pref_tbl[]
        pm->pref_tbl[lpid] = XPTR( local_cxy , process );
        pm->pref_nr++;

        // return pid to caller
        *pid = PID( local_cxy , lpid );

        // release the process_manager lock
        queuelock_release( &pm->pref_lock );

#if DEBUG_CLUSTER_PID_ALLOC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_ALLOC < cycle )
printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle );
#endif

        return 0;
    }
    else    // no free slot found in pref_tbl[]
    {
        // release the process_manager lock
        queuelock_release( &pm->pref_lock );

        return 0xFFFFFFFF;
    }

}  // end cluster_pid_alloc()

/////////////////////////////////////
void cluster_pid_release( pid_t pid )
{

#if DEBUG_CLUSTER_PID_RELEASE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle );
#endif

    cxy_t  owner_cxy  = CXY_FROM_PID( pid );
    lpid_t lpid       = LPID_FROM_PID( pid );

    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;

    // check lpid
    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER),
    "illegal LPID = %d" , lpid );

    // check owner cluster
    assert( (owner_cxy == local_cxy) ,
    "local_cluster %x != owner_cluster %x" , local_cxy , owner_cxy );

    // get the lock protecting pref_tbl
    queuelock_acquire( &pm->pref_lock );

    // remove process from pref_tbl[]
    pm->pref_tbl[lpid] = XPTR_NULL;
    pm->pref_nr--;

    // release the process_manager lock
    queuelock_release( &pm->pref_lock );

#if DEBUG_CLUSTER_PID_RELEASE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PID_RELEASE < cycle )
printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
#endif

}  // end cluster_pid_release()

///////////////////////////////////////////////////////////
process_t * cluster_get_local_process_from_pid( pid_t pid )
{
    xptr_t         process_xp;
    process_t    * process_ptr;
    xptr_t         root_xp;
    xptr_t         iter_xp;
    bool_t         found;

    found   = false;
    root_xp = XPTR( local_cxy , &LOCAL_CLUSTER->pmgr.local_root );

    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_ptr = (process_t *)GET_PTR( process_xp );
        if( process_ptr->pid == pid )
        {
            found = true;
            break;
        }
    }

    if( found ) return process_ptr;
    else        return NULL;

}  // end cluster_get_local_process_from_pid()

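////////////////////////////////////////////////////////////////////////////////////
// The two functions below register / remove a process descriptor in / from the
// local process list. This list is rooted in the local process manager, and is
// protected by a remote_queuelock because it can also be scanned by remote threads
// (see cluster_processes_display() and the PID lookup functions above).
////////////////////////////////////////////////////////////////////////////////////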
//////////////////////////////////////////////////////
void cluster_process_local_link( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get extended pointers on local process list root & lock
    xptr_t root_xp = XPTR( local_cxy , &pm->local_root );
    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );

    // get lock protecting the local list
    remote_queuelock_acquire( lock_xp );

    // register process in local list
    xlist_add_last( root_xp , XPTR( local_cxy , &process->local_list ) );
    pm->local_nr++;

    // release lock protecting the local list
    remote_queuelock_release( lock_xp );
}

////////////////////////////////////////////////////////
void cluster_process_local_unlink( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

    // get extended pointer on local process list lock
    xptr_t lock_xp = XPTR( local_cxy , &pm->local_lock );

    // get lock protecting the local list
    remote_queuelock_acquire( lock_xp );

    // remove process from local list
    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
    pm->local_nr--;

    // release lock protecting the local list
    remote_queuelock_release( lock_xp );
}

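////////////////////////////////////////////////////////////////////////////////////
// The two functions below register / remove the local process copy in / from the
// copies_list of its owner cluster. The root of this list and the copies_nr[]
// counter are located in the owner cluster, so they are accessed with extended
// pointers (xlist) and remote atomic operations, under the remote copies_lock.
////////////////////////////////////////////////////////////////////////////////////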
///////////////////////////////////////////////////////
void cluster_process_copies_link( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the copies_list[lpid] root
    xptr_t copies_root  = XPTR( owner_cxy , &pm->copies_root[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_queuelock_acquire( copies_lock );

    // add copy to copies_list
    xlist_add_first( copies_root , copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );

    // release lock protecting copies_list[lpid]
    remote_queuelock_release( copies_lock );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

}  // end cluster_process_copies_link()

/////////////////////////////////////////////////////////
void cluster_process_copies_unlink( process_t * process )
{
    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;

#if DEBUG_CLUSTER_PROCESS_COPIES
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

    // get owner cluster identifier CXY and process LPID
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   lpid       = LPID_FROM_PID( pid );

    // get extended pointer on lock protecting copies_list[lpid]
    xptr_t copies_lock  = XPTR( owner_cxy , &pm->copies_lock[lpid] );

    // get extended pointer on the local copies_list entry
    xptr_t copies_entry = XPTR( local_cxy , &process->copies_list );

    // get lock protecting copies_list[lpid]
    remote_queuelock_acquire( copies_lock );

    // remove copy from copies_list
    xlist_unlink( copies_entry );
    hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );

    // release lock protecting copies_list[lpid]
    remote_queuelock_release( copies_lock );

#if DEBUG_CLUSTER_PROCESS_COPIES
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_CLUSTER_PROCESS_COPIES < cycle )
printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
__FUNCTION__ , local_cxy , process , cycle );
#endif

}  // end cluster_process_copies_unlink()

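////////////////////////////////////////////////////////////////////////////////////
// This debug function displays all processes registered in cluster <cxy>. It takes
// the remote lock on the target local process list, then the TXT0 busylock, so the
// lines printed for the scanned processes are not interleaved with other output.
// Note: it builds remote pointers from the local LOCAL_CLUSTER base, assuming the
// cluster manager has the same local address in all clusters.
////////////////////////////////////////////////////////////////////////////////////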
///////////////////////////////////////////
void cluster_processes_display( cxy_t cxy )
{
    xptr_t        root_xp;
    xptr_t        lock_xp;
    xptr_t        iter_xp;
    xptr_t        process_xp;
    cxy_t         txt0_cxy;
    chdev_t     * txt0_ptr;
    xptr_t        txt0_xp;
    xptr_t        txt0_lock_xp;

    assert( (cluster_is_undefined( cxy ) == false),
    "illegal cluster index" );

    // get extended pointer on root and lock for local process list in cluster
    root_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_root );
    lock_xp = XPTR( cxy , &LOCAL_CLUSTER->pmgr.local_lock );

    // get pointers on TXT0 chdev
    txt0_xp  = chdev_dir.txt_tx[0];
    txt0_cxy = GET_CXY( txt0_xp );
    txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on TXT0 lock
    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get lock on local process list
    remote_queuelock_acquire( lock_xp );

    // get TXT0 lock
    remote_busylock_acquire( txt0_lock_xp );

    // display header
    nolock_printk("\n***** processes in cluster %x / cycle %d\n",
    cxy , (uint32_t)hal_get_cycles() );

    // loop on all processes in cluster cxy
    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_display( process_xp );
    }

    // release TXT0 lock
    remote_busylock_release( txt0_lock_xp );

    // release lock on local process list
    remote_queuelock_release( lock_xp );

}  // end cluster_processes_display()
