source: trunk/kernel/kern/process.c @ 664

Last change on this file since 664 was 662, checked in by alain, 4 years ago

Introduce the ksocket.h & ksocket.c files in kernel/kern.

File size: 94.0 KB
RevLine 
[1]1/*
[564]2 * process.c - process related functions definition.
[172]3 *
[1]4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
[657]6 *          Alain Greiner (2016,2017,2018,2019,2020)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
[409]10 * This file is part of ALMOS-MKH.
[1]11 *
[172]12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
[172]16 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
[172]22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[457]27#include <hal_kernel_types.h>
[1]28#include <hal_remote.h>
29#include <hal_uspace.h>
[409]30#include <hal_irqmask.h>
[623]31#include <hal_vmm.h>
[1]32#include <errno.h>
33#include <printk.h>
34#include <memcpy.h>
35#include <bits.h>
36#include <kmem.h>
37#include <page.h>
38#include <vmm.h>
39#include <vfs.h>
40#include <core.h>
41#include <thread.h>
[428]42#include <chdev.h>
[1]43#include <list.h>
[407]44#include <string.h>
[1]45#include <scheduler.h>
[564]46#include <busylock.h>
47#include <queuelock.h>
48#include <remote_queuelock.h>
49#include <rwlock.h>
50#include <remote_rwlock.h>
[1]51#include <dqdt.h>
52#include <cluster.h>
53#include <ppm.h>
54#include <boot_info.h>
55#include <process.h>
56#include <elf.h>
[23]57#include <syscalls.h>
[435]58#include <shared_syscalls.h>
[1]59
60//////////////////////////////////////////////////////////////////////////////////////////
61// Extern global variables
62//////////////////////////////////////////////////////////////////////////////////////////
63
[428]64extern process_t           process_zero;     // allocated in kernel_init.c
65extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
[1]66
67//////////////////////////////////////////////////////////////////////////////////////////
68// Process initialisation related functions
69//////////////////////////////////////////////////////////////////////////////////////////
70
[583]71/////////////////////////////////
[503]72process_t * process_alloc( void )
[1]73{
[635]74        kmem_req_t req;
[1]75
[635]76    req.type  = KMEM_KCM;
77        req.order = bits_log2( sizeof(process_t) );
[1]78        req.flags = AF_KERNEL;
79
[635]80    return kmem_alloc( &req );
[1]81}
82
83////////////////////////////////////////
84void process_free( process_t * process )
85{
86    kmem_req_t  req;
87
[635]88        req.type = KMEM_KCM;
[1]89        req.ptr  = process;
90        kmem_free( &req );
91}
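//////////////////////////////////////////////////////////////////////////////////////////
// The two functions above only allocate / release the process descriptor itself.
// The sketch below is illustrative (not an actual call site in this file): it shows
// the expected pairing of process_alloc() with process_reference_init(), and of
// process_free() with process_destroy(), assuming the caller already owns a valid
// <pid> and an extended pointer <parent_xp> on the parent process.
//
//     process_t * process = process_alloc();
//     if( process == NULL ) return -1;
//     if( process_reference_init( process , pid , parent_xp ) )
//     {
//         process_free( process );              // release descriptor on failure
//         return -1;
//     }
//     ...
//     process_destroy( process );               // calls process_free() internally
//////////////////////////////////////////////////////////////////////////////////////////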
92
[625]93////////////////////////////////////////////////////
94error_t process_reference_init( process_t * process,
95                                pid_t       pid,
96                                xptr_t      parent_xp )
[1]97{
[625]98    error_t     error;
[610]99    xptr_t      process_xp;
[428]100    cxy_t       parent_cxy;
101    process_t * parent_ptr;
[407]102    xptr_t      stdin_xp;
103    xptr_t      stdout_xp;
104    xptr_t      stderr_xp;
105    uint32_t    stdin_id;
106    uint32_t    stdout_id;
107    uint32_t    stderr_id;
[428]108    uint32_t    txt_id;
109    char        rx_path[40];
110    char        tx_path[40];
[440]111    xptr_t      file_xp;
[428]112    xptr_t      chdev_xp;
[625]113    chdev_t   * chdev_ptr;
[428]114    cxy_t       chdev_cxy;
115    pid_t       parent_pid;
[625]116    vmm_t     * vmm;
[1]117
[610]118    // build extended pointer on this reference process
119    process_xp = XPTR( local_cxy , process );
120
[625]121    // get pointer on process vmm
122    vmm = &process->vmm;
123
[428]124    // get parent process cluster and local pointer
125    parent_cxy = GET_CXY( parent_xp );
[435]126    parent_ptr = GET_PTR( parent_xp );
[204]127
[457]128    // get parent_pid
[564]129    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]130
[438]131#if DEBUG_PROCESS_REFERENCE_INIT
[610]132thread_t * this = CURRENT_THREAD;
[433]133uint32_t cycle = (uint32_t)hal_get_cycles();
[610]134if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[625]135printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
136__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
[433]137#endif
[428]138
[610]139    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
[433]140        process->pid        = pid;
141    process->ref_xp     = XPTR( local_cxy , process );
[443]142    process->owner_xp   = XPTR( local_cxy , process );
[433]143    process->parent_xp  = parent_xp;
144    process->term_state = 0;
[428]145
[610]146    // initialize VFS root inode and CWD inode
147    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
148    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
149
[625]150    // initialize VSL as empty
151    vmm->vsegs_nr = 0;
152        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[564]153
[625]154    // create an empty GPT as required by the architecture
155    error = hal_gpt_create( &vmm->gpt );
156    if( error ) 
157    {
158        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
159        return -1;
160    }
161
162#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
163if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
164printk("\n[%s] thread[%x,%x] created empty GPT for process %x\n",
165__FUNCTION__, parent_pid, this->trdid, pid );
166#endif
167
[635]168    // initialize VSL lock
[625]169        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
170
[635]171    // register kernel vsegs in user process VMM as required by the architecture
[625]172    error = hal_vmm_kernel_update( process );
173    if( error ) 
174    {
175        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
176        return -1;
177    }
178
179#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
180if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[635]181printk("\n[%s] thread[%x,%x] registered kernel vsegs in VSL for process %x\n",
[625]182__FUNCTION__, parent_pid, this->trdid, pid );
183#endif
184
185    // create "args" and "envs" vsegs
186    // create "stacks" and "mmap" vsegs allocators
187    // initialize locks protecting GPT and VSL
188    error = vmm_user_init( process );
189    if( error ) 
190    {
191        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
192        return -1;
193    }
[415]194 
[438]195#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]196cycle = (uint32_t)hal_get_cycles();
[610]197if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[625]198printk("\n[%s] thread[%x,%x] initialized vmm for process %x\n", 
199__FUNCTION__, parent_pid, this->trdid, pid );
[433]200#endif
[1]201
[409]202    // initialize fd_array as empty
[408]203    process_fd_init( process );
[1]204
[428]205    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
[581]206    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
[408]207    {
[581]208        // select a TXT channel
209        if( pid == 1 )  txt_id = 0;                     // INIT
210        else            txt_id = process_txt_alloc();   // KSH
[428]211
[457]212        // attach process to TXT
[428]213        process_txt_attach( process , txt_id ); 
214
[457]215#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
216cycle = (uint32_t)hal_get_cycles();
[610]217if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
218printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
219__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
[457]220#endif
[428]221        // build path to TXT_RX[i] and TXT_TX[i] chdevs
222        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
223        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
224
225        // create stdin pseudo file         
[610]226        error = vfs_open(  process->vfs_root_xp,
[428]227                           rx_path,
[610]228                           process_xp,
[408]229                           O_RDONLY, 
230                           0,                // FIXME chmod
231                           &stdin_xp, 
232                           &stdin_id );
[625]233        if( error )
234        {
235            printk("\n[ERROR] in %s : cannot open stdin pseudo-file\n", __FUNCTION__ );
236            return -1;
237        }
[1]238
[564]239assert( (stdin_id == 0) , "stdin index must be 0" );
[428]240
[440]241#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
242cycle = (uint32_t)hal_get_cycles();
[610]243if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
244printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
245__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]246#endif
247
[428]248        // create stdout pseudo file         
[610]249        error = vfs_open(  process->vfs_root_xp,
[428]250                           tx_path,
[610]251                           process_xp,
[408]252                           O_WRONLY, 
253                           0,                // FIXME chmod
254                           &stdout_xp, 
255                           &stdout_id );
[625]256        if( error )
257        {
258            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
259            return -1;
260        }
[1]261
[625]262assert( (stdout_id == 1) , "stdout index must be 1" );
[428]263
[440]264#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
265cycle = (uint32_t)hal_get_cycles();
[610]266if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
267printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
268__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]269#endif
270
[428]271        // create stderr pseudo file         
[610]272        error = vfs_open(  process->vfs_root_xp,
[428]273                           tx_path,
[610]274                           process_xp,
[408]275                           O_WRONLY, 
276                           0,                // FIXME chmod
277                           &stderr_xp, 
278                           &stderr_id );
[625]279        if( error )
280        {
281            printk("\n[ERROR] in %s : cannot open stderr pseudo-file\n", __FUNCTION__ );
282            return -1;
283        }
[428]284
[625]285assert( (stderr_id == 2) , "stderr index must be 2" );
[428]286
[440]287#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
288cycle = (uint32_t)hal_get_cycles();
[610]289if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
290printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
291__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]292#endif
293
[408]294    }
[428]295    else                                            // normal user process
[408]296    {
[457]297        // get extended pointer on stdin pseudo file in parent process
[625]298        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy,
299                                                &parent_ptr->fd_array.array[0] ) );
[440]300
[457]301        // get extended pointer on parent process TXT chdev
[440]302        chdev_xp = chdev_from_file( file_xp );
[428]303 
304        // get cluster and local pointer on chdev
305        chdev_cxy = GET_CXY( chdev_xp );
[435]306        chdev_ptr = GET_PTR( chdev_xp );
[428]307 
[564]308        // get parent process TXT terminal index
309        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[407]310
[564]311        // attach child process to parent process TXT terminal
[428]312        process_txt_attach( process , txt_id ); 
[407]313
[457]314        // copy all open files from parent process fd_array to this process
[428]315        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
[457]316                                XPTR( parent_cxy , &parent_ptr->fd_array ) );
[408]317    }
[407]318
[610]319    // initialize lock protecting CWD changes
[625]320    remote_busylock_init( XPTR( local_cxy , 
321                                &process->cwd_lock ), LOCK_PROCESS_CWD );
[408]322
[438]323#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]324cycle = (uint32_t)hal_get_cycles();
[610]325if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
326printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
327__FUNCTION__, parent_pid, this->trdid, pid , cycle );
[433]328#endif
[407]329
[408]330    // reset children list root
331    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
332    process->children_nr     = 0;
[625]333    remote_queuelock_init( XPTR( local_cxy,
334                                 &process->children_lock ), LOCK_PROCESS_CHILDREN );
[407]335
[611]336    // reset semaphore / mutex / barrier / condvar list roots and lock
[408]337    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
338    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
339    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
340    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
[625]341    remote_queuelock_init( XPTR( local_cxy , 
342                                 &process->sync_lock ), LOCK_PROCESS_USERSYNC );
[407]343
[611]344    // reset open directories root and lock
345    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
[625]346    remote_queuelock_init( XPTR( local_cxy , 
347                                 &process->dir_lock ), LOCK_PROCESS_DIR );
[611]348
[408]349    // register new process in the local cluster manager pref_tbl[]
350    lpid_t lpid = LPID_FROM_PID( pid );
351    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
[407]352
[408]353    // register new process descriptor in local cluster manager local_list
354    cluster_process_local_link( process );
[407]355
[408]356    // register new process descriptor in local cluster manager copies_list
357    cluster_process_copies_link( process );
[172]358
[564]359    // initialize th_tbl[] array and associated fields
[1]360    uint32_t i;
[564]361
362    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]363        {
364        process->th_tbl[i] = NULL;
365    }
366    process->th_nr  = 0;
[564]367    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[1]368
[124]369        hal_fence();
[1]370
[438]371#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]372cycle = (uint32_t)hal_get_cycles();
[610]373if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
374printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
375__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]376#endif
[101]377
[635]378#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
379hal_vmm_display( parent_xp , false );
380hal_vmm_display( XPTR( local_cxy , process ) , false );
381#endif
382
[625]383    return 0;
384
[428]385}  // process_reference_init()
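//////////////////////////////////////////////////////////////////////////////////////////
// Note on usage: the <pid> argument is expected to be a PID already allocated in the
// owner cluster (process_destroy() releases it with cluster_pid_release()), and the
// <parent_xp> argument identifies the parent process, possibly located in another
// cluster. This is why all accesses to the parent descriptor above use the
// hal_remote_l32() / hal_remote_l64() primitives on XPTR( parent_cxy , ... ) pointers.
//////////////////////////////////////////////////////////////////////////////////////////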
[204]386
[1]387/////////////////////////////////////////////////////
388error_t process_copy_init( process_t * local_process,
389                           xptr_t      reference_process_xp )
390{
[625]391    error_t   error;
392    vmm_t   * vmm;
[415]393
[23]394    // get reference process cluster and local pointer
395    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
[435]396    process_t * ref_ptr = GET_PTR( reference_process_xp );
[1]397
[625]398    // get pointer on process vmm
399    vmm = &local_process->vmm;
400
[428]401    // initialize PID, REF_XP, PARENT_XP, and STATE
[564]402    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
403    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
[433]404    local_process->ref_xp     = reference_process_xp;
[443]405    local_process->owner_xp   = reference_process_xp;
[433]406    local_process->term_state = 0;
[407]407
[564]408#if DEBUG_PROCESS_COPY_INIT
[610]409thread_t * this = CURRENT_THREAD; 
[433]410uint32_t cycle = (uint32_t)hal_get_cycles();
[610]411if( DEBUG_PROCESS_COPY_INIT < cycle )
412printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
413__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]414#endif
[407]415
[564]416// check user process
[625]417assert( (local_process->pid != 0), "LPID cannot be 0" );
[564]418
[625]419    // initialize VSL as empty
420    vmm->vsegs_nr = 0;
421        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[1]422
[625]423    // create an empty GPT as required by the architecture
424    error = hal_gpt_create( &vmm->gpt );
425    if( error ) 
426    {
427        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
428        return -1;
429    }
430
431    // initialize VSL lock
432        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
433
434    // register kernel vsegs in VMM as required by the architecture
435    error = hal_vmm_kernel_update( local_process );
436    if( error ) 
437    {
438        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
439        return -1;
440    }
441
442    // create "args" and "envs" vsegs
443    // create "stacks" and "mmap" vsegs allocators
444    // initialize locks protecting GPT and VSL
445    error = vmm_user_init( local_process );
446    if( error ) 
447    {
448        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
449        return -1;
450    }
451 
452#if (DEBUG_PROCESS_COPY_INIT & 1)
453cycle = (uint32_t)hal_get_cycles();
454if( DEBUG_PROCESS_COPY_INIT < cycle )
455printk("\n[%s] thread[%x,%x] initialized vmm for process %x / cycle %d\n", 
456__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
457#endif
458
459    // set process file descriptors array
[23]460        process_fd_init( local_process );
[1]461
[625]462    // set vfs_root_xp / vfs_bin_xp / cwd_xp fields
[564]463    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
464    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
[610]465    local_process->cwd_xp      = XPTR_NULL;
[1]466
467    // reset children list root (not used in a process descriptor copy)
468    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
[172]469    local_process->children_nr   = 0;
[564]470    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
471                           LOCK_PROCESS_CHILDREN );
[1]472
[428]473    // reset children_list (not used in a process descriptor copy)
474    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
[1]475
476    // reset semaphores list root (not used in a process descriptor copy)
477    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
[23]478    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
479    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
480    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
[1]481
[564]482    // initialize th_tbl[] array and associated fields
[1]483    uint32_t i;
[564]484    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]485        {
486        local_process->th_tbl[i] = NULL;
487    }
488    local_process->th_nr  = 0;
[564]489    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
[1]490
491    // register new process descriptor in local cluster manager local_list
492    cluster_process_local_link( local_process );
493
494    // register new process descriptor in owner cluster manager copies_list
495    cluster_process_copies_link( local_process );
496
[124]497        hal_fence();
[1]498
[438]499#if DEBUG_PROCESS_COPY_INIT
[433]500cycle = (uint32_t)hal_get_cycles();
[610]501if( DEBUG_PROCESS_COPY_INIT < cycle )
502printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
503__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]504#endif
[279]505
[1]506    return 0;
507
[204]508} // end process_copy_init()
509
[1]510///////////////////////////////////////////
511void process_destroy( process_t * process )
512{
[428]513    xptr_t      parent_xp;
514    process_t * parent_ptr;
515    cxy_t       parent_cxy;
516    xptr_t      children_lock_xp;
[446]517    xptr_t      children_nr_xp;
[1]518
[437]519    pid_t       pid = process->pid;
520
[593]521// check no more threads
[618]522assert( (process->th_nr == 0),
523"process %x in cluster %x contains threads", pid , local_cxy );
[428]524
[438]525#if DEBUG_PROCESS_DESTROY
[610]526thread_t * this = CURRENT_THREAD;
[433]527uint32_t cycle = (uint32_t)hal_get_cycles();
[610]528if( DEBUG_PROCESS_DESTROY < cycle )
529printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
530__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]531#endif
[428]532
[618]533    // Destroy VMM
534    vmm_destroy( process );
535
536#if (DEBUG_PROCESS_DESTROY & 1)
537if( DEBUG_PROCESS_DESTROY < cycle )
538printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
539__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
540#endif
541
[436]542    // remove process from local_list in local cluster manager
543    cluster_process_local_unlink( process );
[1]544
[618]545#if (DEBUG_PROCESS_DESTROY & 1)
546if( DEBUG_PROCESS_DESTROY < cycle )
547printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
548__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
549#endif
550
[436]551    // remove process from copies_list in owner cluster manager
552    cluster_process_copies_unlink( process );
[23]553
[618]554#if (DEBUG_PROCESS_DESTROY & 1)
555if( DEBUG_PROCESS_DESTROY < cycle )
556printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
557__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
558#endif
559
[625]560    // when target process cluster is the owner cluster
561    // - remove process from TXT list and transfer ownership
562    // - remove process from children_list
563    // - release PID
[437]564    if( CXY_FROM_PID( pid ) == local_cxy )
[428]565    {
[625]566        process_txt_detach( XPTR( local_cxy , process ) );
567
568#if (DEBUG_PROCESS_DESTROY & 1)
569if( DEBUG_PROCESS_DESTROY < cycle )
570printk("\n[%s] thread[%x,%x] removed process %x from TXT list\n",
571__FUNCTION__, this->process->pid, this->trdid, pid );
572#endif
573
[428]574        // get pointers on parent process
575        parent_xp  = process->parent_xp;
576        parent_cxy = GET_CXY( parent_xp );
577        parent_ptr = GET_PTR( parent_xp );
578
579        // get extended pointer on children_lock in parent process
580        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
[446]581        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );
[428]582
583        // remove process from children_list
[564]584        remote_queuelock_acquire( children_lock_xp );
[428]585        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
[446]586            hal_remote_atomic_add( children_nr_xp , -1 );
[564]587        remote_queuelock_release( children_lock_xp );
[450]588
[618]589#if (DEBUG_PROCESS_DESTROY & 1)
590if( DEBUG_PROCESS_DESTROY < cycle )
[625]591printk("\n[%s] thread[%x,%x] removed process %x from parent process children list\n",
592__FUNCTION__, this->process->pid, this->trdid, pid );
[618]593#endif
594
[564]595        // release the process PID to cluster manager
596        cluster_pid_release( pid );
[428]597
[618]598#if (DEBUG_PROCESS_DESTROY & 1)
599if( DEBUG_PROCESS_DESTROY < cycle )
600printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
601__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
602#endif
[23]603
[618]604    }
[1]605
[623]606    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]
607
[618]608    // FIXME close all open files [AG]
[623]609
[618]610    // FIXME synchronize dirty files [AG]
[1]611
[416]612    // release memory allocated to process descriptor
613    process_free( process );
[1]614
[438]615#if DEBUG_PROCESS_DESTROY
[433]616cycle = (uint32_t)hal_get_cycles();
[610]617if( DEBUG_PROCESS_DESTROY < cycle )
618printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n",
619__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]620#endif
[428]621
[407]622}  // end process_destroy()
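//////////////////////////////////////////////////////////////////////////////////////////
// Note on usage: process_destroy() requires that all threads of the target process in
// this cluster have already been removed (see the assert on th_nr above). A typical -
// purely illustrative - sequence in the owner cluster would therefore be:
//
//     process_sigaction( pid , DELETE_ALL_THREADS );   // delete all threads first
//     ...                                              // wait until th_nr == 0
//     process_destroy( process );                      // then release the descriptor
//////////////////////////////////////////////////////////////////////////////////////////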
623
[583]624///////////////////////////////////////////////////////////////////
[527]625const char * process_action_str( process_sigactions_t action_type )
[409]626{
[583]627    switch ( action_type )
628    {
629        case BLOCK_ALL_THREADS:   return "BLOCK";
630        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
631        case DELETE_ALL_THREADS:  return "DELETE";
632        default:                  return "undefined";
633    }
[409]634}
635
[435]636////////////////////////////////////////
637void process_sigaction( pid_t       pid,
[457]638                        uint32_t    type )
[409]639{
640    cxy_t              owner_cxy;         // owner cluster identifier
641    lpid_t             lpid;              // process index in owner cluster
642    cluster_t        * cluster;           // pointer on cluster manager
643    xptr_t             root_xp;           // extended pointer on root of copies
644    xptr_t             lock_xp;           // extended pointer on lock protecting copies
645    xptr_t             iter_xp;           // iterator on copies list
646    xptr_t             process_xp;        // extended pointer on process copy
647    cxy_t              process_cxy;       // process copy cluster identifier
[457]648    process_t        * process_ptr;       // local pointer on process copy
[436]649    reg_t              save_sr;           // for critical section
[457]650    thread_t         * client;            // pointer on client thread
651    xptr_t             client_xp;         // extended pointer on client thread
652    process_t        * local;             // pointer on process copy in local cluster
653    uint32_t           remote_nr;         // number of remote process copies
[619]654    rpc_desc_t         rpc;               // shared RPC descriptor
655    uint32_t           responses;         // shared RPC responses counter
[409]656
[457]657    client    = CURRENT_THREAD;
658    client_xp = XPTR( local_cxy , client );
659    local     = NULL;
660    remote_nr = 0;
[435]661
[583]662    // check calling thread can yield
663    thread_assert_can_yield( client , __FUNCTION__ );
[564]664
[438]665#if DEBUG_PROCESS_SIGACTION
[433]666uint32_t cycle = (uint32_t)hal_get_cycles();
[438]667if( DEBUG_PROCESS_SIGACTION < cycle )
[593]668printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
[583]669__FUNCTION__ , client->process->pid, client->trdid,
[457]670process_action_str( type ) , pid , cycle );
[433]671#endif
[409]672
[436]673    // get pointer on local cluster manager
[416]674    cluster = LOCAL_CLUSTER;
675
[409]676    // get owner cluster identifier and process lpid
[435]677    owner_cxy = CXY_FROM_PID( pid );
678    lpid      = LPID_FROM_PID( pid );
[409]679
[593]680    // get root of list of copies and lock from owner cluster
[436]681    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
682    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
[435]683
[583]684// check action type
685assert( ((type == DELETE_ALL_THREADS ) ||
686         (type == BLOCK_ALL_THREADS )  ||
687         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
[416]688             
[593]689    // This client thread sends parallel RPCs to all remote clusters containing
[564]690    // target process copies, waits for all responses, and then directly handles
691    // the threads in the local cluster, when required.
[457]692    // The client thread allocates a - shared - RPC descriptor on the stack,
693    // because all parallel, non-blocking, server threads use the same input
694    // arguments, and the same shared RPC responses counter.
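    // Each remote copy increments the shared <responses> counter below before the
    // corresponding RPC is sent; the client thread then blocks and deschedules, and
    // it is unblocked by the last RPC server thread (see comments further down).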
[436]695
696    // mask IRQs
697    hal_disable_irq( &save_sr);
698
[457]699    // client thread blocks itself
700    thread_block( client_xp , THREAD_BLOCKED_RPC );
[436]701
[619]702    // initialize RPC responses counter
703    responses = 0;
704
[436]705    // initialize shared RPC descriptor
706    // it can be shared because there are no output arguments
707    rpc.rsp       = &responses;
[438]708    rpc.blocking  = false;
709    rpc.index     = RPC_PROCESS_SIGACTION;
710    rpc.thread    = client;
711    rpc.lid       = client->core->lid;
[611]712    rpc.args[0]   = pid;
713    rpc.args[1]   = type;
[436]714
[611]715    // take the lock protecting process copies
716    remote_queuelock_acquire( lock_xp );
717
[457]718    // scan list of process copies
[409]719    XLIST_FOREACH( root_xp , iter_xp )
720    {
[457]721        // get extended pointers and cluster on process
[440]722        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
723        process_cxy = GET_CXY( process_xp );
[457]724        process_ptr = GET_PTR( process_xp );
[440]725
[593]726        if( process_cxy == local_cxy )    // process copy is local
[457]727        { 
728            local = process_ptr;
729        }
[593]730        else                              // process copy is remote
[457]731        {
732            // update number of remote process copies
733            remote_nr++;
734
[619]735            // atomically increment RPC responses counter
736            hal_atomic_add( &responses , 1 );
[457]737
[438]738#if DEBUG_PROCESS_SIGACTION
739if( DEBUG_PROCESS_SIGACTION < cycle )
[593]740printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
[583]741__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
[433]742#endif
[457]743            // call RPC in target cluster
[619]744            rpc_send( process_cxy , &rpc );
[457]745        }
746    }  // end list of copies
747
[409]748    // release the lock protecting process copies
[564]749    remote_queuelock_release( lock_xp );
[409]750
[436]751    // restore IRQs
752    hal_restore_irq( save_sr);
[409]753
[457]754    // - if there are remote process copies, the client thread deschedules
755    //   (it will be unblocked by the last RPC server thread).
756    // - if there are no remote copies, the client thread unblocks itself.
757    if( remote_nr )
758    {
759        sched_yield("blocked on rpc_process_sigaction");
760    } 
761    else
762    {
763        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
764    }
[409]765
[457]766    // handle the local process copy if required
767    if( local != NULL )
768    {
769
770#if DEBUG_PROCESS_SIGACTION
771if( DEBUG_PROCESS_SIGACTION < cycle )
[593]772printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
[583]773__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
[457]774#endif
775        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp ); 
[583]776        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local ); 
[457]777        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
778    }
779
[438]780#if DEBUG_PROCESS_SIGACTION
[433]781cycle = (uint32_t)hal_get_cycles();
[438]782if( DEBUG_PROCESS_SIGACTION < cycle )
[593]783printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
[583]784__FUNCTION__, client->process->pid, client->trdid,
[457]785process_action_str( type ), pid, cycle );
[433]786#endif
[416]787
[409]788}  // end process_sigaction()
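//////////////////////////////////////////////////////////////////////////////////////////
// Illustrative example (not an actual call site in this file): a syscall handler that
// must kill a process would typically block, then delete, all its threads as follows,
// assuming <pid> identifies a user process:
//
//     process_sigaction( pid , BLOCK_ALL_THREADS );    // block threads in all clusters
//     process_sigaction( pid , DELETE_ALL_THREADS );   // then mark them for deletion
//////////////////////////////////////////////////////////////////////////////////////////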
789
[433]790/////////////////////////////////////////////////
[583]791void process_block_threads( process_t * process )
[1]792{
[409]793    thread_t          * target;         // pointer on target thread
[433]794    thread_t          * this;           // pointer on calling thread
[564]795    uint32_t            ltid;           // index in process th_tbl[]
[409]796    uint32_t            count;          // requests counter
[593]797    volatile uint32_t   ack_count;      // acknowledges counter
[1]798
[416]799    // get calling thread pointer
[433]800    this = CURRENT_THREAD;
[407]801
[438]802#if DEBUG_PROCESS_SIGACTION
[564]803pid_t pid = process->pid;
[433]804uint32_t cycle = (uint32_t)hal_get_cycles();
[438]805if( DEBUG_PROCESS_SIGACTION < cycle )
[593]806printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]807__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]808#endif
[409]809
[564]810// check target process is a user process
[619]811assert( (LPID_FROM_PID( process->pid ) != 0 ),
812"process %x is not a user process\n", process->pid );
[564]813
[409]814    // get lock protecting process th_tbl[]
[564]815    rwlock_rd_acquire( &process->th_lock );
[1]816
[440]817    // loop on target process local threads
[409]818    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[593]819    // - if the calling thread and the target thread are not running on the same
820    //   core, we ask the target scheduler to acknowledge the blocking,
821    //   to be sure that the target thread is not running.
822    // - if the calling thread and the target thread are running on the same core,
823    //   we don't need confirmation from the scheduler.
824           
[436]825    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
[1]826    {
[409]827        target = process->th_tbl[ltid];
[1]828
[436]829        if( target != NULL )                                 // thread exist
[1]830        {
831            count++;
[409]832
[583]833            // set the global blocked bit in target thread descriptor.
834            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[436]835 
[583]836            if( this->core->lid != target->core->lid )
837            {
[583]838                // increment acknowledges counter
839                hal_atomic_add( (void*)&ack_count , 1 );
[409]840
[583]841                // set FLAG_REQ_ACK and &ack_count in target descriptor
842                thread_set_req_ack( target , (uint32_t *)&ack_count );
[409]843
[583]844                // force scheduling on target thread
845                dev_pic_send_ipi( local_cxy , target->core->lid );
[409]846            }
[1]847        }
[172]848    }
849
[428]850    // release lock protecting process th_tbl[]
[564]851    rwlock_rd_release( &process->th_lock );
[416]852
[593]853    // wait for the scheduler acknowledges  TODO this could be improved...
[409]854    while( 1 )
855    {
[610]856        // exit when all scheduler acknowledges have been received
[436]857        if ( ack_count == 0 ) break;
[409]858   
859        // wait 1000 cycles before retry
860        hal_fixed_delay( 1000 );
861    }
[1]862
[438]863#if DEBUG_PROCESS_SIGACTION
[433]864cycle = (uint32_t)hal_get_cycles();
[438]865if( DEBUG_PROCESS_SIGACTION < cycle )
[593]866printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
867__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]868#endif
[409]869
[428]870}  // end process_block_threads()
[409]871
[440]872/////////////////////////////////////////////////
873void process_delete_threads( process_t * process,
874                             xptr_t      client_xp )
[409]875{
[440]876    thread_t          * target;        // local pointer on target thread
877    xptr_t              target_xp;     // extended pointer on target thread
878    cxy_t               owner_cxy;     // owner process cluster
[409]879    uint32_t            ltid;          // index in process th_tbl
[440]880    uint32_t            count;         // threads counter
[409]881
[433]882    // get calling thread pointer
[409]883
[440]884    // get target process owner cluster
885    owner_cxy = CXY_FROM_PID( process->pid );
886
[438]887#if DEBUG_PROCESS_SIGACTION
[633]888thread_t * this  = CURRENT_THREAD;
889uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]890if( DEBUG_PROCESS_SIGACTION < cycle )
[625]891printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
892__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
[433]893#endif
894
[564]895// check target process is a user process
[619]896assert( (LPID_FROM_PID( process->pid ) != 0),
897"process %x is not a user process\n", process->pid );
[564]898
[409]899    // get lock protecting process th_tbl[]
[583]900    rwlock_wr_acquire( &process->th_lock );
[409]901
[440]902    // loop on target process local threads                       
[416]903    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[440]904    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
[1]905    {
[409]906        target = process->th_tbl[ltid];
[1]907
[440]908        if( target != NULL )    // valid thread 
[1]909        {
[416]910            count++;
[440]911            target_xp = XPTR( local_cxy , target );
[1]912
[564]913            // main thread and client thread should not be deleted
[440]914            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
915                (client_xp) != target_xp )                           // not client thread
916            {
917                // mark target thread for delete and block it
[651]918                thread_delete( target_xp , true );                   // forced
[440]919            }
[409]920        }
921    }
[1]922
[428]923    // release lock protecting process th_tbl[]
[583]924    rwlock_wr_release( &process->th_lock );
[407]925
[438]926#if DEBUG_PROCESS_SIGACTION
[433]927cycle = (uint32_t)hal_get_cycles();
[438]928if( DEBUG_PROCESS_SIGACTION < cycle )
[593]929printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
930__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
[433]931#endif
[407]932
[440]933}  // end process_delete_threads()
[409]934
[440]935///////////////////////////////////////////////////
936void process_unblock_threads( process_t * process )
[409]937{
[440]938    thread_t          * target;        // pointer on target thread
[409]939    uint32_t            ltid;          // index in process th_tbl
[440]940    uint32_t            count;         // requests counter
[409]941
[438]942#if DEBUG_PROCESS_SIGACTION
[633]943thread_t * this  = CURRENT_THREAD;
944pid_t      pid   = process->pid;
945uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]946if( DEBUG_PROCESS_SIGACTION < cycle )
[593]947printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]948__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]949#endif
950
[564]951// check target process is a user process
[619]952assert( ( LPID_FROM_PID( process->pid ) != 0 ),
953"process %x is not a user process\n", process->pid );
[564]954
[416]955    // get lock protecting process th_tbl[]
[564]956    rwlock_rd_acquire( &process->th_lock );
[416]957
[440]958    // loop on process threads to unblock all threads
[416]959    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[440]960    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
[409]961    {
[416]962        target = process->th_tbl[ltid];
[409]963
[440]964        if( target != NULL )             // thread found
[409]965        {
966            count++;
[440]967
968            // reset the global blocked bit in target thread descriptor.
969            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[1]970        }
971    }
972
[428]973    // release lock protecting process th_tbl[]
[564]974    rwlock_rd_release( &process->th_lock );
[407]975
[438]976#if DEBUG_PROCESS_SIGACTION
[433]977cycle = (uint32_t)hal_get_cycles();
[438]978if( DEBUG_PROCESS_SIGACTION < cycle )
[593]979printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
[583]980__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]981#endif
[1]982
[440]983}  // end process_unblock_threads()
[407]984
[1]985///////////////////////////////////////////////
986process_t * process_get_local_copy( pid_t pid )
987{
988    error_t        error;
[172]989    process_t    * process_ptr;   // local pointer on process
[23]990    xptr_t         process_xp;    // extended pointer on process
[1]991
992    cluster_t * cluster = LOCAL_CLUSTER;
993
[564]994#if DEBUG_PROCESS_GET_LOCAL_COPY
995thread_t * this = CURRENT_THREAD;
996uint32_t cycle = (uint32_t)hal_get_cycles();
997if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]998printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]999__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[564]1000#endif
1001
[1]1002    // get lock protecting local list of processes
[564]1003    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]1004
1005    // scan the local list of process descriptors to find the process
[23]1006    xptr_t  iter;
1007    bool_t  found = false;
1008    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
[1]1009    {
[23]1010        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[435]1011        process_ptr = GET_PTR( process_xp );
[23]1012        if( process_ptr->pid == pid )
[1]1013        {
1014            found = true;
1015            break;
1016        }
1017    }
1018
1019    // release lock protecting local list of processes
[564]1020    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]1021
[172]1022    // allocate memory for a new local process descriptor
[440]1023    // and initialise it from reference cluster if not found
[1]1024    if( !found )
1025    {
1026        // get extended pointer on reference process descriptor
[23]1027        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
[1]1028
[492]1029        assert( (ref_xp != XPTR_NULL) , "illegal pid\n" );
[23]1030
[1]1031        // allocate memory for local process descriptor
[23]1032        process_ptr = process_alloc();
[443]1033
[23]1034        if( process_ptr == NULL )  return NULL;
[1]1035
1036        // initialize local process descriptor copy
[23]1037        error = process_copy_init( process_ptr , ref_xp );
[443]1038
[1]1039        if( error ) return NULL;
1040    }
1041
[440]1042#if DEBUG_PROCESS_GET_LOCAL_COPY
[564]1043cycle = (uint32_t)hal_get_cycles();
[440]1044if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]1045printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
[583]1046__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
[440]1047#endif
1048
[23]1049    return process_ptr;
[1]1050
[409]1051}  // end process_get_local_copy()
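//////////////////////////////////////////////////////////////////////////////////////////
// Note on usage: process_get_local_copy() implements lazy, on-demand replication of the
// process descriptor: when no copy exists in the local cluster, it allocates one with
// process_alloc() and initialises it from the reference cluster with process_copy_init(),
// as shown above. An illustrative caller needing the local descriptor of process <pid>
// (for instance to create a thread in the local cluster) would simply do:
//
//     process_t * process = process_get_local_copy( pid );
//     if( process == NULL ) return -1;
//////////////////////////////////////////////////////////////////////////////////////////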
1052
[436]1053////////////////////////////////////////////
1054pid_t process_get_ppid( xptr_t  process_xp )
1055{
1056    cxy_t       process_cxy;
1057    process_t * process_ptr;
1058    xptr_t      parent_xp;
1059    cxy_t       parent_cxy;
1060    process_t * parent_ptr;
1061
1062    // get process cluster and local pointer
1063    process_cxy = GET_CXY( process_xp );
1064    process_ptr = GET_PTR( process_xp );
1065
1066    // get pointers on parent process
[564]1067    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[436]1068    parent_cxy = GET_CXY( parent_xp );
1069    parent_ptr = GET_PTR( parent_xp );
1070
[564]1071    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[436]1072}
1073
[1]1074//////////////////////////////////////////////////////////////////////////////////////////
1075// File descriptor array related functions
1076//////////////////////////////////////////////////////////////////////////////////////////
1077
1078///////////////////////////////////////////
[662]1079char * process_fd_type_str( uint32_t type )
1080{
1081    switch( type )
1082    {
1083        case INODE_TYPE_FILE : return "FILE";
1084        case INODE_TYPE_DIR  : return "DIR";
1085        case INODE_TYPE_FIFO : return "FIFO";
1086        case INODE_TYPE_PIPE : return "PIPE";
1087        case INODE_TYPE_SOCK : return "SOCK";
1088        case INODE_TYPE_DEV  : return "DEV";
1089        case INODE_TYPE_BLK  : return "BLK";
1090        case INODE_TYPE_SYML : return "SYML";
1091       
1092        default              : return "undefined";
1093    }
1094}
1095   
1096///////////////////////////////////////////
[1]1097void process_fd_init( process_t * process )
1098{
1099    uint32_t fd;
1100
[610]1101    // initialize lock
[564]1102    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
[1]1103
[610]1104    // initialize max non-free slot index
[662]1105    process->fd_array.max = 0;
[23]1106
[1]1107    // initialize array
[23]1108    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1109    {
1110        process->fd_array.array[fd] = XPTR_NULL;
1111    }
1112}
[635]1113
[610]1114////////////////////////////////////////////////////
1115error_t process_fd_register( xptr_t      process_xp,
[407]1116                             xptr_t      file_xp,
1117                             uint32_t  * fdid )
[1]1118{
1119    bool_t    found;
[23]1120    uint32_t  id;
[662]1121    uint32_t  max;             // current value of max non-free slot index
1122    xptr_t    entry_xp;        // current value of one fd_array entry
1123    xptr_t    lock_xp;         // extended pointer on lock protecting fd_array
1124    xptr_t    max_xp;          // extended pointer on max field in fd_array
[1]1125
[657]1126    // get target process cluster and local pointer
[610]1127    process_t * process_ptr = GET_PTR( process_xp );
1128    cxy_t       process_cxy = GET_CXY( process_xp );
[23]1129
[662]1130// check target process is owner process
1131assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ) ),
1132"process must be owner process\n" );
[610]1133
1134#if DEBUG_PROCESS_FD_REGISTER
1135thread_t * this  = CURRENT_THREAD;
1136uint32_t   cycle = (uint32_t)hal_get_cycles();
1137pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1138if( DEBUG_PROCESS_FD_REGISTER < cycle )
1139printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
1140__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1141#endif
1142
[662]1143    // build extended pointers on lock & max
1144    lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1145    max_xp  = XPTR( process_cxy , &process_ptr->fd_array.max );
[610]1146
[23]1147    // take lock protecting reference fd_array
[610]1148        remote_queuelock_acquire( lock_xp );
[23]1149
[1]1150    found   = false;
1151
[662]1152    // get current value of max_fdid
1153    max = hal_remote_l32( max_xp );
1154
[23]1155    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
[1]1156    {
[662]1157        // get fd_array entry
1158        entry_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
1159       
1160        if ( entry_xp == XPTR_NULL )
[1]1161        {
[662]1162            // update  fd_array
[610]1163            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
[564]1164
[662]1165            // update max when required
1166            if( id > max ) hal_remote_s32( max_xp , id );
1167
1168            // increase file refcount
1169            vfs_file_count_up( file_xp );
1170
1171            // exit loop
[564]1172                        *fdid = id;
[1]1173            found = true;
1174            break;
1175        }
1176    }
1177
[610]1178    // release lock protecting fd_array
1179        remote_queuelock_release( lock_xp );
[1]1180
[610]1181#if DEBUG_PROCESS_FD_REGISTER
1182cycle = (uint32_t)hal_get_cycles();
1183if( DEBUG_PROCESS_FD_REGISTER < cycle )
1184printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
1185__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
1186#endif
1187
[428]1188    if ( !found ) return -1;
[1]1189    else          return 0;
1190
[610]1191}  // end process_fd_register()
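//////////////////////////////////////////////////////////////////////////////////////////
// Note on the fd_array.max field: it records the highest non-free slot index in the
// owner process fd_array. It is increased above when a file is registered in a slot
// beyond the current max, decreased in process_fd_remove() when the max slot itself is
// released, and used by process_fd_clean_all() to bound its scan of the array and by
// process_fd_array_full() to detect a full array.
//////////////////////////////////////////////////////////////////////////////////////////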
1192
[657]1193/////////////////////////////////////////////
1194void process_fd_remove( xptr_t    process_xp,
1195                        uint32_t  fdid )
1196{
1197    pid_t       pid;           // target process PID
1198    lpid_t      lpid;          // target process LPID
[662]1199    xptr_t      file_xp;       // extended pointer on file descriptor
[657]1200    xptr_t      iter_xp;       // iterator for list of process copies
1201    xptr_t      copy_xp;       // extended pointer on process copy
1202    process_t * copy_ptr;      // local pointer on process copy 
1203    cxy_t       copy_cxy;      // process copy cluster identifier
1204
1205    // get target process cluster and local pointer
1206    process_t * process_ptr = GET_PTR( process_xp );
1207    cxy_t       process_cxy = GET_CXY( process_xp );
1208
[662]1209// check target process is owner process
1210assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ) ),
1211"process must be owner process\n" );
1212
[657]1213    // get target process pid and lpid
1214    pid  = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1215    lpid = LPID_FROM_PID( pid );
1216
1217#if DEBUG_PROCESS_FD_REMOVE
1218uint32_t    cycle = (uint32_t)hal_get_cycles();
1219thread_t  * this  = CURRENT_THREAD;
1220if( DEBUG_PROCESS_FD_REMOVE < cycle )
1221printk("\n[%s] thread[%x,%x] enter for fdid %d in process %x / cycle %d\n",
1222__FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle );
1223#endif
1224
[662]1225    // get extended pointer on file descriptor
1226    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1227
[657]1228    // build extended pointers on list_of_copies root and lock (in owner cluster)
1229    xptr_t copies_root_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_root[lpid] );
1230    xptr_t copies_lock_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_lock[lpid] );
1231 
[662]1232    // build extended pointer on fd_array lock and max
1233    xptr_t fd_lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1234    xptr_t fd_max_xp  = XPTR( process_cxy , &process_ptr->fd_array.max );
[657]1235
[662]1236    // take lock protecting fd_array
[657]1237        remote_queuelock_acquire( fd_lock_xp );
1238
1239    // take the lock protecting the list of copies
1240    remote_queuelock_acquire( copies_lock_xp );
1241
[662]1242    // get max value
1243    uint32_t max = hal_remote_l32( fd_max_xp );
1244
[657]1245    // loop on list of process copies
1246    XLIST_FOREACH( copies_root_xp , iter_xp )
1247    {
1248        // get pointers on process copy
1249        copy_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
1250        copy_ptr = GET_PTR( copy_xp );
1251        copy_cxy = GET_CXY( copy_xp );
1252
1253        // release the fd_array entry in process copy
1254        hal_remote_s64( XPTR( copy_cxy , &copy_ptr->fd_array.array[fdid] ), XPTR_NULL );
[662]1255
1256        // decrease file refcount
1257        vfs_file_count_down( file_xp );
[657]1258    }
1259
[662]1260    // update max when required
1261    if( fdid == max ) hal_remote_s32( fd_max_xp , max-1 );
1262
[657]1263    // release the lock protecting reference fd_array
1264        remote_queuelock_release( fd_lock_xp );
1265
1266    // release the lock protecting the list of copies
1267    remote_queuelock_release( copies_lock_xp );
1268
1269#if DEBUG_PROCESS_FD_REMOVE
1270cycle = (uint32_t)hal_get_cycles();
1271if( DEBUG_PROCESS_FD_REMOVE < cycle )
1272printk("\n[%s] thread[%x,%x] exit for fdid %d in process %x / cycle %d\n",
1273__FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle );
1274#endif
1275
1276}  // end process_fd_remove()
1277
[662]1278//////////////////////////////////////////////
1279void process_fd_clean_all( xptr_t process_xp )
[1]1280{
[662]1281    uint32_t  id;
1282    xptr_t    file_xp;         // one fd_array entry
1283    xptr_t    lock_xp;         // extended pointer on lock protecting fd_array
1284    uint32_t  max;             // number of registered files
1285    error_t   error;
1286
1287    // get process cluster, local pointer and PID
1288    process_t * process_ptr = GET_PTR( process_xp );
1289    cxy_t       process_cxy = GET_CXY( process_xp );
1290    pid_t       pid         = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1291
1292// check target process is owner process
1293assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp )) ),
1294"process must be owner process\n" );
1295
1296#if DEBUG_PROCESS_FD_CLEAN_ALL
1297thread_t * this  = CURRENT_THREAD;
1298uint32_t   cycle = (uint32_t)hal_get_cycles();
1299if( DEBUG_PROCESS_FD_CLEAN_ALL < cycle )
1300printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
1301__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1302
1303process_fd_display( process_xp );
1304#endif
1305
1306    // build extended pointer on lock protecting the fd_array
1307    lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1308
1309    // get max index for fd_array
1310    max = hal_remote_l32( XPTR( process_cxy , &process_ptr->fd_array.max ));
1311
1312    // take lock protecting fd_array
1313        remote_queuelock_acquire( lock_xp );
1314
1315    for ( id = 0; id <= max ; id++ )
1316    {
1317        // get fd_array entry
1318        file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
1319       
1320        if ( file_xp != XPTR_NULL )
1321        {
1322            // close the file or socket
1323            error = sys_close( id );
1324
1325            if( error )
1326            printk("\n[ERROR] in %s : cannot close the file %d for process %x\n",
1327            __FUNCTION__, id, pid );
1328        }
1329    }
1330
1331    // release lock protecting fd_array
1332        remote_queuelock_release( lock_xp );
1333
1334#if DEBUG_PROCESS_FD_CLEAN_ALL
1335cycle = (uint32_t)hal_get_cycles();
1336if( DEBUG_PROCESS_FD_CLEAN_ALL < cycle )
1337printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
1338__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1339#endif
1340
1341}  // end process_fd_clean_all()
1342
1343//////////////////////////////////////////////////////////////
1344xptr_t process_fd_get_xptr_from_owner( xptr_t      process_xp,
1345                                       uint32_t    fdid )
1346{
1347    cxy_t       process_cxy = GET_CXY( process_xp );
1348    process_t * process_ptr = GET_PTR( process_xp );
1349
1350assert( (hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp )) == process_xp),
1351"process_xp argument must be the owner process" );
1352
1353    // access owner process fd_array
1354    return hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1355
1356}  // end process_fd_get_xptr_from_owner()
1357
1358///////////////////////////////////////////////////////////
1359xptr_t process_fd_get_xptr_from_local( process_t * process,
1360                                       uint32_t    fdid )
1361{
[23]1362    xptr_t  file_xp;
[564]1363    xptr_t  lock_xp;
[1]1364
[23]1365    // access local copy of process descriptor
[407]1366    file_xp = process->fd_array.array[fdid];
[1]1367
[23]1368    if( file_xp == XPTR_NULL )
1369    {
[662]1370        // get owner process cluster and local pointer
1371        xptr_t      owner_xp  = process->owner_xp;
1372        cxy_t       owner_cxy = GET_CXY( owner_xp );
1373        process_t * owner_ptr = GET_PTR( owner_xp );
[1]1374
[662]1375        // build extended pointer on lock protecting fd_array
1376        lock_xp = XPTR( owner_cxy , &owner_ptr->fd_array.lock );
[564]1377
[662]1378        // take lock protecting fd_array
[564]1379            remote_queuelock_acquire( lock_xp );
1380
[23]1381        // access reference process descriptor
[662]1382        file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[fdid] ) );
[1]1383
[662]1384        if( file_xp != XPTR_NULL ) 
1385        {
1386           // update local fd_array
1387            process->fd_array.array[fdid] = file_xp;
[564]1388       
[662]1389            // increase file refcount
1390            vfs_file_count_up( file_xp );
1391        }
1392
1393        // release lock protecting fd_array
[564]1394            remote_queuelock_release( lock_xp );
[23]1395    }
[1]1396
[23]1397    return file_xp;
[1]1398
[662]1399}  // end process_fd_get_xptr_from_local()
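
//////////////////////////////////////////////////////////////////////////////////////
// Hedged example: the example_fd_lookup() helper below is hypothetical (not part of
// this API) and only sketches how a kernel service running in the calling thread
// cluster could translate a user supplied <fdid> into an extended pointer on the
// file descriptor, relying on the lazy local-copy mechanism implemented above.
//////////////////////////////////////////////////////////////////////////////////////
static inline xptr_t example_fd_lookup( uint32_t fdid )
{
    // get local pointer on calling thread process descriptor
    process_t * process = CURRENT_THREAD->process;

    // reject out-of-range indexes before accessing the fd_array
    if( fdid >= CONFIG_PROCESS_FILE_MAX_NR ) return XPTR_NULL;

    // returns XPTR_NULL when the slot is empty in both local and owner copies
    return process_fd_get_xptr_from_local( process , fdid );
}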
[407]1400
[1]1401///////////////////////////////////////////
1402void process_fd_remote_copy( xptr_t dst_xp,
1403                             xptr_t src_xp )
1404{
1405    uint32_t fd;
1406    xptr_t   entry;
1407
1408    // get cluster and local pointer for src fd_array
1409    cxy_t        src_cxy = GET_CXY( src_xp );
[435]1410    fd_array_t * src_ptr = GET_PTR( src_xp );
[1]1411
1412    // get cluster and local pointer for dst fd_array
1413    cxy_t        dst_cxy = GET_CXY( dst_xp );
[435]1414    fd_array_t * dst_ptr = GET_PTR( dst_xp );
[1]1415
1416    // get the remote lock protecting the src fd_array
[564]1417        remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );
[1]1418
[428]1419    // loop on all fd_array entries
1420    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1421        {
[564]1422                entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );
[1]1423
1424                if( entry != XPTR_NULL )
1425                {
[459]1426            // increment file descriptor refcount
[1]1427            vfs_file_count_up( entry );
1428
1429                        // copy entry in destination process fd_array
[564]1430                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
[1]1431                }
1432        }
1433
1434    // release lock on source process fd_array
[564]1435        remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );
[1]1436
[407]1437}  // end process_fd_remote_copy()
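
//////////////////////////////////////////////////////////////////////////////////////
// Hedged example: the example_fd_array_fork_copy() helper below is hypothetical and
// only sketches how a fork-style initialisation path could duplicate the parent
// fd_array into a freshly created child process descriptor located in local_cxy,
// using the process_fd_remote_copy() function defined above.
//////////////////////////////////////////////////////////////////////////////////////
static inline void example_fd_array_fork_copy( xptr_t      parent_xp,
                                               process_t * child )
{
    // get parent process cluster and local pointer
    cxy_t       parent_cxy = GET_CXY( parent_xp );
    process_t * parent_ptr = GET_PTR( parent_xp );

    // copy all registered entries and take one reference on each open file
    process_fd_remote_copy( XPTR( local_cxy  , &child->fd_array ),
                            XPTR( parent_cxy , &parent_ptr->fd_array ) );
}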
1438
[564]1439
1440////////////////////////////////////
1441bool_t process_fd_array_full( void )
1442{
[662]1443    // get extended pointer on owner process
1444    xptr_t owner_xp = CURRENT_THREAD->process->owner_xp;
[564]1445
[662]1446    // get owner process cluster and local pointer
1447    process_t * owner_ptr = GET_PTR( owner_xp );
1448    cxy_t       owner_cxy = GET_CXY( owner_xp );
[564]1449
[662]1450    // get max index of registered entries in the fd_array
1451    uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max ));
[564]1452
[662]1453        return ( max == CONFIG_PROCESS_FILE_MAX_NR - 1 );
[564]1454}
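
//////////////////////////////////////////////////////////////////////////////////////
// Hedged example: the example_fd_check() helper below is hypothetical and only
// sketches how a file-creating service could use process_fd_array_full() as a
// guard before trying to register a new entry in the owner process fd_array.
//////////////////////////////////////////////////////////////////////////////////////
static inline error_t example_fd_check( void )
{
    if( process_fd_array_full() )
    {
        printk("\n[ERROR] fd_array full for process %x\n",
        CURRENT_THREAD->process->pid );
        return -1;
    }

    return 0;
}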
1455
[662]1456////////////////////////////////////////////
1457void process_fd_display( xptr_t process_xp )
1458{
1459    uint32_t      fdid;
1460    xptr_t        file_xp;
1461    vfs_file_t *  file_ptr;
1462    cxy_t         file_cxy;
1463    uint32_t      file_type;
1464    xptr_t        inode_xp;
1465    vfs_inode_t * inode_ptr;
[564]1466
[662]1467    char          name[CONFIG_VFS_MAX_NAME_LENGTH];
1468
1469    // get process cluster and local pointer
1470    process_t * process_ptr = GET_PTR( process_xp );
1471    cxy_t       process_cxy = GET_CXY( process_xp );
1472
1473    // get process PID
1474    pid_t  pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ));
1475
1476    // get pointers on owner process descriptor
1477    xptr_t      owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ));
1478    process_t * owner_ptr = GET_PTR( owner_xp );
1479    cxy_t       owner_cxy = GET_CXY( owner_xp );
1480
1481    // get max fdid from owner process descriptor
1482    uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max ));
1483
1484    printk("\n***** fd_array for pid %x in cluster %x / max %d *****\n",
1485    pid, process_cxy, max );
1486
1487    for( fdid = 0 ; fdid <= max ; fdid++ )
1488    {
1489        // get pointers on file descriptor
1490        file_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1491        file_ptr = GET_PTR( file_xp );
1492        file_cxy = GET_CXY( file_xp );
1493
1494        if( file_xp != XPTR_NULL )
1495        {
1496            // get file type
1497            file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type )); 
1498
1499            // get file name for a regular file
1500            if( file_type == INODE_TYPE_FILE )
1501            {
1502                // get inode pointers
1503                inode_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ));
1504                inode_xp  = XPTR( file_cxy , inode_ptr );
1505
1506                // get file name
1507                vfs_inode_get_name( inode_xp , name );
1508
1509                // display relevant file descriptor info
1510                printk(" - %d : type %s (%s)\n",
1511                fdid , process_fd_type_str(file_type), name );
1512            }
1513            else
1514            {
1515                // display relevant file descriptor info
1516                printk(" - %d : type %s\n",
1517                fdid , process_fd_type_str(file_type) );
1518            }
1519        }
1520        else
1521        {
1522            // display relevant file descriptor info
1523            printk(" - %d : empty slot\n",
1524            fdid );
1525        }
1526    }
1527}   // end process_fd_display()
1528
[1]1529////////////////////////////////////////////////////////////////////////////////////
1530//  Thread related functions
1531////////////////////////////////////////////////////////////////////////////////////
1532
1533/////////////////////////////////////////////////////
1534error_t process_register_thread( process_t * process,
1535                                 thread_t  * thread,
1536                                 trdid_t   * trdid )
1537{
[472]1538    ltid_t         ltid;
1539    bool_t         found = false;
1540 
[564]1541// check arguments
1542assert( (process != NULL) , "process argument is NULL" );
1543assert( (thread != NULL) , "thread argument is NULL" );
[1]1544
[564]1545    // take the lock protecting th_tbl[] for all threads,
1546    // except the idle thread executing kernel_init (which cannot yield)
1547    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
[1]1548
[583]1549    // scan th_tbl
[564]1550    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
[1]1551    {
1552        if( process->th_tbl[ltid] == NULL )
1553        {
1554            found = true;
1555            break;
1556        }
1557    }
1558
1559    if( found )
1560    {
1561        // register thread in th_tbl[]
1562        process->th_tbl[ltid] = thread;
1563        process->th_nr++;
1564
1565        // returns trdid
1566        *trdid = TRDID( local_cxy , ltid );
1567    }
1568
[583]1569    // release the lock protecting th_tbl
[564]1570    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
[428]1571
[564]1572    return (found) ? 0 : 0xFFFFFFFF;
[204]1573
1574}  // end process_register_thread()
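
//////////////////////////////////////////////////////////////////////////////////////
// Hedged example: the example_thread_attach() helper below is hypothetical and only
// sketches the expected usage of process_register_thread() by thread creation code:
// get a free th_tbl[] slot, build the thread identifier, and store it in the thread
// descriptor. A real caller would undo the registration if a later step fails.
//////////////////////////////////////////////////////////////////////////////////////
static inline error_t example_thread_attach( process_t * process,
                                             thread_t  * thread )
{
    trdid_t trdid;

    // get a free slot in th_tbl[] and build the thread identifier
    if( process_register_thread( process , thread , &trdid ) ) return -1;

    // store the allocated trdid in the thread descriptor
    thread->trdid = trdid;

    return 0;
}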
1575
[625]1576///////////////////////////////////////////////////
1577uint32_t process_remove_thread( thread_t * thread )
[1]1578{
[443]1579    uint32_t count;  // number of threads in local process descriptor
1580
[625]1581// check thread
1582assert( (thread != NULL) , "thread argument is NULL" );
1583
[1]1584    process_t * process = thread->process;
1585
1586    // get thread local index
1587    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
[564]1588   
1589    // get the lock protecting th_tbl[]
1590    rwlock_wr_acquire( &process->th_lock );
[428]1591
[583]1592    // get number of threads
[443]1593    count = process->th_nr;
[428]1594
[564]1595// check th_nr value
[624]1596assert( (count > 0) , "process th_nr cannot be 0" );
[443]1597
[1]1598    // remove thread from th_tbl[]
1599    process->th_tbl[ltid] = NULL;
[450]1600    process->th_nr = count-1;
[1]1601
[583]1602    // release lock protecting th_tbl
[564]1603    rwlock_wr_release( &process->th_lock );
[428]1604
[625]1605    return count;
[443]1606
[450]1607}  // end process_remove_thread()
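
//////////////////////////////////////////////////////////////////////////////////////
// Hedged example: the example_thread_detach() helper below is hypothetical and only
// sketches how a caller can use the value returned by process_remove_thread()
// (number of threads before removal) to detect that it has just removed the last
// thread registered in the local process descriptor.
//////////////////////////////////////////////////////////////////////////////////////
static inline bool_t example_thread_detach( thread_t * thread )
{
    // remove thread from th_tbl[] and get the previous number of threads
    uint32_t count = process_remove_thread( thread );

    // true when the removed thread was the last one in this cluster
    return (count == 1);
}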
[204]1608
[408]1609/////////////////////////////////////////////////////////
1610error_t process_make_fork( xptr_t      parent_process_xp,
1611                           xptr_t      parent_thread_xp,
1612                           pid_t     * child_pid,
1613                           thread_t ** child_thread )
[1]1614{
[408]1615    process_t * process;         // local pointer on child process descriptor
1616    thread_t  * thread;          // local pointer on child thread descriptor
1617    pid_t       new_pid;         // process identifier for child process
1618    pid_t       parent_pid;      // process identifier for parent process
1619    xptr_t      ref_xp;          // extended pointer on reference process
[428]1620    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
[408]1621    error_t     error;
[1]1622
[408]1623    // get cluster and local pointer for parent process
1624    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
[435]1625    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
[101]1626
[428]1627    // get parent process PID and extended pointer on .elf file
[564]1628    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1629    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
[428]1630
[564]1631    // get extended pointer on reference process
1632    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
[438]1633
[564]1634// check parent process is the reference process
1635assert( (parent_process_xp == ref_xp ) ,
[624]1636"parent process must be the reference process" );
[407]1637
[438]1638#if DEBUG_PROCESS_MAKE_FORK
[635]1639uint32_t   cycle;
[583]1640thread_t * this  = CURRENT_THREAD;
1641trdid_t    trdid = this->trdid;
1642pid_t      pid   = this->process->pid;
[635]1643#endif
1644
1645#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1646cycle   = (uint32_t)hal_get_cycles();
[438]1647if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1648printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
[583]1649__FUNCTION__, pid, trdid, local_cxy, cycle );
[433]1650#endif
[172]1651
[408]1652    // allocate a process descriptor
1653    process = process_alloc();
[635]1654
[408]1655    if( process == NULL )
1656    {
1657        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1658        __FUNCTION__, local_cxy ); 
1659        return -1;
1660    }
[1]1661
[408]1662    // allocate a child PID from local cluster
[416]1663    error = cluster_pid_alloc( process , &new_pid );
[428]1664    if( error ) 
[1]1665    {
[408]1666        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1667        __FUNCTION__, local_cxy ); 
1668        process_free( process );
1669        return -1;
[1]1670    }
[408]1671
[469]1672#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[457]1673cycle = (uint32_t)hal_get_cycles();
1674if( DEBUG_PROCESS_MAKE_FORK < cycle )
[625]1675printk("\n[%s] thread[%x,%x] allocated child_process %x / cycle %d\n",
[583]1676__FUNCTION__, pid, trdid, new_pid, cycle );
[457]1677#endif
1678
[408]1679    // initializes child process descriptor from parent process descriptor
[625]1680    error = process_reference_init( process,
1681                                    new_pid,
1682                                    parent_process_xp );
1683    if( error ) 
1684    {
1685        printk("\n[ERROR] in %s : cannot initialize child process in cluster %x\n", 
1686        __FUNCTION__, local_cxy ); 
1687        process_free( process );
1688        return -1;
1689    }
[408]1690
[438]1691#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1692cycle = (uint32_t)hal_get_cycles();
[438]1693if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1694printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
[583]1695__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1696#endif
[408]1697
1698    // copy VMM from parent descriptor to child descriptor
1699    error = vmm_fork_copy( process,
1700                           parent_process_xp );
1701    if( error )
[101]1702    {
[408]1703        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1704        __FUNCTION__, local_cxy ); 
1705        process_free( process );
1706        cluster_pid_release( new_pid );
1707        return -1;
[101]1708    }
[172]1709
[438]1710#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1711cycle = (uint32_t)hal_get_cycles();
[438]1712if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1713printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
[583]1714__FUNCTION__, pid, trdid, cycle );
[635]1715hal_vmm_display( XPTR( local_cxy , process ) , true );
[433]1716#endif
[407]1717
[564]1718    // if parent_process is INIT, or if parent_process is the TXT owner,
1719    // the child_process becomes the owner of its TXT terminal
1720    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
[457]1721    {
1722        process_txt_set_ownership( XPTR( local_cxy , process ) );
1723
1724#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1725cycle = (uint32_t)hal_get_cycles();
[626]1726if( DEBUG_PROCESS_MAKE_FORK < cycle )
[635]1727printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership / cycle %d\n",
1728__FUNCTION__ , pid, trdid, new_pid, cycle );
[457]1729#endif
1730
1731    }
1732
[428]1733    // update extended pointer on .elf file
1734    process->vfs_bin_xp = vfs_bin_xp;
1735
[408]1736    // create child thread descriptor from parent thread descriptor
1737    error = thread_user_fork( parent_thread_xp,
1738                              process,
1739                              &thread );
1740    if( error )
1741    {
1742        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1743        __FUNCTION__, local_cxy ); 
1744        process_free( process );
1745        cluster_pid_release( new_pid );
1746        return -1;
1747    }
[172]1748
[564]1749// check main thread LTID
1750assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
[624]1751"main thread must have LTID == 0" );
[428]1752
[564]1753#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1754cycle = (uint32_t)hal_get_cycles();
[438]1755if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1756printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
[583]1757__FUNCTION__, pid, trdid, thread, cycle );
[433]1758#endif
[1]1759
[635]1760    // set COW flag in DATA, ANON, REMOTE vsegs in parent process VMM
[629]1761    // this includes all parent process copies in all clusters
[408]1762    if( parent_process_cxy == local_cxy )   // reference is local
1763    {
1764        vmm_set_cow( parent_process_ptr );
1765    }
1766    else                                    // reference is remote
1767    {
1768        rpc_vmm_set_cow_client( parent_process_cxy,
1769                                parent_process_ptr );
1770    }
[1]1771
[625]1772    // set COW flag in DATA, ANON, REMOTE vsegs for child process VMM
[433]1773    vmm_set_cow( process );
1774 
[438]1775#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1776cycle = (uint32_t)hal_get_cycles();
[438]1777if( DEBUG_PROCESS_MAKE_FORK < cycle )
[635]1778printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child / cycle %d\n",
[583]1779__FUNCTION__, pid, trdid, cycle );
[433]1780#endif
[101]1781
[428]1782    // get extended pointers on parent children_root, children_lock and children_nr
1783    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1784    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1785    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
[101]1786
[428]1787    // register process in parent children list
[564]1788    remote_queuelock_acquire( children_lock_xp );
[428]1789        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1790        hal_remote_atomic_add( children_nr_xp , 1 );
[564]1791    remote_queuelock_release( children_lock_xp );
[204]1792
[408]1793    // return success
1794    *child_thread = thread;
1795    *child_pid    = new_pid;
[1]1796
[438]1797#if DEBUG_PROCESS_MAKE_FORK
[433]1798cycle = (uint32_t)hal_get_cycles();
[438]1799if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1800printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
[583]1801__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1802#endif
[428]1803
[408]1804    return 0;
1805
[416]1806}   // end process_make_fork()
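
//////////////////////////////////////////////////////////////////////////////////////
// Hedged example: the example_local_fork() helper below is hypothetical and only
// sketches how a fork path executing in the reference cluster could call
// process_make_fork() with the reference process and the calling thread, then make
// the new child main thread runnable. A complete fork syscall may also have to
// handle the case where the child must be created in a remote cluster, typically
// through an RPC, which is not shown here.
//////////////////////////////////////////////////////////////////////////////////////
static inline error_t example_local_fork( pid_t * child_pid )
{
    thread_t * this = CURRENT_THREAD;
    thread_t * child_thread;
    error_t    error;

    // the parent process argument must be the reference process
    error = process_make_fork( this->process->ref_xp,
                               XPTR( local_cxy , this ),
                               child_pid,
                               &child_thread );
    if( error ) return -1;

    // make the child main thread runnable
    thread_unblock( XPTR( local_cxy , child_thread ) , THREAD_BLOCKED_GLOBAL );

    return 0;
}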
[408]1807
1808/////////////////////////////////////////////////////
1809error_t process_make_exec( exec_info_t  * exec_info )
1810{
[457]1811    thread_t       * thread;                  // local pointer on this thread
1812    process_t      * process;                 // local pointer on this process
1813    pid_t            pid;                     // this process identifier
[610]1814    xptr_t           ref_xp;                  // reference process for this process
[441]1815        error_t          error;                   // value returned by called functions
[457]1816    char           * path;                    // path to .elf file
1817    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
1818    uint32_t         file_id;                 // file index in fd_array
1819    uint32_t         args_nr;                 // number of main thread arguments
1820    char          ** args_pointers;           // array of pointers on main thread arguments
[446]1821
[625]1822    // get calling thread, process, pid and ref_xp
[457]1823    thread  = CURRENT_THREAD;
1824    process = thread->process;
1825    pid     = process->pid;
[610]1826    ref_xp  = process->ref_xp;
[408]1827
[457]1828        // get relevant infos from exec_info
1829        path          = exec_info->path;
1830    args_nr       = exec_info->args_nr;
1831    args_pointers = exec_info->args_pointers;
[408]1832
[438]1833#if DEBUG_PROCESS_MAKE_EXEC
[433]1834uint32_t cycle = (uint32_t)hal_get_cycles();
[635]1835if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1836printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n",
[583]1837__FUNCTION__, pid, thread->trdid, path, cycle );
[433]1838#endif
[408]1839
[457]1840    // open the file identified by <path>
1841    file_xp = XPTR_NULL;
[564]1842    file_id = 0xFFFFFFFF;
[610]1843        error   = vfs_open( process->vfs_root_xp,
[457]1844                            path,
[610]1845                        ref_xp,
[457]1846                            O_RDONLY,
1847                            0,
1848                            &file_xp,
1849                            &file_id );
1850        if( error )
1851        {
1852                printk("\n[ERROR] in %s : failed to open file <%s>\n", __FUNCTION__ , path );
1853                return -1;
1854        }
1855
[446]1856#if (DEBUG_PROCESS_MAKE_EXEC & 1)
[469]1857cycle = (uint32_t)hal_get_cycles();
[635]1858if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1859printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n",
[583]1860__FUNCTION__, pid, thread->trdid, path, cycle );
[446]1861#endif
1862
[457]1863    // delete all threads other than this main thread in all clusters
1864    process_sigaction( pid , DELETE_ALL_THREADS );
[446]1865
[469]1866#if (DEBUG_PROCESS_MAKE_EXEC & 1)
1867cycle = (uint32_t)hal_get_cycles();
[635]1868if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[625]1869printk("\n[%s] thread[%x,%x] deleted existing threads / cycle %d\n",
[583]1870__FUNCTION__, pid, thread->trdid, cycle );
[469]1871#endif
1872
[625]1873    // reset calling process VMM
1874    vmm_user_reset( process );
[446]1875
[457]1876#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1877cycle = (uint32_t)hal_get_cycles();
[635]1878if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[625]1879printk("\n[%s] thread[%x,%x] completed VMM reset / cycle %d\n",
[583]1880__FUNCTION__, pid, thread->trdid, cycle );
[457]1881#endif
[408]1882
[625]1883    // re-initialize the VMM (args/envs vsegs registration)
1884    error = vmm_user_init( process );
[457]1885    if( error )
[416]1886    {
[457]1887        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
1888        vfs_close( file_xp , file_id );
[623]1889        // FIXME restore old process VMM [AG]
[416]1890        return -1;
1891    }
[457]1892   
[438]1893#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1894cycle = (uint32_t)hal_get_cycles();
[635]1895if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[625]1896printk("\n[%s] thread[%x,%x] registered args/envs vsegs / cycle %d\n",
[583]1897__FUNCTION__, pid, thread->trdid, cycle );
[433]1898#endif
[428]1899
[457]1900    // register code & data vsegs as well as entry-point in process VMM,
[428]1901    // and register extended pointer on .elf file in process descriptor
[457]1902        error = elf_load_process( file_xp , process );
[441]1903    if( error )
[1]1904        {
[441]1905                printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
[457]1906        vfs_close( file_xp , file_id );
[623]1907        // FIXME restore old process VMM [AG]
[408]1908        return -1;
[1]1909        }
1910
[438]1911#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1912cycle = (uint32_t)hal_get_cycles();
[635]1913if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[625]1914printk("\n[%s] thread[%x,%x] registered code/data vsegs / cycle %d\n",
[583]1915__FUNCTION__, pid, thread->trdid, cycle );
[433]1916#endif
[1]1917
[457]1918    // update the existing main thread descriptor... and jump to user code
1919    error = thread_user_exec( (void *)process->vmm.entry_point,
1920                              args_nr,
1921                              args_pointers );
1922    if( error )
1923    {
[469]1924        printk("\n[ERROR] in %s : cannot update main thread for %s\n", __FUNCTION__ , path );
[457]1925        vfs_close( file_xp , file_id );
1926        // FIXME restore old process VMM
[408]1927        return -1;
[457]1928    }
[1]1929
[492]1930    assert( false, "we should not execute this code");
[457]1931 
[409]1932        return 0;
1933
1934}  // end process_make_exec()
1935
[457]1936
[623]1937////////////////////////////////////////////////
1938void process_zero_create( process_t   * process,
1939                          boot_info_t * info )
[428]1940{
[580]1941    error_t error;
1942    pid_t   pid;
[428]1943
[438]1944#if DEBUG_PROCESS_ZERO_CREATE
[433]1945uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1946if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]1947printk("\n[%s] enter / cluster %x / cycle %d\n",
[564]1948__FUNCTION__, local_cxy, cycle );
[433]1949#endif
[428]1950
[624]1951    // get pointer on VMM
1952    vmm_t * vmm = &process->vmm;
1953
[580]1954    // get PID from local cluster manager for this kernel process
1955    error = cluster_pid_alloc( process , &pid );
1956
1957    if( error || (LPID_FROM_PID( pid ) != 0) )
1958    {
1959        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
1960        __FUNCTION__ , local_cxy, pid );
1961        hal_core_sleep();
1962    }
1963
[635]1964#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1965if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1966printk("\n[%s] allocated pid %x in cluster %x\n", __FUNCTION__, pid, local_cxy );
1967#endif
1968
[428]1969    // initialize PID, REF_XP, PARENT_XP, and STATE
[580]1970    // the kernel process_zero is its own parent_process,
1971    // reference_process, and owner_process, and cannot be killed...
1972    process->pid        = pid;
[433]1973    process->ref_xp     = XPTR( local_cxy , process );
[443]1974    process->owner_xp   = XPTR( local_cxy , process );
[580]1975    process->parent_xp  = XPTR( local_cxy , process );
[433]1976    process->term_state = 0;
[428]1977
[635]1978    // initialize VSL as empty
[624]1979    vmm->vsegs_nr = 0;
1980        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[623]1981
[635]1982#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1983if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1984printk("\n[%s] initialized VSL empty in cluster %x\n", __FUNCTION__, local_cxy );
1985#endif
1986
1987    // initialize GPT as empty
[624]1988    error = hal_gpt_create( &vmm->gpt );
[635]1989
[624]1990    if( error ) 
1991    {
1992        printk("\n[PANIC] in %s : cannot create empty GPT\n", __FUNCTION__ );
1993        hal_core_sleep();
1994    }
1995
[635]1996#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1997if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1998printk("\n[%s] initialized empty GPT in cluster %x\n", __FUNCTION__, local_cxy );
1999#endif
2000
[625]2001    // initialize VSL and GPT locks
[629]2002    remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
[624]2003   
2004    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
2005    error = hal_vmm_kernel_init( info );
[635]2006
[624]2007    if( error ) 
2008    {
2009        printk("\n[PANIC] in %s : cannot create kernel vsegs in VMM\n", __FUNCTION__ );
2010        hal_core_sleep();
2011    }
2012
[635]2013#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2014if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2015printk("\n[%s] initialized HAL-specific VMM in cluster %x\n", __FUNCTION__, local_cxy );
2016#endif
2017
[564]2018    // reset th_tbl[] array and associated fields
[428]2019    uint32_t i;
[564]2020    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[428]2021        {
2022        process->th_tbl[i] = NULL;
2023    }
2024    process->th_nr  = 0;
[564]2025    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[428]2026
[635]2027#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2028if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2029printk("\n[%s] initialized th_tbl[] in cluster %x\n", __FUNCTION__, local_cxy );
2030#endif
[564]2031
[428]2032    // reset children list as empty
2033    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
2034    process->children_nr = 0;
[564]2035    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
2036                           LOCK_PROCESS_CHILDREN );
[428]2037
[635]2038#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2039if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2040printk("\n[%s] initialized children list in cluster %x\n", __FUNCTION__, local_cxy );
2041#endif
2042
[580]2043    // register kernel process in cluster manager local_list
2044    cluster_process_local_link( process );
2045   
[428]2046        hal_fence();
2047
[438]2048#if DEBUG_PROCESS_ZERO_CREATE
[433]2049cycle = (uint32_t)hal_get_cycles();
[438]2050if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]2051printk("\n[%s] exit / cluster %x / cycle %d\n",
[564]2052__FUNCTION__, local_cxy, cycle );
[433]2053#endif
[428]2054
[610]2055}  // end process_zero_create()
[428]2056
[564]2057////////////////////////////////
[485]2058void process_init_create( void )
[1]2059{
[428]2060    process_t      * process;       // local pointer on process descriptor
[409]2061    pid_t            pid;           // process_init identifier
2062    thread_t       * thread;        // local pointer on main thread
2063    pthread_attr_t   attr;          // main thread attributes
2064    lid_t            lid;           // selected core local index for main thread
[457]2065    xptr_t           file_xp;       // extended pointer on .elf file descriptor
2066    uint32_t         file_id;       // file index in fd_array
[409]2067    error_t          error;
[1]2068
[438]2069#if DEBUG_PROCESS_INIT_CREATE
[610]2070thread_t * this = CURRENT_THREAD;
[433]2071uint32_t cycle = (uint32_t)hal_get_cycles();
[438]2072if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]2073printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
2074__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]2075#endif
[1]2076
[408]2077    // allocates memory for process descriptor from local cluster
2078        process = process_alloc(); 
[625]2079    if( process == NULL )
2080    {
2081        printk("\n[PANIC] in %s : cannot allocate process\n", __FUNCTION__ );
2082        hal_core_sleep();
2083    }
[101]2084
[610]2085    // set the CWD and VFS_ROOT fields in process descriptor
2086    process->cwd_xp      = process_zero.vfs_root_xp;
2087    process->vfs_root_xp = process_zero.vfs_root_xp;
2088
[409]2089    // get PID from local cluster
[416]2090    error = cluster_pid_alloc( process , &pid );
[625]2091    if( error ) 
2092    {
2093        printk("\n[PANIC] in %s : cannot allocate PID\n", __FUNCTION__ );
2094        hal_core_sleep();
2095    }
2096    if( pid != 1 ) 
2097    {
2098        printk("\n[PANIC] in %s : process PID must be 0x1\n", __FUNCTION__ );
2099        hal_core_sleep();
2100    }
[408]2101
[409]2102    // initialize process descriptor / parent is local process_zero
[625]2103    error = process_reference_init( process,
2104                                    pid,
2105                                    XPTR( local_cxy , &process_zero ) ); 
2106    if( error )
2107    {
2108        printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ );
2109        hal_core_sleep();
2110    }
[408]2111
[564]2112#if(DEBUG_PROCESS_INIT_CREATE & 1)
2113if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]2114printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
2115__FUNCTION__, this->process->pid, this->trdid );
[564]2116#endif
2117
[457]2118    // open the file identified by CONFIG_PROCESS_INIT_PATH
2119    file_xp = XPTR_NULL;
2120    file_id = -1;
[610]2121        error   = vfs_open( process->vfs_root_xp,
[457]2122                            CONFIG_PROCESS_INIT_PATH,
[610]2123                        XPTR( local_cxy , process ),
[457]2124                            O_RDONLY,
2125                            0,
2126                            &file_xp,
2127                            &file_id );
[625]2128    if( error )
2129    {
2130        printk("\n[PANIC] in %s : cannot open file <%s>\n",
2131         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
2132        hal_core_sleep();
2133    }
[457]2134
[564]2135#if(DEBUG_PROCESS_INIT_CREATE & 1)
2136if( DEBUG_PROCESS_INIT_CREATE < cycle )
2137printk("\n[%s] thread[%x,%x] opened .elf file descriptor\n",
2138__FUNCTION__, this->process->pid, this->trdid );
[564]2139#endif
2140
[625]2141    // register "code" and "data" vsegs as well as entry-point
[409]2142    // in process VMM, using information contained in the elf file.
[457]2143        error = elf_load_process( file_xp , process );
[101]2144
[625]2145    if( error ) 
2146    {
2147        printk("\n[PANIC] in %s : cannot access file <%s>\n",
2148         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
2149        hal_core_sleep();
2150    }
[457]2151
[625]2152
[564]2153#if(DEBUG_PROCESS_INIT_CREATE & 1)
2154if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]2155printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
2156__FUNCTION__, this->process->pid, this->trdid );
[564]2157#endif
2158
[625]2159#if (DEBUG_PROCESS_INIT_CREATE & 1)
[635]2160hal_vmm_display( XPTR( local_cxy , process ) , true );
[625]2161#endif
2162
[428]2163    // get extended pointers on process_zero children_root, children_lock
2164    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
2165    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
2166
[564]2167    // take lock protecting kernel process children list
2168    remote_queuelock_acquire( children_lock_xp );
2169
[428]2170    // register process INIT in parent local process_zero
2171        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
2172        hal_atomic_add( &process_zero.children_nr , 1 );
2173
[564]2174    // release lock protecting kernel process children list
2175    remote_queuelock_release( children_lock_xp );
2176
2177#if(DEBUG_PROCESS_INIT_CREATE & 1)
2178if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]2179printk("\n[%s] thread[%x,%x] registered init process in parent\n",
2180__FUNCTION__, this->process->pid, this->trdid );
[564]2181#endif
2182
[409]2183    // select a core in local cluster to execute the main thread
[637]2184    lid  = cluster_select_local_core( local_cxy );
[409]2185
2186    // initialize pthread attributes for main thread
2187    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
2188    attr.cxy        = local_cxy;
2189    attr.lid        = lid;
2190
2191    // create and initialize thread descriptor
2192        error = thread_user_create( pid,
2193                                (void *)process->vmm.entry_point,
2194                                NULL,
2195                                &attr,
2196                                &thread );
[1]2197
[625]2198    if( error )
2199    {
2200        printk("\n[PANIC] in %s : cannot create main thread\n", __FUNCTION__  );
2201        hal_core_sleep();
2202    }
2203    if( thread->trdid != 0 )
2204    {
2205        printk("\n[PANIC] in %s : bad main thread trdid\n", __FUNCTION__  );
2206        hal_core_sleep();
2207    }
[428]2208
[564]2209#if(DEBUG_PROCESS_INIT_CREATE & 1)
2210if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]2211printk("\n[%s] thread[%x,%x] created main thread\n",
2212__FUNCTION__, this->process->pid, this->trdid );
[564]2213#endif
2214
[409]2215    // activate thread
2216        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
2217
[124]2218    hal_fence();
[1]2219
[438]2220#if DEBUG_PROCESS_INIT_CREATE
[433]2221cycle = (uint32_t)hal_get_cycles();
[438]2222if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]2223printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
2224__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]2225#endif
[409]2226
[204]2227}  // end process_init_create()
2228
[428]2229/////////////////////////////////////////
2230void process_display( xptr_t process_xp )
2231{
2232    process_t   * process_ptr;
2233    cxy_t         process_cxy;
[443]2234
[428]2235    xptr_t        parent_xp;       // extended pointer on parent process
2236    process_t   * parent_ptr;
2237    cxy_t         parent_cxy;
2238
[443]2239    xptr_t        owner_xp;        // extended pointer on owner process
2240    process_t   * owner_ptr;
2241    cxy_t         owner_cxy;
2242
[428]2243    pid_t         pid;
2244    pid_t         ppid;
[580]2245    lpid_t        lpid;
[428]2246    uint32_t      state;
2247    uint32_t      th_nr;
2248
[443]2249    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
2250    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
2251    chdev_t     * txt_chdev_ptr;
2252    cxy_t         txt_chdev_cxy;
2253    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
[428]2254
2255    xptr_t        elf_file_xp;     // extended pointer on .elf file
2256    cxy_t         elf_file_cxy;
2257    vfs_file_t  * elf_file_ptr;
2258    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
2259
2260    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
2261    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
2262
2263    // get cluster and local pointer on process
2264    process_ptr = GET_PTR( process_xp );
2265    process_cxy = GET_CXY( process_xp );
2266
[580]2267    // get process PID, LPID, and state
[564]2268    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[580]2269    lpid  = LPID_FROM_PID( pid );
[564]2270    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
[428]2271
[580]2272    // get process PPID
[564]2273    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[428]2274    parent_cxy = GET_CXY( parent_xp );
2275    parent_ptr = GET_PTR( parent_xp );
[564]2276    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]2277
2278    // get number of threads
[564]2279    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
[428]2280
[443]2281    // get pointers on owner process descriptor
[564]2282    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
[443]2283    owner_cxy = GET_CXY( owner_xp );
2284    owner_ptr = GET_PTR( owner_xp );
[428]2285
[580]2286    // get process TXT name and .elf name
2287    if( lpid )                                   // user process
2288    {
[443]2289
[580]2290        // get extended pointer on file descriptor associated to TXT_RX
2291        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
[428]2292
[580]2293        assert( (txt_file_xp != XPTR_NULL) ,
[624]2294        "process must be attached to one TXT terminal" ); 
[443]2295
[580]2296        // get TXT_RX chdev pointers
2297        txt_chdev_xp  = chdev_from_file( txt_file_xp );
2298        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
2299        txt_chdev_ptr = GET_PTR( txt_chdev_xp );
2300
2301        // get TXT_RX name and ownership
2302        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
2303                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
[428]2304   
[580]2305        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
2306                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
[428]2307
[580]2308        // get process .elf name
2309        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
2310        elf_file_cxy  = GET_CXY( elf_file_xp );
2311        elf_file_ptr  = GET_PTR( elf_file_xp );
2312        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
2313        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
2314    }
2315    else                                         // kernel process_zero
2316    {
2317        // TXT name and .elf name are not registered in kernel process_zero
2318        strcpy( txt_name , "txt0_rx" );
2319        txt_owner_xp = process_xp; 
2320        strcpy( elf_name , "kernel.elf" );
2321    }
2322
[428]2323    // display process info
[443]2324    if( txt_owner_xp == process_xp )
[428]2325    {
[581]2326        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
2327        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]2328    }
2329    else
2330    {
[581]2331        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
2332        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]2333    }
2334}  // end process_display()
2335
2336
2337////////////////////////////////////////////////////////////////////////////////////////
2338//     Terminals related functions
2339////////////////////////////////////////////////////////////////////////////////////////
2340
[581]2341//////////////////////////////////
[485]2342uint32_t process_txt_alloc( void )
[428]2343{
2344    uint32_t  index;       // TXT terminal index
2345    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
2346    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
2347    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
2348    xptr_t    root_xp;     // extended pointer on owner field in chdev
2349
2350    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
2351    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
2352    {
2353        // get pointers on TXT_RX[index]
2354        chdev_xp  = chdev_dir.txt_rx[index];
2355        chdev_cxy = GET_CXY( chdev_xp );
2356        chdev_ptr = GET_PTR( chdev_xp );
2357
2358        // get extended pointer on root of attached process
2359        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2360
2361        // return free TXT index if found
2362        if( xlist_is_empty( root_xp ) ) return index; 
2363    }
2364
[492]2365    assert( false , "no free TXT terminal found" );
[428]2366
2367    return -1;
2368
2369} // end process_txt_alloc()
2370
2371/////////////////////////////////////////////
2372void process_txt_attach( process_t * process,
2373                         uint32_t    txt_id )
2374{
2375    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2376    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2377    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2378    xptr_t      root_xp;      // extended pointer on list root in chdev
2379    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2380
[564]2381// check process is in owner cluster
2382assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
2383"process descriptor not in owner cluster" );
[428]2384
[564]2385// check terminal index
2386assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
2387"illegal TXT terminal index" );
[428]2388
2389    // get pointers on TXT_RX[txt_id] chdev
2390    chdev_xp  = chdev_dir.txt_rx[txt_id];
2391    chdev_cxy = GET_CXY( chdev_xp );
2392    chdev_ptr = GET_PTR( chdev_xp );
2393
2394    // get extended pointer on root & lock of attached process list
2395    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2396    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2397
[564]2398    // get lock protecting list of processes attached to TXT
2399    remote_busylock_acquire( lock_xp );
2400
[428]2401    // insert process in attached process list
2402    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
2403
[564]2404    // release lock protecting list of processes attached to TXT
2405    remote_busylock_release( lock_xp );
2406
[446]2407#if DEBUG_PROCESS_TXT
[610]2408thread_t * this = CURRENT_THREAD;
[457]2409uint32_t cycle = (uint32_t)hal_get_cycles();
[446]2410if( DEBUG_PROCESS_TXT < cycle )
[610]2411printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
2412__FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle );
[433]2413#endif
[428]2414
2415} // end process_txt_attach()
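
//////////////////////////////////////////////////////////////////////////////////////
// Hedged example: the example_txt_bind() helper below is hypothetical and only
// sketches how a free user TXT terminal can be selected with process_txt_alloc()
// and then associated to a process with process_txt_attach(). As checked by the
// assertions above, the process descriptor must be located in its owner cluster.
//////////////////////////////////////////////////////////////////////////////////////
static inline void example_txt_bind( process_t * process )
{
    // get a user TXT terminal with no attached process
    uint32_t txt_id = process_txt_alloc();

    // register the process in the TXT attached-process list
    process_txt_attach( process , txt_id );
}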
2416
[436]2417/////////////////////////////////////////////
2418void process_txt_detach( xptr_t  process_xp )
[428]2419{
[436]2420    process_t * process_ptr;  // local pointer on process in owner cluster
2421    cxy_t       process_cxy;  // process owner cluster
2422    pid_t       process_pid;  // process identifier
2423    xptr_t      file_xp;      // extended pointer on stdin file
[428]2424    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2425    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2426    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2427    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2428
[436]2429    // get process cluster, local pointer, and PID
2430    process_cxy = GET_CXY( process_xp );
2431    process_ptr = GET_PTR( process_xp );
[564]2432    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2433
[564]2434// check process descriptor in owner cluster
2435assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
2436"process descriptor not in owner cluster" );
[436]2437
2438    // release TXT ownership (does nothing if not TXT owner)
2439    process_txt_transfer_ownership( process_xp );
[428]2440
[625]2441    // get extended pointer on process stdin pseudo file
[564]2442    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[436]2443
2444    // get pointers on TXT_RX chdev
2445    chdev_xp  = chdev_from_file( file_xp );
[428]2446    chdev_cxy = GET_CXY( chdev_xp );
2447    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
2448
[436]2449    // get extended pointer on lock protecting attached process list
[428]2450    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2451
[564]2452    // get lock protecting list of processes attached to TXT
2453    remote_busylock_acquire( lock_xp );
2454
[428]2455    // unlink process from attached process list
[436]2456    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
2457
[564]2458    // release lock protecting list of processes attached to TXT
2459    remote_busylock_release( lock_xp );
2460
[446]2461#if DEBUG_PROCESS_TXT
[610]2462thread_t * this = CURRENT_THREAD;
[457]2463uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2464uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[446]2465if( DEBUG_PROCESS_TXT < cycle )
[625]2466printk("\n[%s] thread[%x,%x] detached process %x from TXT%d / cycle %d\n",
[610]2467__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
[433]2468#endif
[428]2469
2470} // end process_txt_detach()
2471
2472///////////////////////////////////////////////////
2473void process_txt_set_ownership( xptr_t process_xp )
2474{
2475    process_t * process_ptr;
2476    cxy_t       process_cxy;
[436]2477    pid_t       process_pid;
[428]2478    xptr_t      file_xp;
2479    xptr_t      txt_xp;     
2480    chdev_t   * txt_ptr;
2481    cxy_t       txt_cxy;
2482
[436]2483    // get pointers on process in owner cluster
[428]2484    process_cxy = GET_CXY( process_xp );
[435]2485    process_ptr = GET_PTR( process_xp );
[564]2486    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2487
2488    // check owner cluster
[492]2489    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[624]2490    "process descriptor not in owner cluster" );
[436]2491
[428]2492    // get extended pointer on stdin pseudo file
[564]2493    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2494
2495    // get pointers on TXT chdev
2496    txt_xp  = chdev_from_file( file_xp );
2497    txt_cxy = GET_CXY( txt_xp );
[435]2498    txt_ptr = GET_PTR( txt_xp );
[428]2499
2500    // set owner field in TXT chdev
[564]2501    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
[428]2502
[446]2503#if DEBUG_PROCESS_TXT
[610]2504thread_t * this = CURRENT_THREAD;
[457]2505uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2506uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[446]2507if( DEBUG_PROCESS_TXT < cycle )
[625]2508printk("\n[%s] thread[%x,%x] gave TXT%d ownership to process %x / cycle %d\n",
[610]2509__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
[436]2510#endif
2511
[428]2512}  // end process_txt_set ownership()
2513
[436]2514////////////////////////////////////////////////////////
2515void process_txt_transfer_ownership( xptr_t process_xp )
[428]2516{
[436]2517    process_t * process_ptr;     // local pointer on process releasing ownership
2518    cxy_t       process_cxy;     // process cluster
2519    pid_t       process_pid;     // process identifier
[428]2520    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
2521    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
[433]2522    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
2523    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
2524    uint32_t    txt_id;          // TXT_RX channel
[428]2525    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
2526    xptr_t      root_xp;         // extended pointer on root of attached process list
[436]2527    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
[428]2528    xptr_t      iter_xp;         // iterator for xlist
2529    xptr_t      current_xp;      // extended pointer on current process
[625]2530    bool_t      found;
[428]2531
[457]2532#if DEBUG_PROCESS_TXT
[610]2533thread_t * this  = CURRENT_THREAD;
2534uint32_t   cycle;
[457]2535#endif
2536
[625]2537    // get pointers on target process
[428]2538    process_cxy = GET_CXY( process_xp );
[435]2539    process_ptr = GET_PTR( process_xp );
[564]2540    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2541
[625]2542// check owner cluster
2543assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2544"process descriptor not in owner cluster" );
[436]2545
[428]2546    // get extended pointer on stdin pseudo file
[564]2547    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2548
2549    // get pointers on TXT chdev
2550    txt_xp  = chdev_from_file( file_xp );
2551    txt_cxy = GET_CXY( txt_xp );
[433]2552    txt_ptr = GET_PTR( txt_xp );
[428]2553
[625]2554    // get relevant infos from chdev descriptor
[564]2555    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[625]2556    txt_id   = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[428]2557
[625]2558    // transfer ownership only if target process is the TXT owner
[436]2559    if( (owner_xp == process_xp) && (txt_id > 0) ) 
[428]2560    {
[436]2561        // get extended pointers on root and lock of attached processes list
2562        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
2563        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
[428]2564
[625]2565        if( process_get_ppid( process_xp ) != 1 )       // target process is not KSH
2566        {
2567            // get lock
2568            remote_busylock_acquire( lock_xp );
[436]2569
2570            // scan attached process list to find KSH process
[625]2571            found = false;
2572            for( iter_xp = hal_remote_l64( root_xp ) ;
2573                 (iter_xp != root_xp) && (found == false) ;
2574                 iter_xp = hal_remote_l64( iter_xp ) )
[436]2575            {
[625]2576                current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );
[435]2577
[436]2578                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
2579                {
2580                    // set owner field in TXT chdev
[564]2581                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2582
[446]2583#if DEBUG_PROCESS_TXT
[610]2584cycle = (uint32_t)hal_get_cycles();
[446]2585if( DEBUG_PROCESS_TXT < cycle )
[625]2586printk("\n[%s] thread[%x,%x] transferred TXT%d ownership to KSH / cycle %d\n",
2587__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
[436]2588#endif
[625]2589                    found = true;
[436]2590                }
2591            }
[625]2592
[436]2593            // release lock
[564]2594            remote_busylock_release( lock_xp );
[436]2595
[625]2596// a KSH process must exist for each user TXT channel
2597assert( (found == true), "KSH process not found for TXT%d", txt_id );
[436]2598
2599        }
[625]2600        else                                           // target process is KSH
[436]2601        {
[625]2602            // get lock
2603            remote_busylock_acquire( lock_xp );
2604
[436]2605            // scan attached process list to find another process
[625]2606            found = false;
2607            for( iter_xp = hal_remote_l64( root_xp ) ;
2608                 (iter_xp != root_xp) && (found == false) ;
2609                 iter_xp = hal_remote_l64( iter_xp ) )
[428]2610            {
[436]2611                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2612
2613                if( current_xp != process_xp )            // current is not KSH
2614                {
2615                    // set owner field in TXT chdev
[564]2616                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2617
[446]2618#if DEBUG_PROCESS_TXT
[610]2619cycle  = (uint32_t)hal_get_cycles();
[625]2620cxy_t       current_cxy = GET_CXY( current_xp );
2621process_t * current_ptr = GET_PTR( current_xp );
2622uint32_t    new_pid     = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
[446]2623if( DEBUG_PROCESS_TXT < cycle )
[625]2624printk("\n[%s] thread[%x,%x] transferred TXT%d ownership to process %x / cycle %d\n",
[610]2625__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
[436]2626#endif
[625]2627                    found = true;
[436]2628                }
[428]2629            }
[436]2630
2631            // release lock
[564]2632            remote_busylock_release( lock_xp );
[436]2633
2634            // no more owner for TXT if no other process found
[625]2635            if( found == false )
2636            {
2637                // set owner field in TXT chdev
2638                hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
[436]2639
[446]2640#if DEBUG_PROCESS_TXT
[436]2641cycle = (uint32_t)hal_get_cycles();
[446]2642if( DEBUG_PROCESS_TXT < cycle )
[625]2643printk("\n[%s] thread[%x,%x] released TXT%d (no attached process) / cycle %d\n",
[610]2644__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
[436]2645#endif
[625]2646            }
[428]2647        }
[436]2648    }
2649    else
2650    {
[433]2651
[446]2652#if DEBUG_PROCESS_TXT
[436]2653cycle = (uint32_t)hal_get_cycles();
[446]2654if( DEBUG_PROCESS_TXT < cycle )
[625]2655printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
2656__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
[436]2657#endif
2658
[428]2659    }
[625]2660
[436]2661}  // end process_txt_transfer_ownership()
[428]2662
2663
[564]2664////////////////////////////////////////////////
2665bool_t process_txt_is_owner( xptr_t process_xp )
[457]2666{
2667    // get local pointer and cluster of process in owner cluster
2668    cxy_t       process_cxy = GET_CXY( process_xp );
2669    process_t * process_ptr = GET_PTR( process_xp );
2670
[564]2671// check calling thread execute in target process owner cluster
2672pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2673assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[624]2674"process descriptor not in owner cluster" );
[457]2675
2676    // get extended pointer on stdin pseudo file
[564]2677    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[457]2678
2679    // get pointers on TXT chdev
2680    xptr_t    txt_xp  = chdev_from_file( file_xp );
2681    cxy_t     txt_cxy = GET_CXY( txt_xp );
2682    chdev_t * txt_ptr = GET_PTR( txt_xp );
2683
2684    // get extended pointer on TXT_RX owner process
[564]2685    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[457]2686
2687    return (process_xp == owner_xp);
2688
2689}   // end process_txt_is_owner()
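
//////////////////////////////////////////////////////////////////////////////////////
// Hedged example: the example_is_foreground() helper below is hypothetical and only
// sketches how a terminal-related service could check whether the calling thread
// belongs to the foreground (TXT owner) process, by passing the owner process
// extended pointer to process_txt_is_owner().
//////////////////////////////////////////////////////////////////////////////////////
static inline bool_t example_is_foreground( void )
{
    // the argument must be the extended pointer on the owner process descriptor
    return process_txt_is_owner( CURRENT_THREAD->process->owner_xp );
}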
2690
[436]2691////////////////////////////////////////////////     
2692xptr_t process_txt_get_owner( uint32_t channel )
[435]2693{
2694    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
2695    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
2696    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
2697
[564]2698    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
[435]2699
[457]2700}  // end process_txt_get_owner()
2701
[435]2702///////////////////////////////////////////
2703void process_txt_display( uint32_t txt_id )
2704{
2705    xptr_t      chdev_xp;
2706    cxy_t       chdev_cxy;
2707    chdev_t   * chdev_ptr;
2708    xptr_t      root_xp;
2709    xptr_t      lock_xp;
2710    xptr_t      current_xp;
2711    xptr_t      iter_xp;
[443]2712    cxy_t       txt0_cxy;
2713    chdev_t   * txt0_ptr;
2714    xptr_t      txt0_xp;
2715    xptr_t      txt0_lock_xp;
2716   
[435]2717    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
[492]2718    "illegal TXT terminal index" );
[435]2719
[443]2720    // get pointers on TXT0 chdev
2721    txt0_xp  = chdev_dir.txt_tx[0];
2722    txt0_cxy = GET_CXY( txt0_xp );
2723    txt0_ptr = GET_PTR( txt0_xp );
2724
2725    // get extended pointer on TXT0 lock
2726    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
2727
[435]2728    // get pointers on TXT_RX[txt_id] chdev
2729    chdev_xp  = chdev_dir.txt_rx[txt_id];
2730    chdev_cxy = GET_CXY( chdev_xp );
2731    chdev_ptr = GET_PTR( chdev_xp );
2732
2733    // get extended pointer on root & lock of attached process list
2734    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2735    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2736
[443]2737    // get lock on attached process list
[564]2738    remote_busylock_acquire( lock_xp );
[443]2739
2740    // get TXT0 lock in busy waiting mode
[564]2741    remote_busylock_acquire( txt0_lock_xp );
[443]2742
[435]2743    // display header
[443]2744    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
2745    txt_id , (uint32_t)hal_get_cycles() );
[435]2746
[436]2747    // scan attached process list
[435]2748    XLIST_FOREACH( root_xp , iter_xp )
2749    {
2750        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2751        process_display( current_xp );
2752    }
2753
[443]2754    // release TXT0 lock in busy waiting mode
[564]2755    remote_busylock_release( txt0_lock_xp );
[443]2756
2757    // release lock on attached process list
[564]2758    remote_busylock_release( lock_xp );
[435]2759
2760}  // end process_txt_display