source: trunk/kernel/kern/process.c @ 630

Last change on this file since 630 was 629, checked in by alain, 6 years ago

Remove the "giant" rwlock protecting the GPT, and
use the GPT_LOCKED attribute in each PTE to prevent
concurrent modifications of one GPT entry.
The version number has been incremented to 2.1.

File size: 83.4 KB
RevLine 
[1]1/*
[564]2 * process.c - process related functions definition.
[172]3 *
[1]4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
[618]6 *          Alain Greiner (2016,2017,2018,2019)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
[409]10 * This file is part of ALMOS-MKH.
[1]11 *
[172]12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
[172]16 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
[172]22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[457]27#include <hal_kernel_types.h>
[1]28#include <hal_remote.h>
29#include <hal_uspace.h>
[409]30#include <hal_irqmask.h>
[623]31#include <hal_vmm.h>
[1]32#include <errno.h>
33#include <printk.h>
34#include <memcpy.h>
35#include <bits.h>
36#include <kmem.h>
37#include <page.h>
38#include <vmm.h>
39#include <vfs.h>
40#include <core.h>
41#include <thread.h>
[428]42#include <chdev.h>
[1]43#include <list.h>
[407]44#include <string.h>
[1]45#include <scheduler.h>
[564]46#include <busylock.h>
47#include <queuelock.h>
48#include <remote_queuelock.h>
49#include <rwlock.h>
50#include <remote_rwlock.h>
[1]51#include <dqdt.h>
52#include <cluster.h>
53#include <ppm.h>
54#include <boot_info.h>
55#include <process.h>
56#include <elf.h>
[23]57#include <syscalls.h>
[435]58#include <shared_syscalls.h>
[1]59
60//////////////////////////////////////////////////////////////////////////////////////////
61// Extern global variables
62//////////////////////////////////////////////////////////////////////////////////////////
63
[428]64extern process_t           process_zero;     // allocated in kernel_init.c
65extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
[1]66
67//////////////////////////////////////////////////////////////////////////////////////////
68// Process initialisation related functions
69//////////////////////////////////////////////////////////////////////////////////////////
70
[583]71/////////////////////////////////
[503]72process_t * process_alloc( void )
[1]73{
74        kmem_req_t   req;
75
76    req.type  = KMEM_PROCESS;
77        req.size  = sizeof(process_t);
78        req.flags = AF_KERNEL;
79
80    return (process_t *)kmem_alloc( &req );
81}
82
83////////////////////////////////////////
84void process_free( process_t * process )
85{
86    kmem_req_t  req;
87
88        req.type = KMEM_PROCESS;
89        req.ptr  = process;
90        kmem_free( &req );
91}
92
[625]93////////////////////////////////////////////////////
94error_t process_reference_init( process_t * process,
95                                pid_t       pid,
96                                xptr_t      parent_xp )
[1]97{
[625]98    error_t     error;
[610]99    xptr_t      process_xp;
[428]100    cxy_t       parent_cxy;
101    process_t * parent_ptr;
[407]102    xptr_t      stdin_xp;
103    xptr_t      stdout_xp;
104    xptr_t      stderr_xp;
105    uint32_t    stdin_id;
106    uint32_t    stdout_id;
107    uint32_t    stderr_id;
[428]108    uint32_t    txt_id;
109    char        rx_path[40];
110    char        tx_path[40];
[440]111    xptr_t      file_xp;
[428]112    xptr_t      chdev_xp;
[625]113    chdev_t   * chdev_ptr;
[428]114    cxy_t       chdev_cxy;
115    pid_t       parent_pid;
[625]116    vmm_t     * vmm;
[1]117
[610]118    // build extended pointer on this reference process
119    process_xp = XPTR( local_cxy , process );
120
[625]121    // get pointer on process vmm
122    vmm = &process->vmm;
123
[428]124    // get parent process cluster and local pointer
125    parent_cxy = GET_CXY( parent_xp );
[435]126    parent_ptr = GET_PTR( parent_xp );
[204]127
[457]128    // get parent_pid
[564]129    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]130
[438]131#if DEBUG_PROCESS_REFERENCE_INIT
[610]132thread_t * this = CURRENT_THREAD;
[433]133uint32_t cycle = (uint32_t)hal_get_cycles();
[610]134if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[625]135printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
136__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
[433]137#endif
[428]138
[610]139    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
[433]140        process->pid        = pid;
141    process->ref_xp     = XPTR( local_cxy , process );
[443]142    process->owner_xp   = XPTR( local_cxy , process );
[433]143    process->parent_xp  = parent_xp;
144    process->term_state = 0;
[428]145
[610]146    // initialize VFS root inode and CWD inode
147    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
148    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
149
[625]150    // initialize VSL as empty
151    vmm->vsegs_nr = 0;
152        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[564]153
[625]154    // create an empty GPT as required by the architecture
155    error = hal_gpt_create( &vmm->gpt );
156    if( error ) 
157    {
158        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
159        return -1;
160    }
161
162#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
163if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
164printk("\n[%s] thread[%x,%x] created empty GPT for process %x\n",
165__FUNCTION__, parent_pid, this->trdid, pid );
166#endif
167
[629]168    // initialize VSL locks
[625]169        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
170
171    // register kernel vsegs in VMM as required by the architecture
172    error = hal_vmm_kernel_update( process );
173    if( error ) 
174    {
175        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
176        return -1;
177    }
178
179#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
180if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
181printk("\n[%s] thread[%x,%x] registered kernel vsegs for process %x\n",
182__FUNCTION__, parent_pid, this->trdid, pid );
183#endif
184
185    // create "args" and "envs" vsegs
186    // create "stacks" and "mmap" vsegs allocators
187    // initialize locks protecting GPT and VSL
188    error = vmm_user_init( process );
189    if( error ) 
190    {
191        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
192        return -1;
193    }
[415]194 
[438]195#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]196cycle = (uint32_t)hal_get_cycles();
[610]197if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[625]198printk("\n[%s] thread[%x,%x] initialized vmm for process %x\n", 
199__FUNCTION__, parent_pid, this->trdid, pid );
[433]200#endif
[1]201
[409]202    // initialize fd_array as empty
[408]203    process_fd_init( process );
[1]204
[428]205    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
[581]206    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
[408]207    {
[581]208        // select a TXT channel
209        if( pid == 1 )  txt_id = 0;                     // INIT
210        else            txt_id = process_txt_alloc();   // KSH
[428]211
[457]212        // attach process to TXT
[428]213        process_txt_attach( process , txt_id ); 
214
[457]215#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
216cycle = (uint32_t)hal_get_cycles();
[610]217if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
218printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
219__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
[457]220#endif
[428]221        // build path to TXT_RX[i] and TXT_TX[i] chdevs
222        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
223        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
224
225        // create stdin pseudo file         
[610]226        error = vfs_open(  process->vfs_root_xp,
[428]227                           rx_path,
[610]228                           process_xp,
[408]229                           O_RDONLY, 
230                           0,                // FIXME chmod
231                           &stdin_xp, 
232                           &stdin_id );
[625]233        if( error )
234        {
235            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
236            return -1;
237        }
[1]238
[564]239assert( (stdin_id == 0) , "stdin index must be 0" );
[428]240
[440]241#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
242cycle = (uint32_t)hal_get_cycles();
[610]243if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
244printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
245__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]246#endif
247
[428]248        // create stdout pseudo file         
[610]249        error = vfs_open(  process->vfs_root_xp,
[428]250                           tx_path,
[610]251                           process_xp,
[408]252                           O_WRONLY, 
253                           0,                // FIXME chmod
254                           &stdout_xp, 
255                           &stdout_id );
[625]256        if( error )
257        {
258            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
259            return -1;
260        }
[1]261
[625]262assert( (stdout_id == 1) , "stdout index must be 1" );
[428]263
[440]264#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
265cycle = (uint32_t)hal_get_cycles();
[610]266if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
267printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
268__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]269#endif
270
[428]271        // create stderr pseudo file         
[610]272        error = vfs_open(  process->vfs_root_xp,
[428]273                           tx_path,
[610]274                           process_xp,
[408]275                           O_WRONLY, 
276                           0,                // FIXME chmod
277                           &stderr_xp, 
278                           &stderr_id );
[625]279        if( error )
280        {
281            printk("\n[ERROR] in %s : cannot open stderr pseudo-file\n", __FUNCTION__ );
282            return -1;
283        }
[428]284
[625]285assert( (stderr_id == 2) , "stderr index must be 2" );
[428]286
[440]287#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
288cycle = (uint32_t)hal_get_cycles();
[610]289if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
290printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
291__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]292#endif
293
[408]294    }
[428]295    else                                            // normal user process
[408]296    {
[457]297        // get extended pointer on stdin pseudo file in parent process
[625]298        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy,
299                                                &parent_ptr->fd_array.array[0] ) );
[440]300
[457]301        // get extended pointer on parent process TXT chdev
[440]302        chdev_xp = chdev_from_file( file_xp );
[428]303 
304        // get cluster and local pointer on chdev
305        chdev_cxy = GET_CXY( chdev_xp );
[435]306        chdev_ptr = GET_PTR( chdev_xp );
[428]307 
[564]308        // get parent process TXT terminal index
309        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[407]310
[564]311        // attach child process to parent process TXT terminal
[428]312        process_txt_attach( process , txt_id ); 
[407]313
[457]314        // copy all open files from parent process fd_array to this process
[428]315        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
[457]316                                XPTR( parent_cxy , &parent_ptr->fd_array ) );
[408]317    }
[407]318
[610]319    // initialize lock protecting CWD changes
[625]320    remote_busylock_init( XPTR( local_cxy , 
321                                &process->cwd_lock ), LOCK_PROCESS_CWD );
[408]322
[438]323#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]324cycle = (uint32_t)hal_get_cycles();
[610]325if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
326printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
327__FUNCTION__, parent_pid, this->trdid, pid , cycle );
[433]328#endif
[407]329
[408]330    // reset children list root
331    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
332    process->children_nr     = 0;
[625]333    remote_queuelock_init( XPTR( local_cxy,
334                                 &process->children_lock ), LOCK_PROCESS_CHILDREN );
[407]335
[611]336    // reset semaphore / mutex / barrier / condvar list roots and lock
[408]337    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
338    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
339    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
340    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
[625]341    remote_queuelock_init( XPTR( local_cxy , 
342                                 &process->sync_lock ), LOCK_PROCESS_USERSYNC );
[407]343
[611]344    // reset open directories root and lock
345    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
[625]346    remote_queuelock_init( XPTR( local_cxy , 
347                                 &process->dir_lock ), LOCK_PROCESS_DIR );
[611]348
[408]349    // register new process in the local cluster manager pref_tbl[]
350    lpid_t lpid = LPID_FROM_PID( pid );
351    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
[407]352
[408]353    // register new process descriptor in local cluster manager local_list
354    cluster_process_local_link( process );
[407]355
[408]356    // register new process descriptor in local cluster manager copies_list
357    cluster_process_copies_link( process );
[172]358
[564]359    // initialize th_tbl[] array and associated threads
[1]360    uint32_t i;
[564]361
362    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]363        {
364        process->th_tbl[i] = NULL;
365    }
366    process->th_nr  = 0;
[564]367    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[1]368
[124]369        hal_fence();
[1]370
[438]371#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]372cycle = (uint32_t)hal_get_cycles();
[610]373if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
374printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
375__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]376#endif
[101]377
[625]378    return 0;
379
[428]380}  // process_reference_init()
[204]381
[1]382/////////////////////////////////////////////////////
383error_t process_copy_init( process_t * local_process,
384                           xptr_t      reference_process_xp )
385{
[625]386    error_t   error;
387    vmm_t   * vmm;
[415]388
[23]389    // get reference process cluster and local pointer
390    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
[435]391    process_t * ref_ptr = GET_PTR( reference_process_xp );
[1]392
[625]393    // get pointer on process vmm
394    vmm = &local_process->vmm;
395
[428]396    // initialize PID, REF_XP, PARENT_XP, and STATE
[564]397    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
398    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
[433]399    local_process->ref_xp     = reference_process_xp;
[443]400    local_process->owner_xp   = reference_process_xp;
[433]401    local_process->term_state = 0;
[407]402
[564]403#if DEBUG_PROCESS_COPY_INIT
[610]404thread_t * this = CURRENT_THREAD; 
[433]405uint32_t cycle = (uint32_t)hal_get_cycles();
[610]406if( DEBUG_PROCESS_COPY_INIT < cycle )
407printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
408__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]409#endif
[407]410
[564]411// check user process
[625]412assert( (local_process->pid != 0), "LPID cannot be 0" );
[564]413
[625]414    // initialize VSL as empty
415    vmm->vsegs_nr = 0;
416        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[1]417
[625]418    // create an empty GPT as required by the architecture
419    error = hal_gpt_create( &vmm->gpt );
420    if( error ) 
421    {
422        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
423        return -1;
424    }
425
426    // initialize GPT and VSL locks
427        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
428
429    // register kernel vsegs in VMM as required by the architecture
430    error = hal_vmm_kernel_update( local_process );
431    if( error ) 
432    {
433        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
434        return -1;
435    }
436
437    // create "args" and "envs" vsegs
438    // create "stacks" and "mmap" vsegs allocators
439    // initialize locks protecting GPT and VSL
440    error = vmm_user_init( local_process );
441    if( error ) 
442    {
443        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
444        return -1;
445    }
446 
447#if (DEBUG_PROCESS_COPY_INIT & 1)
448cycle = (uint32_t)hal_get_cycles();
449if( DEBUG_PROCESS_COPY_INIT < cycle )
450printk("\n[%s] thread[%x,%x] initialized vmm for process %x / cycle %d\n", 
451__FUNCTION__, parent_pid, this->trdid, pid, cycle );
452#endif
453
454    // set process file descriptors array
[23]455        process_fd_init( local_process );
[1]456
[625]457    // set vfs_root_xp / vfs_bin_xp / cwd_xp fields
[564]458    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
459    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
[610]460    local_process->cwd_xp      = XPTR_NULL;
[1]461
462    // reset children list root (not used in a process descriptor copy)
463    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
[172]464    local_process->children_nr   = 0;
[564]465    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
466                           LOCK_PROCESS_CHILDREN );
[1]467
[428]468    // reset children_list (not used in a process descriptor copy)
469    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
[1]470
471    // reset semaphores list root (not used in a process descriptor copy)
472    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
[23]473    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
474    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
475    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
[1]476
[564]477    // initialize th_tbl[] array and associated fields
[1]478    uint32_t i;
[564]479    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]480        {
481        local_process->th_tbl[i] = NULL;
482    }
483    local_process->th_nr  = 0;
[564]484    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
[1]485
486    // register new process descriptor in local cluster manager local_list
487    cluster_process_local_link( local_process );
488
489    // register new process descriptor in owner cluster manager copies_list
490    cluster_process_copies_link( local_process );
491
[124]492        hal_fence();
[1]493
[438]494#if DEBUG_PROCESS_COPY_INIT
[433]495cycle = (uint32_t)hal_get_cycles();
[610]496if( DEBUG_PROCESS_COPY_INIT < cycle )
497printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
498__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]499#endif
[279]500
[1]501    return 0;
502
[204]503} // end process_copy_init()
504
////////////////////////////////////////////////////////////////////////////////////////
// Release all resources attached to the local <process> descriptor copy: the VMM,
// the links in the local and owner cluster managers, and — only when the local
// cluster is the process owner — the TXT attachment, the entry in the parent
// process children list, and the PID itself. The descriptor memory is finally
// released by process_free().
// The descriptor must not contain any thread anymore (checked by assert).
////////////////////////////////////////////////////////////////////////////////////////
void process_destroy( process_t * process )
{
    xptr_t      parent_xp;         // extended pointer on parent process
    process_t * parent_ptr;        // local pointer on parent process
    cxy_t       parent_cxy;        // parent process cluster identifier
    xptr_t      children_lock_xp;  // extended pointer on parent's children lock
    xptr_t      children_nr_xp;    // extended pointer on parent's children counter

    // capture pid before the descriptor is released
    pid_t       pid = process->pid;

// check no more threads
assert( (process->th_nr == 0),
"process %x in cluster %x contains threads", pid , local_cxy );

#if DEBUG_PROCESS_DESTROY
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

    // Destroy VMM (GPT, VSL, and all registered vsegs)
    vmm_destroy( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from local_list in local cluster manager
    cluster_process_local_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from copies_list in owner cluster manager
    cluster_process_copies_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // when target process cluster is the owner cluster
    // - remove process from TXT list and transfer ownership
    // - remove process from children_list
    // - release PID
    if( CXY_FROM_PID( pid ) == local_cxy )
    {
        process_txt_detach( XPTR( local_cxy , process ) );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from TXT list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // get pointers on parent process
        parent_xp  = process->parent_xp;
        parent_cxy = GET_CXY( parent_xp );
        parent_ptr = GET_PTR( parent_xp );

        // get extended pointer on children_lock in parent process
        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );

        // remove process from children_list, under the parent's children lock
        remote_queuelock_acquire( children_lock_xp );
        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
            hal_remote_atomic_add( children_nr_xp , -1 );
        remote_queuelock_release( children_lock_xp );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from parent process children list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // release the process PID to cluster manager
        cluster_pid_release( pid );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    }

    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]

    // FIXME close all open files [AG]

    // FIXME synchronize dirty files [AG]

    // release memory allocated to process descriptor
    process_free( process );

#if DEBUG_PROCESS_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

}  // end process_destroy()
618
[583]619///////////////////////////////////////////////////////////////////
[527]620const char * process_action_str( process_sigactions_t action_type )
[409]621{
[583]622    switch ( action_type )
623    {
624        case BLOCK_ALL_THREADS:   return "BLOCK";
625        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
626        case DELETE_ALL_THREADS:  return "DELETE";
627        default:                  return "undefined";
628    }
[409]629}
630
////////////////////////////////////////////////////////////////////////////////////////
// Apply an action (BLOCK / UNBLOCK / DELETE all threads) to ALL copies of the
// target user process identified by its <pid>.
// The calling (client) thread sends parallel non-blocking RPCs to every remote
// cluster holding a process copy, using a single RPC descriptor allocated in its
// own stack (legal because the RPC has no output arguments and the response
// counter is shared), then deschedules until the last RPC server unblocks it,
// and finally handles the local copy — if any — directly.
// Must be called by a thread that can yield (checked by thread_assert_can_yield).
////////////////////////////////////////////////////////////////////////////////////////
void process_sigaction( pid_t       pid,
                        uint32_t    type )
{
    cxy_t              owner_cxy;         // owner cluster identifier
    lpid_t             lpid;              // process index in owner cluster
    cluster_t        * cluster;           // pointer on cluster manager
    xptr_t             root_xp;           // extended pointer on root of copies
    xptr_t             lock_xp;           // extended pointer on lock protecting copies
    xptr_t             iter_xp;           // iterator on copies list
    xptr_t             process_xp;        // extended pointer on process copy
    cxy_t              process_cxy;       // process copy cluster identifier
    process_t        * process_ptr;       // local pointer on process copy
    reg_t              save_sr;           // for critical section
    thread_t         * client;            // pointer on client thread
    xptr_t             client_xp;         // extended pointer on client thread
    process_t        * local;             // pointer on process copy in local cluster
    uint32_t           remote_nr;         // number of remote process copies
    rpc_desc_t         rpc;               // shared RPC descriptor
    uint32_t           responses;         // shared RPC responses counter

    client    = CURRENT_THREAD;
    client_xp = XPTR( local_cxy , client );
    local     = NULL;
    remote_nr = 0;

    // check calling thread can yield
    thread_assert_can_yield( client , __FUNCTION__ );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
__FUNCTION__ , client->process->pid, client->trdid,
process_action_str( type ) , pid , cycle );
#endif

    // get pointer on local cluster manager
    cluster = LOCAL_CLUSTER;

    // get owner cluster identifier and process lpid
    owner_cxy = CXY_FROM_PID( pid );
    lpid      = LPID_FROM_PID( pid );

    // get root of list of copies and lock from owner cluster
    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

// check action type
assert( ((type == DELETE_ALL_THREADS ) ||
         (type == BLOCK_ALL_THREADS )  ||
         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
             
    // This client thread send parallel RPCs to all remote clusters containing
    // target process copies, wait all responses, and then handles directly
    // the threads in local cluster, when required.
    // The client thread allocates a - shared - RPC descriptor in the stack,
    // because all parallel, non-blocking, server threads use the same input
    // arguments, and use the shared RPC response field

    // mask IRQs (critical section: block + RPC dispatch must not be interrupted)
    hal_disable_irq( &save_sr);

    // client thread blocks itself (before sending RPCs, to avoid a race with
    // the unblock performed by the last responding server)
    thread_block( client_xp , THREAD_BLOCKED_RPC );

    // initialize RPC responses counter
    responses = 0;

    // initialize shared RPC descriptor
    // can be shared, because no out arguments
    rpc.rsp       = &responses;
    rpc.blocking  = false;
    rpc.index     = RPC_PROCESS_SIGACTION;
    rpc.thread    = client;
    rpc.lid       = client->core->lid;
    rpc.args[0]   = pid;
    rpc.args[1]   = type;

    // take the lock protecting process copies
    remote_queuelock_acquire( lock_xp );

    // scan list of process copies
    XLIST_FOREACH( root_xp , iter_xp )
    {
        // get extended pointers and cluster on process
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        process_cxy = GET_CXY( process_xp );
        process_ptr = GET_PTR( process_xp );

        if( process_cxy == local_cxy )    // process copy is local
        { 
            // remember it: handled directly after all RPCs are dispatched
            local = process_ptr;
        }
        else                              // process copy is remote
        {
            // update number of remote process copies
            remote_nr++;

            // atomically increment RPC responses counter
            hal_atomic_add( &responses , 1 );

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
#endif
            // call RPC in target cluster (non-blocking)
            rpc_send( process_cxy , &rpc );
        }
    }  // end list of copies

    // release the lock protecting process copies
    remote_queuelock_release( lock_xp );

    // restore IRQs
    hal_restore_irq( save_sr);

    // - if there is remote process copies, the client thread deschedules,
    //   (it will be unblocked by the last RPC server thread).
    // - if there is no remote copies, the client thread unblock itself.
    if( remote_nr )
    {
        sched_yield("blocked on rpc_process_sigaction");
    } 
    else
    {
        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
    }

    // handle the local process copy if required
    if( local != NULL )
    {

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
#endif
        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp ); 
        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local ); 
        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
__FUNCTION__, client->process->pid, client->trdid,
process_action_str( type ), pid, cycle );
#endif

}  // end process_sigaction()
784
[433]785/////////////////////////////////////////////////
[583]786void process_block_threads( process_t * process )
[1]787{
[409]788    thread_t          * target;         // pointer on target thread
[433]789    thread_t          * this;           // pointer on calling thread
[564]790    uint32_t            ltid;           // index in process th_tbl[]
[436]791    cxy_t               owner_cxy;      // target process owner cluster
[409]792    uint32_t            count;          // requests counter
[593]793    volatile uint32_t   ack_count;      // acknowledges counter
[1]794
[416]795    // get calling thread pointer
[433]796    this = CURRENT_THREAD;
[407]797
[438]798#if DEBUG_PROCESS_SIGACTION
[564]799pid_t pid = process->pid;
[433]800uint32_t cycle = (uint32_t)hal_get_cycles();
[438]801if( DEBUG_PROCESS_SIGACTION < cycle )
[593]802printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]803__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]804#endif
[409]805
[564]806// check target process is an user process
[619]807assert( (LPID_FROM_PID( process->pid ) != 0 ),
808"process %x is not an user process\n", process->pid );
[564]809
[610]810    // get target process owner cluster
[564]811    owner_cxy = CXY_FROM_PID( process->pid );
812
[409]813    // get lock protecting process th_tbl[]
[564]814    rwlock_rd_acquire( &process->th_lock );
[1]815
[440]816    // loop on target process local threads
[409]817    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[593]818    // - if the calling thread and the target thread are not running on the same
819    //   core, we ask the target scheduler to acknowlege the blocking
820    //   to be sure that the target thread is not running.
821    // - if the calling thread and the target thread are running on the same core,
822    //   we don't need confirmation from scheduler.
823           
[436]824    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
[1]825    {
[409]826        target = process->th_tbl[ltid];
[1]827
[436]828        if( target != NULL )                                 // thread exist
[1]829        {
830            count++;
[409]831
[583]832            // set the global blocked bit in target thread descriptor.
833            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[436]834 
[583]835            if( this->core->lid != target->core->lid )
836            {
837                // increment responses counter
838                hal_atomic_add( (void*)&ack_count , 1 );
[409]839
[583]840                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
841                thread_set_req_ack( target , (uint32_t *)&ack_count );
[409]842
[583]843                // force scheduling on target thread
844                dev_pic_send_ipi( local_cxy , target->core->lid );
[409]845            }
[1]846        }
[172]847    }
848
[428]849    // release lock protecting process th_tbl[]
[564]850    rwlock_rd_release( &process->th_lock );
[416]851
[593]852    // wait other threads acknowledges  TODO this could be improved...
[409]853    while( 1 )
854    {
[610]855        // exit when all scheduler acknowledges received
[436]856        if ( ack_count == 0 ) break;
[409]857   
858        // wait 1000 cycles before retry
859        hal_fixed_delay( 1000 );
860    }
[1]861
[438]862#if DEBUG_PROCESS_SIGACTION
[433]863cycle = (uint32_t)hal_get_cycles();
[438]864if( DEBUG_PROCESS_SIGACTION < cycle )
[593]865printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
866__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]867#endif
[409]868
[428]869}  // end process_block_threads()
[409]870
[440]871/////////////////////////////////////////////////
872void process_delete_threads( process_t * process,
873                             xptr_t      client_xp )
[409]874{
[433]875    thread_t          * this;          // pointer on calling thread
[440]876    thread_t          * target;        // local pointer on target thread
877    xptr_t              target_xp;     // extended pointer on target thread
878    cxy_t               owner_cxy;     // owner process cluster
[409]879    uint32_t            ltid;          // index in process th_tbl
[440]880    uint32_t            count;         // threads counter
[409]881
[433]882    // get calling thread pointer
883    this = CURRENT_THREAD;
[409]884
[440]885    // get target process owner cluster
886    owner_cxy = CXY_FROM_PID( process->pid );
887
[438]888#if DEBUG_PROCESS_SIGACTION
[433]889uint32_t cycle = (uint32_t)hal_get_cycles();
[438]890if( DEBUG_PROCESS_SIGACTION < cycle )
[625]891printk("\n[%s] thread[%x,%x] enter for process %x n cluster %x / cycle %d\n",
892__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
[433]893#endif
894
[564]895// check target process is an user process
[619]896assert( (LPID_FROM_PID( process->pid ) != 0),
897"process %x is not an user process\n", process->pid );
[564]898
[409]899    // get lock protecting process th_tbl[]
[583]900    rwlock_wr_acquire( &process->th_lock );
[409]901
[440]902    // loop on target process local threads                       
[416]903    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[440]904    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
[1]905    {
[409]906        target = process->th_tbl[ltid];
[1]907
[440]908        if( target != NULL )    // valid thread 
[1]909        {
[416]910            count++;
[440]911            target_xp = XPTR( local_cxy , target );
[1]912
[564]913            // main thread and client thread should not be deleted
[440]914            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
915                (client_xp) != target_xp )                           // not client thread
916            {
917                // mark target thread for delete and block it
918                thread_delete( target_xp , process->pid , false );   // not forced
919            }
[409]920        }
921    }
[1]922
[428]923    // release lock protecting process th_tbl[]
[583]924    rwlock_wr_release( &process->th_lock );
[407]925
[438]926#if DEBUG_PROCESS_SIGACTION
[433]927cycle = (uint32_t)hal_get_cycles();
[438]928if( DEBUG_PROCESS_SIGACTION < cycle )
[593]929printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
930__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
[433]931#endif
[407]932
[440]933}  // end process_delete_threads()
[409]934
[440]935///////////////////////////////////////////////////
936void process_unblock_threads( process_t * process )
[409]937{
[440]938    thread_t          * target;        // pointer on target thead
939    thread_t          * this;          // pointer on calling thread
[409]940    uint32_t            ltid;          // index in process th_tbl
[440]941    uint32_t            count;         // requests counter
[409]942
[440]943    // get calling thread pointer
944    this = CURRENT_THREAD;
945
[438]946#if DEBUG_PROCESS_SIGACTION
[564]947pid_t pid = process->pid;
[433]948uint32_t cycle = (uint32_t)hal_get_cycles();
[438]949if( DEBUG_PROCESS_SIGACTION < cycle )
[593]950printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]951__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]952#endif
953
[564]954// check target process is an user process
[619]955assert( ( LPID_FROM_PID( process->pid ) != 0 ),
956"process %x is not an user process\n", process->pid );
[564]957
[416]958    // get lock protecting process th_tbl[]
[564]959    rwlock_rd_acquire( &process->th_lock );
[416]960
[440]961    // loop on process threads to unblock all threads
[416]962    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[440]963    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
[409]964    {
[416]965        target = process->th_tbl[ltid];
[409]966
[440]967        if( target != NULL )             // thread found
[409]968        {
969            count++;
[440]970
971            // reset the global blocked bit in target thread descriptor.
972            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[1]973        }
974    }
975
[428]976    // release lock protecting process th_tbl[]
[564]977    rwlock_rd_release( &process->th_lock );
[407]978
[438]979#if DEBUG_PROCESS_SIGACTION
[433]980cycle = (uint32_t)hal_get_cycles();
[438]981if( DEBUG_PROCESS_SIGACTION < cycle )
[593]982printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
[583]983__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]984#endif
[1]985
[440]986}  // end process_unblock_threads()
[407]987
[1]988///////////////////////////////////////////////
989process_t * process_get_local_copy( pid_t pid )
990{
991    error_t        error;
[172]992    process_t    * process_ptr;   // local pointer on process
[23]993    xptr_t         process_xp;    // extended pointer on process
[1]994
995    cluster_t * cluster = LOCAL_CLUSTER;
996
[564]997#if DEBUG_PROCESS_GET_LOCAL_COPY
998thread_t * this = CURRENT_THREAD;
999uint32_t cycle = (uint32_t)hal_get_cycles();
1000if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]1001printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]1002__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[564]1003#endif
1004
[1]1005    // get lock protecting local list of processes
[564]1006    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]1007
1008    // scan the local list of process descriptors to find the process
[23]1009    xptr_t  iter;
1010    bool_t  found = false;
1011    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
[1]1012    {
[23]1013        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[435]1014        process_ptr = GET_PTR( process_xp );
[23]1015        if( process_ptr->pid == pid )
[1]1016        {
1017            found = true;
1018            break;
1019        }
1020    }
1021
1022    // release lock protecting local list of processes
[564]1023    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]1024
[172]1025    // allocate memory for a new local process descriptor
[440]1026    // and initialise it from reference cluster if not found
[1]1027    if( !found )
1028    {
1029        // get extended pointer on reference process descriptor
[23]1030        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
[1]1031
[492]1032        assert( (ref_xp != XPTR_NULL) , "illegal pid\n" );
[23]1033
[1]1034        // allocate memory for local process descriptor
[23]1035        process_ptr = process_alloc();
[443]1036
[23]1037        if( process_ptr == NULL )  return NULL;
[1]1038
1039        // initialize local process descriptor copy
[23]1040        error = process_copy_init( process_ptr , ref_xp );
[443]1041
[1]1042        if( error ) return NULL;
1043    }
1044
[440]1045#if DEBUG_PROCESS_GET_LOCAL_COPY
[564]1046cycle = (uint32_t)hal_get_cycles();
[440]1047if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]1048printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
[583]1049__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
[440]1050#endif
1051
[23]1052    return process_ptr;
[1]1053
[409]1054}  // end process_get_local_copy()
1055
[436]1056////////////////////////////////////////////
1057pid_t process_get_ppid( xptr_t  process_xp )
1058{
1059    cxy_t       process_cxy;
1060    process_t * process_ptr;
1061    xptr_t      parent_xp;
1062    cxy_t       parent_cxy;
1063    process_t * parent_ptr;
1064
1065    // get process cluster and local pointer
1066    process_cxy = GET_CXY( process_xp );
1067    process_ptr = GET_PTR( process_xp );
1068
1069    // get pointers on parent process
[564]1070    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[436]1071    parent_cxy = GET_CXY( parent_xp );
1072    parent_ptr = GET_PTR( parent_xp );
1073
[564]1074    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[436]1075}
1076
[1]1077//////////////////////////////////////////////////////////////////////////////////////////
1078// File descriptor array related functions
1079//////////////////////////////////////////////////////////////////////////////////////////
1080
1081///////////////////////////////////////////
1082void process_fd_init( process_t * process )
1083{
1084    uint32_t fd;
1085
[610]1086    // initialize lock
[564]1087    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
[1]1088
[610]1089    // initialize number of open files
[23]1090    process->fd_array.current = 0;
1091
[1]1092    // initialize array
[23]1093    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1094    {
1095        process->fd_array.array[fd] = XPTR_NULL;
1096    }
1097}
[610]1098////////////////////////////////////////////////////
1099error_t process_fd_register( xptr_t      process_xp,
[407]1100                             xptr_t      file_xp,
1101                             uint32_t  * fdid )
[1]1102{
1103    bool_t    found;
[23]1104    uint32_t  id;
1105    xptr_t    xp;
[1]1106
[23]1107    // get reference process cluster and local pointer
[610]1108    process_t * process_ptr = GET_PTR( process_xp );
1109    cxy_t       process_cxy = GET_CXY( process_xp );
[23]1110
[610]1111// check client process is reference process
1112assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp ) ) ),
1113"client process must be reference process\n" );
1114
1115#if DEBUG_PROCESS_FD_REGISTER
1116thread_t * this  = CURRENT_THREAD;
1117uint32_t   cycle = (uint32_t)hal_get_cycles();
1118pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1119if( DEBUG_PROCESS_FD_REGISTER < cycle )
1120printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
1121__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1122#endif
1123
1124    // build extended pointer on lock protecting reference fd_array
1125    xptr_t lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1126
[23]1127    // take lock protecting reference fd_array
[610]1128        remote_queuelock_acquire( lock_xp );
[23]1129
[1]1130    found   = false;
1131
[23]1132    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
[1]1133    {
[610]1134        xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
[23]1135        if ( xp == XPTR_NULL )
[1]1136        {
[564]1137            // update reference fd_array
[610]1138            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
1139                hal_remote_atomic_add( XPTR( process_cxy , &process_ptr->fd_array.current ) , 1 );
[564]1140
1141            // exit
1142                        *fdid = id;
[1]1143            found = true;
1144            break;
1145        }
1146    }
1147
[610]1148    // release lock protecting fd_array
1149        remote_queuelock_release( lock_xp );
[1]1150
[610]1151#if DEBUG_PROCESS_FD_REGISTER
1152cycle = (uint32_t)hal_get_cycles();
1153if( DEBUG_PROCESS_FD_REGISTER < cycle )
1154printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
1155__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
1156#endif
1157
[428]1158    if ( !found ) return -1;
[1]1159    else          return 0;
1160
[610]1161}  // end process_fd_register()
1162
[172]1163////////////////////////////////////////////////
[23]1164xptr_t process_fd_get_xptr( process_t * process,
[407]1165                            uint32_t    fdid )
[1]1166{
[23]1167    xptr_t  file_xp;
[564]1168    xptr_t  lock_xp;
[1]1169
[23]1170    // access local copy of process descriptor
[407]1171    file_xp = process->fd_array.array[fdid];
[1]1172
[23]1173    if( file_xp == XPTR_NULL )
1174    {
1175        // get reference process cluster and local pointer
1176        xptr_t      ref_xp  = process->ref_xp;
1177        cxy_t       ref_cxy = GET_CXY( ref_xp );
[435]1178        process_t * ref_ptr = GET_PTR( ref_xp );
[1]1179
[564]1180        // build extended pointer on lock protecting reference fd_array
1181        lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock );
1182
1183        // take lock protecting reference fd_array
1184            remote_queuelock_acquire( lock_xp );
1185
[23]1186        // access reference process descriptor
[564]1187        file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
[1]1188
[23]1189        // update local fd_array if found
[564]1190        if( file_xp != XPTR_NULL )  process->fd_array.array[fdid] = file_xp;
1191       
1192        // release lock protecting reference fd_array
1193            remote_queuelock_release( lock_xp );
[23]1194    }
[1]1195
[23]1196    return file_xp;
[1]1197
[407]1198}  // end process_fd_get_xptr()
1199
[1]1200///////////////////////////////////////////
1201void process_fd_remote_copy( xptr_t dst_xp,
1202                             xptr_t src_xp )
1203{
1204    uint32_t fd;
1205    xptr_t   entry;
1206
1207    // get cluster and local pointer for src fd_array
1208    cxy_t        src_cxy = GET_CXY( src_xp );
[435]1209    fd_array_t * src_ptr = GET_PTR( src_xp );
[1]1210
1211    // get cluster and local pointer for dst fd_array
1212    cxy_t        dst_cxy = GET_CXY( dst_xp );
[435]1213    fd_array_t * dst_ptr = GET_PTR( dst_xp );
[1]1214
1215    // get the remote lock protecting the src fd_array
[564]1216        remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );
[1]1217
[428]1218    // loop on all fd_array entries
1219    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1220        {
[564]1221                entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );
[1]1222
1223                if( entry != XPTR_NULL )
1224                {
[459]1225            // increment file descriptor refcount
[1]1226            vfs_file_count_up( entry );
1227
1228                        // copy entry in destination process fd_array
[564]1229                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
[1]1230                }
1231        }
1232
1233    // release lock on source process fd_array
[564]1234        remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );
[1]1235
[407]1236}  // end process_fd_remote_copy()
1237
[564]1238
1239////////////////////////////////////
1240bool_t process_fd_array_full( void )
1241{
1242    // get extended pointer on reference process
1243    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
1244
1245    // get reference process cluster and local pointer
1246    process_t * ref_ptr = GET_PTR( ref_xp );
1247    cxy_t       ref_cxy = GET_CXY( ref_xp );
1248
1249    // get number of open file descriptors from reference fd_array
1250    uint32_t current = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
1251
1252        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
1253}
1254
1255
[1]1256////////////////////////////////////////////////////////////////////////////////////
1257//  Thread related functions
1258////////////////////////////////////////////////////////////////////////////////////
1259
1260/////////////////////////////////////////////////////
1261error_t process_register_thread( process_t * process,
1262                                 thread_t  * thread,
1263                                 trdid_t   * trdid )
1264{
[472]1265    ltid_t         ltid;
1266    bool_t         found = false;
1267 
[564]1268// check arguments
1269assert( (process != NULL) , "process argument is NULL" );
1270assert( (thread != NULL) , "thread argument is NULL" );
[1]1271
[564]1272    // get the lock protecting th_tbl for all threads
1273    // but the idle thread executing kernel_init (cannot yield)
1274    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
[1]1275
[583]1276    // scan th_tbl
[564]1277    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
[1]1278    {
1279        if( process->th_tbl[ltid] == NULL )
1280        {
1281            found = true;
1282            break;
1283        }
1284    }
1285
1286    if( found )
1287    {
1288        // register thread in th_tbl[]
1289        process->th_tbl[ltid] = thread;
1290        process->th_nr++;
1291
1292        // returns trdid
1293        *trdid = TRDID( local_cxy , ltid );
1294    }
1295
[583]1296    // release the lock protecting th_tbl
[564]1297    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
[428]1298
[564]1299    return (found) ? 0 : 0xFFFFFFFF;
[204]1300
1301}  // end process_register_thread()
1302
[625]1303///////////////////////////////////////////////////
1304uint32_t process_remove_thread( thread_t * thread )
[1]1305{
[443]1306    uint32_t count;  // number of threads in local process descriptor
1307
[625]1308// check thread
1309assert( (thread != NULL) , "thread argument is NULL" );
1310
[1]1311    process_t * process = thread->process;
1312
1313    // get thread local index
1314    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
[564]1315   
1316    // get the lock protecting th_tbl[]
1317    rwlock_wr_acquire( &process->th_lock );
[428]1318
[583]1319    // get number of threads
[443]1320    count = process->th_nr;
[428]1321
[564]1322// check th_nr value
[624]1323assert( (count > 0) , "process th_nr cannot be 0" );
[443]1324
[1]1325    // remove thread from th_tbl[]
1326    process->th_tbl[ltid] = NULL;
[450]1327    process->th_nr = count-1;
[1]1328
[583]1329    // release lock protecting th_tbl
[564]1330    rwlock_wr_release( &process->th_lock );
[428]1331
[625]1332    return count;
[443]1333
[450]1334}  // end process_remove_thread()
[204]1335
[408]1336/////////////////////////////////////////////////////////
1337error_t process_make_fork( xptr_t      parent_process_xp,
1338                           xptr_t      parent_thread_xp,
1339                           pid_t     * child_pid,
1340                           thread_t ** child_thread )
[1]1341{
[408]1342    process_t * process;         // local pointer on child process descriptor
1343    thread_t  * thread;          // local pointer on child thread descriptor
1344    pid_t       new_pid;         // process identifier for child process
1345    pid_t       parent_pid;      // process identifier for parent process
1346    xptr_t      ref_xp;          // extended pointer on reference process
[428]1347    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
[408]1348    error_t     error;
[1]1349
[408]1350    // get cluster and local pointer for parent process
1351    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
[435]1352    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
[101]1353
[428]1354    // get parent process PID and extended pointer on .elf file
[564]1355    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1356    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
[428]1357
[564]1358    // get extended pointer on reference process
1359    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
[438]1360
[564]1361// check parent process is the reference process
1362assert( (parent_process_xp == ref_xp ) ,
[624]1363"parent process must be the reference process" );
[407]1364
[438]1365#if DEBUG_PROCESS_MAKE_FORK
[583]1366uint32_t cycle   = (uint32_t)hal_get_cycles();
1367thread_t * this  = CURRENT_THREAD;
1368trdid_t    trdid = this->trdid;
1369pid_t      pid   = this->process->pid;
[438]1370if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1371printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
[583]1372__FUNCTION__, pid, trdid, local_cxy, cycle );
[433]1373#endif
[172]1374
[408]1375    // allocate a process descriptor
1376    process = process_alloc();
1377    if( process == NULL )
1378    {
1379        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1380        __FUNCTION__, local_cxy ); 
1381        return -1;
1382    }
[1]1383
[408]1384    // allocate a child PID from local cluster
[416]1385    error = cluster_pid_alloc( process , &new_pid );
[428]1386    if( error ) 
[1]1387    {
[408]1388        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1389        __FUNCTION__, local_cxy ); 
1390        process_free( process );
1391        return -1;
[1]1392    }
[408]1393
[469]1394#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[457]1395cycle = (uint32_t)hal_get_cycles();
1396if( DEBUG_PROCESS_MAKE_FORK < cycle )
[625]1397printk("\n[%s] thread[%x,%x] allocated child_process %x / cycle %d\n",
[583]1398__FUNCTION__, pid, trdid, new_pid, cycle );
[457]1399#endif
1400
[408]1401    // initializes child process descriptor from parent process descriptor
[625]1402    error = process_reference_init( process,
1403                                    new_pid,
1404                                    parent_process_xp );
1405    if( error ) 
1406    {
1407        printk("\n[ERROR] in %s : cannot initialize child process in cluster %x\n", 
1408        __FUNCTION__, local_cxy ); 
1409        process_free( process );
1410        return -1;
1411    }
[408]1412
[438]1413#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1414cycle = (uint32_t)hal_get_cycles();
[438]1415if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1416printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
[583]1417__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1418#endif
[408]1419
1420    // copy VMM from parent descriptor to child descriptor
1421    error = vmm_fork_copy( process,
1422                           parent_process_xp );
1423    if( error )
[101]1424    {
[408]1425        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1426        __FUNCTION__, local_cxy ); 
1427        process_free( process );
1428        cluster_pid_release( new_pid );
1429        return -1;
[101]1430    }
[172]1431
[438]1432#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1433cycle = (uint32_t)hal_get_cycles();
[438]1434if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1435printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
[583]1436__FUNCTION__, pid, trdid, cycle );
[433]1437#endif
[407]1438
[564]1439    // if parent_process is INIT, or if parent_process is the TXT owner,
1440    // the child_process becomes the owner of its TXT terminal
1441    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
[457]1442    {
1443        process_txt_set_ownership( XPTR( local_cxy , process ) );
1444
1445#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1446cycle = (uint32_t)hal_get_cycles();
[626]1447if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1448printk("\n[%s] thread[%x,%x] / child takes TXT ownership / cycle %d\n",
[583]1449__FUNCTION__ , pid, trdid, cycle );
[457]1450#endif
1451
1452    }
1453
[428]1454    // update extended pointer on .elf file
1455    process->vfs_bin_xp = vfs_bin_xp;
1456
[408]1457    // create child thread descriptor from parent thread descriptor
1458    error = thread_user_fork( parent_thread_xp,
1459                              process,
1460                              &thread );
1461    if( error )
1462    {
1463        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1464        __FUNCTION__, local_cxy ); 
1465        process_free( process );
1466        cluster_pid_release( new_pid );
1467        return -1;
1468    }
[172]1469
[564]1470// check main thread LTID
1471assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
[624]1472"main thread must have LTID == 0" );
[428]1473
[564]1474#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1475cycle = (uint32_t)hal_get_cycles();
[438]1476if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1477printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
[583]1478__FUNCTION__, pid, trdid, thread, cycle );
[433]1479#endif
[1]1480
[625]1481    // set COW flag in DATA, ANON, REMOTE vsegs for parent process VMM
[629]1482    // this includes all parent process copies in all clusters
[408]1483    if( parent_process_cxy == local_cxy )   // reference is local
1484    {
1485        vmm_set_cow( parent_process_ptr );
1486    }
1487    else                                    // reference is remote
1488    {
1489        rpc_vmm_set_cow_client( parent_process_cxy,
1490                                parent_process_ptr );
1491    }
[1]1492
[625]1493    // set COW flag in DATA, ANON, REMOTE vsegs for child process VMM
[433]1494    vmm_set_cow( process );
1495 
[438]1496#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1497cycle = (uint32_t)hal_get_cycles();
[438]1498if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1499printk("\n[%s] thread[%x,%x] set COW in parent and child / cycle %d\n",
[583]1500__FUNCTION__, pid, trdid, cycle );
[433]1501#endif
[101]1502
[428]1503    // get extended pointers on parent children_root, children_lock and children_nr
1504    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1505    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1506    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
[101]1507
[428]1508    // register process in parent children list
[564]1509    remote_queuelock_acquire( children_lock_xp );
[428]1510        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1511        hal_remote_atomic_add( children_nr_xp , 1 );
[564]1512    remote_queuelock_release( children_lock_xp );
[204]1513
[408]1514    // return success
1515    *child_thread = thread;
1516    *child_pid    = new_pid;
[1]1517
[438]1518#if DEBUG_PROCESS_MAKE_FORK
[433]1519cycle = (uint32_t)hal_get_cycles();
[438]1520if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1521printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
[583]1522__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1523#endif
[428]1524
[408]1525    return 0;
1526
[416]1527}   // end process_make_fork()
[408]1528
1529/////////////////////////////////////////////////////
1530error_t process_make_exec( exec_info_t  * exec_info )
1531{
[457]1532    thread_t       * thread;                  // local pointer on this thread
1533    process_t      * process;                 // local pointer on this process
1534    pid_t            pid;                     // this process identifier
[610]1535    xptr_t           ref_xp;                  // reference process for this process
[441]1536        error_t          error;                   // value returned by called functions
[457]1537    char           * path;                    // path to .elf file
1538    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
1539    uint32_t         file_id;                 // file index in fd_array
1540    uint32_t         args_nr;                 // number of main thread arguments
1541    char          ** args_pointers;           // array of pointers on main thread arguments
[446]1542
[625]1543    // get calling thread, process, pid and ref_xp
[457]1544    thread  = CURRENT_THREAD;
1545    process = thread->process;
1546    pid     = process->pid;
[610]1547    ref_xp  = process->ref_xp;
[408]1548
[457]1549        // get relevant infos from exec_info
1550        path          = exec_info->path;
1551    args_nr       = exec_info->args_nr;
1552    args_pointers = exec_info->args_pointers;
[408]1553
[438]1554#if DEBUG_PROCESS_MAKE_EXEC
[433]1555uint32_t cycle = (uint32_t)hal_get_cycles();
[626]1556if( local_cxy == 0x11 )
[593]1557printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n",
[583]1558__FUNCTION__, pid, thread->trdid, path, cycle );
[433]1559#endif
[408]1560
[457]1561    // open the file identified by <path>
1562    file_xp = XPTR_NULL;
[564]1563    file_id = 0xFFFFFFFF;
[610]1564        error   = vfs_open( process->vfs_root_xp,
[457]1565                            path,
[610]1566                        ref_xp,
[457]1567                            O_RDONLY,
1568                            0,
1569                            &file_xp,
1570                            &file_id );
1571        if( error )
1572        {
1573                printk("\n[ERROR] in %s : failed to open file <%s>\n", __FUNCTION__ , path );
1574                return -1;
1575        }
1576
[446]1577#if (DEBUG_PROCESS_MAKE_EXEC & 1)
[469]1578cycle = (uint32_t)hal_get_cycles();
[626]1579if( local_cxy == 0x11 )
[593]1580printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n",
[583]1581__FUNCTION__, pid, thread->trdid, path, cycle );
[446]1582#endif
1583
[457]1584    // delete all threads other than this main thread in all clusters
1585    process_sigaction( pid , DELETE_ALL_THREADS );
[446]1586
[469]1587#if (DEBUG_PROCESS_MAKE_EXEC & 1)
1588cycle = (uint32_t)hal_get_cycles();
[626]1589if( local_cxy == 0x11 )
[625]1590printk("\n[%s] thread[%x,%x] deleted existing threads / cycle %d\n",
[583]1591__FUNCTION__, pid, thread->trdid, cycle );
[469]1592#endif
1593
[625]1594    // reset calling process VMM
1595    vmm_user_reset( process );
[446]1596
[457]1597#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1598cycle = (uint32_t)hal_get_cycles();
[626]1599if( local_cxy == 0x11 )
[625]1600printk("\n[%s] thread[%x,%x] completed VMM reset / cycle %d\n",
[583]1601__FUNCTION__, pid, thread->trdid, cycle );
[457]1602#endif
[408]1603
[625]1604    // re-initialize the VMM (args/envs vsegs registration)
1605    error = vmm_user_init( process );
[457]1606    if( error )
[416]1607    {
[457]1608        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
1609        vfs_close( file_xp , file_id );
[623]1610        // FIXME restore old process VMM [AG]
[416]1611        return -1;
1612    }
[457]1613   
[438]1614#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1615cycle = (uint32_t)hal_get_cycles();
[626]1616if( local_cxy == 0x11 )
[625]1617printk("\n[%s] thread[%x,%x] registered args/envs vsegs / cycle %d\n",
[583]1618__FUNCTION__, pid, thread->trdid, cycle );
[433]1619#endif
[428]1620
[457]1621    // register code & data vsegs as well as entry-point in process VMM,
[428]1622    // and register extended pointer on .elf file in process descriptor
[457]1623        error = elf_load_process( file_xp , process );
[441]1624    if( error )
[1]1625        {
[441]1626                printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
[457]1627        vfs_close( file_xp , file_id );
[623]1628        // FIXME restore old process VMM [AG]
[408]1629        return -1;
[1]1630        }
1631
[438]1632#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1633cycle = (uint32_t)hal_get_cycles();
[626]1634if( local_cxy == 0x11 )
[625]1635printk("\n[%s] thread[%x,%x] registered code/data vsegs / cycle %d\n",
[583]1636__FUNCTION__, pid, thread->trdid, cycle );
[433]1637#endif
[1]1638
[457]1639    // update the existing main thread descriptor... and jump to user code
1640    error = thread_user_exec( (void *)process->vmm.entry_point,
1641                              args_nr,
1642                              args_pointers );
1643    if( error )
1644    {
[469]1645        printk("\n[ERROR] in %s : cannot update main thread for %s\n", __FUNCTION__ , path );
[457]1646        vfs_close( file_xp , file_id );
1647        // FIXME restore old process VMM
[408]1648        return -1;
[457]1649    }
[1]1650
[492]1651    assert( false, "we should not execute this code");
[457]1652 
[409]1653        return 0;
1654
1655}  // end process_make_exec()
1656
[457]1657
[623]1658////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// This function initializes the kernel "process_zero" descriptor in the local
// cluster at boot time: it allocates LPID 0, makes the process its own parent /
// reference / owner, builds an empty VSL and GPT, creates the kernel vsegs
// required by the hardware architecture, resets the thread table and children
// list, and registers the process in the local cluster manager.
// Any failure is fatal (panic + core sleep), as the kernel cannot run without it.
///////////////////////////////////////////////////////////////////////////////////////
void process_zero_create( process_t   * process,
                          boot_info_t * info )
{
    error_t error;
    pid_t   pid;

#if DEBUG_PROCESS_ZERO_CREATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_ZERO_CREATE < cycle )
printk("\n[%s] enter / cluster %x / cycle %d\n",
__FUNCTION__, local_cxy, cycle );
#endif

    // get pointer on VMM
    vmm_t * vmm = &process->vmm;

    // get PID from local cluster manager for this kernel process
    // the kernel process must receive LPID 0 in its cluster
    error = cluster_pid_alloc( process , &pid );

    if( error || (LPID_FROM_PID( pid ) != 0) )
    {
        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
        __FUNCTION__ , local_cxy, pid );
        hal_core_sleep();
    }

    // initialize PID, REF_XP, PARENT_XP, and STATE
    // the kernel process_zero is its own parent_process,
    // reference_process, and owner_process, and cannot be killed...
    process->pid        = pid;
    process->ref_xp     = XPTR( local_cxy , process );
    process->owner_xp   = XPTR( local_cxy , process );
    process->parent_xp  = XPTR( local_cxy , process );
    process->term_state = 0;

    // initialize VSL (list of registered vsegs) as empty
    vmm->vsegs_nr = 0;
	xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );

    // initialize GPT (generic page table) as empty
    error = hal_gpt_create( &vmm->gpt );
    if( error ) 
    {
        printk("\n[PANIC] in %s : cannot create empty GPT\n", __FUNCTION__ );
        hal_core_sleep();
    }

    // initialize the lock protecting the VSL
    // (no global GPT lock: GPT entries are locked individually)
    remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
   
    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
    error = hal_vmm_kernel_init( info );
    if( error ) 
    {
        printk("\n[PANIC] in %s : cannot create kernel vsegs in VMM\n", __FUNCTION__ );
        hal_core_sleep();
    }

    // reset th_tbl[] array and associated fields
    uint32_t i;
    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
	{
        process->th_tbl[i] = NULL;
    }
    process->th_nr  = 0;
    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );


    // reset children list as empty
    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    process->children_nr = 0;
    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
                           LOCK_PROCESS_CHILDREN );

    // register kernel process in cluster manager local_list
    cluster_process_local_link( process );
   
	hal_fence();

#if DEBUG_PROCESS_ZERO_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_ZERO_CREATE < cycle )
printk("\n[%s] exit / cluster %x / cycle %d\n",
__FUNCTION__, local_cxy, cycle );
#endif

}  // end process_zero_create()
[428]1746
[564]1747////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// This function creates the user "init" process (PID 1) in the local cluster:
// it allocates and initializes the process descriptor (parent is the local
// process_zero), loads the .elf file defined by CONFIG_PROCESS_INIT_PATH,
// registers init in the process_zero children list, creates the main thread
// on a local core, and unblocks it. Any failure is fatal (panic + core sleep).
///////////////////////////////////////////////////////////////////////////////////////
void process_init_create( void )
{
    process_t      * process;       // local pointer on process descriptor
    pid_t            pid;           // process_init identifier
    thread_t       * thread;        // local pointer on main thread
    pthread_attr_t   attr;          // main thread attributes
    lid_t            lid;           // selected core local index for main thread
    xptr_t           file_xp;       // extended pointer on .elf file descriptor
    uint32_t         file_id;       // file index in fd_array
    error_t          error;

#if DEBUG_PROCESS_INIT_CREATE
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
#endif

    // allocates memory for process descriptor from local cluster
	process = process_alloc(); 
    if( process == NULL )
    {
        printk("\n[PANIC] in %s : cannot allocate process\n", __FUNCTION__ );
        hal_core_sleep();
    }

    // set the CWD and VFS_ROOT fields in process descriptor
    // both inherited from the kernel process_zero
    process->cwd_xp      = process_zero.vfs_root_xp;
    process->vfs_root_xp = process_zero.vfs_root_xp;

    // get PID from local cluster / init must be PID 1
    error = cluster_pid_alloc( process , &pid );
    if( error ) 
    {
        printk("\n[PANIC] in %s : cannot allocate PID\n", __FUNCTION__ );
        hal_core_sleep();
    }
    if( pid != 1 ) 
    {
        printk("\n[PANIC] in %s : process PID must be 0x1\n", __FUNCTION__ );
        hal_core_sleep();
    }

    // initialize process descriptor / parent is local process_zero
    error = process_reference_init( process,
                                    pid,
                                    XPTR( local_cxy , &process_zero ) ); 
    if( error )
    {
        printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ );
        hal_core_sleep();
    }

#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // open the file identified by CONFIG_PROCESS_INIT_PATH
    file_xp = XPTR_NULL;
    file_id = -1;
	error   = vfs_open( process->vfs_root_xp,
                        CONFIG_PROCESS_INIT_PATH,
                        XPTR( local_cxy , process ),
                        O_RDONLY,
                        0,
                        &file_xp,
                        &file_id );
    if( error )
    {
        printk("\n[PANIC] in %s : cannot open file <%s>\n",
         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
        hal_core_sleep();
    }

#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] open .elf file decriptor\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // register "code" and "data" vsegs as well as entry-point
    // in process VMM, using information contained in the elf file.
	error = elf_load_process( file_xp , process );

    if( error ) 
    {
        printk("\n[PANIC] in %s : cannot access file <%s>\n",
         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
        hal_core_sleep();
    }


#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

#if (DEBUG_PROCESS_INIT_CREATE & 1)
hal_vmm_display( process , true );
#endif

    // get extended pointers on process_zero children_root, children_lock
    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );

    // take lock protecting kernel process children list
    remote_queuelock_acquire( children_lock_xp );

    // register process INIT in parent local process_zero
	xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
	hal_atomic_add( &process_zero.children_nr , 1 );

    // release lock protecting kernel process children list
    remote_queuelock_release( children_lock_xp );

#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] registered init process in parent\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // select a core in local cluster to execute the main thread
    lid  = cluster_select_local_core();

    // initialize pthread attributes for main thread
    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
    attr.cxy        = local_cxy;
    attr.lid        = lid;

    // create and initialize thread descriptor
	error = thread_user_create( pid,
                                (void *)process->vmm.entry_point,
                                NULL,
                                &attr,
                                &thread );

    if( error )
    {
        printk("\n[PANIC] in %s : cannot create main thread\n", __FUNCTION__  );
        hal_core_sleep();
    }
    // the main thread of a process must have TRDID 0
    if( thread->trdid != 0 )
    {
        printk("\n[PANIC] in %s : bad main thread trdid\n", __FUNCTION__  );
        hal_core_sleep();
    }

#if(DEBUG_PROCESS_INIT_CREATE & 1)
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] created main thread\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    // activate thread
	thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );

    hal_fence();

#if DEBUG_PROCESS_INIT_CREATE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_INIT_CREATE < cycle )
printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
#endif

}  // end process_init_create()
1918
[428]1919/////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
// This debug function displays on TXT0 one line describing the process identified
// by the <process_xp> extended pointer: PID, attached TXT terminal name with
// foreground (FG) / background (BG) status, descriptor pointer, PPID, termination
// state, number of threads, and .elf file name.
// For the kernel process_zero (lpid == 0) the TXT and .elf names are hard-coded,
// and it is always displayed as foreground.
// Note: uses nolock_printk() => the caller is expected to hold the TXT0 lock.
/////////////////////////////////////////////////////////////////////////////////////
void process_display( xptr_t process_xp )
{
    process_t   * process_ptr;
    cxy_t         process_cxy;

    xptr_t        parent_xp;       // extended pointer on parent process
    process_t   * parent_ptr;
    cxy_t         parent_cxy;

    xptr_t        owner_xp;        // extended pointer on owner process
    process_t   * owner_ptr;
    cxy_t         owner_cxy;

    pid_t         pid;
    pid_t         ppid;
    lpid_t        lpid;
    uint32_t      state;
    uint32_t      th_nr;

    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
    chdev_t     * txt_chdev_ptr;
    cxy_t         txt_chdev_cxy;
    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process

    xptr_t        elf_file_xp;     // extended pointer on .elf file
    cxy_t         elf_file_cxy;
    vfs_file_t  * elf_file_ptr;
    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode

    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];

    // get cluster and local pointer on process
    process_ptr = GET_PTR( process_xp );
    process_cxy = GET_CXY( process_xp );

    // get process PID, LPID, and state (all read remotely)
    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
    lpid  = LPID_FROM_PID( pid );
    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );

    // get process PPID from the parent descriptor
    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
    parent_cxy = GET_CXY( parent_xp );
    parent_ptr = GET_PTR( parent_xp );
    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );

    // get number of threads
    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );

    // get pointers on owner process descriptor
    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
    owner_cxy = GET_CXY( owner_xp );
    owner_ptr = GET_PTR( owner_xp );

    // get process TXT name and .elf name
    if( lpid )                                   // user process
    {

        // get extended pointer on file descriptor associated to TXT_RX
        // (stdin is always fd_array[0] in the owner process)
        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );

        assert( (txt_file_xp != XPTR_NULL) ,
        "process must be attached to one TXT terminal" ); 

        // get TXT_RX chdev pointers
        txt_chdev_xp  = chdev_from_file( txt_file_xp );
        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
        txt_chdev_ptr = GET_PTR( txt_chdev_xp );

        // get TXT_RX name and ownership
        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
   
        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );

        // get process .elf name from the binary file inode
        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
        elf_file_cxy  = GET_CXY( elf_file_xp );
        elf_file_ptr  = GET_PTR( elf_file_xp );
        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
    }
    else                                         // kernel process_zero
    {
        // TXT name and .elf name are not registered in kernel process_zero
        strcpy( txt_name , "txt0_rx" );
        txt_owner_xp = process_xp; 
        strcpy( elf_name , "kernel.elf" );
    }

    // display process info / FG if this process owns its TXT terminal
    if( txt_owner_xp == process_xp )
    {
        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
    }
    else
    {
        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
    }
}  // end process_display()
2025
2026
2027////////////////////////////////////////////////////////////////////////////////////////
2028//     Terminals related functions
2029////////////////////////////////////////////////////////////////////////////////////////
2030
[581]2031//////////////////////////////////
[485]2032uint32_t process_txt_alloc( void )
[428]2033{
2034    uint32_t  index;       // TXT terminal index
2035    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
2036    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
2037    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
2038    xptr_t    root_xp;     // extended pointer on owner field in chdev
2039
2040    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
2041    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
2042    {
2043        // get pointers on TXT_RX[index]
2044        chdev_xp  = chdev_dir.txt_rx[index];
2045        chdev_cxy = GET_CXY( chdev_xp );
2046        chdev_ptr = GET_PTR( chdev_xp );
2047
2048        // get extended pointer on root of attached process
2049        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2050
2051        // return free TXT index if found
2052        if( xlist_is_empty( root_xp ) ) return index; 
2053    }
2054
[492]2055    assert( false , "no free TXT terminal found" );
[428]2056
2057    return -1;
2058
2059} // end process_txt_alloc()
2060
2061/////////////////////////////////////////////
2062void process_txt_attach( process_t * process,
2063                         uint32_t    txt_id )
2064{
2065    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2066    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2067    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2068    xptr_t      root_xp;      // extended pointer on list root in chdev
2069    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2070
[564]2071// check process is in owner cluster
2072assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
2073"process descriptor not in owner cluster" );
[428]2074
[564]2075// check terminal index
2076assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
2077"illegal TXT terminal index" );
[428]2078
2079    // get pointers on TXT_RX[txt_id] chdev
2080    chdev_xp  = chdev_dir.txt_rx[txt_id];
2081    chdev_cxy = GET_CXY( chdev_xp );
2082    chdev_ptr = GET_PTR( chdev_xp );
2083
2084    // get extended pointer on root & lock of attached process list
2085    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2086    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2087
[564]2088    // get lock protecting list of processes attached to TXT
2089    remote_busylock_acquire( lock_xp );
2090
[428]2091    // insert process in attached process list
2092    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
2093
[564]2094    // release lock protecting list of processes attached to TXT
2095    remote_busylock_release( lock_xp );
2096
[446]2097#if DEBUG_PROCESS_TXT
[610]2098thread_t * this = CURRENT_THREAD;
[457]2099uint32_t cycle = (uint32_t)hal_get_cycles();
[446]2100if( DEBUG_PROCESS_TXT < cycle )
[610]2101printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
2102__FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle );
[433]2103#endif
[428]2104
2105} // end process_txt_attach()
2106
[436]2107/////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
// This function detaches the process identified by <process_xp> (in its owner
// cluster) from its TXT terminal: it first transfers the TXT ownership if this
// process is the current owner, then removes the process from the terminal's
// attached processes list, under the protection of the list busylock.
/////////////////////////////////////////////////////////////////////////////////////
void process_txt_detach( xptr_t  process_xp )
{
    process_t * process_ptr;  // local pointer on process in owner cluster
    cxy_t       process_cxy;  // process owner cluster
    pid_t       process_pid;  // process identifier
    xptr_t      file_xp;      // extended pointer on stdin file
    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
    xptr_t      lock_xp;      // extended pointer on list lock in chdev

    // get process cluster, local pointer, and PID
    process_cxy = GET_CXY( process_xp );
    process_ptr = GET_PTR( process_xp );
    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );

// check process descriptor in owner cluster
assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
"process descriptor not in owner cluster" );

    // release TXT ownership (does nothing if not TXT owner)
    // must be done before unlinking, while the process is still attached
    process_txt_transfer_ownership( process_xp );

    // get extended pointer on process stdin pseudo file (fd_array[0])
    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );

    // get pointers on TXT_RX chdev
    chdev_xp  = chdev_from_file( file_xp );
    chdev_cxy = GET_CXY( chdev_xp );
    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );

    // get extended pointer on lock protecting attached process list
    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );

    // get lock protecting list of processes attached to TXT
    remote_busylock_acquire( lock_xp );

    // unlink process from attached process list
    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );

    // release lock protecting list of processes attached to TXT
    remote_busylock_release( lock_xp );

#if DEBUG_PROCESS_TXT
thread_t * this = CURRENT_THREAD;
uint32_t cycle  = (uint32_t)hal_get_cycles();
uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] detached process %x from TXT%d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
#endif

} // end process_txt_detach()
2161
2162///////////////////////////////////////////////////
2163void process_txt_set_ownership( xptr_t process_xp )
2164{
2165    process_t * process_ptr;
2166    cxy_t       process_cxy;
[436]2167    pid_t       process_pid;
[428]2168    xptr_t      file_xp;
2169    xptr_t      txt_xp;     
2170    chdev_t   * txt_ptr;
2171    cxy_t       txt_cxy;
2172
[436]2173    // get pointers on process in owner cluster
[428]2174    process_cxy = GET_CXY( process_xp );
[435]2175    process_ptr = GET_PTR( process_xp );
[564]2176    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2177
2178    // check owner cluster
[492]2179    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[624]2180    "process descriptor not in owner cluster" );
[436]2181
[428]2182    // get extended pointer on stdin pseudo file
[564]2183    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2184
2185    // get pointers on TXT chdev
2186    txt_xp  = chdev_from_file( file_xp );
2187    txt_cxy = GET_CXY( txt_xp );
[435]2188    txt_ptr = GET_PTR( txt_xp );
[428]2189
2190    // set owner field in TXT chdev
[564]2191    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
[428]2192
[446]2193#if DEBUG_PROCESS_TXT
[610]2194thread_t * this = CURRENT_THREAD;
[457]2195uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2196uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[446]2197if( DEBUG_PROCESS_TXT < cycle )
[625]2198printk("\n[%s] thread[%x,%x] give TXT%d ownership to process %x / cycle %d\n",
[610]2199__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
[436]2200#endif
2201
[428]2202}  // end process_txt_set ownership()
2203
[436]2204////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
// This function transfers the TXT terminal ownership away from the process
// identified by <process_xp> (in its owner cluster). It does nothing if the
// target process is not the current TXT owner, or if the terminal is TXT0.
// - if the target is not the KSH process (ppid != 1), ownership goes to the KSH
//   process found in the attached processes list (it must exist);
// - if the target is the KSH process, ownership goes to any other attached
//   process, or to nobody (XPTR_NULL) when the list contains no other process.
// The attached processes list is scanned under the chdev busylock.
/////////////////////////////////////////////////////////////////////////////////////
void process_txt_transfer_ownership( xptr_t process_xp )
{
    process_t * process_ptr;     // local pointer on process releasing ownership
    cxy_t       process_cxy;     // process cluster
    pid_t       process_pid;     // process identifier
    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
    uint32_t    txt_id;          // TXT_RX channel
    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
    xptr_t      root_xp;         // extended pointer on root of attached process list
    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
    xptr_t      iter_xp;         // iterator for xlist
    xptr_t      current_xp;      // extended pointer on current process
    bool_t      found;           // true when a new owner has been selected

#if DEBUG_PROCESS_TXT
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle;
#endif

    // get pointers on target process
    process_cxy = GET_CXY( process_xp );
    process_ptr = GET_PTR( process_xp );
    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );

// check owner cluster
assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
"process descriptor not in owner cluster" );

    // get extended pointer on stdin pseudo file (fd_array[0])
    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );

    // get pointers on TXT chdev
    txt_xp  = chdev_from_file( file_xp );
    txt_cxy = GET_CXY( txt_xp );
    txt_ptr = GET_PTR( txt_xp );

    // get relevant infos from chdev descriptor
    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
    txt_id   = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );

    // transfer ownership only if target process is the TXT owner
    // (and never for TXT0, which is reserved for the kernel)
    if( (owner_xp == process_xp) && (txt_id > 0) ) 
    {
        // get extended pointers on root and lock of attached processes list
        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );

        if( process_get_ppid( process_xp ) != 1 )       // target process is not KSH
        {
            // get lock
            remote_busylock_acquire( lock_xp );

            // scan attached process list to find KSH process
            found = false;
            for( iter_xp = hal_remote_l64( root_xp ) ;
                 (iter_xp != root_xp) && (found == false) ;
                 iter_xp = hal_remote_l64( iter_xp ) )
            {
                current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );

                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
                {
                    // set owner field in TXT chdev
                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to KSH / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
#endif
                    found = true;
                }
            }

            // release lock
            remote_busylock_release( lock_xp );

// It must exist a KSH process for each user TXT channel
assert( (found == true), "KSH process not found for TXT%d", txt_id );

        }
        else                                           // target process is KSH
        {
            // get lock
            remote_busylock_acquire( lock_xp );

            // scan attached process list to find another process
            found = false;
            for( iter_xp = hal_remote_l64( root_xp ) ;
                 (iter_xp != root_xp) && (found == false) ;
                 iter_xp = hal_remote_l64( iter_xp ) )
            {
                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );

                if( current_xp != process_xp )            // current is not KSH
                {
                    // set owner field in TXT chdev
                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );

#if DEBUG_PROCESS_TXT
cycle  = (uint32_t)hal_get_cycles();
cxy_t       current_cxy = GET_CXY( current_xp );
process_t * current_ptr = GET_PTR( current_xp );
uint32_t    new_pid     = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to process %x / cycle %d\n",
__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
#endif
                    found = true;
                }
            }

            // release lock
            remote_busylock_release( lock_xp );

            // no more owner for TXT if no other process found
            if( found == false )
            {
                // set owner field in TXT chdev
                hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] released TXT%d (no attached process) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
#endif
            }
        }
    }
    else
    {

#if DEBUG_PROCESS_TXT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_TXT < cycle )
printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
#endif

    }

}  // end process_txt_transfer_ownership()
[428]2352
2353
[564]2354////////////////////////////////////////////////
2355bool_t process_txt_is_owner( xptr_t process_xp )
[457]2356{
2357    // get local pointer and cluster of process in owner cluster
2358    cxy_t       process_cxy = GET_CXY( process_xp );
2359    process_t * process_ptr = GET_PTR( process_xp );
2360
[564]2361// check calling thread execute in target process owner cluster
2362pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2363assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[624]2364"process descriptor not in owner cluster" );
[457]2365
2366    // get extended pointer on stdin pseudo file
[564]2367    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[457]2368
2369    // get pointers on TXT chdev
2370    xptr_t    txt_xp  = chdev_from_file( file_xp );
2371    cxy_t     txt_cxy = GET_CXY( txt_xp );
2372    chdev_t * txt_ptr = GET_PTR( txt_xp );
2373
2374    // get extended pointer on TXT_RX owner process
[564]2375    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[457]2376
2377    return (process_xp == owner_xp);
2378
2379}   // end process_txt_is_owner()
2380
[436]2381////////////////////////////////////////////////     
2382xptr_t process_txt_get_owner( uint32_t channel )
[435]2383{
2384    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
2385    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
2386    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
2387
[564]2388    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
[435]2389
[457]2390}  // end process_txt_get_owner()
2391
[435]2392///////////////////////////////////////////
2393void process_txt_display( uint32_t txt_id )
2394{
2395    xptr_t      chdev_xp;
2396    cxy_t       chdev_cxy;
2397    chdev_t   * chdev_ptr;
2398    xptr_t      root_xp;
2399    xptr_t      lock_xp;
2400    xptr_t      current_xp;
2401    xptr_t      iter_xp;
[443]2402    cxy_t       txt0_cxy;
2403    chdev_t   * txt0_ptr;
2404    xptr_t      txt0_xp;
2405    xptr_t      txt0_lock_xp;
2406   
[435]2407    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
[492]2408    "illegal TXT terminal index" );
[435]2409
[443]2410    // get pointers on TXT0 chdev
2411    txt0_xp  = chdev_dir.txt_tx[0];
2412    txt0_cxy = GET_CXY( txt0_xp );
2413    txt0_ptr = GET_PTR( txt0_xp );
2414
2415    // get extended pointer on TXT0 lock
2416    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
2417
[435]2418    // get pointers on TXT_RX[txt_id] chdev
2419    chdev_xp  = chdev_dir.txt_rx[txt_id];
2420    chdev_cxy = GET_CXY( chdev_xp );
2421    chdev_ptr = GET_PTR( chdev_xp );
2422
2423    // get extended pointer on root & lock of attached process list
2424    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2425    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2426
[443]2427    // get lock on attached process list
[564]2428    remote_busylock_acquire( lock_xp );
[443]2429
2430    // get TXT0 lock in busy waiting mode
[564]2431    remote_busylock_acquire( txt0_lock_xp );
[443]2432
[435]2433    // display header
[443]2434    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
2435    txt_id , (uint32_t)hal_get_cycles() );
[435]2436
[436]2437    // scan attached process list
[435]2438    XLIST_FOREACH( root_xp , iter_xp )
2439    {
2440        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2441        process_display( current_xp );
2442    }
2443
[443]2444    // release TXT0 lock in busy waiting mode
[564]2445    remote_busylock_release( txt0_lock_xp );
[443]2446
2447    // release lock on attached process list
[564]2448    remote_busylock_release( lock_xp );
[435]2449
2450}  // end process_txt_display
Note: See TracBrowser for help on using the repository browser.