source: trunk/kernel/kern/process.c @ 636

Last change on this file since 636 was 635, checked in by alain, 5 years ago

This version is a major evolution: The physical memory allocators,
defined in the kmem.c, ppm.c, and kcm.c files have been modified
to support remote accesses. The RPCs that were previously used
to allocate physical memory in a remote cluster have been removed.
This has been done to cure a deadlock in case of concurrent page-faults.

This version 2.2 has been tested on a (4 clusters / 2 cores per cluster)
TSAR architecture, for both the "sort" and the "fft" applications.

File size: 84.5 KB
RevLine 
[1]1/*
[564]2 * process.c - process related functions definition.
[172]3 *
[1]4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
[618]6 *          Alain Greiner (2016,2017,2018,2019)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
[409]10 * This file is part of ALMOS-MKH.
[1]11 *
[172]12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
[172]16 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
[172]22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[457]27#include <hal_kernel_types.h>
[1]28#include <hal_remote.h>
29#include <hal_uspace.h>
[409]30#include <hal_irqmask.h>
[623]31#include <hal_vmm.h>
[1]32#include <errno.h>
33#include <printk.h>
34#include <memcpy.h>
35#include <bits.h>
36#include <kmem.h>
37#include <page.h>
38#include <vmm.h>
39#include <vfs.h>
40#include <core.h>
41#include <thread.h>
[428]42#include <chdev.h>
[1]43#include <list.h>
[407]44#include <string.h>
[1]45#include <scheduler.h>
[564]46#include <busylock.h>
47#include <queuelock.h>
48#include <remote_queuelock.h>
49#include <rwlock.h>
50#include <remote_rwlock.h>
[1]51#include <dqdt.h>
52#include <cluster.h>
53#include <ppm.h>
54#include <boot_info.h>
55#include <process.h>
56#include <elf.h>
[23]57#include <syscalls.h>
[435]58#include <shared_syscalls.h>
[1]59
60//////////////////////////////////////////////////////////////////////////////////////////
61// Extern global variables
62//////////////////////////////////////////////////////////////////////////////////////////
63
[428]64extern process_t           process_zero;     // allocated in kernel_init.c
65extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
[1]66
67//////////////////////////////////////////////////////////////////////////////////////////
68// Process initialisation related functions
69//////////////////////////////////////////////////////////////////////////////////////////
70
[583]71/////////////////////////////////
[503]72process_t * process_alloc( void )
[1]73{
[635]74        kmem_req_t req;
[1]75
[635]76    req.type  = KMEM_KCM;
77        req.order = bits_log2( sizeof(process_t) );
[1]78        req.flags = AF_KERNEL;
79
[635]80    return kmem_alloc( &req );
[1]81}
82
83////////////////////////////////////////
84void process_free( process_t * process )
85{
86    kmem_req_t  req;
87
[635]88        req.type = KMEM_KCM;
[1]89        req.ptr  = process;
90        kmem_free( &req );
91}
92
[625]93////////////////////////////////////////////////////
94error_t process_reference_init( process_t * process,
95                                pid_t       pid,
96                                xptr_t      parent_xp )
[1]97{
[625]98    error_t     error;
[610]99    xptr_t      process_xp;
[428]100    cxy_t       parent_cxy;
101    process_t * parent_ptr;
[407]102    xptr_t      stdin_xp;
103    xptr_t      stdout_xp;
104    xptr_t      stderr_xp;
105    uint32_t    stdin_id;
106    uint32_t    stdout_id;
107    uint32_t    stderr_id;
[428]108    uint32_t    txt_id;
109    char        rx_path[40];
110    char        tx_path[40];
[440]111    xptr_t      file_xp;
[428]112    xptr_t      chdev_xp;
[625]113    chdev_t   * chdev_ptr;
[428]114    cxy_t       chdev_cxy;
115    pid_t       parent_pid;
[625]116    vmm_t     * vmm;
[1]117
[610]118    // build extended pointer on this reference process
119    process_xp = XPTR( local_cxy , process );
120
[625]121    // get pointer on process vmm
122    vmm = &process->vmm;
123
[428]124    // get parent process cluster and local pointer
125    parent_cxy = GET_CXY( parent_xp );
[435]126    parent_ptr = GET_PTR( parent_xp );
[204]127
[457]128    // get parent_pid
[564]129    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]130
[438]131#if DEBUG_PROCESS_REFERENCE_INIT
[610]132thread_t * this = CURRENT_THREAD;
[433]133uint32_t cycle = (uint32_t)hal_get_cycles();
[610]134if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[625]135printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
136__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
[433]137#endif
[428]138
[610]139    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
[433]140        process->pid        = pid;
141    process->ref_xp     = XPTR( local_cxy , process );
[443]142    process->owner_xp   = XPTR( local_cxy , process );
[433]143    process->parent_xp  = parent_xp;
144    process->term_state = 0;
[428]145
[610]146    // initialize VFS root inode and CWD inode
147    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
148    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
149
[625]150    // initialize VSL as empty
151    vmm->vsegs_nr = 0;
152        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[564]153
[625]154    // create an empty GPT as required by the architecture
155    error = hal_gpt_create( &vmm->gpt );
156    if( error ) 
157    {
158        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
159        return -1;
160    }
161
162#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
163if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
164printk("\n[%s] thread[%x,%x] created empty GPT for process %x\n",
165__FUNCTION__, parent_pid, this->trdid, pid );
166#endif
167
[635]168    // initialize VSL lock
[625]169        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
170
[635]171    // register kernel vsegs in user process VMM as required by the architecture
[625]172    error = hal_vmm_kernel_update( process );
173    if( error ) 
174    {
175        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
176        return -1;
177    }
178
179#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
180if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[635]181printk("\n[%s] thread[%x,%x] registered kernel vsegs in VSL for process %x\n",
[625]182__FUNCTION__, parent_pid, this->trdid, pid );
183#endif
184
185    // create "args" and "envs" vsegs
186    // create "stacks" and "mmap" vsegs allocators
187    // initialize locks protecting GPT and VSL
188    error = vmm_user_init( process );
189    if( error ) 
190    {
191        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
192        return -1;
193    }
[415]194 
[438]195#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]196cycle = (uint32_t)hal_get_cycles();
[610]197if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[625]198printk("\n[%s] thread[%x,%x] initialized vmm for process %x\n", 
199__FUNCTION__, parent_pid, this->trdid, pid );
[433]200#endif
[1]201
[409]202    // initialize fd_array as empty
[408]203    process_fd_init( process );
[1]204
[428]205    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
[581]206    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
[408]207    {
[581]208        // select a TXT channel
209        if( pid == 1 )  txt_id = 0;                     // INIT
210        else            txt_id = process_txt_alloc();   // KSH
[428]211
[457]212        // attach process to TXT
[428]213        process_txt_attach( process , txt_id ); 
214
[457]215#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
216cycle = (uint32_t)hal_get_cycles();
[610]217if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
218printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
219__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
[457]220#endif
[428]221        // build path to TXT_RX[i] and TXT_TX[i] chdevs
222        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
223        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
224
225        // create stdin pseudo file         
[610]226        error = vfs_open(  process->vfs_root_xp,
[428]227                           rx_path,
[610]228                           process_xp,
[408]229                           O_RDONLY, 
230                           0,                // FIXME chmod
231                           &stdin_xp, 
232                           &stdin_id );
[625]233        if( error )
234        {
235            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
236            return -1;
237        }
[1]238
[564]239assert( (stdin_id == 0) , "stdin index must be 0" );
[428]240
[440]241#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
242cycle = (uint32_t)hal_get_cycles();
[610]243if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
244printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
245__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]246#endif
247
[428]248        // create stdout pseudo file         
[610]249        error = vfs_open(  process->vfs_root_xp,
[428]250                           tx_path,
[610]251                           process_xp,
[408]252                           O_WRONLY, 
253                           0,                // FIXME chmod
254                           &stdout_xp, 
255                           &stdout_id );
[625]256        if( error )
257        {
258            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
259            return -1;
260        }
[1]261
[625]262assert( (stdout_id == 1) , "stdout index must be 1" );
[428]263
[440]264#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
265cycle = (uint32_t)hal_get_cycles();
[610]266if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
267printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
268__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]269#endif
270
[428]271        // create stderr pseudo file         
[610]272        error = vfs_open(  process->vfs_root_xp,
[428]273                           tx_path,
[610]274                           process_xp,
[408]275                           O_WRONLY, 
276                           0,                // FIXME chmod
277                           &stderr_xp, 
278                           &stderr_id );
[625]279        if( error )
280        {
281            printk("\n[ERROR] in %s : cannot open stderr pseudo-file\n", __FUNCTION__ );
282            return -1;
283        }
[428]284
[625]285assert( (stderr_id == 2) , "stderr index must be 2" );
[428]286
[440]287#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
288cycle = (uint32_t)hal_get_cycles();
[610]289if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
290printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
291__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]292#endif
293
[408]294    }
[428]295    else                                            // normal user process
[408]296    {
[457]297        // get extended pointer on stdin pseudo file in parent process
[625]298        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy,
299                                                &parent_ptr->fd_array.array[0] ) );
[440]300
[457]301        // get extended pointer on parent process TXT chdev
[440]302        chdev_xp = chdev_from_file( file_xp );
[428]303 
304        // get cluster and local pointer on chdev
305        chdev_cxy = GET_CXY( chdev_xp );
[435]306        chdev_ptr = GET_PTR( chdev_xp );
[428]307 
[564]308        // get parent process TXT terminal index
309        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[407]310
[564]311        // attach child process to parent process TXT terminal
[428]312        process_txt_attach( process , txt_id ); 
[407]313
[457]314        // copy all open files from parent process fd_array to this process
[428]315        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
[457]316                                XPTR( parent_cxy , &parent_ptr->fd_array ) );
[408]317    }
[407]318
[610]319    // initialize lock protecting CWD changes
[625]320    remote_busylock_init( XPTR( local_cxy , 
321                                &process->cwd_lock ), LOCK_PROCESS_CWD );
[408]322
[438]323#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]324cycle = (uint32_t)hal_get_cycles();
[610]325if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
326printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
327__FUNCTION__, parent_pid, this->trdid, pid , cycle );
[433]328#endif
[407]329
[408]330    // reset children list root
331    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
332    process->children_nr     = 0;
[625]333    remote_queuelock_init( XPTR( local_cxy,
334                                 &process->children_lock ), LOCK_PROCESS_CHILDREN );
[407]335
[611]336    // reset semaphore / mutex / barrier / condvar list roots and lock
[408]337    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
338    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
339    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
340    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
[625]341    remote_queuelock_init( XPTR( local_cxy , 
342                                 &process->sync_lock ), LOCK_PROCESS_USERSYNC );
[407]343
[611]344    // reset open directories root and lock
345    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
[625]346    remote_queuelock_init( XPTR( local_cxy , 
347                                 &process->dir_lock ), LOCK_PROCESS_DIR );
[611]348
[408]349    // register new process in the local cluster manager pref_tbl[]
350    lpid_t lpid = LPID_FROM_PID( pid );
351    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
[407]352
[408]353    // register new process descriptor in local cluster manager local_list
354    cluster_process_local_link( process );
[407]355
[408]356    // register new process descriptor in local cluster manager copies_list
357    cluster_process_copies_link( process );
[172]358
[564]359    // initialize th_tbl[] array and associated threads
[1]360    uint32_t i;
[564]361
362    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]363        {
364        process->th_tbl[i] = NULL;
365    }
366    process->th_nr  = 0;
[564]367    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[1]368
[124]369        hal_fence();
[1]370
[438]371#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]372cycle = (uint32_t)hal_get_cycles();
[610]373if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
374printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
375__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]376#endif
[101]377
[635]378#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
379hal_vmm_display( parent_xp , false );
380hal_vmm_display( XPTR( local_cxy , process ) , false );
381#endif
382
[625]383    return 0;
384
[428]385}  // process_reference_init()
[204]386
[1]387/////////////////////////////////////////////////////
388error_t process_copy_init( process_t * local_process,
389                           xptr_t      reference_process_xp )
390{
[625]391    error_t   error;
392    vmm_t   * vmm;
[415]393
[23]394    // get reference process cluster and local pointer
395    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
[435]396    process_t * ref_ptr = GET_PTR( reference_process_xp );
[1]397
[625]398    // get pointer on process vmm
399    vmm = &local_process->vmm;
400
[428]401    // initialize PID, REF_XP, PARENT_XP, and STATE
[564]402    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
403    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
[433]404    local_process->ref_xp     = reference_process_xp;
[443]405    local_process->owner_xp   = reference_process_xp;
[433]406    local_process->term_state = 0;
[407]407
[564]408#if DEBUG_PROCESS_COPY_INIT
[610]409thread_t * this = CURRENT_THREAD; 
[433]410uint32_t cycle = (uint32_t)hal_get_cycles();
[610]411if( DEBUG_PROCESS_COPY_INIT < cycle )
412printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
413__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]414#endif
[407]415
[564]416// check user process
[625]417assert( (local_process->pid != 0), "LPID cannot be 0" );
[564]418
[625]419    // initialize VSL as empty
420    vmm->vsegs_nr = 0;
421        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[1]422
[625]423    // create an empty GPT as required by the architecture
424    error = hal_gpt_create( &vmm->gpt );
425    if( error ) 
426    {
427        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
428        return -1;
429    }
430
431    // initialize GPT and VSL locks
432        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
433
434    // register kernel vsegs in VMM as required by the architecture
435    error = hal_vmm_kernel_update( local_process );
436    if( error ) 
437    {
438        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
439        return -1;
440    }
441
442    // create "args" and "envs" vsegs
443    // create "stacks" and "mmap" vsegs allocators
444    // initialize locks protecting GPT and VSL
445    error = vmm_user_init( local_process );
446    if( error ) 
447    {
448        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
449        return -1;
450    }
451 
452#if (DEBUG_PROCESS_COPY_INIT & 1)
453cycle = (uint32_t)hal_get_cycles();
454if( DEBUG_PROCESS_COPY_INIT < cycle )
455printk("\n[%s] thread[%x,%x] initialized vmm for process %x / cycle %d\n", 
456__FUNCTION__, parent_pid, this->trdid, pid, cycle );
457#endif
458
459    // set process file descriptors array
[23]460        process_fd_init( local_process );
[1]461
[625]462    // set vfs_root_xp / vfs_bin_xp / cwd_xp fields
[564]463    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
464    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
[610]465    local_process->cwd_xp      = XPTR_NULL;
[1]466
467    // reset children list root (not used in a process descriptor copy)
468    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
[172]469    local_process->children_nr   = 0;
[564]470    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
471                           LOCK_PROCESS_CHILDREN );
[1]472
[428]473    // reset children_list (not used in a process descriptor copy)
474    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
[1]475
476    // reset semaphores list root (not used in a process descriptor copy)
477    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
[23]478    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
479    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
480    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
[1]481
[564]482    // initialize th_tbl[] array and associated fields
[1]483    uint32_t i;
[564]484    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]485        {
486        local_process->th_tbl[i] = NULL;
487    }
488    local_process->th_nr  = 0;
[564]489    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
[1]490
491    // register new process descriptor in local cluster manager local_list
492    cluster_process_local_link( local_process );
493
494    // register new process descriptor in owner cluster manager copies_list
495    cluster_process_copies_link( local_process );
496
[124]497        hal_fence();
[1]498
[438]499#if DEBUG_PROCESS_COPY_INIT
[433]500cycle = (uint32_t)hal_get_cycles();
[610]501if( DEBUG_PROCESS_COPY_INIT < cycle )
502printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
503__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]504#endif
[279]505
[1]506    return 0;
507
[204]508} // end process_copy_init()
509
///////////////////////////////////////////////////////////////////////////////////////
// Destroy a process descriptor copy in the local cluster.
// Caller must guarantee that the process contains no more threads (asserted below).
// It releases the VMM, unlinks the descriptor from the cluster manager lists, and,
// when the local cluster is the owner cluster, additionally detaches the process
// from its TXT terminal, removes it from the parent children list, and releases
// the PID to the cluster manager, before freeing the descriptor memory.
// NOTE(review): open files are not closed here (see FIXME below) — presumably
// handled elsewhere on the exit path; confirm before relying on it.
// @ process : local pointer on the process descriptor to destroy.
///////////////////////////////////////////////////////////////////////////////////////
void process_destroy( process_t * process )
{
    xptr_t      parent_xp;          // extended pointer on parent process
    process_t * parent_ptr;
    cxy_t       parent_cxy;
    xptr_t      children_lock_xp;   // lock protecting parent children list
    xptr_t      children_nr_xp;     // parent children counter

    // copy pid before the descriptor is released, for trace and owner test
    pid_t       pid = process->pid;

// check no more threads
assert( (process->th_nr == 0),
"process %x in cluster %x contains threads", pid , local_cxy );

#if DEBUG_PROCESS_DESTROY
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

    // Destroy VMM
    vmm_destroy( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from local_list in local cluster manager
    cluster_process_local_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from copies_list in owner cluster manager
    cluster_process_copies_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // when target process cluster is the owner cluster
    // - remove process from TXT list and transfer ownership
    // - remove process from children_list
    // - release PID
    if( CXY_FROM_PID( pid ) == local_cxy )
    {
        process_txt_detach( XPTR( local_cxy , process ) );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from TXT list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // get pointers on parent process
        parent_xp  = process->parent_xp;
        parent_cxy = GET_CXY( parent_xp );
        parent_ptr = GET_PTR( parent_xp );

        // get extended pointer on children_lock in parent process
        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );

        // remove process from children_list
        // counter is decremented inside the lock window, consistently with unlink
        remote_queuelock_acquire( children_lock_xp );
        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
            hal_remote_atomic_add( children_nr_xp , -1 );
        remote_queuelock_release( children_lock_xp );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from parent process children list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // release the process PID to cluster manager
        cluster_pid_release( pid );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    }

    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]

    // FIXME close all open files [AG]

    // FIXME synchronize dirty files [AG]

    // release memory allocated to process descriptor
    process_free( process );

#if DEBUG_PROCESS_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

}  // end process_destroy()
623
[583]624///////////////////////////////////////////////////////////////////
[527]625const char * process_action_str( process_sigactions_t action_type )
[409]626{
[583]627    switch ( action_type )
628    {
629        case BLOCK_ALL_THREADS:   return "BLOCK";
630        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
631        case DELETE_ALL_THREADS:  return "DELETE";
632        default:                  return "undefined";
633    }
[409]634}
635
///////////////////////////////////////////////////////////////////////////////////////
// Apply a given action (BLOCK / UNBLOCK / DELETE all threads) to ALL copies of a
// target user process, in all clusters. The calling (client) thread sends one
// parallel non-blocking RPC per remote cluster containing a copy, blocks and
// deschedules until the last RPC server unblocks it, then handles the local copy
// (if any) directly. The RPC descriptor is allocated in the client stack and
// shared by all servers: this is safe because it carries no output arguments and
// the responses counter is updated atomically.
// @ pid  : target process identifier.
// @ type : action type (checked by assert below).
///////////////////////////////////////////////////////////////////////////////////////
void process_sigaction( pid_t       pid,
                        uint32_t    type )
{
    cxy_t              owner_cxy;         // owner cluster identifier
    lpid_t             lpid;              // process index in owner cluster
    cluster_t        * cluster;           // pointer on cluster manager
    xptr_t             root_xp;           // extended pointer on root of copies
    xptr_t             lock_xp;           // extended pointer on lock protecting copies
    xptr_t             iter_xp;           // iterator on copies list
    xptr_t             process_xp;        // extended pointer on process copy
    cxy_t              process_cxy;       // process copy cluster identifier
    process_t        * process_ptr;       // local pointer on process copy
    reg_t              save_sr;           // for critical section
    thread_t         * client;            // pointer on client thread
    xptr_t             client_xp;         // extended pointer on client thread
    process_t        * local;             // pointer on process copy in local cluster
    uint32_t           remote_nr;         // number of remote process copies
    rpc_desc_t         rpc;               // shared RPC descriptor
    uint32_t           responses;         // shared RPC responses counter

    client    = CURRENT_THREAD;
    client_xp = XPTR( local_cxy , client );
    local     = NULL;
    remote_nr = 0;

    // check calling thread can yield
    thread_assert_can_yield( client , __FUNCTION__ );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
__FUNCTION__ , client->process->pid, client->trdid,
process_action_str( type ) , pid , cycle );
#endif

    // get pointer on local cluster manager
    cluster = LOCAL_CLUSTER;

    // get owner cluster identifier and process lpid
    owner_cxy = CXY_FROM_PID( pid );
    lpid      = LPID_FROM_PID( pid );

    // get root of list of copies and lock from owner cluster
    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

// check action type
assert( ((type == DELETE_ALL_THREADS ) ||
         (type == BLOCK_ALL_THREADS )  ||
         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );

    // This client thread send parallel RPCs to all remote clusters containing
    // target process copies, wait all responses, and then handles directly
    // the threads in local cluster, when required.
    // The client thread allocates a - shared - RPC descriptor in the stack,
    // because all parallel, non-blocking, server threads use the same input
    // arguments, and use the shared RPC response field

    // mask IRQs
    hal_disable_irq( &save_sr);

    // client thread blocks itself BEFORE sending any RPC, so the wakeup from
    // the last server cannot be lost whatever the interleaving
    thread_block( client_xp , THREAD_BLOCKED_RPC );

    // initialize RPC responses counter
    responses = 0;

    // initialize shared RPC descriptor
    // can be shared, because no out arguments
    rpc.rsp       = &responses;
    rpc.blocking  = false;
    rpc.index     = RPC_PROCESS_SIGACTION;
    rpc.thread    = client;
    rpc.lid       = client->core->lid;
    rpc.args[0]   = pid;
    rpc.args[1]   = type;

    // take the lock protecting process copies
    remote_queuelock_acquire( lock_xp );

    // scan list of process copies
    XLIST_FOREACH( root_xp , iter_xp )
    {
        // get extended pointers and cluster on process
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        process_cxy = GET_CXY( process_xp );
        process_ptr = GET_PTR( process_xp );

        if( process_cxy == local_cxy )    // process copy is local
        {
            // local copy is handled directly after the scan, without RPC
            local = process_ptr;
        }
        else                              // process copy is remote
        {
            // update number of remote process copies
            remote_nr++;

            // atomically increment RPC responses counter
            hal_atomic_add( &responses , 1 );

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
#endif
            // call RPC in target cluster
            rpc_send( process_cxy , &rpc );
        }
    }  // end list of copies

    // release the lock protecting process copies
    remote_queuelock_release( lock_xp );

    // restore IRQs
    hal_restore_irq( save_sr);

    // - if there is remote process copies, the client thread deschedules,
    //   (it will be unblocked by the last RPC server thread).
    // - if there is no remote copies, the client thread unblock itself.
    if( remote_nr )
    {
        sched_yield("blocked on rpc_process_sigaction");
    }
    else
    {
        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
    }

    // handle the local process copy if required
    if( local != NULL )
    {

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
#endif
        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp );
        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local );
        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
__FUNCTION__, client->process->pid, client->trdid,
process_action_str( type ), pid, cycle );
#endif

}  // end process_sigaction()
789
/////////////////////////////////////////////////////////////////////////////////////
// This function blocks all threads of a local user process copy, by setting the
// THREAD_BLOCKED_GLOBAL bit in each thread descriptor registered in th_tbl[].
// When a target thread runs on another core than the calling thread, the target
// scheduler is asked (via the REQ_ACK flag and an IPI) to acknowledge the blocking,
// which proves the target thread is actually descheduled. The function busy-waits
// until all requested acknowledges have been received.
/////////////////////////////////////////////////////////////////////////////////////
// @ process : local pointer on the target user process descriptor.
/////////////////////////////////////////////////////////////////////////////////////
void process_block_threads( process_t * process )
{
    thread_t          * target;         // pointer on target thread
    thread_t          * this;           // pointer on calling thread
    uint32_t            ltid;           // index in process th_tbl[]
    uint32_t            count;          // requests counter
    volatile uint32_t   ack_count;      // acknowledges counter
                                        // volatile : decremented asynchronously by
                                        // remote schedulers via thread_set_req_ack()

    // get calling thread pointer
    this = CURRENT_THREAD;

#if DEBUG_PROCESS_SIGACTION
pid_t pid = process->pid;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

// check target process is an user process
assert( (LPID_FROM_PID( process->pid ) != 0 ),
"process %x is not an user process\n", process->pid );

    // get lock protecting process th_tbl[]
    rwlock_rd_acquire( &process->th_lock );

    // loop on target process local threads
    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    // - if the calling thread and the target thread are not running on the same
    //   core, we ask the target scheduler to acknowlege the blocking
    //   to be sure that the target thread is not running.
    // - if the calling thread and the target thread are running on the same core,
    //   we don't need confirmation from scheduler.
           
    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )                                 // thread exist
        {
            count++;

            // set the global blocked bit in target thread descriptor.
            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
 
            if( this->core->lid != target->core->lid )
            {
                // increment responses counter
                hal_atomic_add( (void*)&ack_count , 1 );

                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
                thread_set_req_ack( target , (uint32_t *)&ack_count );

                // force scheduling on target thread
                dev_pic_send_ipi( local_cxy , target->core->lid );
            }
        }
    }

    // release lock protecting process th_tbl[]
    rwlock_rd_release( &process->th_lock );

    // wait other threads acknowledges  TODO this could be improved...
    // busy-wait polling loop : ack_count is decremented by the remote
    // schedulers when they acknowledge the blocking (see thread_set_req_ack)
    while( 1 )
    {
        // exit when all scheduler acknowledges received
        if ( ack_count == 0 ) break;
   
        // wait 1000 cycles before retry
        hal_fixed_delay( 1000 );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
#endif

}  // end process_block_threads()
[409]871
[440]872/////////////////////////////////////////////////
873void process_delete_threads( process_t * process,
874                             xptr_t      client_xp )
[409]875{
[440]876    thread_t          * target;        // local pointer on target thread
877    xptr_t              target_xp;     // extended pointer on target thread
878    cxy_t               owner_cxy;     // owner process cluster
[409]879    uint32_t            ltid;          // index in process th_tbl
[440]880    uint32_t            count;         // threads counter
[409]881
[433]882    // get calling thread pointer
[409]883
[440]884    // get target process owner cluster
885    owner_cxy = CXY_FROM_PID( process->pid );
886
[438]887#if DEBUG_PROCESS_SIGACTION
[633]888thread_t * this  = CURRENT_THREAD;
889uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]890if( DEBUG_PROCESS_SIGACTION < cycle )
[625]891printk("\n[%s] thread[%x,%x] enter for process %x n cluster %x / cycle %d\n",
892__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
[433]893#endif
894
[564]895// check target process is an user process
[619]896assert( (LPID_FROM_PID( process->pid ) != 0),
897"process %x is not an user process\n", process->pid );
[564]898
[409]899    // get lock protecting process th_tbl[]
[583]900    rwlock_wr_acquire( &process->th_lock );
[409]901
[440]902    // loop on target process local threads                       
[416]903    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[440]904    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
[1]905    {
[409]906        target = process->th_tbl[ltid];
[1]907
[440]908        if( target != NULL )    // valid thread 
[1]909        {
[416]910            count++;
[440]911            target_xp = XPTR( local_cxy , target );
[1]912
[564]913            // main thread and client thread should not be deleted
[440]914            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
915                (client_xp) != target_xp )                           // not client thread
916            {
917                // mark target thread for delete and block it
918                thread_delete( target_xp , process->pid , false );   // not forced
919            }
[409]920        }
921    }
[1]922
[428]923    // release lock protecting process th_tbl[]
[583]924    rwlock_wr_release( &process->th_lock );
[407]925
[438]926#if DEBUG_PROCESS_SIGACTION
[433]927cycle = (uint32_t)hal_get_cycles();
[438]928if( DEBUG_PROCESS_SIGACTION < cycle )
[593]929printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
930__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
[433]931#endif
[407]932
[440]933}  // end process_delete_threads()
[409]934
[440]935///////////////////////////////////////////////////
936void process_unblock_threads( process_t * process )
[409]937{
[440]938    thread_t          * target;        // pointer on target thead
[409]939    uint32_t            ltid;          // index in process th_tbl
[440]940    uint32_t            count;         // requests counter
[409]941
[438]942#if DEBUG_PROCESS_SIGACTION
[633]943thread_t * this  = CURRENT_THREAD;
944pid_t      pid   = process->pid;
945uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]946if( DEBUG_PROCESS_SIGACTION < cycle )
[593]947printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]948__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]949#endif
950
[564]951// check target process is an user process
[619]952assert( ( LPID_FROM_PID( process->pid ) != 0 ),
953"process %x is not an user process\n", process->pid );
[564]954
[416]955    // get lock protecting process th_tbl[]
[564]956    rwlock_rd_acquire( &process->th_lock );
[416]957
[440]958    // loop on process threads to unblock all threads
[416]959    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[440]960    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
[409]961    {
[416]962        target = process->th_tbl[ltid];
[409]963
[440]964        if( target != NULL )             // thread found
[409]965        {
966            count++;
[440]967
968            // reset the global blocked bit in target thread descriptor.
969            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[1]970        }
971    }
972
[428]973    // release lock protecting process th_tbl[]
[564]974    rwlock_rd_release( &process->th_lock );
[407]975
[438]976#if DEBUG_PROCESS_SIGACTION
[433]977cycle = (uint32_t)hal_get_cycles();
[438]978if( DEBUG_PROCESS_SIGACTION < cycle )
[593]979printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
[583]980__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]981#endif
[1]982
[440]983}  // end process_unblock_threads()
[407]984
[1]985///////////////////////////////////////////////
986process_t * process_get_local_copy( pid_t pid )
987{
988    error_t        error;
[172]989    process_t    * process_ptr;   // local pointer on process
[23]990    xptr_t         process_xp;    // extended pointer on process
[1]991
992    cluster_t * cluster = LOCAL_CLUSTER;
993
[564]994#if DEBUG_PROCESS_GET_LOCAL_COPY
995thread_t * this = CURRENT_THREAD;
996uint32_t cycle = (uint32_t)hal_get_cycles();
997if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]998printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]999__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[564]1000#endif
1001
[1]1002    // get lock protecting local list of processes
[564]1003    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]1004
1005    // scan the local list of process descriptors to find the process
[23]1006    xptr_t  iter;
1007    bool_t  found = false;
1008    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
[1]1009    {
[23]1010        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[435]1011        process_ptr = GET_PTR( process_xp );
[23]1012        if( process_ptr->pid == pid )
[1]1013        {
1014            found = true;
1015            break;
1016        }
1017    }
1018
1019    // release lock protecting local list of processes
[564]1020    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]1021
[172]1022    // allocate memory for a new local process descriptor
[440]1023    // and initialise it from reference cluster if not found
[1]1024    if( !found )
1025    {
1026        // get extended pointer on reference process descriptor
[23]1027        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
[1]1028
[492]1029        assert( (ref_xp != XPTR_NULL) , "illegal pid\n" );
[23]1030
[1]1031        // allocate memory for local process descriptor
[23]1032        process_ptr = process_alloc();
[443]1033
[23]1034        if( process_ptr == NULL )  return NULL;
[1]1035
1036        // initialize local process descriptor copy
[23]1037        error = process_copy_init( process_ptr , ref_xp );
[443]1038
[1]1039        if( error ) return NULL;
1040    }
1041
[440]1042#if DEBUG_PROCESS_GET_LOCAL_COPY
[564]1043cycle = (uint32_t)hal_get_cycles();
[440]1044if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]1045printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
[583]1046__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
[440]1047#endif
1048
[23]1049    return process_ptr;
[1]1050
[409]1051}  // end process_get_local_copy()
1052
[436]1053////////////////////////////////////////////
1054pid_t process_get_ppid( xptr_t  process_xp )
1055{
1056    cxy_t       process_cxy;
1057    process_t * process_ptr;
1058    xptr_t      parent_xp;
1059    cxy_t       parent_cxy;
1060    process_t * parent_ptr;
1061
1062    // get process cluster and local pointer
1063    process_cxy = GET_CXY( process_xp );
1064    process_ptr = GET_PTR( process_xp );
1065
1066    // get pointers on parent process
[564]1067    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[436]1068    parent_cxy = GET_CXY( parent_xp );
1069    parent_ptr = GET_PTR( parent_xp );
1070
[564]1071    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[436]1072}
1073
[1]1074//////////////////////////////////////////////////////////////////////////////////////////
1075// File descriptor array related functions
1076//////////////////////////////////////////////////////////////////////////////////////////
1077
1078///////////////////////////////////////////
1079void process_fd_init( process_t * process )
1080{
1081    uint32_t fd;
1082
[610]1083    // initialize lock
[564]1084    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
[1]1085
[610]1086    // initialize number of open files
[23]1087    process->fd_array.current = 0;
1088
[1]1089    // initialize array
[23]1090    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1091    {
1092        process->fd_array.array[fd] = XPTR_NULL;
1093    }
1094}
[635]1095
[610]1096////////////////////////////////////////////////////
1097error_t process_fd_register( xptr_t      process_xp,
[407]1098                             xptr_t      file_xp,
1099                             uint32_t  * fdid )
[1]1100{
1101    bool_t    found;
[23]1102    uint32_t  id;
1103    xptr_t    xp;
[1]1104
[23]1105    // get reference process cluster and local pointer
[610]1106    process_t * process_ptr = GET_PTR( process_xp );
1107    cxy_t       process_cxy = GET_CXY( process_xp );
[23]1108
[610]1109// check client process is reference process
1110assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp ) ) ),
1111"client process must be reference process\n" );
1112
1113#if DEBUG_PROCESS_FD_REGISTER
1114thread_t * this  = CURRENT_THREAD;
1115uint32_t   cycle = (uint32_t)hal_get_cycles();
1116pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1117if( DEBUG_PROCESS_FD_REGISTER < cycle )
1118printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
1119__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1120#endif
1121
1122    // build extended pointer on lock protecting reference fd_array
1123    xptr_t lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1124
[23]1125    // take lock protecting reference fd_array
[610]1126        remote_queuelock_acquire( lock_xp );
[23]1127
[1]1128    found   = false;
1129
[23]1130    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
[1]1131    {
[610]1132        xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
[23]1133        if ( xp == XPTR_NULL )
[1]1134        {
[564]1135            // update reference fd_array
[610]1136            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
1137                hal_remote_atomic_add( XPTR( process_cxy , &process_ptr->fd_array.current ) , 1 );
[564]1138
1139            // exit
1140                        *fdid = id;
[1]1141            found = true;
1142            break;
1143        }
1144    }
1145
[610]1146    // release lock protecting fd_array
1147        remote_queuelock_release( lock_xp );
[1]1148
[610]1149#if DEBUG_PROCESS_FD_REGISTER
1150cycle = (uint32_t)hal_get_cycles();
1151if( DEBUG_PROCESS_FD_REGISTER < cycle )
1152printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
1153__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
1154#endif
1155
[428]1156    if ( !found ) return -1;
[1]1157    else          return 0;
1158
[610]1159}  // end process_fd_register()
1160
[172]1161////////////////////////////////////////////////
[23]1162xptr_t process_fd_get_xptr( process_t * process,
[407]1163                            uint32_t    fdid )
[1]1164{
[23]1165    xptr_t  file_xp;
[564]1166    xptr_t  lock_xp;
[1]1167
[23]1168    // access local copy of process descriptor
[407]1169    file_xp = process->fd_array.array[fdid];
[1]1170
[23]1171    if( file_xp == XPTR_NULL )
1172    {
1173        // get reference process cluster and local pointer
1174        xptr_t      ref_xp  = process->ref_xp;
1175        cxy_t       ref_cxy = GET_CXY( ref_xp );
[435]1176        process_t * ref_ptr = GET_PTR( ref_xp );
[1]1177
[564]1178        // build extended pointer on lock protecting reference fd_array
1179        lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock );
1180
1181        // take lock protecting reference fd_array
1182            remote_queuelock_acquire( lock_xp );
1183
[23]1184        // access reference process descriptor
[564]1185        file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
[1]1186
[23]1187        // update local fd_array if found
[564]1188        if( file_xp != XPTR_NULL )  process->fd_array.array[fdid] = file_xp;
1189       
1190        // release lock protecting reference fd_array
1191            remote_queuelock_release( lock_xp );
[23]1192    }
[1]1193
[23]1194    return file_xp;
[1]1195
[407]1196}  // end process_fd_get_xptr()
1197
[1]1198///////////////////////////////////////////
1199void process_fd_remote_copy( xptr_t dst_xp,
1200                             xptr_t src_xp )
1201{
1202    uint32_t fd;
1203    xptr_t   entry;
1204
1205    // get cluster and local pointer for src fd_array
1206    cxy_t        src_cxy = GET_CXY( src_xp );
[435]1207    fd_array_t * src_ptr = GET_PTR( src_xp );
[1]1208
1209    // get cluster and local pointer for dst fd_array
1210    cxy_t        dst_cxy = GET_CXY( dst_xp );
[435]1211    fd_array_t * dst_ptr = GET_PTR( dst_xp );
[1]1212
1213    // get the remote lock protecting the src fd_array
[564]1214        remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );
[1]1215
[428]1216    // loop on all fd_array entries
1217    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1218        {
[564]1219                entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );
[1]1220
1221                if( entry != XPTR_NULL )
1222                {
[459]1223            // increment file descriptor refcount
[1]1224            vfs_file_count_up( entry );
1225
1226                        // copy entry in destination process fd_array
[564]1227                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
[1]1228                }
1229        }
1230
1231    // release lock on source process fd_array
[564]1232        remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );
[1]1233
[407]1234}  // end process_fd_remote_copy()
1235
[564]1236
1237////////////////////////////////////
1238bool_t process_fd_array_full( void )
1239{
1240    // get extended pointer on reference process
1241    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
1242
1243    // get reference process cluster and local pointer
1244    process_t * ref_ptr = GET_PTR( ref_xp );
1245    cxy_t       ref_cxy = GET_CXY( ref_xp );
1246
1247    // get number of open file descriptors from reference fd_array
1248    uint32_t current = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
1249
1250        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
1251}
1252
1253
[1]1254////////////////////////////////////////////////////////////////////////////////////
1255//  Thread related functions
1256////////////////////////////////////////////////////////////////////////////////////
1257
1258/////////////////////////////////////////////////////
1259error_t process_register_thread( process_t * process,
1260                                 thread_t  * thread,
1261                                 trdid_t   * trdid )
1262{
[472]1263    ltid_t         ltid;
1264    bool_t         found = false;
1265 
[564]1266// check arguments
1267assert( (process != NULL) , "process argument is NULL" );
1268assert( (thread != NULL) , "thread argument is NULL" );
[1]1269
[564]1270    // get the lock protecting th_tbl for all threads
1271    // but the idle thread executing kernel_init (cannot yield)
1272    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
[1]1273
[583]1274    // scan th_tbl
[564]1275    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
[1]1276    {
1277        if( process->th_tbl[ltid] == NULL )
1278        {
1279            found = true;
1280            break;
1281        }
1282    }
1283
1284    if( found )
1285    {
1286        // register thread in th_tbl[]
1287        process->th_tbl[ltid] = thread;
1288        process->th_nr++;
1289
1290        // returns trdid
1291        *trdid = TRDID( local_cxy , ltid );
1292    }
1293
[583]1294    // release the lock protecting th_tbl
[564]1295    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
[428]1296
[564]1297    return (found) ? 0 : 0xFFFFFFFF;
[204]1298
1299}  // end process_register_thread()
1300
[625]1301///////////////////////////////////////////////////
1302uint32_t process_remove_thread( thread_t * thread )
[1]1303{
[443]1304    uint32_t count;  // number of threads in local process descriptor
1305
[625]1306// check thread
1307assert( (thread != NULL) , "thread argument is NULL" );
1308
[1]1309    process_t * process = thread->process;
1310
1311    // get thread local index
1312    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
[564]1313   
1314    // get the lock protecting th_tbl[]
1315    rwlock_wr_acquire( &process->th_lock );
[428]1316
[583]1317    // get number of threads
[443]1318    count = process->th_nr;
[428]1319
[564]1320// check th_nr value
[624]1321assert( (count > 0) , "process th_nr cannot be 0" );
[443]1322
[1]1323    // remove thread from th_tbl[]
1324    process->th_tbl[ltid] = NULL;
[450]1325    process->th_nr = count-1;
[1]1326
[583]1327    // release lock protecting th_tbl
[564]1328    rwlock_wr_release( &process->th_lock );
[428]1329
[625]1330    return count;
[443]1331
[450]1332}  // end process_remove_thread()
[204]1333
/////////////////////////////////////////////////////////////////////////////////////
// This function implements the fork() system call server side : it creates in the
// local cluster a child process descriptor and its main thread, both copied from
// the parent process / parent thread identified by the two extended pointers.
// The parent process must be the reference process. After the VMM copy, the COW
// flag is set on the DATA / ANON / REMOTE vsegs of both parent and child, and the
// child is registered in the parent children list. The child may also inherit
// the TXT terminal ownership.
/////////////////////////////////////////////////////////////////////////////////////
// @ parent_process_xp : extended pointer on parent (reference) process descriptor.
// @ parent_thread_xp  : extended pointer on parent thread descriptor.
// @ child_pid         : [out] child process identifier.
// @ child_thread      : [out] local pointer on child main thread.
// @ returns 0 if success / returns -1 on any allocation or copy failure.
/////////////////////////////////////////////////////////////////////////////////////
error_t process_make_fork( xptr_t      parent_process_xp,
                           xptr_t      parent_thread_xp,
                           pid_t     * child_pid,
                           thread_t ** child_thread )
{
    process_t * process;         // local pointer on child process descriptor
    thread_t  * thread;          // local pointer on child thread descriptor
    pid_t       new_pid;         // process identifier for child process
    pid_t       parent_pid;      // process identifier for parent process
    xptr_t      ref_xp;          // extended pointer on reference process
    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
    error_t     error;

    // get cluster and local pointer for parent process
    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
    process_t * parent_process_ptr = GET_PTR( parent_process_xp );

    // get parent process PID and extended pointer on .elf file
    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));

    // get extended pointer on reference process
    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );

// check parent process is the reference process
assert( (parent_process_xp == ref_xp ) ,
"parent process must be the reference process" );

#if DEBUG_PROCESS_MAKE_FORK
uint32_t   cycle;
thread_t * this  = CURRENT_THREAD;
trdid_t    trdid = this->trdid;
pid_t      pid   = this->process->pid;
#endif

#if( DEBUG_PROCESS_MAKE_FORK & 1 )
cycle   = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
__FUNCTION__, pid, trdid, local_cxy, cycle );
#endif

    // allocate a process descriptor
    process = process_alloc();

    if( process == NULL )
    {
        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
        __FUNCTION__, local_cxy ); 
        return -1;
    }

    // allocate a child PID from local cluster
    error = cluster_pid_alloc( process , &new_pid );
    if( error ) 
    {
        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
        __FUNCTION__, local_cxy ); 
        process_free( process );
        return -1;
    }

#if( DEBUG_PROCESS_MAKE_FORK & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[%s] thread[%x,%x] allocated child_process %x / cycle %d\n",
__FUNCTION__, pid, trdid, new_pid, cycle );
#endif

    // initializes child process descriptor from parent process descriptor
    error = process_reference_init( process,
                                    new_pid,
                                    parent_process_xp );
    if( error ) 
    {
        // NOTE(review): unlike the vmm_fork_copy / thread_user_fork error paths
        // below, this path does not call cluster_pid_release( new_pid ) —
        // possible PID leak ; confirm against cluster_pid_alloc/release semantics.
        printk("\n[ERROR] in %s : cannot initialize child process in cluster %x\n", 
        __FUNCTION__, local_cxy ); 
        process_free( process );
        return -1;
    }

#if( DEBUG_PROCESS_MAKE_FORK & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
__FUNCTION__, pid, trdid, new_pid, cycle );
#endif

    // copy VMM from parent descriptor to child descriptor
    error = vmm_fork_copy( process,
                           parent_process_xp );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
        __FUNCTION__, local_cxy ); 
        process_free( process );
        cluster_pid_release( new_pid );
        return -1;
    }

#if( DEBUG_PROCESS_MAKE_FORK & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
__FUNCTION__, pid, trdid, cycle );
hal_vmm_display( XPTR( local_cxy , process ) , true );
#endif

    // if parent_process is INIT, or if parent_process is the TXT owner,
    // the child_process becomes the owner of its TXT terminal
    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
    {
        process_txt_set_ownership( XPTR( local_cxy , process ) );

#if( DEBUG_PROCESS_MAKE_FORK & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership / cycle %d\n",
__FUNCTION__ , pid, trdid, new_pid, cycle );
#endif

    }

    // update extended pointer on .elf file
    process->vfs_bin_xp = vfs_bin_xp;

    // create child thread descriptor from parent thread descriptor
    error = thread_user_fork( parent_thread_xp,
                              process,
                              &thread );
    if( error )
    {
        // NOTE(review): this path does not undo vmm_fork_copy nor the TXT
        // ownership transfer above — confirm process_free() handles that cleanup.
        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
        __FUNCTION__, local_cxy ); 
        process_free( process );
        cluster_pid_release( new_pid );
        return -1;
    }

// check main thread LTID
assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
"main thread must have LTID == 0" );

#if( DEBUG_PROCESS_MAKE_FORK & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
__FUNCTION__, pid, trdid, thread, cycle );
#endif

    // set COW flag in DATA, ANON, REMOTE vsegs in parent process VMM
    // this includes all parent process copies in all clusters
    if( parent_process_cxy == local_cxy )   // reference is local
    {
        vmm_set_cow( parent_process_ptr );
    }
    else                                    // reference is remote
    {
        rpc_vmm_set_cow_client( parent_process_cxy,
                                parent_process_ptr );
    }

    // set COW flag in DATA, ANON, REMOTE vsegs for child process VMM
    vmm_set_cow( process );
 
#if( DEBUG_PROCESS_MAKE_FORK & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child / cycle %d\n",
__FUNCTION__, pid, trdid, cycle );
#endif

    // get extended pointers on parent children_root, children_lock and children_nr
    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );

    // register process in parent children list
    remote_queuelock_acquire( children_lock_xp );
        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
        hal_remote_atomic_add( children_nr_xp , 1 );
    remote_queuelock_release( children_lock_xp );

    // return success
    *child_thread = thread;
    *child_pid    = new_pid;

#if DEBUG_PROCESS_MAKE_FORK
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_FORK < cycle )
printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
__FUNCTION__, pid, trdid, new_pid, cycle );
#endif

    return 0;

}   // end process_make_fork()
[408]1532
/////////////////////////////////////////////////////
// Implement the exec() system call for the calling process :
// the process keeps its PID and its fd_array, but all threads other
// than the calling main thread are deleted, the user VMM is reset,
// and the new <exec_info->path> .elf file is loaded in the address
// space. On success this function does NOT return : the calling
// thread is updated by thread_user_exec() and jumps to user code.
// Returns -1 on failure.
/////////////////////////////////////////////////////
error_t process_make_exec( exec_info_t  * exec_info )
{
    thread_t       * thread;                  // local pointer on this thread
    process_t      * process;                 // local pointer on this process
    pid_t            pid;                     // this process identifier
    xptr_t           ref_xp;                  // reference process for this process
    error_t          error;                   // value returned by called functions
    char           * path;                    // path to .elf file
    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
    uint32_t         file_id;                 // file index in fd_array
    uint32_t         args_nr;                 // number of main thread arguments
    char          ** args_pointers;           // array of pointers on main thread arguments

    // get calling thread, process, pid and ref_xp
    thread  = CURRENT_THREAD;
    process = thread->process;
    pid     = process->pid;
    ref_xp  = process->ref_xp;

    // get relevant infos from exec_info
    path          = exec_info->path;
    args_nr       = exec_info->args_nr;
    args_pointers = exec_info->args_pointers;

#if DEBUG_PROCESS_MAKE_EXEC
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n",
__FUNCTION__, pid, thread->trdid, path, cycle );
#endif

    // open the file identified by <path>
    // file_xp / file_id are initialized to "invalid" before the call
    file_xp = XPTR_NULL;
    file_id = 0xFFFFFFFF;
    error   = vfs_open( process->vfs_root_xp,
                        path,
                        ref_xp,
                        O_RDONLY,
                        0,
                        &file_xp,
                        &file_id );
    if( error )
    {
        printk("\n[ERROR] in %s : failed to open file <%s>\n", __FUNCTION__ , path );
        return -1;
    }

#if (DEBUG_PROCESS_MAKE_EXEC & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n",
__FUNCTION__, pid, thread->trdid, path, cycle );
#endif

    // delete all threads other than this main thread in all clusters
    // NOTE(review): from this point on the old process image is being
    // destroyed ; the "FIXME restore old process VMM" error paths below
    // cannot actually restore it on failure.
    process_sigaction( pid , DELETE_ALL_THREADS );

#if (DEBUG_PROCESS_MAKE_EXEC & 1)
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] deleted existing threads / cycle %d\n",
__FUNCTION__, pid, thread->trdid, cycle );
#endif

    // reset calling process VMM (remove all user vsegs)
    vmm_user_reset( process );

#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] completed VMM reset / cycle %d\n",
__FUNCTION__, pid, thread->trdid, cycle );
#endif

    // re-initialize the VMM (args/envs vsegs registration)
    error = vmm_user_init( process );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
        vfs_close( file_xp , file_id );
        // FIXME restore old process VMM [AG]
        return -1;
    }
   
#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] registered args/envs vsegs / cycle %d\n",
__FUNCTION__, pid, thread->trdid, cycle );
#endif

    // register code & data vsegs as well as entry-point in process VMM,
    // and register extended pointer on .elf file in process descriptor
    error = elf_load_process( file_xp , process );
    if( error )
    {
        printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
        vfs_close( file_xp , file_id );
        // FIXME restore old process VMM [AG]
        return -1;
    }

#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_MAKE_EXEC < cycle )
printk("\n[%s] thread[%x,%x] registered code/data vsegs / cycle %d\n",
__FUNCTION__, pid, thread->trdid, cycle );
#endif

    // update the existing main thread descriptor... and jump to user code
    // (on success thread_user_exec() does not return)
    error = thread_user_exec( (void *)process->vmm.entry_point,
                              args_nr,
                              args_pointers );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot update main thread for %s\n", __FUNCTION__ , path );
        vfs_close( file_xp , file_id );
        // FIXME restore old process VMM
        return -1;
    }

    // unreachable : thread_user_exec() jumped to user code on success
    assert( false, "we should not execute this code");
 
    return 0;

}  // end process_make_exec()
1660
[457]1661
[623]1662////////////////////////////////////////////////
1663void process_zero_create( process_t   * process,
1664                          boot_info_t * info )
[428]1665{
[580]1666    error_t error;
1667    pid_t   pid;
[428]1668
[438]1669#if DEBUG_PROCESS_ZERO_CREATE
[433]1670uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1671if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]1672printk("\n[%s] enter / cluster %x / cycle %d\n",
[564]1673__FUNCTION__, local_cxy, cycle );
[433]1674#endif
[428]1675
[624]1676    // get pointer on VMM
1677    vmm_t * vmm = &process->vmm;
1678
[580]1679    // get PID from local cluster manager for this kernel process
1680    error = cluster_pid_alloc( process , &pid );
1681
1682    if( error || (LPID_FROM_PID( pid ) != 0) )
1683    {
1684        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
1685        __FUNCTION__ , local_cxy, pid );
1686        hal_core_sleep();
1687    }
1688
[635]1689#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1690if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1691printk("\n[%s] allocated pid %x in cluster %x\n", __FUNCTION__, pid, local_cxy );
1692#endif
1693
[428]1694    // initialize PID, REF_XP, PARENT_XP, and STATE
[580]1695    // the kernel process_zero is its own parent_process,
1696    // reference_process, and owner_process, and cannot be killed...
1697    process->pid        = pid;
[433]1698    process->ref_xp     = XPTR( local_cxy , process );
[443]1699    process->owner_xp   = XPTR( local_cxy , process );
[580]1700    process->parent_xp  = XPTR( local_cxy , process );
[433]1701    process->term_state = 0;
[428]1702
[635]1703    // initialize VSL as empty
[624]1704    vmm->vsegs_nr = 0;
1705        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[623]1706
[635]1707#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1708if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1709printk("\n[%s] initialized VSL empty in cluster %x\n", __FUNCTION__, local_cxy );
1710#endif
1711
1712    // initialize GPT as empty
[624]1713    error = hal_gpt_create( &vmm->gpt );
[635]1714
[624]1715    if( error ) 
1716    {
1717        printk("\n[PANIC] in %s : cannot create empty GPT\n", __FUNCTION__ );
1718        hal_core_sleep();
1719    }
1720
[635]1721#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1722if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1723printk("\n[%s] initialized GPT empty in cluster %x\n", __FUNCTION__, local_cxy );
1724#endif
1725
[625]1726    // initialize VSL and GPT locks
[629]1727    remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
[624]1728   
1729    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
1730    error = hal_vmm_kernel_init( info );
[635]1731
[624]1732    if( error ) 
1733    {
1734        printk("\n[PANIC] in %s : cannot create kernel vsegs in VMM\n", __FUNCTION__ );
1735        hal_core_sleep();
1736    }
1737
[635]1738#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1739if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1740printk("\n[%s] initialized hal specific VMM in cluster%x\n", __FUNCTION__, local_cxy );
1741#endif
1742
[564]1743    // reset th_tbl[] array and associated fields
[428]1744    uint32_t i;
[564]1745    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[428]1746        {
1747        process->th_tbl[i] = NULL;
1748    }
1749    process->th_nr  = 0;
[564]1750    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[428]1751
[635]1752#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1753if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1754printk("\n[%s] initialized th_tbl[] in cluster%x\n", __FUNCTION__, local_cxy );
1755#endif
[564]1756
[428]1757    // reset children list as empty
1758    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
1759    process->children_nr = 0;
[564]1760    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
1761                           LOCK_PROCESS_CHILDREN );
[428]1762
[635]1763#if (DEBUG_PROCESS_ZERO_CREATE & 1)
1764if( DEBUG_PROCESS_ZERO_CREATE < cycle )
1765printk("\n[%s] initialized children list in cluster%x\n", __FUNCTION__, local_cxy );
1766#endif
1767
[580]1768    // register kernel process in cluster manager local_list
1769    cluster_process_local_link( process );
1770   
[428]1771        hal_fence();
1772
[438]1773#if DEBUG_PROCESS_ZERO_CREATE
[433]1774cycle = (uint32_t)hal_get_cycles();
[438]1775if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]1776printk("\n[%s] exit / cluster %x / cycle %d\n",
[564]1777__FUNCTION__, local_cxy, cycle );
[433]1778#endif
[428]1779
[610]1780}  // end process_zero_create()
[428]1781
[564]1782////////////////////////////////
[485]1783void process_init_create( void )
[1]1784{
[428]1785    process_t      * process;       // local pointer on process descriptor
[409]1786    pid_t            pid;           // process_init identifier
1787    thread_t       * thread;        // local pointer on main thread
1788    pthread_attr_t   attr;          // main thread attributes
1789    lid_t            lid;           // selected core local index for main thread
[457]1790    xptr_t           file_xp;       // extended pointer on .elf file descriptor
1791    uint32_t         file_id;       // file index in fd_array
[409]1792    error_t          error;
[1]1793
[438]1794#if DEBUG_PROCESS_INIT_CREATE
[610]1795thread_t * this = CURRENT_THREAD;
[433]1796uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1797if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1798printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1799__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1800#endif
[1]1801
[408]1802    // allocates memory for process descriptor from local cluster
1803        process = process_alloc(); 
[635]1804
1805
[625]1806    if( process == NULL )
1807    {
1808        printk("\n[PANIC] in %s : cannot allocate process\n", __FUNCTION__ );
1809        hal_core_sleep();
1810    }
[101]1811
[610]1812    // set the CWD and VFS_ROOT fields in process descriptor
1813    process->cwd_xp      = process_zero.vfs_root_xp;
1814    process->vfs_root_xp = process_zero.vfs_root_xp;
1815
[409]1816    // get PID from local cluster
[416]1817    error = cluster_pid_alloc( process , &pid );
[625]1818    if( error ) 
1819    {
1820        printk("\n[PANIC] in %s : cannot allocate PID\n", __FUNCTION__ );
1821        hal_core_sleep();
1822    }
1823    if( pid != 1 ) 
1824    {
1825        printk("\n[PANIC] in %s : process PID must be 0x1\n", __FUNCTION__ );
1826        hal_core_sleep();
1827    }
[408]1828
[409]1829    // initialize process descriptor / parent is local process_zero
[625]1830    error = process_reference_init( process,
1831                                    pid,
1832                                    XPTR( local_cxy , &process_zero ) ); 
1833    if( error )
1834    {
1835        printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ );
1836        hal_core_sleep();
1837    }
[408]1838
[564]1839#if(DEBUG_PROCESS_INIT_CREATE & 1)
1840if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1841printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
1842__FUNCTION__, this->process->pid, this->trdid );
[564]1843#endif
1844
[457]1845    // open the file identified by CONFIG_PROCESS_INIT_PATH
1846    file_xp = XPTR_NULL;
1847    file_id = -1;
[610]1848        error   = vfs_open( process->vfs_root_xp,
[457]1849                            CONFIG_PROCESS_INIT_PATH,
[610]1850                        XPTR( local_cxy , process ),
[457]1851                            O_RDONLY,
1852                            0,
1853                            &file_xp,
1854                            &file_id );
[625]1855    if( error )
1856    {
1857        printk("\n[PANIC] in %s : cannot open file <%s>\n",
1858         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
1859        hal_core_sleep();
1860    }
[457]1861
[564]1862#if(DEBUG_PROCESS_INIT_CREATE & 1)
1863if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1864printk("\n[%s] thread[%x,%x] open .elf file decriptor\n",
1865__FUNCTION__, this->process->pid, this->trdid );
[564]1866#endif
1867
[625]1868    // register "code" and "data" vsegs as well as entry-point
[409]1869    // in process VMM, using information contained in the elf file.
[457]1870        error = elf_load_process( file_xp , process );
[101]1871
[625]1872    if( error ) 
1873    {
1874        printk("\n[PANIC] in %s : cannot access file <%s>\n",
1875         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
1876        hal_core_sleep();
1877    }
[457]1878
[625]1879
[564]1880#if(DEBUG_PROCESS_INIT_CREATE & 1)
1881if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1882printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
1883__FUNCTION__, this->process->pid, this->trdid );
[564]1884#endif
1885
[625]1886#if (DEBUG_PROCESS_INIT_CREATE & 1)
[635]1887hal_vmm_display( XPTR( local_cxy , process ) , true );
[625]1888#endif
1889
[428]1890    // get extended pointers on process_zero children_root, children_lock
1891    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
1892    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
1893
[564]1894    // take lock protecting kernel process children list
1895    remote_queuelock_acquire( children_lock_xp );
1896
[428]1897    // register process INIT in parent local process_zero
1898        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1899        hal_atomic_add( &process_zero.children_nr , 1 );
1900
[564]1901    // release lock protecting kernel process children list
1902    remote_queuelock_release( children_lock_xp );
1903
1904#if(DEBUG_PROCESS_INIT_CREATE & 1)
1905if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1906printk("\n[%s] thread[%x,%x] registered init process in parent\n",
1907__FUNCTION__, this->process->pid, this->trdid );
[564]1908#endif
1909
[409]1910    // select a core in local cluster to execute the main thread
1911    lid  = cluster_select_local_core();
1912
1913    // initialize pthread attributes for main thread
1914    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
1915    attr.cxy        = local_cxy;
1916    attr.lid        = lid;
1917
1918    // create and initialize thread descriptor
1919        error = thread_user_create( pid,
1920                                (void *)process->vmm.entry_point,
1921                                NULL,
1922                                &attr,
1923                                &thread );
[1]1924
[625]1925    if( error )
1926    {
1927        printk("\n[PANIC] in %s : cannot create main thread\n", __FUNCTION__  );
1928        hal_core_sleep();
1929    }
1930    if( thread->trdid != 0 )
1931    {
1932        printk("\n[PANIC] in %s : bad main thread trdid\n", __FUNCTION__  );
1933        hal_core_sleep();
1934    }
[428]1935
[564]1936#if(DEBUG_PROCESS_INIT_CREATE & 1)
1937if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1938printk("\n[%s] thread[%x,%x] created main thread\n",
1939__FUNCTION__, this->process->pid, this->trdid );
[564]1940#endif
1941
[409]1942    // activate thread
1943        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
1944
[124]1945    hal_fence();
[1]1946
[438]1947#if DEBUG_PROCESS_INIT_CREATE
[433]1948cycle = (uint32_t)hal_get_cycles();
[438]1949if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1950printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
1951__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1952#endif
[409]1953
[204]1954}  // end process_init_create()
1955
[428]1956/////////////////////////////////////////
1957void process_display( xptr_t process_xp )
1958{
1959    process_t   * process_ptr;
1960    cxy_t         process_cxy;
[443]1961
[428]1962    xptr_t        parent_xp;       // extended pointer on parent process
1963    process_t   * parent_ptr;
1964    cxy_t         parent_cxy;
1965
[443]1966    xptr_t        owner_xp;        // extended pointer on owner process
1967    process_t   * owner_ptr;
1968    cxy_t         owner_cxy;
1969
[428]1970    pid_t         pid;
1971    pid_t         ppid;
[580]1972    lpid_t        lpid;
[428]1973    uint32_t      state;
1974    uint32_t      th_nr;
1975
[443]1976    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
1977    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
1978    chdev_t     * txt_chdev_ptr;
1979    cxy_t         txt_chdev_cxy;
1980    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
[428]1981
1982    xptr_t        elf_file_xp;     // extended pointer on .elf file
1983    cxy_t         elf_file_cxy;
1984    vfs_file_t  * elf_file_ptr;
1985    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
1986
1987    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
1988    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
1989
1990    // get cluster and local pointer on process
1991    process_ptr = GET_PTR( process_xp );
1992    process_cxy = GET_CXY( process_xp );
1993
[580]1994    // get process PID, LPID, and state
[564]1995    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[580]1996    lpid  = LPID_FROM_PID( pid );
[564]1997    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
[428]1998
[580]1999    // get process PPID
[564]2000    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[428]2001    parent_cxy = GET_CXY( parent_xp );
2002    parent_ptr = GET_PTR( parent_xp );
[564]2003    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]2004
2005    // get number of threads
[564]2006    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
[428]2007
[443]2008    // get pointers on owner process descriptor
[564]2009    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
[443]2010    owner_cxy = GET_CXY( owner_xp );
2011    owner_ptr = GET_PTR( owner_xp );
[428]2012
[580]2013    // get process TXT name and .elf name
2014    if( lpid )                                   // user process
2015    {
[443]2016
[580]2017        // get extended pointer on file descriptor associated to TXT_RX
2018        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
[428]2019
[580]2020        assert( (txt_file_xp != XPTR_NULL) ,
[624]2021        "process must be attached to one TXT terminal" ); 
[443]2022
[580]2023        // get TXT_RX chdev pointers
2024        txt_chdev_xp  = chdev_from_file( txt_file_xp );
2025        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
2026        txt_chdev_ptr = GET_PTR( txt_chdev_xp );
2027
2028        // get TXT_RX name and ownership
2029        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
2030                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
[428]2031   
[580]2032        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
2033                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
[428]2034
[580]2035        // get process .elf name
2036        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
2037        elf_file_cxy  = GET_CXY( elf_file_xp );
2038        elf_file_ptr  = GET_PTR( elf_file_xp );
2039        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
2040        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
2041    }
2042    else                                         // kernel process_zero
2043    {
2044        // TXT name and .elf name are not registered in kernel process_zero
2045        strcpy( txt_name , "txt0_rx" );
2046        txt_owner_xp = process_xp; 
2047        strcpy( elf_name , "kernel.elf" );
2048    }
2049
[428]2050    // display process info
[443]2051    if( txt_owner_xp == process_xp )
[428]2052    {
[581]2053        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
2054        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]2055    }
2056    else
2057    {
[581]2058        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
2059        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]2060    }
2061}  // end process_display()
2062
2063
2064////////////////////////////////////////////////////////////////////////////////////////
2065//     Terminals related functions
2066////////////////////////////////////////////////////////////////////////////////////////
2067
[581]2068//////////////////////////////////
[485]2069uint32_t process_txt_alloc( void )
[428]2070{
2071    uint32_t  index;       // TXT terminal index
2072    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
2073    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
2074    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
2075    xptr_t    root_xp;     // extended pointer on owner field in chdev
2076
2077    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
2078    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
2079    {
2080        // get pointers on TXT_RX[index]
2081        chdev_xp  = chdev_dir.txt_rx[index];
2082        chdev_cxy = GET_CXY( chdev_xp );
2083        chdev_ptr = GET_PTR( chdev_xp );
2084
2085        // get extended pointer on root of attached process
2086        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2087
2088        // return free TXT index if found
2089        if( xlist_is_empty( root_xp ) ) return index; 
2090    }
2091
[492]2092    assert( false , "no free TXT terminal found" );
[428]2093
2094    return -1;
2095
2096} // end process_txt_alloc()
2097
2098/////////////////////////////////////////////
2099void process_txt_attach( process_t * process,
2100                         uint32_t    txt_id )
2101{
2102    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2103    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2104    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2105    xptr_t      root_xp;      // extended pointer on list root in chdev
2106    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2107
[564]2108// check process is in owner cluster
2109assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
2110"process descriptor not in owner cluster" );
[428]2111
[564]2112// check terminal index
2113assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
2114"illegal TXT terminal index" );
[428]2115
2116    // get pointers on TXT_RX[txt_id] chdev
2117    chdev_xp  = chdev_dir.txt_rx[txt_id];
2118    chdev_cxy = GET_CXY( chdev_xp );
2119    chdev_ptr = GET_PTR( chdev_xp );
2120
2121    // get extended pointer on root & lock of attached process list
2122    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2123    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2124
[564]2125    // get lock protecting list of processes attached to TXT
2126    remote_busylock_acquire( lock_xp );
2127
[428]2128    // insert process in attached process list
2129    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
2130
[564]2131    // release lock protecting list of processes attached to TXT
2132    remote_busylock_release( lock_xp );
2133
[446]2134#if DEBUG_PROCESS_TXT
[610]2135thread_t * this = CURRENT_THREAD;
[457]2136uint32_t cycle = (uint32_t)hal_get_cycles();
[446]2137if( DEBUG_PROCESS_TXT < cycle )
[610]2138printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
2139__FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle );
[433]2140#endif
[428]2141
2142} // end process_txt_attach()
2143
[436]2144/////////////////////////////////////////////
2145void process_txt_detach( xptr_t  process_xp )
[428]2146{
[436]2147    process_t * process_ptr;  // local pointer on process in owner cluster
2148    cxy_t       process_cxy;  // process owner cluster
2149    pid_t       process_pid;  // process identifier
2150    xptr_t      file_xp;      // extended pointer on stdin file
[428]2151    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2152    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2153    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2154    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2155
[436]2156    // get process cluster, local pointer, and PID
2157    process_cxy = GET_CXY( process_xp );
2158    process_ptr = GET_PTR( process_xp );
[564]2159    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2160
[564]2161// check process descriptor in owner cluster
2162assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
2163"process descriptor not in owner cluster" );
[436]2164
2165    // release TXT ownership (does nothing if not TXT owner)
2166    process_txt_transfer_ownership( process_xp );
[428]2167
[625]2168    // get extended pointer on process stdin pseudo file
[564]2169    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[436]2170
2171    // get pointers on TXT_RX chdev
2172    chdev_xp  = chdev_from_file( file_xp );
[428]2173    chdev_cxy = GET_CXY( chdev_xp );
2174    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
2175
[436]2176    // get extended pointer on lock protecting attached process list
[428]2177    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2178
[564]2179    // get lock protecting list of processes attached to TXT
2180    remote_busylock_acquire( lock_xp );
2181
[428]2182    // unlink process from attached process list
[436]2183    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
2184
[564]2185    // release lock protecting list of processes attached to TXT
2186    remote_busylock_release( lock_xp );
2187
[446]2188#if DEBUG_PROCESS_TXT
[610]2189thread_t * this = CURRENT_THREAD;
[457]2190uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2191uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[446]2192if( DEBUG_PROCESS_TXT < cycle )
[625]2193printk("\n[%s] thread[%x,%x] detached process %x from TXT%d / cycle %d\n",
[610]2194__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
[433]2195#endif
[428]2196
2197} // end process_txt_detach()
2198
2199///////////////////////////////////////////////////
2200void process_txt_set_ownership( xptr_t process_xp )
2201{
2202    process_t * process_ptr;
2203    cxy_t       process_cxy;
[436]2204    pid_t       process_pid;
[428]2205    xptr_t      file_xp;
2206    xptr_t      txt_xp;     
2207    chdev_t   * txt_ptr;
2208    cxy_t       txt_cxy;
2209
[436]2210    // get pointers on process in owner cluster
[428]2211    process_cxy = GET_CXY( process_xp );
[435]2212    process_ptr = GET_PTR( process_xp );
[564]2213    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2214
2215    // check owner cluster
[492]2216    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[624]2217    "process descriptor not in owner cluster" );
[436]2218
[428]2219    // get extended pointer on stdin pseudo file
[564]2220    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2221
2222    // get pointers on TXT chdev
2223    txt_xp  = chdev_from_file( file_xp );
2224    txt_cxy = GET_CXY( txt_xp );
[435]2225    txt_ptr = GET_PTR( txt_xp );
[428]2226
2227    // set owner field in TXT chdev
[564]2228    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
[428]2229
[446]2230#if DEBUG_PROCESS_TXT
[610]2231thread_t * this = CURRENT_THREAD;
[457]2232uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2233uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[446]2234if( DEBUG_PROCESS_TXT < cycle )
[625]2235printk("\n[%s] thread[%x,%x] give TXT%d ownership to process %x / cycle %d\n",
[610]2236__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
[436]2237#endif
2238
[428]2239}  // end process_txt_set ownership()
2240
[436]2241////////////////////////////////////////////////////////
2242void process_txt_transfer_ownership( xptr_t process_xp )
[428]2243{
[436]2244    process_t * process_ptr;     // local pointer on process releasing ownership
2245    cxy_t       process_cxy;     // process cluster
2246    pid_t       process_pid;     // process identifier
[428]2247    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
2248    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
[433]2249    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
2250    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
2251    uint32_t    txt_id;          // TXT_RX channel
[428]2252    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
2253    xptr_t      root_xp;         // extended pointer on root of attached process list
[436]2254    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
[428]2255    xptr_t      iter_xp;         // iterator for xlist
2256    xptr_t      current_xp;      // extended pointer on current process
[625]2257    bool_t      found;
[428]2258
[457]2259#if DEBUG_PROCESS_TXT
[610]2260thread_t * this  = CURRENT_THREAD;
2261uint32_t   cycle;
[457]2262#endif
2263
[625]2264    // get pointers on target process
[428]2265    process_cxy = GET_CXY( process_xp );
[435]2266    process_ptr = GET_PTR( process_xp );
[564]2267    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2268
[625]2269// check owner cluster
2270assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2271"process descriptor not in owner cluster" );
[436]2272
[428]2273    // get extended pointer on stdin pseudo file
[564]2274    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2275
2276    // get pointers on TXT chdev
2277    txt_xp  = chdev_from_file( file_xp );
2278    txt_cxy = GET_CXY( txt_xp );
[433]2279    txt_ptr = GET_PTR( txt_xp );
[428]2280
[625]2281    // get relevant infos from chdev descriptor
[564]2282    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[625]2283    txt_id   = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[428]2284
[625]2285    // transfer ownership only if target process is the TXT owner
[436]2286    if( (owner_xp == process_xp) && (txt_id > 0) ) 
[428]2287    {
[436]2288        // get extended pointers on root and lock of attached processes list
2289        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
2290        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
[428]2291
[625]2292        if( process_get_ppid( process_xp ) != 1 )       // target process is not KSH
2293        {
2294            // get lock
2295            remote_busylock_acquire( lock_xp );
[436]2296
2297            // scan attached process list to find KSH process
[625]2298            found = false;
2299            for( iter_xp = hal_remote_l64( root_xp ) ;
2300                 (iter_xp != root_xp) && (found == false) ;
2301                 iter_xp = hal_remote_l64( iter_xp ) )
[436]2302            {
[625]2303                current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );
[435]2304
[436]2305                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
2306                {
2307                    // set owner field in TXT chdev
[564]2308                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2309
[446]2310#if DEBUG_PROCESS_TXT
[610]2311cycle = (uint32_t)hal_get_cycles();
[446]2312if( DEBUG_PROCESS_TXT < cycle )
[625]2313printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to KSH / cycle %d\n",
2314__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
[436]2315#endif
[625]2316                    found = true;
[436]2317                }
2318            }
[625]2319
[436]2320            // release lock
[564]2321            remote_busylock_release( lock_xp );
[436]2322
[625]2323// It must exist a KSH process for each user TXT channel
2324assert( (found == true), "KSH process not found for TXT%d", txt_id );
[436]2325
2326        }
[625]2327        else                                           // target process is KSH
[436]2328        {
[625]2329            // get lock
2330            remote_busylock_acquire( lock_xp );
2331
[436]2332            // scan attached process list to find another process
[625]2333            found = false;
2334            for( iter_xp = hal_remote_l64( root_xp ) ;
2335                 (iter_xp != root_xp) && (found == false) ;
2336                 iter_xp = hal_remote_l64( iter_xp ) )
[428]2337            {
[436]2338                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2339
2340                if( current_xp != process_xp )            // current is not KSH
2341                {
2342                    // set owner field in TXT chdev
[564]2343                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2344
[446]2345#if DEBUG_PROCESS_TXT
[610]2346cycle  = (uint32_t)hal_get_cycles();
[625]2347cxy_t       current_cxy = GET_CXY( current_xp );
2348process_t * current_ptr = GET_PTR( current_xp );
2349uint32_t    new_pid     = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
[446]2350if( DEBUG_PROCESS_TXT < cycle )
[625]2351printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to process %x / cycle %d\n",
[610]2352__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
[436]2353#endif
[625]2354                    found = true;
[436]2355                }
[428]2356            }
[436]2357
2358            // release lock
[564]2359            remote_busylock_release( lock_xp );
[436]2360
2361            // no more owner for TXT if no other process found
[625]2362            if( found == false )
2363            {
2364                // set owner field in TXT chdev
2365                hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
[436]2366
[446]2367#if DEBUG_PROCESS_TXT
[436]2368cycle = (uint32_t)hal_get_cycles();
[446]2369if( DEBUG_PROCESS_TXT < cycle )
[625]2370printk("\n[%s] thread[%x,%x] released TXT%d (no attached process) / cycle %d\n",
[610]2371__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
[436]2372#endif
[625]2373            }
[428]2374        }
[436]2375    }
2376    else
2377    {
[433]2378
[446]2379#if DEBUG_PROCESS_TXT
[436]2380cycle = (uint32_t)hal_get_cycles();
[446]2381if( DEBUG_PROCESS_TXT < cycle )
[625]2382printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
2383__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
[436]2384#endif
2385
[428]2386    }
[625]2387
[436]2388}  // end process_txt_transfer_ownership()
[428]2389
2390
[564]2391////////////////////////////////////////////////
2392bool_t process_txt_is_owner( xptr_t process_xp )
[457]2393{
2394    // get local pointer and cluster of process in owner cluster
2395    cxy_t       process_cxy = GET_CXY( process_xp );
2396    process_t * process_ptr = GET_PTR( process_xp );
2397
[564]2398// check calling thread execute in target process owner cluster
2399pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2400assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[624]2401"process descriptor not in owner cluster" );
[457]2402
2403    // get extended pointer on stdin pseudo file
[564]2404    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[457]2405
2406    // get pointers on TXT chdev
2407    xptr_t    txt_xp  = chdev_from_file( file_xp );
2408    cxy_t     txt_cxy = GET_CXY( txt_xp );
2409    chdev_t * txt_ptr = GET_PTR( txt_xp );
2410
2411    // get extended pointer on TXT_RX owner process
[564]2412    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[457]2413
2414    return (process_xp == owner_xp);
2415
2416}   // end process_txt_is_owner()
2417
[436]2418////////////////////////////////////////////////     
2419xptr_t process_txt_get_owner( uint32_t channel )
[435]2420{
2421    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
2422    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
2423    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
2424
[564]2425    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
[435]2426
[457]2427}  // end process_txt_get_owner()
2428
[435]2429///////////////////////////////////////////
2430void process_txt_display( uint32_t txt_id )
2431{
2432    xptr_t      chdev_xp;
2433    cxy_t       chdev_cxy;
2434    chdev_t   * chdev_ptr;
2435    xptr_t      root_xp;
2436    xptr_t      lock_xp;
2437    xptr_t      current_xp;
2438    xptr_t      iter_xp;
[443]2439    cxy_t       txt0_cxy;
2440    chdev_t   * txt0_ptr;
2441    xptr_t      txt0_xp;
2442    xptr_t      txt0_lock_xp;
2443   
[435]2444    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
[492]2445    "illegal TXT terminal index" );
[435]2446
[443]2447    // get pointers on TXT0 chdev
2448    txt0_xp  = chdev_dir.txt_tx[0];
2449    txt0_cxy = GET_CXY( txt0_xp );
2450    txt0_ptr = GET_PTR( txt0_xp );
2451
2452    // get extended pointer on TXT0 lock
2453    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
2454
[435]2455    // get pointers on TXT_RX[txt_id] chdev
2456    chdev_xp  = chdev_dir.txt_rx[txt_id];
2457    chdev_cxy = GET_CXY( chdev_xp );
2458    chdev_ptr = GET_PTR( chdev_xp );
2459
2460    // get extended pointer on root & lock of attached process list
2461    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2462    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2463
[443]2464    // get lock on attached process list
[564]2465    remote_busylock_acquire( lock_xp );
[443]2466
2467    // get TXT0 lock in busy waiting mode
[564]2468    remote_busylock_acquire( txt0_lock_xp );
[443]2469
[435]2470    // display header
[443]2471    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
2472    txt_id , (uint32_t)hal_get_cycles() );
[435]2473
[436]2474    // scan attached process list
[435]2475    XLIST_FOREACH( root_xp , iter_xp )
2476    {
2477        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2478        process_display( current_xp );
2479    }
2480
[443]2481    // release TXT0 lock in busy waiting mode
[564]2482    remote_busylock_release( txt0_lock_xp );
[443]2483
2484    // release lock on attached process list
[564]2485    remote_busylock_release( lock_xp );
[435]2486
2487}  // end process_txt_display
Note: See TracBrowser for help on using the repository browser.