source: trunk/kernel/kern/process.c @ 626

Last change on this file since 626 was 626, checked in by alain, 5 years ago

This version has been tested on the sort multithreaded application
for TSAR_IOB architectures ranging from 1 to 8 clusters.
It fixes three big bugs:
1) the dev_ioc device API has been modified: the dev_ioc_sync_read()
and dev_ioc_sync_write() function use now extended pointers on the
kernel buffer to access a mapper stored in any cluster.
2) the hal_uspace API has been modified: the hal_copy_to_uspace()
and hal_copy_from_uspace() functions use now a (cxy,ptr) couple
to identify the target buffer (equivalent to an extended pointer).
3) an implementation bug has been fixed in the assembly code contained
in the hal_copy_to_uspace() and hal_copy_from_uspace() functions.

File size: 83.6 KB
RevLine 
[1]1/*
[564]2 * process.c - process related functions definition.
[172]3 *
[1]4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
[618]6 *          Alain Greiner (2016,2017,2018,2019)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
[409]10 * This file is part of ALMOS-MKH.
[1]11 *
[172]12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
[172]16 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
[172]22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[457]27#include <hal_kernel_types.h>
[1]28#include <hal_remote.h>
29#include <hal_uspace.h>
[409]30#include <hal_irqmask.h>
[623]31#include <hal_vmm.h>
[1]32#include <errno.h>
33#include <printk.h>
34#include <memcpy.h>
35#include <bits.h>
36#include <kmem.h>
37#include <page.h>
38#include <vmm.h>
39#include <vfs.h>
40#include <core.h>
41#include <thread.h>
[428]42#include <chdev.h>
[1]43#include <list.h>
[407]44#include <string.h>
[1]45#include <scheduler.h>
[564]46#include <busylock.h>
47#include <queuelock.h>
48#include <remote_queuelock.h>
49#include <rwlock.h>
50#include <remote_rwlock.h>
[1]51#include <dqdt.h>
52#include <cluster.h>
53#include <ppm.h>
54#include <boot_info.h>
55#include <process.h>
56#include <elf.h>
[23]57#include <syscalls.h>
[435]58#include <shared_syscalls.h>
[1]59
60//////////////////////////////////////////////////////////////////////////////////////////
61// Extern global variables
62//////////////////////////////////////////////////////////////////////////////////////////
63
[428]64extern process_t           process_zero;     // allocated in kernel_init.c
65extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
[1]66
67//////////////////////////////////////////////////////////////////////////////////////////
68// Process initialisation related functions
69//////////////////////////////////////////////////////////////////////////////////////////
70
[583]71/////////////////////////////////
[503]72process_t * process_alloc( void )
[1]73{
74        kmem_req_t   req;
75
76    req.type  = KMEM_PROCESS;
77        req.size  = sizeof(process_t);
78        req.flags = AF_KERNEL;
79
80    return (process_t *)kmem_alloc( &req );
81}
82
83////////////////////////////////////////
84void process_free( process_t * process )
85{
86    kmem_req_t  req;
87
88        req.type = KMEM_PROCESS;
89        req.ptr  = process;
90        kmem_free( &req );
91}
92
[625]93////////////////////////////////////////////////////
94error_t process_reference_init( process_t * process,
95                                pid_t       pid,
96                                xptr_t      parent_xp )
[1]97{
[625]98    error_t     error;
[610]99    xptr_t      process_xp;
[428]100    cxy_t       parent_cxy;
101    process_t * parent_ptr;
[407]102    xptr_t      stdin_xp;
103    xptr_t      stdout_xp;
104    xptr_t      stderr_xp;
105    uint32_t    stdin_id;
106    uint32_t    stdout_id;
107    uint32_t    stderr_id;
[428]108    uint32_t    txt_id;
109    char        rx_path[40];
110    char        tx_path[40];
[440]111    xptr_t      file_xp;
[428]112    xptr_t      chdev_xp;
[625]113    chdev_t   * chdev_ptr;
[428]114    cxy_t       chdev_cxy;
115    pid_t       parent_pid;
[625]116    vmm_t     * vmm;
[1]117
[610]118    // build extended pointer on this reference process
119    process_xp = XPTR( local_cxy , process );
120
[625]121    // get pointer on process vmm
122    vmm = &process->vmm;
123
[428]124    // get parent process cluster and local pointer
125    parent_cxy = GET_CXY( parent_xp );
[435]126    parent_ptr = GET_PTR( parent_xp );
[204]127
[457]128    // get parent_pid
[564]129    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]130
[438]131#if DEBUG_PROCESS_REFERENCE_INIT
[610]132thread_t * this = CURRENT_THREAD;
[433]133uint32_t cycle = (uint32_t)hal_get_cycles();
[610]134if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[625]135printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
136__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
[433]137#endif
[428]138
[610]139    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
[433]140        process->pid        = pid;
141    process->ref_xp     = XPTR( local_cxy , process );
[443]142    process->owner_xp   = XPTR( local_cxy , process );
[433]143    process->parent_xp  = parent_xp;
144    process->term_state = 0;
[428]145
[610]146    // initialize VFS root inode and CWD inode
147    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
148    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
149
[625]150    // initialize VSL as empty
151    vmm->vsegs_nr = 0;
152        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[564]153
[625]154    // create an empty GPT as required by the architecture
155    error = hal_gpt_create( &vmm->gpt );
156    if( error ) 
157    {
158        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
159        return -1;
160    }
161
162#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
163if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
164printk("\n[%s] thread[%x,%x] created empty GPT for process %x\n",
165__FUNCTION__, parent_pid, this->trdid, pid );
166#endif
167
168    // initialize GPT and VSL locks
169    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
170        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
171
172    // register kernel vsegs in VMM as required by the architecture
173    error = hal_vmm_kernel_update( process );
174    if( error ) 
175    {
176        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
177        return -1;
178    }
179
180#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
181if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
182printk("\n[%s] thread[%x,%x] registered kernel vsegs for process %x\n",
183__FUNCTION__, parent_pid, this->trdid, pid );
184#endif
185
186    // create "args" and "envs" vsegs
187    // create "stacks" and "mmap" vsegs allocators
188    // initialize locks protecting GPT and VSL
189    error = vmm_user_init( process );
190    if( error ) 
191    {
192        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
193        return -1;
194    }
[415]195 
[438]196#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]197cycle = (uint32_t)hal_get_cycles();
[610]198if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
[625]199printk("\n[%s] thread[%x,%x] initialized vmm for process %x\n", 
200__FUNCTION__, parent_pid, this->trdid, pid );
[433]201#endif
[1]202
[409]203    // initialize fd_array as empty
[408]204    process_fd_init( process );
[1]205
[428]206    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
[581]207    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
[408]208    {
[581]209        // select a TXT channel
210        if( pid == 1 )  txt_id = 0;                     // INIT
211        else            txt_id = process_txt_alloc();   // KSH
[428]212
[457]213        // attach process to TXT
[428]214        process_txt_attach( process , txt_id ); 
215
[457]216#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
217cycle = (uint32_t)hal_get_cycles();
[610]218if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
219printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
220__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
[457]221#endif
[428]222        // build path to TXT_RX[i] and TXT_TX[i] chdevs
223        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
224        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
225
226        // create stdin pseudo file         
[610]227        error = vfs_open(  process->vfs_root_xp,
[428]228                           rx_path,
[610]229                           process_xp,
[408]230                           O_RDONLY, 
231                           0,                // FIXME chmod
232                           &stdin_xp, 
233                           &stdin_id );
[625]234        if( error )
235        {
236            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
237            return -1;
238        }
[1]239
[564]240assert( (stdin_id == 0) , "stdin index must be 0" );
[428]241
[440]242#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
243cycle = (uint32_t)hal_get_cycles();
[610]244if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
245printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
246__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]247#endif
248
[428]249        // create stdout pseudo file         
[610]250        error = vfs_open(  process->vfs_root_xp,
[428]251                           tx_path,
[610]252                           process_xp,
[408]253                           O_WRONLY, 
254                           0,                // FIXME chmod
255                           &stdout_xp, 
256                           &stdout_id );
[625]257        if( error )
258        {
259            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
260            return -1;
261        }
[1]262
[625]263assert( (stdout_id == 1) , "stdout index must be 1" );
[428]264
[440]265#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
266cycle = (uint32_t)hal_get_cycles();
[610]267if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
268printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
269__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]270#endif
271
[428]272        // create stderr pseudo file         
[610]273        error = vfs_open(  process->vfs_root_xp,
[428]274                           tx_path,
[610]275                           process_xp,
[408]276                           O_WRONLY, 
277                           0,                // FIXME chmod
278                           &stderr_xp, 
279                           &stderr_id );
[625]280        if( error )
281        {
282            printk("\n[ERROR] in %s : cannot open stderr pseudo-file\n", __FUNCTION__ );
283            return -1;
284        }
[428]285
[625]286assert( (stderr_id == 2) , "stderr index must be 2" );
[428]287
[440]288#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
289cycle = (uint32_t)hal_get_cycles();
[610]290if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
291printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
292__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]293#endif
294
[408]295    }
[428]296    else                                            // normal user process
[408]297    {
[457]298        // get extended pointer on stdin pseudo file in parent process
[625]299        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy,
300                                                &parent_ptr->fd_array.array[0] ) );
[440]301
[457]302        // get extended pointer on parent process TXT chdev
[440]303        chdev_xp = chdev_from_file( file_xp );
[428]304 
305        // get cluster and local pointer on chdev
306        chdev_cxy = GET_CXY( chdev_xp );
[435]307        chdev_ptr = GET_PTR( chdev_xp );
[428]308 
[564]309        // get parent process TXT terminal index
310        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[407]311
[564]312        // attach child process to parent process TXT terminal
[428]313        process_txt_attach( process , txt_id ); 
[407]314
[457]315        // copy all open files from parent process fd_array to this process
[428]316        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
[457]317                                XPTR( parent_cxy , &parent_ptr->fd_array ) );
[408]318    }
[407]319
[610]320    // initialize lock protecting CWD changes
[625]321    remote_busylock_init( XPTR( local_cxy , 
322                                &process->cwd_lock ), LOCK_PROCESS_CWD );
[408]323
[438]324#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]325cycle = (uint32_t)hal_get_cycles();
[610]326if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
327printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
328__FUNCTION__, parent_pid, this->trdid, pid , cycle );
[433]329#endif
[407]330
[408]331    // reset children list root
332    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
333    process->children_nr     = 0;
[625]334    remote_queuelock_init( XPTR( local_cxy,
335                                 &process->children_lock ), LOCK_PROCESS_CHILDREN );
[407]336
[611]337    // reset semaphore / mutex / barrier / condvar list roots and lock
[408]338    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
339    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
340    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
341    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
[625]342    remote_queuelock_init( XPTR( local_cxy , 
343                                 &process->sync_lock ), LOCK_PROCESS_USERSYNC );
[407]344
[611]345    // reset open directories root and lock
346    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
[625]347    remote_queuelock_init( XPTR( local_cxy , 
348                                 &process->dir_lock ), LOCK_PROCESS_DIR );
[611]349
[408]350    // register new process in the local cluster manager pref_tbl[]
351    lpid_t lpid = LPID_FROM_PID( pid );
352    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
[407]353
[408]354    // register new process descriptor in local cluster manager local_list
355    cluster_process_local_link( process );
[407]356
[408]357    // register new process descriptor in local cluster manager copies_list
358    cluster_process_copies_link( process );
[172]359
[564]360    // initialize th_tbl[] array and associated threads
[1]361    uint32_t i;
[564]362
363    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]364        {
365        process->th_tbl[i] = NULL;
366    }
367    process->th_nr  = 0;
[564]368    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[1]369
[124]370        hal_fence();
[1]371
[438]372#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]373cycle = (uint32_t)hal_get_cycles();
[610]374if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
375printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
376__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]377#endif
[101]378
[625]379    return 0;
380
[428]381}  // process_reference_init()
[204]382
[1]383/////////////////////////////////////////////////////
384error_t process_copy_init( process_t * local_process,
385                           xptr_t      reference_process_xp )
386{
[625]387    error_t   error;
388    vmm_t   * vmm;
[415]389
[23]390    // get reference process cluster and local pointer
391    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
[435]392    process_t * ref_ptr = GET_PTR( reference_process_xp );
[1]393
[625]394    // get pointer on process vmm
395    vmm = &local_process->vmm;
396
[428]397    // initialize PID, REF_XP, PARENT_XP, and STATE
[564]398    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
399    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
[433]400    local_process->ref_xp     = reference_process_xp;
[443]401    local_process->owner_xp   = reference_process_xp;
[433]402    local_process->term_state = 0;
[407]403
[564]404#if DEBUG_PROCESS_COPY_INIT
[610]405thread_t * this = CURRENT_THREAD; 
[433]406uint32_t cycle = (uint32_t)hal_get_cycles();
[610]407if( DEBUG_PROCESS_COPY_INIT < cycle )
408printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
409__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]410#endif
[407]411
[564]412// check user process
[625]413assert( (local_process->pid != 0), "LPID cannot be 0" );
[564]414
[625]415    // initialize VSL as empty
416    vmm->vsegs_nr = 0;
417        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[1]418
[625]419    // create an empty GPT as required by the architecture
420    error = hal_gpt_create( &vmm->gpt );
421    if( error ) 
422    {
423        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
424        return -1;
425    }
426
427    // initialize GPT and VSL locks
428    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
429        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
430
431    // register kernel vsegs in VMM as required by the architecture
432    error = hal_vmm_kernel_update( local_process );
433    if( error ) 
434    {
435        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
436        return -1;
437    }
438
439    // create "args" and "envs" vsegs
440    // create "stacks" and "mmap" vsegs allocators
441    // initialize locks protecting GPT and VSL
442    error = vmm_user_init( local_process );
443    if( error ) 
444    {
445        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
446        return -1;
447    }
448 
449#if (DEBUG_PROCESS_COPY_INIT & 1)
450cycle = (uint32_t)hal_get_cycles();
451if( DEBUG_PROCESS_COPY_INIT < cycle )
452printk("\n[%s] thread[%x,%x] initialized vmm for process %x / cycle %d\n", 
453__FUNCTION__, parent_pid, this->trdid, pid, cycle );
454#endif
455
456    // set process file descriptors array
[23]457        process_fd_init( local_process );
[1]458
[625]459    // set vfs_root_xp / vfs_bin_xp / cwd_xp fields
[564]460    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
461    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
[610]462    local_process->cwd_xp      = XPTR_NULL;
[1]463
464    // reset children list root (not used in a process descriptor copy)
465    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
[172]466    local_process->children_nr   = 0;
[564]467    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
468                           LOCK_PROCESS_CHILDREN );
[1]469
[428]470    // reset children_list (not used in a process descriptor copy)
471    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
[1]472
473    // reset semaphores list root (not used in a process descriptor copy)
474    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
[23]475    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
476    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
477    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
[1]478
[564]479    // initialize th_tbl[] array and associated fields
[1]480    uint32_t i;
[564]481    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]482        {
483        local_process->th_tbl[i] = NULL;
484    }
485    local_process->th_nr  = 0;
[564]486    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
[1]487
488    // register new process descriptor in local cluster manager local_list
489    cluster_process_local_link( local_process );
490
491    // register new process descriptor in owner cluster manager copies_list
492    cluster_process_copies_link( local_process );
493
[124]494        hal_fence();
[1]495
[438]496#if DEBUG_PROCESS_COPY_INIT
[433]497cycle = (uint32_t)hal_get_cycles();
[610]498if( DEBUG_PROCESS_COPY_INIT < cycle )
499printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
500__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]501#endif
[279]502
[1]503    return 0;
504
[204]505} // end process_copy_init()
506
///////////////////////////////////////////
// Releases all memory allocated to the process descriptor <process> in
// the local cluster: destroys the VMM, unlinks the descriptor from the
// cluster manager local_list and copies_list, and frees the descriptor.
// When the local cluster is the process owner, it also detaches the
// process from its TXT terminal, removes it from the parent process
// children list, and releases the PID to the cluster manager.
// The process must not contain any thread (checked by assert).
void process_destroy( process_t * process )
{
    xptr_t      parent_xp;
    process_t * parent_ptr;
    cxy_t       parent_cxy;
    xptr_t      children_lock_xp;
    xptr_t      children_nr_xp;

    pid_t       pid = process->pid;

// check no more threads
assert( (process->th_nr == 0),
"process %x in cluster %x contains threads", pid , local_cxy );

#if DEBUG_PROCESS_DESTROY
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

    // Destroy VMM
    vmm_destroy( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from local_list in local cluster manager
    cluster_process_local_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // remove process from copies_list in owner cluster manager
    cluster_process_copies_unlink( process );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    // when target process cluster is the owner cluster
    // - remove process from TXT list and transfer ownership
    // - remove process from children_list
    // - release PID
    if( CXY_FROM_PID( pid ) == local_cxy )
    {
        process_txt_detach( XPTR( local_cxy , process ) );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from TXT list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // get pointers on parent process
        parent_xp  = process->parent_xp;
        parent_cxy = GET_CXY( parent_xp );
        parent_ptr = GET_PTR( parent_xp );

        // get extended pointers on children_lock and children_nr in parent process
        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );

        // remove process from children_list, under the parent children lock,
        // and atomically decrement the parent children counter
        remote_queuelock_acquire( children_lock_xp );
        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
        hal_remote_atomic_add( children_nr_xp , -1 );
        remote_queuelock_release( children_lock_xp );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] removed process %x from parent process children list\n",
__FUNCTION__, this->process->pid, this->trdid, pid );
#endif

        // release the process PID to cluster manager
        cluster_pid_release( pid );

#if (DEBUG_PROCESS_DESTROY & 1)
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
#endif

    }

    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]

    // FIXME close all open files [AG]

    // FIXME synchronize dirty files [AG]

    // release memory allocated to process descriptor
    process_free( process );

#if DEBUG_PROCESS_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
#endif

}  // end process_destroy()
620
[583]621///////////////////////////////////////////////////////////////////
[527]622const char * process_action_str( process_sigactions_t action_type )
[409]623{
[583]624    switch ( action_type )
625    {
626        case BLOCK_ALL_THREADS:   return "BLOCK";
627        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
628        case DELETE_ALL_THREADS:  return "DELETE";
629        default:                  return "undefined";
630    }
[409]631}
632
////////////////////////////////////////
// Applies a signal action <type> (BLOCK / UNBLOCK / DELETE all threads)
// to every copy of the process identified by <pid>, in all clusters.
// The calling (client) thread sends one shared, non-blocking RPC per
// remote cluster containing a copy, blocks and deschedules until the
// last RPC server thread unblocks it, then handles the local copy
// (if any) directly by calling process_{block,unblock,delete}_threads.
// The RPC descriptor is allocated in the client stack and shared by
// all servers, which is safe because the RPC has no out arguments.
void process_sigaction( pid_t       pid,
                        uint32_t    type )
{
    cxy_t              owner_cxy;         // owner cluster identifier
    lpid_t             lpid;              // process index in owner cluster
    cluster_t        * cluster;           // pointer on cluster manager
    xptr_t             root_xp;           // extended pointer on root of copies
    xptr_t             lock_xp;           // extended pointer on lock protecting copies
    xptr_t             iter_xp;           // iterator on copies list
    xptr_t             process_xp;        // extended pointer on process copy
    cxy_t              process_cxy;       // process copy cluster identifier
    process_t        * process_ptr;       // local pointer on process copy
    reg_t              save_sr;           // for critical section
    thread_t         * client;            // pointer on client thread
    xptr_t             client_xp;         // extended pointer on client thread
    process_t        * local;             // pointer on process copy in local cluster
    uint32_t           remote_nr;         // number of remote process copies
    rpc_desc_t         rpc;               // shared RPC descriptor
    uint32_t           responses;         // shared RPC responses counter

    client    = CURRENT_THREAD;
    client_xp = XPTR( local_cxy , client );
    local     = NULL;
    remote_nr = 0;

    // check calling thread can yield
    thread_assert_can_yield( client , __FUNCTION__ );

#if DEBUG_PROCESS_SIGACTION
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
__FUNCTION__ , client->process->pid, client->trdid,
process_action_str( type ) , pid , cycle );
#endif

    // get pointer on local cluster manager
    cluster = LOCAL_CLUSTER;

    // get owner cluster identifier and process lpid
    owner_cxy = CXY_FROM_PID( pid );
    lpid      = LPID_FROM_PID( pid );

    // get root of list of copies and lock from owner cluster
    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );

// check action type
assert( ((type == DELETE_ALL_THREADS ) ||
         (type == BLOCK_ALL_THREADS )  ||
         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );

    // This client thread send parallel RPCs to all remote clusters containing
    // target process copies, wait all responses, and then handles directly
    // the threads in local cluster, when required.
    // The client thread allocates a - shared - RPC descriptor in the stack,
    // because all parallel, non-blocking, server threads use the same input
    // arguments, and use the shared RPC response field

    // mask IRQs
    hal_disable_irq( &save_sr);

    // client thread blocks itself (before sending the RPCs, so the
    // unblock from the last server thread cannot be lost)
    thread_block( client_xp , THREAD_BLOCKED_RPC );

    // initialize RPC responses counter
    responses = 0;

    // initialize shared RPC descriptor
    // can be shared, because no out arguments
    rpc.rsp       = &responses;
    rpc.blocking  = false;
    rpc.index     = RPC_PROCESS_SIGACTION;
    rpc.thread    = client;
    rpc.lid       = client->core->lid;
    rpc.args[0]   = pid;
    rpc.args[1]   = type;

    // take the lock protecting process copies
    remote_queuelock_acquire( lock_xp );

    // scan list of process copies
    XLIST_FOREACH( root_xp , iter_xp )
    {
        // get extended pointers and cluster on process
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        process_cxy = GET_CXY( process_xp );
        process_ptr = GET_PTR( process_xp );

        if( process_cxy == local_cxy )    // process copy is local
        {
            local = process_ptr;
        }
        else                              // process copy is remote
        {
            // update number of remote process copies
            remote_nr++;

            // atomically increment RPC responses counter
            hal_atomic_add( &responses , 1 );

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
#endif
            // call RPC in target cluster
            rpc_send( process_cxy , &rpc );
        }
    }  // end list of copies

    // release the lock protecting process copies
    remote_queuelock_release( lock_xp );

    // restore IRQs
    hal_restore_irq( save_sr);

    // - if there is remote process copies, the client thread deschedules,
    //   (it will be unblocked by the last RPC server thread).
    // - if there is no remote copies, the client thread unblock itself.
    if( remote_nr )
    {
        sched_yield("blocked on rpc_process_sigaction");
    }
    else
    {
        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
    }

    // handle the local process copy if required
    if( local != NULL )
    {

#if DEBUG_PROCESS_SIGACTION
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
#endif
        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp );
        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local );
        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
    }

#if DEBUG_PROCESS_SIGACTION
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_PROCESS_SIGACTION < cycle )
printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
__FUNCTION__, client->process->pid, client->trdid,
process_action_str( type ), pid, cycle );
#endif

}  // end process_sigaction()
786
[433]787/////////////////////////////////////////////////
[583]788void process_block_threads( process_t * process )
[1]789{
[409]790    thread_t          * target;         // pointer on target thread
[433]791    thread_t          * this;           // pointer on calling thread
[564]792    uint32_t            ltid;           // index in process th_tbl[]
[436]793    cxy_t               owner_cxy;      // target process owner cluster
[409]794    uint32_t            count;          // requests counter
[593]795    volatile uint32_t   ack_count;      // acknowledges counter
[1]796
[416]797    // get calling thread pointer
[433]798    this = CURRENT_THREAD;
[407]799
[438]800#if DEBUG_PROCESS_SIGACTION
[564]801pid_t pid = process->pid;
[433]802uint32_t cycle = (uint32_t)hal_get_cycles();
[438]803if( DEBUG_PROCESS_SIGACTION < cycle )
[593]804printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]805__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]806#endif
[409]807
[564]808// check target process is an user process
[619]809assert( (LPID_FROM_PID( process->pid ) != 0 ),
810"process %x is not an user process\n", process->pid );
[564]811
[610]812    // get target process owner cluster
[564]813    owner_cxy = CXY_FROM_PID( process->pid );
814
[409]815    // get lock protecting process th_tbl[]
[564]816    rwlock_rd_acquire( &process->th_lock );
[1]817
[440]818    // loop on target process local threads
[409]819    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[593]820    // - if the calling thread and the target thread are not running on the same
821    //   core, we ask the target scheduler to acknowlege the blocking
822    //   to be sure that the target thread is not running.
823    // - if the calling thread and the target thread are running on the same core,
824    //   we don't need confirmation from scheduler.
825           
[436]826    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
[1]827    {
[409]828        target = process->th_tbl[ltid];
[1]829
[436]830        if( target != NULL )                                 // thread exist
[1]831        {
832            count++;
[409]833
[583]834            // set the global blocked bit in target thread descriptor.
835            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[436]836 
[583]837            if( this->core->lid != target->core->lid )
838            {
839                // increment responses counter
840                hal_atomic_add( (void*)&ack_count , 1 );
[409]841
[583]842                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
843                thread_set_req_ack( target , (uint32_t *)&ack_count );
[409]844
[583]845                // force scheduling on target thread
846                dev_pic_send_ipi( local_cxy , target->core->lid );
[409]847            }
[1]848        }
[172]849    }
850
[428]851    // release lock protecting process th_tbl[]
[564]852    rwlock_rd_release( &process->th_lock );
[416]853
[593]854    // wait other threads acknowledges  TODO this could be improved...
[409]855    while( 1 )
856    {
[610]857        // exit when all scheduler acknowledges received
[436]858        if ( ack_count == 0 ) break;
[409]859   
860        // wait 1000 cycles before retry
861        hal_fixed_delay( 1000 );
862    }
[1]863
[438]864#if DEBUG_PROCESS_SIGACTION
[433]865cycle = (uint32_t)hal_get_cycles();
[438]866if( DEBUG_PROCESS_SIGACTION < cycle )
[593]867printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
868__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]869#endif
[409]870
[428]871}  // end process_block_threads()
[409]872
[440]873/////////////////////////////////////////////////
874void process_delete_threads( process_t * process,
875                             xptr_t      client_xp )
[409]876{
[433]877    thread_t          * this;          // pointer on calling thread
[440]878    thread_t          * target;        // local pointer on target thread
879    xptr_t              target_xp;     // extended pointer on target thread
880    cxy_t               owner_cxy;     // owner process cluster
[409]881    uint32_t            ltid;          // index in process th_tbl
[440]882    uint32_t            count;         // threads counter
[409]883
[433]884    // get calling thread pointer
885    this = CURRENT_THREAD;
[409]886
[440]887    // get target process owner cluster
888    owner_cxy = CXY_FROM_PID( process->pid );
889
[438]890#if DEBUG_PROCESS_SIGACTION
[433]891uint32_t cycle = (uint32_t)hal_get_cycles();
[438]892if( DEBUG_PROCESS_SIGACTION < cycle )
[625]893printk("\n[%s] thread[%x,%x] enter for process %x n cluster %x / cycle %d\n",
894__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
[433]895#endif
896
[564]897// check target process is an user process
[619]898assert( (LPID_FROM_PID( process->pid ) != 0),
899"process %x is not an user process\n", process->pid );
[564]900
[409]901    // get lock protecting process th_tbl[]
[583]902    rwlock_wr_acquire( &process->th_lock );
[409]903
[440]904    // loop on target process local threads                       
[416]905    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[440]906    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
[1]907    {
[409]908        target = process->th_tbl[ltid];
[1]909
[440]910        if( target != NULL )    // valid thread 
[1]911        {
[416]912            count++;
[440]913            target_xp = XPTR( local_cxy , target );
[1]914
[564]915            // main thread and client thread should not be deleted
[440]916            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
917                (client_xp) != target_xp )                           // not client thread
918            {
919                // mark target thread for delete and block it
920                thread_delete( target_xp , process->pid , false );   // not forced
921            }
[409]922        }
923    }
[1]924
[428]925    // release lock protecting process th_tbl[]
[583]926    rwlock_wr_release( &process->th_lock );
[407]927
[438]928#if DEBUG_PROCESS_SIGACTION
[433]929cycle = (uint32_t)hal_get_cycles();
[438]930if( DEBUG_PROCESS_SIGACTION < cycle )
[593]931printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
932__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
[433]933#endif
[407]934
[440]935}  // end process_delete_threads()
[409]936
[440]937///////////////////////////////////////////////////
938void process_unblock_threads( process_t * process )
[409]939{
[440]940    thread_t          * target;        // pointer on target thead
941    thread_t          * this;          // pointer on calling thread
[409]942    uint32_t            ltid;          // index in process th_tbl
[440]943    uint32_t            count;         // requests counter
[409]944
[440]945    // get calling thread pointer
946    this = CURRENT_THREAD;
947
[438]948#if DEBUG_PROCESS_SIGACTION
[564]949pid_t pid = process->pid;
[433]950uint32_t cycle = (uint32_t)hal_get_cycles();
[438]951if( DEBUG_PROCESS_SIGACTION < cycle )
[593]952printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]953__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]954#endif
955
[564]956// check target process is an user process
[619]957assert( ( LPID_FROM_PID( process->pid ) != 0 ),
958"process %x is not an user process\n", process->pid );
[564]959
[416]960    // get lock protecting process th_tbl[]
[564]961    rwlock_rd_acquire( &process->th_lock );
[416]962
[440]963    // loop on process threads to unblock all threads
[416]964    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[440]965    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
[409]966    {
[416]967        target = process->th_tbl[ltid];
[409]968
[440]969        if( target != NULL )             // thread found
[409]970        {
971            count++;
[440]972
973            // reset the global blocked bit in target thread descriptor.
974            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[1]975        }
976    }
977
[428]978    // release lock protecting process th_tbl[]
[564]979    rwlock_rd_release( &process->th_lock );
[407]980
[438]981#if DEBUG_PROCESS_SIGACTION
[433]982cycle = (uint32_t)hal_get_cycles();
[438]983if( DEBUG_PROCESS_SIGACTION < cycle )
[593]984printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
[583]985__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]986#endif
[1]987
[440]988}  // end process_unblock_threads()
[407]989
[1]990///////////////////////////////////////////////
991process_t * process_get_local_copy( pid_t pid )
992{
993    error_t        error;
[172]994    process_t    * process_ptr;   // local pointer on process
[23]995    xptr_t         process_xp;    // extended pointer on process
[1]996
997    cluster_t * cluster = LOCAL_CLUSTER;
998
[564]999#if DEBUG_PROCESS_GET_LOCAL_COPY
1000thread_t * this = CURRENT_THREAD;
1001uint32_t cycle = (uint32_t)hal_get_cycles();
1002if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]1003printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]1004__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[564]1005#endif
1006
[1]1007    // get lock protecting local list of processes
[564]1008    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]1009
1010    // scan the local list of process descriptors to find the process
[23]1011    xptr_t  iter;
1012    bool_t  found = false;
1013    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
[1]1014    {
[23]1015        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[435]1016        process_ptr = GET_PTR( process_xp );
[23]1017        if( process_ptr->pid == pid )
[1]1018        {
1019            found = true;
1020            break;
1021        }
1022    }
1023
1024    // release lock protecting local list of processes
[564]1025    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]1026
[172]1027    // allocate memory for a new local process descriptor
[440]1028    // and initialise it from reference cluster if not found
[1]1029    if( !found )
1030    {
1031        // get extended pointer on reference process descriptor
[23]1032        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
[1]1033
[492]1034        assert( (ref_xp != XPTR_NULL) , "illegal pid\n" );
[23]1035
[1]1036        // allocate memory for local process descriptor
[23]1037        process_ptr = process_alloc();
[443]1038
[23]1039        if( process_ptr == NULL )  return NULL;
[1]1040
1041        // initialize local process descriptor copy
[23]1042        error = process_copy_init( process_ptr , ref_xp );
[443]1043
[1]1044        if( error ) return NULL;
1045    }
1046
[440]1047#if DEBUG_PROCESS_GET_LOCAL_COPY
[564]1048cycle = (uint32_t)hal_get_cycles();
[440]1049if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]1050printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
[583]1051__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
[440]1052#endif
1053
[23]1054    return process_ptr;
[1]1055
[409]1056}  // end process_get_local_copy()
1057
[436]1058////////////////////////////////////////////
1059pid_t process_get_ppid( xptr_t  process_xp )
1060{
1061    cxy_t       process_cxy;
1062    process_t * process_ptr;
1063    xptr_t      parent_xp;
1064    cxy_t       parent_cxy;
1065    process_t * parent_ptr;
1066
1067    // get process cluster and local pointer
1068    process_cxy = GET_CXY( process_xp );
1069    process_ptr = GET_PTR( process_xp );
1070
1071    // get pointers on parent process
[564]1072    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[436]1073    parent_cxy = GET_CXY( parent_xp );
1074    parent_ptr = GET_PTR( parent_xp );
1075
[564]1076    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[436]1077}
1078
[1]1079//////////////////////////////////////////////////////////////////////////////////////////
1080// File descriptor array related functions
1081//////////////////////////////////////////////////////////////////////////////////////////
1082
1083///////////////////////////////////////////
1084void process_fd_init( process_t * process )
1085{
1086    uint32_t fd;
1087
[610]1088    // initialize lock
[564]1089    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
[1]1090
[610]1091    // initialize number of open files
[23]1092    process->fd_array.current = 0;
1093
[1]1094    // initialize array
[23]1095    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1096    {
1097        process->fd_array.array[fd] = XPTR_NULL;
1098    }
1099}
[610]1100////////////////////////////////////////////////////
1101error_t process_fd_register( xptr_t      process_xp,
[407]1102                             xptr_t      file_xp,
1103                             uint32_t  * fdid )
[1]1104{
1105    bool_t    found;
[23]1106    uint32_t  id;
1107    xptr_t    xp;
[1]1108
[23]1109    // get reference process cluster and local pointer
[610]1110    process_t * process_ptr = GET_PTR( process_xp );
1111    cxy_t       process_cxy = GET_CXY( process_xp );
[23]1112
[610]1113// check client process is reference process
1114assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp ) ) ),
1115"client process must be reference process\n" );
1116
1117#if DEBUG_PROCESS_FD_REGISTER
1118thread_t * this  = CURRENT_THREAD;
1119uint32_t   cycle = (uint32_t)hal_get_cycles();
1120pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1121if( DEBUG_PROCESS_FD_REGISTER < cycle )
1122printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
1123__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
1124#endif
1125
1126    // build extended pointer on lock protecting reference fd_array
1127    xptr_t lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1128
[23]1129    // take lock protecting reference fd_array
[610]1130        remote_queuelock_acquire( lock_xp );
[23]1131
[1]1132    found   = false;
1133
[23]1134    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
[1]1135    {
[610]1136        xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
[23]1137        if ( xp == XPTR_NULL )
[1]1138        {
[564]1139            // update reference fd_array
[610]1140            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
1141                hal_remote_atomic_add( XPTR( process_cxy , &process_ptr->fd_array.current ) , 1 );
[564]1142
1143            // exit
1144                        *fdid = id;
[1]1145            found = true;
1146            break;
1147        }
1148    }
1149
[610]1150    // release lock protecting fd_array
1151        remote_queuelock_release( lock_xp );
[1]1152
[610]1153#if DEBUG_PROCESS_FD_REGISTER
1154cycle = (uint32_t)hal_get_cycles();
1155if( DEBUG_PROCESS_FD_REGISTER < cycle )
1156printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
1157__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
1158#endif
1159
[428]1160    if ( !found ) return -1;
[1]1161    else          return 0;
1162
[610]1163}  // end process_fd_register()
1164
[172]1165////////////////////////////////////////////////
[23]1166xptr_t process_fd_get_xptr( process_t * process,
[407]1167                            uint32_t    fdid )
[1]1168{
[23]1169    xptr_t  file_xp;
[564]1170    xptr_t  lock_xp;
[1]1171
[23]1172    // access local copy of process descriptor
[407]1173    file_xp = process->fd_array.array[fdid];
[1]1174
[23]1175    if( file_xp == XPTR_NULL )
1176    {
1177        // get reference process cluster and local pointer
1178        xptr_t      ref_xp  = process->ref_xp;
1179        cxy_t       ref_cxy = GET_CXY( ref_xp );
[435]1180        process_t * ref_ptr = GET_PTR( ref_xp );
[1]1181
[564]1182        // build extended pointer on lock protecting reference fd_array
1183        lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock );
1184
1185        // take lock protecting reference fd_array
1186            remote_queuelock_acquire( lock_xp );
1187
[23]1188        // access reference process descriptor
[564]1189        file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
[1]1190
[23]1191        // update local fd_array if found
[564]1192        if( file_xp != XPTR_NULL )  process->fd_array.array[fdid] = file_xp;
1193       
1194        // release lock protecting reference fd_array
1195            remote_queuelock_release( lock_xp );
[23]1196    }
[1]1197
[23]1198    return file_xp;
[1]1199
[407]1200}  // end process_fd_get_xptr()
1201
[1]1202///////////////////////////////////////////
1203void process_fd_remote_copy( xptr_t dst_xp,
1204                             xptr_t src_xp )
1205{
1206    uint32_t fd;
1207    xptr_t   entry;
1208
1209    // get cluster and local pointer for src fd_array
1210    cxy_t        src_cxy = GET_CXY( src_xp );
[435]1211    fd_array_t * src_ptr = GET_PTR( src_xp );
[1]1212
1213    // get cluster and local pointer for dst fd_array
1214    cxy_t        dst_cxy = GET_CXY( dst_xp );
[435]1215    fd_array_t * dst_ptr = GET_PTR( dst_xp );
[1]1216
1217    // get the remote lock protecting the src fd_array
[564]1218        remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );
[1]1219
[428]1220    // loop on all fd_array entries
1221    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1222        {
[564]1223                entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );
[1]1224
1225                if( entry != XPTR_NULL )
1226                {
[459]1227            // increment file descriptor refcount
[1]1228            vfs_file_count_up( entry );
1229
1230                        // copy entry in destination process fd_array
[564]1231                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
[1]1232                }
1233        }
1234
1235    // release lock on source process fd_array
[564]1236        remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );
[1]1237
[407]1238}  // end process_fd_remote_copy()
1239
[564]1240
1241////////////////////////////////////
1242bool_t process_fd_array_full( void )
1243{
1244    // get extended pointer on reference process
1245    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
1246
1247    // get reference process cluster and local pointer
1248    process_t * ref_ptr = GET_PTR( ref_xp );
1249    cxy_t       ref_cxy = GET_CXY( ref_xp );
1250
1251    // get number of open file descriptors from reference fd_array
1252    uint32_t current = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
1253
1254        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
1255}
1256
1257
[1]1258////////////////////////////////////////////////////////////////////////////////////
1259//  Thread related functions
1260////////////////////////////////////////////////////////////////////////////////////
1261
1262/////////////////////////////////////////////////////
1263error_t process_register_thread( process_t * process,
1264                                 thread_t  * thread,
1265                                 trdid_t   * trdid )
1266{
[472]1267    ltid_t         ltid;
1268    bool_t         found = false;
1269 
[564]1270// check arguments
1271assert( (process != NULL) , "process argument is NULL" );
1272assert( (thread != NULL) , "thread argument is NULL" );
[1]1273
[564]1274    // get the lock protecting th_tbl for all threads
1275    // but the idle thread executing kernel_init (cannot yield)
1276    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
[1]1277
[583]1278    // scan th_tbl
[564]1279    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
[1]1280    {
1281        if( process->th_tbl[ltid] == NULL )
1282        {
1283            found = true;
1284            break;
1285        }
1286    }
1287
1288    if( found )
1289    {
1290        // register thread in th_tbl[]
1291        process->th_tbl[ltid] = thread;
1292        process->th_nr++;
1293
1294        // returns trdid
1295        *trdid = TRDID( local_cxy , ltid );
1296    }
1297
[583]1298    // release the lock protecting th_tbl
[564]1299    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
[428]1300
[564]1301    return (found) ? 0 : 0xFFFFFFFF;
[204]1302
1303}  // end process_register_thread()
1304
[625]1305///////////////////////////////////////////////////
1306uint32_t process_remove_thread( thread_t * thread )
[1]1307{
[443]1308    uint32_t count;  // number of threads in local process descriptor
1309
[625]1310// check thread
1311assert( (thread != NULL) , "thread argument is NULL" );
1312
[1]1313    process_t * process = thread->process;
1314
1315    // get thread local index
1316    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
[564]1317   
1318    // get the lock protecting th_tbl[]
1319    rwlock_wr_acquire( &process->th_lock );
[428]1320
[583]1321    // get number of threads
[443]1322    count = process->th_nr;
[428]1323
[564]1324// check th_nr value
[624]1325assert( (count > 0) , "process th_nr cannot be 0" );
[443]1326
[1]1327    // remove thread from th_tbl[]
1328    process->th_tbl[ltid] = NULL;
[450]1329    process->th_nr = count-1;
[1]1330
[583]1331    // release lock protecting th_tbl
[564]1332    rwlock_wr_release( &process->th_lock );
[428]1333
[625]1334    return count;
[443]1335
[450]1336}  // end process_remove_thread()
[204]1337
[408]1338/////////////////////////////////////////////////////////
1339error_t process_make_fork( xptr_t      parent_process_xp,
1340                           xptr_t      parent_thread_xp,
1341                           pid_t     * child_pid,
1342                           thread_t ** child_thread )
[1]1343{
[408]1344    process_t * process;         // local pointer on child process descriptor
1345    thread_t  * thread;          // local pointer on child thread descriptor
1346    pid_t       new_pid;         // process identifier for child process
1347    pid_t       parent_pid;      // process identifier for parent process
1348    xptr_t      ref_xp;          // extended pointer on reference process
[428]1349    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
[408]1350    error_t     error;
[1]1351
[408]1352    // get cluster and local pointer for parent process
1353    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
[435]1354    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
[101]1355
[428]1356    // get parent process PID and extended pointer on .elf file
[564]1357    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1358    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
[428]1359
[564]1360    // get extended pointer on reference process
1361    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
[438]1362
[564]1363// check parent process is the reference process
1364assert( (parent_process_xp == ref_xp ) ,
[624]1365"parent process must be the reference process" );
[407]1366
[438]1367#if DEBUG_PROCESS_MAKE_FORK
[583]1368uint32_t cycle   = (uint32_t)hal_get_cycles();
1369thread_t * this  = CURRENT_THREAD;
1370trdid_t    trdid = this->trdid;
1371pid_t      pid   = this->process->pid;
[438]1372if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1373printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
[583]1374__FUNCTION__, pid, trdid, local_cxy, cycle );
[433]1375#endif
[172]1376
[408]1377    // allocate a process descriptor
1378    process = process_alloc();
1379    if( process == NULL )
1380    {
1381        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1382        __FUNCTION__, local_cxy ); 
1383        return -1;
1384    }
[1]1385
[408]1386    // allocate a child PID from local cluster
[416]1387    error = cluster_pid_alloc( process , &new_pid );
[428]1388    if( error ) 
[1]1389    {
[408]1390        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1391        __FUNCTION__, local_cxy ); 
1392        process_free( process );
1393        return -1;
[1]1394    }
[408]1395
[469]1396#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[457]1397cycle = (uint32_t)hal_get_cycles();
1398if( DEBUG_PROCESS_MAKE_FORK < cycle )
[625]1399printk("\n[%s] thread[%x,%x] allocated child_process %x / cycle %d\n",
[583]1400__FUNCTION__, pid, trdid, new_pid, cycle );
[457]1401#endif
1402
[408]1403    // initializes child process descriptor from parent process descriptor
[625]1404    error = process_reference_init( process,
1405                                    new_pid,
1406                                    parent_process_xp );
1407    if( error ) 
1408    {
1409        printk("\n[ERROR] in %s : cannot initialize child process in cluster %x\n", 
1410        __FUNCTION__, local_cxy ); 
1411        process_free( process );
1412        return -1;
1413    }
[408]1414
[438]1415#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1416cycle = (uint32_t)hal_get_cycles();
[438]1417if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1418printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
[583]1419__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1420#endif
[408]1421
1422    // copy VMM from parent descriptor to child descriptor
1423    error = vmm_fork_copy( process,
1424                           parent_process_xp );
1425    if( error )
[101]1426    {
[408]1427        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1428        __FUNCTION__, local_cxy ); 
1429        process_free( process );
1430        cluster_pid_release( new_pid );
1431        return -1;
[101]1432    }
[172]1433
[438]1434#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1435cycle = (uint32_t)hal_get_cycles();
[438]1436if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1437printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
[583]1438__FUNCTION__, pid, trdid, cycle );
[433]1439#endif
[407]1440
[564]1441    // if parent_process is INIT, or if parent_process is the TXT owner,
1442    // the child_process becomes the owner of its TXT terminal
1443    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
[457]1444    {
1445        process_txt_set_ownership( XPTR( local_cxy , process ) );
1446
1447#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1448cycle = (uint32_t)hal_get_cycles();
[626]1449if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1450printk("\n[%s] thread[%x,%x] / child takes TXT ownership / cycle %d\n",
[583]1451__FUNCTION__ , pid, trdid, cycle );
[457]1452#endif
1453
1454    }
1455
[428]1456    // update extended pointer on .elf file
1457    process->vfs_bin_xp = vfs_bin_xp;
1458
[408]1459    // create child thread descriptor from parent thread descriptor
1460    error = thread_user_fork( parent_thread_xp,
1461                              process,
1462                              &thread );
1463    if( error )
1464    {
1465        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1466        __FUNCTION__, local_cxy ); 
1467        process_free( process );
1468        cluster_pid_release( new_pid );
1469        return -1;
1470    }
[172]1471
[564]1472// check main thread LTID
1473assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
[624]1474"main thread must have LTID == 0" );
[428]1475
[564]1476#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1477cycle = (uint32_t)hal_get_cycles();
[438]1478if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1479printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
[583]1480__FUNCTION__, pid, trdid, thread, cycle );
[433]1481#endif
[1]1482
[625]1483    // set COW flag in DATA, ANON, REMOTE vsegs for parent process VMM
1484    // this includes all parnet process copies in all clusters
[408]1485    if( parent_process_cxy == local_cxy )   // reference is local
1486    {
1487        vmm_set_cow( parent_process_ptr );
1488    }
1489    else                                    // reference is remote
1490    {
1491        rpc_vmm_set_cow_client( parent_process_cxy,
1492                                parent_process_ptr );
1493    }
[1]1494
[625]1495    // set COW flag in DATA, ANON, REMOTE vsegs for child process VMM
[433]1496    vmm_set_cow( process );
1497 
[438]1498#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1499cycle = (uint32_t)hal_get_cycles();
[438]1500if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1501printk("\n[%s] thread[%x,%x] set COW in parent and child / cycle %d\n",
[583]1502__FUNCTION__, pid, trdid, cycle );
[433]1503#endif
[101]1504
[428]1505    // get extended pointers on parent children_root, children_lock and children_nr
1506    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1507    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1508    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
[101]1509
[428]1510    // register process in parent children list
[564]1511    remote_queuelock_acquire( children_lock_xp );
[428]1512        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1513        hal_remote_atomic_add( children_nr_xp , 1 );
[564]1514    remote_queuelock_release( children_lock_xp );
[204]1515
[408]1516    // return success
1517    *child_thread = thread;
1518    *child_pid    = new_pid;
[1]1519
[438]1520#if DEBUG_PROCESS_MAKE_FORK
[433]1521cycle = (uint32_t)hal_get_cycles();
[438]1522if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1523printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
[583]1524__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1525#endif
[428]1526
[408]1527    return 0;
1528
[416]1529}   // end process_make_fork()
[408]1530
1531/////////////////////////////////////////////////////
1532error_t process_make_exec( exec_info_t  * exec_info )
1533{
[457]1534    thread_t       * thread;                  // local pointer on this thread
1535    process_t      * process;                 // local pointer on this process
1536    pid_t            pid;                     // this process identifier
[610]1537    xptr_t           ref_xp;                  // reference process for this process
[441]1538        error_t          error;                   // value returned by called functions
[457]1539    char           * path;                    // path to .elf file
1540    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
1541    uint32_t         file_id;                 // file index in fd_array
1542    uint32_t         args_nr;                 // number of main thread arguments
1543    char          ** args_pointers;           // array of pointers on main thread arguments
[446]1544
[625]1545    // get calling thread, process, pid and ref_xp
[457]1546    thread  = CURRENT_THREAD;
1547    process = thread->process;
1548    pid     = process->pid;
[610]1549    ref_xp  = process->ref_xp;
[408]1550
[457]1551        // get relevant infos from exec_info
1552        path          = exec_info->path;
1553    args_nr       = exec_info->args_nr;
1554    args_pointers = exec_info->args_pointers;
[408]1555
[438]1556#if DEBUG_PROCESS_MAKE_EXEC
[433]1557uint32_t cycle = (uint32_t)hal_get_cycles();
[626]1558if( local_cxy == 0x11 )
[593]1559printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n",
[583]1560__FUNCTION__, pid, thread->trdid, path, cycle );
[433]1561#endif
[408]1562
[457]1563    // open the file identified by <path>
1564    file_xp = XPTR_NULL;
[564]1565    file_id = 0xFFFFFFFF;
[610]1566        error   = vfs_open( process->vfs_root_xp,
[457]1567                            path,
[610]1568                        ref_xp,
[457]1569                            O_RDONLY,
1570                            0,
1571                            &file_xp,
1572                            &file_id );
1573        if( error )
1574        {
1575                printk("\n[ERROR] in %s : failed to open file <%s>\n", __FUNCTION__ , path );
1576                return -1;
1577        }
1578
[446]1579#if (DEBUG_PROCESS_MAKE_EXEC & 1)
[469]1580cycle = (uint32_t)hal_get_cycles();
[626]1581if( local_cxy == 0x11 )
[593]1582printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n",
[583]1583__FUNCTION__, pid, thread->trdid, path, cycle );
[446]1584#endif
1585
[457]1586    // delete all threads other than this main thread in all clusters
1587    process_sigaction( pid , DELETE_ALL_THREADS );
[446]1588
[469]1589#if (DEBUG_PROCESS_MAKE_EXEC & 1)
1590cycle = (uint32_t)hal_get_cycles();
[626]1591if( local_cxy == 0x11 )
[625]1592printk("\n[%s] thread[%x,%x] deleted existing threads / cycle %d\n",
[583]1593__FUNCTION__, pid, thread->trdid, cycle );
[469]1594#endif
1595
[625]1596    // reset calling process VMM
1597    vmm_user_reset( process );
[446]1598
[457]1599#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1600cycle = (uint32_t)hal_get_cycles();
[626]1601if( local_cxy == 0x11 )
[625]1602printk("\n[%s] thread[%x,%x] completed VMM reset / cycle %d\n",
[583]1603__FUNCTION__, pid, thread->trdid, cycle );
[457]1604#endif
[408]1605
[625]1606    // re-initialize the VMM (args/envs vsegs registration)
1607    error = vmm_user_init( process );
[457]1608    if( error )
[416]1609    {
[457]1610        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
1611        vfs_close( file_xp , file_id );
[623]1612        // FIXME restore old process VMM [AG]
[416]1613        return -1;
1614    }
[457]1615   
[438]1616#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1617cycle = (uint32_t)hal_get_cycles();
[626]1618if( local_cxy == 0x11 )
[625]1619printk("\n[%s] thread[%x,%x] registered args/envs vsegs / cycle %d\n",
[583]1620__FUNCTION__, pid, thread->trdid, cycle );
[433]1621#endif
[428]1622
[457]1623    // register code & data vsegs as well as entry-point in process VMM,
[428]1624    // and register extended pointer on .elf file in process descriptor
[457]1625        error = elf_load_process( file_xp , process );
[441]1626    if( error )
[1]1627        {
[441]1628                printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
[457]1629        vfs_close( file_xp , file_id );
[623]1630        // FIXME restore old process VMM [AG]
[408]1631        return -1;
[1]1632        }
1633
[438]1634#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1635cycle = (uint32_t)hal_get_cycles();
[626]1636if( local_cxy == 0x11 )
[625]1637printk("\n[%s] thread[%x,%x] registered code/data vsegs / cycle %d\n",
[583]1638__FUNCTION__, pid, thread->trdid, cycle );
[433]1639#endif
[1]1640
[457]1641    // update the existing main thread descriptor... and jump to user code
1642    error = thread_user_exec( (void *)process->vmm.entry_point,
1643                              args_nr,
1644                              args_pointers );
1645    if( error )
1646    {
[469]1647        printk("\n[ERROR] in %s : cannot update main thread for %s\n", __FUNCTION__ , path );
[457]1648        vfs_close( file_xp , file_id );
1649        // FIXME restore old process VMM
[408]1650        return -1;
[457]1651    }
[1]1652
[492]1653    assert( false, "we should not execute this code");
[457]1654 
[409]1655        return 0;
1656
1657}  // end process_make_exec()
1658
[457]1659
[623]1660////////////////////////////////////////////////
1661void process_zero_create( process_t   * process,
1662                          boot_info_t * info )
[428]1663{
[580]1664    error_t error;
1665    pid_t   pid;
[428]1666
[438]1667#if DEBUG_PROCESS_ZERO_CREATE
[433]1668uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1669if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]1670printk("\n[%s] enter / cluster %x / cycle %d\n",
[564]1671__FUNCTION__, local_cxy, cycle );
[433]1672#endif
[428]1673
[624]1674    // get pointer on VMM
1675    vmm_t * vmm = &process->vmm;
1676
[580]1677    // get PID from local cluster manager for this kernel process
1678    error = cluster_pid_alloc( process , &pid );
1679
1680    if( error || (LPID_FROM_PID( pid ) != 0) )
1681    {
1682        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
1683        __FUNCTION__ , local_cxy, pid );
1684        hal_core_sleep();
1685    }
1686
[428]1687    // initialize PID, REF_XP, PARENT_XP, and STATE
[580]1688    // the kernel process_zero is its own parent_process,
1689    // reference_process, and owner_process, and cannot be killed...
1690    process->pid        = pid;
[433]1691    process->ref_xp     = XPTR( local_cxy , process );
[443]1692    process->owner_xp   = XPTR( local_cxy , process );
[580]1693    process->parent_xp  = XPTR( local_cxy , process );
[433]1694    process->term_state = 0;
[428]1695
[624]1696    // initilise VSL as empty
1697    vmm->vsegs_nr = 0;
1698        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
[623]1699
[624]1700    // initialise GPT as empty
1701    error = hal_gpt_create( &vmm->gpt );
1702    if( error ) 
1703    {
1704        printk("\n[PANIC] in %s : cannot create empty GPT\n", __FUNCTION__ );
1705        hal_core_sleep();
1706    }
1707
[625]1708    // initialize VSL and GPT locks
1709        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
[624]1710    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
1711   
1712    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
1713    error = hal_vmm_kernel_init( info );
1714    if( error ) 
1715    {
1716        printk("\n[PANIC] in %s : cannot create kernel vsegs in VMM\n", __FUNCTION__ );
1717        hal_core_sleep();
1718    }
1719
[564]1720    // reset th_tbl[] array and associated fields
[428]1721    uint32_t i;
[564]1722    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[428]1723        {
1724        process->th_tbl[i] = NULL;
1725    }
1726    process->th_nr  = 0;
[564]1727    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[428]1728
[564]1729
[428]1730    // reset children list as empty
1731    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
1732    process->children_nr = 0;
[564]1733    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
1734                           LOCK_PROCESS_CHILDREN );
[428]1735
[580]1736    // register kernel process in cluster manager local_list
1737    cluster_process_local_link( process );
1738   
[428]1739        hal_fence();
1740
[438]1741#if DEBUG_PROCESS_ZERO_CREATE
[433]1742cycle = (uint32_t)hal_get_cycles();
[438]1743if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]1744printk("\n[%s] exit / cluster %x / cycle %d\n",
[564]1745__FUNCTION__, local_cxy, cycle );
[433]1746#endif
[428]1747
[610]1748}  // end process_zero_create()
[428]1749
[564]1750////////////////////////////////
[485]1751void process_init_create( void )
[1]1752{
[428]1753    process_t      * process;       // local pointer on process descriptor
[409]1754    pid_t            pid;           // process_init identifier
1755    thread_t       * thread;        // local pointer on main thread
1756    pthread_attr_t   attr;          // main thread attributes
1757    lid_t            lid;           // selected core local index for main thread
[457]1758    xptr_t           file_xp;       // extended pointer on .elf file descriptor
1759    uint32_t         file_id;       // file index in fd_array
[409]1760    error_t          error;
[1]1761
[438]1762#if DEBUG_PROCESS_INIT_CREATE
[610]1763thread_t * this = CURRENT_THREAD;
[433]1764uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1765if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1766printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1767__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1768#endif
[1]1769
[408]1770    // allocates memory for process descriptor from local cluster
1771        process = process_alloc(); 
[625]1772    if( process == NULL )
1773    {
1774        printk("\n[PANIC] in %s : cannot allocate process\n", __FUNCTION__ );
1775        hal_core_sleep();
1776    }
[101]1777
[610]1778    // set the CWD and VFS_ROOT fields in process descriptor
1779    process->cwd_xp      = process_zero.vfs_root_xp;
1780    process->vfs_root_xp = process_zero.vfs_root_xp;
1781
[409]1782    // get PID from local cluster
[416]1783    error = cluster_pid_alloc( process , &pid );
[625]1784    if( error ) 
1785    {
1786        printk("\n[PANIC] in %s : cannot allocate PID\n", __FUNCTION__ );
1787        hal_core_sleep();
1788    }
1789    if( pid != 1 ) 
1790    {
1791        printk("\n[PANIC] in %s : process PID must be 0x1\n", __FUNCTION__ );
1792        hal_core_sleep();
1793    }
[408]1794
[409]1795    // initialize process descriptor / parent is local process_zero
[625]1796    error = process_reference_init( process,
1797                                    pid,
1798                                    XPTR( local_cxy , &process_zero ) ); 
1799    if( error )
1800    {
1801        printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ );
1802        hal_core_sleep();
1803    }
[408]1804
[564]1805#if(DEBUG_PROCESS_INIT_CREATE & 1)
1806if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1807printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
1808__FUNCTION__, this->process->pid, this->trdid );
[564]1809#endif
1810
[457]1811    // open the file identified by CONFIG_PROCESS_INIT_PATH
1812    file_xp = XPTR_NULL;
1813    file_id = -1;
[610]1814        error   = vfs_open( process->vfs_root_xp,
[457]1815                            CONFIG_PROCESS_INIT_PATH,
[610]1816                        XPTR( local_cxy , process ),
[457]1817                            O_RDONLY,
1818                            0,
1819                            &file_xp,
1820                            &file_id );
[625]1821    if( error )
1822    {
1823        printk("\n[PANIC] in %s : cannot open file <%s>\n",
1824         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
1825        hal_core_sleep();
1826    }
[457]1827
[564]1828#if(DEBUG_PROCESS_INIT_CREATE & 1)
1829if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1830printk("\n[%s] thread[%x,%x] open .elf file decriptor\n",
1831__FUNCTION__, this->process->pid, this->trdid );
[564]1832#endif
1833
[625]1834    // register "code" and "data" vsegs as well as entry-point
[409]1835    // in process VMM, using information contained in the elf file.
[457]1836        error = elf_load_process( file_xp , process );
[101]1837
[625]1838    if( error ) 
1839    {
1840        printk("\n[PANIC] in %s : cannot access file <%s>\n",
1841         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
1842        hal_core_sleep();
1843    }
[457]1844
[625]1845
[564]1846#if(DEBUG_PROCESS_INIT_CREATE & 1)
1847if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1848printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
1849__FUNCTION__, this->process->pid, this->trdid );
[564]1850#endif
1851
[625]1852#if (DEBUG_PROCESS_INIT_CREATE & 1)
1853hal_vmm_display( process , true );
1854#endif
1855
[428]1856    // get extended pointers on process_zero children_root, children_lock
1857    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
1858    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
1859
[564]1860    // take lock protecting kernel process children list
1861    remote_queuelock_acquire( children_lock_xp );
1862
[428]1863    // register process INIT in parent local process_zero
1864        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1865        hal_atomic_add( &process_zero.children_nr , 1 );
1866
[564]1867    // release lock protecting kernel process children list
1868    remote_queuelock_release( children_lock_xp );
1869
1870#if(DEBUG_PROCESS_INIT_CREATE & 1)
1871if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1872printk("\n[%s] thread[%x,%x] registered init process in parent\n",
1873__FUNCTION__, this->process->pid, this->trdid );
[564]1874#endif
1875
[409]1876    // select a core in local cluster to execute the main thread
1877    lid  = cluster_select_local_core();
1878
1879    // initialize pthread attributes for main thread
1880    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
1881    attr.cxy        = local_cxy;
1882    attr.lid        = lid;
1883
1884    // create and initialize thread descriptor
1885        error = thread_user_create( pid,
1886                                (void *)process->vmm.entry_point,
1887                                NULL,
1888                                &attr,
1889                                &thread );
[1]1890
[625]1891    if( error )
1892    {
1893        printk("\n[PANIC] in %s : cannot create main thread\n", __FUNCTION__  );
1894        hal_core_sleep();
1895    }
1896    if( thread->trdid != 0 )
1897    {
1898        printk("\n[PANIC] in %s : bad main thread trdid\n", __FUNCTION__  );
1899        hal_core_sleep();
1900    }
[428]1901
[564]1902#if(DEBUG_PROCESS_INIT_CREATE & 1)
1903if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1904printk("\n[%s] thread[%x,%x] created main thread\n",
1905__FUNCTION__, this->process->pid, this->trdid );
[564]1906#endif
1907
[409]1908    // activate thread
1909        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
1910
[124]1911    hal_fence();
[1]1912
[438]1913#if DEBUG_PROCESS_INIT_CREATE
[433]1914cycle = (uint32_t)hal_get_cycles();
[438]1915if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1916printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
1917__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1918#endif
[409]1919
[204]1920}  // end process_init_create()
1921
[428]1922/////////////////////////////////////////
1923void process_display( xptr_t process_xp )
1924{
1925    process_t   * process_ptr;
1926    cxy_t         process_cxy;
[443]1927
[428]1928    xptr_t        parent_xp;       // extended pointer on parent process
1929    process_t   * parent_ptr;
1930    cxy_t         parent_cxy;
1931
[443]1932    xptr_t        owner_xp;        // extended pointer on owner process
1933    process_t   * owner_ptr;
1934    cxy_t         owner_cxy;
1935
[428]1936    pid_t         pid;
1937    pid_t         ppid;
[580]1938    lpid_t        lpid;
[428]1939    uint32_t      state;
1940    uint32_t      th_nr;
1941
[443]1942    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
1943    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
1944    chdev_t     * txt_chdev_ptr;
1945    cxy_t         txt_chdev_cxy;
1946    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
[428]1947
1948    xptr_t        elf_file_xp;     // extended pointer on .elf file
1949    cxy_t         elf_file_cxy;
1950    vfs_file_t  * elf_file_ptr;
1951    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
1952
1953    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
1954    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
1955
1956    // get cluster and local pointer on process
1957    process_ptr = GET_PTR( process_xp );
1958    process_cxy = GET_CXY( process_xp );
1959
[580]1960    // get process PID, LPID, and state
[564]1961    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[580]1962    lpid  = LPID_FROM_PID( pid );
[564]1963    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
[428]1964
[580]1965    // get process PPID
[564]1966    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[428]1967    parent_cxy = GET_CXY( parent_xp );
1968    parent_ptr = GET_PTR( parent_xp );
[564]1969    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]1970
1971    // get number of threads
[564]1972    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
[428]1973
[443]1974    // get pointers on owner process descriptor
[564]1975    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
[443]1976    owner_cxy = GET_CXY( owner_xp );
1977    owner_ptr = GET_PTR( owner_xp );
[428]1978
[580]1979    // get process TXT name and .elf name
1980    if( lpid )                                   // user process
1981    {
[443]1982
[580]1983        // get extended pointer on file descriptor associated to TXT_RX
1984        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
[428]1985
[580]1986        assert( (txt_file_xp != XPTR_NULL) ,
[624]1987        "process must be attached to one TXT terminal" ); 
[443]1988
[580]1989        // get TXT_RX chdev pointers
1990        txt_chdev_xp  = chdev_from_file( txt_file_xp );
1991        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
1992        txt_chdev_ptr = GET_PTR( txt_chdev_xp );
1993
1994        // get TXT_RX name and ownership
1995        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
1996                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
[428]1997   
[580]1998        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
1999                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
[428]2000
[580]2001        // get process .elf name
2002        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
2003        elf_file_cxy  = GET_CXY( elf_file_xp );
2004        elf_file_ptr  = GET_PTR( elf_file_xp );
2005        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
2006        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
2007    }
2008    else                                         // kernel process_zero
2009    {
2010        // TXT name and .elf name are not registered in kernel process_zero
2011        strcpy( txt_name , "txt0_rx" );
2012        txt_owner_xp = process_xp; 
2013        strcpy( elf_name , "kernel.elf" );
2014    }
2015
[428]2016    // display process info
[443]2017    if( txt_owner_xp == process_xp )
[428]2018    {
[581]2019        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
2020        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]2021    }
2022    else
2023    {
[581]2024        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
2025        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]2026    }
2027}  // end process_display()
2028
2029
2030////////////////////////////////////////////////////////////////////////////////////////
2031//     Terminals related functions
2032////////////////////////////////////////////////////////////////////////////////////////
2033
[581]2034//////////////////////////////////
[485]2035uint32_t process_txt_alloc( void )
[428]2036{
2037    uint32_t  index;       // TXT terminal index
2038    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
2039    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
2040    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
2041    xptr_t    root_xp;     // extended pointer on owner field in chdev
2042
2043    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
2044    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
2045    {
2046        // get pointers on TXT_RX[index]
2047        chdev_xp  = chdev_dir.txt_rx[index];
2048        chdev_cxy = GET_CXY( chdev_xp );
2049        chdev_ptr = GET_PTR( chdev_xp );
2050
2051        // get extended pointer on root of attached process
2052        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2053
2054        // return free TXT index if found
2055        if( xlist_is_empty( root_xp ) ) return index; 
2056    }
2057
[492]2058    assert( false , "no free TXT terminal found" );
[428]2059
2060    return -1;
2061
2062} // end process_txt_alloc()
2063
2064/////////////////////////////////////////////
2065void process_txt_attach( process_t * process,
2066                         uint32_t    txt_id )
2067{
2068    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2069    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2070    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2071    xptr_t      root_xp;      // extended pointer on list root in chdev
2072    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2073
[564]2074// check process is in owner cluster
2075assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
2076"process descriptor not in owner cluster" );
[428]2077
[564]2078// check terminal index
2079assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
2080"illegal TXT terminal index" );
[428]2081
2082    // get pointers on TXT_RX[txt_id] chdev
2083    chdev_xp  = chdev_dir.txt_rx[txt_id];
2084    chdev_cxy = GET_CXY( chdev_xp );
2085    chdev_ptr = GET_PTR( chdev_xp );
2086
2087    // get extended pointer on root & lock of attached process list
2088    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2089    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2090
[564]2091    // get lock protecting list of processes attached to TXT
2092    remote_busylock_acquire( lock_xp );
2093
[428]2094    // insert process in attached process list
2095    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
2096
[564]2097    // release lock protecting list of processes attached to TXT
2098    remote_busylock_release( lock_xp );
2099
[446]2100#if DEBUG_PROCESS_TXT
[610]2101thread_t * this = CURRENT_THREAD;
[457]2102uint32_t cycle = (uint32_t)hal_get_cycles();
[446]2103if( DEBUG_PROCESS_TXT < cycle )
[610]2104printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
2105__FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle );
[433]2106#endif
[428]2107
2108} // end process_txt_attach()
2109
[436]2110/////////////////////////////////////////////
2111void process_txt_detach( xptr_t  process_xp )
[428]2112{
[436]2113    process_t * process_ptr;  // local pointer on process in owner cluster
2114    cxy_t       process_cxy;  // process owner cluster
2115    pid_t       process_pid;  // process identifier
2116    xptr_t      file_xp;      // extended pointer on stdin file
[428]2117    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2118    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2119    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2120    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2121
[436]2122    // get process cluster, local pointer, and PID
2123    process_cxy = GET_CXY( process_xp );
2124    process_ptr = GET_PTR( process_xp );
[564]2125    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2126
[564]2127// check process descriptor in owner cluster
2128assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
2129"process descriptor not in owner cluster" );
[436]2130
2131    // release TXT ownership (does nothing if not TXT owner)
2132    process_txt_transfer_ownership( process_xp );
[428]2133
[625]2134    // get extended pointer on process stdin pseudo file
[564]2135    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[436]2136
2137    // get pointers on TXT_RX chdev
2138    chdev_xp  = chdev_from_file( file_xp );
[428]2139    chdev_cxy = GET_CXY( chdev_xp );
2140    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
2141
[436]2142    // get extended pointer on lock protecting attached process list
[428]2143    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2144
[564]2145    // get lock protecting list of processes attached to TXT
2146    remote_busylock_acquire( lock_xp );
2147
[428]2148    // unlink process from attached process list
[436]2149    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
2150
[564]2151    // release lock protecting list of processes attached to TXT
2152    remote_busylock_release( lock_xp );
2153
[446]2154#if DEBUG_PROCESS_TXT
[610]2155thread_t * this = CURRENT_THREAD;
[457]2156uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2157uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[446]2158if( DEBUG_PROCESS_TXT < cycle )
[625]2159printk("\n[%s] thread[%x,%x] detached process %x from TXT%d / cycle %d\n",
[610]2160__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
[433]2161#endif
[428]2162
2163} // end process_txt_detach()
2164
2165///////////////////////////////////////////////////
2166void process_txt_set_ownership( xptr_t process_xp )
2167{
2168    process_t * process_ptr;
2169    cxy_t       process_cxy;
[436]2170    pid_t       process_pid;
[428]2171    xptr_t      file_xp;
2172    xptr_t      txt_xp;     
2173    chdev_t   * txt_ptr;
2174    cxy_t       txt_cxy;
2175
[436]2176    // get pointers on process in owner cluster
[428]2177    process_cxy = GET_CXY( process_xp );
[435]2178    process_ptr = GET_PTR( process_xp );
[564]2179    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2180
2181    // check owner cluster
[492]2182    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[624]2183    "process descriptor not in owner cluster" );
[436]2184
[428]2185    // get extended pointer on stdin pseudo file
[564]2186    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2187
2188    // get pointers on TXT chdev
2189    txt_xp  = chdev_from_file( file_xp );
2190    txt_cxy = GET_CXY( txt_xp );
[435]2191    txt_ptr = GET_PTR( txt_xp );
[428]2192
2193    // set owner field in TXT chdev
[564]2194    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
[428]2195
[446]2196#if DEBUG_PROCESS_TXT
[610]2197thread_t * this = CURRENT_THREAD;
[457]2198uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]2199uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[446]2200if( DEBUG_PROCESS_TXT < cycle )
[625]2201printk("\n[%s] thread[%x,%x] give TXT%d ownership to process %x / cycle %d\n",
[610]2202__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
[436]2203#endif
2204
[428]2205}  // end process_txt_set ownership()
2206
[436]2207////////////////////////////////////////////////////////
2208void process_txt_transfer_ownership( xptr_t process_xp )
[428]2209{
[436]2210    process_t * process_ptr;     // local pointer on process releasing ownership
2211    cxy_t       process_cxy;     // process cluster
2212    pid_t       process_pid;     // process identifier
[428]2213    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
2214    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
[433]2215    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
2216    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
2217    uint32_t    txt_id;          // TXT_RX channel
[428]2218    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
2219    xptr_t      root_xp;         // extended pointer on root of attached process list
[436]2220    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
[428]2221    xptr_t      iter_xp;         // iterator for xlist
2222    xptr_t      current_xp;      // extended pointer on current process
[625]2223    bool_t      found;
[428]2224
[457]2225#if DEBUG_PROCESS_TXT
[610]2226thread_t * this  = CURRENT_THREAD;
2227uint32_t   cycle;
[457]2228#endif
2229
[625]2230    // get pointers on target process
[428]2231    process_cxy = GET_CXY( process_xp );
[435]2232    process_ptr = GET_PTR( process_xp );
[564]2233    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2234
[625]2235// check owner cluster
2236assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2237"process descriptor not in owner cluster" );
[436]2238
[428]2239    // get extended pointer on stdin pseudo file
[564]2240    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2241
2242    // get pointers on TXT chdev
2243    txt_xp  = chdev_from_file( file_xp );
2244    txt_cxy = GET_CXY( txt_xp );
[433]2245    txt_ptr = GET_PTR( txt_xp );
[428]2246
[625]2247    // get relevant infos from chdev descriptor
[564]2248    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[625]2249    txt_id   = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[428]2250
[625]2251    // transfer ownership only if target process is the TXT owner
[436]2252    if( (owner_xp == process_xp) && (txt_id > 0) ) 
[428]2253    {
[436]2254        // get extended pointers on root and lock of attached processes list
2255        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
2256        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
[428]2257
[625]2258        if( process_get_ppid( process_xp ) != 1 )       // target process is not KSH
2259        {
2260            // get lock
2261            remote_busylock_acquire( lock_xp );
[436]2262
2263            // scan attached process list to find KSH process
[625]2264            found = false;
2265            for( iter_xp = hal_remote_l64( root_xp ) ;
2266                 (iter_xp != root_xp) && (found == false) ;
2267                 iter_xp = hal_remote_l64( iter_xp ) )
[436]2268            {
[625]2269                current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );
[435]2270
[436]2271                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
2272                {
2273                    // set owner field in TXT chdev
[564]2274                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2275
[446]2276#if DEBUG_PROCESS_TXT
[610]2277cycle = (uint32_t)hal_get_cycles();
[446]2278if( DEBUG_PROCESS_TXT < cycle )
[625]2279printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to KSH / cycle %d\n",
2280__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
[436]2281#endif
[625]2282                    found = true;
[436]2283                }
2284            }
[625]2285
[436]2286            // release lock
[564]2287            remote_busylock_release( lock_xp );
[436]2288
[625]2289// It must exist a KSH process for each user TXT channel
2290assert( (found == true), "KSH process not found for TXT%d", txt_id );
[436]2291
2292        }
[625]2293        else                                           // target process is KSH
[436]2294        {
[625]2295            // get lock
2296            remote_busylock_acquire( lock_xp );
2297
[436]2298            // scan attached process list to find another process
[625]2299            found = false;
2300            for( iter_xp = hal_remote_l64( root_xp ) ;
2301                 (iter_xp != root_xp) && (found == false) ;
2302                 iter_xp = hal_remote_l64( iter_xp ) )
[428]2303            {
[436]2304                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2305
2306                if( current_xp != process_xp )            // current is not KSH
2307                {
2308                    // set owner field in TXT chdev
[564]2309                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2310
[446]2311#if DEBUG_PROCESS_TXT
[610]2312cycle  = (uint32_t)hal_get_cycles();
[625]2313cxy_t       current_cxy = GET_CXY( current_xp );
2314process_t * current_ptr = GET_PTR( current_xp );
2315uint32_t    new_pid     = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
[446]2316if( DEBUG_PROCESS_TXT < cycle )
[625]2317printk("\n[%s] thread[%x,%x] transfered TXT%d ownership to process %x / cycle %d\n",
[610]2318__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
[436]2319#endif
[625]2320                    found = true;
[436]2321                }
[428]2322            }
[436]2323
2324            // release lock
[564]2325            remote_busylock_release( lock_xp );
[436]2326
2327            // no more owner for TXT if no other process found
[625]2328            if( found == false )
2329            {
2330                // set owner field in TXT chdev
2331                hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
[436]2332
[446]2333#if DEBUG_PROCESS_TXT
[436]2334cycle = (uint32_t)hal_get_cycles();
[446]2335if( DEBUG_PROCESS_TXT < cycle )
[625]2336printk("\n[%s] thread[%x,%x] released TXT%d (no attached process) / cycle %d\n",
[610]2337__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
[436]2338#endif
[625]2339            }
[428]2340        }
[436]2341    }
2342    else
2343    {
[433]2344
[446]2345#if DEBUG_PROCESS_TXT
[436]2346cycle = (uint32_t)hal_get_cycles();
[446]2347if( DEBUG_PROCESS_TXT < cycle )
[625]2348printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
2349__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
[436]2350#endif
2351
[428]2352    }
[625]2353
[436]2354}  // end process_txt_transfer_ownership()
[428]2355
2356
[564]2357////////////////////////////////////////////////
2358bool_t process_txt_is_owner( xptr_t process_xp )
[457]2359{
2360    // get local pointer and cluster of process in owner cluster
2361    cxy_t       process_cxy = GET_CXY( process_xp );
2362    process_t * process_ptr = GET_PTR( process_xp );
2363
[564]2364// check calling thread execute in target process owner cluster
2365pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2366assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[624]2367"process descriptor not in owner cluster" );
[457]2368
2369    // get extended pointer on stdin pseudo file
[564]2370    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[457]2371
2372    // get pointers on TXT chdev
2373    xptr_t    txt_xp  = chdev_from_file( file_xp );
2374    cxy_t     txt_cxy = GET_CXY( txt_xp );
2375    chdev_t * txt_ptr = GET_PTR( txt_xp );
2376
2377    // get extended pointer on TXT_RX owner process
[564]2378    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[457]2379
2380    return (process_xp == owner_xp);
2381
2382}   // end process_txt_is_owner()
2383
[436]2384////////////////////////////////////////////////     
2385xptr_t process_txt_get_owner( uint32_t channel )
[435]2386{
2387    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
2388    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
2389    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
2390
[564]2391    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
[435]2392
[457]2393}  // end process_txt_get_owner()
2394
[435]2395///////////////////////////////////////////
2396void process_txt_display( uint32_t txt_id )
2397{
2398    xptr_t      chdev_xp;
2399    cxy_t       chdev_cxy;
2400    chdev_t   * chdev_ptr;
2401    xptr_t      root_xp;
2402    xptr_t      lock_xp;
2403    xptr_t      current_xp;
2404    xptr_t      iter_xp;
[443]2405    cxy_t       txt0_cxy;
2406    chdev_t   * txt0_ptr;
2407    xptr_t      txt0_xp;
2408    xptr_t      txt0_lock_xp;
2409   
[435]2410    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
[492]2411    "illegal TXT terminal index" );
[435]2412
[443]2413    // get pointers on TXT0 chdev
2414    txt0_xp  = chdev_dir.txt_tx[0];
2415    txt0_cxy = GET_CXY( txt0_xp );
2416    txt0_ptr = GET_PTR( txt0_xp );
2417
2418    // get extended pointer on TXT0 lock
2419    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
2420
[435]2421    // get pointers on TXT_RX[txt_id] chdev
2422    chdev_xp  = chdev_dir.txt_rx[txt_id];
2423    chdev_cxy = GET_CXY( chdev_xp );
2424    chdev_ptr = GET_PTR( chdev_xp );
2425
2426    // get extended pointer on root & lock of attached process list
2427    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2428    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2429
[443]2430    // get lock on attached process list
[564]2431    remote_busylock_acquire( lock_xp );
[443]2432
2433    // get TXT0 lock in busy waiting mode
[564]2434    remote_busylock_acquire( txt0_lock_xp );
[443]2435
[435]2436    // display header
[443]2437    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
2438    txt_id , (uint32_t)hal_get_cycles() );
[435]2439
[436]2440    // scan attached process list
[435]2441    XLIST_FOREACH( root_xp , iter_xp )
2442    {
2443        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2444        process_display( current_xp );
2445    }
2446
[443]2447    // release TXT0 lock in busy waiting mode
[564]2448    remote_busylock_release( txt0_lock_xp );
[443]2449
2450    // release lock on attached process list
[564]2451    remote_busylock_release( lock_xp );
[435]2452
2453}  // end process_txt_display
Note: See TracBrowser for help on using the repository browser.