source: trunk/kernel/kern/process.c @ 610

Last change on this file since 610 was 610, checked in by alain, 6 years ago

Fix several bugs in VFS to support the following
ksh commands: cp, mv, rm, mkdir, cd, pwd

File size: 77.3 KB
RevLine 
[1]1/*
[564]2 * process.c - process related functions definition.
[172]3 *
[1]4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
[433]6 *          Alain Greiner (2016,2017,2018)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
[409]10 * This file is part of ALMOS-MKH.
[1]11 *
[172]12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
[172]16 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
[172]22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[457]27#include <hal_kernel_types.h>
[1]28#include <hal_remote.h>
29#include <hal_uspace.h>
[409]30#include <hal_irqmask.h>
[1]31#include <errno.h>
32#include <printk.h>
33#include <memcpy.h>
34#include <bits.h>
35#include <kmem.h>
36#include <page.h>
37#include <vmm.h>
38#include <vfs.h>
39#include <core.h>
40#include <thread.h>
[428]41#include <chdev.h>
[1]42#include <list.h>
[407]43#include <string.h>
[1]44#include <scheduler.h>
[564]45#include <busylock.h>
46#include <queuelock.h>
47#include <remote_queuelock.h>
48#include <rwlock.h>
49#include <remote_rwlock.h>
[1]50#include <dqdt.h>
51#include <cluster.h>
52#include <ppm.h>
53#include <boot_info.h>
54#include <process.h>
55#include <elf.h>
[23]56#include <syscalls.h>
[435]57#include <shared_syscalls.h>
[1]58
59//////////////////////////////////////////////////////////////////////////////////////////
60// Extern global variables
61//////////////////////////////////////////////////////////////////////////////////////////
62
[428]63extern process_t           process_zero;     // allocated in kernel_init.c
64extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
[1]65
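//////////////////////////////////////////////////////////////////////////////////////////
// Note on remote accesses: the process descriptors handled below can live in any
// cluster, and are accessed through extended pointers built with XPTR( cxy , ptr ).
// The GET_CXY() / GET_PTR() macros extract the cluster identifier and the local
// pointer, and the hal_remote_l32() / hal_remote_l64() / hal_remote_s64() primitives
// perform the remote load / store. A minimal sketch of the pattern used throughout
// this file (reading the PID of a possibly remote parent process):
//
//     cxy_t       parent_cxy = GET_CXY( parent_xp );
//     process_t * parent_ptr = GET_PTR( parent_xp );
//     pid_t       parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
//////////////////////////////////////////////////////////////////////////////////////////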
66//////////////////////////////////////////////////////////////////////////////////////////
67// Process initialisation related functions
68//////////////////////////////////////////////////////////////////////////////////////////
69
[583]70/////////////////////////////////
[503]71process_t * process_alloc( void )
[1]72{
73        kmem_req_t   req;
74
75    req.type  = KMEM_PROCESS;
76        req.size  = sizeof(process_t);
77        req.flags = AF_KERNEL;
78
79    return (process_t *)kmem_alloc( &req );
80}
81
82////////////////////////////////////////
83void process_free( process_t * process )
84{
85    kmem_req_t  req;
86
87        req.type = KMEM_PROCESS;
88        req.ptr  = process;
89        kmem_free( &req );
90}
91
[101]92/////////////////////////////////////////////////
93void process_reference_init( process_t * process,
94                             pid_t       pid,
[457]95                             xptr_t      parent_xp )
[1]96{
[610]97    xptr_t      process_xp;
[428]98    cxy_t       parent_cxy;
99    process_t * parent_ptr;
[407]100    xptr_t      stdin_xp;
101    xptr_t      stdout_xp;
102    xptr_t      stderr_xp;
103    uint32_t    stdin_id;
104    uint32_t    stdout_id;
105    uint32_t    stderr_id;
[415]106    error_t     error;
[428]107    uint32_t    txt_id;
108    char        rx_path[40];
109    char        tx_path[40];
[440]110    xptr_t      file_xp;
[428]111    xptr_t      chdev_xp;
112    chdev_t *   chdev_ptr;
113    cxy_t       chdev_cxy;
114    pid_t       parent_pid;
[1]115
[610]116    // build extended pointer on this reference process
117    process_xp = XPTR( local_cxy , process );
118
[428]119    // get parent process cluster and local pointer
120    parent_cxy = GET_CXY( parent_xp );
[435]121    parent_ptr = GET_PTR( parent_xp );
[204]122
[457]123    // get parent_pid
[564]124    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]125
[438]126#if DEBUG_PROCESS_REFERENCE_INIT
[610]127thread_t * this = CURRENT_THREAD;
[433]128uint32_t cycle = (uint32_t)hal_get_cycles();
[610]129if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
130printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
131__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]132#endif
[428]133
[610]134    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
[433]135        process->pid        = pid;
136    process->ref_xp     = XPTR( local_cxy , process );
[443]137    process->owner_xp   = XPTR( local_cxy , process );
[433]138    process->parent_xp  = parent_xp;
139    process->term_state = 0;
[428]140
[610]141    // initialize VFS root inode and CWD inode
142    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
143    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
144
[409]145    // initialize vmm as empty
[415]146    error = vmm_init( process );
[564]147
148assert( (error == 0) , "cannot initialize VMM\n" );
[415]149 
[438]150#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]151cycle = (uint32_t)hal_get_cycles();
[610]152if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
153printk("\n[%s] thread[%x,%x] / vmm empty for process %x / cycle %d\n", 
154__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]155#endif
[1]156
[409]157    // initialize fd_array as empty
[408]158    process_fd_init( process );
[1]159
[428]160    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
[581]161    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
[408]162    {
[581]163        // select a TXT channel
164        if( pid == 1 )  txt_id = 0;                     // INIT
165        else            txt_id = process_txt_alloc();   // KSH
[428]166
[457]167        // attach process to TXT
[428]168        process_txt_attach( process , txt_id ); 
169
[457]170#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
171cycle = (uint32_t)hal_get_cycles();
[610]172if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
173printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
174__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
[457]175#endif
[428]176        // build path to TXT_RX[i] and TXT_TX[i] chdevs
177        snprintf( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
178        snprintf( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
179
180        // create stdin pseudo file         
[610]181        error = vfs_open(  process->vfs_root_xp,
[428]182                           rx_path,
[610]183                           process_xp,
[408]184                           O_RDONLY, 
185                           0,                // FIXME chmod
186                           &stdin_xp, 
187                           &stdin_id );
[1]188
[564]189assert( (error == 0) , "cannot open stdin pseudo file" );
190assert( (stdin_id == 0) , "stdin index must be 0" );
[428]191
[440]192#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
193cycle = (uint32_t)hal_get_cycles();
[610]194if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
195printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
196__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]197#endif
198
[428]199        // create stdout pseudo file         
[610]200        error = vfs_open(  process->vfs_root_xp,
[428]201                           tx_path,
[610]202                           process_xp,
[408]203                           O_WRONLY, 
204                           0,                // FIXME chmod
205                           &stdout_xp, 
206                           &stdout_id );
[1]207
[492]208        assert( (error == 0) , "cannot open stdout pseudo file" );
209        assert( (stdout_id == 1) , "stdout index must be 1" );
[428]210
[440]211#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
212cycle = (uint32_t)hal_get_cycles();
[610]213if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
214printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
215__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]216#endif
217
[428]218        // create stderr pseudo file         
[610]219        error = vfs_open(  process->vfs_root_xp,
[428]220                           tx_path,
[610]221                           process_xp,
[408]222                           O_WRONLY, 
223                           0,                // FIXME chmod
224                           &stderr_xp, 
225                           &stderr_id );
[428]226
[492]227        assert( (error == 0) , "cannot open stderr pseudo file" );
228        assert( (stderr_id == 2) , "stderr index must be 2" );
[428]229
[440]230#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
231cycle = (uint32_t)hal_get_cycles();
[610]232if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
233printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
234__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[440]235#endif
236
[408]237    }
[428]238    else                                            // normal user process
[408]239    {
[457]240        // get extended pointer on stdin pseudo file in parent process
[564]241        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) );
[440]242
[457]243        // get extended pointer on parent process TXT chdev
[440]244        chdev_xp = chdev_from_file( file_xp );
[428]245 
246        // get cluster and local pointer on chdev
247        chdev_cxy = GET_CXY( chdev_xp );
[435]248        chdev_ptr = GET_PTR( chdev_xp );
[428]249 
[564]250        // get parent process TXT terminal index
251        txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[407]252
[564]253        // attach child process to parent process TXT terminal
[428]254        process_txt_attach( process , txt_id ); 
[407]255
[457]256        // copy all open files from parent process fd_array to this process
[428]257        process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
[457]258                                XPTR( parent_cxy , &parent_ptr->fd_array ) );
[408]259    }
[407]260
[610]261    // initialize lock protecting CWD changes
262    remote_busylock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD );
[408]263
[438]264#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]265cycle = (uint32_t)hal_get_cycles();
[610]266if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
267printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
268__FUNCTION__, parent_pid, this->trdid, pid , cycle );
[433]269#endif
[407]270
[408]271    // reset children list root
272    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
273    process->children_nr     = 0;
[564]274    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN );
[407]275
[408]276    // reset semaphore / mutex / barrier / condvar list roots
277    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
278    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
279    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
280    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
[564]281    remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC );
[407]282
[408]283    // register new process in the local cluster manager pref_tbl[]
284    lpid_t lpid = LPID_FROM_PID( pid );
285    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
[407]286
[408]287    // register new process descriptor in local cluster manager local_list
288    cluster_process_local_link( process );
[407]289
[408]290    // register new process descriptor in local cluster manager copies_list
291    cluster_process_copies_link( process );
[172]292
[564]293    // initialize th_tbl[] array and associated fields
[1]294    uint32_t i;
[564]295
296    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]297        {
298        process->th_tbl[i] = NULL;
299    }
300    process->th_nr  = 0;
[564]301    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[1]302
[124]303        hal_fence();
[1]304
[438]305#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
[433]306cycle = (uint32_t)hal_get_cycles();
[610]307if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
308printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
309__FUNCTION__, parent_pid, this->trdid, pid, cycle );
[433]310#endif
[101]311
[428]312}  // process_reference_init()
[204]313
[1]314/////////////////////////////////////////////////////
315error_t process_copy_init( process_t * local_process,
316                           xptr_t      reference_process_xp )
317{
[415]318    error_t error;
319
[23]320    // get reference process cluster and local pointer
321    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
[435]322    process_t * ref_ptr = GET_PTR( reference_process_xp );
[1]323
[428]324    // initialize PID, REF_XP, PARENT_XP, and STATE
[564]325    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
326    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
[433]327    local_process->ref_xp     = reference_process_xp;
[443]328    local_process->owner_xp   = reference_process_xp;
[433]329    local_process->term_state = 0;
[407]330
[564]331#if DEBUG_PROCESS_COPY_INIT
[610]332thread_t * this = CURRENT_THREAD; 
[433]333uint32_t cycle = (uint32_t)hal_get_cycles();
[610]334if( DEBUG_PROCESS_COPY_INIT < cycle )
335printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
336__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]337#endif
[407]338
[564]339// check user process
340assert( (local_process->pid != 0), "PID cannot be 0" );
341
[172]342    // reset local process vmm
[415]343    error = vmm_init( local_process );
[492]344    assert( (error == 0) , "cannot initialize VMM\n");
[1]345
[172]346    // reset process file descriptors array
[23]347        process_fd_init( local_process );
[1]348
[610]349    // reset vfs_root_xp / vfs_bin_xp / cwd_xp fields
[564]350    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
351    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
[610]352    local_process->cwd_xp      = XPTR_NULL;
[1]353
354    // reset children list root (not used in a process descriptor copy)
355    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
[172]356    local_process->children_nr   = 0;
[564]357    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
358                           LOCK_PROCESS_CHILDREN );
[1]359
[428]360    // reset children_list (not used in a process descriptor copy)
361    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
[1]362
363    // reset semaphores list root (not used in a process descriptor copy)
364    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
[23]365    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
366    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
367    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
[1]368
[564]369    // initialize th_tbl[] array and associated fields
[1]370    uint32_t i;
[564]371    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[1]372        {
373        local_process->th_tbl[i] = NULL;
374    }
375    local_process->th_nr  = 0;
[564]376    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
[1]377
[564]378
[1]379    // register new process descriptor in local cluster manager local_list
380    cluster_process_local_link( local_process );
381
382    // register new process descriptor in owner cluster manager copies_list
383    cluster_process_copies_link( local_process );
384
[124]385        hal_fence();
[1]386
[438]387#if DEBUG_PROCESS_COPY_INIT
[433]388cycle = (uint32_t)hal_get_cycles();
[610]389if( DEBUG_PROCESS_COPY_INIT < cycle )
390printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
391__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
[433]392#endif
[279]393
[1]394    return 0;
395
[204]396} // end process_copy_init()
397
[1]398///////////////////////////////////////////
399void process_destroy( process_t * process )
400{
[428]401    xptr_t      parent_xp;
402    process_t * parent_ptr;
403    cxy_t       parent_cxy;
404    xptr_t      children_lock_xp;
[446]405    xptr_t      children_nr_xp;
[1]406
[437]407    pid_t       pid = process->pid;
408
[593]409// check no more threads
410assert( (process->th_nr == 0) , "process %x in cluster %x contains threads", pid , local_cxy );
[428]411
[438]412#if DEBUG_PROCESS_DESTROY
[610]413thread_t * this = CURRENT_THREAD;
[433]414uint32_t cycle = (uint32_t)hal_get_cycles();
[610]415if( DEBUG_PROCESS_DESTROY < cycle )
416printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
417__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]418#endif
[428]419
[436]420    // remove process from local_list in local cluster manager
421    cluster_process_local_unlink( process );
[1]422
[436]423    // remove process from copies_list in owner cluster manager
424    cluster_process_copies_unlink( process );
[23]425
[450]426    // remove process from children_list
427    // and release PID if owner cluster
[437]428    if( CXY_FROM_PID( pid ) == local_cxy )
[428]429    {
430        // get pointers on parent process
431        parent_xp  = process->parent_xp;
432        parent_cxy = GET_CXY( parent_xp );
433        parent_ptr = GET_PTR( parent_xp );
434
435        // get extended pointer on children_lock in parent process
436        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
[446]437        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );
[428]438
439        // remove process from children_list
[564]440        remote_queuelock_acquire( children_lock_xp );
[428]441        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
[446]442            hal_remote_atomic_add( children_nr_xp , -1 );
[564]443        remote_queuelock_release( children_lock_xp );
[450]444
[564]445        // release the process PID to cluster manager
446        cluster_pid_release( pid );
[428]447    }
448
[564]449    // FIXME close all open files and synchronize dirty [AG]
[23]450
[428]451    // decrease refcount for bin file, root file and cwd file
[337]452        if( process->vfs_bin_xp  != XPTR_NULL ) vfs_file_count_down( process->vfs_bin_xp );
453        if( process->vfs_root_xp != XPTR_NULL ) vfs_file_count_down( process->vfs_root_xp );
[610]454        if( process->cwd_xp      != XPTR_NULL ) vfs_file_count_down( process->cwd_xp );
[1]455
456    // Destroy VMM
457    vmm_destroy( process );
458
[416]459    // release memory allocated to process descriptor
460    process_free( process );
[1]461
[438]462#if DEBUG_PROCESS_DESTROY
[433]463cycle = (uint32_t)hal_get_cycles();
[610]464if( DEBUG_PROCESS_DESTROY < cycle )
465printk("\n[%s] thread[%x,%x] exit / process %x in cluster %x / cycle %d\n",
466__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]467#endif
[428]468
[407]469}  // end process_destroy()
470
[583]471///////////////////////////////////////////////////////////////////
[527]472const char * process_action_str( process_sigactions_t action_type )
[409]473{
[583]474    switch ( action_type )
475    {
476        case BLOCK_ALL_THREADS:   return "BLOCK";
477        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
478        case DELETE_ALL_THREADS:  return "DELETE";
479        default:                  return "undefined";
480    }
[409]481}
482
[435]483////////////////////////////////////////
484void process_sigaction( pid_t       pid,
[457]485                        uint32_t    type )
[409]486{
487    cxy_t              owner_cxy;         // owner cluster identifier
488    lpid_t             lpid;              // process index in owner cluster
489    cluster_t        * cluster;           // pointer on cluster manager
490    xptr_t             root_xp;           // extended pointer on root of copies
491    xptr_t             lock_xp;           // extended pointer on lock protecting copies
492    xptr_t             iter_xp;           // iterator on copies list
493    xptr_t             process_xp;        // extended pointer on process copy
494    cxy_t              process_cxy;       // process copy cluster identifier
[457]495    process_t        * process_ptr;       // local pointer on process copy
[436]496    reg_t              save_sr;           // for critical section
497    rpc_desc_t         rpc;               // shared RPC descriptor
[457]498    thread_t         * client;            // pointer on client thread
499    xptr_t             client_xp;         // extended pointer on client thread
500    process_t        * local;             // pointer on process copy in local cluster
501    uint32_t           remote_nr;         // number of remote process copies
[409]502
[457]503    client    = CURRENT_THREAD;
504    client_xp = XPTR( local_cxy , client );
505    local     = NULL;
506    remote_nr = 0;
[435]507
[583]508    // check calling thread can yield
509    thread_assert_can_yield( client , __FUNCTION__ );
[564]510
[438]511#if DEBUG_PROCESS_SIGACTION
[433]512uint32_t cycle = (uint32_t)hal_get_cycles();
[438]513if( DEBUG_PROCESS_SIGACTION < cycle )
[593]514printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
[583]515__FUNCTION__ , client->process->pid, client->trdid,
[457]516process_action_str( type ) , pid , cycle );
[433]517#endif
[409]518
[436]519    // get pointer on local cluster manager
[416]520    cluster = LOCAL_CLUSTER;
521
[409]522    // get owner cluster identifier and process lpid
[435]523    owner_cxy = CXY_FROM_PID( pid );
524    lpid      = LPID_FROM_PID( pid );
[409]525
[593]526    // get root of list of copies and lock from owner cluster
[436]527    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
528    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
[435]529
[583]530// check action type
531assert( ((type == DELETE_ALL_THREADS ) ||
532         (type == BLOCK_ALL_THREADS )  ||
533         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
[416]534             
[593]535    // This client thread sends parallel RPCs to all remote clusters containing
[564]536    // target process copies, waits for all responses, and then directly handles
537    // the threads in the local cluster, when required.
[457]538    // The client thread allocates a shared RPC descriptor on the stack,
539    // because all parallel, non-blocking, server threads use the same input
540    // arguments, and use the shared RPC response field.
[436]541
542    // mask IRQs
543    hal_disable_irq( &save_sr);
544
[457]545    // client thread blocks itself
546    thread_block( client_xp , THREAD_BLOCKED_RPC );
[436]547
[564]548    // take the lock protecting process copies
549    remote_queuelock_acquire( lock_xp );
[409]550
[436]551    // initialize shared RPC descriptor
[438]552    rpc.responses = 0;
553    rpc.blocking  = false;
554    rpc.index     = RPC_PROCESS_SIGACTION;
555    rpc.thread    = client;
556    rpc.lid       = client->core->lid;
[457]557    rpc.args[0]   = type;
[438]558    rpc.args[1]   = pid;
[436]559
[457]560    // scan list of process copies
561    // to send RPCs to remote copies
[409]562    XLIST_FOREACH( root_xp , iter_xp )
563    {
[457]564        // get extended pointer, cluster, and local pointer on process copy
[440]565        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
566        process_cxy = GET_CXY( process_xp );
[457]567        process_ptr = GET_PTR( process_xp );
[440]568
[593]569        if( process_cxy == local_cxy )    // process copy is local
[457]570        { 
571            local = process_ptr;
572        }
[593]573        else                              // process copy is remote
[457]574        {
575            // update number of remote process copies
576            remote_nr++;
577
578            // atomically increment responses counter
579            hal_atomic_add( (void *)&rpc.responses , 1 );
580
[438]581#if DEBUG_PROCESS_SIGACTION
582if( DEBUG_PROCESS_SIGACTION < cycle )
[593]583printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
[583]584__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
[433]585#endif
[457]586            // call RPC in target cluster
587            rpc_process_sigaction_client( process_cxy , &rpc );
588        }
589    }  // end list of copies
590
[409]591    // release the lock protecting process copies
[564]592    remote_queuelock_release( lock_xp );
[409]593
[436]594    // restore IRQs
595    hal_restore_irq( save_sr);
[409]596
[457]597    // - if there are remote process copies, the client thread deschedules,
598    //   (it will be unblocked by the last RPC server thread).
599    // - if there are no remote copies, the client thread unblocks itself.
600    if( remote_nr )
601    {
602        sched_yield("blocked on rpc_process_sigaction");
603    } 
604    else
605    {
606        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
607    }
[409]608
[457]609    // handle the local process copy if required
610    if( local != NULL )
611    {
612
613#if DEBUG_PROCESS_SIGACTION
614if( DEBUG_PROCESS_SIGACTION < cycle )
[593]615printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
[583]616__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
[457]617#endif
618        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp ); 
[583]619        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local ); 
[457]620        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
621    }
622
[438]623#if DEBUG_PROCESS_SIGACTION
[433]624cycle = (uint32_t)hal_get_cycles();
[438]625if( DEBUG_PROCESS_SIGACTION < cycle )
[593]626printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
[583]627__FUNCTION__, client->process->pid, client->trdid,
[457]628process_action_str( type ), pid, cycle );
[433]629#endif
[416]630
[409]631}  // end process_sigaction()
632
[433]633/////////////////////////////////////////////////
[583]634void process_block_threads( process_t * process )
[1]635{
[409]636    thread_t          * target;         // pointer on target thread
[433]637    thread_t          * this;           // pointer on calling thread
[564]638    uint32_t            ltid;           // index in process th_tbl[]
[436]639    cxy_t               owner_cxy;      // target process owner cluster
[409]640    uint32_t            count;          // requests counter
[593]641    volatile uint32_t   ack_count;      // acknowledges counter
[1]642
[416]643    // get calling thread pointer
[433]644    this = CURRENT_THREAD;
[407]645
[438]646#if DEBUG_PROCESS_SIGACTION
[564]647pid_t pid = process->pid;
[433]648uint32_t cycle = (uint32_t)hal_get_cycles();
[438]649if( DEBUG_PROCESS_SIGACTION < cycle )
[593]650printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]651__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]652#endif
[409]653
[564]654// check target process is a user process
[583]655assert( (LPID_FROM_PID( process->pid ) != 0 ), "target process must be a user process" );
[564]656
[610]657    // get target process owner cluster
[564]658    owner_cxy = CXY_FROM_PID( process->pid );
659
[409]660    // get lock protecting process th_tbl[]
[564]661    rwlock_rd_acquire( &process->th_lock );
[1]662
[440]663    // loop on target process local threads
[409]664    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[593]665    // - if the calling thread and the target thread are not running on the same
666    //   core, we ask the target scheduler to acknowledge the blocking
667    //   to be sure that the target thread is not running.
668    // - if the calling thread and the target thread are running on the same core,
669    //   we don't need confirmation from the scheduler.
670           
[436]671    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
[1]672    {
[409]673        target = process->th_tbl[ltid];
[1]674
[436]675        if( target != NULL )                                 // thread exists
[1]676        {
677            count++;
[409]678
[583]679            // set the global blocked bit in target thread descriptor.
680            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[436]681 
[583]682            if( this->core->lid != target->core->lid )
683            {
684                // increment responses counter
685                hal_atomic_add( (void*)&ack_count , 1 );
[409]686
[583]687                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
688                thread_set_req_ack( target , (uint32_t *)&ack_count );
[409]689
[583]690                // force scheduling on target thread
691                dev_pic_send_ipi( local_cxy , target->core->lid );
[409]692            }
[1]693        }
[172]694    }
695
[428]696    // release lock protecting process th_tbl[]
[564]697    rwlock_rd_release( &process->th_lock );
[416]698
[593]699    // wait for all scheduler acknowledges  TODO this could be improved...
[409]700    while( 1 )
701    {
[610]702        // exit when all scheduler acknowledges received
[436]703        if ( ack_count == 0 ) break;
[409]704   
705        // wait 1000 cycles before retry
706        hal_fixed_delay( 1000 );
707    }
[1]708
[438]709#if DEBUG_PROCESS_SIGACTION
[433]710cycle = (uint32_t)hal_get_cycles();
[438]711if( DEBUG_PROCESS_SIGACTION < cycle )
[593]712printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
713__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]714#endif
[409]715
[428]716}  // end process_block_threads()
[409]717
[440]718/////////////////////////////////////////////////
719void process_delete_threads( process_t * process,
720                             xptr_t      client_xp )
[409]721{
[433]722    thread_t          * this;          // pointer on calling thread
[440]723    thread_t          * target;        // local pointer on target thread
724    xptr_t              target_xp;     // extended pointer on target thread
725    cxy_t               owner_cxy;     // owner process cluster
[409]726    uint32_t            ltid;          // index in process th_tbl
[440]727    uint32_t            count;         // threads counter
[409]728
[433]729    // get calling thread pointer
730    this = CURRENT_THREAD;
[409]731
[440]732    // get target process owner cluster
733    owner_cxy = CXY_FROM_PID( process->pid );
734
[438]735#if DEBUG_PROCESS_SIGACTION
[433]736uint32_t cycle = (uint32_t)hal_get_cycles();
[438]737if( DEBUG_PROCESS_SIGACTION < cycle )
[593]738printk("\n[%s] thread[%x,%x] enter in cluster %x for process %x / cycle %d\n",
[583]739__FUNCTION__, this->process->pid, this->trdid, local_cxy, process->pid, cycle );
[433]740#endif
741
[564]742// check target process is a user process
[593]743assert( (LPID_FROM_PID( process->pid ) != 0), "process %x is not a user process", process->pid );
[564]744
[409]745    // get lock protecting process th_tbl[]
[583]746    rwlock_wr_acquire( &process->th_lock );
[409]747
[440]748    // loop on target process local threads                       
[416]749    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[440]750    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
[1]751    {
[409]752        target = process->th_tbl[ltid];
[1]753
[440]754        if( target != NULL )    // valid thread 
[1]755        {
[416]756            count++;
[440]757            target_xp = XPTR( local_cxy , target );
[1]758
[564]759            // main thread and client thread should not be deleted
[440]760            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
761                (client_xp) != target_xp )                           // not client thread
762            {
763                // mark target thread for delete and block it
764                thread_delete( target_xp , process->pid , false );   // not forced
765            }
[409]766        }
767    }
[1]768
[428]769    // release lock protecting process th_tbl[]
[583]770    rwlock_wr_release( &process->th_lock );
[407]771
[438]772#if DEBUG_PROCESS_SIGACTION
[433]773cycle = (uint32_t)hal_get_cycles();
[438]774if( DEBUG_PROCESS_SIGACTION < cycle )
[593]775printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
776__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
[433]777#endif
[407]778
[440]779}  // end process_delete_threads()
[409]780
[440]781///////////////////////////////////////////////////
782void process_unblock_threads( process_t * process )
[409]783{
[440]784    thread_t          * target;        // pointer on target thread
785    thread_t          * this;          // pointer on calling thread
[409]786    uint32_t            ltid;          // index in process th_tbl
[440]787    uint32_t            count;         // requests counter
[409]788
[440]789    // get calling thread pointer
790    this = CURRENT_THREAD;
791
[438]792#if DEBUG_PROCESS_SIGACTION
[564]793pid_t pid = process->pid;
[433]794uint32_t cycle = (uint32_t)hal_get_cycles();
[438]795if( DEBUG_PROCESS_SIGACTION < cycle )
[593]796printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]797__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
[433]798#endif
799
[564]800// check target process is a user process
801assert( ( process->pid != 0 ),
802"target process must be a user process" );
803
[416]804    // get lock protecting process th_tbl[]
[564]805    rwlock_rd_acquire( &process->th_lock );
[416]806
[440]807    // loop on process threads to unblock all threads
[416]808    // we use both "ltid" and "count" because there can be "holes" in th_tbl
[440]809    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
[409]810    {
[416]811        target = process->th_tbl[ltid];
[409]812
[440]813        if( target != NULL )             // thread found
[409]814        {
815            count++;
[440]816
817            // reset the global blocked bit in target thread descriptor.
818            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
[1]819        }
820    }
821
[428]822    // release lock protecting process th_tbl[]
[564]823    rwlock_rd_release( &process->th_lock );
[407]824
[438]825#if DEBUG_PROCESS_SIGACTION
[433]826cycle = (uint32_t)hal_get_cycles();
[438]827if( DEBUG_PROCESS_SIGACTION < cycle )
[593]828printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
[583]829__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[433]830#endif
[1]831
[440]832}  // end process_unblock_threads()
[407]833
[1]834///////////////////////////////////////////////
835process_t * process_get_local_copy( pid_t pid )
836{
837    error_t        error;
[172]838    process_t    * process_ptr;   // local pointer on process
[23]839    xptr_t         process_xp;    // extended pointer on process
[1]840
841    cluster_t * cluster = LOCAL_CLUSTER;
842
[564]843#if DEBUG_PROCESS_GET_LOCAL_COPY
844thread_t * this = CURRENT_THREAD;
845uint32_t cycle = (uint32_t)hal_get_cycles();
846if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]847printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
[583]848__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
[564]849#endif
850
[1]851    // get lock protecting local list of processes
[564]852    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]853
854    // scan the local list of process descriptors to find the process
[23]855    xptr_t  iter;
856    bool_t  found = false;
857    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
[1]858    {
[23]859        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
[435]860        process_ptr = GET_PTR( process_xp );
[23]861        if( process_ptr->pid == pid )
[1]862        {
863            found = true;
864            break;
865        }
866    }
867
868    // release lock protecting local list of processes
[564]869    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]870
[172]871    // allocate memory for a new local process descriptor
[440]872    // and initialize it from the reference cluster if not found
[1]873    if( !found )
874    {
875        // get extended pointer on reference process descriptor
[23]876        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
[1]877
[492]878        assert( (ref_xp != XPTR_NULL) , "illegal pid\n" );
[23]879
[1]880        // allocate memory for local process descriptor
[23]881        process_ptr = process_alloc();
[443]882
[23]883        if( process_ptr == NULL )  return NULL;
[1]884
885        // initialize local process descriptor copy
[23]886        error = process_copy_init( process_ptr , ref_xp );
[443]887
[1]888        if( error ) return NULL;
889    }
890
[440]891#if DEBUG_PROCESS_GET_LOCAL_COPY
[564]892cycle = (uint32_t)hal_get_cycles();
[440]893if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
[593]894printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
[583]895__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
[440]896#endif
897
[23]898    return process_ptr;
[1]899
[409]900}  // end process_get_local_copy()
901
[436]902////////////////////////////////////////////
903pid_t process_get_ppid( xptr_t  process_xp )
904{
905    cxy_t       process_cxy;
906    process_t * process_ptr;
907    xptr_t      parent_xp;
908    cxy_t       parent_cxy;
909    process_t * parent_ptr;
910
911    // get process cluster and local pointer
912    process_cxy = GET_CXY( process_xp );
913    process_ptr = GET_PTR( process_xp );
914
915    // get pointers on parent process
[564]916    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[436]917    parent_cxy = GET_CXY( parent_xp );
918    parent_ptr = GET_PTR( parent_xp );
919
[564]920    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[436]921}
922
[1]923//////////////////////////////////////////////////////////////////////////////////////////
924// File descriptor array related functions
925//////////////////////////////////////////////////////////////////////////////////////////
926
927///////////////////////////////////////////
928void process_fd_init( process_t * process )
929{
930    uint32_t fd;
931
[610]932    // initialize lock
[564]933    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
[1]934
[610]935    // initialize number of open files
[23]936    process->fd_array.current = 0;
937
[1]938    // initialize array
[23]939    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]940    {
941        process->fd_array.array[fd] = XPTR_NULL;
942    }
943}
[610]944////////////////////////////////////////////////////
945error_t process_fd_register( xptr_t      process_xp,
[407]946                             xptr_t      file_xp,
947                             uint32_t  * fdid )
[1]948{
949    bool_t    found;
[23]950    uint32_t  id;
951    xptr_t    xp;
[1]952
[23]953    // get reference process cluster and local pointer
[610]954    process_t * process_ptr = GET_PTR( process_xp );
955    cxy_t       process_cxy = GET_CXY( process_xp );
[23]956
[610]957// check client process is reference process
958assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp ) ) ),
959"client process must be reference process\n" );
960
961#if DEBUG_PROCESS_FD_REGISTER
962thread_t * this  = CURRENT_THREAD;
963uint32_t   cycle = (uint32_t)hal_get_cycles();
964pid_t      pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
965if( DEBUG_PROCESS_FD_REGISTER < cycle )
966printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
967__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
968#endif
969
970    // build extended pointer on lock protecting reference fd_array
971    xptr_t lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
972
[23]973    // take lock protecting reference fd_array
[610]974        remote_queuelock_acquire( lock_xp );
[23]975
[1]976    found   = false;
977
[23]978    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
[1]979    {
[610]980        xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
[23]981        if ( xp == XPTR_NULL )
[1]982        {
[564]983            // update reference fd_array
[610]984            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
985                hal_remote_atomic_add( XPTR( process_cxy , &process_ptr->fd_array.current ) , 1 );
[564]986
987            // exit
988                        *fdid = id;
[1]989            found = true;
990            break;
991        }
992    }
993
[610]994    // release lock protecting fd_array
995        remote_queuelock_release( lock_xp );
[1]996
[610]997#if DEBUG_PROCESS_FD_REGISTER
998cycle = (uint32_t)hal_get_cycles();
999if( DEBUG_PROCESS_FD_REGISTER < cycle )
1000printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
1001__FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );
1002#endif
1003
[428]1004    if ( !found ) return -1;
[1]1005    else          return 0;
1006
[610]1007}  // end process_fd_register()
1008
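// Note on fd_array replication: only the reference process fd_array is authoritative.
// The function below first checks the local copy, and on a miss fetches the entry
// from the reference cluster and caches it in the local fd_array, so a given fdid
// is resolved remotely at most once per process copy.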
[172]1009////////////////////////////////////////////////
[23]1010xptr_t process_fd_get_xptr( process_t * process,
[407]1011                            uint32_t    fdid )
[1]1012{
[23]1013    xptr_t  file_xp;
[564]1014    xptr_t  lock_xp;
[1]1015
[23]1016    // access local copy of process descriptor
[407]1017    file_xp = process->fd_array.array[fdid];
[1]1018
[23]1019    if( file_xp == XPTR_NULL )
1020    {
1021        // get reference process cluster and local pointer
1022        xptr_t      ref_xp  = process->ref_xp;
1023        cxy_t       ref_cxy = GET_CXY( ref_xp );
[435]1024        process_t * ref_ptr = GET_PTR( ref_xp );
[1]1025
[564]1026        // build extended pointer on lock protecting reference fd_array
1027        lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock );
1028
1029        // take lock protecting reference fd_array
1030            remote_queuelock_acquire( lock_xp );
1031
[23]1032        // access reference process descriptor
[564]1033        file_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
[1]1034
[23]1035        // update local fd_array if found
[564]1036        if( file_xp != XPTR_NULL )  process->fd_array.array[fdid] = file_xp;
1037       
1038        // release lock protecting reference fd_array
1039            remote_queuelock_release( lock_xp );
[23]1040    }
[1]1041
[23]1042    return file_xp;
[1]1043
[407]1044}  // end process_fd_get_xptr()
1045
[1]1046///////////////////////////////////////////
1047void process_fd_remote_copy( xptr_t dst_xp,
1048                             xptr_t src_xp )
1049{
1050    uint32_t fd;
1051    xptr_t   entry;
1052
1053    // get cluster and local pointer for src fd_array
1054    cxy_t        src_cxy = GET_CXY( src_xp );
[435]1055    fd_array_t * src_ptr = GET_PTR( src_xp );
[1]1056
1057    // get cluster and local pointer for dst fd_array
1058    cxy_t        dst_cxy = GET_CXY( dst_xp );
[435]1059    fd_array_t * dst_ptr = GET_PTR( dst_xp );
[1]1060
1061    // get the remote lock protecting the src fd_array
[564]1062        remote_queuelock_acquire( XPTR( src_cxy , &src_ptr->lock ) );
[1]1063
[428]1064    // loop on all fd_array entries
1065    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]1066        {
[564]1067                entry = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->array[fd] ) );
[1]1068
1069                if( entry != XPTR_NULL )
1070                {
[459]1071            // increment file descriptor refcount
[1]1072            vfs_file_count_up( entry );
1073
1074                        // copy entry in destination process fd_array
[564]1075                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
[1]1076                }
1077        }
1078
1079    // release lock on source process fd_array
[564]1080        remote_queuelock_release( XPTR( src_cxy , &src_ptr->lock ) );
[1]1081
[407]1082}  // end process_fd_remote_copy()
1083
[564]1084
1085////////////////////////////////////
1086bool_t process_fd_array_full( void )
1087{
1088    // get extended pointer on reference process
1089    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
1090
1091    // get reference process cluster and local pointer
1092    process_t * ref_ptr = GET_PTR( ref_xp );
1093    cxy_t       ref_cxy = GET_CXY( ref_xp );
1094
1095    // get number of open file descriptors from reference fd_array
1096    uint32_t current = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
1097
1098        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
1099}
1100
1101
[1]1102////////////////////////////////////////////////////////////////////////////////////
1103//  Thread related functions
1104////////////////////////////////////////////////////////////////////////////////////
1105
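// Note on thread identifiers: as used by process_register_thread() and
// process_remove_thread() below, a trdid combines the cluster where the thread
// runs with its local index in th_tbl[]:
//
//     trdid = TRDID( local_cxy , ltid );     // compose cluster identifier and local index
//     ltid  = LTID_FROM_TRDID( trdid );      // recover the th_tbl[] index
//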
1106/////////////////////////////////////////////////////
1107error_t process_register_thread( process_t * process,
1108                                 thread_t  * thread,
1109                                 trdid_t   * trdid )
1110{
[472]1111    ltid_t         ltid;
1112    bool_t         found = false;
1113 
[564]1114// check arguments
1115assert( (process != NULL) , "process argument is NULL" );
1116assert( (thread != NULL) , "thread argument is NULL" );
[1]1117
[564]1118    // get the lock protecting th_tbl for all threads
1119    // but the idle thread executing kernel_init (cannot yield)
1120    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
[1]1121
[583]1122    // scan th_tbl
[564]1123    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
[1]1124    {
1125        if( process->th_tbl[ltid] == NULL )
1126        {
1127            found = true;
1128            break;
1129        }
1130    }
1131
1132    if( found )
1133    {
1134        // register thread in th_tbl[]
1135        process->th_tbl[ltid] = thread;
1136        process->th_nr++;
1137
1138        // returns trdid
1139        *trdid = TRDID( local_cxy , ltid );
1140    }
1141
[583]1142    // release the lock protecting th_tbl
[564]1143    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
[428]1144
[564]1145    return (found) ? 0 : 0xFFFFFFFF;
[204]1146
1147}  // end process_register_thread()
1148
[443]1149/////////////////////////////////////////////////
1150bool_t process_remove_thread( thread_t * thread )
[1]1151{
[443]1152    uint32_t count;  // number of threads in local process descriptor
1153
[1]1154    process_t * process = thread->process;
1155
1156    // get thread local index
1157    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
[564]1158   
1159    // get the lock protecting th_tbl[]
1160    rwlock_wr_acquire( &process->th_lock );
[428]1161
[583]1162    // get number of threads
[443]1163    count = process->th_nr;
[428]1164
[583]1165// check thread
1166assert( (thread != NULL) , "thread argument is NULL" );
1167
[564]1168// check th_nr value
[583]1169assert( (count > 0) , "process th_nr cannot be 0\n" );
[443]1170
[1]1171    // remove thread from th_tbl[]
1172    process->th_tbl[ltid] = NULL;
[450]1173    process->th_nr = count-1;
[1]1174
[583]1175    // release lock protecting th_tbl
[564]1176    rwlock_wr_release( &process->th_lock );
[428]1177
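    // return true when the removed thread was the last thread of this process copy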
[443]1178    return (count == 1);
1179
[450]1180}  // end process_remove_thread()
[204]1181
[408]1182/////////////////////////////////////////////////////////
1183error_t process_make_fork( xptr_t      parent_process_xp,
1184                           xptr_t      parent_thread_xp,
1185                           pid_t     * child_pid,
1186                           thread_t ** child_thread )
[1]1187{
[408]1188    process_t * process;         // local pointer on child process descriptor
1189    thread_t  * thread;          // local pointer on child thread descriptor
1190    pid_t       new_pid;         // process identifier for child process
1191    pid_t       parent_pid;      // process identifier for parent process
1192    xptr_t      ref_xp;          // extended pointer on reference process
[428]1193    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
[408]1194    error_t     error;
[1]1195
[408]1196    // get cluster and local pointer for parent process
1197    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
[435]1198    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
[101]1199
[428]1200    // get parent process PID and extended pointer on .elf file
[564]1201    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1202    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
[428]1203
[564]1204    // get extended pointer on reference process
1205    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
[438]1206
[564]1207// check parent process is the reference process
1208assert( (parent_process_xp == ref_xp ) ,
1209"parent process must be the reference process\n" );
[407]1210
[438]1211#if DEBUG_PROCESS_MAKE_FORK
[583]1212uint32_t cycle   = (uint32_t)hal_get_cycles();
1213thread_t * this  = CURRENT_THREAD;
1214trdid_t    trdid = this->trdid;
1215pid_t      pid   = this->process->pid;
[438]1216if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1217printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
[583]1218__FUNCTION__, pid, trdid, local_cxy, cycle );
[433]1219#endif
[172]1220
[408]1221    // allocate a process descriptor
1222    process = process_alloc();
1223    if( process == NULL )
1224    {
1225        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
1226        __FUNCTION__, local_cxy ); 
1227        return -1;
1228    }
[1]1229
[408]1230    // allocate a child PID from local cluster
[416]1231    error = cluster_pid_alloc( process , &new_pid );
[428]1232    if( error ) 
[1]1233    {
[408]1234        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
1235        __FUNCTION__, local_cxy ); 
1236        process_free( process );
1237        return -1;
[1]1238    }
[408]1239
[469]1240#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[457]1241cycle = (uint32_t)hal_get_cycles();
1242if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1243printk("\n[%s] thread[%x,%x] allocated process %x / cycle %d\n",
[583]1244__FUNCTION__, pid, trdid, new_pid, cycle );
[457]1245#endif
1246
[408]1247    // initializes child process descriptor from parent process descriptor
1248    process_reference_init( process,
1249                            new_pid,
1250                            parent_process_xp );
1251
[438]1252#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1253cycle = (uint32_t)hal_get_cycles();
[438]1254if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1255printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",
[583]1256__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1257#endif
[408]1258
[457]1259
[408]1260    // copy VMM from parent descriptor to child descriptor
1261    error = vmm_fork_copy( process,
1262                           parent_process_xp );
1263    if( error )
[101]1264    {
[408]1265        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
1266        __FUNCTION__, local_cxy ); 
1267        process_free( process );
1268        cluster_pid_release( new_pid );
1269        return -1;
[101]1270    }
[172]1271
[438]1272#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1273cycle = (uint32_t)hal_get_cycles();
[438]1274if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1275printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
[583]1276__FUNCTION__, pid, trdid, cycle );
[433]1277#endif
[407]1278
[564]1279    // if parent_process is INIT, or if parent_process is the TXT owner,
1280    // the child_process becomes the owner of its TXT terminal
1281    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
[457]1282    {
1283        process_txt_set_ownership( XPTR( local_cxy , process ) );
1284
1285#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1286cycle = (uint32_t)hal_get_cycles();
1287if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1288printk("\n[%s] thread[%x,%x] / child takes TXT ownership / cycle %d\n",
[583]1289__FUNCTION__ , pid, trdid, cycle );
[457]1290#endif
1291
1292    }
1293
[428]1294    // update extended pointer on .elf file
1295    process->vfs_bin_xp = vfs_bin_xp;
1296
[408]1297    // create child thread descriptor from parent thread descriptor
1298    error = thread_user_fork( parent_thread_xp,
1299                              process,
1300                              &thread );
1301    if( error )
1302    {
1303        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
1304        __FUNCTION__, local_cxy ); 
1305        process_free( process );
1306        cluster_pid_release( new_pid );
1307        return -1;
1308    }
[172]1309
[564]1310// check main thread LTID
1311assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
1312"main thread must have LTID == 0\n" );
[428]1313
[564]1314#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1315cycle = (uint32_t)hal_get_cycles();
[438]1316if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1317printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n", 
[583]1318__FUNCTION__, pid, trdid, thread, cycle );
[433]1319#endif
[1]1320
[433]1321    // set Copy_On_Write flag in parent process GPT
[408]1322    // this includes all replicated GPT copies
1323    if( parent_process_cxy == local_cxy )   // reference is local
1324    {
1325        vmm_set_cow( parent_process_ptr );
1326    }
1327    else                                    // reference is remote
1328    {
1329        rpc_vmm_set_cow_client( parent_process_cxy,
1330                                parent_process_ptr );
1331    }
[1]1332
[433]1333    // set Copy_On_Write flag in child process GPT
1334    vmm_set_cow( process );
1335 
[438]1336#if( DEBUG_PROCESS_MAKE_FORK & 1 )
[433]1337cycle = (uint32_t)hal_get_cycles();
[438]1338if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1339printk("\n[%s] thread[%x,%x] set COW in parent and child / cycle %d\n",
[583]1340__FUNCTION__, pid, trdid, cycle );
[433]1341#endif
[101]1342
[428]1343    // get extended pointers on parent children_root, children_lock and children_nr
1344    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1345    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1346    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
[101]1347
[428]1348    // register process in parent children list
[564]1349    remote_queuelock_acquire( children_lock_xp );
[428]1350        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1351        hal_remote_atomic_add( children_nr_xp , 1 );
[564]1352    remote_queuelock_release( children_lock_xp );
[204]1353
[408]1354    // return success
1355    *child_thread = thread;
1356    *child_pid    = new_pid;
[1]1357
[438]1358#if DEBUG_PROCESS_MAKE_FORK
[433]1359cycle = (uint32_t)hal_get_cycles();
[438]1360if( DEBUG_PROCESS_MAKE_FORK < cycle )
[593]1361printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
[583]1362__FUNCTION__, pid, trdid, new_pid, cycle );
[433]1363#endif
[428]1364
[408]1365    return 0;
1366
[416]1367}   // end process_make_fork()
[408]1368
1369/////////////////////////////////////////////////////
1370error_t process_make_exec( exec_info_t  * exec_info )
1371{
[457]1372    thread_t       * thread;                  // local pointer on this thread
1373    process_t      * process;                 // local pointer on this process
1374    pid_t            pid;                     // this process identifier
[610]1375    xptr_t           ref_xp;                  // reference process for this process
[441]1376        error_t          error;                   // value returned by called functions
[457]1377    char           * path;                    // path to .elf file
1378    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
1379    uint32_t         file_id;                 // file index in fd_array
1380    uint32_t         args_nr;                 // number of main thread arguments
1381    char          ** args_pointers;           // array of pointers on main thread arguments
[446]1382
[610]1383    // get thread, process, pid and ref_xp
[457]1384    thread  = CURRENT_THREAD;
1385    process = thread->process;
1386    pid     = process->pid;
[610]1387    ref_xp  = process->ref_xp;
[408]1388
[457]1389        // get relevant infos from exec_info
1390        path          = exec_info->path;
1391    args_nr       = exec_info->args_nr;
1392    args_pointers = exec_info->args_pointers;
[408]1393
[438]1394#if DEBUG_PROCESS_MAKE_EXEC
[433]1395uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1396if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1397printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n",
[583]1398__FUNCTION__, pid, thread->trdid, path, cycle );
[433]1399#endif
[408]1400
[457]1401    // open the file identified by <path>
1402    file_xp = XPTR_NULL;
[564]1403    file_id = 0xFFFFFFFF;
[610]1404        error   = vfs_open( process->vfs_root_xp,
[457]1405                            path,
[610]1406                            ref_xp,
[457]1407                            O_RDONLY,
1408                            0,
1409                            &file_xp,
1410                            &file_id );
1411        if( error )
1412        {
1413                printk("\n[ERROR] in %s : failed to open file <%s>\n", __FUNCTION__ , path );
1414                return -1;
1415        }
1416
[446]1417#if (DEBUG_PROCESS_MAKE_EXEC & 1)
[469]1418cycle = (uint32_t)hal_get_cycles();
[446]1419if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1420printk("\n[%s] thread[%x,%x] opened file <%s> / cycle %d\n",
[583]1421__FUNCTION__, pid, thread->trdid, path, cycle );
[446]1422#endif
1423
[457]1424    // delete all threads other than this main thread in all clusters
1425    process_sigaction( pid , DELETE_ALL_THREADS );
[446]1426
[469]1427#if (DEBUG_PROCESS_MAKE_EXEC & 1)
1428cycle = (uint32_t)hal_get_cycles();
1429if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1430printk("\n[%s] thread[%x,%x] deleted all threads / cycle %d\n",
[583]1431__FUNCTION__, pid, thread->trdid, cycle );
[469]1432#endif
1433
[457]1434    // reset local process VMM
1435    vmm_destroy( process );
[446]1436
[457]1437#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
1438cycle = (uint32_t)hal_get_cycles();
1439if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1440printk("\n[%s] thread[%x,%x] reset VMM / cycle %d\n",
[583]1441__FUNCTION__, pid, thread->trdid, cycle );
[457]1442#endif
[408]1443
[457]1444    // re-initialize the VMM (kentry/args/envs vsegs registration)
1445    error = vmm_init( process );
1446    if( error )
[416]1447    {
[457]1448        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
1449        vfs_close( file_xp , file_id );
1450        // FIXME restore old process VMM
[416]1451        return -1;
1452    }
[457]1453   
[438]1454#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1455cycle = (uint32_t)hal_get_cycles();
[438]1456if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1457printk("\n[%s] thread[%x,%x] / kentry/args/envs vsegs registered / cycle %d\n",
[583]1458__FUNCTION__, pid, thread->trdid, cycle );
[433]1459#endif
[428]1460
[457]1461    // register code & data vsegs as well as entry-point in process VMM,
[428]1462    // and register extended pointer on .elf file in process descriptor
[457]1463        error = elf_load_process( file_xp , process );
[441]1464    if( error )
[1]1465        {
[441]1466                printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
[457]1467        vfs_close( file_xp , file_id );
1468        // FIXME restore old process VMM
[408]1469        return -1;
[1]1470        }
1471
[438]1472#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
[433]1473cycle = (uint32_t)hal_get_cycles();
[438]1474if( DEBUG_PROCESS_MAKE_EXEC < cycle )
[593]1475printk("\n[%s] thread[%x,%x] / code/data vsegs registered / cycle %d\n",
[583]1476__FUNCTION__, pid, thread->trdid, cycle );
[433]1477#endif
[1]1478
[457]1479    // update the existing main thread descriptor, and jump to user code (no return on success)
1480    error = thread_user_exec( (void *)process->vmm.entry_point,
1481                              args_nr,
1482                              args_pointers );
1483    if( error )
1484    {
[469]1485        printk("\n[ERROR] in %s : cannot update main thread for %s\n", __FUNCTION__ , path );
[457]1486        vfs_close( file_xp , file_id );
1487        // FIXME restore old process VMM
[408]1488        return -1;
[457]1489    }
[1]1490
[492]1491    assert( false , "thread_user_exec() should not return on success" );
[457]1492 
[409]1493        return 0;
1494
1495}  // end process_make_exec()
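
// A possible exec_info setup (hypothetical sketch) : only the three fields read
// by process_make_exec() above are filled here; the real exec_info_t structure
// may contain more fields (environment, argument buffer, ...), and the <path>
// field is assumed to be a character buffer able to receive the pathname.
// The pathname /bin/user/hello.elf is purely illustrative.
#if 0
static void example_exec_caller( void )
{
    exec_info_t   info;
    char        * args[2] = { "/bin/user/hello.elf" , NULL };

    strcpy( info.path , args[0] );       // pathname of the new .elf file
    info.args_nr       = 1;              // number of main thread arguments
    info.args_pointers = args;           // array of pointers on arguments

    if( process_make_exec( &info ) ) printk("\n[ERROR] exec failed\n");
}
#endif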
1496
[457]1497
[428]1498///////////////////////////////////////////////
1499void process_zero_create( process_t * process )
1500{
[580]1501    error_t error;
1502    pid_t   pid;
[428]1503
[438]1504#if DEBUG_PROCESS_ZERO_CREATE
[433]1505uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1506if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]1507printk("\n[%s] enter / cluster %x / cycle %d\n",
[564]1508__FUNCTION__, local_cxy, cycle );
[433]1509#endif
[428]1510
[580]1511    // get PID from local cluster manager for this kernel process
1512    error = cluster_pid_alloc( process , &pid );
1513
1514    if( error || (LPID_FROM_PID( pid ) != 0) )
1515    {
1516        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
1517        __FUNCTION__ , local_cxy, pid );
1518        hal_core_sleep();
1519    }
1520
[428]1521    // initialize PID, REF_XP, PARENT_XP, and STATE
[580]1522    // the kernel process_zero is its own parent_process,
1523    // reference_process, and owner_process, and cannot be killed...
1524    process->pid        = pid;
[433]1525    process->ref_xp     = XPTR( local_cxy , process );
[443]1526    process->owner_xp   = XPTR( local_cxy , process );
[580]1527    process->parent_xp  = XPTR( local_cxy , process );
[433]1528    process->term_state = 0;
[428]1529
[564]1530    // reset th_tbl[] array and associated fields
[428]1531    uint32_t i;
[564]1532    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
[428]1533        {
1534        process->th_tbl[i] = NULL;
1535    }
1536    process->th_nr  = 0;
[564]1537    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
[428]1538
[564]1539
[428]1540    // reset children list as empty
1541    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
1542    process->children_nr = 0;
[564]1543    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
1544                           LOCK_PROCESS_CHILDREN );
[428]1545
[580]1546    // register kernel process in cluster manager local_list
1547    cluster_process_local_link( process );
1548   
[428]1549        hal_fence();
1550
[438]1551#if DEBUG_PROCESS_ZERO_CREATE
[433]1552cycle = (uint32_t)hal_get_cycles();
[438]1553if( DEBUG_PROCESS_ZERO_CREATE < cycle )
[593]1554printk("\n[%s] exit / cluster %x / cycle %d\n",
[564]1555__FUNCTION__, local_cxy, cycle );
[433]1556#endif
[428]1557
[610]1558}  // end process_zero_create()
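
// Note : the LPID check above relies on the layout suggested by the CXY_FROM_PID()
// and LPID_FROM_PID() macros, where a PID combines the owner cluster identifier
// with a local index (LPID) : the kernel process of each cluster is expected to
// receive LPID 0, which is what the panic above enforces.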
[428]1559
[564]1560////////////////////////////////
[485]1561void process_init_create( void )
[1]1562{
[428]1563    process_t      * process;       // local pointer on process descriptor
[409]1564    pid_t            pid;           // process_init identifier
1565    thread_t       * thread;        // local pointer on main thread
1566    pthread_attr_t   attr;          // main thread attributes
1567    lid_t            lid;           // selected core local index for main thread
[457]1568    xptr_t           file_xp;       // extended pointer on .elf file descriptor
1569    uint32_t         file_id;       // file index in fd_array
[409]1570    error_t          error;
[1]1571
[438]1572#if DEBUG_PROCESS_INIT_CREATE
[610]1573thread_t * this = CURRENT_THREAD;
[433]1574uint32_t cycle = (uint32_t)hal_get_cycles();
[438]1575if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1576printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1577__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1578#endif
[1]1579
[408]1580    // allocates memory for process descriptor from local cluster
1581        process = process_alloc(); 
[457]1582       
[564]1583// check memory allocator
1584assert( (process != NULL),
1585"no memory for process descriptor in cluster %x\n", local_cxy  );
[101]1586
[610]1587    // set the CWD and VFS_ROOT fields in process descriptor
1588    process->cwd_xp      = process_zero.vfs_root_xp;
1589    process->vfs_root_xp = process_zero.vfs_root_xp;
1590
[409]1591    // get PID from local cluster
[416]1592    error = cluster_pid_alloc( process , &pid );
[408]1593
[564]1594// check PID allocator
1595assert( (error == 0),
1596"cannot allocate PID in cluster %x\n", local_cxy );
[409]1597
[564]1598// check PID value
1599assert( (pid == 1) ,
1600"process INIT must be first process in cluster 0\n" );
[457]1601
[409]1602    // initialize process descriptor / parent is local process_zero
1603    process_reference_init( process,
[408]1604                            pid,
[457]1605                            XPTR( local_cxy , &process_zero ) ); 
[408]1606
[564]1607#if(DEBUG_PROCESS_INIT_CREATE & 1)
1608if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1609printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
1610__FUNCTION__, this->process->pid, this->trdid );
[564]1611#endif
1612
[457]1613    // open the file identified by CONFIG_PROCESS_INIT_PATH
1614    file_xp = XPTR_NULL;
1615    file_id = -1;
[610]1616        error   = vfs_open( process->vfs_root_xp,
[457]1617                            CONFIG_PROCESS_INIT_PATH,
[610]1618                            XPTR( local_cxy , process ),
[457]1619                            O_RDONLY,
1620                            0,
1621                            &file_xp,
1622                            &file_id );
1623
[564]1624assert( (error == 0),
1625"failed to open file <%s>\n", CONFIG_PROCESS_INIT_PATH );
[457]1626
[564]1627#if(DEBUG_PROCESS_INIT_CREATE & 1)
1628if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1629printk("\n[%s] thread[%x,%x] opened .elf file descriptor\n",
1630__FUNCTION__, this->process->pid, this->trdid );
[564]1631#endif
1632
1633   // register "code" and "data" vsegs as well as entry-point
[409]1634    // in process VMM, using information contained in the elf file.
[457]1635        error = elf_load_process( file_xp , process );
[101]1636
[564]1637assert( (error == 0),
1638"cannot access .elf file <%s>\n", CONFIG_PROCESS_INIT_PATH );
[457]1639
[564]1640#if(DEBUG_PROCESS_INIT_CREATE & 1)
1641if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1642printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
1643__FUNCTION__, this->process->pid, this->trdid );
[564]1644#endif
1645
[428]1646    // get extended pointers on process_zero children_root, children_lock
1647    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
1648    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
1649
[564]1650    // take lock protecting kernel process children list
1651    remote_queuelock_acquire( children_lock_xp );
1652
[428]1653    // register process INIT in parent local process_zero
1654        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1655        hal_atomic_add( &process_zero.children_nr , 1 );
1656
[564]1657    // release lock protecting kernel process children list
1658    remote_queuelock_release( children_lock_xp );
1659
1660#if(DEBUG_PROCESS_INIT_CREATE & 1)
1661if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1662printk("\n[%s] thread[%x,%x] registered init process in parent\n",
1663__FUNCTION__, this->process->pid, this->trdid );
[564]1664#endif
1665
[409]1666    // select a core in local cluster to execute the main thread
1667    lid  = cluster_select_local_core();
1668
1669    // initialize pthread attributes for main thread
1670    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
1671    attr.cxy        = local_cxy;
1672    attr.lid        = lid;
1673
1674    // create and initialize thread descriptor
1675        error = thread_user_create( pid,
1676                                (void *)process->vmm.entry_point,
1677                                NULL,
1678                                &attr,
1679                                &thread );
[1]1680
[564]1681assert( (error == 0),
1682"cannot create main thread for <%s>\n", CONFIG_PROCESS_INIT_PATH );
[428]1683
[564]1684assert( (thread->trdid == 0),
1685"main thread must have index 0 for <%s>\n", CONFIG_PROCESS_INIT_PATH );
[457]1686
[564]1687#if(DEBUG_PROCESS_INIT_CREATE & 1)
1688if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1689printk("\n[%s] thread[%x,%x] created main thread\n",
1690__FUNCTION__, this->process->pid, this->trdid );
[564]1691#endif
1692
[409]1693    // activate thread
1694        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
1695
[124]1696    hal_fence();
[1]1697
[438]1698#if DEBUG_PROCESS_INIT_CREATE
[433]1699cycle = (uint32_t)hal_get_cycles();
[438]1700if( DEBUG_PROCESS_INIT_CREATE < cycle )
[610]1701printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
1702__FUNCTION__, this->process->pid, this->trdid, cycle );
[433]1703#endif
[409]1704
[204]1705}  // end process_init_create()
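
// Note : given the assert requiring PID 1, this function is expected to run only
// once, in the cluster owning the init process (cluster 0), after the kernel
// process_zero and the VFS root have been created, since the init process
// inherits process_zero.vfs_root_xp as both its vfs_root and its cwd.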
1706
[428]1707/////////////////////////////////////////
1708void process_display( xptr_t process_xp )
1709{
1710    process_t   * process_ptr;
1711    cxy_t         process_cxy;
[443]1712
[428]1713    xptr_t        parent_xp;       // extended pointer on parent process
1714    process_t   * parent_ptr;
1715    cxy_t         parent_cxy;
1716
[443]1717    xptr_t        owner_xp;        // extended pointer on owner process
1718    process_t   * owner_ptr;
1719    cxy_t         owner_cxy;
1720
[428]1721    pid_t         pid;
1722    pid_t         ppid;
[580]1723    lpid_t        lpid;
[428]1724    uint32_t      state;
1725    uint32_t      th_nr;
1726
[443]1727    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
1728    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
1729    chdev_t     * txt_chdev_ptr;
1730    cxy_t         txt_chdev_cxy;
1731    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
[428]1732
1733    xptr_t        elf_file_xp;     // extended pointer on .elf file
1734    cxy_t         elf_file_cxy;
1735    vfs_file_t  * elf_file_ptr;
1736    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
1737
1738    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
1739    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
1740
1741    // get cluster and local pointer on process
1742    process_ptr = GET_PTR( process_xp );
1743    process_cxy = GET_CXY( process_xp );
1744
[580]1745    // get process PID, LPID, and state
[564]1746    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[580]1747    lpid  = LPID_FROM_PID( pid );
[564]1748    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
[428]1749
[580]1750    // get process PPID
[564]1751    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
[428]1752    parent_cxy = GET_CXY( parent_xp );
1753    parent_ptr = GET_PTR( parent_xp );
[564]1754    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
[428]1755
1756    // get number of threads
[564]1757    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
[428]1758
[443]1759    // get pointers on owner process descriptor
[564]1760    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
[443]1761    owner_cxy = GET_CXY( owner_xp );
1762    owner_ptr = GET_PTR( owner_xp );
[428]1763
[580]1764    // get process TXT name and .elf name
1765    if( lpid )                                   // user process
1766    {
[443]1767
[580]1768        // get extended pointer on file descriptor associated to TXT_RX
1769        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
[428]1770
[580]1771        assert( (txt_file_xp != XPTR_NULL) ,
1772        "process must be attached to one TXT terminal\n" ); 
[443]1773
[580]1774        // get TXT_RX chdev pointers
1775        txt_chdev_xp  = chdev_from_file( txt_file_xp );
1776        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
1777        txt_chdev_ptr = GET_PTR( txt_chdev_xp );
1778
1779        // get TXT_RX name and ownership
1780        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
1781                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
[428]1782   
[580]1783        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
1784                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
[428]1785
[580]1786        // get process .elf name
1787        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
1788        elf_file_cxy  = GET_CXY( elf_file_xp );
1789        elf_file_ptr  = GET_PTR( elf_file_xp );
1790        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
1791        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
1792    }
1793    else                                         // kernel process_zero
1794    {
1795        // TXT name and .elf name are not registered in kernel process_zero
1796        strcpy( txt_name , "txt0_rx" );
1797        txt_owner_xp = process_xp; 
1798        strcpy( elf_name , "kernel.elf" );
1799    }
1800
[428]1801    // display process info
[443]1802    if( txt_owner_xp == process_xp )
[428]1803    {
[581]1804        nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
1805        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]1806    }
1807    else
1808    {
[581]1809        nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
1810        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
[428]1811    }
1812}  // end process_display()
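
// Example of output (hypothetical values) : with the format strings above, a user
// process owning its terminal is displayed as
//     PID 10005 | txt1_rx (FG) | BC1F3A00 | PPID 10001 | TS 0 | 3 | hello.elf
// while a process attached to the same terminal without owning it gets the (BG) tag.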
1813
1814
1815////////////////////////////////////////////////////////////////////////////////////////
1816//     Terminals related functions
1817////////////////////////////////////////////////////////////////////////////////////////
1818
[581]1819//////////////////////////////////
[485]1820uint32_t process_txt_alloc( void )
[428]1821{
1822    uint32_t  index;       // TXT terminal index
1823    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
1824    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
1825    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
1826    xptr_t    root_xp;     // extended pointer on owner field in chdev
1827
1828    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
1829    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
1830    {
1831        // get pointers on TXT_RX[index]
1832        chdev_xp  = chdev_dir.txt_rx[index];
1833        chdev_cxy = GET_CXY( chdev_xp );
1834        chdev_ptr = GET_PTR( chdev_xp );
1835
1836        // get extended pointer on root of attached process list
1837        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
1838
1839        // return free TXT index if found
1840        if( xlist_is_empty( root_xp ) ) return index; 
1841    }
1842
[492]1843    assert( false , "no free TXT terminal found" );
[428]1844
1845    return -1;
1846
1847} // end process_txt_alloc()
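
// A possible combined use of process_txt_alloc() above and process_txt_attach()
// below (hypothetical sketch) : when a new user process is created in its owner
// cluster, a free terminal index is obtained first, then the process is
// registered on that terminal.
#if 0
static void example_txt_attach( process_t * process )
{
    uint32_t txt_id = process_txt_alloc();     // index of a free user TXT terminal
    process_txt_attach( process , txt_id );    // add process to its attached list
}
#endif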
1848
1849/////////////////////////////////////////////
1850void process_txt_attach( process_t * process,
1851                         uint32_t    txt_id )
1852{
1853    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
1854    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
1855    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
1856    xptr_t      root_xp;      // extended pointer on list root in chdev
1857    xptr_t      lock_xp;      // extended pointer on list lock in chdev
1858
[564]1859// check process is in owner cluster
1860assert( (CXY_FROM_PID( process->pid ) == local_cxy) ,
1861"process descriptor not in owner cluster" );
[428]1862
[564]1863// check terminal index
1864assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
1865"illegal TXT terminal index" );
[428]1866
1867    // get pointers on TXT_RX[txt_id] chdev
1868    chdev_xp  = chdev_dir.txt_rx[txt_id];
1869    chdev_cxy = GET_CXY( chdev_xp );
1870    chdev_ptr = GET_PTR( chdev_xp );
1871
1872    // get extended pointer on root & lock of attached process list
1873    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
1874    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
1875
[564]1876    // get lock protecting list of processes attached to TXT
1877    remote_busylock_acquire( lock_xp );
1878
[428]1879    // insert process in attached process list
1880    xlist_add_last( root_xp , XPTR( local_cxy , &process->txt_list ) );
1881
[564]1882    // release lock protecting list of processes attached to TXT
1883    remote_busylock_release( lock_xp );
1884
[446]1885#if DEBUG_PROCESS_TXT
[610]1886thread_t * this = CURRENT_THREAD;
[457]1887uint32_t cycle = (uint32_t)hal_get_cycles();
[446]1888if( DEBUG_PROCESS_TXT < cycle )
[610]1889printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
1890__FUNCTION__, this->process->pid, this->trdid, process->pid, txt_id , cycle );
[433]1891#endif
[428]1892
1893} // end process_txt_attach()
1894
[436]1895/////////////////////////////////////////////
1896void process_txt_detach( xptr_t  process_xp )
[428]1897{
[436]1898    process_t * process_ptr;  // local pointer on process in owner cluster
1899    cxy_t       process_cxy;  // process owner cluster
1900    pid_t       process_pid;  // process identifier
1901    xptr_t      file_xp;      // extended pointer on stdin file
[428]1902    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
1903    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
1904    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
1905    xptr_t      lock_xp;      // extended pointer on list lock in chdev
1906
[436]1907    // get process cluster, local pointer, and PID
1908    process_cxy = GET_CXY( process_xp );
1909    process_ptr = GET_PTR( process_xp );
[564]1910    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]1911
[564]1912// check process descriptor in owner cluster
1913assert( (CXY_FROM_PID( process_pid ) == process_cxy ) ,
1914"process descriptor not in owner cluster" );
[436]1915
1916    // release TXT ownership (does nothing if not TXT owner)
1917    process_txt_transfer_ownership( process_xp );
[428]1918
[436]1919    // get extended pointer on process stdin file
[564]1920    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[436]1921
1922    // get pointers on TXT_RX chdev
1923    chdev_xp  = chdev_from_file( file_xp );
[428]1924    chdev_cxy = GET_CXY( chdev_xp );
1925    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
1926
[436]1927    // get extended pointer on lock protecting attached process list
[428]1928    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
1929
[564]1930    // get lock protecting list of processes attached to TXT
1931    remote_busylock_acquire( lock_xp );
1932
[428]1933    // unlink process from attached process list
[436]1934    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
1935
[564]1936    // release lock protecting list of processes attached to TXT
1937    remote_busylock_release( lock_xp );
1938
[446]1939#if DEBUG_PROCESS_TXT
[610]1940thread_t * this = CURRENT_THREAD;
[457]1941uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]1942uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
[446]1943if( DEBUG_PROCESS_TXT < cycle )
[610]1944printk("\n[%s] thread[%x,%x] detached process %x from TXT %d / cycle %d\n",
1945__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
[433]1946#endif
[428]1947
1948} // end process_txt_detach()
1949
1950///////////////////////////////////////////////////
1951void process_txt_set_ownership( xptr_t process_xp )
1952{
1953    process_t * process_ptr;
1954    cxy_t       process_cxy;
[436]1955    pid_t       process_pid;
[428]1956    xptr_t      file_xp;
1957    xptr_t      txt_xp;     
1958    chdev_t   * txt_ptr;
1959    cxy_t       txt_cxy;
1960
[436]1961    // get pointers on process in owner cluster
[428]1962    process_cxy = GET_CXY( process_xp );
[435]1963    process_ptr = GET_PTR( process_xp );
[564]1964    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]1965
1966    // check owner cluster
[492]1967    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[436]1968    "process descriptor not in owner cluster\n" );
1969
[428]1970    // get extended pointer on stdin pseudo file
[564]1971    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]1972
1973    // get pointers on TXT chdev
1974    txt_xp  = chdev_from_file( file_xp );
1975    txt_cxy = GET_CXY( txt_xp );
[435]1976    txt_ptr = GET_PTR( txt_xp );
[428]1977
1978    // set owner field in TXT chdev
[564]1979    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
[428]1980
[446]1981#if DEBUG_PROCESS_TXT
[610]1982thread_t * this = CURRENT_THREAD;
[457]1983uint32_t cycle  = (uint32_t)hal_get_cycles();
[564]1984uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
[446]1985if( DEBUG_PROCESS_TXT < cycle )
[610]1986printk("\n[%s] thread[%x,%x] give TXT %d to process %x / cycle %d\n",
1987__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
[436]1988#endif
1989
[428]1990}  // end process_txt_set ownership()
1991
[436]1992////////////////////////////////////////////////////////
1993void process_txt_transfer_ownership( xptr_t process_xp )
[428]1994{
[436]1995    process_t * process_ptr;     // local pointer on process releasing ownership
1996    cxy_t       process_cxy;     // process cluster
1997    pid_t       process_pid;     // process identifier
[428]1998    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
1999    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
[433]2000    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
2001    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
2002    uint32_t    txt_id;          // TXT_RX channel
[428]2003    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
2004    xptr_t      root_xp;         // extended pointer on root of attached process list
[436]2005    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
[428]2006    xptr_t      iter_xp;         // iterator for xlist
2007    xptr_t      current_xp;      // extended pointer on current process
[433]2008    process_t * current_ptr;     // local pointer on current process
2009    cxy_t       current_cxy;     // cluster for current process
[428]2010
[457]2011#if DEBUG_PROCESS_TXT
[610]2012thread_t * this  = CURRENT_THREAD;
2013uint32_t   cycle;
[457]2014#endif
2015
[436]2016    // get pointers on process in owner cluster
[428]2017    process_cxy = GET_CXY( process_xp );
[435]2018    process_ptr = GET_PTR( process_xp );
[564]2019    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
[436]2020
2021    // check owner cluster
[492]2022    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
[436]2023    "process descriptor not in owner cluster\n" );
2024
[428]2025    // get extended pointer on stdin pseudo file
[564]2026    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[428]2027
2028    // get pointers on TXT chdev
2029    txt_xp  = chdev_from_file( file_xp );
2030    txt_cxy = GET_CXY( txt_xp );
[433]2031    txt_ptr = GET_PTR( txt_xp );
[428]2032
[433]2033    // get extended pointer on TXT_RX owner and TXT channel
[564]2034    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
2035    txt_id   = hal_remote_l32 ( XPTR( txt_cxy , &txt_ptr->channel ) );
[428]2036
[436]2037    // transfer ownership only if process is the TXT owner
2038    if( (owner_xp == process_xp) && (txt_id > 0) ) 
[428]2039    {
[436]2040        // get extended pointers on root and lock of attached processes list
2041        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
2042        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
[428]2043
[436]2044        // get lock
[564]2045        remote_busylock_acquire( lock_xp );
[436]2046
2047        if( process_get_ppid( process_xp ) != 1 )           // process is not KSH
[428]2048        {
[436]2049            // scan attached process list to find KSH process
2050            XLIST_FOREACH( root_xp , iter_xp )
2051            {
2052                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2053                current_cxy = GET_CXY( current_xp );
2054                current_ptr = GET_PTR( current_xp );
[435]2055
[436]2056                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
2057                {
2058                    // release lock
[564]2059                    remote_busylock_release( lock_xp );
[436]2060
2061                    // set owner field in TXT chdev
[564]2062                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2063
[446]2064#if DEBUG_PROCESS_TXT
[610]2065cycle = (uint32_t)hal_get_cycles();
[564]2066uint32_t ksh_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
[446]2067if( DEBUG_PROCESS_TXT < cycle )
[610]2068printk("\n[%s] thread[%x,%x] release TXT %d to KSH %x / cycle %d\n",
2069__FUNCTION__, this->process->pid, this->trdid, txt_id, ksh_pid, cycle );
[457]2070process_txt_display( txt_id );
[436]2071#endif
2072                     return;
2073                }
2074            }
2075 
2076            // release lock
[564]2077            remote_busylock_release( lock_xp );
[436]2078
2079            // PANIC if KSH not found
[492]2080            assert( false , "KSH process not found for TXT %d" , txt_id );
[436]2081
2082            return;
2083        }
2084        else                                               // process is KSH
2085        {
2086            // scan attached process list to find another process
2087            XLIST_FOREACH( root_xp , iter_xp )
[428]2088            {
[436]2089                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2090                current_cxy = GET_CXY( current_xp );
2091                current_ptr = GET_PTR( current_xp );
2092
2093                if( current_xp != process_xp )            // current is not KSH
2094                {
2095                    // release lock
[564]2096                    remote_busylock_release( lock_xp );
[436]2097
2098                    // set owner field in TXT chdev
[564]2099                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
[436]2100
[446]2101#if DEBUG_PROCESS_TXT
[610]2102cycle  = (uint32_t)hal_get_cycles();
[564]2103uint32_t new_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
[446]2104if( DEBUG_PROCESS_TXT < cycle )
[610]2105printk("\n[%s] thread[%x,%x] release TXT %d to process %x / cycle %d\n",
2106__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
[457]2107process_txt_display( txt_id );
[436]2108#endif
2109                     return;
2110                }
[428]2111            }
[436]2112
2113            // release lock
[564]2114            remote_busylock_release( lock_xp );
[436]2115
2116            // no more owner for TXT if no other process found
[564]2117            hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
[436]2118
[446]2119#if DEBUG_PROCESS_TXT
[436]2120cycle = (uint32_t)hal_get_cycles();
[446]2121if( DEBUG_PROCESS_TXT < cycle )
[610]2122printk("\n[%s] thread[%x,%x] release TXT %d to nobody / cycle %d\n",
2123__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
[457]2124process_txt_display( txt_id );
[436]2125#endif
2126            return;
[428]2127        }
[436]2128    }
2129    else
2130    {
[433]2131
[446]2132#if DEBUG_PROCESS_TXT
[436]2133cycle = (uint32_t)hal_get_cycles();
[446]2134if( DEBUG_PROCESS_TXT < cycle )
[593]2135printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
[610]2136__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
[457]2137process_txt_display( txt_id );
[436]2138#endif
2139
[428]2140    }
[436]2141}  // end process_txt_transfer_ownership()
[428]2142
2143
[564]2144////////////////////////////////////////////////
2145bool_t process_txt_is_owner( xptr_t process_xp )
[457]2146{
2147    // get local pointer and cluster of process in owner cluster
2148    cxy_t       process_cxy = GET_CXY( process_xp );
2149    process_t * process_ptr = GET_PTR( process_xp );
2150
[564]2151// check calling thread execute in target process owner cluster
2152pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2153assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
2154"process descriptor not in owner cluster\n" );
[457]2155
2156    // get extended pointer on stdin pseudo file
[564]2157    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
[457]2158
2159    // get pointers on TXT chdev
2160    xptr_t    txt_xp  = chdev_from_file( file_xp );
2161    cxy_t     txt_cxy = GET_CXY( txt_xp );
2162    chdev_t * txt_ptr = GET_PTR( txt_xp );
2163
2164    // get extended pointer on TXT_RX owner process
[564]2165    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
[457]2166
2167    return (process_xp == owner_xp);
2168
2169}   // end process_txt_is_owner()
2170
[436]2171////////////////////////////////////////////////     
2172xptr_t process_txt_get_owner( uint32_t channel )
[435]2173{
2174    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
2175    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
2176    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
2177
[564]2178    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
[435]2179
[457]2180}  // end process_txt_get_owner()
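
// A possible use of process_txt_get_owner() (hypothetical sketch) : retrieve the
// foreground process attached to a TXT_RX channel, for instance before routing a
// keyboard control character to it; the sketch below only reads and prints its PID.
#if 0
static void example_txt_owner_query( uint32_t channel )
{
    xptr_t owner_xp = process_txt_get_owner( channel );

    if( owner_xp != XPTR_NULL )
    {
        cxy_t       owner_cxy = GET_CXY( owner_xp );
        process_t * owner_ptr = GET_PTR( owner_xp );
        pid_t       owner_pid = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->pid ) );

        printk("\n[INFO] TXT %d owned by process %x\n", channel, owner_pid );
    }
}
#endif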
2181
[435]2182///////////////////////////////////////////
2183void process_txt_display( uint32_t txt_id )
2184{
2185    xptr_t      chdev_xp;
2186    cxy_t       chdev_cxy;
2187    chdev_t   * chdev_ptr;
2188    xptr_t      root_xp;
2189    xptr_t      lock_xp;
2190    xptr_t      current_xp;
2191    xptr_t      iter_xp;
[443]2192    cxy_t       txt0_cxy;
2193    chdev_t   * txt0_ptr;
2194    xptr_t      txt0_xp;
2195    xptr_t      txt0_lock_xp;
2196   
[435]2197    assert( (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
[492]2198    "illegal TXT terminal index" );
[435]2199
[443]2200    // get pointers on TXT0 chdev
2201    txt0_xp  = chdev_dir.txt_tx[0];
2202    txt0_cxy = GET_CXY( txt0_xp );
2203    txt0_ptr = GET_PTR( txt0_xp );
2204
2205    // get extended pointer on TXT0 lock
2206    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
2207
[435]2208    // get pointers on TXT_RX[txt_id] chdev
2209    chdev_xp  = chdev_dir.txt_rx[txt_id];
2210    chdev_cxy = GET_CXY( chdev_xp );
2211    chdev_ptr = GET_PTR( chdev_xp );
2212
2213    // get extended pointer on root & lock of attached process list
2214    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2215    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2216
[443]2217    // get lock on attached process list
[564]2218    remote_busylock_acquire( lock_xp );
[443]2219
2220    // get TXT0 lock in busy waiting mode
[564]2221    remote_busylock_acquire( txt0_lock_xp );
[443]2222
[435]2223    // display header
[443]2224    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
2225    txt_id , (uint32_t)hal_get_cycles() );
[435]2226
[436]2227    // scan attached process list
[435]2228    XLIST_FOREACH( root_xp , iter_xp )
2229    {
2230        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2231        process_display( current_xp );
2232    }
2233
[443]2234    // release TXT0 lock in busy waiting mode
[564]2235    remote_busylock_release( txt0_lock_xp );
[443]2236
2237    // release lock on attached process list
[564]2238    remote_busylock_release( lock_xp );
[435]2239
2240}  // end process_txt_display