source: trunk/kernel/kern/process.c @ 423

Last change on this file since 423 was 416, checked in by alain, 7 years ago

Improve sys_exec.

File size: 46.9 KB
RevLine 
[1]1/*
2 * process.c - process related management
[172]3 *
[1]4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
[23]6 *          Alain Greiner (2016,2017)
[1]7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
[409]10 * This file is part of ALMOS-MKH.
[1]11 *
[172]12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
[172]16 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
[172]22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[1]27#include <hal_types.h>
28#include <hal_remote.h>
29#include <hal_uspace.h>
[409]30#include <hal_irqmask.h>
[1]31#include <errno.h>
32#include <printk.h>
33#include <memcpy.h>
34#include <bits.h>
35#include <kmem.h>
36#include <page.h>
37#include <vmm.h>
38#include <vfs.h>
39#include <core.h>
40#include <thread.h>
41#include <list.h>
[407]42#include <string.h>
[1]43#include <scheduler.h>
44#include <remote_spinlock.h>
45#include <dqdt.h>
46#include <cluster.h>
47#include <ppm.h>
48#include <boot_info.h>
49#include <process.h>
50#include <elf.h>
[23]51#include <syscalls.h>
[409]52#include <signal.h>
[1]53
54//////////////////////////////////////////////////////////////////////////////////////////
55// Extern global variables
56//////////////////////////////////////////////////////////////////////////////////////////
57
58extern process_t process_zero;
59
60//////////////////////////////////////////////////////////////////////////////////////////
61// Process initialisation related functions
62//////////////////////////////////////////////////////////////////////////////////////////
63
64///////////////////////////
65process_t * process_alloc()
66{
67        kmem_req_t   req;
68
69    req.type  = KMEM_PROCESS;
70        req.size  = sizeof(process_t);
71        req.flags = AF_KERNEL;
72
73    return (process_t *)kmem_alloc( &req );
74}
75
76////////////////////////////////////////
77void process_free( process_t * process )
78{
79    kmem_req_t  req;
80
81        req.type = KMEM_PROCESS;
82        req.ptr  = process;
83        kmem_free( &req );
84}
85
[408]86/////////////////////////////////////////////
87void process_zero_init( process_t * process )
88{
89    // initialize PID, PPID anf PREF
90    process->pid    = 0;
91    process->ppid   = 0;
92    process->ref_xp = XPTR( local_cxy , process );
93
94    // reset th_tbl[] array as empty
95    uint32_t i;
96    for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
97        {
98        process->th_tbl[i] = NULL;
99    }
100    process->th_nr  = 0;
101    spinlock_init( &process->th_lock );
102
103        hal_fence();
104
105process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n",
106__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
107
108}  // end process_zero_init()
109
/////////////////////////////////////////////////////////////////////////////////////////
// This function initializes a reference process descriptor in the local cluster.
// It creates an empty VMM, an empty fd_array, opens the three stdin / stdout /
// stderr pseudo-files (indexes 0 / 1 / 2), inherits the VFS root and cwd inodes
// and the other open file descriptors from the model process, and registers the
// new descriptor in the local cluster manager (pref_tbl[] / local_list / copies_list).
// NOTE(review): registering in pref_tbl[lpid] assumes the local cluster is the
// process owner cluster — confirm against callers.
// - process  : [in] local pointer on the process descriptor to initialize.
// - pid      : [in] PID allocated to the new process.
// - ppid     : [in] parent PID / ppid == 0 identifies the "init" process,
//              which uses the kernel terminal pathnames for stdin/stdout/stderr.
// - model_xp : [in] extended pointer on the model process (inheritance source).
/////////////////////////////////////////////////////////////////////////////////////////
void process_reference_init( process_t * process,
                             pid_t       pid,
                             pid_t       ppid,
                             xptr_t      model_xp )
{
    cxy_t       model_cxy;      // model process cluster identifier
    process_t * model_ptr;      // model process local pointer
    error_t     error1;         // vfs_open() status for stdin
    error_t     error2;         // vfs_open() status for stdout
    error_t     error3;         // vfs_open() status for stderr
    xptr_t      stdin_xp;
    xptr_t      stdout_xp;
    xptr_t      stderr_xp;
    uint32_t    stdin_id;
    uint32_t    stdout_id;
    uint32_t    stderr_id;
    error_t     error;          // vmm_init() status

process_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x / ppid = %x\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid , ppid );

    // get model process cluster and local pointer
    model_cxy = GET_CXY( model_xp );
    model_ptr = (process_t *)GET_PTR( model_xp );

    // initialize PID, PPID, and REF (this descriptor is the reference copy)
    process->pid    = pid;
    process->ppid   = ppid;
    process->ref_xp = XPTR( local_cxy , process );

    // initialize vmm as empty (vmm_init failure is fatal)
    error = vmm_init( process );
    assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n" );
 

process_dmsg("\n[DBG] %s : core[%x,%d] / vmm empty for process %x\n", 
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );

    // initialize fd_array as empty
    process_fd_init( process );

    // create stdin / stdout / stderr pseudo-files
    // they must be opened in this order so that they get indexes 0 / 1 / 2
    if( ppid == 0 )                                       // process_init
    {
        error1 = vfs_open( process,
                           CONFIG_INIT_STDIN,
                           O_RDONLY, 
                           0,                // FIXME chmod
                           &stdin_xp, 
                           &stdin_id );

        error2 = vfs_open( process,
                           CONFIG_INIT_STDOUT,
                           O_WRONLY, 
                           0,                // FIXME chmod
                           &stdout_xp, 
                           &stdout_id );

        error3 = vfs_open( process,
                           CONFIG_INIT_STDERR,
                           O_WRONLY, 
                           0,                // FIXME chmod
                           &stderr_xp, 
                           &stderr_id );
    }
    else                                                  // any other process
    {
        error1 = vfs_open( process,
                           CONFIG_USER_STDIN,
                           O_RDONLY, 
                           0,                // FIXME chmod
                           &stdin_xp, 
                           &stdin_id );

        error2 = vfs_open( process,
                           CONFIG_USER_STDOUT,
                           O_WRONLY, 
                           0,                // FIXME chmod
                           &stdout_xp, 
                           &stdout_id );

        error3 = vfs_open( process,
                           CONFIG_USER_STDERR,
                           O_WRONLY, 
                           0,                // FIXME chmod
                           &stderr_xp, 
                           &stderr_id );
    }

    assert( ((error1 == 0) && (error2 == 0) && (error3 == 0)) , __FUNCTION__ ,
    "cannot open stdin/stdout/stderr pseudo files\n");

    assert( ((stdin_id == 0) && (stdout_id == 1) && (stderr_id == 2)) , __FUNCTION__ ,
    "bad indexes : stdin %d / stdout %d / stderr %d \n", stdin_id , stdout_id , stderr_id );

    // inherit the VFS root and cwd inodes from the model process,
    // and increment the reference count of both inodes
    process->vfs_root_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
                                                         &model_ptr->vfs_root_xp ) );
    process->vfs_cwd_xp  = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
                                                         &model_ptr->vfs_cwd_xp ) );
    vfs_inode_remote_up( process->vfs_root_xp );
    vfs_inode_remote_up( process->vfs_cwd_xp );

    remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );

    // copy all open file descriptors (other than stdin / stdout / stderr)
    process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
                            XPTR( model_cxy , &model_ptr->fd_array ) );

process_dmsg("\n[DBG] %s : core[%x,%d] / fd array for process %x\n", 
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );

    // reset children list root
    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    process->children_nr     = 0;

    // reset semaphore / mutex / barrier / condvar list roots
    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
    remote_spinlock_init( XPTR( local_cxy , &process->sync_lock ) );

    // register new process in the local cluster manager pref_tbl[]
    lpid_t lpid = LPID_FROM_PID( pid );
    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );

    // register new process descriptor in local cluster manager local_list
    cluster_process_local_link( process );

    // register new process descriptor in local cluster manager copies_list
    cluster_process_copies_link( process );

    // reset th_tbl[] array as empty in process descriptor
    uint32_t i;
    for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
	{
        process->th_tbl[i] = NULL;
    }
    process->th_nr  = 0;
    spinlock_init( &process->th_lock );

    // make all initializations visible to other cores
	hal_fence();

process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );

}  // process_reference init()
259
/////////////////////////////////////////////////////////////////////////////////////////
// This function initializes a local COPY of a process descriptor from the remote
// reference descriptor: it copies the identifiers and the VFS root / bin pointers,
// creates an empty local VMM and fd_array, and registers the copy in the local
// cluster manager local_list and in the owner cluster copies_list.
// - local_process        : [in] local pointer on the copy to initialize.
// - reference_process_xp : [in] extended pointer on the reference descriptor.
// Returns 0 on success (vmm_init failure is fatal via assert).
/////////////////////////////////////////////////////////////////////////////////////////
error_t process_copy_init( process_t * local_process,
                           xptr_t      reference_process_xp )
{
    error_t error;

    // get reference process cluster and local pointer
    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
    process_t * ref_ptr = (process_t *)GET_PTR( reference_process_xp );

    // set the pid, ppid, ref_xp fields in local process
    local_process->pid    = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->pid ) );
    local_process->ppid   = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->ppid ) );
    local_process->ref_xp = reference_process_xp;

process_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x in cluster %x\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , local_process->pid );

    // reset local process vmm (failure is fatal)
    error = vmm_init( local_process );
    assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n");

    // reset process file descriptors array
	process_fd_init( local_process );

    // reset vfs_root_xp / vfs_bin_xp / vfs_cwd_xp fields
    // the cwd is left NULL in a copy: it is fetched from the reference on demand
    local_process->vfs_root_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
    local_process->vfs_bin_xp  = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
    local_process->vfs_cwd_xp  = XPTR_NULL;

    // reset children list root (not used in a process descriptor copy)
    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
    local_process->children_nr   = 0;

    // reset brothers list (not used in a process descriptor copy)
    xlist_entry_init( XPTR( local_cxy , &local_process->brothers_list ) );

    // reset semaphore / mutex / barrier / condvar list roots
    // (not used in a process descriptor copy)
    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );

    // reset th_tbl[] array as empty
    uint32_t i;
    for( i = 0 ; i < CONFIG_THREAD_MAX_PER_CLUSTER ; i++ )
	{
        local_process->th_tbl[i] = NULL;
    }
    local_process->th_nr  = 0;
    spinlock_init( &local_process->th_lock );

    // register new process descriptor in local cluster manager local_list
    cluster_process_local_link( local_process );

    // register new process descriptor in owner cluster manager copies_list
    cluster_process_copies_link( local_process );

    // make all initializations visible to other cores
	hal_fence();

process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x in cluster %x\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , local_process->pid );

    return 0;

} // end process_copy_init()
326
/////////////////////////////////////////////////////////////////////////////////////////
// This function releases a local process descriptor that has no more registered
// thread: it unlinks the descriptor from the cluster manager local_list and from
// the owner cluster copies_list, releases the PID, decrements the reference
// counts of the attached VFS files, destroys the VMM, and frees the memory.
// Calling it while th_nr != 0 is a kernel panic.
// - process : [in] local pointer on the process descriptor to destroy.
/////////////////////////////////////////////////////////////////////////////////////////
void process_destroy( process_t * process )
{
	if( process->th_nr != 0 )
    {
        panic("process %x in cluster %x has still active threads",
              process->pid , local_cxy );
    }

    // get local process manager pointer
    pmgr_t * pmgr = &LOCAL_CLUSTER->pmgr;

    // remove the process descriptor from local_list in cluster manager
    remote_spinlock_lock( XPTR( local_cxy , &pmgr->local_lock ) );
    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
    remote_spinlock_unlock( XPTR( local_cxy , &pmgr->local_lock ) );

    // get extended pointer on copies_lock in owner cluster manager
    // NOTE(review): builds the remote pointer from the LOCAL pmgr address —
    // assumes the pmgr structure is at the same address in all clusters; confirm.
    cxy_t  owner_cxy    = CXY_FROM_PID( process->pid );
	lpid_t lpid         = LPID_FROM_PID( process->pid );
    xptr_t copies_lock  = XPTR( owner_cxy , &pmgr->copies_lock[lpid] );

    // remove the local process descriptor from copies_list
    remote_spinlock_lock( copies_lock );
    xlist_unlink( XPTR( local_cxy , &process->copies_list ) );
    remote_spinlock_unlock( copies_lock );

    // release the process PID to cluster manager
    cluster_pid_release( process->pid );

	hal_fence();

    // From this point, the process descriptor is unreachable

    // FIXME close all open files and update dirty [AG]

    // Decrease refcount for bin file, root file and cwd file
	if( process->vfs_bin_xp  != XPTR_NULL ) vfs_file_count_down( process->vfs_bin_xp );
	if( process->vfs_root_xp != XPTR_NULL ) vfs_file_count_down( process->vfs_root_xp );
	if( process->vfs_cwd_xp  != XPTR_NULL ) vfs_file_count_down( process->vfs_cwd_xp );

    // Destroy VMM
    vmm_destroy( process );

    // release memory allocated to process descriptor
    process_free( process );

}  // end process_destroy()
375
[409]376/////////////////////////////////////////////////
377char * process_action_str( uint32_t action_type )
378{
379    if     ( action_type == BLOCK_ALL_THREADS   ) return "BLOCK";
380    else if( action_type == UNBLOCK_ALL_THREADS ) return "UNBLOCK";
381    else if( action_type == DELETE_ALL_THREADS  ) return "DELETE";
382    else                                          return "undefined";
383}
384
/////////////////////////////////////////////////////////////////////////////////////////
// This function applies a given action (BLOCK / UNBLOCK / DELETE all threads) to
// ALL copies of a process: it sends one non-blocking RPC_PROCESS_SIGACTION to
// each remote cluster containing a copy, deschedules until all RPC responses are
// received, then applies the action locally. It must be called from the process
// owner cluster (enforced by assert).
// - process     : [in] local pointer on the process descriptor (owner copy).
// - action_type : [in] BLOCK_ALL_THREADS / UNBLOCK_ALL_THREADS / DELETE_ALL_THREADS.
/////////////////////////////////////////////////////////////////////////////////////////
void process_sigaction( process_t * process,
                        uint32_t    action_type )
{
    cxy_t              owner_cxy;         // owner cluster identifier
    lpid_t             lpid;              // process index in owner cluster
    cluster_t        * cluster;           // pointer on cluster manager
    xptr_t             root_xp;           // extended pointer on root of copies
    xptr_t             lock_xp;           // extended pointer on lock protecting copies
    xptr_t             iter_xp;           // iterator on copies list
    xptr_t             process_xp;        // extended pointer on process copy
    cxy_t              process_cxy;       // process copy cluster identifier
    process_t        * process_ptr;       // local pointer on process copy
    uint32_t           responses;         // number of remote process copies
    uint32_t           rsp_count;         // used to assert number of copies

    rpc_desc_t         rpc;               // rpc descriptor allocated in stack

sigaction_dmsg("\n[DBG] %s : enter to %s process %x in cluster %x\n",
__FUNCTION__ , process_action_str( action_type ) , process->pid , local_cxy );

    thread_t         * client = CURRENT_THREAD;
    xptr_t             client_xp = XPTR( local_cxy , client );

    // get local pointer on local cluster manager
    cluster = LOCAL_CLUSTER;

    // get owner cluster identifier and process lpid
    owner_cxy = CXY_FROM_PID( process->pid );
    lpid      = LPID_FROM_PID( process->pid );

    // check owner cluster
    assert( (owner_cxy == local_cxy) , __FUNCTION__ ,
    "must be executed in the owner cluster\n" ); 
   
    // get number of remote copies (all copies but the local one)
    responses = cluster->pmgr.copies_nr[lpid] - 1;
    rsp_count = 0;

    // check action type
    assert( ((action_type == DELETE_ALL_THREADS ) ||
             (action_type == BLOCK_ALL_THREADS )  ||
             (action_type == UNBLOCK_ALL_THREADS )),
             __FUNCTION__ , "illegal action type" );
             
    // initialise rpc descriptor: the "response" counter is decremented by the
    // RPC server side; the client thread is unblocked when it reaches zero
    rpc.index    = RPC_PROCESS_SIGACTION;
    rpc.response = responses;
    rpc.blocking = false;
    rpc.thread   = client;

    // get extended pointers on copies root, copies lock, and number of copies
    root_xp   = XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] );
    lock_xp   = XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] );

    // take the lock protecting the copies
    remote_spinlock_lock( lock_xp );

    // send RPCs to remote clusters
    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        process_cxy = GET_CXY( process_xp );
        process_ptr = (process_t *)GET_PTR( process_xp );

        // send RPC to remote clusters only (local copy is handled below)
        if( process_cxy != local_cxy ) 
        {

sigaction_dmsg("\n[DBG] %s : send RPC to remote cluster %x\n",
__FUNCTION__ , process_cxy );

            rpc.args[0] = (uint64_t)action_type;
            rpc.args[1] = (uint64_t)(intptr_t)process_ptr;
            rpc_process_sigaction_client( process_cxy , &rpc );
            rsp_count++;
        }
    }
   
    // release the lock protecting process copies
    remote_spinlock_unlock( lock_xp );

    // check number of copies matches the number of RPCs actually sent
    assert( (rsp_count == responses) , __FUNCTION__ ,
    "unconsistent number of process copies : rsp_count = %d / responses = %d",
    rsp_count , responses );

    // block and deschedule to wait RPC responses if required
    if( responses )
    {   
        thread_block( CURRENT_THREAD , THREAD_BLOCKED_RPC );
        sched_yield("BLOCKED on RPC_PROCESS_SIGACTION");
    }

sigaction_dmsg("\n[DBG] %s : make action in owner cluster %x\n",
__FUNCTION__ , local_cxy );


    // call directly the relevant function in local owner cluster
    if      (action_type == DELETE_ALL_THREADS  ) process_delete ( process , client_xp ); 
    else if (action_type == BLOCK_ALL_THREADS   ) process_block  ( process , client_xp ); 
    else if (action_type == UNBLOCK_ALL_THREADS ) process_unblock( process             );

sigaction_dmsg("\n[DBG] %s : exit after %s process %x in cluster %x\n",
__FUNCTION__ , process_action_str( action_type ) , process->pid , local_cxy );

}  // end process_sigaction()
492
/////////////////////////////////////////////////////////////////////////////////////////
// This function blocks all threads of a process registered in the local cluster:
// it sets the THREAD_BLOCKED_GLOBAL bit in every thread, and busy-waits until
// every target scheduler has acknowledged that its thread is no longer running.
// The client thread (identified by client_xp) is never blocked by this function.
// - process   : [in] local pointer on the target process descriptor.
// - client_xp : [in] extended pointer on the client thread requesting the action.
/////////////////////////////////////////////////////////////////////////////////////////
void process_block( process_t * process,
                    xptr_t      client_xp )
{
    thread_t          * target;         // pointer on target thread
    uint32_t            ltid;           // index in process th_tbl
    thread_t          * requester;      // requesting thread pointer
    uint32_t            count;          // requests counter
    volatile uint32_t   rsp_count;      // responses counter (decremented by schedulers)

    // get calling thread pointer
    requester = CURRENT_THREAD;

sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n",
__FUNCTION__ , process->pid , local_cxy );

    // get lock protecting process th_tbl[]
    spinlock_lock( &process->th_lock );

    // initialize local responses counter
    rsp_count = process->th_nr;

    // loop on process threads to block and deschedule all threads in cluster
    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
    {
        target = process->th_tbl[ltid];

        if( target != NULL )             // thread found
        {
            count++;

            // - if the target thread is the client thread, we do nothing,
            //   and simply decrement the responses counter.
            // - if the calling thread and the target thread are on the same core,
            //   we block the target thread, we don't ask anything to the scheduler,
            //   and simply decrement the responses counter.
            // - if the calling thread and the target thread are not running on the same
            //   core, we ask the target scheduler to acknowledge the blocking
            //   to be sure that the target thread is not running.
           
            if( XPTR( local_cxy , target ) == client_xp )
            {
                // decrement responses counter
                hal_atomic_add( (void *)&rsp_count , -1 );
            }
            else if( requester->core->lid == target->core->lid )
            {
                // set the global blocked bit in target thread descriptor.
                thread_block( target , THREAD_BLOCKED_GLOBAL );

                // decrement responses counter
                hal_atomic_add( (void *)&rsp_count , -1 );
            }
            else
            {
                // set the global blocked bit in target thread descriptor.
                thread_block( target , THREAD_BLOCKED_GLOBAL );

                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
                thread_set_req_ack( target , (void *)&rsp_count );

                // force scheduling on target thread
                dev_pic_send_ipi( local_cxy , target->core->lid );
            }
        }
    }

    // release lock protecting process th_tbl[]
    spinlock_unlock( &process->th_lock );

    // wait all responses from schedulers
    while( 1 )
    {
        // exit loop when all local responses received
        if ( rsp_count == 0 ) break;
   
        // wait 1000 cycles before retry
        hal_fixed_delay( 1000 );
    }

sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n",
__FUNCTION__ , process->pid , local_cxy , count );

}  // end process_block()
578
[416]579///////////////////////////////////////////
580void process_unblock( process_t * process )
[409]581{
582    thread_t          * target;        // pointer on target thead
583    uint32_t            ltid;          // index in process th_tbl
[416]584    uint32_t            count;         // requests counter
[409]585
[415]586sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n",
[409]587__FUNCTION__ , process->pid , local_cxy );
588
589    // get lock protecting process th_tbl[]
590    spinlock_lock( &process->th_lock );
591
592    // loop on process threads to unblock all threads in cluster
[416]593    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
594    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
[1]595    {
[409]596        target = process->th_tbl[ltid];
[1]597
[409]598        if( target != NULL )             // thread found
[1]599        {
[416]600            count++;
[1]601
[409]602            // reset the global blocked bit in target thread descriptor.
603            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
604        }
605    }
[1]606
[416]607    // get lock protecting process th_tbl[]
608    spinlock_unlock( &process->th_lock );
[407]609
[415]610sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n",
[416]611__FUNCTION__ , process->pid , local_cxy , count );
[407]612
[409]613}  // end process_unblock()
614
615/////////////////////////////////////////
616void process_delete( process_t * process,
617                     xptr_t      client_xp )
618{
[416]619    thread_t          * target;        // pointer on target thread
[409]620    uint32_t            ltid;          // index in process th_tbl
621    uint32_t            count;         // request counter
[416]622    thread_t          * requester;     // pointer on calling thread
[409]623
[415]624sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x at cycle %d\n",
[416]625__FUNCTION__ , process->pid , local_cxy , (uint32_t)hal_get_cycles() );
[409]626
[416]627    // get calling thread pointer
628    requester = CURRENT_THREAD;
629
630    // get lock protecting process th_tbl[]
631    spinlock_lock( &process->th_lock );
632
633    // loop on threads to set the REQ_DELETE flag
634    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
[409]635    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
636    {
[416]637        target = process->th_tbl[ltid];
[409]638
[416]639        if( target != NULL )             // thread found
[409]640        {
641            count++;
642
[416]643            // delete only if the target is not the client
644            if( XPTR( local_cxy , target ) != client_xp ) 
645            { 
646                hal_atomic_or( &target->flags , THREAD_FLAG_REQ_DELETE );
647            }
[1]648        }
649    }
650
[416]651    // get lock protecting process th_tbl[]
652    spinlock_unlock( &process->th_lock );
[407]653
[415]654sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x at cycle %d\n",
[416]655__FUNCTION__ , process->pid , local_cxy , (uint32_t)hal_get_cycles() );
[1]656
[409]657}  // end process_delete()
[407]658
[1]659///////////////////////////////////////////////
660process_t * process_get_local_copy( pid_t pid )
661{
662    error_t        error;
[172]663    process_t    * process_ptr;   // local pointer on process
[23]664    xptr_t         process_xp;    // extended pointer on process
[1]665
666    cluster_t * cluster = LOCAL_CLUSTER;
667
668    // get lock protecting local list of processes
[23]669    remote_spinlock_lock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]670
671    // scan the local list of process descriptors to find the process
[23]672    xptr_t  iter;
673    bool_t  found = false;
674    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
[1]675    {
[23]676        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
677        process_ptr = (process_t *)GET_PTR( process_xp );
678        if( process_ptr->pid == pid )
[1]679        {
680            found = true;
681            break;
682        }
683    }
684
685    // release lock protecting local list of processes
[23]686    remote_spinlock_unlock( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
[1]687
[172]688    // allocate memory for a new local process descriptor
[23]689    // and initialise it from reference cluster if required
[1]690    if( !found )
691    {
692        // get extended pointer on reference process descriptor
[23]693        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
[1]694
[23]695        assert( (ref_xp != XPTR_NULL) , __FUNCTION__ , "illegal pid\n" );
696
[1]697        // allocate memory for local process descriptor
[23]698        process_ptr = process_alloc();
699        if( process_ptr == NULL )  return NULL;
[1]700
701        // initialize local process descriptor copy
[23]702        error = process_copy_init( process_ptr , ref_xp );
[1]703        if( error ) return NULL;
704    }
705
[23]706    return process_ptr;
[1]707
[409]708}  // end process_get_local_copy()
709
[1]710//////////////////////////////////////////////////////////////////////////////////////////
711// File descriptor array related functions
712//////////////////////////////////////////////////////////////////////////////////////////
713
714///////////////////////////////////////////
715void process_fd_init( process_t * process )
716{
717    uint32_t fd;
718
719    remote_spinlock_init( XPTR( local_cxy , &process->fd_array.lock ) );
720
[23]721    process->fd_array.current = 0;
722
[1]723    // initialize array
[23]724    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]725    {
726        process->fd_array.array[fd] = XPTR_NULL;
727    }
728}
729
[23]730//////////////////////////////
731bool_t process_fd_array_full()
[1]732{
[172]733    // get extended pointer on reference process
[23]734    xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
[1]735
[23]736    // get reference process cluster and local pointer
737    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
738    cxy_t       ref_cxy = GET_CXY( ref_xp );
[1]739
[23]740    // get number of open file descriptors from reference fd_array
741    uint32_t current = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->fd_array.current ) );
742
[172]743        return ( current >= CONFIG_PROCESS_FILE_MAX_NR );
[1]744}
745
746/////////////////////////////////////////////////
[407]747error_t process_fd_register( process_t * process,
748                             xptr_t      file_xp,
749                             uint32_t  * fdid )
[1]750{
751    bool_t    found;
[23]752    uint32_t  id;
753    xptr_t    xp;
[1]754
[23]755    // get reference process cluster and local pointer
[407]756    xptr_t ref_xp = process->ref_xp;
[23]757    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
758    cxy_t       ref_cxy = GET_CXY( ref_xp );
759
760    // take lock protecting reference fd_array
761        remote_spinlock_lock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
762
[1]763    found   = false;
764
[23]765    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
[1]766    {
[23]767        xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) );
768        if ( xp == XPTR_NULL )
[1]769        {
770            found = true;
[23]771            hal_remote_swd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) , file_xp );
772                hal_remote_atomic_add( XPTR( ref_cxy , &ref_ptr->fd_array.current ) , 1 );
[407]773                        *fdid = id;
[1]774            break;
775        }
776    }
777
[23]778    // release lock protecting reference fd_array
779        remote_spinlock_unlock( XPTR( ref_cxy , &ref_ptr->fd_array.lock ) );
[1]780
781    if ( !found ) return EMFILE;
782    else          return 0;
[172]783}
[1]784
[172]785////////////////////////////////////////////////
[23]786xptr_t process_fd_get_xptr( process_t * process,
[407]787                            uint32_t    fdid )
[1]788{
[23]789    xptr_t  file_xp;
[1]790
[23]791    // access local copy of process descriptor
[407]792    file_xp = process->fd_array.array[fdid];
[1]793
[23]794    if( file_xp == XPTR_NULL )
795    {
796        // get reference process cluster and local pointer
797        xptr_t      ref_xp  = process->ref_xp;
798        cxy_t       ref_cxy = GET_CXY( ref_xp );
799        process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
[1]800
[23]801        // access reference process descriptor
[407]802        file_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
[1]803
[23]804        // update local fd_array if found
805        if( file_xp != XPTR_NULL )
806        {
[407]807            process->fd_array.array[fdid] = file_xp;
[23]808        }
809    }
[1]810
[23]811    return file_xp;
[1]812
[407]813}  // end process_fd_get_xptr()
814
[1]815///////////////////////////////////////////
816void process_fd_remote_copy( xptr_t dst_xp,
817                             xptr_t src_xp )
818{
819    uint32_t fd;
820    xptr_t   entry;
821
822    // get cluster and local pointer for src fd_array
823    cxy_t        src_cxy = GET_CXY( src_xp );
824    fd_array_t * src_ptr = (fd_array_t *)GET_PTR( src_xp );
825
826    // get cluster and local pointer for dst fd_array
827    cxy_t        dst_cxy = GET_CXY( dst_xp );
828    fd_array_t * dst_ptr = (fd_array_t *)GET_PTR( dst_xp );
829
830    // get the remote lock protecting the src fd_array
831        remote_spinlock_lock( XPTR( src_cxy , &src_ptr->lock ) );
832
[409]833    // loop on all entries other than
834    // the three first entries: stdin/stdout/stderr
835    for( fd = 3 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
[1]836        {
837                entry = (xptr_t)hal_remote_lwd( XPTR( src_cxy , &src_ptr->array[fd] ) );
838
839                if( entry != XPTR_NULL )
840                {
841            // increment file descriptor ref count
842            vfs_file_count_up( entry );
843
844                        // copy entry in destination process fd_array
845                        hal_remote_swd( XPTR( dst_cxy , &dst_ptr->array[fd] ) , entry );
846                }
847        }
848
849    // release lock on source process fd_array
850        remote_spinlock_unlock( XPTR( src_cxy , &src_ptr->lock ) );
851
[407]852}  // end process_fd_remote_copy()
853
[1]854////////////////////////////////////////////////////////////////////////////////////
855//  Thread related functions
856////////////////////////////////////////////////////////////////////////////////////
857
858/////////////////////////////////////////////////////
859error_t process_register_thread( process_t * process,
860                                 thread_t  * thread,
861                                 trdid_t   * trdid )
862{
863    ltid_t   ltid;
864    bool_t   found;
865
[14]866    assert( (process != NULL) , __FUNCTION__ , "process argument is NULL" );
[1]867
[14]868    assert( (thread != NULL) , __FUNCTION__ , "thread argument is NULL" );
869
[407]870    // search a free slot in th_tbl[]
871    // 0 is not a valid ltid value
[1]872    found = false;
[407]873    for( ltid = 1 ; ltid < CONFIG_THREAD_MAX_PER_CLUSTER ; ltid++ )
[1]874    {
875        if( process->th_tbl[ltid] == NULL )
876        {
877            found = true;
878            break;
879        }
880    }
881
882    if( found )
883    {
884        // register thread in th_tbl[]
885        process->th_tbl[ltid] = thread;
886        process->th_nr++;
887
888        // returns trdid
889        *trdid = TRDID( local_cxy , ltid );
890    }
891
892    return (found) ? 0 : ENOMEM;
[204]893
894}  // end process_register_thread()
895
[1]896///////////////////////////////////////////////
897void process_remove_thread( thread_t * thread )
898{
[373]899    assert( (thread != NULL) , __FUNCTION__ , "thread argument is NULL" );
[172]900
[1]901    process_t * process = thread->process;
902
903    // get thread local index
904    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
905
906    // remove thread from th_tbl[]
907    process->th_tbl[ltid] = NULL;
908    process->th_nr--;
909
[204]910}  // process_remove_thread()
911
[408]912/////////////////////////////////////////////////////////
913error_t process_make_fork( xptr_t      parent_process_xp,
914                           xptr_t      parent_thread_xp,
915                           pid_t     * child_pid,
916                           thread_t ** child_thread )
[1]917{
[408]918    process_t * process;         // local pointer on child process descriptor
919    thread_t  * thread;          // local pointer on child thread descriptor
920    pid_t       new_pid;         // process identifier for child process
921    pid_t       parent_pid;      // process identifier for parent process
922    xptr_t      ref_xp;          // extended pointer on reference process
923    error_t     error;
[1]924
[408]925    // get cluster and local pointer for parent process
926    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
927    process_t * parent_process_ptr = (process_t *)GET_PTR( parent_process_xp );
[101]928
[408]929    // get parent process PID
930    parent_pid = hal_remote_lw( XPTR( parent_process_cxy , &parent_process_ptr->pid ) );
931   
932    // check parent process is the reference
933    ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
934    assert( (parent_process_xp == ref_xp ) , __FUNCTION__ ,
935    "parent process must be the reference process\n" );
[407]936
[409]937fork_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n",
938__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid , (uint32_t)hal_get_cycles() );
[172]939
[408]940    // allocate a process descriptor
941    process = process_alloc();
942    if( process == NULL )
943    {
944        printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 
945        __FUNCTION__, local_cxy ); 
946        return -1;
947    }
[1]948
[409]949fork_dmsg("\n[DBG] %s : core[%x,%d] child process descriptor allocated at cycle %d\n",
950 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
[408]951
952    // allocate a child PID from local cluster
[416]953    error = cluster_pid_alloc( process , &new_pid );
[408]954    if( (error != 0) || (new_pid == 0) )
[1]955    {
[408]956        printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 
957        __FUNCTION__, local_cxy ); 
958        process_free( process );
959        return -1;
[1]960    }
[408]961
[409]962fork_dmsg("\n[DBG] %s : core[%x, %d] child process PID allocated = %x at cycle %d\n",
963 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, new_pid , (uint32_t)hal_get_cycles() );
[408]964
965    // initializes child process descriptor from parent process descriptor
966    process_reference_init( process,
967                            new_pid,
968                            parent_pid,
969                            parent_process_xp );
970
[409]971fork_dmsg("\n[DBG] %s : core[%x, %d] child process initialised at cycle %d\n",
[408]972__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() );
973
974    // copy VMM from parent descriptor to child descriptor
975    error = vmm_fork_copy( process,
976                           parent_process_xp );
977    if( error )
[101]978    {
[408]979        printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 
980        __FUNCTION__, local_cxy ); 
981        process_free( process );
982        cluster_pid_release( new_pid );
983        return -1;
[101]984    }
[172]985
[409]986fork_dmsg("\n[DBG] %s : core[%x, %d] child process VMM copied at cycle %d\n",
987__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
[407]988
[408]989    // create child thread descriptor from parent thread descriptor
990    error = thread_user_fork( parent_thread_xp,
991                              process,
992                              &thread );
993    if( error )
994    {
995        printk("\n[ERROR] in %s : cannot create thread in cluster %x\n",
996        __FUNCTION__, local_cxy ); 
997        process_free( process );
998        cluster_pid_release( new_pid );
999        return -1;
1000    }
[172]1001
[409]1002fork_dmsg("\n[DBG] %s : core[%x,%d] child thread created at cycle %d\n", 
1003__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
[1]1004
[408]1005    // update parent process GPT to set Copy_On_Write for shared data vsegs
1006    // this includes all replicated GPT copies
1007    if( parent_process_cxy == local_cxy )   // reference is local
1008    {
1009        vmm_set_cow( parent_process_ptr );
1010    }
1011    else                                    // reference is remote
1012    {
1013        rpc_vmm_set_cow_client( parent_process_cxy,
1014                                parent_process_ptr );
1015    }
[1]1016
[409]1017fork_dmsg("\n[DBG] %s : core[%x,%d] COW set in parent_process at cycle %d\n",
1018__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
[101]1019
[408]1020    // update children list in parent process
1021        xlist_add_last( XPTR( parent_process_cxy , &parent_process_ptr->children_root ),
1022                    XPTR( local_cxy , &process->brothers_list ) );
1023        hal_remote_atomic_add( XPTR( parent_process_cxy,
1024                                 &parent_process_ptr->children_nr), 1 );
[101]1025
[408]1026// vmm_display( process , true );
1027// vmm_display( parent_process_ptr , true );
1028// sched_display( 0 );
[204]1029
[408]1030    // return success
1031    *child_thread = thread;
1032    *child_pid    = new_pid;
[1]1033
[409]1034fork_dmsg("\n[DBG] %s : core[%x,%d] exit at cycle %d\n",
1035__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
1036
[408]1037    return 0;
1038
[416]1039}   // end process_make_fork()
[408]1040
[409]1041
[408]1042/////////////////////////////////////////////////////
1043error_t process_make_exec( exec_info_t  * exec_info )
1044{
1045    char           * path;                    // pathname to .elf file
[416]1046    process_t      * old_process;             // local pointer on old process
1047    process_t      * new_process;             // local pointer on new process
1048    pid_t            old_pid;                 // old process identifier
1049    pid_t            new_pid;                 // new (temporary) process identifier
1050    thread_t       * old_thread;              // pointer on new thread
1051    thread_t       * new_thread;              // pointer on new thread
[408]1052    pthread_attr_t   attr;                    // main thread attributes
1053    lid_t            lid;                     // selected core local index
1054        error_t          error;
1055
1056        // get .elf pathname and PID from exec_info
[416]1057        path     = exec_info->path;
1058    old_pid  = exec_info->pid;
[408]1059
[416]1060    // this function must be executed by a thread running in owner cluster
1061    assert( (CXY_FROM_PID( old_pid ) == local_cxy), __FUNCTION__,
1062    "local cluster %x is not owner for process %x\n", local_cxy, old_pid );
[408]1063
1064exec_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x / path = %s\n",
[416]1065__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, old_pid , path );
[408]1066
[416]1067    // get old process and thread local pointers
1068    old_process = (process_t *)cluster_get_local_process_from_pid( old_pid );
1069    old_thread  = CURRENT_THREAD;
[408]1070   
[416]1071    if( old_process == NULL )
1072    {
1073        printk("\n[ERROR] in %s : cannot get old process descriptor\n", __FUNCTION__ );
1074        return -1;
1075    }
[408]1076
1077    // allocate memory for new process descriptor
[416]1078    new_process = process_alloc();
[408]1079
[416]1080    if( new_process == NULL )
1081    {
1082        printk("\n[ERROR] in %s : cannot allocate new process descriptor\n", __FUNCTION__ );
1083        return -1;
1084    }
1085
1086    // get a (temporary) PID for new process
1087    error = cluster_pid_alloc( new_process , &new_pid );
1088
1089    if( error )
1090    {
1091        printk("\n[ERROR] in %s : cannot allocate a temporary PID\n", __FUNCTION__ );
1092        process_destroy( new_process );
1093        return -1;
1094    }
1095
[408]1096    // initialize new process descriptor
[416]1097    process_reference_init( new_process,
1098                            new_pid,                            // temporary PID
1099                            old_process->ppid,                  // same parent
1100                            XPTR( local_cxy , old_process ) );
[408]1101
1102exec_dmsg("\n[DBG] %s : core[%x,%d] created new process %x / path = %s\n",
[416]1103__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, new_pid, path );
[408]1104
1105    // register "code" and "data" vsegs as well as entry-point
1106    // in new process VMM, using information contained in the elf file.
[416]1107        if( elf_load_process( path , new_process ) )
[1]1108        {
[407]1109                printk("\n[ERROR] in %s : failed to access .elf file for process %x / path = %s\n",
[416]1110                __FUNCTION__, new_pid , path );
1111        cluster_pid_release( new_pid );
1112        process_destroy( new_process );
[408]1113        return -1;
[1]1114        }
1115
[409]1116exec_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered / path = %s\n",
1117__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path );
[1]1118
[408]1119    // select a core in local cluster to execute the main thread
[1]1120    lid  = cluster_select_local_core();
1121
1122    // initialize pthread attributes for main thread
[23]1123    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
1124    attr.cxy        = local_cxy;
1125    attr.lid        = lid;
[1]1126
[172]1127    // create and initialize thread descriptor
[416]1128        error = thread_user_create( new_pid,
1129                                (void *)new_process->vmm.entry_point,
[23]1130                                exec_info->args_pointers,
[1]1131                                &attr,
[416]1132                                &new_thread );
[1]1133        if( error )
1134        {
1135                printk("\n[ERROR] in %s : cannot create thread for process %x / path = %s\n",
[416]1136            __FUNCTION__, new_pid , path );
1137        cluster_pid_release( new_pid );
1138        process_destroy( new_process );
[408]1139        return -1;
[1]1140        }
1141
[416]1142exec_dmsg("\n[DBG] %s : core[%x,%d] created main thread %x\n",
1143__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, new_thread->trdid );
[204]1144
[416]1145    // update children list rooted in parent process
1146        xlist_replace( XPTR( local_cxy , &old_process->brothers_list ) ,
1147                   XPTR( local_cxy , &new_process->brothers_list ) );
[101]1148
[416]1149    // request blocking for all threads in old process (but the calling thread)
1150    process_sigaction( old_process , BLOCK_ALL_THREADS );
[408]1151
[416]1152    // request destruction for all threads in old process (but the calling thread)
1153    process_sigaction( old_process , DELETE_ALL_THREADS );
1154
1155    // update PID for both processes
1156    new_process->pid = old_pid;
1157    old_process->pid = 0xFFFFFFFF;
1158
1159    // release temporary PID
1160    cluster_pid_release( new_pid );
1161   
[172]1162    // activate new thread
[416]1163        thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );
[1]1164
[408]1165exec_dmsg("\n[DBG] %s : core[%x,%d] exit for path = %s\n",
[407]1166__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path  );
[204]1167
[416]1168    // set BLOCKED_GLOBAL bit
1169    thread_block( old_thread , THREAD_BLOCKED_GLOBAL );
[1]1170
[416]1171    // set REQ_DELETE flag
1172    hal_atomic_or( &old_thread->flags , THREAD_FLAG_REQ_DELETE );
[204]1173
[416]1174    // deschedule
1175    sched_yield("suicide after exec"); 
[409]1176
[416]1177    // never executed but required by compiler
[409]1178        return 0;
1179
1180}  // end process_make_exec()
1181
[416]1182///////////////////////////////////////
1183void process_make_kill( pid_t      pid,
1184                        uint32_t   sig_id )
[409]1185{
1186    // this function must be executed by a thread running in owner cluster
[416]1187    assert( (CXY_FROM_PID( pid ) == local_cxy) , __FUNCTION__ ,
[409]1188    "must execute in owner cluster" );
1189
[416]1190kill_dmsg("\n[DBG] %s : core[%x,%d] enter / process %x / sig %d\n",
1191__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid , sig_id );
1192
1193    // get pointer on local process descriptor
1194    process_t * process = process_get_local_copy( pid );
1195
1196    // does nothing if process does not exist
1197    if( process == NULL )
1198    {
1199        printk("\n[WARNING] %s : process %x does not exist => do nothing\n",
1200        __FUNCTION__ , pid );
1201        return;
1202    }
1203
[409]1204    // analyse signal type
1205    switch( sig_id )
1206    {
[416]1207        case SIGSTOP:     // block all threads in all clusters
[409]1208        {
1209            process_sigaction( process , BLOCK_ALL_THREADS );
1210        }
1211        break;
[416]1212        case SIGCONT:     // unblock all threads in all clusters
[409]1213        {
1214            process_sigaction( process , UNBLOCK_ALL_THREADS );
1215        }
1216        break;
1217        case SIGKILL:  // block all threads, then delete all threads
1218        {
[416]1219            // block all threads (but the calling thread)
[409]1220            process_sigaction( process , BLOCK_ALL_THREADS );
[416]1221
1222            // delete all threads (but the calling thread)
[409]1223            process_sigaction( process , DELETE_ALL_THREADS );
[416]1224
1225            // delete the calling thread if required
1226            thread_t * this = CURRENT_THREAD;
1227
1228            if( this->process == process )
1229            {
1230                // set REQ_DELETE flag
1231                hal_atomic_or( &this->flags , THREAD_FLAG_REQ_DELETE );
1232
1233                // deschedule
1234                sched_yield( "suicide after kill" ); 
1235            }
[409]1236        }
1237        break;
1238    }
[416]1239
1240//@@@
1241sched_display( 0 );
1242//@@@
1243
1244kill_dmsg("\n[DBG] %s : core[%x,%d] exit / process %x / sig %d \n",
1245__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid , sig_id );
1246
[409]1247}  // end process_make_kill()
1248
[416]1249/////////////////////////////////////////
1250void process_make_exit( pid_t       pid,
[409]1251                        uint32_t    status )
1252{
1253    // this function must be executed by a thread running in owner cluster
[416]1254    assert( (CXY_FROM_PID( pid ) == local_cxy) , __FUNCTION__ ,
[409]1255    "must execute in owner cluster" );
1256
[416]1257    // get pointer on local process descriptor
1258    process_t * process = process_get_local_copy( pid );
1259
1260    // does nothing if process does not exist
1261    if( process == NULL )
1262    {
1263        printk("\n[WARNING] %s : process %x does not exist => do nothing\n",
1264        __FUNCTION__ , pid );
1265        return;
1266    }
1267
1268    // block all threads in all clusters (but the calling thread)
[409]1269    process_sigaction( process , BLOCK_ALL_THREADS );
1270
[416]1271    // delete all threads in all clusters (but the calling thread)
[409]1272    process_sigaction( process , DELETE_ALL_THREADS );
1273
[416]1274    // delete the calling thread
1275    hal_atomic_or( &CURRENT_THREAD->flags , THREAD_FLAG_REQ_DELETE );
[409]1276
[416]1277    // deschedule
1278    sched_yield( "suicide after exit" ); 
1279
[409]1280}  // end process_make_exit()
1281
[1]1282//////////////////////////
1283void process_init_create()
1284{
[409]1285    process_t      * process;       // local pointer on process_init descriptor
1286    pid_t            pid;           // process_init identifier
1287    thread_t       * thread;        // local pointer on main thread
1288    pthread_attr_t   attr;          // main thread attributes
1289    lid_t            lid;           // selected core local index for main thread
1290    error_t          error;
[1]1291
[409]1292kinit_dmsg("\n[DBG] %s :  core[%x,%d] enters\n", 
1293__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid );
[1]1294
[408]1295    // allocates memory for process descriptor from local cluster
1296        process = process_alloc(); 
1297        if( process == NULL )
1298    {
1299                printk("\n[PANIC] in %s : no memory for process descriptor in cluster %x\n",
[409]1300                __FUNCTION__, local_cxy  );
[408]1301    }
[101]1302
[409]1303    // get PID from local cluster
[416]1304    error = cluster_pid_alloc( process , &pid );
[408]1305    if( error )
1306    {
1307                printk("\n[PANIC] in %s : cannot allocate PID in cluster %x\n",
1308                __FUNCTION__, local_cxy );
[409]1309        process_destroy( process );
[408]1310    }
1311
[409]1312    assert( (LPID_FROM_PID(pid) == 1) , __FUNCTION__ , "LPID must be 1 for process_init" );
1313
1314    // initialize process descriptor / parent is local process_zero
1315    process_reference_init( process,
[408]1316                            pid,
[409]1317                            0,
[408]1318                            XPTR( local_cxy , &process_zero ) );
1319
[409]1320kinit_dmsg("\n[DBG] %s : core[%x,%d] / process initialised\n", 
1321__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid );
[1]1322
[409]1323    // register "code" and "data" vsegs as well as entry-point
1324    // in process VMM, using information contained in the elf file.
1325        if( elf_load_process( CONFIG_PROCESS_INIT_PATH , process ) )
1326        {
1327                printk("\n[PANIC] in %s : cannot access .elf file / path = %s\n",
1328                __FUNCTION__, CONFIG_PROCESS_INIT_PATH );
1329        process_destroy( process );
1330        }
[101]1331
[409]1332kinit_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered / path = %s\n",
1333__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, CONFIG_PROCESS_INIT_PATH );
1334
1335    // select a core in local cluster to execute the main thread
1336    lid  = cluster_select_local_core();
1337
1338    // initialize pthread attributes for main thread
1339    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
1340    attr.cxy        = local_cxy;
1341    attr.lid        = lid;
1342
1343    // create and initialize thread descriptor
1344        error = thread_user_create( pid,
1345                                (void *)process->vmm.entry_point,
1346                                NULL,
1347                                &attr,
1348                                &thread );
[408]1349        if( error )
[409]1350        {
1351                printk("\n[PANIC] in %s : cannot create main thread / path = %s\n",
1352                __FUNCTION__, CONFIG_PROCESS_INIT_PATH );
1353        process_destroy( process );
1354        }
[1]1355
[409]1356    // activate thread
1357        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
1358
[124]1359    hal_fence();
[1]1360
[409]1361kinit_dmsg("\n[DBG] %s : core[%x,%d] exit / main thread = %x\n",
1362__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, thread );
1363
[204]1364}  // end process_init_create()
1365
Note: See TracBrowser for help on using the repository browser.