source: trunk/kernel/kern/process.c @ 690

Last change on this file since 690 was 683, checked in by alain, 4 years ago

All modifications required to support the <tcp_chat> application
including error recovery in case of packet loss.

File size: 107.4 KB
1/*
2 * process.c - process related functions definition.
3 *
4 * Authors  Ghassan Almaless       (2008,2009,2010,2011,2012)
5 *          Mohamed Lamine Karaoui (2015)
6 *          Alain Greiner          (2016,2017,2018,2019,2020)
7 *
8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH.
11 *
12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26#include <kernel_config.h>
27#include <hal_kernel_types.h>
28#include <hal_remote.h>
29#include <hal_uspace.h>
30#include <hal_irqmask.h>
31#include <hal_vmm.h>
32#include <errno.h>
33#include <printk.h>
34#include <memcpy.h>
35#include <bits.h>
36#include <kmem.h>
37#include <page.h>
38#include <vmm.h>
39#include <vfs.h>
40#include <core.h>
41#include <thread.h>
42#include <chdev.h>
43#include <ksocket.h>
44#include <list.h>
45#include <string.h>
46#include <scheduler.h>
47#include <busylock.h>
48#include <queuelock.h>
49#include <remote_queuelock.h>
50#include <rwlock.h>
51#include <remote_rwlock.h>
52#include <dqdt.h>
53#include <cluster.h>
54#include <ppm.h>
55#include <boot_info.h>
56#include <process.h>
57#include <elf.h>
58#include <syscalls.h>
59#include <shared_syscalls.h>
60
61//////////////////////////////////////////////////////////////////////////////////////////
62// Extern global variables
63//////////////////////////////////////////////////////////////////////////////////////////
64
65extern process_t           process_zero;     // allocated in kernel_init.c
66extern chdev_directory_t   chdev_dir;        // allocated in kernel_init.c
67
68//////////////////////////////////////////////////////////////////////////////////////////
69// Process initialisation related functions
70//////////////////////////////////////////////////////////////////////////////////////////
71
72////////////////////////////////////////////////////
73error_t process_reference_init( process_t * process,
74                                pid_t       pid,
75                                xptr_t      parent_xp )
76{
77    error_t     error;
78    xptr_t      process_xp;
79    cxy_t       parent_cxy;
80    process_t * parent_ptr;
81    xptr_t      stdin_xp;
82    xptr_t      stdout_xp;
83    xptr_t      stderr_xp;
84    uint32_t    stdin_id;
85    uint32_t    stdout_id;
86    uint32_t    stderr_id;
87    uint32_t    txt_id;
88    char        rx_path[40];
89    char        tx_path[40];
90    pid_t       parent_pid;
91    vmm_t     * vmm;
92
93#if DEBUG_PROCESS_REFERENCE_INIT || DEBUG_PROCESS_ERROR
94thread_t * this  = CURRENT_THREAD;
95uint32_t   cycle = (uint32_t)hal_get_cycles();
96#endif
97
98    // build extended pointer on reference process
99    process_xp = XPTR( local_cxy , process );
100
101    // get pointer on process vmm
102    vmm = &process->vmm;
103
104    // get parent process cluster and local pointer
105    parent_cxy = GET_CXY( parent_xp );
106    parent_ptr = GET_PTR( parent_xp );
107
108    // get parent_pid
109    parent_pid = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
110
111#if DEBUG_PROCESS_REFERENCE_INIT
112if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
113printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
114__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
115#endif
116
117    // initialize pid, ref_xp, parent_xp, owner_xp, term_state fields
118        process->pid        = pid;
119    process->ref_xp     = XPTR( local_cxy , process );
120    process->owner_xp   = XPTR( local_cxy , process );
121    process->parent_xp  = parent_xp;
122    process->term_state = 0;
123
124    // initialize VFS root inode and CWD inode
125    process->vfs_root_xp = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->vfs_root_xp ) );
126    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
127
128    // initialize VSL as empty
129    vmm->vsegs_nr = 0;
130        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
131
132    // create an empty GPT as required by the architecture
133    error = hal_gpt_create( &vmm->gpt );
134    if( error ) 
135    {
136
137#if DEBUG_PROCESS_ERROR
138printk("\n[ERROR] in %s : thread[%x,%x] cannot create empty GPT / cycle %d\n",
139__FUNCTION__, this->process->pid, this->trdid, cycle );
140#endif
141        return -1;
142    }
143
144#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
145if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
146printk("\n[%s] thread[%x,%x] created empty GPT for process %x\n",
147__FUNCTION__, parent_pid, this->trdid, pid );
148#endif
149
150    // initialize VSL lock
151        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
152
153    // register kernel vsegs in user process VMM as required by the architecture
154    error = hal_vmm_kernel_update( process );
155    if( error ) 
156    {
157
158#if DEBUG_PROCESS_ERROR
159printk("\n[ERROR] in %s : thread[%x,%x] cannot register kernel vsegs in VMM / cycle %d\n",
160__FUNCTION__, this->process->pid, this->trdid, cycle );
161#endif
162        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
163        return -1;
164    }
165
166#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
167if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
168printk("\n[%s] thread[%x,%x] registered kernel vsegs in VSL for process %x\n",
169__FUNCTION__, parent_pid, this->trdid, pid );
170#endif
171
172    // create "args" and "envs" vsegs
173    // create "stacks" and "mmap" vsegs allocators
174    // initialize locks protecting GPT and VSL
175    error = vmm_user_init( process );
176    if( error ) 
177    {
178        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
179        return -1;
180    }
181 
182#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
183cycle = (uint32_t)hal_get_cycles();
184if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
185printk("\n[%s] thread[%x,%x] initialized vmm for process %x\n", 
186__FUNCTION__, parent_pid, this->trdid, pid );
187#endif
188
189    // initialize fd_array as empty
190    process_fd_init( process );
191
192    // define the stdin/stdout/stderr pseudo files <=> select a TXT terminal.
193    if( (pid == 1) || (parent_pid  == 1) )      // INIT or KSH  process
194    {
195        // select a TXT channel
196        if( pid == 1 )  txt_id = 0;                     // INIT
197        else            txt_id = process_txt_alloc();   // KSH
198
199        // attach process to TXT
200        process_txt_attach( process_xp , txt_id ); 
201
202#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
203cycle = (uint32_t)hal_get_cycles();
204if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
205printk("\n[%s] thread[%x,%x] / process %x attached to TXT%d / cycle %d\n", 
206__FUNCTION__, parent_pid, this->trdid, pid, txt_id, cycle );
207#endif
208        // build path to TXT_RX[i] and TXT_TX[i] chdevs
209        snprintk( rx_path , 40 , "/dev/external/txt%d_rx", txt_id );
210        snprintk( tx_path , 40 , "/dev/external/txt%d_tx", txt_id );
211
212        // create stdin pseudo file         
213        error = vfs_open(  process->vfs_root_xp,
214                           rx_path,
215                           process_xp,
216                           O_RDONLY, 
217                           0,                // FIXME chmod
218                           &stdin_xp, 
219                           &stdin_id );
220        if( error )
221        {
222
223#if DEBUG_PROCESS_ERROR
224printk("\n[ERROR] in %s : thread[%x,%x] cannot open stdin pseudo file / cycle %d\n",
225__FUNCTION__, this->process->pid, this->trdid, cycle );
226#endif
227            return -1;
228        }
229
230assert( __FUNCTION__, (stdin_id == 0) , "stdin index must be 0" );
231
232#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
233cycle = (uint32_t)hal_get_cycles();
234if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
235printk("\n[%s] thread[%x,%x] / stdin open for process %x / cycle %d\n", 
236__FUNCTION__, parent_pid, this->trdid, pid, cycle );
237#endif
238
239        // create stdout pseudo file         
240        error = vfs_open(  process->vfs_root_xp,
241                           tx_path,
242                           process_xp,
243                           O_WRONLY, 
244                           0,                // FIXME chmod
245                           &stdout_xp, 
246                           &stdout_id );
247        if( error )
248        {
249
250#if DEBUG_PROCESS_ERROR
251printk("\n[ERROR] in %s : thread[%x,%x] cannot open stdout pseudo file / cycle %d\n",
252__FUNCTION__, this->process->pid, this->trdid, cycle );
253#endif
254            return -1;
255        }
256
257assert( __FUNCTION__, (stdout_id == 1) , "stdout index must be 1" );
258
259#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
260cycle = (uint32_t)hal_get_cycles();
261if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
262printk("\n[%s] thread[%x,%x] / stdout open for process %x / cycle %d\n", 
263__FUNCTION__, parent_pid, this->trdid, pid, cycle );
264#endif
265
266        // create stderr pseudo file         
267        error = vfs_open(  process->vfs_root_xp,
268                           tx_path,
269                           process_xp,
270                           O_WRONLY, 
271                           0,                // FIXME chmod
272                           &stderr_xp, 
273                           &stderr_id );
274        if( error )
275        {
276
277#if DEBUG_PROCESS_ERROR
278printk("\n[ERROR] in %s : thread[%x,%x] cannot open stderr pseudo file / cycle %d\n",
279__FUNCTION__, this->process->pid, this->trdid, cycle );
280#endif
281            return -1;
282        }
283
284assert( __FUNCTION__, (stderr_id == 2) , "stderr index must be 2" );
285
286#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
287cycle = (uint32_t)hal_get_cycles();
288if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
289printk("\n[%s] thread[%x,%x] / stderr open for process %x / cycle %d\n", 
290__FUNCTION__, parent_pid, this->trdid, pid, cycle );
291#endif
292
293    }
294    else                                            // normal user process
295    {
296        // get parent process TXT index
297        txt_id = process_txt_get_index( parent_xp );
298
299        // attach child process to same TXT terminal as parent
300        process_txt_attach( process_xp , txt_id ); 
301
302        // recreate all open files from parent process fd_array to child process fd_array
303        error = process_fd_replicate( process_xp , parent_xp );
304
305        if( error )
306        {
307
308#if DEBUG_PROCESS_ERROR
309printk("\n[ERROR] in %s : thread[%x,%x] cannot replicate fd_array / cycle %d\n",
310__FUNCTION__, this->process->pid, this->trdid, cycle );
311#endif
312            return -1;
313        }
314
315    }
316
317    // initialize lock protecting CWD changes
318    remote_busylock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD );
319
320#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
321cycle = (uint32_t)hal_get_cycles();
322if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
323printk("\n[%s] thread[%x,%x] / set fd_array for process %x / cycle %d\n", 
324__FUNCTION__, parent_pid, this->trdid, pid , cycle );
325#endif
326
327    // reset children list root
328    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
329    process->children_nr     = 0;
330    remote_queuelock_init( XPTR( local_cxy,
331                                 &process->children_lock ), LOCK_PROCESS_CHILDREN );
332
333    // reset semaphore / mutex / barrier / condvar list roots and lock
334    xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
335    xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
336    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
337    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
338    remote_queuelock_init( XPTR( local_cxy , 
339                                 &process->sync_lock ), LOCK_PROCESS_USERSYNC );
340
341    // reset open directories root and lock
342    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
343    remote_queuelock_init( XPTR( local_cxy , 
344                                 &process->dir_lock ), LOCK_PROCESS_DIR );
345
346    // register new process in the local cluster manager pref_tbl[]
347    lpid_t lpid = LPID_FROM_PID( pid );
348    LOCAL_CLUSTER->pmgr.pref_tbl[lpid] = XPTR( local_cxy , process );
349
350    // register new process descriptor in local cluster manager local_list
351    cluster_process_local_link( process );
352
353    // register new process descriptor in local cluster manager copies_list
354    cluster_process_copies_link( process );
355
356    // initialize th_tbl[] array and associated threads
357    uint32_t i;
358
359    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
360        {
361        process->th_tbl[i] = NULL;
362    }
363    process->th_nr  = 0;
364    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
365
366        hal_fence();
367
368#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
369cycle = (uint32_t)hal_get_cycles();
370if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
371printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", 
372__FUNCTION__, parent_pid, this->trdid, pid, cycle );
373#endif
374
375#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
376hal_vmm_display( parent_xp , false );
377hal_vmm_display( XPTR( local_cxy , process ) , false );
378#endif
379
380    return 0;
381
382}  // process_reference_init()
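
// Editorial usage sketch (not part of the original process.c source): it shows
// how a caller in the owner cluster might allocate and initialize a reference
// process descriptor with process_reference_init(). The example_ name is
// hypothetical, PID allocation is out of scope, and the sketch relies only on
// the headers already included at the top of this file.
static error_t example_create_reference_process( pid_t  pid,
                                                 xptr_t parent_xp )
{
    // allocate memory for the process descriptor in the local cluster
    process_t * process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO );
    if( process == NULL ) return -1;

    // initialize it as the reference (and owner) process copy
    if( process_reference_init( process , pid , parent_xp ) )
    {
        kmem_free( process , bits_log2(sizeof(process_t)) );
        return -1;
    }
    return 0;
}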
383
384/////////////////////////////////////////////////////
385error_t process_copy_init( process_t * local_process,
386                           xptr_t      reference_process_xp )
387{
388    error_t   error;
389    vmm_t   * vmm;
390
391#if DEBUG_PROCESS_COPY_INIT || DEBUG_PROCESS_ERROR
392thread_t * this = CURRENT_THREAD; 
393uint32_t cycle = (uint32_t)hal_get_cycles();
394#endif
395
396    // get reference process cluster and local pointer
397    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
398    process_t * ref_ptr = GET_PTR( reference_process_xp );
399
400    // get pointer on process vmm
401    vmm = &local_process->vmm;
402
403    // initialize PID, REF_XP, PARENT_XP, and STATE
404    local_process->pid        = hal_remote_l32(  XPTR( ref_cxy , &ref_ptr->pid ) );
405    local_process->parent_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->parent_xp ) );
406    local_process->ref_xp     = reference_process_xp;
407    local_process->owner_xp   = reference_process_xp;
408    local_process->term_state = 0;
409
410#if DEBUG_PROCESS_COPY_INIT
411if( DEBUG_PROCESS_COPY_INIT < cycle )
412printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
413__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
414#endif
415
416// check user process
417assert( __FUNCTION__, (local_process->pid != 0), "LPID cannot be 0" );
418
419    // initialize VSL as empty
420    vmm->vsegs_nr = 0;
421        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
422
423    // create an empty GPT as required by the architecture
424    error = hal_gpt_create( &vmm->gpt );
425
426    if( error ) 
427    {
428
429#if DEBUG_PROCESS_ERROR
430printk("\n[ERROR] in %s : thread[%x,%x] cannot create empty GPT / cycle %d\n",
431__FUNCTION__, this->process->pid, this->trdid, cycle );
432#endif
433        return -1;
434    }
435
436    // initialize GPT and VSL locks
437        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
438
439    // register kernel vsegs in VMM as required by the architecture
440    error = hal_vmm_kernel_update( local_process );
441
442    if( error ) 
443    {
444
445#if DEBUG_PROCESS_ERROR
446printk("\n[ERROR] in %s : thread[%x,%x] cannot register kernel vsegs in VMM / cycle %d\n",
447__FUNCTION__, this->process->pid, this->trdid, cycle );
448#endif
449        return -1;
450    }
451
452    // create "args" and "envs" vsegs
453    // create "stacks" and "mmap" vsegs allocators
454    // initialize locks protecting GPT and VSL
455    error = vmm_user_init( local_process );
456
457    if( error ) 
458    {
459
460#if DEBUG_PROCESS_ERROR
461printk("\n[ERROR] in %s : thread[%x,%x] cannot register user vsegs in VMM / cycle %d\n",
462__FUNCTION__, this->process->pid, this->trdid, cycle );
463#endif
464        return -1;
465    }
466 
467#if (DEBUG_PROCESS_COPY_INIT & 1)
468cycle = (uint32_t)hal_get_cycles();
469if( DEBUG_PROCESS_COPY_INIT < cycle )
470printk("\n[%s] thread[%x,%x] initialized vmm for process %x / cycle %d\n", 
471__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
472#endif
473
474    // set process file descriptors array
475        process_fd_init( local_process );
476
477    // set vfs_root_xp / vfs_bin_xp / cwd_xp fields
478    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
479    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
480    local_process->cwd_xp      = XPTR_NULL;
481
482    // reset children list root (not used in a process descriptor copy)
483    xlist_root_init( XPTR( local_cxy , &local_process->children_root ) );
484    local_process->children_nr   = 0;
485    remote_queuelock_init( XPTR( local_cxy , &local_process->children_lock ),
486                           LOCK_PROCESS_CHILDREN );
487
488    // reset children_list (not used in a process descriptor copy)
489    xlist_entry_init( XPTR( local_cxy , &local_process->children_list ) );
490
491    // reset semaphores list root (not used in a process descriptor copy)
492    xlist_root_init( XPTR( local_cxy , &local_process->sem_root ) );
493    xlist_root_init( XPTR( local_cxy , &local_process->mutex_root ) );
494    xlist_root_init( XPTR( local_cxy , &local_process->barrier_root ) );
495    xlist_root_init( XPTR( local_cxy , &local_process->condvar_root ) );
496
497    // initialize th_tbl[] array and associated fields
498    uint32_t i;
499    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
500        {
501        local_process->th_tbl[i] = NULL;
502    }
503    local_process->th_nr  = 0;
504    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
505
506    // register new process descriptor in local cluster manager local_list
507    cluster_process_local_link( local_process );
508
509    // register new process descriptor in owner cluster manager copies_list
510    cluster_process_copies_link( local_process );
511
512        hal_fence();
513
514#if DEBUG_PROCESS_COPY_INIT
515cycle = (uint32_t)hal_get_cycles();
516if( DEBUG_PROCESS_COPY_INIT < cycle )
517printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
518__FUNCTION__, this->process->pid, this->trdid, local_process->pid, cycle );
519#endif
520
521    return 0;
522
523} // end process_copy_init()
524
525///////////////////////////////////////////
526void process_destroy( process_t * process )
527{
528    xptr_t      parent_xp;
529    process_t * parent_ptr;
530    cxy_t       parent_cxy;
531    xptr_t      children_lock_xp;
532    xptr_t      children_nr_xp;
533
534    pid_t       pid = process->pid;
535
536// check no more threads
537assert( __FUNCTION__, (process->th_nr == 0),
538"process %x in cluster %x contains threads", pid , local_cxy );
539
540#if DEBUG_PROCESS_DESTROY
541thread_t * this = CURRENT_THREAD;
542uint32_t cycle = (uint32_t)hal_get_cycles();
543if( DEBUG_PROCESS_DESTROY < cycle )
544printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
545__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
546#endif
547
548    // Destroy VMM
549    vmm_destroy( process );
550
551#if (DEBUG_PROCESS_DESTROY & 1)
552if( DEBUG_PROCESS_DESTROY < cycle )
553printk("\n[%s] thread[%x,%x] destroyed VMM for process %x in cluster %x\n",
554__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
555#endif
556
557    // remove process from local_list in local cluster manager
558    cluster_process_local_unlink( process );
559
560#if (DEBUG_PROCESS_DESTROY & 1)
561if( DEBUG_PROCESS_DESTROY < cycle )
562printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from local list\n",
563__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
564#endif
565
566    // remove process from copies_list in owner cluster manager
567    cluster_process_copies_unlink( process );
568
569#if (DEBUG_PROCESS_DESTROY & 1)
570if( DEBUG_PROCESS_DESTROY < cycle )
571printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from copies list\n",
572__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
573#endif
574
575    // when target process cluster is the owner cluster
576    // - remove process from TXT list and transfer ownership
577    // - remove process from children_list
578    // - release PID
579    if( CXY_FROM_PID( pid ) == local_cxy )
580    {
581        process_txt_detach( XPTR( local_cxy , process ) );
582
583#if (DEBUG_PROCESS_DESTROY & 1)
584if( DEBUG_PROCESS_DESTROY < cycle )
585printk("\n[%s] thread[%x,%x] removed process %x from TXT list\n",
586__FUNCTION__, this->process->pid, this->trdid, pid );
587#endif
588
589        // get pointers on parent process
590        parent_xp  = process->parent_xp;
591        parent_cxy = GET_CXY( parent_xp );
592        parent_ptr = GET_PTR( parent_xp );
593
594        // get extended pointer on children_lock in parent process
595        children_lock_xp = XPTR( parent_cxy , &parent_ptr->children_lock );
596        children_nr_xp   = XPTR( parent_cxy , &parent_ptr->children_nr );
597
598        // remove process from children_list
599        remote_queuelock_acquire( children_lock_xp );
600        xlist_unlink( XPTR( local_cxy , &process->children_list ) );
601            hal_remote_atomic_add( children_nr_xp , -1 );
602        remote_queuelock_release( children_lock_xp );
603
604#if (DEBUG_PROCESS_DESTROY & 1)
605if( DEBUG_PROCESS_DESTROY < cycle )
606printk("\n[%s] thread[%x,%x] removed process %x from parent process children list\n",
607__FUNCTION__, this->process->pid, this->trdid, pid );
608#endif
609
610        // release the process PID to cluster manager
611        cluster_pid_release( pid );
612
613#if (DEBUG_PROCESS_DESTROY & 1)
614if( DEBUG_PROCESS_DESTROY < cycle )
615printk("\n[%s] thread[%x,%x] released process PID %x to pmgr in cluster %x\n",
616__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
617#endif
618
619    }
620
621    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]
622
623    // FIXME close all open files [AG]
624
625    // FIXME synchronize dirty files [AG]
626
627    // release memory allocated to process descriptor
628        kmem_free( process , bits_log2(sizeof(process_t)) );
629
630#if DEBUG_PROCESS_DESTROY
631cycle = (uint32_t)hal_get_cycles();
632if( DEBUG_PROCESS_DESTROY < cycle )
633printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
634__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
635#endif
636
637}  // end process_destroy()
638
639///////////////////////////////////////////////////////////////////
640const char * process_action_str( process_sigactions_t action_type )
641{
642    switch ( action_type )
643    {
644        case BLOCK_ALL_THREADS:   return "BLOCK";
645        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
646        case DELETE_ALL_THREADS:  return "DELETE";
647        default:                  return "undefined";
648    }
649}
650
651////////////////////////////////////////
652void process_sigaction( pid_t       pid,
653                        uint32_t    type )
654{
655    cxy_t              owner_cxy;         // owner cluster identifier
656    lpid_t             lpid;              // process index in owner cluster
657    cluster_t        * cluster;           // pointer on cluster manager
658    xptr_t             root_xp;           // extended pointer on root of copies
659    xptr_t             lock_xp;           // extended pointer on lock protecting copies
660    xptr_t             iter_xp;           // iterator on copies list
661    xptr_t             process_xp;        // extended pointer on process copy
662    cxy_t              process_cxy;       // process copy cluster identifier
663    process_t        * process_ptr;       // local pointer on process copy
664    reg_t              save_sr;           // for critical section
665    thread_t         * client;            // pointer on client thread
666    xptr_t             client_xp;         // extended pointer on client thread
667    process_t        * local;             // pointer on process copy in local cluster
668    uint32_t           remote_nr;         // number of remote process copies
669    rpc_desc_t         rpc;               // shared RPC descriptor
670    uint32_t           responses;         // shared RPC responses counter
671
672    client    = CURRENT_THREAD;
673    client_xp = XPTR( local_cxy , client );
674    local     = NULL;
675    remote_nr = 0;
676
677    // check calling thread can yield
678    thread_assert_can_yield( client , __FUNCTION__ );
679
680#if DEBUG_PROCESS_SIGACTION
681uint32_t cycle = (uint32_t)hal_get_cycles();
682if( DEBUG_PROCESS_SIGACTION < cycle )
683printk("\n[%s] thread[%x,%x] enter to %s process %x / cycle %d\n",
684__FUNCTION__ , client->process->pid, client->trdid,
685process_action_str( type ) , pid , cycle );
686#endif
687
688    // get pointer on local cluster manager
689    cluster = LOCAL_CLUSTER;
690
691    // get owner cluster identifier and process lpid
692    owner_cxy = CXY_FROM_PID( pid );
693    lpid      = LPID_FROM_PID( pid );
694
695    // get root of list of copies and lock from owner cluster
696    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
697    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
698
699// check action type
700assert( __FUNCTION__, ((type == DELETE_ALL_THREADS ) ||
701         (type == BLOCK_ALL_THREADS )  ||
702         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
703             
704    // This client thread sends parallel RPCs to all remote clusters containing
705    // target process copies, waits for all responses, and then directly handles
706    // the threads in the local cluster, when required.
707    // The client thread allocates a - shared - RPC descriptor in its stack,
708    // because all parallel, non-blocking server threads use the same input
709    // arguments, and use the shared RPC response field.
710
711    // mask IRQs
712    hal_disable_irq( &save_sr);
713
714    // client thread blocks itself
715    thread_block( client_xp , THREAD_BLOCKED_RPC );
716
717    // initialize RPC responses counter
718    responses = 0;
719
720    // initialize shared RPC descriptor
721    // it can be shared because there are no output arguments
722    rpc.rsp       = &responses;
723    rpc.blocking  = false;
724    rpc.index     = RPC_PROCESS_SIGACTION;
725    rpc.thread    = client;
726    rpc.lid       = client->core->lid;
727    rpc.args[0]   = pid;
728    rpc.args[1]   = type;
729
730    // take the lock protecting process copies
731    remote_queuelock_acquire( lock_xp );
732
733    // scan list of process copies
734    XLIST_FOREACH( root_xp , iter_xp )
735    {
736        // get extended pointers and cluster on process
737        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
738        process_cxy = GET_CXY( process_xp );
739        process_ptr = GET_PTR( process_xp );
740
741        if( process_cxy == local_cxy )    // process copy is local
742        { 
743            local = process_ptr;
744        }
745        else                              // process copy is remote
746        {
747            // update number of remote process copies
748            remote_nr++;
749
750            // atomically increment RPC responses counter
751            hal_atomic_add( &responses , 1 );
752
753#if DEBUG_PROCESS_SIGACTION
754if( DEBUG_PROCESS_SIGACTION < cycle )
755printk("\n[%s] thread[%x,%x] send RPC to cluster %x for process %x\n",
756__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
757#endif
758            // call RPC in target cluster
759            rpc_send( process_cxy , &rpc );
760        }
761    }  // end list of copies
762
763    // release the lock protecting process copies
764    remote_queuelock_release( lock_xp );
765
766    // restore IRQs
767    hal_restore_irq( save_sr);
768
769    // - if there are remote process copies, the client thread deschedules
770    //   (it will be unblocked by the last RPC server thread).
771    // - if there are no remote copies, the client thread unblocks itself.
772    if( remote_nr )
773    {
774        sched_yield("blocked on rpc_process_sigaction");
775    } 
776    else
777    {
778        thread_unblock( client_xp , THREAD_BLOCKED_RPC );
779    }
780
781    // handle the local process copy if required
782    if( local != NULL )
783    {
784
785#if DEBUG_PROCESS_SIGACTION
786if( DEBUG_PROCESS_SIGACTION < cycle )
787printk("\n[%s] thread[%x,%x] handles local process %x in cluster %x\n",
788__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
789#endif
790        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp ); 
791        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local ); 
792        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
793    }
794
795#if DEBUG_PROCESS_SIGACTION
796cycle = (uint32_t)hal_get_cycles();
797if( DEBUG_PROCESS_SIGACTION < cycle )
798printk("\n[%s] thread[%x,%x] exit after %s process %x / cycle %d\n",
799__FUNCTION__, client->process->pid, client->trdid,
800process_action_str( type ), pid, cycle );
801#endif
802
803}  // end process_sigaction()
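
// Editorial usage sketch (not part of the original process.c source): it shows
// how process_sigaction() can be combined with the action types listed in
// process_action_str(). The example_ wrapper is hypothetical, and it assumes
// the caller runs in a context that is allowed to yield, as required by
// process_sigaction().
static void example_kill_all_threads( pid_t pid )
{
    // block all threads of the target process, in all clusters
    process_sigaction( pid , BLOCK_ALL_THREADS );

    // then mark all threads of the target process for deletion
    process_sigaction( pid , DELETE_ALL_THREADS );
}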
804
805/////////////////////////////////////////////////
806void process_block_threads( process_t * process )
807{
808    thread_t          * target;         // pointer on target thread
809    thread_t          * this;           // pointer on calling thread
810    uint32_t            ltid;           // index in process th_tbl[]
811    uint32_t            count;          // requests counter
812    volatile uint32_t   ack_count;      // acknowledges counter
813
814    // get calling thread pointer
815    this = CURRENT_THREAD;
816
817#if DEBUG_PROCESS_SIGACTION
818pid_t pid = process->pid;
819uint32_t cycle = (uint32_t)hal_get_cycles();
820if( DEBUG_PROCESS_SIGACTION < cycle )
821printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
822__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
823#endif
824
825// check target process is a user process
826assert( __FUNCTION__, (LPID_FROM_PID( process->pid ) != 0 ),
827"process %x is not a user process\n", process->pid );
828
829    // get lock protecting process th_tbl[]
830    rwlock_rd_acquire( &process->th_lock );
831
832    // loop on target process local threads
833    // we use both "ltid" and "count" because there can be "holes" in th_tbl
834    // - if the calling thread and the target thread are not running on the same
835    //   core, we ask the target scheduler to acknowledge the blocking,
836    //   to be sure that the target thread is not running.
837    // - if the calling thread and the target thread are running on the same core,
838    //   we don't need confirmation from the scheduler.
839           
840    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
841    {
842        target = process->th_tbl[ltid];
843
844        if( target != NULL )                                 // thread exists
845        {
846            count++;
847
848            // set the global blocked bit in target thread descriptor.
849            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
850 
851            if( this->core->lid != target->core->lid )
852            {
853                // increment acknowledges counter
854                hal_atomic_add( (void*)&ack_count , 1 );
855
856                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
857                thread_set_req_ack( target , (uint32_t *)&ack_count );
858
859                // force scheduling on target thread
860                dev_pic_send_ipi( local_cxy , target->core->lid );
861            }
862        }
863    }
864
865    // release lock protecting process th_tbl[]
866    rwlock_rd_release( &process->th_lock );
867
868    // wait for acknowledges from the other schedulers  TODO this could be improved...
869    while( 1 )
870    {
871        // exit when all scheduler acknowledges received
872        if ( ack_count == 0 ) break;
873   
874        // wait 1000 cycles before retry
875        hal_fixed_delay( 1000 );
876    }
877
878#if DEBUG_PROCESS_SIGACTION
879cycle = (uint32_t)hal_get_cycles();
880if( DEBUG_PROCESS_SIGACTION < cycle )
881printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
882__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
883#endif
884
885}  // end process_block_threads()
886
887/////////////////////////////////////////////////
888void process_delete_threads( process_t * process,
889                             xptr_t      client_xp )
890{
891    thread_t          * target;        // local pointer on target thread
892    xptr_t              target_xp;     // extended pointer on target thread
893    cxy_t               owner_cxy;     // owner process cluster
894    uint32_t            ltid;          // index in process th_tbl
895    uint32_t            count;         // threads counter
896
897    // get calling thread pointer
898
899    // get target process owner cluster
900    owner_cxy = CXY_FROM_PID( process->pid );
901
902#if DEBUG_PROCESS_SIGACTION
903thread_t * this  = CURRENT_THREAD;
904uint32_t   cycle = (uint32_t)hal_get_cycles();
905if( DEBUG_PROCESS_SIGACTION < cycle )
906printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
907__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
908#endif
909
910// check target process is a user process
911assert( __FUNCTION__, (LPID_FROM_PID( process->pid ) != 0),
912"process %x is not a user process\n", process->pid );
913
914    // get lock protecting process th_tbl[]
915    rwlock_wr_acquire( &process->th_lock );
916
917    // loop on target process local threads                       
918    // we use both "ltid" and "count" because there can be "holes" in th_tbl
919    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
920    {
921        target = process->th_tbl[ltid];
922
923        if( target != NULL )    // valid thread 
924        {
925            count++;
926            target_xp = XPTR( local_cxy , target );
927
928            // main thread and client thread should not be deleted
929            if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
930                (client_xp) != target_xp )                           // not client thread
931            {
932                // mark target thread for delete and block it
933                thread_delete_request( target_xp , true );                   // forced
934            }
935        }
936    }
937
938    // release lock protecting process th_tbl[]
939    rwlock_wr_release( &process->th_lock );
940
941#if DEBUG_PROCESS_SIGACTION
942cycle = (uint32_t)hal_get_cycles();
943if( DEBUG_PROCESS_SIGACTION < cycle )
944printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
945__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
946#endif
947
948}  // end process_delete_threads()
949
950///////////////////////////////////////////////////
951void process_unblock_threads( process_t * process )
952{
953    thread_t          * target;        // pointer on target thread
954    uint32_t            ltid;          // index in process th_tbl
955    uint32_t            count;         // requests counter
956
957#if DEBUG_PROCESS_SIGACTION
958thread_t * this  = CURRENT_THREAD;
959pid_t      pid   = process->pid;
960uint32_t   cycle = (uint32_t)hal_get_cycles();
961if( DEBUG_PROCESS_SIGACTION < cycle )
962printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
963__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
964#endif
965
966// check target process is a user process
967assert( __FUNCTION__, ( LPID_FROM_PID( process->pid ) != 0 ),
968"process %x is not a user process\n", process->pid );
969
970    // get lock protecting process th_tbl[]
971    rwlock_rd_acquire( &process->th_lock );
972
973    // loop on process threads to unblock all threads
974    // we use both "ltid" and "count" because there can be "holes" in th_tbl
975    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
976    {
977        target = process->th_tbl[ltid];
978
979        if( target != NULL )             // thread found
980        {
981            count++;
982
983            // reset the global blocked bit in target thread descriptor.
984            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
985        }
986    }
987
988    // release lock protecting process th_tbl[]
989    rwlock_rd_release( &process->th_lock );
990
991#if DEBUG_PROCESS_SIGACTION
992cycle = (uint32_t)hal_get_cycles();
993if( DEBUG_PROCESS_SIGACTION < cycle )
994printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
995__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
996#endif
997
998}  // end process_unblock_threads()
999
1000///////////////////////////////////////////////
1001process_t * process_get_local_copy( pid_t pid )
1002{
1003    error_t        error;
1004    process_t    * process;       // local pointer on process
1005    xptr_t         process_xp;    // extended pointer on process
1006
1007#if DEBUG_PROCESS_GET_LOCAL_COPY || DEBUG_PROCESS_ERROR
1008thread_t * this  = CURRENT_THREAD;
1009uint32_t   cycle = (uint32_t)hal_get_cycles();
1010#endif
1011
1012    cluster_t * cluster = LOCAL_CLUSTER;
1013
1014#if DEBUG_PROCESS_GET_LOCAL_COPY
1015if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
1016printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
1017__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
1018#endif
1019
1020    // get lock protecting local list of processes
1021    remote_queuelock_acquire( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
1022
1023    // scan the local list of process descriptors to find the process
1024    xptr_t  iter;
1025    bool_t  found = false;
1026    XLIST_FOREACH( XPTR( local_cxy , &cluster->pmgr.local_root ) , iter )
1027    {
1028        process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
1029        process     = GET_PTR( process_xp );
1030        if( process->pid == pid )
1031        {
1032            found = true;
1033            break;
1034        }
1035    }
1036
1037    // release lock protecting local list of processes
1038    remote_queuelock_release( XPTR( local_cxy , &cluster->pmgr.local_lock ) );
1039
1040    // allocate memory for a new local process descriptor
1041    // and initialize it from the reference cluster if not found
1042    if( !found )
1043    {
1044        // get extended pointer on reference process descriptor
1045        xptr_t ref_xp = cluster_get_reference_process_from_pid( pid );
1046
1047        assert( __FUNCTION__, (ref_xp != XPTR_NULL) , "illegal pid\n" );
1048
1049        // allocate memory for local process descriptor
1050        process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO );
1051
1052        if( process == NULL )  return NULL;
1053
1054        // initialize local process descriptor copy
1055        error = process_copy_init( process , ref_xp );
1056
1057        if( error )
1058        {
1059
1060#if DEBUG_PROCESS_ERROR
1061printk("\n[ERROR] in %s : thread[%x,%x] cannot initialize local process copy / cycle %d\n",
1062__FUNCTION__, this->process->pid, this->trdid, cycle );
1063#endif
1064            return NULL;
1065        }
1066    }
1067
1068#if DEBUG_PROCESS_GET_LOCAL_COPY
1069cycle = (uint32_t)hal_get_cycles();
1070if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
1071printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
1072__FUNCTION__, this->process->pid, this->trdid, local_cxy, process, cycle );
1073#endif
1074
1075    return process;
1076
1077}  // end process_get_local_copy()
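
// Editorial usage sketch (not part of the original process.c source): it shows
// a typical use of process_get_local_copy(), which returns the local process
// descriptor copy, creating it from the reference cluster when needed. The
// example_ helper and its error convention (returning 0) are hypothetical.
static pid_t example_get_parent_pid( pid_t pid )
{
    // get pointer on local process copy
    process_t * process = process_get_local_copy( pid );
    if( process == NULL ) return 0;

    // get the parent PID from the parent_xp field of this copy
    return process_get_ppid( XPTR( local_cxy , process ) );
}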
1078
1079////////////////////////////////////////////
1080pid_t process_get_ppid( xptr_t  process_xp )
1081{
1082    cxy_t       process_cxy;
1083    process_t * process_ptr;
1084    xptr_t      parent_xp;
1085    cxy_t       parent_cxy;
1086    process_t * parent_ptr;
1087
1088    // get process cluster and local pointer
1089    process_cxy = GET_CXY( process_xp );
1090    process_ptr = GET_PTR( process_xp );
1091
1092    // get pointers on parent process
1093    parent_xp  = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
1094    parent_cxy = GET_CXY( parent_xp );
1095    parent_ptr = GET_PTR( parent_xp );
1096
1097    return hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
1098}
1099
1100//////////////////////////////////////////////////////////////////////////////////////////
1101// File descriptor array related functions
1102//////////////////////////////////////////////////////////////////////////////////////////
1103
1104///////////////////////////////////////////
1105char * process_fd_type_str( uint32_t type )
1106{
1107    switch( type )
1108    {
1109        case FILE_TYPE_REG  : return "FILE";
1110        case FILE_TYPE_DIR  : return "DIR";
1111        case FILE_TYPE_FIFO : return "FIFO";
1112        case FILE_TYPE_PIPE : return "PIPE";
1113        case FILE_TYPE_SOCK : return "SOCK";
1114        case FILE_TYPE_DEV  : return "DEV";
1115        case FILE_TYPE_BLK  : return "BLK";
1116        case FILE_TYPE_SYML : return "SYML";
1117       
1118        default              : return "undefined";
1119    }
1120}
1121   
1122///////////////////////////////////////////
1123void process_fd_init( process_t * process )
1124{
1125    uint32_t fd;
1126
1127    // initialize lock
1128    remote_queuelock_init( XPTR( local_cxy , &process->fd_array.lock ), LOCK_PROCESS_FDARRAY );
1129
1130    // initialize number of open files
1131    process->fd_array.max = 0;
1132
1133    // initialize array
1134    for ( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
1135    {
1136        process->fd_array.array[fd] = XPTR_NULL;
1137    }
1138}
1139
1140////////////////////////////////////////////////////
1141error_t process_fd_register( xptr_t      process_xp,
1142                             xptr_t      file_xp,
1143                             uint32_t  * fdid )
1144{
1145    bool_t    found;
1146    uint32_t  id;
1147    uint32_t  max;             // current value of max non-free slot index
1148    xptr_t    entry_xp;        // current value of one fd_array entry
1149    xptr_t    lock_xp;         // extended pointer on lock protecting fd_array
1150    xptr_t    max_xp;          // extended pointer on max field in fd_array
1151
1152#if DEBUG_PROCESS_FD_REGISTER
1153thread_t * this  = CURRENT_THREAD;
1154uint32_t   cycle = (uint32_t)hal_get_cycles();
1155#endif
1156
1157    // get target process cluster and local pointer
1158    process_t * process_ptr = GET_PTR( process_xp );
1159    cxy_t       process_cxy = GET_CXY( process_xp );
1160
1161// check target process is owner process
1162assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ) ),
1163"process must be owner process\n" );
1164
1165#if DEBUG_PROCESS_FD_REGISTER
1166pid_t  tgt_pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1167if( DEBUG_PROCESS_FD_REGISTER < cycle )
1168printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
1169__FUNCTION__, this->process->pid, this->trdid, tgt_pid, cycle );
1170#endif
1171
1172    // build extended pointers on lock & max
1173    lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1174    max_xp  = XPTR( process_cxy , &process_ptr->fd_array.max );
1175
1176    // take lock protecting fd_array
1177        remote_queuelock_acquire( lock_xp );
1178
1179    found   = false;
1180
1181    // get current value of max_fdid
1182    max = hal_remote_l32( max_xp );
1183
1184    for ( id = 0; id < CONFIG_PROCESS_FILE_MAX_NR ; id++ )
1185    {
1186        // get fd_array entry
1187        entry_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) );
1188       
1189        // take the first empty slot
1190        if ( entry_xp == XPTR_NULL )
1191        {
1192            // update  fd_array
1193            hal_remote_s64( XPTR( process_cxy , &process_ptr->fd_array.array[id] ) , file_xp );
1194
1195            // update max when required
1196            if( id > max ) hal_remote_s32( max_xp , id );
1197
1198            // exit loop
1199                        *fdid = id;
1200            found = true;
1201            break;
1202        }
1203    }
1204
1205    // release lock protecting fd_array
1206        remote_queuelock_release( lock_xp );
1207
1208#if DEBUG_PROCESS_FD_REGISTER
1209cycle = (uint32_t)hal_get_cycles();
1210if( DEBUG_PROCESS_FD_REGISTER < cycle )
1211printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n",
1212__FUNCTION__, this->process->pid, this->trdid, tgt_pid, id, cycle );
1213#endif
1214
1215    if ( !found ) return -1;
1216    else          return 0;
1217
1218}  // end process_fd_register()
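
// Editorial usage sketch (not part of the original process.c source): it shows
// the expected calling pattern for process_fd_register() and the matching
// release with process_fd_remove(), both applied to the owner process. The
// example_ name is hypothetical.
static error_t example_register_and_release( xptr_t owner_xp,
                                             xptr_t file_xp )
{
    uint32_t fdid;

    // allocate a free slot in the owner process fd_array
    if( process_fd_register( owner_xp , file_xp , &fdid ) ) return -1;

    // ... the file can now be accessed through fdid ...

    // release the slot in the owner process and in all process copies
    process_fd_remove( owner_xp , fdid );

    return 0;
}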
1219
1220/////////////////////////////////////////////
1221void process_fd_remove( xptr_t    process_xp,
1222                        uint32_t  fdid )
1223{
1224    pid_t       pid;           // target process PID
1225    lpid_t      lpid;          // target process LPID
1226    xptr_t      file_xp;       // extended pointer on file descriptor
1227    xptr_t      iter_xp;       // iterator for list of process copies
1228    xptr_t      copy_xp;       // extended pointer on process copy
1229    process_t * copy_ptr;      // local pointer on process copy 
1230    cxy_t       copy_cxy;      // process copy cluster identifier
1231
1232    // get target process cluster and local pointer
1233    process_t * process_ptr = GET_PTR( process_xp );
1234    cxy_t       process_cxy = GET_CXY( process_xp );
1235
1236// check target process is owner process
1237assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) ) ),
1238"process must be owner process\n" );
1239
1240    // get target process pid and lpid
1241    pid  = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) );
1242    lpid = LPID_FROM_PID( pid );
1243
1244#if DEBUG_PROCESS_FD_REMOVE
1245uint32_t    cycle = (uint32_t)hal_get_cycles();
1246thread_t  * this  = CURRENT_THREAD;
1247if( DEBUG_PROCESS_FD_REMOVE < cycle )
1248printk("\n[%s] thread[%x,%x] enter for fdid %d in process %x / cycle %d\n",
1249__FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle );
1250#endif
1251
1252    // get extended pointer on file descriptor
1253    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1254
1255    // build extended pointers on list_of_copies root and lock (in owner cluster)
1256    xptr_t copies_root_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_root[lpid] );
1257    xptr_t copies_lock_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_lock[lpid] );
1258 
1259    // build extended pointer on fd_array lock and max
1260    xptr_t fd_lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1261    xptr_t fd_max_xp  = XPTR( process_cxy , &process_ptr->fd_array.max );
1262
1263    // take lock protecting fd_array
1264        remote_queuelock_acquire( fd_lock_xp );
1265
1266    // take the lock protecting the list of copies
1267    remote_queuelock_acquire( copies_lock_xp );
1268
1269    // get max value
1270    uint32_t max = hal_remote_l32( fd_max_xp );
1271
1272    // loop on list of process copies
1273    XLIST_FOREACH( copies_root_xp , iter_xp )
1274    {
1275        // get pointers on process copy
1276        copy_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
1277        copy_ptr = GET_PTR( copy_xp );
1278        copy_cxy = GET_CXY( copy_xp );
1279
1280        // release the fd_array entry in process copy
1281        hal_remote_s64( XPTR( copy_cxy , &copy_ptr->fd_array.array[fdid] ), XPTR_NULL );
1282    }
1283
1284    // update max when required
1285    if( fdid == max ) hal_remote_s32( fd_max_xp , max-1 );
1286
1287    // release the lock protecting fd_array
1288        remote_queuelock_release( fd_lock_xp );
1289
1290    // release the lock protecting the list of copies
1291    remote_queuelock_release( copies_lock_xp );
1292
1293#if DEBUG_PROCESS_FD_REMOVE
1294cycle = (uint32_t)hal_get_cycles();
1295if( DEBUG_PROCESS_FD_REMOVE < cycle )
1296printk("\n[%s] thread[%x,%x] exit for fdid %d in process %x / cycle %d\n",
1297__FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle );
1298#endif
1299
1300}  // end process_fd_remove()
1301
1302//////////////////////////////////////////////
1303void process_fd_clean_all( xptr_t process_xp )
1304{
1305    uint32_t  fdid;
1306    xptr_t    file_xp;         // one fd_array entry
1307    xptr_t    lock_xp;         // extended pointer on lock protecting fd_array
1308    uint32_t  max;             // number of registered files
1309
1310    // get process cluster, local pointer and PID
1311    process_t * process_ptr = GET_PTR( process_xp );
1312    cxy_t       process_cxy = GET_CXY( process_xp );
1313
1314// check target process is owner process
1315assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp )) ),
1316"process must be owner process\n" );
1317
1318#if DEBUG_PROCESS_FD_CLEAN_ALL
1319thread_t * this  = CURRENT_THREAD;
1320uint32_t   cycle = (uint32_t)hal_get_cycles();
1321if( DEBUG_PROCESS_FD_CLEAN_ALL < cycle )
1322printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
1323__FUNCTION__, this->process->pid, this->trdid, cycle );
1324
1325process_fd_display( process_xp );
1326#endif
1327
1328    // build extended pointer on lock protecting the fd_array
1329    lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
1330
1331    // get max index for fd_array
1332    max = hal_remote_l32( XPTR( process_cxy , &process_ptr->fd_array.max ));
1333
1334    // take lock protecting fd_array
1335        remote_queuelock_acquire( lock_xp );
1336
1337    for( fdid = 0 ; fdid <= max ; fdid++ )
1338    {
1339        // get fd_array entry
1340        file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ) );
1341       
1342        if ( file_xp != XPTR_NULL )
1343        {
1344            vfs_file_t * file_ptr = GET_PTR( file_xp );
1345            cxy_t        file_cxy = GET_CXY( file_xp );
1346
1347            // get file type
1348            uint32_t file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ));
1349 
1350            if( file_type == FILE_TYPE_REG )
1351            {
1352                vfs_close( file_xp , fdid );
1353            }
1354            if( file_type == FILE_TYPE_SOCK )
1355            {
1356                socket_close( file_xp , fdid );
1357            }
1358        }
1359    }
1360
1361    // release lock protecting fd_array
1362        remote_queuelock_release( lock_xp );
1363
1364#if DEBUG_PROCESS_FD_CLEAN_ALL
1365cycle = (uint32_t)hal_get_cycles();
1366if( DEBUG_PROCESS_FD_CLEAN_ALL < cycle )
1367printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
1368__FUNCTION__, this->process->pid, this->trdid, cycle );
1369#endif
1370
1371}  // end process_fd_clean_all()
1372
1373//////////////////////////////////////////////////////////////
1374xptr_t process_fd_get_xptr_from_owner( xptr_t      process_xp,
1375                                       uint32_t    fdid )
1376{
1377    cxy_t       process_cxy = GET_CXY( process_xp );
1378    process_t * process_ptr = GET_PTR( process_xp );
1379
1380assert( __FUNCTION__, (hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp )) == process_xp),
1381"process_xp argument must be the owner process" );
1382
1383    // access owner process fd_array
1384    return hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1385
1386}  // end process_fd_get_xptr_from_owner()
1387
1388///////////////////////////////////////////////////////////
1389xptr_t process_fd_get_xptr_from_local( process_t * process,
1390                                       uint32_t    fdid )
1391{
1392    xptr_t  file_xp;
1393    xptr_t  lock_xp;
1394
1395    // access local copy of process descriptor
1396    file_xp = process->fd_array.array[fdid];
1397
1398    if( file_xp == XPTR_NULL )
1399    {
1400        // get owner process cluster and local pointer
1401        xptr_t      owner_xp  = process->owner_xp;
1402        cxy_t       owner_cxy = GET_CXY( owner_xp );
1403        process_t * owner_ptr = GET_PTR( owner_xp );
1404
1405        // build extended pointer on lock protecting fd_array
1406        lock_xp = XPTR( owner_cxy , &owner_ptr->fd_array.lock );
1407
1408        // take lock protecting fd_array
1409            remote_queuelock_acquire( lock_xp );
1410
1411        // access owner process descriptor
1412        file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[fdid] ) );
1413
1414        if( file_xp != XPTR_NULL ) 
1415        {
1416           // update local fd_array
1417            process->fd_array.array[fdid] = file_xp;
1418        }
1419
1420        // release lock protecting fd_array
1421            remote_queuelock_release( lock_xp );
1422    }
1423
1424    return file_xp;
1425
1426}  // end process_fd_get_xptr_from_local()
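
// Editorial usage sketch (not part of the original process.c source): it shows
// how a local fd_array copy can be used to translate a file descriptor index
// into an extended pointer on the file descriptor, falling back transparently
// to the owner process when the local entry is empty. The example_ helper is
// hypothetical.
static xptr_t example_fdid_to_file_xp( uint32_t fdid )
{
    // get local pointer on the calling thread's process descriptor copy
    process_t * process = CURRENT_THREAD->process;

    // reject out-of-range indexes
    if( fdid >= CONFIG_PROCESS_FILE_MAX_NR ) return XPTR_NULL;

    // search the local copy first, then the owner process fd_array
    return process_fd_get_xptr_from_local( process , fdid );
}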
1427
1428////////////////////////////////////////////
1429error_t process_fd_replicate( xptr_t dst_xp,
1430                              xptr_t src_xp )
1431{
1432    uint32_t fdid;      // current file descriptor index
1433    xptr_t   old_xp;    // extended pointer on a file descriptor (stored in SRC fd_array)
1434    xptr_t   new_xp;    // extended pointer on a file descriptor (stored in DST fd_array)
1435    error_t  error;
1436
1437    // get cluster and local pointer for SRC process
1438    cxy_t       src_cxy = GET_CXY( src_xp );
1439    process_t * src_ptr = GET_PTR( src_xp );
1440
1441assert( __FUNCTION__, (src_xp == hal_remote_l64( XPTR( src_cxy , &src_ptr->owner_xp ))),
1442"src_xp process not in owner cluster" );
1443
1444    // get cluster and local pointer for DST fd_array
1445    cxy_t       dst_cxy = GET_CXY( dst_xp );
1446    process_t * dst_ptr = GET_PTR( dst_xp );
1447
1448assert( __FUNCTION__, (dst_xp == hal_remote_l64( XPTR( dst_cxy , &dst_ptr->owner_xp ))),
1449"dst_xp process not in owner cluster" );
1450
1451    // build extended pointers on SRC fd_array lock and max fields
1452    xptr_t  src_lock_xp = XPTR( src_cxy , &src_ptr->fd_array.lock );
1453    xptr_t  src_max_xp  = XPTR( src_cxy , &src_ptr->fd_array.max );
1454
1455    // get the remote lock protecting the src fd_array
1456        remote_queuelock_acquire( src_lock_xp );
1457 
1458    // loop on fd_array entries
1459    for( fdid = 0 ; fdid <= hal_remote_l32( src_max_xp ) ; fdid++ )
1460        {
1461                old_xp = (xptr_t)hal_remote_l64( XPTR( src_cxy , &src_ptr->fd_array.array[fdid] ) );
1462
1463                if( old_xp != XPTR_NULL )
1464                {
1465            // get the existing file descriptor cluster and local pointer
1466            vfs_file_t * old_ptr = GET_PTR( old_xp );
1467            cxy_t        old_cxy = GET_CXY( old_xp );
1468
1469            // get existing file attributes and local pointer on inode
1470            uint32_t      attr      = hal_remote_l32( XPTR( old_cxy , &old_ptr->attr ) );
1471            vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( old_cxy , &old_ptr->inode ) );
1472
1473            // create a new file descriptor in same cluster as the existing one
1474            error = vfs_file_create( XPTR( old_cxy , inode_ptr ),
1475                                     attr,
1476                                     &new_xp );
1477            if( error )
1478            {
1479
1480#if DEBUG_PROCESS_ERROR
1481thread_t * this  = CURRENT_THREAD;
1482uint32_t   cycle = (uint32_t)hal_get_cycles();
1483printk("\n[ERROR] in %s : thread[%x,%x] cannot create file descriptor / cycle %d\n",
1484__FUNCTION__, this->process->pid, this->trdid, cycle );
1485#endif
1486                return -1;
1487            }
1488
1489                        // register new_xp in DST fd_array
1490                        hal_remote_s64( XPTR( dst_cxy , &dst_ptr->fd_array.array[fdid] ) , new_xp );
1491                }
1492        }
1493
1494    // release lock on source process fd_array
1495        remote_queuelock_release( src_lock_xp );
1496
1497    return 0;
1498
1499}  // end process_fd_replicate()
1500
1501
1502////////////////////////////////////
1503bool_t process_fd_array_full( void )
1504{
1505    // get extended pointer on owner process
1506    xptr_t owner_xp = CURRENT_THREAD->process->owner_xp;
1507
1508    // get owner process cluster and local pointer
1509    process_t * owner_ptr = GET_PTR( owner_xp );
1510    cxy_t       owner_cxy = GET_CXY( owner_xp );
1511
1512    // get the max fdid (highest registered index) from the owner fd_array
1513    uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max ));
1514
1515        return ( max == CONFIG_PROCESS_FILE_MAX_NR - 1 );
1516}
1517
1518////////////////////////////////////////////
1519void process_fd_display( xptr_t process_xp )
1520{
1521    uint32_t      fdid;
1522    xptr_t        file_xp;
1523    vfs_file_t *  file_ptr;
1524    cxy_t         file_cxy;
1525    uint32_t      file_type;
1526    xptr_t        inode_xp;
1527    vfs_inode_t * inode_ptr;
1528
1529    char          name[CONFIG_VFS_MAX_NAME_LENGTH];
1530
1531    // get process cluster and local pointer
1532    process_t * process_ptr = GET_PTR( process_xp );
1533    cxy_t       process_cxy = GET_CXY( process_xp );
1534
1535    // get process PID
1536    pid_t  pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ));
1537
1538    // get pointers on owner process descriptor
1539    xptr_t      owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ));
1540    process_t * owner_ptr = GET_PTR( owner_xp );
1541    cxy_t       owner_cxy = GET_CXY( owner_xp );
1542
1543    // get max fdid from owner process descriptor
1544    uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max ));
1545
1546    // get pointers on TXT0 chdev
1547    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
1548    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
1549    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
1550
1551    // get extended pointer on remote TXT0 lock
1552    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
1553
1554    // get TXT0 lock
1555    remote_busylock_acquire( lock_xp );
1556
1557    nolock_printk("\n***** fd_array for pid %x in cluster %x / max %d *****\n",
1558    pid, process_cxy, max );
1559
1560    for( fdid = 0 ; fdid <= max ; fdid++ )
1561    {
1562        // get pointers on file descriptor
1563        file_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[fdid] ));
1564        file_ptr = GET_PTR( file_xp );
1565        file_cxy = GET_CXY( file_xp );
1566
1567        if( file_xp != XPTR_NULL )
1568        {
1569            // get file type
1570            file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type )); 
1571
1572            // get the file name if the inode exists
1573            if( (file_type != FILE_TYPE_PIPE) && (file_type != FILE_TYPE_SOCK) )
1574            {
1575                // get inode pointers
1576                inode_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ));
1577                inode_xp  = XPTR( file_cxy , inode_ptr );
1578
1579                // get file name
1580                vfs_inode_get_name( inode_xp , name );
1581
1582                // display relevant file descriptor info
1583                nolock_printk(" - %d : type %s / ptr %x (%s)\n",
1584                fdid, process_fd_type_str(file_type), file_ptr, name );
1585            }
1586            else    // PIPE or SOCK types
1587            {
1588                // display relevant file descriptor info
1589                nolock_printk(" - %d : type %s / ptr %x\n",
1590                fdid , process_fd_type_str(file_type), file_ptr );
1591            }
1592        }
1593        else
1594        {
1595            nolock_printk(" - %d : empty slot\n", fdid );
1596        }
1597    }
1598
1599    // release TXT0 lock
1600    remote_busylock_release( lock_xp );
1601
1602}   // end process_fd_display()
1603
1604////////////////////////////////////////////////////////////////////////////////////
1605//  Thread related functions
1606////////////////////////////////////////////////////////////////////////////////////
1607
1608/////////////////////////////////////////////////////
1609error_t process_register_thread( process_t * process,
1610                                 thread_t  * thread,
1611                                 trdid_t   * trdid )
1612{
1613    ltid_t         ltid;
1614    ltid_t         ltid_min;
1615
1616    bool_t         found = false;
1617    lpid_t         lpid  = LPID_FROM_PID( process->pid );
1618 
1619// check arguments
1620assert( __FUNCTION__, (process != NULL) , "process argument is NULL" );
1621assert( __FUNCTION__, (thread != NULL) , "thread argument is NULL" );
1622
1623    // get the lock protecting th_tbl for all threads but the idle thread
1624    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
1625
1626    // compute ltid_min : 0 for a user process / 1 for the kernel process
1627    ltid_min = (lpid == 0) ? 1 : 0;
1628 
1629    // scan th_tbl
1630    for( ltid = ltid_min ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
1631    {
1632        if( process->th_tbl[ltid] == NULL )
1633        {
1634            found = true;
1635            break;
1636        }
1637    }
1638
1639    if( found )
1640    {
1641        // register thread in th_tbl[]
1642        process->th_tbl[ltid] = thread;
1643        process->th_nr++;
1644
1645        // return the trdid (encoding both the cluster and the local index ltid)
1646        *trdid = TRDID( local_cxy , ltid );
1647    }
1648
1649    // release the lock protecting th_tbl
1650    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
1651
1652    return (found) ? 0 : -1;
1653
1654}  // end process_register_thread()
1655
1656///////////////////////////////////////////////////
1657uint32_t process_remove_thread( thread_t * thread )
1658{
1659    uint32_t count;  // number of threads in local process descriptor
1660
1661// check thread
1662assert( __FUNCTION__, (thread != NULL) , "thread argument is NULL" );
1663
1664    process_t * process = thread->process;
1665
1666    // get thread local index
1667    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
1668   
1669    // get the lock protecting th_tbl[]
1670    rwlock_wr_acquire( &process->th_lock );
1671
1672    // get number of threads
1673    count = process->th_nr;
1674
1675// check th_nr value
1676assert( __FUNCTION__, (count > 0) , "process th_nr cannot be 0" );
1677
1678    // remove thread from th_tbl[]
1679    process->th_tbl[ltid] = NULL;
1680    process->th_nr = count-1;
1681
1682    // release lock protecting th_tbl
1683    rwlock_wr_release( &process->th_lock );
1684
1685    return count;
1686
1687}  // end process_remove_thread()
1688
1689/////////////////////////////////////////////////////////
1690error_t process_make_fork( xptr_t      parent_process_xp,
1691                           xptr_t      parent_thread_xp,
1692                           pid_t     * child_pid,
1693                           thread_t ** child_thread )
1694{
1695    process_t * process;         // local pointer on child process descriptor
1696    thread_t  * thread;          // local pointer on child thread descriptor
1697    pid_t       new_pid;         // process identifier for child process
1698    pid_t       parent_pid;      // process identifier for parent process
1699    xptr_t      ref_xp;          // extended pointer on reference process
1700    xptr_t      vfs_bin_xp;      // extended pointer on .elf file
1701    error_t     error;
1702
1703    // get cluster and local pointer for parent process
1704    cxy_t       parent_process_cxy = GET_CXY( parent_process_xp );
1705    process_t * parent_process_ptr = GET_PTR( parent_process_xp );
1706
1707    // get parent process PID and extended pointer on .elf file
1708    parent_pid = hal_remote_l32 (XPTR( parent_process_cxy , &parent_process_ptr->pid));
1709    vfs_bin_xp = hal_remote_l64(XPTR( parent_process_cxy , &parent_process_ptr->vfs_bin_xp));
1710
1711    // get extended pointer on reference process
1712    ref_xp = hal_remote_l64( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) );
1713
1714// check parent process is the reference process
1715assert( __FUNCTION__, (parent_process_xp == ref_xp ) ,
1716"parent process must be the reference process" );
1717
1718#if DEBUG_PROCESS_MAKE_FORK || DEBUG_PROCESS_ERROR
1719uint32_t   cycle  = (uint32_t)hal_get_cycles();
1720thread_t * this  = CURRENT_THREAD;
1721trdid_t    trdid = this->trdid;
1722pid_t      pid   = this->process->pid;
1723#endif
1724
1725#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1726if( DEBUG_PROCESS_MAKE_FORK < cycle )
1727printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
1728__FUNCTION__, pid, trdid, local_cxy, cycle );
1729#endif
1730
1731    // allocate a process descriptor
1732    process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO );
1733
1734    if( process == NULL )
1735    {
1736
1737#if DEBUG_PROCESS_ERROR
1738printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate process descriptor / cxy %x / cycle %d\n", 
1739__FUNCTION__, pid, trdid, local_cxy, cycle ); 
1740#endif
1741        return -1;
1742    }
1743
1744    // allocate a child PID from local cluster
1745    error = cluster_pid_alloc( process , &new_pid );
1746    if( error ) 
1747    {
1748
1749#if DEBUG_PROCESS_ERROR
1750printk("\n[ERROR] in %s : thread[%x,%x] cannot get PID / cxy %x / cycle %d\n", 
1751__FUNCTION__, pid, trdid, local_cxy, cycle ); 
1752#endif
1753            kmem_free( process , bits_log2(sizeof(process_t)) );
1754        return -1;
1755    }
1756
1757#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1758if( DEBUG_PROCESS_MAKE_FORK < cycle )
1759printk("\n[%s] thread[%x,%x] allocated child_process %x\n",
1760__FUNCTION__, pid, trdid, new_pid );
1761#endif
1762
1763    // initialize the child process descriptor from the parent process descriptor
1764    error = process_reference_init( process,
1765                                    new_pid,
1766                                    parent_process_xp );
1767    if( error ) 
1768    {
1769
1770#if DEBUG_PROCESS_ERROR
1771printk("\n[ERROR] in %s : thread[%x,%x] cannot initialize child process / cxy %x / cycle %d\n", 
1772__FUNCTION__, pid, trdid, local_cxy, cycle ); 
1773#endif
1774        cluster_pid_release( new_pid );
1775            kmem_free( process , bits_log2(sizeof(process_t)) );
1776        return -1;
1777    }
1778
1779#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1780if( DEBUG_PROCESS_MAKE_FORK < cycle )
1781printk("\n[%s] thread[%x,%x] initialized child_process %x\n",
1782__FUNCTION__, pid, trdid, new_pid );
1783#endif
1784
1785    // copy VMM from parent descriptor to child descriptor
1786    error = vmm_fork_copy( process,
1787                           parent_process_xp );
1788    if( error )
1789    {
1790
1791#if DEBUG_PROCESS_ERROR
1792printk("\n[ERROR] in %s : thread[%x,%x] cannot copy VMM to child process / cxy %x / cycle %d\n", 
1793__FUNCTION__, pid, trdid, local_cxy, cycle ); 
1794#endif
1795        cluster_pid_release( new_pid );
1796            kmem_free( process , bits_log2(sizeof(process_t)) );
1797        return -1;
1798    }
1799
1800#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1801if( DEBUG_PROCESS_MAKE_FORK < cycle )
1802{
1803    printk("\n[%s] thread[%x,%x] copied VMM from parent to child\n",
1804    __FUNCTION__, pid, trdid );
1805    hal_vmm_display( XPTR( local_cxy , process ) , true );
1806}
1807#endif
1808
1809    // if parent_process is INIT, or if parent_process is the TXT owner,
1810    // the child_process becomes the owner of its TXT terminal
1811    if( (parent_pid == 1) || process_txt_is_owner( parent_process_xp ) )
1812    {
1813        process_txt_set_ownership( XPTR( local_cxy , process ) );
1814
1815#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1816if( DEBUG_PROCESS_MAKE_FORK < cycle )
1817printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership\n",
1818__FUNCTION__ , pid, trdid, new_pid );
1819#endif
1820
1821    }
1822
1823    // update extended pointer on .elf file
1824    process->vfs_bin_xp = vfs_bin_xp;
1825
1826    // create child thread descriptor from parent thread descriptor
1827    error = thread_user_fork( parent_thread_xp,
1828                              process,
1829                              &thread );
1830    if( error )
1831    {
1832
1833#if DEBUG_PROCESS_ERROR
1834printk("\n[ERROR] in %s : thread[%x,%x] cannot create main thread / cxy %x / cycle %d\n", 
1835__FUNCTION__, pid, trdid, local_cxy, cycle ); 
1836#endif
1837        cluster_pid_release( new_pid );
1838            kmem_free( process , bits_log2(sizeof(process_t)) );
1839        return -1;
1840    }
1841
1842// check main thread LTID
1843assert( __FUNCTION__, (LTID_FROM_TRDID(thread->trdid) == 0) ,
1844"main thread must have LTID == 0" );
1845
1846#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1847if( DEBUG_PROCESS_MAKE_FORK < cycle )
1848printk("\n[%s] thread[%x,%x] created main thread %x\n", 
1849__FUNCTION__, pid, trdid, thread );
1850#endif
1851
1852    // set COW flag in DATA, ANON, REMOTE vsegs in parent process VMM
1853    // this includes all parent process copies in all clusters
1854    if( parent_process_cxy == local_cxy )   // reference is local
1855    {
1856        vmm_set_cow( parent_process_ptr );
1857    }
1858    else                                    // reference is remote
1859    {
1860        rpc_vmm_set_cow_client( parent_process_cxy,
1861                                parent_process_ptr );
1862    }
1863
1864    // set COW flag in DATA, ANON, REMOTE vsegs for child process VMM
1865    vmm_set_cow( process );
1866 
1867#if( DEBUG_PROCESS_MAKE_FORK & 1 )
1868if( DEBUG_PROCESS_MAKE_FORK < cycle )
1869printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child\n",
1870__FUNCTION__, pid, trdid );
1871#endif
1872
1873    // get extended pointers on parent children_root, children_lock and children_nr
1874    xptr_t children_root_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_root );
1875    xptr_t children_lock_xp = XPTR( parent_process_cxy , &parent_process_ptr->children_lock );
1876    xptr_t children_nr_xp   = XPTR( parent_process_cxy , &parent_process_ptr->children_nr   );
1877
1878    // register process in parent children list
1879    remote_queuelock_acquire( children_lock_xp );
1880        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
1881        hal_remote_atomic_add( children_nr_xp , 1 );
1882    remote_queuelock_release( children_lock_xp );
1883
1884    // return success
1885    *child_thread = thread;
1886    *child_pid    = new_pid;
1887
1888#if DEBUG_PROCESS_MAKE_FORK
1889cycle = (uint32_t)hal_get_cycles();
1890if( DEBUG_PROCESS_MAKE_FORK < cycle )
1891printk("\n[%s] thread[%x,%x] exit / created process %x / cycle %d\n",
1892__FUNCTION__, pid, trdid, new_pid, cycle );
1893#endif
1894
1895    return 0;
1896
1897}   // end process_make_fork()
1898
1899#if DEBUG_PROCESS_MAKE_EXEC
1900
1901/////////////////////////////////////////////////////////////////////////////////////////
1902// This static debug function displays the current state of the exec_info structure
1903// embedded in the calling process descriptor.
1904//
1905// WARNING : It can be used after the sys_exec() function has filled the exec_info
1906//           structure, but not after the process_make_exec() function has replaced
1907//           the kernel pointers by user pointers.
1908/////////////////////////////////////////////////////////////////////////////////////////
1909static void process_exec_info_display( bool_t args_ok,
1910                                       bool_t envs_ok )
1911{
1912    uint32_t   i;
1913    char     * str;    // local pointer on a string
1914
1915    process_t * process = CURRENT_THREAD->process;
1916
1917    // get relevant info from calling process descriptor
1918    pid_t       pid      = process->pid;
1919
1920    uint32_t    args_nr  = process->exec_info.args_nr;
1921    char     ** args     = process->exec_info.args_pointers;
1922
1923    uint32_t    envs_nr  = process->exec_info.envs_nr;
1924    char     ** envs     = process->exec_info.envs_pointers;
1925
1926    char      * path     = process->exec_info.path;
1927
1928    // get pointers on TXT0 chdev
1929    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
1930    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
1931    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
1932
1933    // get extended pointer on remote TXT0 lock
1934    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
1935
1936    // get TXT0 lock
1937    remote_busylock_acquire( lock_xp );
1938
1939    nolock_printk("\n***** exec_info for process %x in cluster %x / %s\n",
1940    pid , local_cxy , path ); 
1941
1942    // display arguments if required
1943    if( args_ok )
1944    {
1945        for( i = 0 ; i < args_nr ; i++ )
1946        {
1947            str = args[i];
1948            if( str != NULL)         // display pointer and string
1949            nolock_printk(" - &arg[%d] = %x / arg[%d] = <%s>\n", i, str, i, str );
1950            else                     // display WARNING
1951            nolock_printk(" - unexpected NULL pointer for &arg[%d]\n", i );
1952        }
1953    }
1954
1955    // display env variables if required
1956    if( envs_ok )
1957    {
1958        for( i = 0 ; i < envs_nr ; i++ )
1959        {
1960            str = envs[i];
1961            if( str != NULL)     // display pointer and string
1962            nolock_printk(" - &env[%d] = %x / env[%d] = <%s>\n", i, str, i, str );
1963            else                     // display WARNING
1964            nolock_printk(" - unexpected NULL pointer for &env[%d]\n", i );
1965        }
1966    }
1967
1968    // release TXT0 lock
1969    remote_busylock_release( lock_xp );
1970
1971}  // end process_exec_info_display()
1972
1973#endif // DEBUG_PROCESS_MAKE_EXEC
1974
1975/////////////////////////////////
1976error_t process_make_exec( void )
1977{
1978    thread_t       * this;                    // local pointer on this thread
1979    process_t      * process;                 // local pointer on this process
1980    pid_t            pid;                     // this process identifier
1981    trdid_t          trdid;                   // this thread identifier
1982    xptr_t           ref_xp;                  // reference process for this process
1983        error_t          error;                   // value returned by called functions
1984    char           * elf_path;                // path to .elf file
1985    xptr_t           file_xp;                 // extended pointer on .elf file descriptor
1986    uint32_t         file_id;                 // file index in fd_array
1987    vseg_t         * vseg;                    // local pointer on created vseg(s)
1988    uint32_t         n;                       // index for loops
1989
1990    uint32_t         args_nr;                 // actual number of args (from exec_info)
1991    intptr_t         args_base;               // args vseg base address in user space
1992    uint32_t         args_size;               // args vseg size (bytes)
1993
1994    uint32_t         envs_nr;                 // actual number of envs (from exec_info)
1995    intptr_t         envs_base;               // envs vseg base address in user space
1996    uint32_t         envs_size;               // envs vseg size (bytes)
1997
1998#if DEBUG_PROCESS_MAKE_EXEC || DEBUG_PROCESS_ERROR
1999uint32_t cycle = (uint32_t)hal_get_cycles();
2000#endif
2001
2002    // get calling thread, process, pid, trdid, and ref_xp
2003    this    = CURRENT_THREAD;
2004    process = this->process;
2005    pid     = process->pid;
2006    trdid   = this->trdid;
2007    ref_xp  = process->ref_xp;
2008
2009        // get .elf pathname from exec_info structure
2010        elf_path      = process->exec_info.path;
2011
2012#if DEBUG_PROCESS_MAKE_EXEC
2013if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2014printk("\n[%s] thread[%x,%x] enters for <%s> / cycle %d\n",
2015__FUNCTION__, pid, trdid, elf_path, cycle );
2016#endif
2017
2018    // 1. open the file identified by <path>
2019    file_xp = XPTR_NULL;
2020    file_id = 0xFFFFFFFF;
2021        error   = vfs_open( process->vfs_root_xp,
2022                            elf_path,
2023                        ref_xp,
2024                            O_RDONLY,
2025                            0,
2026                            &file_xp,
2027                            &file_id );
2028        if( error )
2029        {
2030
2031#if DEBUG_PROCESS_ERROR
2032printk("\n[ERROR] in %s : thread[%x,%x] failed to open file <%s> / cycle %d\n", 
2033__FUNCTION__, pid, trdid, elf_path, cycle ); 
2034#endif
2035                return -1;
2036        }
2037
2038#if (DEBUG_PROCESS_MAKE_EXEC & 1)
2039if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2040printk("\n[%s] thread[%x,%x] opened file <%s>\n",
2041__FUNCTION__, pid, trdid, elf_path );
2042#endif
2043
2044    // 2. delete all threads other than this main thread in all clusters
2045    process_sigaction( pid , DELETE_ALL_THREADS );
2046
2047#if (DEBUG_PROCESS_MAKE_EXEC & 1)
2048if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2049printk("\n[%s] thread[%x,%x] deleted existing threads\n",
2050__FUNCTION__, pid, trdid );
2051#endif
2052
2053    // 3. reset calling process VMM
2054    vmm_user_reset( process );
2055
2056#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2057if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2058{
2059    printk("\n[%s] thread[%x,%x] completed VMM reset\n",
2060    __FUNCTION__, pid, trdid );
2061    hal_vmm_display( ref_xp , true );
2062}
2063#endif
2064
2065    // 4. register the "args" vseg in VSL and map it in GPT, if args_nr != 0.
2066    //    As this vseg contains an array of pointers, the kernel pointers
2067    //    are replaced by user pointers in new process space.
2068    args_nr = process->exec_info.args_nr;
2069
2070    if( args_nr > 0 )
2071    {
2072        // get args vseg base and size in user space
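        // (both configuration constants are expressed in pages : the shift by
        //  CONFIG_PPM_PAGE_ORDER converts them to a byte address and a byte size)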
2073        args_base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_ORDER;
2074        args_size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_ORDER;
2075
2076        // create and register args vseg in VMM
2077        vseg = vmm_create_vseg( process,
2078                                VSEG_TYPE_DATA,
2079                                args_base,
2080                                args_size,
2081                                0,                 // file_offset unused for DATA type
2082                                0,                 // file_size unused for DATA type
2083                                XPTR_NULL,         // mapper_xp unused for DATA type
2084                                0 );               // cxy unused for DATA type
2085        if( vseg == NULL )
2086        {
2087
2088#if DEBUG_PROCESS_ERROR
2089printk("\n[ERROR] in %s : thread[%x,%x] cannot create args vseg for <%s> / cycle %d\n", 
2090__FUNCTION__, pid, trdid, elf_path, cycle ); 
2091#endif
2092                     return -1;
2093        }
2094
2095#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2096if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2097{
2098    printk("\n[%s] thread[%x,%x] args vseg registered in new process VSL\n",
2099    __FUNCTION__, pid, trdid );
2100    hal_vmm_display( ref_xp , true );
2101}
2102#endif
2103        // map all pages for the "args" vseg
2104        uint32_t fake_attr;   // required for hal_gpt_lock_pte()
2105        ppn_t    fake_ppn;    // required for hal_gpt_lock_pte()
2106
2107        xptr_t   base_xp = XPTR( local_cxy , process->exec_info.args_pointers );
2108        xptr_t   gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
2109        uint32_t attr    = GPT_MAPPED | GPT_SMALL | GPT_READABLE | GPT_USER | GPT_CACHABLE;
2110        vpn_t    vpn     = CONFIG_VMM_UTILS_BASE;
2111        ppn_t    ppn     = ppm_base2ppn( base_xp );
2112
2113        for( n = 0 ; n < CONFIG_VMM_ARGS_SIZE ; n++ ) 
2114        {
2115            // lock the PTE
2116            if (hal_gpt_lock_pte( gpt_xp , vpn + n , &fake_attr , &fake_ppn ) )
2117            {
2118
2119#if DEBUG_PROCESS_ERROR
2120printk("\n[ERROR] in %s : thread[%x,%x] cannot map vpn[%x] of args vseg for <%s> / cycle %d\n", 
2121__FUNCTION__, pid, trdid,  vpn + n , elf_path , cycle ); 
2122#endif
2123                        return -1;
2124            }
2125
2126            // map and unlock the PTE
2127            hal_gpt_set_pte( gpt_xp , vpn + n , attr , ppn + n );
2128       }
2129
2130#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2131if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2132{
2133    printk("\n[%s] thread[%x,%x] args vseg mapped in new process GPT\n",
2134    __FUNCTION__, pid, trdid );
2135    hal_vmm_display( ref_xp , true );
2136    process_exec_info_display( true , false );   // args & not envs
2137}
2138#endif
2139
2140        // build pointer on args buffer in kernel space
2141        char  ** k_args = process->exec_info.args_pointers;
2142
2143        // build pointer on args buffer in user space
2144        char  ** u_args = (char **)args_base;
2145
2146        // set user space pointers in kernel args buffer
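        // (each entry is translated by the constant offset between the user base
        //  u_args and the kernel base k_args of the args buffer)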
2147        for( n = 0 ; n < args_nr ; n++ )
2148        {
2149            k_args[n] = (char *)((intptr_t)k_args[n] + (intptr_t)u_args - (intptr_t)k_args);
2150        } 
2151
2152#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2153if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2154printk("\n[%s] thread[%x,%x] args user pointers set in exec_info\n",
2155__FUNCTION__, pid, trdid );
2156#endif
2157
2158    }
2159
2160    // 5. register the "envs" vseg in VSL and map it in GPT, if envs_nr != 0.
2161    //    As this vseg contains an array of pointers, the kernel pointers
2162    //    are replaced by user pointers in new process space.
2163
2164    envs_nr = process->exec_info.envs_nr;
2165
2166    if( envs_nr > 0 )
2167    {
2168        // get envs vseg base and size in user space from config
2169        envs_base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_ORDER;
2170        envs_size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_ORDER;
2171
2172        // TODO (should be similar to the code for args above)
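        // The following is an untested sketch of a possible implementation,
        // mirroring the "args" code above. It assumes the kernel pointers are
        // stored in process->exec_info.envs_pointers, and it is kept disabled
        // until the TODO above is actually implemented.
#if 0
        // create and register envs vseg in VMM
        vseg = vmm_create_vseg( process,
                                VSEG_TYPE_DATA,
                                envs_base,
                                envs_size,
                                0,                 // file_offset unused for DATA type
                                0,                 // file_size unused for DATA type
                                XPTR_NULL,         // mapper_xp unused for DATA type
                                0 );               // cxy unused for DATA type
        if( vseg == NULL )  return -1;

        // map all pages of the "envs" vseg
        uint32_t e_attr;      // required for hal_gpt_lock_pte()
        ppn_t    e_ppn;       // required for hal_gpt_lock_pte()

        xptr_t   e_base_xp = XPTR( local_cxy , process->exec_info.envs_pointers );
        xptr_t   e_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
        uint32_t e_flags   = GPT_MAPPED | GPT_SMALL | GPT_READABLE | GPT_USER | GPT_CACHABLE;
        vpn_t    e_vpn     = CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE;
        ppn_t    e_ppn0    = ppm_base2ppn( e_base_xp );

        for( n = 0 ; n < CONFIG_VMM_ENVS_SIZE ; n++ )
        {
            // lock, then map and unlock each PTE
            if( hal_gpt_lock_pte( e_gpt_xp , e_vpn + n , &e_attr , &e_ppn ) )  return -1;
            hal_gpt_set_pte( e_gpt_xp , e_vpn + n , e_flags , e_ppn0 + n );
        }

        // replace kernel pointers by user pointers in the envs kernel buffer
        char ** k_envs = process->exec_info.envs_pointers;
        char ** u_envs = (char **)envs_base;
        for( n = 0 ; n < envs_nr ; n++ )
        {
            k_envs[n] = (char *)((intptr_t)k_envs[n] + (intptr_t)u_envs - (intptr_t)k_envs);
        }
#endif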
2173
2174#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2175if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2176printk("\n[%s] thread[%x,%x] envs user pointers set in exec_info\n",
2177__FUNCTION__, pid, trdid );
2178#endif
2179
2180    }
2181
2182
2183    // 6. register code & data vsegs, and entry-point in process VMM,
2184    // register extended pointer on .elf file in process descriptor
2185        error = elf_load_process( file_xp , process );
2186
2187    if( error )
2188        {
2189
2190#if DEBUG_PROCESS_ERROR
2191printk("\n[ERROR] in %s : thread[%x,%x] failed to access file <%s> / cycle %d\n", 
2192__FUNCTION__, pid, trdid , elf_path , cycle ); 
2193#endif
2194        return -1;
2195        }
2196
2197#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2198if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2199{
2200    printk("\n[%s] thread[%x,%x] registered code/data vsegs / entry %x\n",
2201    __FUNCTION__, pid, trdid, process->vmm.entry_point );
2202    hal_vmm_display( ref_xp , true );
2203}
2204#endif
2205
2206    // 7. allocate a user stack vseg for the main thread
2207    vseg = vmm_create_vseg( process,
2208                            VSEG_TYPE_STACK,
2209                            LTID_FROM_TRDID( trdid ),
2210                            0,                 // length unused
2211                            0,                 // file_offset unused
2212                            0,                 // file_size unused
2213                            XPTR_NULL,         // mapper_xp unused
2214                            local_cxy );
2215    if( vseg == NULL )
2216    {
2217
2218#if DEBUG_PROCESS_ERROR
2219printk("\n[ERROR] in %s : thread[%x,%x] failed to set u_stack vseg for <%s> / cycle %d\n", 
2220__FUNCTION__, pid, trdid , elf_path , cycle ); 
2221#endif
2222                return -1;
2223    }
2224
2225#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
2226if( DEBUG_PROCESS_MAKE_EXEC < cycle )
2227{
2228    printk("\n[%s] thread[%x,%x] registered stack vseg\n",
2229    __FUNCTION__, pid, trdid );
2230    hal_vmm_display( ref_xp , true );
2231}
2232#endif
2233
2234    // update user stack in thread descriptor
2235    this->user_stack_vseg = vseg;
2236
2237    // 8. update the main thread descriptor ... and jump (one way) to user code
2238    thread_user_exec( args_nr , args_base );
2239
2240    if( error )
2241    {
2242
2243#if DEBUG_PROCESS_ERROR
2244printk("\n[ERROR] in %s : thread[%x,%x] failed to set main thread for <%s> / cycle %d\n", 
2245__FUNCTION__, pid, trdid , elf_path , cycle ); 
2246#endif
2247        return -1;
2248    }
2249
2250    // should not be reached, avoid a warning
2251        return 0;
2252
2253}  // end process_make_exec()
2254
2255
2256////////////////////////////////////////////////
2257void process_zero_create( process_t   * process,
2258                          boot_info_t * info )
2259{
2260    error_t error;
2261    pid_t   pid;
2262
2263#if DEBUG_PROCESS_ZERO_CREATE
2264uint32_t cycle = (uint32_t)hal_get_cycles();
2265if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2266printk("\n[%s] enter / cluster %x / cycle %d\n",
2267__FUNCTION__, local_cxy, cycle );
2268#endif
2269
2270    // get pointer on VMM
2271    vmm_t * vmm = &process->vmm;
2272
2273    // get PID from local cluster manager for this kernel process
2274    error = cluster_pid_alloc( process , &pid );
2275
2276    if( error || (LPID_FROM_PID( pid ) != 0) )
2277    {
2278        printk("\n[PANIC] in %s : cannot get valid PID in cluster %x / PID = %x\n",
2279        __FUNCTION__ , local_cxy, pid );
2280        hal_core_sleep();
2281    }
2282
2283#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2284if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2285printk("\n[%s] allocated pid %x in cluster %x\n", __FUNCTION__, pid, local_cxy );
2286#endif
2287
2288    // initialize PID, REF_XP, PARENT_XP, and STATE
2289    // the kernel process_zero is its own parent_process,
2290    // reference_process, and owner_process, and cannot be killed...
2291    process->pid        = pid;
2292    process->ref_xp     = XPTR( local_cxy , process );
2293    process->owner_xp   = XPTR( local_cxy , process );
2294    process->parent_xp  = XPTR( local_cxy , process );
2295    process->term_state = 0;
2296
2297    // initialize VSL as empty
2298    vmm->vsegs_nr = 0;
2299        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
2300
2301#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2302if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2303printk("\n[%s] initialized empty VSL in cluster %x\n", __FUNCTION__, local_cxy );
2304#endif
2305
2306    // initialize GPT as empty
2307    error = hal_gpt_create( &vmm->gpt );
2308
2309    if( error ) 
2310    {
2311        printk("\n[PANIC] in %s : cannot create empty GPT\n", __FUNCTION__ );
2312        hal_core_sleep();
2313    }
2314
2315#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2316if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2317printk("\n[%s] initialized empty GPT in cluster %x\n", __FUNCTION__, local_cxy );
2318#endif
2319
2320    // initialize the lock protecting the VSL
2321    remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
2322   
2323    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
2324    error = hal_vmm_kernel_init( info );
2325
2326    if( error ) 
2327    {
2328        printk("\n[PANIC] in %s : cannot create kernel vsegs in VMM\n", __FUNCTION__ );
2329        hal_core_sleep();
2330    }
2331
2332#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2333if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2334printk("\n[%s] initialized hal-specific VMM in cluster %x\n", __FUNCTION__, local_cxy );
2335hal_vmm_display( XPTR( local_cxy , process ) , true ); 
2336#endif
2337
2338    // reset th_tbl[] array and associated fields
2339    uint32_t i;
2340    for( i = 0 ; i < CONFIG_THREADS_MAX_PER_CLUSTER ; i++ )
2341        {
2342        process->th_tbl[i] = NULL;
2343    }
2344    process->th_nr  = 0;
2345    rwlock_init( &process->th_lock , LOCK_PROCESS_THTBL );
2346
2347#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2348if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2349printk("\n[%s] initialized th_tbl[] in cluster %x\n", __FUNCTION__, local_cxy );
2350#endif
2351
2352    // reset children list as empty
2353    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
2354    process->children_nr = 0;
2355    remote_queuelock_init( XPTR( local_cxy , &process->children_lock ),
2356                           LOCK_PROCESS_CHILDREN );
2357
2358#if (DEBUG_PROCESS_ZERO_CREATE & 1)
2359if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2360printk("\n[%s] initialized children list in cluster %x\n", __FUNCTION__, local_cxy );
2361#endif
2362
2363    // register kernel process in cluster manager local_list
2364    cluster_process_local_link( process );
2365   
2366        hal_fence();
2367
2368#if DEBUG_PROCESS_ZERO_CREATE
2369cycle = (uint32_t)hal_get_cycles();
2370if( DEBUG_PROCESS_ZERO_CREATE < cycle )
2371printk("\n[%s] exit / cluster %x / cycle %d\n",
2372__FUNCTION__, local_cxy, cycle );
2373#endif
2374
2375}  // end process_zero_create()
2376
2377////////////////////////////////
2378void process_init_create( void )
2379{
2380    process_t      * process;       // local pointer on process descriptor
2381    pid_t            pid;           // process_init identifier
2382    thread_t       * thread;        // local pointer on main thread
2383    pthread_attr_t   attr;          // main thread attributes
2384    lid_t            lid;           // selected core local index for main thread
2385    xptr_t           file_xp;       // extended pointer on .elf file descriptor
2386    uint32_t         file_id;       // file index in fd_array
2387    error_t          error;
2388
2389#if DEBUG_PROCESS_INIT_CREATE
2390thread_t * this = CURRENT_THREAD;
2391uint32_t cycle = (uint32_t)hal_get_cycles();
2392if( DEBUG_PROCESS_INIT_CREATE < cycle )
2393printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
2394__FUNCTION__, this->process->pid, this->trdid, cycle );
2395#endif
2396
2397    // allocate memory for the process descriptor from the local cluster
2398    process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO );
2399    if( process == NULL )
2400    {
2401        printk("\n[PANIC] in %s : cannot allocate process\n", __FUNCTION__ );
2402        hal_core_sleep();
2403    }
2404
2405    // set the CWD and VFS_ROOT fields in process descriptor
2406    process->cwd_xp      = process_zero.vfs_root_xp;
2407    process->vfs_root_xp = process_zero.vfs_root_xp;
2408
2409    // get PID from local cluster
2410    error = cluster_pid_alloc( process , &pid );
2411    if( error ) 
2412    {
2413        printk("\n[PANIC] in %s : cannot allocate PID\n", __FUNCTION__ );
2414        hal_core_sleep();
2415    }
2416    if( pid != 1 ) 
2417    {
2418        printk("\n[PANIC] in %s : process PID must be 0x1\n", __FUNCTION__ );
2419        hal_core_sleep();
2420    }
2421
2422    // initialize process descriptor / parent is local process_zero
2423    error = process_reference_init( process,
2424                                    pid,
2425                                    XPTR( local_cxy , &process_zero ) ); 
2426    if( error )
2427    {
2428        printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ );
2429        hal_core_sleep();
2430    }
2431
2432#if(DEBUG_PROCESS_INIT_CREATE & 1)
2433if( DEBUG_PROCESS_INIT_CREATE < cycle )
2434printk("\n[%s] thread[%x,%x] initialized process descriptor\n",
2435__FUNCTION__, this->process->pid, this->trdid );
2436#endif
2437
2438    // open the file identified by CONFIG_PROCESS_INIT_PATH
2439    file_xp = XPTR_NULL;
2440    file_id = -1;
2441        error   = vfs_open( process->vfs_root_xp,
2442                            CONFIG_PROCESS_INIT_PATH,
2443                        XPTR( local_cxy , process ),
2444                            O_RDONLY,
2445                            0,
2446                            &file_xp,
2447                            &file_id );
2448    if( error )
2449    {
2450        printk("\n[PANIC] in %s : cannot open file <%s>\n",
2451         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
2452        hal_core_sleep();
2453    }
2454
2455#if(DEBUG_PROCESS_INIT_CREATE & 1)
2456if( DEBUG_PROCESS_INIT_CREATE < cycle )
2457printk("\n[%s] thread[%x,%x] opened .elf file descriptor\n",
2458__FUNCTION__, this->process->pid, this->trdid );
2459#endif
2460
2461    // register "code" and "data" vsegs as well as entry-point
2462    // in process VMM, using information contained in the elf file.
2463        error = elf_load_process( file_xp , process );
2464
2465    if( error ) 
2466    {
2467        printk("\n[PANIC] in %s : cannot access file <%s>\n",
2468         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
2469        hal_core_sleep();
2470    }
2471
2472
2473#if(DEBUG_PROCESS_INIT_CREATE & 1)
2474if( DEBUG_PROCESS_INIT_CREATE < cycle )
2475{
2476    printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
2477    __FUNCTION__, this->process->pid, this->trdid );
2478    hal_vmm_display( XPTR( local_cxy , process ) , true );
2479}
2480#endif
2481
2482    // get extended pointers on process_zero children_root, children_lock
2483    xptr_t children_root_xp = XPTR( local_cxy , &process_zero.children_root );
2484    xptr_t children_lock_xp = XPTR( local_cxy , &process_zero.children_lock );
2485
2486    // take lock protecting kernel process children list
2487    remote_queuelock_acquire( children_lock_xp );
2488
2489    // register process INIT in parent local process_zero
2490        xlist_add_last( children_root_xp , XPTR( local_cxy , &process->children_list ) );
2491        hal_atomic_add( &process_zero.children_nr , 1 );
2492
2493    // release lock protecting kernel process children list
2494    remote_queuelock_release( children_lock_xp );
2495
2496#if(DEBUG_PROCESS_INIT_CREATE & 1)
2497if( DEBUG_PROCESS_INIT_CREATE < cycle )
2498printk("\n[%s] thread[%x,%x] registered init process in parent\n",
2499__FUNCTION__, this->process->pid, this->trdid );
2500#endif
2501
2502    // select a core in local cluster to execute the main thread
2503    lid  = cluster_select_local_core( local_cxy );
2504
2505    // initialize pthread attributes for main thread
2506    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
2507    attr.cxy        = local_cxy;
2508    attr.lid        = lid;
2509
2510    // create and initialize thread descriptor
2511        error = thread_user_create( pid,
2512                                (void *)process->vmm.entry_point,
2513                                NULL,
2514                                &attr,
2515                                &thread );
2516
2517    if( error )
2518    {
2519        printk("\n[PANIC] in %s : cannot create main thread\n", __FUNCTION__  );
2520        hal_core_sleep();
2521    }
2522    if( thread->trdid != 0 )
2523    {
2524        printk("\n[PANIC] in %s : bad main thread trdid\n", __FUNCTION__  );
2525        hal_core_sleep();
2526    }
2527
2528#if(DEBUG_PROCESS_INIT_CREATE & 1)
2529if( DEBUG_PROCESS_INIT_CREATE < cycle )
2530printk("\n[%s] thread[%x,%x] created main thread\n",
2531__FUNCTION__, this->process->pid, this->trdid );
2532#endif
2533
2534    // activate thread
2535        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
2536
2537    hal_fence();
2538
2539#if DEBUG_PROCESS_INIT_CREATE
2540cycle = (uint32_t)hal_get_cycles();
2541if( DEBUG_PROCESS_INIT_CREATE < cycle )
2542printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
2543__FUNCTION__, this->process->pid, this->trdid, cycle );
2544#endif
2545
2546}  // end process_init_create()
2547
2548///////////////////////////////////////////////////
2549uint32_t process_build_string( xptr_t   process_xp,
2550                               char   * buffer,
2551                               uint32_t size )
2552{
2553    int32_t       length;          // actual length of the string
2554
2555    process_t   * process_ptr;     // process descriptor local pointer
2556    cxy_t         process_cxy;     // process descriptor cluster identifier
2557
2558    xptr_t        parent_xp;       // extended pointer on parent process
2559    process_t   * parent_ptr;      // parent process local pointer
2560    cxy_t         parent_cxy;      // parent process cluster identifier
2561
2562    xptr_t        owner_xp;        // extended pointer on owner process
2563    process_t   * owner_ptr;       // owner process local pointer
2564    cxy_t         owner_cxy;       // owner process cluster identifier
2565
2566    pid_t         pid;             // process identifier
2567    pid_t         ppid;            // parent process identifier
2568    lpid_t        lpid;            // local process identifier
2569    uint32_t      state;           // termination state
2570    uint32_t      th_nr;           // number of threads
2571
2572    xptr_t        txt_file_xp;     // extended pointer on TXT_RX file descriptor
2573    xptr_t        txt_chdev_xp;    // extended pointer on TXT_RX chdev
2574    chdev_t     * txt_chdev_ptr;
2575    cxy_t         txt_chdev_cxy;
2576    xptr_t        txt_owner_xp;    // extended pointer on TXT owner process
2577
2578    xptr_t        elf_file_xp;     // extended pointer on .elf file
2579    cxy_t         elf_file_cxy;
2580    vfs_file_t  * elf_file_ptr;
2581    vfs_inode_t * elf_inode_ptr;   // local pointer on .elf inode
2582
2583    char          txt_name[CONFIG_VFS_MAX_NAME_LENGTH];
2584    char          elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
2585
2586assert( __FUNCTION__ , (size >= 80 ) , "buffer size too small" );
2587
2588    // get cluster and local pointer on process
2589    process_ptr = GET_PTR( process_xp );
2590    process_cxy = GET_CXY( process_xp );
2591
2592    // get process PID, LPID, and state
2593    pid   = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2594    lpid  = LPID_FROM_PID( pid );
2595    state = hal_remote_l32( XPTR( process_cxy , &process_ptr->term_state ) );
2596
2597    // get process PPID
2598    parent_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->parent_xp ) );
2599    parent_cxy = GET_CXY( parent_xp );
2600    parent_ptr = GET_PTR( parent_xp );
2601    ppid       = hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid ) );
2602
2603    // get number of threads
2604    th_nr      = hal_remote_l32( XPTR( process_cxy , &process_ptr->th_nr ) );
2605
2606    // get pointers on owner process descriptor
2607    owner_xp  = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ) );
2608    owner_cxy = GET_CXY( owner_xp );
2609    owner_ptr = GET_PTR( owner_xp );
2610
2611    // get process TXT name and .elf name
2612    if( lpid )                                   // user process
2613    {
2614        // get extended pointer on file descriptor associated to TXT_RX
2615        txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
2616
2617assert( __FUNCTION__, (txt_file_xp != XPTR_NULL) ,
2618"process must be attached to one TXT terminal" ); 
2619
2620        // get TXT_RX chdev pointers
2621        txt_chdev_xp  = chdev_from_file( txt_file_xp );
2622        txt_chdev_cxy = GET_CXY( txt_chdev_xp );
2623        txt_chdev_ptr = GET_PTR( txt_chdev_xp );
2624
2625        // get TXT_RX name and ownership
2626        hal_remote_strcpy( XPTR( local_cxy , txt_name ) ,
2627                           XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
2628   
2629        // get TXT_owner process
2630        txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy, 
2631                                                     &txt_chdev_ptr->ext.txt.owner_xp ) );
2632        // get process .elf name
2633        elf_file_xp   = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
2634        elf_file_cxy  = GET_CXY( elf_file_xp );
2635        elf_file_ptr  = GET_PTR( elf_file_xp );
2636        elf_inode_ptr = hal_remote_lpt( XPTR( elf_file_cxy , &elf_file_ptr->inode ) );
2637        vfs_inode_get_name( XPTR( elf_file_cxy , elf_inode_ptr ) , elf_name );
2638    }
2639    else                                         // kernel process_zero
2640    {
2641        // TXT name and .elf name are not registered in kernel process
2642        strcpy( txt_name , "txt0_rx" );
2643        txt_owner_xp = process_xp; 
2644        strcpy( elf_name , "kernel.elf" );
2645    }
2646
2647    // display process info
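    // (FG) means the process is the current owner of its TXT terminal (foreground),
    // (BG) means it is attached to the terminal without owning it (background)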
2648    if( txt_owner_xp == process_xp )
2649    {
2650        length = snprintk( buffer, size,
2651        "PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n", 
2652        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
2653    }
2654    else
2655    {
2656        length = snprintk( buffer, size,
2657        "PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n", 
2658        pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
2659    }
2660
2661    // check length
2662    if( (length < 0) )
2663    {
2664        length = snprintk( buffer , size , 
2665        "buffer too small for process %x in cluster %x", pid , process_cxy );
2666    }
2667
2668    return length; 
2669
2670}  // end process_build_string()
2671
2672/////////////////////////////////////////
2673void process_display( xptr_t process_xp )
2674{
2675    char  buffer[CONFIG_PROCESS_DISPLAY_BUF_SIZE];
2676
2677    // build the string to be displayed
2678    process_build_string( process_xp,
2679                          buffer,
2680                          CONFIG_PROCESS_DISPLAY_BUF_SIZE ); 
2681    // display the string
2682    nolock_puts( buffer );
2683
2684}  // end process_display()
2685
2686
2687////////////////////////////////////////////////////////////////////////////////////////
2688//     Terminals related functions
2689////////////////////////////////////////////////////////////////////////////////////////
2690
2691//////////////////////////////////
2692uint32_t process_txt_alloc( void )
2693{
2694    uint32_t  index;       // TXT terminal index
2695    xptr_t    chdev_xp;    // extended pointer on TXT_RX chdev
2696    chdev_t * chdev_ptr;   // local pointer on TXT_RX chdev
2697    cxy_t     chdev_cxy;   // TXT_RX chdev cluster
2698    xptr_t    root_xp;     // extended pointer on owner field in chdev
2699
2700    // scan the user TXT_RX chdevs (TXT0 is reserved for kernel)
2701    for( index = 1 ; index < LOCAL_CLUSTER->nb_txt_channels ; index ++ )
2702    {
2703        // get pointers on TXT_RX[index]
2704        chdev_xp  = chdev_dir.txt_rx[index];
2705        chdev_cxy = GET_CXY( chdev_xp );
2706        chdev_ptr = GET_PTR( chdev_xp );
2707
2708        // get extended pointer on root of attached process
2709        root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2710
2711        // return free TXT index if found
2712        if( xlist_is_empty( root_xp ) ) return index; 
2713    }
2714
2715    assert( __FUNCTION__, false , "no free TXT terminal found" );
2716
2717    return -1;
2718
2719} // end process_txt_alloc()
2720
2721/////////////////////////////////////////////
2722void process_txt_attach( xptr_t   process_xp,
2723                         uint32_t txt_id )
2724{
2725    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2726    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2727    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2728    xptr_t      root_xp;      // extended pointer on list root in chdev
2729    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2730
2731    process_t * process_ptr = GET_PTR(process_xp );
2732    cxy_t       process_cxy = GET_CXY(process_xp );
2733
2734// check process is in owner cluster
2735assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ))),
2736"process descriptor not in owner cluster" );
2737
2738// check terminal index
2739assert( __FUNCTION__, (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
2740"illegal TXT terminal index" );
2741
2742    // get pointers on TXT_RX[txt_id] chdev
2743    chdev_xp  = chdev_dir.txt_rx[txt_id];
2744    chdev_cxy = GET_CXY( chdev_xp );
2745    chdev_ptr = GET_PTR( chdev_xp );
2746
2747    // get extended pointer on root & lock of attached process list
2748    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
2749    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2750
2751    // get lock protecting list of processes attached to TXT
2752    remote_busylock_acquire( lock_xp );
2753
2754    // insert owner process in list of attached processes to same TXT
2755    xlist_add_last( root_xp , XPTR( process_cxy , &process_ptr->txt_list ) );
2756
2757    // release lock protecting list of processes attached to TXT
2758    remote_busylock_release( lock_xp );
2759
2760#if DEBUG_PROCESS_TXT
2761thread_t * this = CURRENT_THREAD;
2762uint32_t cycle = (uint32_t)hal_get_cycles();
2763if( DEBUG_PROCESS_TXT < cycle )
2764printk("\n[%s] thread[%x,%x] attached process %x to TXT %d / cycle %d\n",
2765__FUNCTION__, this->process->pid, this->trdid,
2766hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ), txt_id , cycle );
2767#endif
2768
2769} // end process_txt_attach()
2770
2771/////////////////////////////////////////////
2772void process_txt_detach( xptr_t  process_xp )
2773{
2774    process_t * process_ptr;  // local pointer on process in owner cluster
2775    cxy_t       process_cxy;  // process owner cluster
2776    pid_t       process_pid;  // process identifier
2777    xptr_t      file_xp;      // extended pointer on stdin file
2778    xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
2779    cxy_t       chdev_cxy;    // TXT_RX chdev cluster
2780    chdev_t *   chdev_ptr;    // local pointer on TXT_RX chdev
2781    xptr_t      lock_xp;      // extended pointer on list lock in chdev
2782
2783    // get process cluster, local pointer, and PID
2784    process_cxy = GET_CXY( process_xp );
2785    process_ptr = GET_PTR( process_xp );
2786    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2787
2788// check process descriptor in owner cluster
2789assert( __FUNCTION__, (CXY_FROM_PID( process_pid ) == process_cxy ) ,
2790"process descriptor not in owner cluster" );
2791
2792    // release TXT ownership (does nothing if not TXT owner)
2793    process_txt_transfer_ownership( process_xp );
2794
2795    // get extended pointer on process stdin pseudo file
2796    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2797
2798    // get pointers on TXT_RX chdev
2799    chdev_xp  = chdev_from_file( file_xp );
2800    chdev_cxy = GET_CXY( chdev_xp );
2801    chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
2802
2803    // get extended pointer on lock protecting attached process list
2804    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
2805
2806    // get lock protecting list of processes attached to TXT
2807    remote_busylock_acquire( lock_xp );
2808
2809    // unlink process from attached process list
2810    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
2811
2812    // release lock protecting list of processes attached to TXT
2813    remote_busylock_release( lock_xp );
2814
2815#if DEBUG_PROCESS_TXT
2816thread_t * this = CURRENT_THREAD;
2817uint32_t cycle  = (uint32_t)hal_get_cycles();
2818uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
2819if( DEBUG_PROCESS_TXT < cycle )
2820printk("\n[%s] thread[%x,%x] detached process %x from TXT%d / cycle %d\n",
2821__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
2822#endif
2823
2824} // end process_txt_detach()
2825
2826///////////////////////////////////////////////////
2827uint32_t process_txt_get_index( xptr_t process_xp )
2828{
2829
2830    // get target process cluster and local pointer
2831    process_t * process_ptr = GET_PTR( process_xp );
2832    cxy_t       process_cxy = GET_CXY( process_xp );
2833
2834assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp))),
2835"process descriptor not in owner cluster" );
2836
2837    // get extended pointer on STDIN pseudo file in owner process descriptor
2838    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0]));
2839
2840assert( __FUNCTION__, (file_xp != XPTR_NULL),
2841"STDIN pseudo-file undefined in fd_array for process %x\n",
2842hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ) );
2843
2844    // get extended pointer on TXT chdev
2845    xptr_t chdev_xp = chdev_from_file( file_xp );
2846 
2847assert( __FUNCTION__, (chdev_xp != XPTR_NULL),
2848"chdev undefined for STDIN pseudo-file of process %x\n",
2849hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) ) );
2850
2851    // get cluster and local pointer on chdev
2852   cxy_t     chdev_cxy = GET_CXY( chdev_xp );
2853   chdev_t * chdev_ptr = GET_PTR( chdev_xp );
2854 
2855   // get parent TXT terminal index
2856   return hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
2857
2858}  // end process_txt_get_index()
2859
2860///////////////////////////////////////////////////
2861void process_txt_set_ownership( xptr_t process_xp )
2862{
2863    process_t * process_ptr;
2864    cxy_t       process_cxy;
2865    xptr_t      file_xp;
2866    xptr_t      txt_xp;     
2867    chdev_t   * txt_ptr;
2868    cxy_t       txt_cxy;
2869
2870    // get pointers on process in owner cluster
2871    process_cxy = GET_CXY( process_xp );
2872    process_ptr = GET_PTR( process_xp );
2873
2874    // check owner cluster
2875    assert( __FUNCTION__, (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp ))),
2876    "process descriptor not in owner cluster" );
2877
2878    // get extended pointer on stdin pseudo file
2879    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2880
2881    // get pointers on TXT chdev
2882    txt_xp  = chdev_from_file( file_xp );
2883    txt_cxy = GET_CXY( txt_xp );
2884    txt_ptr = GET_PTR( txt_xp );
2885
2886    // set owner field in TXT chdev
2887    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
2888
2889#if DEBUG_PROCESS_TXT
2890thread_t * this = CURRENT_THREAD;
2891uint32_t cycle  = (uint32_t)hal_get_cycles();
2892uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
2893if( DEBUG_PROCESS_TXT < cycle )
2894printk("\n[%s] thread[%x,%x] gave TXT%d ownership to process / cycle %d\n",
2895__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
2896#endif
2897
2898}  // end process_txt_set_ownership()
2899
2900////////////////////////////////////////////////////////
2901void process_txt_transfer_ownership( xptr_t process_xp )
2902{
2903    process_t * process_ptr;     // local pointer on process releasing ownership
2904    cxy_t       process_cxy;     // process cluster
2905    pid_t       process_pid;     // process identifier
2906    xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
2907    xptr_t      txt_xp;          // extended pointer on TXT_RX chdev
2908    chdev_t   * txt_ptr;         // local pointer on TXT_RX chdev
2909    cxy_t       txt_cxy;         // cluster of TXT_RX chdev
2910    uint32_t    txt_id;          // TXT_RX channel
2911    xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
2912    xptr_t      root_xp;         // extended pointer on root of attached process list
2913    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
2914    xptr_t      iter_xp;         // iterator for xlist
2915    xptr_t      current_xp;      // extended pointer on current process
2916    bool_t      found;
2917
2918#if DEBUG_PROCESS_TXT
2919thread_t * this  = CURRENT_THREAD;
2920uint32_t   cycle;
2921#endif
2922
2923    // get pointers on target process
2924    process_cxy = GET_CXY( process_xp );
2925    process_ptr = GET_PTR( process_xp );
2926    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
2927
2928// check owner cluster
2929assert( __FUNCTION__, (process_cxy == CXY_FROM_PID( process_pid )) ,
2930"process descriptor not in owner cluster" );
2931
2932    // get extended pointer on stdin pseudo file
2933    file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
2934
2935    // get pointers on TXT chdev
2936    txt_xp  = chdev_from_file( file_xp );
2937    txt_cxy = GET_CXY( txt_xp );
2938    txt_ptr = GET_PTR( txt_xp );
2939
2940    // get relevant infos from chdev descriptor
2941    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
2942    txt_id   = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
2943
2944    // transfer ownership only if target process is the TXT owner, and TXT is not TXT0
2945    if( (owner_xp == process_xp) && (txt_id > 0) ) 
2946    {
2947        // get extended pointers on root and lock of attached processes list
2948        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
2949        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
2950
2951        if( process_get_ppid( process_xp ) != 1 )       // target process is not KSH
2952        {
2953            // get lock
2954            remote_busylock_acquire( lock_xp );
2955
2956            // scan attached process list to find KSH process
2957            found = false;
2958            for( iter_xp = hal_remote_l64( root_xp ) ;
2959                 (iter_xp != root_xp) && (found == false) ;
2960                 iter_xp = hal_remote_l64( iter_xp ) )
2961            {
2962                current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2963
2964                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
2965                {
2966                    // set owner field in TXT chdev
2967                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
2968
2969#if DEBUG_PROCESS_TXT
2970cycle = (uint32_t)hal_get_cycles();
2971if( DEBUG_PROCESS_TXT < cycle )
2972printk("\n[%s] thread[%x,%x] transferred TXT%d ownership to KSH / cycle %d\n",
2973__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
2974#endif
2975                    found = true;
2976                }
2977            }
2978
2979            // release lock
2980            remote_busylock_release( lock_xp );
2981
2982// A KSH process must exist for each user TXT channel
2983assert( __FUNCTION__, (found == true), "KSH process not found for TXT%d", txt_id );
2984
2985        }
2986        else                                           // target process is KSH
2987        {
2988            // get lock
2989            remote_busylock_acquire( lock_xp );
2990
2991            // scan attached process list to find another process
2992            found = false;
2993            for( iter_xp = hal_remote_l64( root_xp ) ;
2994                 (iter_xp != root_xp) && (found == false) ;
2995                 iter_xp = hal_remote_l64( iter_xp ) )
2996            {
2997                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
2998
2999                if( current_xp != process_xp )            // current is not KSH
3000                {
3001                    // set owner field in TXT chdev
3002                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
3003
3004#if DEBUG_PROCESS_TXT
3005cycle  = (uint32_t)hal_get_cycles();
3006cxy_t       current_cxy = GET_CXY( current_xp );
3007process_t * current_ptr = GET_PTR( current_xp );
3008uint32_t    new_pid     = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
3009if( DEBUG_PROCESS_TXT < cycle )
3010printk("\n[%s] thread[%x,%x] transferred TXT%d ownership to process %x / cycle %d\n",
3011__FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
3012#endif
3013                    found = true;
3014                }
3015            }
3016
3017            // release lock
3018            remote_busylock_release( lock_xp );
3019
3020            // no more owner for TXT if no other process found
3021            if( found == false )
3022            {
3023                // set owner field in TXT chdev
3024                hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
3025
3026#if DEBUG_PROCESS_TXT
3027cycle = (uint32_t)hal_get_cycles();
3028if( DEBUG_PROCESS_TXT < cycle )
3029printk("\n[%s] thread[%x,%x] released TXT%d (no attached process) / cycle %d\n",
3030__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
3031#endif
3032            }
3033        }
3034    }
3035    else
3036    {
3037
3038#if DEBUG_PROCESS_TXT
3039cycle = (uint32_t)hal_get_cycles();
3040if( DEBUG_PROCESS_TXT < cycle )
3041printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
3042__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
3043#endif
3044
3045    }
3046
3047}  // end process_txt_transfer_ownership()
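
//////////////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (hypothetical caller, not taken from this file) : an exit
// or kill path running in the owner cluster could release the terminal before the
// process is destroyed, so that the KSH process - or another attached process - gets
// the keyboard back.
//
//      void example_release_terminal( process_t * process )
//      {
//          // build extended pointer on the local owner process descriptor
//          xptr_t process_xp = XPTR( local_cxy , process );
//
//          // give the TXT back to KSH, or to another attached process
//          process_txt_transfer_ownership( process_xp );
//      }
//////////////////////////////////////////////////////////////////////////////////////////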
3048
3049
3050////////////////////////////////////////////////
3051bool_t process_txt_is_owner( xptr_t process_xp )
3052{
3053    // get local pointer and cluster of process in owner cluster
3054    cxy_t       process_cxy = GET_CXY( process_xp );
3055    process_t * process_ptr = GET_PTR( process_xp );
3056
3057// check target process descriptor is in owner cluster
3058pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
3059assert( __FUNCTION__, (process_cxy == CXY_FROM_PID( process_pid )) ,
3060"process descriptor not in owner cluster" );
3061
3062    // get extended pointer on stdin pseudo file
3063    xptr_t file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
3064
3065    // get pointers on TXT chdev
3066    xptr_t    txt_xp  = chdev_from_file( file_xp );
3067    cxy_t     txt_cxy = GET_CXY( txt_xp );
3068    chdev_t * txt_ptr = GET_PTR( txt_xp );
3069
3070    // get extended pointer on TXT_RX owner process
3071    xptr_t owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
3072
3073    return (process_xp == owner_xp);
3074
3075}   // end process_txt_is_owner()
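
//////////////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (hypothetical caller, not taken from this file) : a read
// path could use process_txt_is_owner() to refuse keyboard input to a background
// process. The <process_xp> argument is assumed to be the extended pointer on the
// calling process descriptor in its owner cluster.
//
//      error_t example_check_foreground( xptr_t process_xp )
//      {
//          // only the TXT owner (foreground process) can read from the terminal
//          if( process_txt_is_owner( process_xp ) == false ) return -1;
//
//          return 0;
//      }
//////////////////////////////////////////////////////////////////////////////////////////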
3076
3077////////////////////////////////////////////////     
3078xptr_t process_txt_get_owner( uint32_t channel )
3079{
3080    xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
3081    cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
3082    chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
3083
3084    return (xptr_t)hal_remote_l64( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
3085
3086}  // end process_txt_get_owner()
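
//////////////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (hypothetical caller, not taken from this file) : a TXT_RX
// server thread handling a control character on a given <channel> could use
// process_txt_get_owner() to find the foreground process that should receive the
// corresponding signal.
//
//      xptr_t owner_xp = process_txt_get_owner( channel );
//
//      if( owner_xp != XPTR_NULL )
//      {
//          // deliver the signal to the owner process (delivery code not shown)
//      }
//////////////////////////////////////////////////////////////////////////////////////////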
3087
3088///////////////////////////////////////////
3089void process_txt_display( uint32_t txt_id )
3090{
3091    xptr_t      chdev_xp;
3092    cxy_t       chdev_cxy;
3093    chdev_t   * chdev_ptr;
3094    xptr_t      root_xp;
3095    xptr_t      lock_xp;
3096    xptr_t      current_xp;
3097    xptr_t      iter_xp;
3098    cxy_t       txt0_cxy;
3099    chdev_t   * txt0_ptr;
3100    xptr_t      txt0_xp;
3101    xptr_t      txt0_lock_xp;
3102   
3103    assert( __FUNCTION__, (txt_id < LOCAL_CLUSTER->nb_txt_channels) ,
3104    "illegal TXT terminal index" );
3105
3106    // get pointers on TXT0 chdev
3107    txt0_xp  = chdev_dir.txt_tx[0];
3108    txt0_cxy = GET_CXY( txt0_xp );
3109    txt0_ptr = GET_PTR( txt0_xp );
3110
3111    // get extended pointer on TXT0 lock
3112    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
3113
3114    // get pointers on TXT_RX[txt_id] chdev
3115    chdev_xp  = chdev_dir.txt_rx[txt_id];
3116    chdev_cxy = GET_CXY( chdev_xp );
3117    chdev_ptr = GET_PTR( chdev_xp );
3118
3119    // get extended pointer on root & lock of attached process list
3120    root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
3121    lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
3122
3123    // get lock on attached process list
3124    remote_busylock_acquire( lock_xp );
3125
3126    // get TXT0 lock in busy waiting mode
3127    remote_busylock_acquire( txt0_lock_xp );
3128
3129    // display header
3130    nolock_printk("\n***** processes attached to TXT_%d / cycle %d\n",
3131    txt_id , (uint32_t)hal_get_cycles() );
3132
3133    // scan attached process list
3134    XLIST_FOREACH( root_xp , iter_xp )
3135    {
3136        current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
3137        process_display( current_xp );
3138    }
3139
3140    // release TXT0 lock in busy waiting mode
3141    remote_busylock_release( txt0_lock_xp );
3142
3143    // release lock on attached process list
3144    remote_busylock_release( lock_xp );
3145
3146}  // end process_txt_display()
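
//////////////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (hypothetical caller, not taken from this file) : a debug
// command could display the processes attached to all TXT terminals of the platform.
//
//      uint32_t txt_id;
//      for( txt_id = 0 ; txt_id < LOCAL_CLUSTER->nb_txt_channels ; txt_id++ )
//      {
//          process_txt_display( txt_id );
//      }
//////////////////////////////////////////////////////////////////////////////////////////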