Changeset 625 for trunk


Timestamp:
Apr 10, 2019, 10:09:39 AM (6 years ago)
Author:
alain
Message:

Fix a bug in the vmm_remove_vseg() function: the physical pages
associated with a user DATA vseg were released to the kernel whenever
the target process descriptor was in the reference cluster.
These physical pages must be released only when the page
forks counter value is zero.
All other modifications are cosmetic.
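
A minimal C sketch of the corrected release policy is shown below. The body of vmm_remove_vseg() is not part of this changeset excerpt, so the fragment is only an illustration: the page descriptor "forks" field, the page_cxy / page_ptr locals, and the KMEM_PAGE request type are assumptions, and the kmem_free() / hal_remote_l32() / XPTR() calls follow the API style visible in the diffs below. Only the tested condition (page forks counter equal to zero) comes from the message above.

    // hypothetical fragment for one physical page of a user DATA vseg,
    // executed when the vseg is removed (names are illustrative)
    uint32_t forks = hal_remote_l32( XPTR( page_cxy , &page_ptr->forks ) );

    // the page is returned to the kernel only when no forked child keeps
    // a copy-on-write reference on it, whatever the cluster containing
    // the target process descriptor
    if( forks == 0 )
    {
        kmem_req_t req;
        req.type = KMEM_PAGE;      // assumed request type for a physical page
        req.ptr  = page_ptr;
        kmem_free( &req );
    }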

Location:
trunk
Files:
73 edited

  • trunk/Makefile

    r623 r625  
    1313# Default values for hardware parameters.
    1414# These parameters should be defined in the 'params-hard.mk' file.
    15 ARCH            ?= /users/alain/soc/tsar-trunk-svn-2013/platforms/tsar_generic_iob
    16 X_SIZE          ?= 2
    17 Y_SIZE          ?= 2
    18 NB_PROCS        ?= 2
    19 NB_TTYS         ?= 3
    20 IOC_TYPE        ?= IOC_BDV
     15ARCH        ?= /users/alain/soc/tsar-trunk-svn-2013/platforms/tsar_generic_iob
     16X_SIZE      ?= 2
     17Y_SIZE      ?= 2
     18NB_PROCS    ?= 2
     19NB_TTYS     ?= 3
     20IOC_TYPE    ?= IOC_BDV
    2121TXT_TYPE    ?= TXT_TTY
    2222FBF_TYPE    ?= FBF_SCL
     
    117117        rm -f $(DISK_IMAGE)
    118118        ./create_dmg    create $(basename $(DISK_IMAGE))
    119         dd                              if=$(DISK_IMAGE) of=temp.dmg count=65536
    120         mv                              temp.dmg $(DISK_IMAGE)
    121         mmd                     -o -i $(DISK_IMAGE) ::/bin         || true
    122         mmd                     -o -i $(DISK_IMAGE) ::/bin/kernel  || true
    123         mmd                     -o -i $(DISK_IMAGE) ::/bin/user    || true
    124         mmd                     -o -i $(DISK_IMAGE) ::/home        || true
    125         mdir             -/ -b -i $(DISK_IMAGE) ::/
     119        dd              if=$(DISK_IMAGE) of=temp.dmg count=65536
     120        mv              temp.dmg $(DISK_IMAGE)
     121        mmd             -o -i $(DISK_IMAGE) ::/bin            || true
     122        mmd             -o -i $(DISK_IMAGE) ::/bin/kernel     || true
     123        mmd             -o -i $(DISK_IMAGE) ::/bin/user       || true
     124        mmd             -o -i $(DISK_IMAGE) ::/home           || true
     125        mcopy           -o -i $(DISK_IMAGE) Makefile ::/home  || true
     126        mdir                -/ -b -i $(DISK_IMAGE) ::/
    126127
    127128##############################################################
  • trunk/hal/generic/hal_context.h

    r457 r625  
    22 * hal_context.h - Generic Thread Context Access API definition.
    33 *
    4  * Author  Alain Greiner    (2016)
     4 * Author  Alain Greiner    (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3131// and hal_fpu_context_t, defined in hal_context.c file, that are accessed with generic
    3232// void* pointers stored in the thread descriptor.
    33 // - the "hal_context_t" struct is used for the CPU registers values at context switch.
    34 // - the "hal_fpu_context_t" struct is used for the FPU registers when required.
     33// - the "hal_cpu_context_t" struct saves the CPU registers values at context switch.
     34// - the "hal_fpu_context_t" struct saves the FPU registers values at FPU switch.
    3535//////////////////////////////////////////////////////////////////////////////////////////
    3636
     
    5656
    5757/****************************************************************************************
    58  * This function is used to implement the fork() system call.
    59  * 1) It saves in a remote (child) thread CPU context the current CPU registers values.
    60  *    Three slots are not simple copies of the parent registers values :
    61  *    - the thread pointer is set to the child thread local pointer.
    62  *    - the stack pointer is set to parrent SP + (child_base - parent_base).
    63  *    - the status register is set to kernel mode with IRQ disabled.
    64  * 2) It copies the content of the calling (parent) thread kernel_stack,
    65  *    to the remote (child) thread kernel_stack.
      58 * This function is called by the sys_fork() function to complete the fork mechanism.
      59 * It is called by the local parent thread to initialize the CPU context of the remote
     60 * child thread, identified by the <thread_xp> argument.
     61 * It makes three actions:
     62 * 1) It copies the current values of the CPU registers of the core running the parent
     63 *    thread to the remote child CPU context.
     64 * 2) It patches four slots of this remote child CPU context:
     65 *    - the c0_th   slot is set to the child thread descriptor pointer.
     66 *    - the sp_29   slot is set to the child kernel stack pointer.
     67 *    - the c0_sr   slot is set to kernel mode with IRQ disabled.
     68 *    - the c2_ptpr slot is set to the child process GPT value.
     69 * 3) It copies the content of the parent thread kernel_stack, to the child thread
     70 *    kernel_stack, because the COW mechanism is not available on architectures where
     71 *    the data MMU is de-activated in kernel mode.
    6672 ****************************************************************************************
    67  * @ thread_xp  : extended pointer on the remote thread descriptor.
     73 * @ thread_xp  : extended pointer on the child thread descriptor.
    6874 ***************************************************************************************/
    6975void hal_cpu_context_fork( xptr_t    thread_xp );
  • trunk/hal/generic/hal_gpt.h

    r624 r625  
    167167
    168168/****************************************************************************************
    169  * This function is used to implement the "fork" system call: It copies one GPT entry
    170  * identified by the <vpn> argument, from a remote <src_gpt_xp> to a local <dst_gpt>.
     169 * This function is used to implement the "fork" system call: It copies a remote
     170 * source PTE, identified by the <src_gpt_xp> and <src_vpn> arguments, to a local
     171 * destination PTE, identified by the <dst_gpt> and <dst_vpn> arguments.
    171172 * It does nothing if the source PTE is not MAPPED and SMALL.
    172173 * It optionnally activates the "Copy on Write" mechanism: when the <cow> argument is
    173174 * true: the GPT_WRITABLE flag is reset, and the GPT_COW flag is set.
    174  * A new second level PT2(s) is allocated for destination GPT if required.
     175 * A new second level PT2 is allocated for the destination GPT if required.
    175176 * It returns in the <ppn> and <mapped> arguments the PPN value for the copied PTE,
    176177 * and a boolean indicating if the PTE is mapped and small, and was actually copied.
    177178 ****************************************************************************************
    178  * @ dst_gpt      : [in]  local pointer on the local destination GPT.
    179  * @ src_gpt_xp   : [in]  extended pointer on the remote source GPT.
    180  * @ vpn_base     : [in]  vpn defining the PTE to be copied.
     179 * @ dst_gpt      : [in]  local pointer on local destination GPT.
     180 * @ dst_vpn      : [in]  vpn defining the PTE in the destination GPT.
     181 * @ src_gpt_xp   : [in]  extended pointer on remote source GPT.
     182 * @ src_vpn      : [in]  vpn defining the PTE in the source GPT.
    181183 * @ cow          : [in]  activate the COPY-On-Write mechanism if true.
    182184 * @ ppn          : [out] PPN value (only if mapped is true).
     
    185187 ***************************************************************************************/
    186188error_t hal_gpt_pte_copy( gpt_t    * dst_gpt,
     189                          vpn_t      dst_vpn,
    187190                          xptr_t     src_gpt_xp,
    188                           vpn_t      vpn,
     191                          vpn_t      src_vpn,
    189192                          bool_t     cow,
    190193                          ppn_t    * ppn,
  • trunk/hal/generic/hal_special.h

    r624 r625  
    101101 * This function returns the current value of stack pointer from core register.
    102102 ****************************************************************************************/
    103 uint32_t hal_get_sp( void );
    104 
    105 /*****************************************************************************************
    106  * This function returns the current value of the return adddress from core register.
    107  ****************************************************************************************/
    108 uint32_t hal_get_ra( void );
    109 
    110 /*****************************************************************************************
    111  * This function registers a new value in the core stack pointer and returns previous one.
    112  ****************************************************************************************/
    113 inline uint32_t hal_set_sp( void * new_val );
     103reg_t hal_get_sp( void );
    114104
    115105/*****************************************************************************************
    116106 * This function returns the faulty address in case of address exception.
    117107 ****************************************************************************************/
    118 uint32_t hal_get_bad_vaddr( void );
     108reg_t hal_get_bad_vaddr( void );
    119109
    120110/*****************************************************************************************
  • trunk/hal/generic/hal_vmm.h

    r623 r625  
    5959error_t hal_vmm_kernel_update( struct process_s * process );
    6060
     61/****************************************************************************************
     62 * Depending on the hardware architecture, this function displays the current state
     63 * of the VMM of the process identified by the <process> argument.
      64 * It displays all valid GPT entries when the <mapping> argument is true.
     65 ****************************************************************************************
     66 * @ process   : local pointer on user process descriptor.
     67 * @ return 0 if success / return ENOMEM if failure.
     68 ***************************************************************************************/
     69void hal_vmm_display( struct process_s * process,
     70                      bool_t             mapping );
     71
     72
     73
    6174#endif  /* HAL_VMM_H_ */
  • trunk/hal/tsar_mips32/core/hal_context.c

    r570 r625  
    152152    {
    153153        context->a0_04   = (uint32_t)thread->entry_args;
    154         context->sp_29   = (uint32_t)thread->u_stack_base + (uint32_t)thread->u_stack_size - 8;
     154        context->sp_29   = (uint32_t)thread->user_stack_vseg->max - 8;
    155155        context->ra_31   = (uint32_t)&hal_kentry_eret;
    156156        context->c0_epc  = (uint32_t)thread->entry_func;
     
    175175void hal_cpu_context_fork( xptr_t child_xp )
    176176{
    177     // allocate a local CPU context in kernel stack
    178     // It is initialized from local parent context
    179     // and from child specific values, and is copied in
    180     // in the remote child context using a remote_memcpy()
     177    // get pointer on calling thread
     178    thread_t * this = CURRENT_THREAD;
     179
     180    // allocate a local CPU context in parent kernel stack
    181181    hal_cpu_context_t  context;
    182182
    183     // get local parent thread local pointer
     183    // get local parent thread cluster and local pointer
     184    cxy_t      parent_cxy = local_cxy;
    184185    thread_t * parent_ptr = CURRENT_THREAD;
    185186
     
    188189    thread_t * child_ptr = GET_PTR( child_xp );
    189190
    190     // get remote child cpu_context local pointer
     191    // get local pointer on remote child cpu context
    191192    char * child_context_ptr = hal_remote_lpt( XPTR(child_cxy , &child_ptr->cpu_context) );
    192193
    193194    // get local pointer on remote child process
    194     process_t * process = (process_t *)hal_remote_lpt( XPTR(child_cxy , &child_ptr->process) );
     195    process_t * process = hal_remote_lpt( XPTR(child_cxy , &child_ptr->process) );
    195196
    196197    // get ppn of remote child process page table
    197     uint32_t    pt_ppn = hal_remote_l32( XPTR(child_cxy , &process->vmm.gpt.ppn) );
    198 
    199     // save CPU registers in local CPU context
     198    uint32_t pt_ppn = hal_remote_l32( XPTR(child_cxy , &process->vmm.gpt.ppn) );
     199
     200    // get local pointer on parent uzone from parent thread descriptor
     201    uint32_t * parent_uzone = parent_ptr->uzone_current;
     202
     203    // compute  local pointer on child uzone
     204    uint32_t * child_uzone  = (uint32_t *)( (intptr_t)parent_uzone +
     205                                            (intptr_t)child_ptr    -
     206                                            (intptr_t)parent_ptr  );
     207
     208    // update the uzone pointer in child thread descriptor
     209    hal_remote_spt( XPTR( child_cxy , &child_ptr->uzone_current ) , child_uzone );
     210
     211#if DEBUG_HAL_CONTEXT
     212uint32_t cycle = (uint32_t)hal_get_cycles();
     213if( DEBUG_HAL_CONTEXT < cycle )
     214printk("\n[%s] thread[%x,%x] parent_uzone %x / child_uzone %x / cycle %d\n",
     215__FUNCTION__, this->process->pid, this->trdid, parent_uzone, child_uzone, cycle );
     216#endif
     217
     218    // copy parent kernel stack to child thread descriptor
     219    // (this includes the uzone, that is allocated in the kernel stack)
     220    char * parent_ksp = (char *)hal_get_sp();
     221    char * child_ksp  = (char *)((intptr_t)parent_ksp +
     222                                 (intptr_t)child_ptr  -
     223                                 (intptr_t)parent_ptr );
     224
     225    uint32_t size = (uint32_t)parent_ptr + CONFIG_THREAD_DESC_SIZE - (uint32_t)parent_ksp;
     226
     227    hal_remote_memcpy( XPTR( child_cxy , child_ksp ),
     228                       XPTR( local_cxy , parent_ksp ),
     229                       size );
     230
     231#if DEBUG_HAL_CONTEXT
     232cycle = (uint32_t)hal_get_cycles();
     233printk("\n[%s] thread[%x,%x] copied kstack from parent %x to child %x / cycle %d\n",
     234__FUNCTION__, this->process->pid, this->trdid, parent_ptr, child_ptr, cycle );
     235#endif
     236
     237    // patch the user stack pointer slot in the child uzone[UZ_SP]
     238    // because parent and child use the same offset to access the user stack,
     239    // but parent and child do not have the same user stack base address.
     240    uint32_t parent_us_base = parent_ptr->user_stack_vseg->min;
     241    vseg_t * child_us_vseg  = hal_remote_lpt( XPTR( child_cxy , &child_ptr->user_stack_vseg ) );
     242    uint32_t child_us_base  = hal_remote_l32( XPTR( child_cxy , &child_us_vseg->min ) );
     243    uint32_t parent_usp     = parent_uzone[UZ_SP];
     244    uint32_t child_usp      = parent_usp + child_us_base - parent_us_base;
     245
     246    hal_remote_s32( XPTR( child_cxy , &child_uzone[UZ_SP] ) , child_usp );
     247
     248#if DEBUG_HAL_CONTEXT
     249cycle = (uint32_t)hal_get_cycles();
     250printk("\n[%s] thread[%x,%x] parent_usp %x / child_usp %x / cycle %d\n",
     251__FUNCTION__, this->process->pid, this->trdid, parent_usp, child_usp, cycle );
     252#endif
     253
     254    // save current values of CPU registers to local CPU context
    200255    hal_do_cpu_save( &context );
    201256
    202     // From this point, both parent and child threads execute the following code.
    203     // They can be distinguished by the CURRENT_THREAD value, and child will only
    204     // execute it when it is unblocked by parent, after return to sys_fork().
    205     // - parent thread copies user stack, and patch sp_29 / c0_th / C0_sr / c2_ptpr
    206     // - child thread does nothing
    207 
    208     thread_t * current = CURRENT_THREAD;
    209 
    210     if( current == parent_ptr )    // current == parent thread
     257    // From this point, both parent and child can execute the following code,
     258    // but child thread will only execute it after being unblocked by parent thread.
     259    // They can be distinguished by the (CURRENT_THREAD,local_cxy) values,
     260    // and we must re-initialise the calling thread pointer from c0_th register
     261
     262    this = CURRENT_THREAD;
     263
     264    if( (this == parent_ptr) && (local_cxy == parent_cxy) )   // parent thread
    211265    {
    212         // get parent and child stack pointers
    213         char * parent_sp = (char *)context.sp_29;
    214         char * child_sp  = (char *)((intptr_t)parent_sp +
    215                                     (intptr_t)child_ptr -
    216                                     (intptr_t)parent_ptr );
    217  
    218         // patch kernel_stack pointer, current thread, and status slots
    219         context.sp_29   = (uint32_t)child_sp;
     266        // patch 4 slots in the local CPU context: the sp_29 / c0_th / C0_sr / c2_ptpr
     267        // slots are not identical in parent and child
     268        context.sp_29   = context.sp_29 + (intptr_t)child_ptr - (intptr_t)parent_ptr;
    220269        context.c0_th   = (uint32_t)child_ptr;
    221270        context.c0_sr   = SR_SYS_MODE;
    222271        context.c2_ptpr = pt_ppn >> 1;
    223272
    224         // copy local context to remote child context)
     273        // copy this patched context to remote child context
    225274        hal_remote_memcpy( XPTR( child_cxy , child_context_ptr ),
    226275                           XPTR( local_cxy  , &context ) ,
    227276                           sizeof( hal_cpu_context_t ) );
    228 
    229         // copy kernel stack content from local parent thread to remote child thread
    230         uint32_t size = (uint32_t)parent_ptr + CONFIG_THREAD_DESC_SIZE - (uint32_t)parent_sp;
    231         hal_remote_memcpy( XPTR( child_cxy , child_sp ),
    232                            XPTR( local_cxy , parent_sp ),
    233                            size );
     277#if DEBUG_HAL_CONTEXT
     278cycle = (uint32_t)hal_get_cycles();
     279printk("\n[%s] thread[%x,%x] copied CPU context to child / cycle %d\n",
     280__FUNCTION__, this->process->pid, this->trdid, cycle );
     281#endif
     282
     283        // parent thread unblock child thread
     284        thread_unblock( XPTR( child_cxy , child_ptr ) , THREAD_BLOCKED_GLOBAL );
     285
     286#if DEBUG_HAL_CONTEXT
     287cycle = (uint32_t)hal_get_cycles();
     288printk("\n[%s] thread[%x,%x] unblocked child thread / cycle %d\n",
     289__FUNCTION__, this->process->pid, this->trdid, cycle );
     290#endif
     291
    234292    }
    235     else                           // current == child thread
    236     {
    237         assert( (current == child_ptr) , "current = %x / child = %x\n");
    238     }
     293
    239294}  // end hal_cpu_context_fork()
    240295
     
    285340void hal_cpu_context_destroy( thread_t * thread )
    286341{
    287     kmem_req_t  req;
    288 
    289     req.type = KMEM_CPU_CTX;
    290     req.ptr  = thread->cpu_context;
    291     kmem_free( &req );
     342    kmem_req_t          req;
     343
     344    hal_cpu_context_t * ctx = thread->cpu_context;
     345
     346    // release CPU context if required
     347    if( ctx != NULL )
     348    {   
     349        req.type = KMEM_CPU_CTX;
     350        req.ptr  = ctx;
     351        kmem_free( &req );
     352    }
    292353
    293354}  // end hal_cpu_context_destroy()
     
    348409    kmem_req_t  req;
    349410
    350     req.type = KMEM_FPU_CTX;
    351     req.ptr  = thread->fpu_context;
    352     kmem_free( &req );
     411    hal_fpu_context_t * context = thread->fpu_context;
     412
     413    // release FPU context if required
     414    if( context != NULL )
     415    {   
     416        req.type = KMEM_FPU_CTX;
     417        req.ptr  = context;
     418        kmem_free( &req );
     419    }
    353420
    354421}  // end hal_fpu_context_destroy()
  • trunk/hal/tsar_mips32/core/hal_exception.c

    r619 r625  
    189189    if( CURRENT_THREAD->type != THREAD_USER )
    190190    {
    191         printk("\n[KERNEL PANIC] in %s : illegal thread type %s\n",
     191        printk("\n[PANIC] in %s : illegal thread type %s\n",
    192192        __FUNCTION__, thread_type_str(CURRENT_THREAD->type) );
    193193
     
    250250            else if( error == EXCP_USER_ERROR )      // illegal vaddr
    251251            {
    252                 printk("\n[USER ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     252                printk("\n[ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
    253253                "  %s : epc %x / badvaddr %x / is_ins %d\n",
    254254                __FUNCTION__, this->process->pid, this->trdid, local_cxy,
     
    260260            else  // error == EXCP_KERNEL_PANIC 
    261261            {
    262                 printk("\n[KERNEL PANIC] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     262                printk("\n[PANIC] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
    263263                "  %s : epc %x / badvaddr %x / is_ins %d\n",
    264264                __FUNCTION__, this->process->pid, this->trdid, local_cxy,
     
    272272        case MMU_READ_PRIVILEGE_VIOLATION:   // illegal
    273273        {
    274             printk("\n[USER ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     274            printk("\n[ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
    275275            "  %s : epc %x / badvaddr %x / is_ins %d\n",
    276276            __FUNCTION__, this->process->pid, this->trdid, local_cxy,
     
    299299            else if( error == EXCP_USER_ERROR )  // illegal write access
    300300            {
    301                     printk("\n[USER ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     301                    printk("\n[ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
    302302                    "  %s : epc %x / badvaddr %x / is_ins %d\n",
    303303                    __FUNCTION__, this->process->pid, this->trdid, local_cxy,
     
    309309            else   // error == EXCP_KERNEL_PANIC
    310310            {
    311                 printk("\n[KERNEL PANIC] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     311                printk("\n[PANIC] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
    312312                "  %s : epc %x / badvaddr %x / is_ins %d\n",
    313313                __FUNCTION__, this->process->pid, this->trdid, local_cxy,
     
    320320        case MMU_READ_EXEC_VIOLATION:        // user error
    321321        {
    322             printk("\n[USER ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     322            printk("\n[ERROR] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
    323323            "  %s : epc %x / badvaddr %x / is_ins %d\n",
    324324            __FUNCTION__, this->process->pid, this->trdid, local_cxy,
     
    330330        default:                             // this is a kernel error   
    331331        {
    332             printk("\n[KERNEL PANIC] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
     332            printk("\n[PANIC] in %s : thread[%x,%x] on core[%x,%x] / cycle %d\n"
    333333            "  %s : epc %x / badvaddr %x / is_ins %d\n",
    334334            __FUNCTION__, this->process->pid, this->trdid, local_cxy,
     
    346346//////////////////////////////////////////////////////////////////////////////////////////
    347347// @ this     : pointer on faulty thread descriptor.
    348 // @ error    : EXCP_USER_ERROR or EXCP_KERNEL_PANIC
    349 //////////////////////////////////////////////////////////////////////////////////////////
    350 static void hal_exception_dump( thread_t * this,
    351                                 error_t    error )
     348//////////////////////////////////////////////////////////////////////////////////////////
     349static void hal_exception_dump( thread_t * this )
    352350{
    353351    core_t    * core    = this->core;
     
    366364    remote_busylock_acquire( lock_xp );
    367365
    368     if( error == EXCP_USER_ERROR )
    369     {
    370         nolock_printk("\n=== USER ERROR / thread(%x,%x) / core[%d] / cycle %d ===\n",
    371         process->pid, this->trdid, core->lid, (uint32_t)hal_get_cycles() );
    372     }
    373     else
    374     {
    375         nolock_printk("\n=== KERNEL PANIC / thread(%x,%x) / core[%d] / cycle %d ===\n",
    376         process->pid, this->trdid, core->lid, (uint32_t)hal_get_cycles() );
    377     }
     366    nolock_printk("\n=== thread(%x,%x) / core[%d] / cycle %d ===\n",
     367    process->pid, this->trdid, core->lid, (uint32_t)hal_get_cycles() );
    378368
    379369        nolock_printk("busylocks = %d / blocked_vector = %X / flags = %X\n\n",
     
    507497        if( error == EXCP_USER_ERROR )          //  user error => kill user process
    508498        {
    509         hal_exception_dump( this , error );
     499        hal_exception_dump( this );
    510500
    511501        sys_exit( EXIT_FAILURE );
     
    513503    else if( error == EXCP_KERNEL_PANIC )   // kernel error => kernel panic
    514504    {
    515         hal_exception_dump( this , error );
     505        hal_exception_dump( this );
    516506
    517507        hal_core_sleep();
  • trunk/hal/tsar_mips32/core/hal_gpt.c

    r624 r625  
    823823///////////////////////////////////////////
    824824error_t hal_gpt_pte_copy( gpt_t  * dst_gpt,
     825                          vpn_t    dst_vpn,
    825826                          xptr_t   src_gpt_xp,
    826                           vpn_t    vpn,
     827                          vpn_t    src_vpn,
    827828                          bool_t   cow,
    828829                          ppn_t  * ppn,
    829830                          bool_t * mapped )
    830831{
    831     uint32_t     ix1;       // index in PT1
    832     uint32_t     ix2;       // index in PT2
     832    uint32_t     src_ix1;   // index in SRC PT1
     833    uint32_t     src_ix2;   // index in SRC PT2
     834
     835    uint32_t     dst_ix1;   // index in DST PT1
     836    uint32_t     dst_ix2;   // index in DST PT2
    833837
    834838    cxy_t        src_cxy;   // SRC GPT cluster
     
    862866thread_t * this  = CURRENT_THREAD;
    863867if( DEBUG_HAL_GPT_COPY < cycle )
    864 printk("\n[%s] : thread[%x,%x] enter / vpn %x / src_cxy %x / dst_cxy %x / cycle %d\n",
    865 __FUNCTION__, this->process->pid, this->trdid, vpn, src_cxy, local_cxy, cycle );
     868printk("\n[%s] : thread[%x,%x] enter / src_cxy %x / dst_cxy %x / cycle %d\n",
     869__FUNCTION__, this->process->pid, this->trdid, src_cxy, local_cxy, cycle );
    866870#endif
    867871
     
    878882    assert( (dst_pt1 != NULL) , "dst_pt1 does not exist\n");
    879883
    880     ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
    881     ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
     884    // compute SRC indexes
     885    src_ix1 = TSAR_MMU_IX1_FROM_VPN( src_vpn );
     886    src_ix2 = TSAR_MMU_IX2_FROM_VPN( src_vpn );
     887
     888    // compute DST indexes
     889    dst_ix1 = TSAR_MMU_IX1_FROM_VPN( dst_vpn );
     890    dst_ix2 = TSAR_MMU_IX2_FROM_VPN( dst_vpn );
    882891
    883892    // get src_pte1
    884     src_pte1 = hal_remote_l32( XPTR( src_cxy , &src_pt1[ix1] ) );
     893    src_pte1 = hal_remote_l32( XPTR( src_cxy , &src_pt1[src_ix1] ) );
    885894
    886895    // do nothing if src_pte1 not MAPPED or not SMALL
     
    888897    {
    889898        // get dst_pt1 entry
    890         dst_pte1 = dst_pt1[ix1];
     899        dst_pte1 = dst_pt1[dst_ix1];
    891900
    892901        // map dst_pte1 if required
     
    915924
    916925            // register it in DST_GPT
    917             dst_pt1[ix1] = dst_pte1;
     926            dst_pt1[dst_ix1] = dst_pte1;
    918927        }
    919928
     
    927936
    928937        // get attr and ppn from SRC_PT2
    929         src_pte2_attr = hal_remote_l32( XPTR( src_cxy , &src_pt2[2 * ix2]     ) );
    930         src_pte2_ppn  = hal_remote_l32( XPTR( src_cxy , &src_pt2[2 * ix2 + 1] ) );
     938        src_pte2_attr = hal_remote_l32( XPTR( src_cxy , &src_pt2[2 * src_ix2]     ) );
     939        src_pte2_ppn  = hal_remote_l32( XPTR( src_cxy , &src_pt2[2 * src_ix2 + 1] ) );
    931940
    932941        // do nothing if src_pte2 not MAPPED
     
    934943        {
    935944            // set PPN in DST PTE2
    936             dst_pt2[2*ix2+1] = src_pte2_ppn;
     945            dst_pt2[2 * dst_ix2 + 1] = src_pte2_ppn;
    937946                       
    938947            // set attributes in DST PTE2         
    939948            if( cow && (src_pte2_attr & TSAR_MMU_WRITABLE) )
    940949            {
    941                 dst_pt2[2*ix2] = (src_pte2_attr | TSAR_MMU_COW) & (~TSAR_MMU_WRITABLE);
     950                dst_pt2[2 * dst_ix2] = (src_pte2_attr | TSAR_MMU_COW) & (~TSAR_MMU_WRITABLE);
    942951            }
    943952            else
    944953            {
    945                 dst_pt2[2*ix2] = src_pte2_attr;
     954                dst_pt2[2 * dst_ix2] = src_pte2_attr;
    946955            }
    947956
     
    953962cycle = (uint32_t)hal_get_cycles;
    954963if( DEBUG_HAL_GPT_COPY < cycle )
    955 printk("\n[%s] : thread[%x,%x] exit / copy done for vpn %x / cycle %d\n",
    956 __FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
     964printk("\n[%s] : thread[%x,%x] exit / copy done for src_vpn %x / dst_vpn %x / cycle %d\n",
     965__FUNCTION__, this->process->pid, this->trdid, src_vpn, dst_vpn, cycle );
    957966#endif
    958967
     
    970979cycle = (uint32_t)hal_get_cycles;
    971980if( DEBUG_HAL_GPT_COPY < cycle )
    972 printk("\n[%s] : thread[%x,%x] exit / nothing done for vpn %x / cycle %d\n",
    973 __FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
     981printk("\n[%s] : thread[%x,%x] exit / nothing done / cycle %d\n",
     982__FUNCTION__, this->process->pid, this->trdid, cycle );
    974983#endif
    975984
  • trunk/hal/tsar_mips32/core/hal_kentry.S

    r438 r625  
    44 * AUthors   Ghassan Almaless (2007,2008,2009,2010,2011,2012)
    55 *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2017)
     6 *           Alain Greiner (2016,2017,2018,2019)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    8787#------------------------------------------------------------------------------------
    8888# Kernel Entry point for Interrupt / Exception / Syscall
    89 # The c2_dext and c2_iext CP2 registers must have been previously set
    90 # to "local_cxy", because the kernel run with MMU desactivated.
     89# The c2_dext CP2 register must have been previously set
     90# to "local_cxy", because the kernel run with data MMU desactivated.
    9191#------------------------------------------------------------------------------------
    9292
     
    9696        andi    $26,    $26,  0x10          # test User Mode bit
    9797        beq     $26,    $0,       kernel_mode   # jump if core already in kernel
    98         ori     $27,    $0,       0x3           # $27 <= code for MMU OFF
     98        ori     $27,    $0,       0xB           # $27 <= code data MMU OFF
    9999       
    100100#------------------------------------------------------------------------------------
     
    102102# to handle a syscall, an interrupt, or an user exception.
    103103# - save current c2_mode in $26.
    104 # - set MMU OFF.
     104# - set data MMU OFF.
    105105# - copy user stack pointer in $27 to be saved in uzone.
    106 # - set kernel stack pointer in $29 == top_kernel_stack(this).
     106# - set kernel stack pointer in $29 (kernel stack empty at first entry).
    107107
    108108user_mode:
    109109
    110110    mfc2    $26,    $1                  # $26 <= c2_mode
    111         mtc2    $27,    $1                              # set MMU OFF
     111        mtc2    $27,    $1                              # set data MMU OFF
    112112    move    $27,    $29                 # $27 <= user stack pointer
    113113        mfc0    $29,    $4,   2             # get pointer on thread descriptor from c0_th
     
    121121# after a syscall, to handle an interrupt, or to handle a non-fatal exception.
    122122# - save current c2_mode in $26.
    123 # - set MMU OFF.
     123# - set data MMU OFF.
    124124# - copy current kernel stack pointer in $27.
    125125
     
    127127
    128128    mfc2    $26,    $1                  # $26 <= c2_mode
    129         mtc2    $27,    $1                              # set MMU OFF
     129        mtc2    $27,    $1                              # set data MMU OFF
    130130    move    $27,    $29                 # $27 <= current kernel stack pointer
    131131
     
    133133# This code is executed in both modes (user or kernel):
    134134# The assumptions are:
    135 # - c2_mode contains the MMU OFF value.
     135# - c2_mode contains the data MMU OFF value.
    136136# - $26 contains the previous c2_mode value.
    137137# - $27 contains the previous sp value (can be usp or ksp).
     
    139139# We execute the following actions:
    140140# - decrement $29 to allocate an uzone in kernel stack
    141 # - save relevant GPR, CP0 and CP2 registers to uzone.
    142 # - set the SR in kernel mode: IRQ disabled, clear exl.
     141# - save GPR, CP0 and CP2 registers to uzone.
     142# - set the SR in kernel mode: IRQ disabled, clear EXL.
    143143
    144144unified_mode:
     
    195195    sw      $26,    (UZ_MODE*4)($29)    # save previous c2_mode (can be user or kernel)
    196196
    197     mfc0    $3,     $12
     197    mfc0    $3,     $12                 # $3 <= c0_sr
    198198        srl         $3,     $3,   5
    199199        sll     $3,         $3,   5                 # reset 5 LSB bits
     
    216216    nop
    217217    move    $4,     $2
     218    jal     putd
     219    nop
     220    la      $4,     msg_crlf
     221    jal     puts
     222    nop   
     223    # display saved CR value
     224    la      $4,     msg_cr
     225    jal     puts
     226    nop
     227    lw      $4,         (UZ_CR*4)($29)
    218228    jal     putx
    219229    nop
     
    286296   
    287297#------------------------------------------------------------------------------------
    288 # This code handle the uzone pointers stack, and calls the relevant
     298# This code handles the two-slots uzone pointers stack, and calls the relevant
    289299# Interrupt / Exception / Syscall handler, depending on XCODE in CP0_CR.
    290300# Both the hal_do_syscall() and the hal_do_exception() functions use
     
    338348# - All registers saved in the uzone are restored, using the pointer on uzone,
    339349#   that is contained in $29.
    340 # - The "uzone" field in thread descriptor, that has beeen modified at kernel entry
    341 #   is restored from value contained in the uzone[UZ_SP] slot.
     350# - The "current_uzone" pointer in thread descriptor, that has been modified at
     351#   kernel entry is restored from value contained in the uzone[UZ_SP] slot.
    342352# -----------------------------------------------------------------------------------
    343353
     
    365375    nop
    366376    move    $4,     $2
     377    jal     putd
     378    nop
     379    la      $4,     msg_crlf
     380    jal     puts
     381    nop   
     382    # display saved CR value
     383    la      $4,     msg_cr
     384    jal     puts
     385    nop
     386    lw      $4,         (UZ_CR*4)($29)
    367387    jal     putx
    368388    nop
     
    479499
    480500        lw          $26,    (UZ_MODE*4)($27)   
    481     mtc2    $26,    $1                  # restore CP2_MODE from uzone
     501    mtc2    $26,    $1                  # restore c2_mode from uzone
    482502
    483503# -----------------------------------------------------------------------------------
     
    494514    .section .kdata
    495515
     516msg_cr:
     517    .align 2
     518    .asciiz "- UZ_CR   = "
    496519msg_sp:
    497520    .align 2
  • trunk/hal/tsar_mips32/core/hal_kentry.h

    r481 r625  
    11/*
    2  * hal_kentry.h - MIPS32 registers mnemonics
     2 * hal_kentry.h - uzone definition
    33 *
    4  * Copyright (c) 2008,2009,2010,2011,2012 Ghassan Almaless
    5  * Copyright (c) 2011,2012 UPMC Sorbonne Universites
     4 * Author     Alain Greiner (2016,2017,2018,2019)
     5 *
     6 * Copyright (c) UPMC Sorbonne Universites
    67 *
    7  * This file is part of ALMOS-kernel.
     8 * This file is part of ALMOS-MKH.
    89 *
    9  * ALMOS-kernel is free software; you can redistribute it and/or modify it
     10 * ALMOS-MKH is free software; you can redistribute it and/or modify it
    1011 * under the terms of the GNU General Public License as published by
    1112 * the Free Software Foundation; version 2.0 of the License.
    1213 *
    13  * ALMOS-kernel is distributed in the hope that it will be useful, but
     14 * ALMOS-MKH is distributed in the hope that it will be useful, but
    1415 * WITHOUT ANY WARRANTY; without even the implied warranty of
    1516 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     
    2829// a fixed size array of 32 bits integers, used by the kentry function to save/restore
    2930// the MIPS32 CPU registers, at each exception / interruption / syscall.
    30 // It also defines several initial values for the SR register.
    3131//
    3232// This file is included in the hal_kentry.S, hal_syscall.c, hal_exception.c,
     
    3636
    3737/****************************************************************************************
    38  * This structure defines the cpu_uzone dynamically allocated in the kernel stack
    39  * by the hal_kentry assembly code for the TSAR_MIPS32 architecture.
     38 * This structure defines the "uzone" dynamically allocated in the kernel stack
     39 * by the hal_kentry assembly code to save the MIPS32 registers each time a core
     40 * enters the kernel to handle an interrupt, exception, or syscall.
      41 * These defines are specific to the TSAR_MIPS32 architecture.
     42 *
    4043 * WARNING : It is replicated in hal_kentry.S file.
    4144 ***************************************************************************************/
     
    8790 * The hal_kentry_enter() function is the unique kernel entry point in case of
    8891 * exception, interrupt, or syscall for the TSAR_MIPS32 architecture. 
    89  * It can be executed by a core in user mode (in case of exception or syscall),
    90  * or by a core already in kernel mode (in case of interrupt).
    91  *
     92 * It can be executed by a core in user mode or by a core already in kernel mode
     93 * (in case of interrupt or non fatal exception).
    9294 * In both cases it allocates an "uzone" space in the kernel stack to save the
    9395 * CPU registers values, desactivates the MMU, and calls the relevant handler
     
    9597 *
    9698 * After handler execution, it restores the CPU context from the uzone and jumps
    97  * to address contained in EPC calling hal_kentry_eret()
     99 * to address contained in EPC calling the hal_kentry_eret() function.
    98100 ************************************************************************************/
    99101void hal_kentry_enter( void );
     
    101103/*************************************************************************************
    102104 * The hal_kentry_eret() function contains only the assembly "eret" instruction,
    103  * that reset the EXL bit in the c0_sr register, and jump to the address
     105 * that resets the EXL bit in the c0_sr register, and jumps to the address
    104106 * contained in the c0_epc register.
    105107 * ************************************************************************************/
  • trunk/hal/tsar_mips32/core/hal_remote.c

    r610 r625  
    381381    uint32_t scxy = (uint32_t)GET_CXY( src );
    382382
    383 /*
    384 if( local_cxy == 1 )
    385 printk("\n@@@ %s : scxy %x / sptr %x / dcxy %x / dptr %x\n",
    386 __FUNCTION__, scxy, sptr, dcxy, dptr );
    387 */
    388383    hal_disable_irq( &save_sr );
    389384
  • trunk/hal/tsar_mips32/core/hal_special.c

    r624 r625  
    4040extern cxy_t local_cxy;
    4141extern void  hal_kentry_enter( void );
     42
     43////////////////////////////////////////////////////////////////////////////////
     44// For the TSAR architecture, this function registers the address of the
     45// hal_kentry_enter() function in the MIPS32 cp0_ebase register.
     46////////////////////////////////////////////////////////////////////////////////
     47void hal_set_kentry( void )
     48{
     49    uint32_t kentry = (uint32_t)(&hal_kentry_enter);
     50
     51    asm volatile("mtc0   %0,  $15,  1" : : "r" (kentry) );
     52}
    4253
    4354/////////////////////////////////////////////////////////////////////////////////
     
    4859void hal_mmu_init( gpt_t * gpt )
    4960{
    50 
    51     // set PT1 base address in mmu_ptpr register
     61    // set PT1 base address in cp2_ptpr register
    5262    uint32_t ptpr = (((uint32_t)gpt->ptr) >> 13) | (local_cxy << 19);
    5363    asm volatile ( "mtc2   %0,   $0         \n" : : "r" (ptpr) );
    5464
    55     // set ITLB | ICACHE | DCACHE bits in mmu_mode register
     65    // set ITLB | ICACHE | DCACHE bits in cp2_mode register
    5666    asm volatile ( "ori    $26,  $0,  0xB   \n"
    5767                   "mtc2   $26,  $1         \n" );
     
    5969
    6070////////////////////////////////////////////////////////////////////////////////
    61 // For the TSAR architecture, this function registers the address of the
    62 // hal_kentry_enter() function in the MIPS32 cp0_ebase register.
    63 ////////////////////////////////////////////////////////////////////////////////
    64 void hal_set_kentry( void )
    65 {
    66     uint32_t kentry = (uint32_t)(&hal_kentry_enter);
    67 
    68     asm volatile("mtc0   %0,  $15,  1" : : "r" (kentry) );
    69 }
    70 
    71 ////////////////////////////////
     71// For the TSAR architecture, this function returns the current value
     72// of the 32 bits c0_sr register
     73////////////////////////////////////////////////////////////////////////////////
     74inline reg_t hal_get_sr( void )
     75{
     76    reg_t sr;
     77
     78        asm volatile ("mfc0    %0,    $12" : "=&r" (sr));
     79
     80        return sr;
     81}
     82
     83////////////////////////////////////////////////////////////////////////////////
     84// For the TSAR architecture, this function returns the 10 LSB bits
      85// of the 32 bits c0_ebase register : X (4 bits) | Y (4 bits) | LID (2 bits)
     86////////////////////////////////////////////////////////////////////////////////
    7287inline gid_t hal_get_gid( void )
    7388{
     
    7994}
    8095
    81 ///////////////////////////////////
     96////////////////////////////////////////////////////////////////////////////////
     97// For the TSAR architecture, this function returns the current value
     98// of the 32 bits c0_count cycle counter.
     99////////////////////////////////////////////////////////////////////////////////
    82100inline reg_t hal_time_stamp( void )
    83101{
     
    87105
    88106    return count;
    89 }
    90 
    91 ///////////////////////////////
    92 inline reg_t hal_get_sr( void )
    93 {
    94     reg_t sr;
    95 
    96         asm volatile ("mfc0    %0,    $12" : "=&r" (sr));
    97 
    98         return sr;
    99107}
    100108
     
    131139}
    132140
    133 ///////////////////////////////////////////////////////
     141////////////////////////////////////////////////////////////////////////////////
     142// For the TSAR architecture, this function returns the current value
     143// of the 32 bits c0_th register.
     144////////////////////////////////////////////////////////////////////////////////
    134145inline struct thread_s * hal_get_current_thread( void )
    135146{
     
    141152}
    142153
    143 ///////////////////////////////////////////////////////
     154////////////////////////////////////////////////////////////////////////////////
     155// For the TSAR architecture, this function sets a new value
     156// to the 32 bits c0_th register.
     157////////////////////////////////////////////////////////////////////////////////
    144158void hal_set_current_thread( struct thread_s * thread )
    145159{
     
    182196}
    183197
    184 ///////////////////////////
    185 uint32_t hal_get_sp( void )
     198////////////////////////////////////////////////////////////////////////////////
     199// For the TSAR architecture, this function returns the current value
     200// of the 32 bits sp_29 register.
     201////////////////////////////////////////////////////////////////////////////////
     202reg_t hal_get_sp( void )
    186203{
    187204        register uint32_t sp;
     
    190207 
    191208        return sp;
    192 }
    193 
    194 /////////////////////////////////////
    195 uint32_t hal_set_sp( void * new_val )
    196 {
    197         register uint32_t sp;
    198  
    199         asm volatile
    200         ( "or    %0,   $0,      $29   \n"
    201           "or    $29,  $0,      %1    \n"
    202           : "=&r" (sp) : "r" (new_val)  );
    203  
    204         return sp;
    205 }
    206 
    207 ///////////////////////////
    208 uint32_t hal_get_ra( void )
    209 {
    210         register uint32_t ra;
    211  
    212         asm volatile ("or    %0,   $0,   $31" : "=&r" (ra));
    213  
    214         return ra;
    215209}
    216210
  • trunk/hal/tsar_mips32/core/hal_switch.S

    r457 r625  
    22 * hal_witch.S - CPU context switch function for TSAR-MIPS32
    33 *
    4  * Author  Alain Greiner    (2016)
     4 * Author  Alain Greiner    (2016,2017,2018,2019)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
  • trunk/hal/tsar_mips32/core/hal_syscall.c

    r481 r625  
    2929#include <hal_kentry.h>
    3030
    31 /////////////////////
     31///////////////////////////
    3232void hal_do_syscall( void )
    3333{
     
    6363                         service_num );
    6464
    65     // get pointer on exit_thread uzone, because
    66     // exit_thread can be different from enter_thread
     65    // get pointer on exit_thread uzone, because exit thread
     66    // can be different from enter_thread for a fork syscall
    6767    this       = CURRENT_THREAD;
    6868    exit_uzone = (uint32_t *)this->uzone_current;
  • trunk/hal/tsar_mips32/core/hal_uspace.c

    r610 r625  
    249249        ".set noreorder             \n"
    250250        "move   $13,   %1           \n"   /* $13 <= str                     */
    251         "mfc2   $15,   $1           \n"   /* $15 <= DTLB and ITLB off       */
     251        "mfc2   $15,   $1           \n"   /* $15 <= MMU_MODE (DTLB off)     */
    252252        "ori    $14,   $15,  0x4    \n"   /* $14 <= mode DTLB on            */
    253253        "1:                         \n"
    254254        "mtc2   $14,   $1                       \n"   /* set DTLB on                    */
    255         "lb         $12,   0($13)       \n"   /* read char from user space      */
     255        "lb         $12,   0($13)       \n"   /* $12 <= one byte from u_space   */
    256256        "mtc2   $15,   $1                       \n"   /* set DTLB off                   */
    257257        "addi   $13,   $13,  1      \n"   /* increment address              */
  • trunk/hal/tsar_mips32/core/hal_vmm.c

    r624 r625  
    4848// This function is called by the process_zero_init() function during kernel_init.
    4949// It initializes the VMM of the kernel proces_zero (containing all kernel threads)
    50 // in the local cluster: it registers one "kcode" vseg in kernel VSL, and registers
    51 // one big page in slot[0] of kernel GPT.
     50// in the local cluster: For TSAR, it registers one "kcode" vseg in kernel VSL,
     51// and registers one big page in slot[0] of kernel GPT.
    5252//////////////////////////////////////////////////////////////////////////////////////////
    5353error_t  hal_vmm_kernel_init( boot_info_t * info )
     
    119119    return 0;
    120120
    121 }  // end hal_kernel_vmm_init()
    122 
    123 //////////////////////////////////////////////////////////////////////////////////////////
    124 // This function is called by the vmm_init() function to update the VMM of an user
    125 // process identified by the <process> argument.
    126 // It registers in the user VSL the "kcode" vseg, registered in the local kernel VSL,
    127 // and register in the user GPT the big page[0] mapped in the local kernel GPT.
     121}  // end hal_vmm_kernel_init()
     122
     123//////////////////////////////////////////////////////////////////////////////////////////
     124// This function registers in the VMM of a user process identified by the <process>
     125// argument all required kernel vsegs.
     126// For TSAR, it registers in the user VSL the "kcode" vseg, from the local kernel VSL,
     127// and registers in the user GPT the big page[0] from the local kernel GPT.
    128128//////////////////////////////////////////////////////////////////////////////////////////
    129129error_t hal_vmm_kernel_update( process_t * process )
    130130{
    131     error_t error;
     131    error_t  error;
    132132    uint32_t attr;
    133133    uint32_t ppn;
    134134
     135    // get cluster identifier
     136    cxy_t cxy = local_cxy;
     137
    135138#if DEBUG_HAL_VMM
    136139thread_t * this = CURRENT_THREAD;
    137140printk("\n[%s] thread[%x,%x] enter in cluster %x \n",
    138 __FUNCTION__, this->process->pid, this->trdid, local_cxy );
     141__FUNCTION__, this->process->pid, this->trdid, cxy );
     142hal_vmm_display( &process_zero , true );
    139143hal_vmm_display( process , true );
    140 hal_vmm_display( &process_zero , true );
    141 #endif
    142 
    143     // get cluster identifier
    144     cxy_t cxy = local_cxy;
    145 
    146     // get extended pointer on kernel GPT
     144#endif
     145
     146    // get extended pointer on local kernel GPT
    147147    xptr_t k_gpt_xp = XPTR( cxy , &process_zero.vmm.gpt );
    148148
     
    212212                      bool_t      mapping )
    213213{
     214    // get pointer on process VMM
    214215    vmm_t * vmm = &process->vmm;
    215     gpt_t * gpt = &vmm->gpt;
    216216
    217217    // get pointers on TXT0 chdev
     
    220220    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    221221
    222     // get extended pointer on remote TXT0 lock
    223     xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    224 
    225     // get locks protecting the VSL and the GPT
    226     remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->vsegs_lock ) );
    227     remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->gpt_lock ) );
    228 
    229     // get TXT0 lock
    230     remote_busylock_acquire( lock_xp );
    231 
    232     nolock_printk("\n***** VSL and GPT for process %x in cluster %x\n",
    233     process->pid , local_cxy );
    234 
    235     // scan the list of vsegs
    236     xptr_t         root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    237     xptr_t         iter_xp;
    238     xptr_t         vseg_xp;
    239     vseg_t       * vseg;
    240     XLIST_FOREACH( root_xp , iter_xp )
    241     {
    242         vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    243         vseg    = GET_PTR( vseg_xp );
    244 
    245         nolock_printk(" - %s : base = %X / size = %X / npages = %d\n",
    246         vseg_type_str( vseg->type ) , vseg->min , vseg->max - vseg->min , vseg->vpn_size );
    247 
    248         if( mapping )
     222    // build extended pointers on TXT0 lock, GPT lock and VSL lock
     223    xptr_t  txt_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     224    xptr_t  vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
     225    xptr_t  gpt_lock_xp = XPTR( local_cxy , &vmm->gpt_lock );
     226
     227    // get root of vsegs list
     228    xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
     229
     230    // get the locks protecting TXT0, VSL, and GPT
     231    remote_rwlock_rd_acquire( vsl_lock_xp );
     232    remote_rwlock_rd_acquire( gpt_lock_xp );
     233    remote_busylock_acquire( txt_lock_xp );
     234
     235    nolock_printk("\n***** VSL and GPT for process %x in cluster %x / PT1 = %x\n",
     236    process->pid , local_cxy , vmm->gpt.ptr );
     237
     238    if( xlist_is_empty( root_xp ) )
     239    {
     240        nolock_printk("   ... no vsegs registered\n");
     241    }
     242    else  // scan the list of vsegs
     243    {
     244        xptr_t         iter_xp;
     245        xptr_t         vseg_xp;
     246        vseg_t       * vseg;
     247
     248        XLIST_FOREACH( root_xp , iter_xp )
    249249        {
    250             vpn_t    vpn     = vseg->vpn_base;
    251             vpn_t    vpn_max = vpn + vseg->vpn_size;
    252             ppn_t    ppn;
    253             uint32_t attr;
    254 
    255             while( vpn < vpn_max )
     250            vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
     251            vseg    = GET_PTR( vseg_xp );
     252
     253            nolock_printk(" - %s : base = %X / size = %X / npages = %d\n",
     254            vseg_type_str(vseg->type), vseg->min, vseg->max - vseg->min, vseg->vpn_size );
     255
     256            if( mapping ) 
    256257            {
    257                 hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );
    258 
    259                 if( attr & GPT_MAPPED )
     258                vpn_t    vpn     = vseg->vpn_base;
     259                vpn_t    vpn_max = vpn + vseg->vpn_size;
     260                ppn_t    ppn;
     261                uint32_t attr;
     262
     263                while( vpn < vpn_max )   // scan the PTEs
    260264                {
    261                     if( attr & GPT_SMALL )
     265                    hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn );
     266
     267                    if( attr & GPT_MAPPED )
    262268                    {
    263                         nolock_printk("    . SMALL : vpn = %X / attr = %X / ppn = %X\n",
    264                         vpn , attr , ppn );
    265                         vpn++;
     269                        if( attr & GPT_SMALL )
     270                        {
     271                            nolock_printk("    . SMALL : vpn = %X / attr = %X / ppn = %X\n",
     272                            vpn , attr , ppn );
     273                            vpn++;
     274                        }
     275                        else
     276                        {
     277                            nolock_printk("    . BIG   : vpn = %X / attr = %X / ppn = %X\n",
     278                            vpn , attr , ppn );
     279                            vpn += 512;
     280                        }
    266281                    }
    267282                    else
    268283                    {
    269                         nolock_printk("    . BIG   : vpn = %X / attr = %X / ppn = %X\n",
    270                         vpn , attr , ppn );
    271                         vpn += 512;
     284                        vpn++;
    272285                    }
    273                 }
    274                 else
    275                 {
    276                     vpn++;
    277286                }
    278287            }
     
    280289    }
    281290
    282     // release TXT0 lock
    283     remote_busylock_release( lock_xp );
    284 
    285     // release the VSK and GPT locks
    286     remote_rwlock_rd_release( XPTR( local_cxy , &vmm->vsegs_lock ) );
    287     remote_rwlock_rd_release( XPTR( local_cxy , &vmm->gpt_lock ) );
     291    // release locks
     292    remote_busylock_release( txt_lock_xp );
     293    remote_rwlock_rd_release( gpt_lock_xp );
     294    remote_rwlock_rd_release( vsl_lock_xp );
    288295
    289296}  // hal_vmm_display()
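
For readers following the hunk above: hal_vmm_display() now walks the vseg list and, for each vseg, steps through its virtual page numbers, advancing by one VPN for a mapped SMALL (4 Kbytes) page and by 512 VPNs for a BIG (2 Mbytes) page. The fragment below is a minimal, self-contained model of that stepping logic only; the flag values and the fake_pte_t array are simplified stand-ins for the real generic page table and hal_gpt_get_pte(), not kernel types.

    #include <stdio.h>
    #include <stdint.h>

    #define GPT_MAPPED  0x1                       /* stand-in for the real GPT_MAPPED flag */
    #define GPT_SMALL   0x2                       /* stand-in for the real GPT_SMALL  flag */

    typedef struct { uint32_t attr; uint32_t ppn; } fake_pte_t;

    static fake_pte_t gpt[1024];                  /* one fake PTE per VPN, for the example */

    /* scan [vpn_base , vpn_base + vpn_size[ the way hal_vmm_display() does */
    static void scan_vseg( uint32_t vpn_base , uint32_t vpn_size )
    {
        uint32_t vpn     = vpn_base;
        uint32_t vpn_max = vpn_base + vpn_size;

        while( vpn < vpn_max )
        {
            uint32_t attr = gpt[vpn].attr;

            if( attr & GPT_MAPPED )
            {
                if( attr & GPT_SMALL )            /* 4 Kbytes page  => next VPN      */
                {
                    printf("    . SMALL : vpn = %x / ppn = %x\n", vpn , gpt[vpn].ppn );
                    vpn++;
                }
                else                              /* 2 Mbytes page  => skip 512 VPNs */
                {
                    printf("    . BIG   : vpn = %x / ppn = %x\n", vpn , gpt[vpn].ppn );
                    vpn += 512;
                }
            }
            else                                  /* unmapped VPN   => next VPN      */
            {
                vpn++;
            }
        }
    }

    int main( void )
    {
        gpt[0].attr = GPT_MAPPED | GPT_SMALL;  gpt[0].ppn = 0x100;
        gpt[1].attr = GPT_MAPPED;              gpt[1].ppn = 0x200;   /* big page */
        scan_vseg( 0 , 600 );
        return 0;
    }
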
  • trunk/hal/tsar_mips32/drivers/soclib_tty.c

    r619 r625  
    346346                owner_pid = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->pid ) );
    347347
    348                 // block TXT owner process only if it is not the INIT process
    349                 if( owner_pid != 1 )
    350                 {
    351                     // get parent process descriptor pointers
    352                     parent_xp  = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->parent_xp ) );
    353                     parent_cxy = GET_CXY( parent_xp );
    354                     parent_ptr = GET_PTR( parent_xp );
    355 
    356                     // get pointers on the parent process main thread
    357                     parent_main_ptr = hal_remote_lpt(XPTR(parent_cxy,&parent_ptr->th_tbl[0]));
    358                     parent_main_xp  = XPTR( parent_cxy , parent_main_ptr );
    359 
    360                     // transfer TXT ownership
    361                     process_txt_transfer_ownership( owner_xp );
    362 
    363                     // block all threads in all clusters, but the main thread
    364                     process_sigaction( owner_pid , BLOCK_ALL_THREADS );
    365 
    366                     // block the main thread
    367                     xptr_t main_xp = XPTR( owner_cxy , &owner_ptr->th_tbl[0] );
    368                     thread_block( main_xp , THREAD_BLOCKED_GLOBAL );
    369 
    370                     // atomically update owner process termination state
    371                     hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
    372                                           PROCESS_TERM_STOP );
    373 
    374                     // unblock the parent process main thread
    375                     thread_unblock( parent_main_xp , THREAD_BLOCKED_WAIT );
    376 
    377                     return;
    378                 }
     348// TXT owner cannot be the INIT process
     349assert( (owner_pid != 1) , "INIT process cannot be the TXT owner" );
     350
     351                // get parent process descriptor pointers
     352                parent_xp  = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->parent_xp ) );
     353                parent_cxy = GET_CXY( parent_xp );
     354                parent_ptr = GET_PTR( parent_xp );
     355
     356                // get pointers on the parent process main thread
     357                parent_main_ptr = hal_remote_lpt(XPTR(parent_cxy,&parent_ptr->th_tbl[0]));
     358                parent_main_xp  = XPTR( parent_cxy , parent_main_ptr );
     359
     360                // transfer TXT ownership
     361                process_txt_transfer_ownership( owner_xp );
     362
      363                // mark for block all threads in all clusters, but the main thread
     364                process_sigaction( owner_pid , BLOCK_ALL_THREADS );
     365
     366                // block the main thread
     367                xptr_t main_xp = XPTR( owner_cxy , &owner_ptr->th_tbl[0] );
     368                thread_block( main_xp , THREAD_BLOCKED_GLOBAL );
     369
     370                // atomically update owner process termination state
     371                hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
     372                                      PROCESS_TERM_STOP );
     373
     374                // unblock the parent process main thread
     375                thread_unblock( parent_main_xp , THREAD_BLOCKED_WAIT );
     376
     377                return;
    379378            }
    380379
     
    390389                owner_xp  = process_txt_get_owner( channel );
    391390
    392                 // check process exist
    393                 assert( (owner_xp != XPTR_NULL) ,
    394                 "TXT owner process not found\n" );
      391// check process exists
     392assert( (owner_xp != XPTR_NULL) , "TXT owner process not found\n" );
    395393
    396394                // get relevant infos on TXT owner process
     
    399397                owner_pid = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->pid ) );
    400398
    401                 // kill TXT owner process only if it is not the INIT process
    402                 if( owner_pid != 1 )
    403                 {
    404                     // get parent process descriptor pointers
    405                     parent_xp  = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->parent_xp ) );
    406                     parent_cxy = GET_CXY( parent_xp );
    407                     parent_ptr = GET_PTR( parent_xp );
    408 
    409                     // get pointers on the parent process main thread
    410                     parent_main_ptr = hal_remote_lpt(XPTR(parent_cxy,&parent_ptr->th_tbl[0]));
    411                     parent_main_xp  = XPTR( parent_cxy , parent_main_ptr );
    412 
    413                     // remove process from TXT list
    414                     process_txt_detach( owner_xp );
    415 
    416                     // mark for delete all thread in all clusters, but the main
    417                     process_sigaction( owner_pid , DELETE_ALL_THREADS );
     399// TXT owner cannot be the INIT process
     400assert( (owner_pid != 1) , "INIT process cannot be the TXT owner" );
     401
     402#if DEBUG_HAL_TXT_RX
     403if( DEBUG_HAL_TXT_RX < rx_cycle )
     404printk("\n[%s] TXT%d owner is process %x\n",
     405__FUNCTION__, channel, owner_pid );
     406#endif
     407                // get parent process descriptor pointers
     408                parent_xp  = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->parent_xp ) );
     409                parent_cxy = GET_CXY( parent_xp );
     410                parent_ptr = GET_PTR( parent_xp );
     411
     412                // get pointers on the parent process main thread
     413                parent_main_ptr = hal_remote_lpt(XPTR(parent_cxy,&parent_ptr->th_tbl[0]));
     414                parent_main_xp  = XPTR( parent_cxy , parent_main_ptr );
     415
     416                // transfer TXT ownership
     417                process_txt_transfer_ownership( owner_xp );
     418
     419                // remove process from TXT list
     420                // process_txt_detach( owner_xp );
     421
      422                // mark for delete all threads in all clusters, but the main thread
     423                process_sigaction( owner_pid , DELETE_ALL_THREADS );
    418424               
    419                     // block main thread
    420                     xptr_t main_xp = XPTR( owner_cxy , &owner_ptr->th_tbl[0] );
    421                     thread_block( main_xp , THREAD_BLOCKED_GLOBAL );
    422 
    423                     // atomically update owner process termination state
    424                     hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
    425                                           PROCESS_TERM_KILL );
    426 
    427                     // unblock the parent process main thread
    428                     thread_unblock( parent_main_xp , THREAD_BLOCKED_WAIT );
    429 
    430                     return;
    431                 }
     425#if DEBUG_HAL_TXT_RX
     426if( DEBUG_HAL_TXT_RX < rx_cycle )
      427printk("\n[%s] marked for delete all threads of process %x but main\n",
     428__FUNCTION__, owner_pid );
     429#endif
     430                // block main thread
     431                xptr_t main_xp = XPTR( owner_cxy , &owner_ptr->th_tbl[0] );
     432                thread_block( main_xp , THREAD_BLOCKED_GLOBAL );
     433
     434#if DEBUG_HAL_TXT_RX
     435if( DEBUG_HAL_TXT_RX < rx_cycle )
     436printk("\n[%s] blocked process %x main thread\n",
     437__FUNCTION__, owner_pid );
     438#endif
     439
     440                // atomically update owner process termination state
     441                hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
     442                                      PROCESS_TERM_KILL );
     443
     444                // unblock the parent process main thread
     445                thread_unblock( parent_main_xp , THREAD_BLOCKED_WAIT );
     446
     447#if DEBUG_HAL_TXT_RX
     448if( DEBUG_HAL_TXT_RX < rx_cycle )
     449printk("\n[%s] unblocked parent process %x main thread\n",
     450__FUNCTION__, hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid) ) );
     451#endif
     452                return;
    432453            }
    433454
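
To summarize the new control flow in the two hunks above: on a ^Z or ^C typed on the TXT terminal, the RX server no longer tests the owner PID inside an if block but asserts that the owner is not INIT, then runs a fixed sequence of steps. The stub program below only restates the order of these steps for the ^C (kill) path so it can be compiled and traced; the functions are printf stand-ins, not the kernel primitives (process_txt_transfer_ownership, process_sigaction, thread_block, hal_remote_atomic_or, thread_unblock) they mirror.

    #include <stdio.h>

    /* stand-in stubs : each prints the step it represents */
    static void transfer_txt_ownership( void )     { printf("1. transfer TXT ownership\n"); }
    static void mark_threads_for_delete( void )    { printf("2. mark all threads for delete, but the main\n"); }
    static void block_main_thread( void )          { printf("3. block the main thread\n"); }
    static void set_term_state_kill( void )        { printf("4. atomically set PROCESS_TERM_KILL\n"); }
    static void unblock_parent_main_thread( void ) { printf("5. unblock the parent main thread\n"); }

    int main( void )
    {
        /* order matters : ownership moves away from the dying process before its
         * threads are marked, and the parent is woken only after the KILL flag
         * is visible in the owner termination state */
        transfer_txt_ownership();
        mark_threads_for_delete();
        block_main_thread();
        set_term_state_kill();
        unblock_parent_main_thread();
        return 0;
    }
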
  • trunk/kernel/fs/fatfs.c

    r623 r625  
    22 * fatfs.c - FATFS file system API implementation.
    33 *
    4  * Author    Alain Greiner (2016,2017,2018)
     4 * Author    Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    374374
    375375//////////////////////////////////////////////////////////////////////////////////////////
    376 // This static function - atomically - decrements "free_clusters", and updates
    377 // the "free_cluster_hint" shared variables in the FATFS context in the FAT cluster.
    378 // It scan the FAT to find the first free slot larger than the <cluster> argument,
    379 // and set "free_cluster_hint" <= (free - 1).
     376// This static function decrements the  "free_clusters" variable, and updates the
     377// "free_cluster_hint" variable in the FATFS context, identified by the <fat_ctx_cxy>
     378// and <fat_ctx_ptr> arguments (cluster containing the FAT mapper).
      378// It scans all slots in the FAT mapper, seen as an array of 32-bit words, looking for the
     380// first free slot larger than the <cluster> argument, to update "free_cluster_hint".
    380381//
    381382// WARNING : The free_lock protecting exclusive access to these variables
    382383//           must be taken by the calling function.
    383384//////////////////////////////////////////////////////////////////////////////////////////
    384 // @ cluster     : recently allocated cluster index in FAT.
    385 //////////////////////////////////////////////////////////////////////////////////////////
    386 static error_t fatfs_free_clusters_decrement( uint32_t  cluster )
    387 {
    388     fatfs_ctx_t * loc_ctx;      // local pointer on local FATFS context
    389     fatfs_ctx_t * fat_ctx;      // local pointer on FATFS in cluster containing FAT mapper
    390     cxy_t         mapper_cxy;   // cluster identifier for cluster containing FAT mapper
     385// @ fat_ctx_cxy  : FAT mapper cluster identifier.
     386// @ fat_ctx_ptr  : local pointer on FATFS context.
     387// @ cluster      : recently allocated cluster index in FAT.
     388//////////////////////////////////////////////////////////////////////////////////////////
     389static error_t fatfs_free_clusters_decrement( cxy_t         fat_ctx_cxy,
     390                                              fatfs_ctx_t * fat_ctx_ptr,
     391                                              uint32_t      cluster )
     392{
    391393    xptr_t        mapper_xp;    // extended pointer on FAT mapper
    392394    xptr_t        hint_xp;      // extended pointer on "free_cluster_hint" shared variable
     
    397399    uint32_t      page_max;     // max number of pages in FAT mapper
    398400    xptr_t        page_xp;      // extended pointer on current page in FAT mapper
     401    xptr_t        base_xp;      // extended pointer on current page base
    399402    xptr_t        slot_xp;      // extended pointer on current slot in FAT mapper
    400403
     
    407410#endif
    408411
    409     // get local pointer on local FATFS context
    410     loc_ctx = fs_context[FS_TYPE_FATFS].extend;
    411 
    412     // get cluster containing FAT mapper
    413     mapper_xp  = loc_ctx->fat_mapper_xp;
    414     mapper_cxy = GET_CXY( mapper_xp );
    415 
    416     // get local pointer on FATFS context in FAT cluster
    417     fat_ctx = hal_remote_lpt( XPTR( mapper_cxy , &fs_context[FS_TYPE_FATFS].extend ) );
    418 
    419412    // build extended pointers on free_clusters, and free_cluster_hint
    420     hint_xp = XPTR( mapper_cxy , &fat_ctx->free_cluster_hint );
    421     numb_xp = XPTR( mapper_cxy , &fat_ctx->free_clusters );
     413    hint_xp = XPTR( fat_ctx_cxy , &fat_ctx_ptr->free_cluster_hint );
     414    numb_xp = XPTR( fat_ctx_cxy , &fat_ctx_ptr->free_clusters );
    422415
    423416    // update "free_clusters"
     
    425418    hal_remote_s32( numb_xp , numb - 1 );
    426419
    427     // scan FAT mapper to find the first free slot > cluster
    428     // and update "free_cluster_hint" as (free - 1)
     420    // get extended pointer on FAT mapper
     421    mapper_xp = hal_remote_l64( XPTR( fat_ctx_cxy , &fat_ctx_ptr->fat_mapper_xp ) );
     422
     423    // initialise variables to scan the FAT mapper
     424    // and find the first free slot > cluster
    429425    page_id  = (cluster + 1) >> 10;
    430426    slot_id  = (cluster + 1) & 0x3FF;
    431     page_max = (loc_ctx->fat_sectors_count >> 3);
     427    page_max = hal_remote_l32( XPTR( fat_ctx_cxy, &fat_ctx_ptr->fat_sectors_count ) ) >> 3;
    432428
    433429    // scan FAT mapper / loop on pages
     
    443439        }
    444440
      441        // get extended pointer on current page base
     442        base_xp = ppm_page2base( page_xp );
     443
    445444        // scan FAT mapper / loop on slots
    446445        while ( slot_id < 1024 )
    447446        {
    448447            // get extended pointer on current slot
    449             slot_xp = ppm_page2base( page_xp ) + (slot_id << 2);
    450 
    451             // test FAT slot value
     448            slot_xp = base_xp + (slot_id << 2);
     449
     450            // test slot value
    452451            if ( hal_remote_l32( slot_xp ) == FREE_CLUSTER )
    453452            {
    454                 // update "free_cluster_hint" <= (free - 1)
     453                // update "free_cluster_hint"
    455454                hal_remote_s32( hint_xp , (page_id << 10) + slot_id - 1 );
    456455
     
    465464            }
    466465
    467             // increment slot_id
    468             slot_id++;
     466            // update slot_id
      467            slot_id++;
    469468
    470469        }  // end loop on slots
    471470
    472         // update loop variables
     471        // update (page_id,slot_id) variables
    473472        page_id++;
    474473        slot_id = 0;
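
The (page_id , slot_id) arithmetic used in this hunk follows from the FAT mapper layout: a 4 Kbytes mapper page holds 1024 FAT entries of 32 bits each. The stand-alone fragment below just illustrates that decomposition with an arbitrary cluster index; it is not code from the commit.

    #include <stdio.h>
    #include <stdint.h>

    int main( void )
    {
        uint32_t cluster = 0x12345;                 /* arbitrary FAT32 cluster index   */

        uint32_t page_id = cluster >> 10;           /* 1024 entries per 4 Kbytes page  */
        uint32_t slot_id = cluster & 0x3FF;         /* slot index inside that page     */
        uint32_t offset  = slot_id << 2;            /* byte offset : 4 bytes per entry */

        printf("cluster %x -> page %x / slot %x / byte offset %x\n",
               cluster, page_id, slot_id, offset );
        return 0;
    }
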
     
    483482
    484483//////////////////////////////////////////////////////////////////////////////////////////
    485 // This static function atomically increments <free_clusters>, and updates
    486 // the <free_cluster_hint> shared variables in the FATFS context in the FAT cluster.
    487 // If the released cluster index is smaller than the current (hint + 1) value,
    488 // it set "free_cluster_hint" <= cluster - 1.
      484// This static function increments the "free_clusters" variable, and updates the
      485// "free_cluster_hint" variable in the FATFS context, identified by the <fat_ctx_cxy>
      486// and <fat_ctx_ptr> arguments (cluster containing the FAT mapper).
      487// If (cluster - 1) is smaller than the current hint value,
      488// it sets "free_cluster_hint" <= (cluster - 1).
    489489//
    490490// WARNING : The free_lock protecting exclusive access to these variables
    491491//           must be taken by the calling function.
    492492//////////////////////////////////////////////////////////////////////////////////////////
     493// @ fat_ctx_cxy  : FAT mapper cluster identifier.
     494// @ fat_ctx_ptr  : local pointer on FATFS context.
    493495// @ cluster     : recently released cluster index in FAT.
    494496//////////////////////////////////////////////////////////////////////////////////////////
    495 static void fatfs_free_clusters_increment( uint32_t  cluster )
    496 {
    497     fatfs_ctx_t * loc_ctx;      // local pointer on local FATFS context
    498     fatfs_ctx_t * fat_ctx;      // local pointer on FATFS in cluster containing FAT mapper
    499     cxy_t         fat_cxy;      // cluster identifier for cluster containing FAT mapper
     497static void fatfs_free_clusters_increment( cxy_t         fat_ctx_cxy,
     498                                           fatfs_ctx_t * fat_ctx_ptr,
     499                                           uint32_t      cluster )
     500{
    500501    xptr_t        hint_xp;      // extended pointer on "free_cluster_hint" shared variable
    501502    xptr_t        numb_xp;      // extended pointer on "free_clusters" shared variable
     
    503504    uint32_t      numb;         // "free_clusters" variable current value
    504505
    505     // get local pointer on local FATFS context
    506     loc_ctx = fs_context[FS_TYPE_FATFS].extend;
    507 
    508     // get cluster containing FAT mapper
    509     fat_cxy = GET_CXY( loc_ctx->fat_mapper_xp );
    510 
    511     // get local pointer on FATFS context in FAT cluster
    512     fat_ctx = hal_remote_lpt( XPTR( fat_cxy , &fs_context[FS_TYPE_FATFS].extend ) );
    513 
    514     // build extended pointers free_lock, free_clusters, and free_cluster_hint
    515     hint_xp = XPTR( fat_cxy , &fat_ctx->free_cluster_hint );
    516     numb_xp = XPTR( fat_cxy , &fat_ctx->free_clusters );
     506    // build extended pointers on free_clusters, and free_cluster_hint
     507    hint_xp = XPTR( fat_ctx_cxy , &fat_ctx_ptr->free_cluster_hint );
     508    numb_xp = XPTR( fat_ctx_cxy , &fat_ctx_ptr->free_clusters );
    517509
    518510    // get current value of free_cluster_hint and free_clusters
     
    521513
    522514    // update free_cluster_hint if required
    523     if ( cluster < (hint + 1) ) hal_remote_s32( hint_xp , (cluster - 1) );
     515    if ( (cluster - 1) < hint ) hal_remote_s32( hint_xp , (cluster - 1) );
    524516
    525517    // update free_clusters
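
The convention maintained by the two helpers above (and documented in fatfs.h further down) is that cluster[hint + 1] is the first free cluster. The fragment below is a user-space model of the two updates on a plain in-memory FAT array, assuming FREE_CLUSTER is zero as in standard FAT32; locking, remote accesses and mapper paging are deliberately left out.

    #include <stdio.h>
    #include <stdint.h>

    #define FAT_SIZE      32
    #define FREE_CLUSTER  0              /* a free FAT32 slot contains zero */

    static uint32_t fat[FAT_SIZE];
    static uint32_t hint;                /* invariant : fat[hint + 1] is the first free slot */
    static uint32_t free_clusters;

    /* called after cluster <c> has been allocated : find the next free slot > c */
    static void decrement_free( uint32_t c )
    {
        free_clusters--;
        for( uint32_t i = c + 1 ; i < FAT_SIZE ; i++ )
        {
            if( fat[i] == FREE_CLUSTER ) { hint = i - 1; return; }
        }
    }

    /* called after cluster <c> has been released (c >= 2 : clusters 0,1 are reserved) */
    static void increment_free( uint32_t c )
    {
        free_clusters++;
        if( (c - 1) < hint ) hint = c - 1;
    }

    int main( void )
    {
        for( uint32_t i = 0 ; i < FAT_SIZE ; i++ ) fat[i] = FREE_CLUSTER;
        fat[0] = fat[1] = fat[2] = 0x0FFFFFFF;     /* clusters 0..2 already used */
        hint = 2;  free_clusters = FAT_SIZE - 3;

        fat[3] = 0x0FFFFFFF;  decrement_free( 3 ); /* allocate cluster 3         */
        printf("after alloc  : hint = %u / free = %u\n", hint, free_clusters );

        fat[3] = FREE_CLUSTER;  increment_free( 3 ); /* release cluster 3        */
        printf("after release: hint = %u / free = %u\n", hint, free_clusters );
        return 0;
    }
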
     
    542534// It does NOT update the FS on the IOC device.
    543535//////////////////////////////////////////////////////////////////////////////////////////
    544 // @ fat_mapper_xp : extended pointer on FAT mapper.
    545 // @ cluster       : cluster index in FAT.
     536// @ mapper_cxy : FAT mapper cluster identifier.
     537// @ mapper_ptr : local pointer on FAT mapper.
     538// @ fatfs_ctx  : local pointer on FATFS context in FAT cluster.
     539// @ cluster    : index of cluster to be released from FAT mapper.
    546540// @ return 0 if success / return -1 if error (cannot access FAT)
    547541//////////////////////////////////////////////////////////////////////////////////////////
    548 static error_t fatfs_recursive_release( xptr_t    fat_mapper_xp,
    549                                         uint32_t  cluster )
     542static error_t fatfs_recursive_release( cxy_t         mapper_cxy,
     543                                        mapper_t    * mapper_ptr,
     544                                        fatfs_ctx_t * fatfs_ctx,
     545                                        uint32_t      cluster )
    550546{
    551547    uint32_t next;
    552548
    553     // get next cluster from FAT mapper
    554     if ( mapper_remote_get_32( fat_mapper_xp , cluster , &next ) ) return -1;
     549    // build extended pointer on FAT mapper
     550    xptr_t mapper_xp = XPTR( mapper_cxy , mapper_ptr );
     551
     552    // get next cluster index from FAT mapper
     553    if ( mapper_remote_get_32( mapper_xp,
     554                               cluster,
     555                               &next ) ) return -1;
    555556
    556557#if (DEBUG_FATFS_RELEASE_INODE & 1)
     
    564565    {
    565566        // call fatfs_recursive_release() on next cluster
    566         if ( fatfs_recursive_release( fat_mapper_xp , next ) ) return -1;
     567        if ( fatfs_recursive_release( mapper_cxy,
     568                                      mapper_ptr,
     569                                      fatfs_ctx,
     570                                      next ) ) return -1;
    567571    }       
    568572
    569573    // update current cluster in FAT mapper
    570     if ( mapper_remote_set_32( fat_mapper_xp, cluster , FREE_CLUSTER ) ) return -1;
     574    if ( mapper_remote_set_32( mapper_xp,
     575                               cluster,
     576                               FREE_CLUSTER ) ) return -1;
    571577
    572578    // Update free_cluster_hint and free_clusters in FAT context
    573     fatfs_free_clusters_increment( cluster );
     579    fatfs_free_clusters_increment( mapper_cxy,
     580                                   fatfs_ctx,
     581                                   cluster );
    574582
    575583    return 0;
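
As a reading aid for the refactored fatfs_recursive_release(): the chain of a file is stored in the FAT itself, each slot holding the index of the next cluster until an end-of-chain mark, and the release recurses to the deepest cluster before freeing the current one. The fragment below models that walk on a plain array; the 0x0FFFFFF8 end-of-chain threshold is taken from the FAT32 specification, not from this changeset, and the free-cluster bookkeeping shown earlier is omitted.

    #include <stdio.h>
    #include <stdint.h>

    #define FAT_SIZE          64
    #define FREE_CLUSTER      0
    #define END_OF_CHAIN_MIN  0x0FFFFFF8          /* assumed FAT32 end-of-chain threshold */

    static uint32_t fat[FAT_SIZE];

    /* release the chain starting at <cluster> : deepest cluster first, then current */
    static int recursive_release( uint32_t cluster )
    {
        if( cluster >= FAT_SIZE ) return -1;      /* would be a mapper access error */

        uint32_t next = fat[cluster];

        if( next < END_OF_CHAIN_MIN )             /* not the last cluster : recurse */
        {
            if( recursive_release( next ) ) return -1;
        }

        fat[cluster] = FREE_CLUSTER;              /* release current cluster        */
        return 0;
    }

    int main( void )
    {
        /* build a three-cluster chain : 5 -> 9 -> 12 -> end of chain */
        fat[5]  = 9;
        fat[9]  = 12;
        fat[12] = 0x0FFFFFFF;

        recursive_release( 5 );

        printf("fat[5]=%u fat[9]=%u fat[12]=%u\n", fat[5], fat[9], fat[12] );
        return 0;
    }
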
     
    582590//////////////////////////////////////////////////////////////////////////////////////////
    583591
    584 //////////////////////////////
    585 void fatfs_ctx_display( void )
    586 {
    587     // get pointer on local FATFS context
    588     vfs_ctx_t   * vfs_ctx   = &fs_context[FS_TYPE_FATFS];
    589     fatfs_ctx_t * fatfs_ctx = (fatfs_ctx_t *)vfs_ctx->extend;
    590 
     592///////////////////////////////////////////
     593void fatfs_ctx_display( fatfs_ctx_t * ctx )
     594{
    591595    printk("\n*** FAT context ***\n"
    592596           "- fat_sectors       = %d\n"
     
    599603           "- free_cluster_hint = %d\n"
    600604           "- fat_mapper_xp     = %l\n",
    601            fatfs_ctx->fat_sectors_count,
    602            fatfs_ctx->bytes_per_sector,
    603            fatfs_ctx->sectors_per_cluster * fatfs_ctx->bytes_per_sector,
    604            fatfs_ctx->fat_begin_lba,
    605            fatfs_ctx->cluster_begin_lba,
    606            fatfs_ctx->root_dir_cluster,
    607            fatfs_ctx->free_clusters,
    608            fatfs_ctx->free_cluster_hint,
    609            fatfs_ctx->fat_mapper_xp );
    610 
    611 }  // end fatfs_ctx_display()
     605           ctx->fat_sectors_count,
     606           ctx->bytes_per_sector,
     607           ctx->sectors_per_cluster * ctx->bytes_per_sector,
     608           ctx->fat_begin_lba,
     609           ctx->cluster_begin_lba,
     610           ctx->root_dir_cluster,
     611           ctx->free_clusters,
     612           ctx->free_cluster_hint,
     613           ctx->fat_mapper_xp );
     614
      615}  // end fatfs_ctx_display()
    612616
    613617//////////////////////////////////////////
     
    659663    uint32_t * buffer;                 // pointer on current page (array of uint32_t)
    660664    uint32_t   current_page_index;     // index of current page in FAT
    661     uint32_t   current_page_offset;    // offset of slot in current page
     665    uint32_t   current_slot_index;     // index of slot in current page
    662666    uint32_t   page_count_in_file;     // index of page in file (index in linked list)
    663667    uint32_t   next_cluster_id;        // content of current FAT slot
     
    670674thread_t * this  = CURRENT_THREAD;
    671675if( DEBUG_FATFS_GET_CLUSTER < cycle )
    672 printk("\n[%s] thread[%x,%x] enter / first_cluster_id %d / searched_index / cycle %d\n",
     676printk("\n[%s] thread[%x,%x] enter / first_cluster_id %d / searched_index %d / cycle %d\n",
    673677__FUNCTION__, this->process->pid, this->trdid, first_cluster_id, searched_page_index, cycle );
    674678#endif
     
    678682
    679683    // get extended pointer and cluster on FAT mapper
    680     xptr_t mapper_xp  = ctx->fat_mapper_xp;
    681     cxy_t  mapper_cxy = GET_CXY( mapper_xp );
     684    xptr_t fat_mapper_xp  = ctx->fat_mapper_xp;
     685    cxy_t  fat_mapper_cxy = GET_CXY( fat_mapper_xp );
    682686
    683687    // initialize loop variable (1024 slots per page)
    684688    current_page_index  = first_cluster_id >> 10;
    685     current_page_offset = first_cluster_id & 0x3FF;
     689    current_slot_index = first_cluster_id & 0x3FF;
    686690    page_count_in_file  = 0;
    687691    next_cluster_id     = 0xFFFFFFFF;
    688692
    689     // scan FAT (i.e. traverse FAT linked list)
     693    // scan FAT mapper (i.e. traverse FAT linked list)
    690694    while( page_count_in_file < searched_page_index )
    691695    {
    692         // get pointer on current page descriptor
    693         current_page_xp = mapper_remote_get_page( mapper_xp , current_page_index );
     696        // get pointer on current page descriptor in FAT mapper
     697        current_page_xp = mapper_remote_get_page( fat_mapper_xp , current_page_index );
    694698
    695699        if( current_page_xp == XPTR_NULL )
    696700        {
    697             // TODO
     701            printk("\n[ERROR] in %s : cannot get next page from FAT mapper\n", __FUNCTION__);
    698702            return -1;
    699703        }
     
    704708
    705709        // get FAT slot content
    706         next_cluster_id = hal_remote_l32( XPTR( mapper_cxy , &buffer[current_page_offset] ) );
     710        next_cluster_id = hal_remote_l32( XPTR( fat_mapper_cxy,
     711                                                &buffer[current_slot_index] ) );
    707712
    708713#if (DEBUG_FATFS_GET_CLUSTER & 1)
    709714if( DEBUG_FATFS_GET_CLUSTER < cycle )
    710715printk("\n[%s] traverse FAT / current_page_index = %d\n"
    711 "current_page_offset = %d / next_cluster_id = %d\n",
    712 __FUNCTION__, current_page_index, current_page_offset , next_cluster_id );
     716"current_slot_index = %d / next_cluster_id = %d\n",
     717__FUNCTION__, current_page_index, current_slot_index , next_cluster_id );
    713718#endif
    714719
    715720        // update loop variables
    716         current_page_index  = next_cluster_id >> 10;
    717         current_page_offset = next_cluster_id & 0x3FF;
     721        current_page_index = next_cluster_id >> 10;
     722        current_slot_index = next_cluster_id & 0x3FF;
    718723        page_count_in_file++;
    719724    }
    720725
    721     if( next_cluster_id == 0xFFFFFFFF ) return -1;
     726    if( next_cluster_id == 0xFFFFFFFF )
     727    {
     728        printk("\n[ERROR] in %s : searched_cluster_id not found in FAT\n", __FUNCTION__ );
     729        return -1;
     730    }
    722731   
    723732#if DEBUG_FATFS_GET_CLUSTER
     
    759768
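
fatfs_get_cluster(), shown above, walks the same linked list to translate a page index inside a file into a FAT32 cluster index: starting from the inode first cluster it follows searched_page_index links, failing if an end-of-chain value is met too early. A stand-alone model of that walk (the page/slot split of the mapper is dropped since the whole FAT fits in one array here):

    #include <stdio.h>
    #include <stdint.h>

    #define FAT_SIZE  64

    static uint32_t fat[FAT_SIZE];

    /* return in <found> the cluster holding page <page_index> of a file whose
     * first cluster is <first_cluster>, or -1 if the chain is too short */
    static int get_cluster( uint32_t first_cluster, uint32_t page_index, uint32_t * found )
    {
        uint32_t current = first_cluster;

        for( uint32_t i = 0 ; i < page_index ; i++ )
        {
            uint32_t next = fat[current];
            if( next >= 0x0FFFFFF8 ) return -1;    /* end of chain reached too early */
            current = next;
        }
        *found = current;
        return 0;
    }

    int main( void )
    {
        fat[7]  = 20;                              /* page 0 of the file is in cluster 7  */
        fat[20] = 33;                              /* page 1 in cluster 20, page 2 in 33  */
        fat[33] = 0x0FFFFFFF;

        uint32_t c;
        if( get_cluster( 7 , 2 , &c ) == 0 ) printf("page 2 is in cluster %u\n", c );
        return 0;
    }
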
    760769#if DEBUG_FATFS_CTX_INIT
    761 uint32_t cycle = (uint32_t)hal_get_cycles();
     770uint32_t   cycle = (uint32_t)hal_get_cycles();
     771thread_t * this  = CURRENT_THREAD;
    762772if( DEBUG_FATFS_CTX_INIT < cycle )
    763773printk("\n[%s] thread[%x,%x] enter for fatfs_ctx = %x / cycle %d\n",
     
    766776
    767777// check argument
    768 assert( (fatfs_ctx != NULL) , "pointer on FATFS context is NULL\n" );
     778assert( (fatfs_ctx != NULL) , "pointer on FATFS context is NULL" );
    769779
    770780// check only cluster 0 does FATFS init
    771 assert( (local_cxy == 0) , "only cluster 0 can initialize FATFS\n");
     781assert( (local_cxy == 0) , "only cluster 0 can initialize FATFS");
    772782
    773783    // allocate a 512 bytes buffer to store the boot record
     
    882892    // WARNING : the inode field MUST be NULL for the FAT mapper
    883893    fat_mapper->inode = NULL;
     894
    884895
    885896    // initialize the FATFS context
     
    895906
    896907    remote_queuelock_init( XPTR( local_cxy , &fatfs_ctx->free_lock ) , LOCK_FATFS_FREE );
     908
     909#if (DEBUG_FATFS_CTX_INIT & 0x1)
     910if( DEBUG_FATFS_CTX_INIT < cycle )
     911fatfs_ctx_display( fatfs_ctx );
     912#endif
    897913
    898914#if DEBUG_FATFS_CTX_INIT
     
    15251541                          xptr_t        child_inode_xp )
    15261542{
    1527     uint8_t  * entry;    // pointer on FAT32 directory entry (array of 32 bytes)
    1528     uint32_t   index;    // index of FAT32 directory entry in mapper
    1529     mapper_t * mapper;   // pointer on directory mapper
    1530     uint32_t   cluster;  // directory entry cluster
    1531     uint32_t   size;     // directory entry size
    1532     bool_t     is_dir;   // directory entry type (file/dir)
    1533     error_t    error;
     1543    uint8_t      * entry;            // pointer on FAT32 directory entry (array of 32 bytes)
     1544    uint32_t       index;            // index of FAT32 directory entry in mapper
     1545    mapper_t     * mapper;           // pointer on directory mapper
     1546    uint32_t       cluster;          // directory entry cluster
     1547    uint32_t       size;             // directory entry size
     1548    bool_t         is_dir;           // directory entry type (file/dir)
     1549    xptr_t         root_xp;          // extended pointer on root of parent dentries
     1550    xptr_t         iter_xp;          // iterator for this list
     1551    cxy_t          child_inode_cxy;  // child inode cluster 
     1552    vfs_inode_t  * child_inode_ptr;  // child inode local pointer
     1553    xptr_t         dentry_xp;        // extended pointer on searched dentry descriptor
     1554    cxy_t          dentry_cxy;       // cluster identifier of dentry (must be local_cxy)
     1555    vfs_dentry_t * dentry_ptr;       // local pointer
     1556    error_t        error;
     1557
     1558    char       dir_name[CONFIG_VFS_MAX_NAME_LENGTH];
    15341559
    15351560// check arguments
     
    15391564
    15401565#if DEBUG_FATFS_GET_DENTRY
    1541 char       parent_name[CONFIG_VFS_MAX_NAME_LENGTH];
    15421566uint32_t   cycle = (uint32_t)hal_get_cycles();
    15431567thread_t * this  = CURRENT_THREAD;
    1544 vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name );
     1568vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , dir_name );
    15451569if( DEBUG_FATFS_GET_DENTRY < cycle )
    15461570printk("\n[%s]  thread[%x,%x] enter for child <%s> in parent <%s> / cycle %d\n",
    1547 __FUNCTION__, this->process->pid, this->trdid, name , parent_name , cycle );
    1548 #endif
    1549 
    1550     // get pointer and index of searched directory entry in mapper
     1571__FUNCTION__, this->process->pid, this->trdid, name , dir_name , cycle );
     1572#endif
     1573
     1574    // get local pointer on parent mapper
    15511575    mapper = parent_inode->mapper;
     1576
     1577    // get pointer and index in mapper for searched directory entry
    15521578    error  = fatfs_scan_directory( mapper, name , &entry , &index );
    15531579
    1554     // update child inode and dentry descriptors if sucess
    1555     if( error == 0 )
     1580    if( error )
    15561581    {
     1582        vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , dir_name );
     1583        printk("\n[ERROR] in %s : cannot find <%s> in parent mapper <%s>\n",
     1584        __FUNCTION__, name , dir_name );
     1585        return -1;
     1586    }
     1587
     1588    // get relevant infos from FAT32 directory entry
     1589    cluster = (fatfs_get_record( DIR_FST_CLUS_HI , entry , 1 ) << 16) |
     1590              (fatfs_get_record( DIR_FST_CLUS_LO , entry , 1 )      ) ;
     1591    is_dir  = (fatfs_get_record( DIR_ATTR        , entry , 1 ) & ATTR_DIRECTORY);
     1592    size    =  fatfs_get_record( DIR_FILE_SIZE   , entry , 1 );
     1593
     1594    // get child inode cluster and local pointer
     1595    child_inode_cxy = GET_CXY( child_inode_xp );
     1596    child_inode_ptr = GET_PTR( child_inode_xp );
     1597
     1598    // build extended pointer on root of list of parent dentries
     1599    root_xp = XPTR( child_inode_cxy , &child_inode_ptr->parents );
     1600
     1601// check child inode has at least one parent
     1602assert( (xlist_is_empty( root_xp ) == false ), "child inode must have one parent\n");
     1603
     1604    // scan list of parent dentries to search the parent_inode
     1605    bool_t found = false;
     1606    XLIST_FOREACH( root_xp , iter_xp )
     1607    {
     1608        // get pointers on dentry
     1609        dentry_xp  = XLIST_ELEMENT( iter_xp , vfs_dentry_t , parents );
     1610        dentry_cxy = GET_CXY( dentry_xp );
     1611        dentry_ptr = GET_PTR( dentry_xp );
     1612
     1613        // get local pointer on current parent directory inode
     1614        vfs_inode_t * current = hal_remote_lpt( XPTR( dentry_cxy , &dentry_ptr->parent ) );
     1615
     1616        // check if current parent is the searched parent
     1617        if( XPTR( dentry_cxy , current ) == XPTR( local_cxy , parent_inode ) )
     1618        {
     1619            found = true;
     1620            break;
     1621        }
     1622    }
     1623
     1624    if( found == false )
     1625    {
     1626        vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , dir_name );
     1627        printk("\n[ERROR] in %s : cannot find <%s> directory in list of parents for <%s>\n",
     1628        __FUNCTION__, dir_name, name );
     1629        return -1;
     1630    }
     1631
     1632    // update the child inode "type", "size", and "extend" fields
     1633    vfs_inode_type_t type = (is_dir) ? INODE_TYPE_DIR : INODE_TYPE_FILE;
     1634
     1635    hal_remote_s32( XPTR( child_inode_cxy , &child_inode_ptr->type   ) , type );
     1636    hal_remote_s32( XPTR( child_inode_cxy , &child_inode_ptr->size   ) , size );
     1637    hal_remote_s32( XPTR( child_inode_cxy , &child_inode_ptr->extend ) , cluster );
     1638
     1639    // update the dentry "extend" field
     1640    dentry_ptr->extend = (void *)(intptr_t)index;
    15571641
    15581642#if DEBUG_FATFS_GET_DENTRY
    15591643cycle = (uint32_t)hal_get_cycles();
    15601644if( DEBUG_FATFS_GET_DENTRY < cycle )
    1561 printk("\n[%s]  thread[%x,%x] exit / intialised child <%s> in %s / cycle %d\n",
    1562 __FUNCTION__, this->process->pid, this->trdid, name, parent_name, cycle );
    1563 #endif
    1564         // get relevant infos from FAT32 directory entry
    1565         cluster = (fatfs_get_record( DIR_FST_CLUS_HI , entry , 1 ) << 16) |
    1566                   (fatfs_get_record( DIR_FST_CLUS_LO , entry , 1 )      ) ;
    1567         is_dir  = (fatfs_get_record( DIR_ATTR        , entry , 1 ) & ATTR_DIRECTORY);
    1568         size    =  fatfs_get_record( DIR_FILE_SIZE   , entry , 1 );
    1569 
    1570         // get child inode cluster and local pointer
    1571         cxy_t          inode_cxy = GET_CXY( child_inode_xp );
    1572         vfs_inode_t  * inode_ptr = GET_PTR( child_inode_xp );
    1573 
    1574         // build extended pointer on root of list of prent dentries
    1575         xptr_t parents_root_xp = XPTR( inode_cxy , &inode_ptr->parents );
    1576 
    1577 // check child inode has at least one parent
    1578 assert( (xlist_is_empty( parents_root_xp ) == false ), "child inode must have one parent\n");
    1579 
    1580         // get dentry pointers and cluster
    1581         xptr_t         dentry_xp  = XLIST_FIRST( parents_root_xp , vfs_dentry_t , parents );
    1582         vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp );
    1583         cxy_t          dentry_cxy = GET_CXY( dentry_xp );
    1584 
    1585 // check dentry descriptor in same cluster as parent inode
    1586 assert( (dentry_cxy == local_cxy) , "illegal dentry cluster\n" );
    1587 
    1588         // update the child inode "type", "size", and "extend" fields
    1589         vfs_inode_type_t type = (is_dir) ? INODE_TYPE_DIR : INODE_TYPE_FILE;
    1590 
    1591         hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type   ) , type );
    1592         hal_remote_s32( XPTR( inode_cxy , &inode_ptr->size   ) , size );
    1593         hal_remote_s32( XPTR( inode_cxy , &inode_ptr->extend ) , cluster );
    1594 
    1595         // update the dentry "extend" field
    1596         dentry_ptr->extend = (void *)(intptr_t)index;
    1597 
    1598         return 0;
    1599     }
    1600     else
    1601     {
    1602         return -1;
    1603     }
      1645printk("\n[%s]  thread[%x,%x] exit / initialised inode & dentry for <%s> in <%s> / cycle %d\n",
     1646__FUNCTION__, this->process->pid, this->trdid, name, dir_name, cycle );
     1647#endif
     1648
     1649    return 0;
    16041650
    16051651}  // end fatfs_new_dentry()
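
The records read in fatfs_new_dentry() (DIR_FST_CLUS_HI, DIR_FST_CLUS_LO, DIR_ATTR, DIR_FILE_SIZE) live in the standard 32-byte FAT32 directory entry. The offsets and the ATTR_DIRECTORY value below come from the FAT32 specification, not from this changeset; the little-endian decoding simply illustrates what the fatfs_get_record() calls compute for these records.

    #include <stdio.h>
    #include <stdint.h>

    #define ATTR_DIRECTORY  0x10     /* standard FAT32 attribute bit */

    /* little-endian read helpers */
    static uint32_t get16( const uint8_t * p ) { return p[0] | (p[1] << 8); }
    static uint32_t get32( const uint8_t * p )
    { return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24); }

    int main( void )
    {
        uint8_t entry[32] = { 0 };   /* one 32-byte FAT32 directory entry */

        /* forge an example entry : cluster 0x00120034, regular file of 2048 bytes */
        entry[11] = 0x00;            /* DIR_Attr        (offset 11)                */
        entry[20] = 0x12;            /* DIR_FstClusHI   (offset 20, little-endian) */
        entry[26] = 0x34;            /* DIR_FstClusLO   (offset 26, little-endian) */
        entry[28] = 0x00;            /* DIR_FileSize    (offset 28, little-endian) */
        entry[29] = 0x08;

        uint32_t cluster = (get16( &entry[20] ) << 16) | get16( &entry[26] );
        int      is_dir  = (entry[11] & ATTR_DIRECTORY) != 0;
        uint32_t size    =  get32( &entry[28] );

        printf("cluster = %x / is_dir = %d / size = %u\n", cluster, is_dir, size );
        return 0;
    }
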
     
    16151661    error_t    error;
    16161662
     1663    char       dir_name[CONFIG_VFS_MAX_NAME_LENGTH];
     1664
    16171665// check arguments
    16181666assert( (inode  != NULL) , "inode is NULL\n" );
     
    16211669
    16221670#if DEBUG_FATFS_UPDATE_DENTRY
    1623 char       dir_name[CONFIG_VFS_MAX_NAME_LENGTH];
    16241671uint32_t   cycle = (uint32_t)hal_get_cycles();
    16251672thread_t * this  = CURRENT_THREAD;
    16261673vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name );
    16271674if( DEBUG_FATFS_UPDATE_DENTRY < cycle )
    1628 printk("\n[%s]  thread[%x,%x] enter for entry <%s> in dir <%s> / cycle %d\n",
    1629 __FUNCTION__, this->process->pid, this->trdid, dentry->name , dir_name , cycle );
    1630 #endif
    1631 
    1632     // get pointer and index of searched directory entry in mapper
     1675printk("\n[%s]  thread[%x,%x] enter for <%s/%s> / size %d / cycle %d\n",
     1676__FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, size, cycle );
     1677#endif
     1678
     1679    // get local pointer on mapper
    16331680    mapper = inode->mapper;
     1681
     1682    // get pointer and index in mapper for searched directory entry
    16341683    error  = fatfs_scan_directory( mapper, dentry->name , &entry , &index );
    16351684
    1636     // update size in mapper if found
    1637     if( error == 0 )
     1685    if( error )
    16381686    {
     1687        vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name );
     1688        printk("\n[ERROR] in %s : cannot find <%s> in parent mapper <%s>\n",
     1689        __FUNCTION__, dentry->name, dir_name );
     1690        return -1;
     1691    }
     1692
     1693    // set size in FAT32 directory entry
     1694    fatfs_set_record( DIR_FILE_SIZE , entry , 1 , size );
     1695
     1696    // get local pointer on modified page base
     1697    void * base = (void *)((intptr_t)entry & (~CONFIG_PPM_PAGE_MASK));
     1698
     1699    // get extended pointer on modified page descriptor
     1700    xptr_t page_xp = ppm_base2page( XPTR( local_cxy , base ) );
     1701
     1702    // synchronously update the modified page on device
     1703    error = fatfs_move_page( page_xp , IOC_SYNC_WRITE );
     1704
     1705    if( error )
     1706    {
     1707        vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name );
     1708        printk("\n[ERROR] in %s : cannot update parent directory <%s> on device\n",
     1709        __FUNCTION__, dir_name );
     1710        return -1;
     1711    }
    16391712
    16401713#if DEBUG_FATFS_UPDATE_DENTRY
    16411714cycle = (uint32_t)hal_get_cycles();
    16421715if( DEBUG_FATFS_UPDATE_DENTRY < cycle )
    1643 printk("\n[%s]  thread[%x,%x] exit / found entry <%s> in <%s> / cycle %d\n",
    1644 __FUNCTION__, this->process->pid, this->trdid, dentry->name, dir_name, cycle );
    1645 #endif
    1646         // set size in FAT32 directory entry
    1647         fatfs_set_record( DIR_FILE_SIZE , entry , 1 , size );
    1648 
    1649         // get local pointer on modified page base
    1650         void * base = (void *)((intptr_t)entry & (~CONFIG_PPM_PAGE_MASK));
    1651 
    1652         // get extended pointer on modified page descriptor
    1653         xptr_t    page_xp = ppm_base2page( XPTR( local_cxy , base ) );
    1654 
    1655         // mark page as dirty
    1656         ppm_page_do_dirty( page_xp );
    1657 
    1658         return 0;
    1659     }
    1660     else
    1661     {
    1662         return -1;
    1663     }
     1716printk("\n[%s]  thread[%x,%x] exit / updated size for <%s/%s> / cycle %d\n",
     1717__FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, cycle );
     1718#endif
     1719
     1720    return 0;
    16641721
    16651722}  // end fatfs_update_dentry()
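
The page flush in fatfs_update_dentry() relies on masking the pointer to the modified directory entry down to its page base, so that the page descriptor can be retrieved with ppm_base2page() and pushed to the device. The fragment below only illustrates the masking step, assuming 4 Kbytes pages (i.e. a 0xFFF mask, which is what CONFIG_PPM_PAGE_MASK is expected to be); it is not kernel code.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_MASK  0xFFF         /* assumed 4 Kbytes pages, as CONFIG_PPM_PAGE_MASK */

    int main( void )
    {
        /* a buffer standing in for one mapper page, plus a pointer inside it */
        static uint8_t page[4096] __attribute__((aligned(4096)));
        uint8_t * entry = &page[0x6A0];          /* some directory entry inside the page */

        /* mask the low bits to recover the page base, as fatfs_update_dentry() does */
        uint8_t * base = (uint8_t *)((intptr_t)entry & ~(intptr_t)PAGE_MASK);

        printf("entry %p -> page base %p (delta %ld)\n",
               (void *)entry, (void *)base, (long)(entry - base) );
        return 0;
    }
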
     
    20442101error_t fatfs_cluster_alloc( uint32_t * searched_cluster )
    20452102{
    2046     uint32_t      page_id;        // page index in mapper
     2103    uint32_t      page_id;        // page index in FAT mapper
    20472104    uint32_t      slot_id;        // slot index in page (1024 slots per page)
    2048     uint32_t      hint;           // first free cluster index in FAT
     2105    uint32_t      cluster;        // first free cluster index in FAT
    20492106    uint32_t      free_clusters;  // total number of free clusters
    20502107    vfs_ctx_t   * vfs_ctx;        // local pointer on VFS context (same in all clusters)
     
    20802137    fat_fatfs_ctx = hal_remote_lpt( XPTR( mapper_cxy , &vfs_ctx->extend ) );
    20812138
    2082     // build relevant extended pointers in on free clusters info in FAT cluster
     2139    // build relevant extended pointers on free clusters info in mapper cluster
    20832140    lock_xp = XPTR( mapper_cxy , &fat_fatfs_ctx->free_lock );
    20842141    hint_xp = XPTR( mapper_cxy , &fat_fatfs_ctx->free_cluster_hint );
     
    20892146
    20902147    // get hint and free_clusters values from FATFS context
    2091     hint          = hal_remote_l32( hint_xp );
     2148    cluster       = hal_remote_l32( hint_xp ) + 1;
    20922149    free_clusters = hal_remote_l32( numb_xp );
    20932150       
    2094     // get page index & slot index for the first free cluster
    2095     page_id  = (hint + 1) >> 10;
    2096     slot_id  = (hint + 1) & 0x3FF;
    2097 
    2098     // get relevant page from mapper
    2099     page_xp = mapper_remote_get_page( mapper_xp , page_id );
    2100 
    2101     if( page_xp == XPTR_NULL )
    2102     {
    2103         printk("\n[ERROR] in %s : cannot acces FAT mapper\n", __FUNCTION__ );
    2104         return -1;
    2105     }
    2106 
    2107     // build extended pointer on free cluster slot
    2108     slot_xp = ppm_page2base( page_xp ) + (slot_id<<2);
    2109          
    21102151#if (DEBUG_FATFS_CLUSTER_ALLOC & 1)
    21112152if( DEBUG_FATFS_CLUSTER_ALLOC < cycle )
    2112 printk("\n[%s] thread[%x,%x] get free info / hint %x / free_clusters %x\n",
    2113 __FUNCTION__, this->process->pid, this->trdid, hint, free_clusters );
     2153printk("\n[%s] thread[%x,%x] get free info : hint %x / free_clusters %x\n",
     2154__FUNCTION__, this->process->pid, this->trdid, (cluster - 1), free_clusters );
    21142155#endif
    21152156
     
    21272168    }
    21282169
    2129     // check "hint"
     2170
     2171
     2172    // get page index & slot index for selected cluster
     2173    page_id  = cluster >> 10;
     2174    slot_id  = cluster & 0x3FF;
     2175
     2176    // get relevant page descriptor from mapper
     2177    page_xp = mapper_remote_get_page( mapper_xp , page_id );
     2178
     2179    if( page_xp == XPTR_NULL )
     2180    {
      2181        printk("\n[ERROR] in %s : cannot access FAT mapper\n", __FUNCTION__ );
     2182        return -1;
     2183    }
     2184
     2185    // build extended pointer on selected cluster slot in FAT mapper
     2186    slot_xp = ppm_page2base( page_xp ) + (slot_id << 2);
     2187         
     2188    // check selected cluster actually free
    21302189    if( hal_remote_l32( slot_xp ) != FREE_CLUSTER )
    21312190    {
    2132         printk("\n[ERROR] in %s : illegal hint cluster\n", __FUNCTION__ );
     2191        printk("\n[ERROR] in %s : selected cluster %x not free\n", __FUNCTION__, cluster );
     21332192        remote_queuelock_release( lock_xp );
    21342193        return -1;
    21352194    }
    21362195
    2137     // update allocated cluster in FAT mapper
    2138     hal_remote_s32( slot_xp , END_OF_CHAIN_CLUSTER_MAX );
    2139 
    2140     // update free cluster info
    2141     fatfs_free_clusters_decrement( hint + 1 );
     2196    // update free cluster info in FATFS context
     2197    fatfs_free_clusters_decrement( mapper_cxy , fat_fatfs_ctx , cluster );
    21422198
     21432199    // release free clusters queuelock
    21442200    remote_queuelock_release( lock_xp );
     2201
     2202    // update FAT mapper
     2203    hal_remote_s32( slot_xp , END_OF_CHAIN_CLUSTER_MAX );
     2204
     2205    // synchronously update FAT on device
     2206    fatfs_move_page( page_xp , IOC_SYNC_WRITE );
    21452207
    21462208#if DEBUG_FATFS_CLUSTER_ALLOC
    21472209cycle = (uint32_t)hal_get_cycles();
    21482210if( DEBUG_FATFS_CLUSTER_ALLOC < cycle )
    2149 printk("\n[%s] thread[%x,%x] exit / cluster %x / cycle %d\n",
    2150 __FUNCTION__, this->process->pid, this->trdid, hint + 1, cycle );
    2151 #endif
    2152 
    2153     *searched_cluster = hint + 1;
     2211printk("\n[%s] thread[%x,%x] exit / updated cluster %x in FAT / cycle %d\n",
     2212__FUNCTION__, this->process->pid, this->trdid, cluster, cycle );
     2213#endif
     2214
     2215    *searched_cluster = cluster;
    21542216    return 0;
    21552217
     
    21642226    xptr_t        mapper_xp;      // extended pointer on FAT mapper
    21652227    cxy_t         mapper_cxy;     // Fat mapper cluster identifier
     2228    mapper_t    * mapper_ptr;     // local pointer on FAT mapper
    21662229    xptr_t        lock_xp;        // extended pointer on lock protecting free clusters info.
    21672230    xptr_t        first_xp;       // extended pointer on inode extension
     
    22042267    loc_fatfs_ctx = vfs_ctx->extend;
    22052268
    2206     // get extended pointer and cluster on FAT mapper
     2269    // get pointers and cluster on FAT mapper
    22072270    mapper_xp  = loc_fatfs_ctx->fat_mapper_xp;
    22082271    mapper_cxy = GET_CXY( mapper_xp );
     2272    mapper_ptr = GET_PTR( mapper_xp );
    22092273   
    22102274    // get local pointer on FATFS context in FAT cluster
     
    22182282
    22192283    // call the recursive function to release all clusters from FAT mapper
    2220     if ( fatfs_recursive_release( mapper_xp , first_cluster ) )
     2284    if ( fatfs_recursive_release( mapper_cxy,
     2285                                  mapper_ptr,
     2286                                  fat_fatfs_ctx,
     2287                                  first_cluster ) )
    22212288    {
    22222289        printk("\n[ERROR] in %s : cannot update FAT mapper\n", __FUNCTION__ );
     
    23002367    {
    23012368        // get lba from FATFS context and page_id
    2302         uint32_t      lba        = fatfs_ctx->fat_begin_lba + (page_id << 3);
     2369        uint32_t      lba = fatfs_ctx->fat_begin_lba + (page_id << 3);
    23032370 
    23042371        // access device
     
    23112378        if( error ) return EIO;
    23122379
    2313 #if (DEBUG_FATFS_MOVE_PAGE & 0x1)
    2314 if( DEBUG_FATFS_MOVE_PAGE < cycle )
    2315 mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id );
    2316 #endif
    2317 
    23182380#if DEBUG_FATFS_MOVE_PAGE
    2319 cycle = (uint32_t)hal_get_cycles();
    23202381if( DEBUG_FATFS_MOVE_PAGE < cycle )
    23212382{
     
    23572418                                       page_id,
    23582419                                       &searched_cluster );
    2359             if( error )  return EIO;
     2420            if( error )
     2421            {
     2422                printk("\n[ERROR] in %s : cannot access FAT mapper\n", __FUNCTION__ );
     2423                return -1;
     2424            }
    23602425        }
    23612426
    23622427        // get lba from searched_cluster
    23632428        uint32_t lba = fatfs_lba_from_cluster( fatfs_ctx , searched_cluster );
     2429
     2430#if DEBUG_FATFS_MOVE_PAGE
     2431if( DEBUG_FATFS_MOVE_PAGE < cycle )
     2432{
     2433    if ( (cmd_type == IOC_READ) || (cmd_type == IOC_SYNC_READ) )
     2434    printk("\n[%s] thread[%x,%x] load page %d of <%s> / cluster_id %x / cycle %d\n",
     2435    __FUNCTION__, this->process->pid, this->trdid, page_id, name, searched_cluster, cycle );
     2436    else
     2437    printk("\n[%s] thread[%x,%x] sync page %d of <%s> / cluster_id %x / cycle %d\n",
     2438    __FUNCTION__, this->process->pid, this->trdid, page_id, name, searched_cluster, cycle );
     2439}
     2440#endif
    23642441
    23652442        // access device
     
    23702447        else                                  error = -1;
    23712448
    2372         if( error ) return EIO;
    2373 
    2374 #if (DEBUG_FATFS_MOVE_PAGE & 0x1)
    2375 if( DEBUG_FATFS_MOVE_PAGE < cycle )
    2376 mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id );
    2377 #endif
    2378 
    2379 #if DEBUG_FATFS_MOVE_PAGE
    2380 cycle = (uint32_t)hal_get_cycles();
    2381 if(DEBUG_FATFS_MOVE_PAGE < cycle)
    2382 {
    2383     if ( (cmd_type == IOC_READ) || (cmd_type == IOC_SYNC_READ) )
    2384         printk("\n[%s] thread[%x,%x] load page %d of <%s> inode / cycle %d\n",
    2385         __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
    2386     else
    2387         printk("\n[%s] thread[%x,%x] sync page %d of <%s> inode / cycle %d\n",
    2388         __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
    2389 }
    2390 #endif
    2391 
     2449        if( error )
     2450        {
     2451            printk("\n[ERROR] in %s : cannot access device\n", __FUNCTION__ );
     2452            return -1;
     2453        }
    23922454    }
    23932455
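
Every device access in fatfs_move_page() ends with an LBA computation: a 4 Kbytes mapper page covers 8 sectors of 512 bytes, hence the page_id << 3 added to fat_begin_lba for the FAT region, and fatfs_lba_from_cluster() performs the analogous conversion for data clusters. A small numeric illustration with arbitrary values (the real fat_begin_lba comes from the boot record):

    #include <stdio.h>
    #include <stdint.h>

    int main( void )
    {
        uint32_t fat_begin_lba = 0x20;              /* example value only               */

        /* one 4 Kbytes mapper page = 8 sectors of 512 bytes */
        uint32_t page_id = 5;                       /* page index in the FAT mapper     */
        uint32_t lba     = fat_begin_lba + (page_id << 3);

        printf("FAT mapper page %u starts at lba %u (8 sectors per page)\n", page_id, lba );
        return 0;
    }
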
  • trunk/kernel/fs/fatfs.h

    r623 r625  
    3535// The FATFS File System implements a FAT32 read/write file system.
    3636//
    37 // The FATFS extensions to the generic VFS are the following:
     37// The FATFS specific extensions to the generic VFS are the following:
    3838//
    3939// 1) The vfs_ctx_t "extend" field is a void* pointing on the fatfs_ctx_t structure.
     
    190190    uint32_t            root_dir_cluster;      /*! cluster index for  root directory    */
    191191    xptr_t              fat_mapper_xp;         /*! extended pointer on FAT mapper       */
    192     uint32_t            free_cluster_hint;     /*! start point to search free cluster   */
     192    uint32_t            free_cluster_hint;     /*! cluster[hint+1] is the first free    */
    193193    uint32_t            free_clusters;         /*! free clusters number                 */
    194194    remote_queuelock_t  free_lock;             /*! exclusive access to hint & number    */
     
    224224
    225225/*****************************************************************************************
    226  * This function display the content of the FATFS context.
    227  ****************************************************************************************/
    228 void fatfs_ctx_display( void );
      226 * This function displays the content of the local FATFS context.
     227 *****************************************************************************************
     228 * @ ctx  : local pointer on the context.
     229 ****************************************************************************************/
     230void fatfs_ctx_display( fatfs_ctx_t * ctx );
    229231
    230232/*****************************************************************************************
     
    312314 *****************************************************************************************
    313315 * It initializes a new inode/dentry couple in Inode Tree, attached to the directory
    314  * identified by the <parent_inode> argument. The new directory entry is identified
    315  * by the <name> argument. The child inode descriptor identified by the <child_inode_xp>
    316  * argument, and the dentry descriptor must have been previously allocated.
     316 * identified by the <parent_inode> argument. The directory entry is identified
     317 * by the <name> argument. The child inode descriptor, identified by the <child_inode_xp>
     318 * argument, and the associated dentry descriptor must have been previously allocated.
     317319 * It scans the parent mapper to find the <name> argument.
    318  * It set the "type", "size", and "extend" fields in inode descriptor.
    319  * It set the " extend" field in dentry descriptor.
      320 * It sets the "type", "size", and "extend" fields in the child inode descriptor.
      321 * It sets the "extend" field in the dentry descriptor.
    320322 * It must be called by a thread running in the cluster containing the parent inode.
    321323 *****************************************************************************************
     
    333335 *****************************************************************************************
     334336 * It updates the size of a directory entry identified by the <dentry> argument in
    335  * the mapper of a directory identified by the <inode> argument, as defined by the <size>
    336  * argument.
     337 * the mapper of a directory identified by the <inode> argument, as defined by the
     338 * <size> argument.
     337339 * It scans the mapper to find the entry identified by the dentry "name" field.
     338340 * It sets the "size" field in the directory mapper AND marks the page as DIRTY.
     
    427429 * in <searched_cluster> the FATFS cluster index of a free cluster.
    428430 * It can be called by a thread running in any cluster, as it uses remote access
    429  * primitives when the FAT mapper is remote. It takes the "free_lock" stored in the
    430  * FATFS context located in the same cluster as the FAT mapper itself, to get exclusive
    431  * access to the FAT. It uses (and updates) the <free_cluster_hint> and <free_clusters>
    432  * shared variables in this FATFS context.
    433  * It updates the FAT mapper, and synchronously updates the FAT region on IOC device.
    434  * The FAT mapper being a cache, this function updates the FAT mapper from informations
    435  * stored on IOC device in case of miss.
     431 * primitives when the FAT mapper is remote. It takes the queuelock stored in the FATFS
     432 * context (located in the same cluster as the FAT mapper itself), to get exclusive
     433 * access to the FAT. It uses the <free_cluster_hint> and <free_clusters> variables
     434 * stored in this FATFS context.
     435 * - it updates the <free_cluster_hint> and <free_clusters> variables in FATFS context.
     436 * - it updates the FAT mapper (handling miss from IOC device if required).
     437 * - it synchronously updates the FAT region on IOC device.
     438 * - it returns the allocated cluster index.
    436439 *****************************************************************************************
    437440 * @ searched_cluster    : [out] found FATFS cluster index.
     
    461464 * This function moves a page from/to the mapper to/from the FATFS file system on device.
    462465 * The page must have been previously allocated and registered in the mapper.   
    463  * The page - and the mapper - can be located in another cluster than the calling thread.
    464466 * The pointer on the mapper and the page index in file are found in the page descriptor.
    465467 * It is used for both a regular file/directory mapper, and the FAT mapper.
  • trunk/kernel/fs/vfs.c

    r623 r625  
    175175    else
    176176    {
    177         ctx = NULL;
    178                 assert( false , "illegal file system type = %d\n" , fs_type );
     177        printk("\n[ERROR] in %s : illegal FS type\n", __FUNCTION__ );
     178        return -1;
    179179    }
    180180
     
    185185    {
    186186        printk("\n[ERROR] in %s : cannot allocate inum\n", __FUNCTION__ );
    187         return ENOMEM;
     187        return -1;
    188188    }
    189189
     
    378378{
    379379
    380 assert( (inode != NULL) , "inode pointer is NULL\n" );
     380assert( (inode != NULL) , "inode pointer is NULL" );
    381381
    382382    uint32_t   page_id;
     
    386386    uint32_t   size   = inode->size;
    387387
    388 assert( (mapper != NULL) , "mapper pointer is NULL\n" );
     388assert( (mapper != NULL) , "mapper pointer is NULL" );
    389389
    390390#if DEBUG_VFS_INODE_LOAD_ALL
     
    560560void vfs_file_destroy( vfs_file_t *  file )
    561561{
    562 
    563 // check refcount
    564 // assert( (file->refcount == 0) , "refcount non zero\n" );
    565 
    566562        kmem_req_t req;
    567563        req.ptr   = file;
     
    766762
    767763// check argument
    768 assert( (file_xp != XPTR_NULL), "file_xp == XPTR_NULL\n" );
     764assert( (file_xp != XPTR_NULL), "file_xp == XPTR_NULL" );
    769765
    770766    // get cluster and local pointer on remote file descriptor
     
    776772   
    777773// check inode type
    778 assert( (inode_type == INODE_TYPE_FILE), "inode type is not INODE_TYPE_FILE" );
     774assert( (inode_type == INODE_TYPE_FILE), "bad inode type" );
    779775
    780776    // get mapper pointer and file offset from file descriptor
    781777    file_offset = hal_remote_l32( XPTR( file_cxy , &file_ptr->offset ) );
    782     mapper = (mapper_t *)hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) );
     778    mapper      = hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) );
    783779
    784780    // move data between mapper and buffer
     
    788784                              buffer,
    789785                              size );
     786    if( error )
     787    {
     788        printk("\n[ERROR] in %s : cannot move data", __FUNCTION__ );
     789        return -1;
     790    }
    790791
    791792    // update file offset in file descriptor
    792793    hal_remote_atomic_add( XPTR( file_cxy , &file_ptr->offset ) , size );
    793794
    794     if( error )
    795     {
    796         return -1;
    797     }
     795#if DEBUG_VFS_USER_MOVE
     796char          name[CONFIG_VFS_MAX_NAME_LENGTH];
     797uint32_t      cycle      = (uint32_t)hal_get_cycles();
     798thread_t    * this       = CURRENT_THREAD;
     799vfs_inode_t * inode      = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );
     800vfs_inode_get_name( XPTR( file_cxy , inode ) , name );
     801if( cycle > DEBUG_VFS_USER_MOVE )
     802{
     803    if( to_buffer )
     804    printk("\n[%s] thread[%x,%x] moves %d bytes from <%s> mapper to buffer (%x) / cycle %d\n",
     805    __FUNCTION__ , this->process->pid, this->trdid, size, name, buffer, cycle );
     806    else           
     807    printk("\n[%s] thread[%x,%x] moves %d bytes from buffer (%x) to <%s> mapper / cycle %d\n",
     808    __FUNCTION__ , this->process->pid, this->trdid, size, buffer, name, cycle );
     809}
     810#endif
    798811
    799812    return size;
     
    816829
    817830// check argument
    818 assert( (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL\n" );
     831assert( (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL" );
    819832
    820833    // get cluster and local pointer on remote file descriptor
     
    825838    inode_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type   ) );
    826839
    827     // action depends on inode type
    828     if( inode_type == INODE_TYPE_FILE )
    829     {
    830         // get mapper pointers and file offset from file descriptor
    831         file_offset = hal_remote_l32( XPTR( file_cxy , &file_ptr->offset ) );
    832         mapper_ptr  = hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) );
    833         mapper_xp   = XPTR( file_cxy , mapper_ptr );
    834 
    835         // move data between mapper and buffer
    836         error = mapper_move_kernel( mapper_xp,
    837                                     to_buffer,
    838                                     file_offset,
    839                                     buffer_xp,
    840                                     size );
    841         if( error ) return -1;
    842     }
    843     else
    844     {
    845         printk("\n[ERROR] in %s : inode is not a file", __FUNCTION__ );
     840// check inode type
     841assert( (inode_type == INODE_TYPE_FILE), "bad file type" );
     842
     843    // get mapper pointers and file offset from file descriptor
     844    file_offset = hal_remote_l32( XPTR( file_cxy , &file_ptr->offset ) );
     845    mapper_ptr  = hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) );
     846    mapper_xp   = XPTR( file_cxy , mapper_ptr );
     847
     848    // move data between mapper and buffer
     849    error = mapper_move_kernel( mapper_xp,
     850                                to_buffer,
     851                                file_offset,
     852                                buffer_xp,
     853                                size );
     854    if( error )
     855    {
     856        printk("\n[ERROR] in %s : cannot move data", __FUNCTION__ );
    846857        return -1;
    847858    }
     859
     860#if DEBUG_VFS_KERNEL_MOVE
     861char          name[CONFIG_VFS_MAX_NAME_LENGTH];
     862uint32_t      cycle      = (uint32_t)hal_get_cycles();
     863thread_t    * this       = CURRENT_THREAD;
     864cxy_t         buffer_cxy = GET_CXY( buffer_xp );
     865void        * buffer_ptr = GET_PTR( buffer_xp );
     866vfs_inode_t * inode      = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );
     867vfs_inode_get_name( XPTR( file_cxy , inode ) , name );
     868if( cycle > DEBUG_VFS_KERNEL_MOVE )
     869{
     870    if( to_buffer )
     871    printk("\n[%s] thread[%x,%x] moves %d bytes from <%s> mapper to buffer(%x,%x) / cycle %d\n",
     872    __FUNCTION__ , this->process->pid, this->trdid, size, name, buffer_cxy, buffer_ptr, cycle );
     873    else           
     874    printk("\n[%s] thread[%x,%x] moves %d bytes from buffer(%x,%x) to <%s> mapper / cycle %d\n",
     875    __FUNCTION__ , this->process->pid, this->trdid, size, buffer_cxy, buffer_ptr, name, cycle );
     876}
     877#endif
    848878
    849879    return 0;
     
    866896
    867897// check argument
    868 assert( (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL\n" );
     898assert( (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL" );
    869899
    870900    // get cluster and local pointer on remote file descriptor
     
    946976
    947977// check argument
    948 assert( (file_xp != XPTR_NULL) , "file_xp is XPTR_NULL\n" );
     978assert( (file_xp != XPTR_NULL) , "file_xp is XPTR_NULL" );
    949979
    950980    thread_t  * this    = CURRENT_THREAD;
     
    9971027#endif
    9981028
    999     //////// 2) update file size in all parent directory mapper(s) and on device
     1029    //////// 2) update file size in all parent directory mapper(s) and update device
    10001030
    10011031    // get pointers on remote inode
     
    10521082vfs_inode_get_name( XPTR( parent_cxy , parent_inode_ptr ) , parent_name );
    10531083if( DEBUG_VFS_CLOSE < cycle )
    1054 printk("\n[%s] thread[%x,%x] updated size of <%s> in parent <%s>\n",
    1055 __FUNCTION__, process->pid, this->trdid, name, parent_name );
     1084printk("\n[%s] thread[%x,%x] updated <%s> in <%s> / size = %d bytes\n",
     1085__FUNCTION__, process->pid, this->trdid, name, parent_name, size );
    10561086#endif
    10571087
     
    11141144#if DEBUG_VFS_CLOSE
    11151145if( DEBUG_VFS_CLOSE < cycle )
    1116 printk("\n[%s] thread[%x,%x] reset all fd-array copies for <%x>\n",
     1146printk("\n[%s] thread[%x,%x] reset all fd-array copies for <%s>\n",
    11171147__FUNCTION__, process->pid, this->trdid, name );
    11181148#endif
     
    11321162cycle = (uint32_t)hal_get_cycles();
    11331163if( DEBUG_VFS_CLOSE < cycle )
    1134 printk("\n[%s] thread[%x,%x] exit / <%s> closed / cycle %d\n",
    1135 __FUNCTION__, process->pid, this->trdid, name, cycle );
     1164printk("\n[%s] thread[%x,%x] exit / closed <%s> in process %x / cycle %d\n",
     1165__FUNCTION__, process->pid, this->trdid, name, process->pid, cycle );
    11361166#endif
    11371167
     
    20292059    vfs_inode_type_t  inode_type;   // target inode type
    20302060
    2031     // set lookup working mode
    2032     assert( (rights == 0), __FUNCTION__,
    2033     "access rights non implemented yet\n" );
     2061// check lookup working mode
     2062assert( (rights == 0), "access rights not implemented yet" );
    20342063 
    20352064    // get extended pointer on target inode
     
    20512080    // TODO implement this function
    20522081
    2053 assert( false , "not implemented\n" );
     2082assert( false , "not implemented" );
    20542083
    20552084    return 0;
     
    20612090                    uint32_t rights )
    20622091{
    2063     assert( false , "not implemented cwd_xp: %x, path <%s>, rights %x\n",
    2064       cwd_xp, path, rights );
     2092    assert( false , "not implemented %l %x %x", cwd_xp, path, rights );
    20652093    return 0;
    20662094}
     
    20842112    vfs_inode_type_t   inode_type;
    20852113    uint32_t           inode_size;
    2086     uint32_t           inode_inum;
    20872114    uint32_t           inode_attr;
    20882115    uint32_t           inode_dirty;
     2116    void             * inode_extd;
     2117
    20892118    xptr_t             children_xp;    // extended pointer on children xhtab
    20902119
     
    21152144                                        "                              " };  // level 15
    21162145
    2117 assert( (inode_xp != XPTR_NULL) , "inode_xp cannot be NULL\n" );
    2118 assert( (name_xp  != XPTR_NULL) , "name_xp cannot be NULL\n" );
    2119 assert( (indent < 16)           , "depth cannot be larger than 15\n" );
     2146assert( (inode_xp != XPTR_NULL) , "inode_xp cannot be NULL" );
     2147assert( (name_xp  != XPTR_NULL) , "name_xp cannot be NULL" );
     2148assert( (indent < 16)           , "depth cannot be larger than 15" );
    21202149   
    21212150    // get current inode cluster and local pointer
     
    21262155    inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type   ) );
    21272156    inode_size = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->size   ) );
    2128     inode_inum = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->inum   ) );
    21292157    inode_attr = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->attr   ) );
     2158    inode_extd = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->extend ) );
    21302159    mapper_ptr = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) );
    21312160
     
    21372166
    21382167    // display inode
    2139     nolock_printk("%s<%s> : %s / inum %d / %d bytes / dirty %d / cxy %x / inode %x / mapper %x\n",
    2140                   indent_str[indent], name, vfs_inode_type_str( inode_type ),
    2141                   inode_inum, inode_size, inode_dirty, inode_cxy, inode_ptr, mapper_ptr );
     2168    nolock_printk("%s<%s> : %s / extd %d / %d bytes / dirty %d / cxy %x / inode %x / mapper %x\n",
     2169    indent_str[indent], name, vfs_inode_type_str( inode_type ), (uint32_t)inode_extd,
     2170    inode_size, inode_dirty, inode_cxy, inode_ptr, mapper_ptr );
    21422171
    21432172    // scan directory entries when current inode is a directory
     
    24052434// check pathname / root_xp consistency
    24062435assert( ((pathname[0] != '/') || (root_xp == process->vfs_root_xp)),
    2407 "root inode must be VFS root for path <%s>\n", pathname );
     2436"root inode must be VFS root for path <%s>", pathname );
    24082437
    24092438#if DEBUG_VFS_LOOKUP
     
    25502579                if ( error )   // child not found in parent mapper
    25512580                {
    2552                     if ( last && create )  // add a brand new dentry in parent
     2581                    if ( last && create )  // add a brand new dentry in parent directory
    25532582                    {
    25542583                        error = vfs_new_dentry_init( parent_xp,               
     
    27052734    uint32_t    child_size;
    27062735
    2707 #if DEBUG_VFS_NEW_CHILD_INIT
     2736#if DEBUG_VFS_NEW_DENTRY_INIT
    27082737char parent_name[CONFIG_VFS_MAX_NAME_LENGTH];
    27092738char child_name[CONFIG_VFS_MAX_NAME_LENGTH];
     
    27122741uint32_t   cycle = (uint32_t)hal_get_cycles();
    27132742thread_t * this  = CURRENT_THREAD;
    2714 if( DEBUG_VFS_NEW_CHILD_INIT < cycle )
     2743if( DEBUG_VFS_NEW_DENTRY_INIT < cycle )
    27152744printk("\n[%s] thread[%x,%x] enter / parent <%s> / child <%s> / cycle %d\n",
    27162745__FUNCTION__ , this->process->pid, this->trdid, parent_name, child_name, cycle );
     
    27412770    }
    27422771
    2743 #if( DEBUG_VFS_NEW_CHILD_INIT & 1)
    2744 if( DEBUG_VFS_NEW_CHILD_INIT < cycle )
    2745 printk("\n[%s] thread[%x,%x] allocated one FAT cluster to <%s>\n",
    2746 __FUNCTION__ , this->process->pid, this->trdid, child_name );
     2772#if( DEBUG_VFS_NEW_DENTRY_INIT & 1)
     2773if( DEBUG_VFS_NEW_DENTRY_INIT < cycle )
     2774printk("\n[%s] thread[%x,%x] allocated FAT cluster %x to <%s>\n",
     2775__FUNCTION__ , this->process->pid, this->trdid, cluster, child_name );
    27472776#endif
    27482777
     
    27752804    }
    27762805
    2777 #if DEBUG_VFS_NEW_CHILD_INIT
     2806#if DEBUG_VFS_NEW_DENTRY_INIT
    27782807cycle = (uint32_t)hal_get_cycles();
    2779 if( DEBUG_VFS_NEW_CHILD_INIT < cycle )
     2808if( DEBUG_VFS_NEW_DENTRY_INIT < cycle )
    27802809printk("\n[%s] thread[%x,%x] exit / parent <%s> / child <%s> / cycle %d\n",
    27812810__FUNCTION__ , this->process->pid, this->trdid, parent_name, child_name, cycle );
     
    30853114
    30863115// check buffer overflow
    3087 assert( (index >= 0) , "kernel buffer too small\n" );
     3116assert( (index >= 0) , "kernel buffer too small" );
    30883117
    30893118            }
     
    31113140
    31123141// check buffer overflow
    3113 assert( (index >= 0) , "kernel buffer too small\n" );
     3142assert( (index >= 0) , "kernel buffer too small" );
    31143143
    31153144            // update pathname
     
    33793408    error_t error = 0;
    33803409
    3381 assert( (page_xp != XPTR_NULL) , "page pointer is NULL\n" );
     3410assert( (page_xp != XPTR_NULL) , "page pointer is NULL" );
    33823411
    33833412    page_t * page_ptr = GET_PTR( page_xp );
     
    33873416    mapper_t * mapper = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) );
    33883417
    3389 assert( (mapper != NULL) , "no mapper for page\n" );
     3418assert( (mapper != NULL) , "no mapper for page" );
    33903419
    33913420    // get FS type
     
    34073436    else
    34083437    {
    3409         assert( false , "undefined file system type\n" );
     3438        assert( false , "undefined file system type" );
    34103439    }
    34113440
     
    34203449    error_t error = 0;
    34213450
    3422 assert( (inode  != NULL) , "inode  pointer is NULL\n" );
    3423 assert( (dentry != NULL) , "dentry pointer is NULL\n" );
     3451assert( (inode  != NULL) , "inode  pointer is NULL" );
     3452assert( (dentry != NULL) , "dentry pointer is NULL" );
    34243453
    34253454    mapper_t * mapper = inode->mapper;
    34263455
    3427 assert( (mapper != NULL) , "mapper pointer is NULL\n" );
     3456assert( (mapper != NULL) , "mapper pointer is NULL" );
    34283457
    34293458    // get FS type
     
    34453474    else
    34463475    {
    3447         assert( false , "undefined file system type\n" );
     3476        assert( false , "undefined file system type" );
    34483477    }
    34493478
     
    34583487    error_t error = 0;
    34593488
    3460 assert( (inode  != NULL) , "inode  pointer is NULL\n" );
    3461 assert( (dentry != NULL) , "dentry pointer is NULL\n" );
     3489assert( (inode  != NULL) , "inode  pointer is NULL" );
     3490assert( (dentry != NULL) , "dentry pointer is NULL" );
    34623491
    34633492    mapper_t * mapper = inode->mapper;
    34643493
    3465 assert( (mapper != NULL) , "mapper pointer is NULL\n" );
     3494assert( (mapper != NULL) , "mapper pointer is NULL" );
    34663495
    34673496    // get FS type
     
    34833512    else
    34843513    {
    3485         assert( false , "undefined file system type\n" );
     3514        assert( false , "undefined file system type" );
    34863515    }
    34873516
     
    34983527
    34993528// check arguments
    3500 assert( (parent != NULL) , "parent pointer is NULL\n");
    3501 assert( (child_xp != XPTR_NULL) , "child pointer is NULL\n");
     3529assert( (parent != NULL) , "parent pointer is NULL");
     3530assert( (child_xp != XPTR_NULL) , "child pointer is NULL");
    35023531
    35033532    // get parent inode FS type
     
    35113540    else if( fs_type == FS_TYPE_RAMFS )
    35123541    {
    3513         assert( false , "should not be called for RAMFS\n" );
     3542        assert( false , "should not be called for RAMFS" );
    35143543    }
    35153544    else if( fs_type == FS_TYPE_DEVFS )
    35163545    {
    3517         assert( false , "should not be called for DEVFS\n" );
     3546        assert( false , "should not be called for DEVFS" );
    35183547    }
    35193548    else
    35203549    {
    3521         assert( false , "undefined file system type\n" );
     3550        assert( false , "undefined file system type" );
    35223551    }
    35233552
     
    35343563
    35353564// check arguments
    3536 assert( (inode  != NULL) , "inode  pointer is NULL\n");
    3537 assert( (dentry != NULL) , "dentry pointer is NULL\n");
     3565assert( (inode  != NULL) , "inode  pointer is NULL");
     3566assert( (dentry != NULL) , "dentry pointer is NULL");
    35383567
    35393568    // get parent inode FS type
     
    35473576    else if( fs_type == FS_TYPE_RAMFS )
    35483577    {
    3549         assert( false , "should not be called for RAMFS\n" );
     3578        assert( false , "should not be called for RAMFS" );
    35503579    }
    35513580    else if( fs_type == FS_TYPE_DEVFS )
    35523581    {
    3553         assert( false , "should not be called for DEVFS\n" );
     3582        assert( false , "should not be called for DEVFS" );
    35543583    }
    35553584    else
    35563585    {
    3557         assert( false , "undefined file system type\n" );
     3586        assert( false , "undefined file system type" );
    35583587    }
    35593588
     
    35743603
    35753604// check arguments
    3576 assert( (inode != NULL) , "parent pointer is NULL\n");
    3577 assert( (array != NULL) , "child pointer is NULL\n");
     3605assert( (inode != NULL) , "parent pointer is NULL");
     3606assert( (array != NULL) , "child pointer is NULL");
    35783607assert( (detailed == false) , "detailed argument not supported\n");
    35793608
     
    36023631    else if( fs_type == FS_TYPE_RAMFS )
    36033632    {
    3604         assert( false , "should not be called for RAMFS\n" );
     3633        assert( false , "should not be called for RAMFS" );
    36053634    }
    36063635    else if( fs_type == FS_TYPE_DEVFS )
     
    36163645    else
    36173646    {
    3618         assert( false , "undefined file system type\n" );
     3647        assert( false , "undefined file system type" );
    36193648    }
    36203649
     
    36293658
    36303659// check arguments
    3631 assert( (inode != NULL) , "inode pointer is NULL\n");
     3660assert( (inode != NULL) , "inode pointer is NULL");
    36323661
    36333662    // get inode FS type
     
    36413670    else if( fs_type == FS_TYPE_RAMFS )
    36423671    {
    3643         assert( false , "should not be called for RAMFS\n" );
     3672        assert( false , "should not be called for RAMFS" );
    36443673    }
    36453674    else if( fs_type == FS_TYPE_DEVFS )
    36463675    {
    3647         assert( false , "should not be called for DEVFS\n" );
     3676        assert( false , "should not be called for DEVFS" );
    36483677    }
    36493678    else
    36503679    {
    3651         assert( false , "undefined file system type\n" );
     3680        assert( false , "undefined file system type" );
    36523681    }
    36533682
     
    36683697    else if( fs_type == FS_TYPE_RAMFS )
    36693698    {
    3670         assert( false , "should not be called for RAMFS\n" );
     3699        assert( false , "should not be called for RAMFS" );
    36713700    }
    36723701    else if( fs_type == FS_TYPE_DEVFS )
    36733702    {
    3674         assert( false , "should not be called for DEVFS\n" );
     3703        assert( false , "should not be called for DEVFS" );
    36753704    }
    36763705    else
    36773706    {
    3678         assert( false , "undefined file system type\n" );
     3707        assert( false , "undefined file system type" );
    36793708    }
    36803709
     
    36953724    else if( fs_type == FS_TYPE_RAMFS )
    36963725    {
    3697         assert( false , "should not be called for RAMFS\n" );
     3726        assert( false , "should not be called for RAMFS" );
    36983727    }
    36993728    else if( fs_type == FS_TYPE_DEVFS )
    37003729    {
    3701         assert( false , "should not be called for DEVFS\n" );
     3730        assert( false , "should not be called for DEVFS" );
    37023731    }
    37033732    else
    37043733    {
    3705         assert( false , "undefined file system type\n" );
     3734        assert( false , "undefined file system type" );
    37063735    }
    37073736
     
    37233752    else if( fs_type == FS_TYPE_RAMFS )
    37243753    {
    3725         assert( false , "should not be called for RAMFS\n" );
     3754        assert( false , "should not be called for RAMFS" );
    37263755    }
    37273756    else if( fs_type == FS_TYPE_DEVFS )
    37283757    {
    3729         assert( false , "should not be called for DEVFS\n" );
     3758        assert( false , "should not be called for DEVFS" );
    37303759    }
    37313760    else
    37323761    {
    3733         assert( false , "undefined file system type\n" );
     3762        assert( false , "undefined file system type" );
    37343763    }
    37353764
     
    37433772    error_t error = 0;
    37443773
    3745 assert( (inode_xp  != XPTR_NULL) , "inode pointer is NULL\n")       
     3774assert( (inode_xp  != XPTR_NULL) , "inode pointer is NULL")       
    37463775
    37473776    vfs_inode_t * inode_ptr = GET_PTR( inode_xp );
     
    37513780    mapper_t * mapper = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) );
    37523781
    3753 assert( (mapper != NULL) , "mapper pointer is NULL\n")       
     3782assert( (mapper != NULL) , "mapper pointer is NULL")       
    37543783
    37553784    // get FS type from mapper
     
    37633792    else if( fs_type == FS_TYPE_RAMFS )
    37643793    {
    3765         assert( false , "should not be called for RAMFS\n" );
     3794        assert( false , "should not be called for RAMFS" );
    37663795    }
    37673796    else if( fs_type == FS_TYPE_DEVFS )
    37683797    {
    3769         assert( false , "should not be called for DEVFS\n" );
     3798        assert( false , "should not be called for DEVFS" );
    37703799    }
    37713800    else
    37723801    {
    3773         assert( false , "undefined file system type\n" );
     3802        assert( false , "undefined file system type" );
    37743803    }
    37753804
  • trunk/kernel/fs/vfs.h

    r623 r625  
    593593 * This function is called by the vfs_lookup() function when a new dentry/inode must
    594594 * be created from scratch and introduced in both the Inode Tree and the IOC device.
    595  * The dentry and inode descriptors have been created by the caller:
     595 * The dentry and inode descriptors have been created by the caller.
    596596 * - It allocates one cluster from the relevant FS, and updates the File Allocation
    597597 *   Table (both the FAT mapper, and the IOC device).
     
    966966 * the <inode> argument, to find a directory entry identified by the <dentry> argument,
    967967 * and update the size for this directory entry in mapper, as defined by <size>.
    968  * The searched "name" is defined in the <dentry> argument, that must be in the same
    969  * cluster as the parent inode. It is called by the vfs_close() function.
     968 * The parent directory on device is synchronously updated.
     969 * It is called by the vfs_close() function.
    970970 *
    971971 * Depending on the file system type, it calls the relevant, FS specific function.
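The same FS-type dispatch pattern appears in all the vfs_fs_*() wrappers touched by this changeset. A self-contained sketch of that pattern, using simplified stand-in types and a stubbed FATFS back-end (both are assumptions used only for illustration):

    #include <assert.h>
    #include <stdint.h>

    // simplified stand-ins for the kernel types, for illustration only
    typedef enum { FS_TYPE_FATFS, FS_TYPE_RAMFS, FS_TYPE_DEVFS } vfs_fs_type_sketch_t;
    typedef struct dentry_sketch_s { uint32_t size; }             dentry_sketch_t;
    typedef struct inode_sketch_s  { vfs_fs_type_sketch_t type; } inode_sketch_t;

    // stub standing in for the FATFS specific function that updates the
    // directory entry in the parent mapper and synchronously on the device
    static int fatfs_update_dentry_sketch( inode_sketch_t  * inode,
                                           dentry_sketch_t * dentry,
                                           uint32_t          size )
    {
        (void)inode; dentry->size = size; return 0;
    }

    // FS-type dispatch pattern used by the vfs_fs_*() wrappers in this changeset
    int vfs_fs_update_dentry_sketch( inode_sketch_t  * inode,
                                     dentry_sketch_t * dentry,
                                     uint32_t          size )
    {
        int error = 0;

        if( inode->type == FS_TYPE_FATFS )
        {
            error = fatfs_update_dentry_sketch( inode, dentry, size );
        }
        else if( inode->type == FS_TYPE_RAMFS )
        {
            assert( 0 && "should not be called for RAMFS" );
        }
        else if( inode->type == FS_TYPE_DEVFS )
        {
            assert( 0 && "should not be called for DEVFS" );
        }
        else
        {
            assert( 0 && "undefined file system type" );
        }
        return error;
    }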
  • trunk/kernel/kern/chdev.c

    r619 r625  
    138138    uint32_t   server_lid;    // core running the server thread local index
    139139    xptr_t     lock_xp;       // extended pointer on lock protecting the chdev state
    140     uint32_t   save_sr;       // for critical section
    141140
    142141#if (DEBUG_SYS_READ & 1)
     
    177176uint32_t rx_cycle = (uint32_t)hal_get_cycles();
    178177if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
    179 printk("\n[%s] client[%x,%x] enter for RX / server[%x,%x] / cycle %d\n",
     178printk("\n[%s] client thread[%x,%x] enter for RX / server[%x,%x] / cycle %d\n",
    180179__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid, rx_cycle );
    181180#endif
     
    184183uint32_t tx_cycle = (uint32_t)hal_get_cycles();
    185184if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
    186 printk("\n[%s] client[%x,%x] enter for TX / server[%x,%x] / cycle %d\n",
     185printk("\n[%s] client thread[%x,%x] enter for TX / server[%x,%x] / cycle %d\n",
    187186__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid, tx_cycle );
    188187#endif
     
    194193    xptr_t  root_xp    = XPTR( chdev_cxy , &chdev_ptr->wait_root );
    195194
    196     // build extended pointer on server thread blocked state
    197     xptr_t  blocked_xp = XPTR( chdev_cxy , &server_ptr->blocked );
    198 
    199195    // build extended pointer on lock protecting chdev waiting queue
    200196    lock_xp            = XPTR( chdev_cxy , &chdev_ptr->wait_lock );
    201197
    202     // TODO the hal_disable_irq() / hal_restore_irq()
    203     // in the sequence below is probably useless, as it is
    204     // already done by the busylock_acquire() / busylock_release()
    205     // => remove it [AG] october 2018
    206 
    207     // critical section for the following sequence:
     198    // The following actions execute in critical section,
     199    // because the lock_acquire / lock_release :
    208200    // (1) take the lock protecting the chdev state
    209     // (2) block the client thread
    210     // (3) unblock the server thread if required
    211     // (4) register client thread in server queue
    212     // (5) send IPI to force server scheduling
    213     // (6) release the lock protecting waiting queue
    214     // (7) deschedule
    215 
    216     // enter critical section
    217     hal_disable_irq( &save_sr );
    218 
    219     // take the lock protecting chdev queue
     201    // (2) register client thread in server queue
     202    // (3) unblock the server thread and block client thread
     203    // (4) send IPI to force server scheduling
     204    // (5) release the lock protecting waiting queue
     205
     206    // 1. take the lock protecting chdev queue
    220207    remote_busylock_acquire( lock_xp );
    221208
    222     // block current thread
    223     thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );
    224 
    225 #if (DEBUG_CHDEV_CMD_TX & 1)
    226 if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
    227 printk("\n[%s] client thread[%x,%x] blocked\n",
    228 __FUNCTION__, this->process->pid, this->trdid );
    229 #endif
    230 
    231 #if (DEBUG_CHDEV_CMD_RX & 1)
    232 if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
    233 printk("\n[%s] client thread[%x,%x] blocked\n",
    234 __FUNCTION__, this->process_pid, this->trdid );
    235 #endif
    236 
    237     // unblock server thread if required
    238     if( hal_remote_l32( blocked_xp ) & THREAD_BLOCKED_IDLE )
    239     thread_unblock( server_xp , THREAD_BLOCKED_IDLE );
    240 
    241 #if (DEBUG_CHDEV_CMD_TX & 1)
    242 if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
    243 printk("\n[%s] TX server thread[%x,%x] unblocked\n",
    244 __FUNCTION__, server_pid, server_trdid );
    245 #endif
    246 
    247 #if (DEBUG_CHDEV_CMD_RX & 1)
    248 if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
    249 printk("\n[%s] RX server thread[%x,%x] unblocked\n",
    250 __FUNCTION__, server_pid, server_trdid );
    251 #endif
    252 
    253     // register client thread in waiting queue
     209    // 2. register client thread in waiting queue
    254210    xlist_add_last( root_xp , list_xp );
    255211
     
    266222#endif
    267223 
    268     // send IPI to core running the server thread when server core != client core
     224    // 3. client thread unblocks server thread and blocks itself
     225    thread_unblock( server_xp , THREAD_BLOCKED_IDLE );
     226    thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );
     227
     228#if (DEBUG_CHDEV_CMD_TX & 1)
     229if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
     230printk("\n[%s] client thread[%x,%x] unblock server thread[%x,%x] and block itself\n",
     231__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid );
     232#endif
     233
     234#if (DEBUG_CHDEV_CMD_RX & 1)
     235if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
     236printk("\n[%s] client thread[%x,%x] unblock server thread[%x,%x] and block itself\n",
     237__FUNCTION__, this->process->pid, this->trdid, server_pid, server_trdid );
     238#endif
     239
     240    // 4. send IPI to core running the server thread when server core != client core
    269241    if( (server_lid != this->core->lid) || (local_cxy != chdev_cxy) )
    270242    {
     
    285257    }
    286258 
    287     // release lock protecting chdev queue
     259    // 5. release lock protecting chdev queue
    288260    remote_busylock_release( lock_xp );
    289261
    290262    // deschedule
    291263    sched_yield("blocked on I/O");
    292 
    293     // exit critical section
    294     hal_restore_irq( save_sr );
    295264
    296265#if DEBUG_CHDEV_CMD_RX
    297266rx_cycle = (uint32_t)hal_get_cycles();
    298267if( (is_rx) && (DEBUG_CHDEV_CMD_RX < rx_cycle) )
    299 printk("\n[%s] client_thread[%x,%x] exit for RX / cycle %d\n",
     268printk("\n[%s] client thread[%x,%x] exit for RX / cycle %d\n",
    300269__FUNCTION__, this->process->pid, this->trdid, rx_cycle );
    301270#endif
     
    304273tx_cycle = (uint32_t)hal_get_cycles();
    305274if( (is_rx == 0) && (DEBUG_CHDEV_CMD_TX < tx_cycle) )
    306 printk("\n[%s] client_thread[%x,%x] exit for TX / cycle %d\n",
     275printk("\n[%s] client thread[%x,%x] exit for TX / cycle %d\n",
    307276__FUNCTION__, this->process->pid, this->trdid, tx_cycle );
    308277#endif
     
    344313uint32_t rx_cycle = (uint32_t)hal_get_cycles();
    345314if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
    346 printk("\n[%s] DEV thread[%x,%x] check TXT_RX channel %d / cycle %d\n",
     315printk("\n[%s] server thread[%x,%x] check TXT_RX channel %d / cycle %d\n",
    347316__FUNCTION__ , server->process->pid, server->trdid, chdev->channel, rx_cycle );
    348317#endif
     
    370339rx_cycle = (uint32_t)hal_get_cycles();
    371340if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
    372 printk("\n[%s] thread[%x,%x] found RX queue empty => blocks / cycle %d\n",
     341printk("\n[%s] server thread[%x,%x] found RX queue empty => blocks / cycle %d\n",
    373342__FUNCTION__ , server->process->pid, server->trdid, rx_cycle );
    374343#endif
     
    377346tx_cycle = (uint32_t)hal_get_cycles();
    378347if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
    379 printk("\n[%s] thread[%x,%x] found TX queue empty => blocks / cycle %d\n",
     348printk("\n[%s] server thread[%x,%x] found TX queue empty => blocks / cycle %d\n",
    380349__FUNCTION__ , server->process->pid, server->trdid, tx_cycle );
    381350#endif
     
    407376rx_cycle = (uint32_t)hal_get_cycles();
    408377if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
    409 printk("\n[%s] thread[%x,%x] for RX get client thread[%x,%x] / cycle %d\n",
     378printk("\n[%s] server thread[%x,%x] get command from client thread[%x,%x] / cycle %d\n",
    410379__FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, rx_cycle );
    411380#endif
     
    414383tx_cycle = (uint32_t)hal_get_cycles();
    415384if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
    416 printk("\n[%s] thread[%x,%x] for TX get client thread[%x,%x] / cycle %d\n",
     385printk("\n[%s] server thread[%x,%x] get command from client thread[%x,%x] / cycle %d\n",
    417386__FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, tx_cycle );
    418387#endif
     
    445414rx_cycle = (uint32_t)hal_get_cycles();
    446415if( (chdev->is_rx) && (DEBUG_CHDEV_SERVER_RX < rx_cycle) )
    447 printk("\n[%s] thread[%x,%x] completes RX for client thread[%x,%x] / cycle %d\n",
     416printk("\n[%s] thread[%x,%x] completes command for client thread[%x,%x] / cycle %d\n",
    448417__FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, rx_cycle );
    449418#endif
     
    452421tx_cycle = (uint32_t)hal_get_cycles();
    453422if( (chdev->is_rx == 0) && (DEBUG_CHDEV_SERVER_TX < tx_cycle) )
    454 printk("\n[%s] thread[%x,%x] completes TX for client thread[%x,%x] / cycle %d\n",
     423printk("\n[%s] thread[%x,%x] completes command for client thread[%x,%x] / cycle %d\n",
    455424 __FUNCTION__, server->process->pid, server->trdid, client_pid, client_trdid, tx_cycle );
    456425#endif
  • trunk/kernel/kern/chdev.h

    r619 r625  
    111111/******************************************************************************************
    112112 * This structure defines a chdev descriptor.
    113  * For multi-channels device, there is one chdev descriptor per channel.
    114113 * This structure is NOT replicated, and can be located in any cluster.
    115114 * One kernel thread, in charge of handling the commands registered in the waiting queue
  • trunk/kernel/kern/printk.c

    r623 r625  
    253253                break;
    254254            }
    255             case ('b'):             // excactly 2 digits hexadecimal
     255            case ('b'):             /* exactly 2 digits hexadecimal */
    256256            {
    257257                int  val = va_arg( *args, int );
     
    426426
    427427    // print generic infos
    428     nolock_printk("\n[PANIC] in %s: line %d | cycle %d\n"
     428    nolock_printk("\n\n[PANIC] in %s: line %d | cycle %d\n"
    429429                  "core[%x,%d] | thread %x (%x) | process %x (%x)\n",
    430430                  function_name, line, (uint32_t)cycle,
     
    502502    remote_busylock_acquire( lock_xp );
    503503
    504     // display string on TTY0
     504    // display buf on TTY0
    505505    dev_txt_sync_write( buf , 10 );
     506
     507    // release TXT0 lock
     508    remote_busylock_release( lock_xp );
     509}
     510
     511////////////////////////
     512void putd( int32_t val )
     513{
     514    static const char HexaTab[] = "0123456789ABCDEF";
     515
     516    char      buf[10];
     517    uint32_t  i;
     518
     519    // get pointers on TXT0 chdev
     520    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     521    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     522    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     523
     524    // get extended pointer on remote TXT0 chdev lock
     525    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     526
     527    // get TXT0 lock
     528    remote_busylock_acquire( lock_xp );
     529
     530    if (val < 0)
     531    {
     532        val = -val;
     533        dev_txt_sync_write( "-" , 1 );
     534    }
     535
     536    for(i = 0; i < 10 ; i++)
     537    {
     538        buf[9 - i] = HexaTab[val % 10];
     539        if (!(val /= 10)) break;
     540    }
     541
     542    // display buf on TTY0
     543    dev_txt_sync_write( &buf[9-i] , i+1 );
    506544
    507545    // release TXT0 lock
  • trunk/kernel/kern/printk.h

    r623 r625  
    123123
    124124/**********************************************************************************
    125  * This function displays a non-formated message on kernel TXT0 terminal.
     125 * This function displays a non-formated message on TXT0 terminal.
    126126 * This function is actually used to debug the assembly level kernel functions.
    127127 **********************************************************************************
     
    131131
    132132/**********************************************************************************
    133  * This function displays a 32 bits value in hexadecimal on kernel TXT0 terminal.
     133 * This function displays a 32 bits value in hexadecimal on TXT0 terminal.
    134134 * This function is actually used to debug the assembly level kernel functions.
    135135 **********************************************************************************
     
    139139
    140140/**********************************************************************************
    141  * This function displays a 64 bits value in hexadecimal on kernel TXT0 terminal.
     141 * This function displays a 32 bits signed value in decimal on TXT0 terminal.
     142 * This function is actually used to debug the assembly level kernel functions.
     143 **********************************************************************************
     144 * @ val   : 32 bits signed value.
     145 *********************************************************************************/
     146void putd( int32_t val );
     147
     148/**********************************************************************************
     149 * This function displays a 64 bits value in hexadecimal on TXT0 terminal.
    142150 * This function is actually used to debug the assembly level kernel functions.
    143151 **********************************************************************************
     
    147155
    148156/**********************************************************************************
    149  * This debug function displays on the kernel TXT0 terminal the content of an
     157 * This debug function displays on the TXT0 terminal the content of an
    150158 * array of bytes defined by <buffer> and <size> arguments (16 bytes per line).
    151159 * The <string> argument is displayed before the buffer content.
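A hedged usage sketch for the new putd() helper in low level debug code; the companion primitives are assumed here to be named puts() and putx() (the prototypes below are stand-ins for the declarations in this header, not libc functions):

    #include <stdint.h>

    // kernel TXT0 primitives (assumed names for puts/putx, putd added by this changeset)
    void puts( char * string );
    void putx( uint32_t val );
    void putd( int32_t val );

    // hypothetical assembly-level debug helper built on these primitives
    static void report_bad_value( uint32_t vaddr, int32_t error )
    {
        puts( "\nbad vaddr = " );
        putx( vaddr );                // 32 bits value in hexadecimal
        puts( " / error = " );
        putd( error );                // 32 bits signed value in decimal
        puts( "\n" );
    }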
  • trunk/kernel/kern/process.c

    r624 r625  
    9191}
    9292
    93 /////////////////////////////////////////////////
    94 void process_reference_init( process_t * process,
    95                              pid_t       pid,
    96                              xptr_t      parent_xp )
    97 {
     93////////////////////////////////////////////////////
     94error_t process_reference_init( process_t * process,
     95                                pid_t       pid,
     96                                xptr_t      parent_xp )
     97{
     98    error_t     error;
    9899    xptr_t      process_xp;
    99100    cxy_t       parent_cxy;
     
    105106    uint32_t    stdout_id;
    106107    uint32_t    stderr_id;
    107     error_t     error;
    108108    uint32_t    txt_id;
    109109    char        rx_path[40];
     
    111111    xptr_t      file_xp;
    112112    xptr_t      chdev_xp;
    113     chdev_t chdev_ptr;
     113    chdev_t   * chdev_ptr;
    114114    cxy_t       chdev_cxy;
    115115    pid_t       parent_pid;
     116    vmm_t     * vmm;
    116117
    117118    // build extended pointer on this reference process
    118119    process_xp = XPTR( local_cxy , process );
     120
     121    // get pointer on process vmm
     122    vmm = &process->vmm;
    119123
    120124    // get parent process cluster and local pointer
     
    129133uint32_t cycle = (uint32_t)hal_get_cycles();
    130134if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
    131 printk("\n[%s] thread[%x,%x] enter to initalialize process %x / cycle %d\n",
    132 __FUNCTION__, parent_pid, this->trdid, pid, cycle );
     135printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n",
     136__FUNCTION__, this->process->pid, this->trdid, pid, cycle );
    133137#endif
    134138
     
    144148    process->cwd_xp      = hal_remote_l64( XPTR( parent_cxy, &parent_ptr->cwd_xp ) );
    145149
    146     // initialize vmm as empty
    147     error = vmm_init( process );
    148 
    149 assert( (error == 0) , "cannot initialize VMM\n" );
     150    // initialize VSL as empty
     151    vmm->vsegs_nr = 0;
     152        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
     153
     154    // create an empty GPT as required by the architecture
     155    error = hal_gpt_create( &vmm->gpt );
     156    if( error )
     157    {
     158        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
     159        return -1;
     160    }
     161
     162#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     163if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
     164printk("\n[%s] thread[%x,%x] created empty GPT for process %x\n",
     165__FUNCTION__, parent_pid, this->trdid, pid );
     166#endif
     167
     168    // initialize GPT and VSL locks
     169    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
     170        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
     171
     172    // register kernel vsegs in VMM as required by the architecture
     173    error = hal_vmm_kernel_update( process );
     174    if( error )
     175    {
     176        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
     177        return -1;
     178    }
     179
     180#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     181if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
     182printk("\n[%s] thread[%x,%x] registered kernel vsegs for process %x\n",
     183__FUNCTION__, parent_pid, this->trdid, pid );
     184#endif
     185
     186    // create "args" and "envs" vsegs
     187    // create "stacks" and "mmap" vsegs allocators
     188    // initialize locks protecting GPT and VSL
     189    error = vmm_user_init( process );
     190    if( error )
     191    {
     192        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
     193        return -1;
     194    }
    150195 
    151196#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
    152197cycle = (uint32_t)hal_get_cycles();
    153198if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
    154 printk("\n[%s] thread[%x,%x] / vmm empty for process %x / cycle %d\n",
    155 __FUNCTION__, parent_pid, this->trdid, pid, cycle );
     199printk("\n[%s] thread[%x,%x] initialized vmm for process %x\n",
     200__FUNCTION__, parent_pid, this->trdid, pid );
    156201#endif
    157202
     
    187232                           &stdin_xp,
    188233                           &stdin_id );
    189 
    190 assert( (error == 0) , "cannot open stdin pseudo file" );
     234        if( error )
     235        {
      236            printk("\n[ERROR] in %s : cannot open stdin pseudo-file\n", __FUNCTION__ );
     237            return -1;
     238        }
     239
    191240assert( (stdin_id == 0) , "stdin index must be 0" );
    192241
     
    206255                           &stdout_xp,
    207256                           &stdout_id );
    208 
    209         assert( (error == 0) , "cannot open stdout pseudo file" );
    210         assert( (stdout_id == 1) , "stdout index must be 1" );
     257        if( error )
     258        {
     259            printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ );
     260            return -1;
     261        }
     262
     263assert( (stdout_id == 1) , "stdout index must be 1" );
    211264
    212265#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     
    225278                           &stderr_xp,
    226279                           &stderr_id );
    227 
    228         assert( (error == 0) , "cannot open stderr pseudo file" );
    229         assert( (stderr_id == 2) , "stderr index must be 2" );
     280        if( error )
     281        {
     282            printk("\n[ERROR] in %s : cannot open stderr pseudo-file\n", __FUNCTION__ );
     283            return -1;
     284        }
     285
     286assert( (stderr_id == 2) , "stderr index must be 2" );
    230287
    231288#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     
    240297    {
    241298        // get extended pointer on stdin pseudo file in parent process
    242         file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy , &parent_ptr->fd_array.array[0] ) );
     299        file_xp = (xptr_t)hal_remote_l64( XPTR( parent_cxy,
     300                                                &parent_ptr->fd_array.array[0] ) );
    243301
    244302        // get extended pointer on parent process TXT chdev
     
    261319
    262320    // initialize lock protecting CWD changes
    263     remote_busylock_init( XPTR( local_cxy , &process->cwd_lock ), LOCK_PROCESS_CWD );
     321    remote_busylock_init( XPTR( local_cxy ,
     322                                &process->cwd_lock ), LOCK_PROCESS_CWD );
    264323
    265324#if (DEBUG_PROCESS_REFERENCE_INIT & 1)
     
    273332    xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    274333    process->children_nr     = 0;
    275     remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN );
     334    remote_queuelock_init( XPTR( local_cxy,
     335                                 &process->children_lock ), LOCK_PROCESS_CHILDREN );
    276336
    277337    // reset semaphore / mutex / barrier / condvar list roots and lock
     
    280340    xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
    281341    xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
    282     remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC );
     342    remote_queuelock_init( XPTR( local_cxy ,
     343                                 &process->sync_lock ), LOCK_PROCESS_USERSYNC );
    283344
    284345    // reset open directories root and lock
    285346    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
    286     remote_queuelock_init( XPTR( local_cxy , &process->dir_lock ), LOCK_PROCESS_DIR );
     347    remote_queuelock_init( XPTR( local_cxy ,
     348                                 &process->dir_lock ), LOCK_PROCESS_DIR );
    287349
    288350    // register new process in the local cluster manager pref_tbl[]
     
    315377#endif
    316378
     379    return 0;
     380
    317381}  // process_reference_init()
    318382
     
    321385                           xptr_t      reference_process_xp )
    322386{
    323     error_t error;
     387    error_t   error;
     388    vmm_t   * vmm;
    324389
    325390    // get reference process cluster and local pointer
    326391    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
    327392    process_t * ref_ptr = GET_PTR( reference_process_xp );
     393
     394    // get pointer on process vmm
     395    vmm = &local_process->vmm;
    328396
    329397    // initialize PID, REF_XP, PARENT_XP, and STATE
     
    343411
    344412// check user process
    345 assert( (local_process->pid != 0), "PID cannot be 0" );
    346 
    347     // reset local process vmm
    348     error = vmm_init( local_process );
    349     assert( (error == 0) , "cannot initialize VMM\n");
    350 
    351     // reset process file descriptors array
     413assert( (local_process->pid != 0), "LPID cannot be 0" );
     414
     415    // initialize VSL as empty
     416    vmm->vsegs_nr = 0;
     417        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
     418
     419    // create an empty GPT as required by the architecture
     420    error = hal_gpt_create( &vmm->gpt );
     421    if( error )
     422    {
     423        printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ );
     424        return -1;
     425    }
     426
     427    // initialize GPT and VSL locks
     428    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
     429        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
     430
     431    // register kernel vsegs in VMM as required by the architecture
     432    error = hal_vmm_kernel_update( local_process );
     433    if( error )
     434    {
     435        printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ );
     436        return -1;
     437    }
     438
     439    // create "args" and "envs" vsegs
     440    // create "stacks" and "mmap" vsegs allocators
     441    // initialize locks protecting GPT and VSL
     442    error = vmm_user_init( local_process );
     443    if( error )
     444    {
     445        printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ );
     446        return -1;
     447    }
     448 
     449#if (DEBUG_PROCESS_COPY_INIT & 1)
     450cycle = (uint32_t)hal_get_cycles();
     451if( DEBUG_PROCESS_COPY_INIT < cycle )
     452printk("\n[%s] thread[%x,%x] initialized vmm for process %x / cycle %d\n",
     453__FUNCTION__, parent_pid, this->trdid, pid, cycle );
     454#endif
     455
     456    // set process file descriptors array
    352457        process_fd_init( local_process );
    353458
    354     // reset vfs_root_xp / vfs_bin_xp / cwd_xp fields
     459    // set vfs_root_xp / vfs_bin_xp / cwd_xp fields
    355460    local_process->vfs_root_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_root_xp ) );
    356461    local_process->vfs_bin_xp  = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
     
    380485    local_process->th_nr  = 0;
    381486    rwlock_init( &local_process->th_lock , LOCK_PROCESS_THTBL );
    382 
    383487
    384488    // register new process descriptor in local cluster manager local_list
     
    451555#endif
    452556
    453     // remove process from children_list
    454     // and release PID if owner cluster
     557    // when target process cluster is the owner cluster
     558    // - remove process from TXT list and transfer ownership
     559    // - remove process from children_list
     560    // - release PID
    455561    if( CXY_FROM_PID( pid ) == local_cxy )
    456562    {
     563        process_txt_detach( XPTR( local_cxy , process ) );
     564
     565#if (DEBUG_PROCESS_DESTROY & 1)
     566if( DEBUG_PROCESS_DESTROY < cycle )
     567printk("\n[%s] thread[%x,%x] removed process %x from TXT list\n",
     568__FUNCTION__, this->process->pid, this->trdid, pid );
     569#endif
     570
    457571        // get pointers on parent process
    458572        parent_xp  = process->parent_xp;
     
    472586#if (DEBUG_PROCESS_DESTROY & 1)
    473587if( DEBUG_PROCESS_DESTROY < cycle )
    474 printk("\n[%s] thread[%x,%x] removed process %x in cluster %x from children list\n",
    475 __FUNCTION__, this->process->pid, this->trdid, pid, local_cxy );
     588printk("\n[%s] thread[%x,%x] removed process %x from parent process children list\n",
     589__FUNCTION__, this->process->pid, this->trdid, pid );
    476590#endif
    477591
     
    777891uint32_t cycle = (uint32_t)hal_get_cycles();
    778892if( DEBUG_PROCESS_SIGACTION < cycle )
    779 printk("\n[%s] thread[%x,%x] enter in cluster %x for process %x / cycle %d\n",
    780 __FUNCTION__, this->process->pid, this->trdid, local_cxy, process->pid, cycle );
     893printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
     894__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
    781895#endif
    782896
     
    11891303}  // end process_register_thread()
    11901304
    1191 /////////////////////////////////////////////////
    1192 bool_t process_remove_thread( thread_t * thread )
     1305///////////////////////////////////////////////////
     1306uint32_t process_remove_thread( thread_t * thread )
    11931307{
    11941308    uint32_t count;  // number of threads in local process descriptor
     1309
     1310// check thread
     1311assert( (thread != NULL) , "thread argument is NULL" );
    11951312
    11961313    process_t * process = thread->process;
     
    12051322    count = process->th_nr;
    12061323
    1207 // check thread
    1208 assert( (thread != NULL) , "thread argument is NULL" );
    1209 
    12101324// check th_nr value
    12111325assert( (count > 0) , "process th_nr cannot be 0" );
     
    12181332    rwlock_wr_release( &process->th_lock );
    12191333
    1220     return (count == 1);
     1334    return count;
    12211335
    12221336}  // end process_remove_thread()
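A hedged caller-side sketch of the new interface: the returned value is the number of threads that were registered in the local process descriptor before the removal, so testing it against 1 identifies the last thread; the process_destroy_sketch() call is only an assumed placeholder for the actual cleanup path:

    #include <stdint.h>

    // stand-in declarations, for illustration only
    typedef struct thread_sketch_s  thread_sketch_t;
    typedef struct process_sketch_s process_sketch_t;

    uint32_t process_remove_thread_sketch( thread_sketch_t * thread );
    void     process_destroy_sketch( process_sketch_t * process );

    // caller side: "count == 1" means the removed thread was the last one,
    // and the local process descriptor can then be released
    void on_thread_exit_sketch( thread_sketch_t  * thread,
                                process_sketch_t * process )
    {
        uint32_t count = process_remove_thread_sketch( thread );

        if( count == 1 )                  // it was the last registered thread
        {
            process_destroy_sketch( process );
        }
    }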
     
    12831397cycle = (uint32_t)hal_get_cycles();
    12841398if( DEBUG_PROCESS_MAKE_FORK < cycle )
    1285 printk("\n[%s] thread[%x,%x] allocated process %x / cycle %d\n",
     1399printk("\n[%s] thread[%x,%x] allocated child_process %x / cycle %d\n",
    12861400__FUNCTION__, pid, trdid, new_pid, cycle );
    12871401#endif
    12881402
    12891403    // initializes child process descriptor from parent process descriptor
    1290     process_reference_init( process,
    1291                             new_pid,
    1292                             parent_process_xp );
     1404    error = process_reference_init( process,
     1405                                    new_pid,
     1406                                    parent_process_xp );
     1407    if( error )
     1408    {
     1409        printk("\n[ERROR] in %s : cannot initialize child process in cluster %x\n",
     1410        __FUNCTION__, local_cxy );
     1411        process_free( process );
     1412        return -1;
     1413    }
    12931414
    12941415#if( DEBUG_PROCESS_MAKE_FORK & 1 )
     
    12981419__FUNCTION__, pid, trdid, new_pid, cycle );
    12991420#endif
    1300 
    13011421
    13021422    // copy VMM from parent descriptor to child descriptor
     
    13611481#endif
    13621482
    1363     // set Copy_On_Write flag in parent process GPT
    1364     // this includes all replicated GPT copies
     1483    // set COW flag in DATA, ANON, REMOTE vsegs for parent process VMM
     1484    // this includes all parnet process copies in all clusters
    13651485    if( parent_process_cxy == local_cxy )   // reference is local
    13661486    {
     
    13731493    }
    13741494
    1375     // set Copy_On_Write flag in child process GPT
     1495    // set COW flag in DATA, ANON, REMOTE vsegs for child process VMM
    13761496    vmm_set_cow( process );
    13771497 
     
    14231543    char          ** args_pointers;           // array of pointers on main thread arguments
    14241544
    1425     // get thread, process, pid and ref_xp
     1545    // get calling thread, process, pid and ref_xp
    14261546    thread  = CURRENT_THREAD;
    14271547    process = thread->process;
     
    14701590cycle = (uint32_t)hal_get_cycles();
    14711591if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    1472 printk("\n[%s] thread[%x,%x] deleted all threads / cycle %d\n",
     1592printk("\n[%s] thread[%x,%x] deleted existing threads / cycle %d\n",
    14731593__FUNCTION__, pid, thread->trdid, cycle );
    14741594#endif
    14751595
    1476     // reset local process VMM
    1477     vmm_destroy( process );
     1596    // reset calling process VMM
     1597    vmm_user_reset( process );
    14781598
    14791599#if( DEBUG_PROCESS_MAKE_EXEC & 1 )
    14801600cycle = (uint32_t)hal_get_cycles();
    14811601if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    1482 printk("\n[%s] thread[%x,%x] reset VMM / cycle %d\n",
     1602printk("\n[%s] thread[%x,%x] completed VMM reset / cycle %d\n",
    14831603__FUNCTION__, pid, thread->trdid, cycle );
    14841604#endif
    14851605
    1486     // re-initialize the VMM (kentry/args/envs vsegs registration)
    1487     error = vmm_init( process );
     1606    // re-initialize the VMM (args/envs vsegs registration)
     1607    error = vmm_user_init( process );
    14881608    if( error )
    14891609    {
     
    14971617cycle = (uint32_t)hal_get_cycles();
    14981618if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    1499 printk("\n[%s] thread[%x,%x] / kentry/args/envs vsegs registered / cycle %d\n",
     1619printk("\n[%s] thread[%x,%x] registered args/envs vsegs / cycle %d\n",
    15001620__FUNCTION__, pid, thread->trdid, cycle );
    15011621#endif
     
    15151635cycle = (uint32_t)hal_get_cycles();
    15161636if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    1517 printk("\n[%s] thread[%x,%x] / code/data vsegs registered / cycle %d\n",
     1637printk("\n[%s] thread[%x,%x] registered code/data vsegs / cycle %d\n",
    15181638__FUNCTION__, pid, thread->trdid, cycle );
    15191639#endif
     
    15771697    vmm->vsegs_nr = 0;
    15781698        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
    1579         remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );
    15801699
    15811700    // initialise GPT as empty
    15821701    error = hal_gpt_create( &vmm->gpt );
    1583 
    15841702    if( error )
    15851703    {
     
    15881706    }
    15891707
    1590     // initialize GPT lock
     1708    // initialize VSL and GPT locks
     1709        remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
    15911710    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
    15921711   
    15931712    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
    15941713    error = hal_vmm_kernel_init( info );
    1595 
    15961714    if( error )
    15971715    {
     
    16521770    // allocates memory for process descriptor from local cluster
    16531771        process = process_alloc();
    1654        
    1655 // check memory allocator
    1656 assert( (process != NULL),
    1657 "no memory for process descriptor in cluster %x", local_cxy  );
     1772    if( process == NULL )
     1773    {
     1774        printk("\n[PANIC] in %s : cannot allocate process\n", __FUNCTION__ );
     1775        hal_core_sleep();
     1776    }
    16581777
    16591778    // set the CWD and VFS_ROOT fields in process descriptor
     
    16631782    // get PID from local cluster
    16641783    error = cluster_pid_alloc( process , &pid );
    1665 
    1666 // check PID allocator
    1667 assert( (error == 0),
    1668 "cannot allocate PID in cluster %x", local_cxy );
    1669 
    1670 // check PID value
    1671 assert( (pid == 1) ,
    1672 "process INIT must be first process in cluster 0" );
     1784    if( error )
     1785    {
     1786        printk("\n[PANIC] in %s : cannot allocate PID\n", __FUNCTION__ );
     1787        hal_core_sleep();
     1788    }
     1789    if( pid != 1 )
     1790    {
     1791        printk("\n[PANIC] in %s : process PID must be 0x1\n", __FUNCTION__ );
     1792        hal_core_sleep();
     1793    }
    16731794
    16741795    // initialize process descriptor / parent is local process_zero
    1675     process_reference_init( process,
    1676                             pid,
    1677                             XPTR( local_cxy , &process_zero ) ); 
     1796    error = process_reference_init( process,
     1797                                    pid,
     1798                                    XPTR( local_cxy , &process_zero ) ); 
     1799    if( error )
     1800    {
     1801        printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ );
     1802        hal_core_sleep();
     1803    }
    16781804
    16791805#if(DEBUG_PROCESS_INIT_CREATE & 1)
     
    16931819                            &file_xp,
    16941820                            &file_id );
    1695 
    1696 assert( (error == 0),
    1697 "failed to open file <%s>", CONFIG_PROCESS_INIT_PATH );
     1821    if( error )
     1822    {
     1823        printk("\n[PANIC] in %s : cannot open file <%s>\n",
     1824         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
     1825        hal_core_sleep();
     1826    }
    16981827
    16991828#if(DEBUG_PROCESS_INIT_CREATE & 1)
     
    17031832#endif
    17041833
    1705    // register "code" and "data" vsegs as well as entry-point
     1834    // register "code" and "data" vsegs as well as entry-point
    17061835    // in process VMM, using information contained in the elf file.
    17071836        error = elf_load_process( file_xp , process );
    17081837
    1709 assert( (error == 0),
    1710 "cannot access .elf file <%s>", CONFIG_PROCESS_INIT_PATH );
     1838    if( error )
     1839    {
     1840        printk("\n[PANIC] in %s : cannot access file <%s>\n",
     1841         __FUNCTION__, CONFIG_PROCESS_INIT_PATH  );
     1842        hal_core_sleep();
     1843    }
     1844
    17111845
    17121846#if(DEBUG_PROCESS_INIT_CREATE & 1)
     
    17141848printk("\n[%s] thread[%x,%x] registered code/data vsegs in VMM\n",
    17151849__FUNCTION__, this->process->pid, this->trdid );
     1850#endif
     1851
     1852#if (DEBUG_PROCESS_INIT_CREATE & 1)
     1853hal_vmm_display( process , true );
    17161854#endif
    17171855
     
    17511889                                &thread );
    17521890
    1753 assert( (error == 0),
    1754 "cannot create main thread for <%s>", CONFIG_PROCESS_INIT_PATH );
    1755 
    1756 assert( (thread->trdid == 0),
    1757 "main thread must have index 0 for <%s>", CONFIG_PROCESS_INIT_PATH );
     1891    if( error )
     1892    {
     1893        printk("\n[PANIC] in %s : cannot create main thread\n", __FUNCTION__  );
     1894        hal_core_sleep();
     1895    }
     1896    if( thread->trdid != 0 )
     1897    {
     1898        printk("\n[PANIC] in %s : bad main thread trdid\n", __FUNCTION__  );
     1899        hal_core_sleep();
     1900    }
    17581901
    17591902#if(DEBUG_PROCESS_INIT_CREATE & 1)
     
    19892132    process_txt_transfer_ownership( process_xp );
    19902133
    1991     // get extended pointer on process stdin file
     2134    // get extended pointer on process stdin pseudo file
    19922135    file_xp = (xptr_t)hal_remote_l64( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
    19932136
     
    20142157uint32_t txt_id = hal_remote_l32( XPTR( chdev_cxy , &chdev_ptr->channel ) );
    20152158if( DEBUG_PROCESS_TXT < cycle )
    2016 printk("\n[%s] thread[%x,%x] detached process %x from TXT %d / cycle %d\n",
     2159printk("\n[%s] thread[%x,%x] detached process %x from TXT%d / cycle %d\n",
    20172160__FUNCTION__, this->process->pid, this->trdid, process_pid, txt_id, cycle );
    20182161#endif
     
    20562199uint32_t txt_id = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
    20572200if( DEBUG_PROCESS_TXT < cycle )
    2058 printk("\n[%s] thread[%x,%x] give TXT %d to process %x / cycle %d\n",
     2201printk("\n[%s] thread[%x,%x] give TXT%d ownership to process %x / cycle %d\n",
    20592202__FUNCTION__, this->process->pid, this->trdid, txt_id, process_pid, cycle );
    20602203#endif
     
    20782221    xptr_t      iter_xp;         // iterator for xlist
    20792222    xptr_t      current_xp;      // extended pointer on current process
    2080     process_t * current_ptr;     // local pointer on current process
    2081     cxy_t       current_cxy;     // cluster for current process
     2223    bool_t      found;
    20822224
    20832225#if DEBUG_PROCESS_TXT
     
    20862228#endif
    20872229
    2088     // get pointers on process in owner cluster
     2230    // get pointers on target process
    20892231    process_cxy = GET_CXY( process_xp );
    20902232    process_ptr = GET_PTR( process_xp );
    20912233    process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
    20922234
    2093     // check owner cluster
    2094     assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
    2095     "process descriptor not in owner cluster" );
     2235// check owner cluster
     2236assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
     2237"process descriptor not in owner cluster" );
    20962238
    20972239    // get extended pointer on stdin pseudo file
     
    21032245    txt_ptr = GET_PTR( txt_xp );
    21042246
    2105     // get extended pointer on TXT_RX owner and TXT channel
     2247    // get relevant infos from chdev descriptor
    21062248    owner_xp = hal_remote_l64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) );
    2107     txt_id   = hal_remote_l32 ( XPTR( txt_cxy , &txt_ptr->channel ) );
    2108 
    2109     // transfer ownership only if process is the TXT owner
     2249    txt_id   = hal_remote_l32( XPTR( txt_cxy , &txt_ptr->channel ) );
     2250
     2251    // transfer ownership only if target process is the TXT owner
    21102252    if( (owner_xp == process_xp) && (txt_id > 0) ) 
    21112253    {
     
    21142256        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
    21152257
    2116         // get lock
    2117         remote_busylock_acquire( lock_xp );
    2118 
    2119         if( process_get_ppid( process_xp ) != 1 )           // process is not KSH
     2258        if( process_get_ppid( process_xp ) != 1 )       // target process is not KSH
    21202259        {
     2260            // get lock
     2261            remote_busylock_acquire( lock_xp );
     2262
    21212263            // scan attached process list to find KSH process
    2122             XLIST_FOREACH( root_xp , iter_xp )
     2264            found = false;
     2265            for( iter_xp = hal_remote_l64( root_xp ) ;
     2266                 (iter_xp != root_xp) && (found == false) ;
     2267                 iter_xp = hal_remote_l64( iter_xp ) )
     2268            {
     2269                current_xp = XLIST_ELEMENT( iter_xp , process_t , txt_list );
     2270
     2271                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
     2272                {
     2273                    // set owner field in TXT chdev
     2274                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
     2275
     2276#if DEBUG_PROCESS_TXT
     2277cycle = (uint32_t)hal_get_cycles();
     2278if( DEBUG_PROCESS_TXT < cycle )
      2279printk("\n[%s] thread[%x,%x] transferred TXT%d ownership to KSH / cycle %d\n",
     2280__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
     2281#endif
     2282                    found = true;
     2283                }
     2284            }
     2285
     2286            // release lock
     2287            remote_busylock_release( lock_xp );
     2288
      2289// A KSH process must exist for each user TXT channel
     2290assert( (found == true), "KSH process not found for TXT%d", txt_id );
     2291
     2292        }
     2293        else                                           // target process is KSH
     2294        {
     2295            // get lock
     2296            remote_busylock_acquire( lock_xp );
     2297
     2298            // scan attached process list to find another process
     2299            found = false;
     2300            for( iter_xp = hal_remote_l64( root_xp ) ;
     2301                 (iter_xp != root_xp) && (found == false) ;
     2302                 iter_xp = hal_remote_l64( iter_xp ) )
    21232303            {
    21242304                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
    2125                 current_cxy = GET_CXY( current_xp );
    2126                 current_ptr = GET_PTR( current_xp );
    2127 
    2128                 if( process_get_ppid( current_xp ) == 1 )  // current is KSH
     2305
     2306                if( current_xp != process_xp )            // current is not KSH
    21292307                {
    2130                     // release lock
    2131                     remote_busylock_release( lock_xp );
    2132 
    21332308                    // set owner field in TXT chdev
    21342309                    hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
    21352310
    21362311#if DEBUG_PROCESS_TXT
    2137 cycle = (uint32_t)hal_get_cycles();
    2138 uint32_t ksh_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
     2312cycle  = (uint32_t)hal_get_cycles();
     2313cxy_t       current_cxy = GET_CXY( current_xp );
     2314process_t * current_ptr = GET_PTR( current_xp );
     2315uint32_t    new_pid     = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
    21392316if( DEBUG_PROCESS_TXT < cycle )
    2140 printk("\n[%s] thread[%x,%x] release TXT %d to KSH %x / cycle %d\n",
    2141 __FUNCTION__, this->process->pid, this->trdid, txt_id, ksh_pid, cycle );
    2142 process_txt_display( txt_id );
    2143 #endif
    2144                      return;
      2317printk("\n[%s] thread[%x,%x] transferred TXT%d ownership to process %x / cycle %d\n",
      2318__FUNCTION__, this->process->pid, this->trdid, txt_id, new_pid, cycle );
     2319#endif
     2320                    found = true;
    21452321                }
    21462322            }
    2147  
     2323
    21482324            // release lock
    21492325            remote_busylock_release( lock_xp );
    21502326
    2151             // PANIC if KSH not found
    2152             assert( false , "KSH process not found for TXT %d" );
    2153 
    2154             return;
     2327            // no more owner for TXT if no other process found
     2328            if( found == false )
     2329            {
     2330                // set owner field in TXT chdev
     2331                hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
     2332
     2333#if DEBUG_PROCESS_TXT
     2334cycle = (uint32_t)hal_get_cycles();
     2335if( DEBUG_PROCESS_TXT < cycle )
     2336printk("\n[%s] thread[%x,%x] released TXT%d (no attached process) / cycle %d\n",
     2337__FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
     2338#endif
     2339            }
    21552340        }
    2156         else                                               // process is KSH
    2157         {
    2158             // scan attached process list to find another process
    2159             XLIST_FOREACH( root_xp , iter_xp )
    2160             {
    2161                 current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
    2162                 current_cxy = GET_CXY( current_xp );
    2163                 current_ptr = GET_PTR( current_xp );
    2164 
    2165                 if( current_xp != process_xp )            // current is not KSH
    2166                 {
    2167                     // release lock
    2168                     remote_busylock_release( lock_xp );
    2169 
    2170                     // set owner field in TXT chdev
    2171                     hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
     2341    }
     2342    else
     2343    {
    21722344
    21732345#if DEBUG_PROCESS_TXT
    2174 cycle  = (uint32_t)hal_get_cycles();
    2175 uint32_t new_pid = hal_remote_l32( XPTR( current_cxy , &current_ptr->pid ) );
     2346cycle = (uint32_t)hal_get_cycles();
    21762347if( DEBUG_PROCESS_TXT < cycle )
    2177 printk("\n[%s] thread[%x,%x] release TXT %d to process %x / cycle %d\n",
    2178 __FUNCTION__,this->process->pid, this->trdid, txt_id, new_pid, cycle );
    2179 process_txt_display( txt_id );
    2180 #endif
    2181                      return;
    2182                 }
    2183             }
    2184 
    2185             // release lock
    2186             remote_busylock_release( lock_xp );
    2187 
    2188             // no more owner for TXT if no other process found
    2189             hal_remote_s64( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
    2190 
    2191 #if DEBUG_PROCESS_TXT
    2192 cycle = (uint32_t)hal_get_cycles();
    2193 if( DEBUG_PROCESS_TXT < cycle )
    2194 printk("\n[%s] thread[%x,%x] release TXT %d to nobody / cycle %d\n",
    2195 __FUNCTION__, this->process->pid, this->trdid, txt_id, cycle );
    2196 process_txt_display( txt_id );
    2197 #endif
    2198             return;
    2199         }
    2200     }
    2201     else
    2202     {
    2203 
    2204 #if DEBUG_PROCESS_TXT
    2205 cycle = (uint32_t)hal_get_cycles();
    2206 if( DEBUG_PROCESS_TXT < cycle )
    2207 printk("\n[%s] thread %x in process %d does nothing (not TXT owner) / cycle %d\n",
    2208 __FUNCTION__, this->trdid, process_pid, cycle );
    2209 process_txt_display( txt_id );
    2210 #endif
    2211 
    2212     }
     2348printk("\n[%s] thread[%x,%x] does nothing for process %x (not TXT owner) / cycle %d\n",
     2349__FUNCTION__, this->process->pid, this->trdid, process_pid, cycle );
     2350#endif
     2351
     2352    }
     2353
    22132354}  // end process_txt_transfer_ownership()
    22142355
  • trunk/kernel/kern/process.h

    r623 r625  
    228228
    229229/*********************************************************************************************
    230  * This function initializes a reference, user process descriptor from another process
     230 * This function initializes a reference user process descriptor from another process
    231231 * descriptor, defined by the <parent_xp> argument. The <process> and <pid> arguments
    232232 * are previously allocated by the caller. This function can be called by two functions:
    233  * 1) process_init_create() : process is the INIT process; parent is process-zero.
     233 * 1) process_init_create() : process is the INIT process, and parent is process-zero.
    234234 * 2) process_make_fork() : the parent process descriptor is generally remote.
    235235 * The following fields are initialised :
    236236 * - It set the pid / ppid / ref_xp / parent_xp / state fields.
    237  * - It initializes the VMM (register the kentry, args, envs vsegs in VSL)
     237 * - It creates an empty GPT and an empty VSL.
     238 * - It initializes the locks protecting the GPT and the VSL.
     239 * - It registers the "kernel" vsegs in VSL, using the hal_vmm_kernel_update() function.
     240 * - It registers the "args" and "envs" vsegs in VSL, using the vmm_user_init() function.
      241 * - The "code" and "data" vsegs must be registered later, using the elf_load_process() function.
    238242 * - It initializes the FDT, defining the three pseudo files STDIN / STDOUT / STDERR.
    239243 *   . if INIT process     => link to kernel TXT[0].
    240  *   . if KSH[i] process   => allocate a free TXT[i] and give TXT ownership.
    241  *   . if USER process     => same TXT[i] as parent process and give TXT ownership.
     244 *   . if KSH[i] process   => allocate a free TXT[i].
     245 *   . if USER process     => link to parent process TXT[i].
    242246 * - It set the root_xp, bin_xp, cwd_xp fields.
    243247 * - It reset the children list as empty, but does NOT register it in parent children list.
     
    251255 * @ pid          : [in] process identifier.
    252256 * @ parent_xp    : [in] extended pointer on parent process descriptor.
    253  ********************************************************************************************/
    254 void process_reference_init( process_t * process,
    255                              pid_t       pid,
    256                              xptr_t      parent_xp );
     257 * @ return 0 if success / return -1 if failure
     258 ********************************************************************************************/
     259error_t process_reference_init( process_t * process,
     260                                pid_t       pid,
     261                                xptr_t      parent_xp );
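
Note: with the r625 prototype returning an error_t, callers are expected to check the return
value; a minimal usage sketch, mirroring the PANIC pattern used by process_init_create() in
process.c above (variable names are illustrative):

    error_t error = process_reference_init( process,
                                            pid,
                                            XPTR( local_cxy , &process_zero ) );
    if( error )
    {
        // no recovery possible for a reference process descriptor
        printk("\n[PANIC] in %s : cannot initialize process\n", __FUNCTION__ );
        hal_core_sleep();
    }
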
    257262
    258263/*********************************************************************************************
    259264 * This function initializes a copy process descriptor, in the local cluster,
    260265 * from information defined in the reference remote process descriptor.
     266 * As the VSL and the GPT of a process copy are handled as local caches, the GPT copy is
     267 * created empty, and the VSL copy contains only the "kernel", "args", and "envs" vsegs.
    261268 *********************************************************************************************
    262269 * @ process              : [in] local pointer on process descriptor to initialize.
    263270 * @ reference_process_xp : [in] extended pointer on reference process descriptor.
    264  * @ return 0 if success / return ENOMEM if failure
     271 * @ return 0 if success / return -1 if failure
    265272 ********************************************************************************************/
    266273error_t process_copy_init( process_t * local_process,
     
    272279 * The local th_tbl[] array must be empty.
    273280 *********************************************************************************************
    274  * @ process     : pointer on the process descriptor.
     281 * @ process     : [in] pointer on the process descriptor.
    275282 ********************************************************************************************/
    276283void process_destroy( process_t * process );
     
    283290 * taken by the caller function.
    284291 *********************************************************************************************
    285  * @ process_xp : extended pointer on process descriptor.
     292 * @ process_xp    : [in] extended pointer on process descriptor.
    286293 ********************************************************************************************/
    287294void process_display( xptr_t process_xp );
     
    396403/*********************************************************************************************
    397404 * This function implements the "fork" system call, and is called by the sys_fork() function,
    398  * likely throuch the RPC_PROCESS_MAKE_FORK.
    399  * It allocates memory and initializes a new "child" process descriptor, and the associated
    400  * "child" thread descriptor in local cluster. It involves up to three different clusters :
     405 * likely through the RPC_PROCESS_MAKE_FORK.
     406 * It allocates memory and initializes a new child process descriptor, and the associated
     407 * child thread descriptor in local cluster. It involves up to three different clusters:
    401408 * - the child (local) cluster can be any cluster selected by the sys_fork function.
    402409 * - the parent cluster must be the reference cluster for the parent process.
    403410 * - the client cluster containing the thread requesting the fork can be any cluster.
    404  * The new "child" process descriptor is initialised from informations found in the "parent"
      411 * The new child process descriptor is initialised from information found in the parent
    405412 * reference process descriptor, containing the complete process description.
    406  * The new "child" thread descriptor is initialised from informations found in the "parent"
      413 * The new child thread descriptor is initialised from information found in the parent
    407414 * thread descriptor.
    408415 *********************************************************************************************
     
    504511
    505512/*********************************************************************************************
    506  * This function atomically registers a new thread in the local process descriptor.
    507  * It checks that there is an available slot in the local th_tbl[] array, and allocates
    508  * a new LTID using the relevant lock depending on the kernel/user type.
    509  *********************************************************************************************
    510  * @ process  : pointer on the local process descriptor.
    511  * @ thread   : pointer on new thread to be registered.
     513 * This function atomically registers a new thread identified by the <thread> argument
     514 * in the th_tbl[] array of the local process descriptor identified by the <process>
     515 * argument. It checks that there is an available slot in the local th_tbl[] array,
     516 * and allocates a new LTID using the relevant lock depending on the kernel/user type,
     517 * and returns the global thread identifier in the <trdid> buffer.
     518 *********************************************************************************************
     519 * @ process  : [in]  pointer on the local process descriptor.
     520 * @ thread   : [in]  pointer on new thread to be registered.
    512521 * @ trdid    : [out] buffer for allocated trdid.
    513522 * @ returns 0 if success / returns non zero if no slot available.
     
    516525                                 struct thread_s * thread,
    517526                                 trdid_t         * trdid );
     527
     528/*********************************************************************************************
     529 * This function atomically removes a thread identified by the <thread> argument from
      530 * the local process descriptor th_tbl[] array, and returns the number of threads currently
      531 * registered in the th_tbl[] array before this removal.
     532 *********************************************************************************************
     533 * @ thread   : pointer on thread to be removed.
     534 * @ returns number of threads registered in th_tbl before thread remove.
     535 ********************************************************************************************/
     536uint32_t process_remove_thread( struct thread_s * thread );
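
Note: a minimal sketch of how these two functions are expected to be paired by a thread
creation path, based on the thread_user_create() error paths modified in thread.c below
(names as used there):

    // register new thread in process descriptor, and get a TRDID
    error = process_register_thread( process , thread , &trdid );
    if( error ) return -1;

    // set trdid in thread descriptor
    thread->trdid = trdid;

    // ... on any later failure, undo the registration before releasing the descriptor
    process_remove_thread( thread );
    thread_destroy( thread );
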
    518537
    519538
     
    556575
    557576/*********************************************************************************************
    558  * This function gives a process identified by the <process_xp> argument the exclusive
     577 * This function gives a process identified by the <process_xp> argument the
    559578 * ownership of its attached TXT_RX terminal (i.e. put the process in foreground).
    560  * It can be called by a thread running in any cluster, but the <process_xp> must be the
    561  * owner cluster process descriptor.
      579 * It can be called by a thread running in any cluster, but the <process_xp> argument
      580 * must point to the process descriptor copy in the owner cluster.
    562581 *********************************************************************************************
    563582 * @ owner_xp  : extended pointer on process descriptor in owner cluster.
     
    566585
    567586/*********************************************************************************************
    568  * When the process identified by the <owner_xp> argument has the exclusive ownership of
    569  * the TXT_RX terminal, this function transfer this ownership to another attached process.
    570  * The process descriptor must be the process owner.
    571  * This function does nothing if the process identified by the <process_xp> is not
    572  * the TXT owner.
     587 * When the target process identified by the <owner_xp> argument has the exclusive ownership
     588 * of the TXT_RX terminal, this function transfer this ownership to another process attached
     589 * to the same terminal. The target process descriptor must be the process owner.
     590 * This function does nothing if the target process is not the TXT owner.
    573591 * - If the current owner is not the KSH process, the new owner is the KSH process.
    574592 * - If the current owner is the KSH process, the new owner is another attached process.
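
Note: a condensed view of the decision implemented by process_txt_transfer_ownership() in
process.c above (the list scan and the busylock handling are summarized by comments):

    if( (owner_xp == process_xp) && (txt_id > 0) )   // target process owns a user TXT
    {
        if( process_get_ppid( process_xp ) != 1 )    // target process is not KSH
        {
            // scan the attached process list and give TXT ownership to the KSH process
        }
        else                                         // target process is KSH
        {
            // give TXT ownership to another attached process,
            // or to nobody (XPTR_NULL) if no other process is attached
        }
    }
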
  • trunk/kernel/kern/rpc.c

    r624 r625  
    2424#include <kernel_config.h>
    2525#include <hal_kernel_types.h>
     26#include <hal_vmm.h>
    2627#include <hal_atomic.h>
    2728#include <hal_remote.h>
     
    5253    &rpc_pmem_get_pages_server,            // 0
    5354    &rpc_pmem_release_pages_server,        // 1
    54     &rpc_undefined,                        // 2    unused slot
     55    &rpc_ppm_display_server,               // 2
    5556    &rpc_process_make_fork_server,         // 3
    5657    &rpc_user_dir_create_server,           // 4
     
    8182    &rpc_vmm_create_vseg_server,           // 27
    8283    &rpc_vmm_set_cow_server,               // 28
    83     &rpc_hal_vmm_display_server,               // 29
     84    &rpc_hal_vmm_display_server,           // 29
    8485};
    8586
     
    8889    "PMEM_GET_PAGES",            // 0
    8990    "PMEM_RELEASE_PAGES",        // 1
    90     "undefined",                 // 2
     91    "PPM_DISPLAY",               // 2
    9192    "PROCESS_MAKE_FORK",         // 3
    9293    "USER_DIR_CREATE",           // 4
     
    566567
    567568/////////////////////////////////////////////////////////////////////////////////////////
    568 // [2]      undefined slot
    569 /////////////////////////////////////////////////////////////////////////////////////////
     569// [2]            Marshaling functions attached to RPC_PPM_DISPLAY   
     570/////////////////////////////////////////////////////////////////////////////////////////
     571
     572/////////////////////////////////////////
     573void rpc_ppm_display_client( cxy_t  cxy )
     574{
     575#if DEBUG_RPC_PPM_DISPLAY
     576thread_t * this = CURRENT_THREAD;
     577uint32_t cycle = (uint32_t)hal_get_cycles();
     578if( cycle > DEBUG_RPC_PPM_DISPLAY )
     579printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     580__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     581#endif
     582
     583    uint32_t responses = 1;
     584
     585    // initialise RPC descriptor header
     586    rpc_desc_t  rpc;
     587    rpc.index    = RPC_PPM_DISPLAY;
     588    rpc.blocking = true;
     589    rpc.rsp      = &responses;
     590
     591    // register RPC request in remote RPC fifo
     592    rpc_send( cxy , &rpc );
     593
     594#if DEBUG_RPC_PPM_DISPLAY
     595cycle = (uint32_t)hal_get_cycles();
     596if( cycle > DEBUG_RPC_PPM_DISPLAY )
     597printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     598__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
     599#endif
     600}
     601
     602////////////////////////////////////////////////////////////////////
     603void rpc_ppm_display_server( xptr_t __attribute__((__unused__)) xp )
     604{
     605#if DEBUG_RPC_PPM_DISPLAY
     606thread_t * this = CURRENT_THREAD;
     607uint32_t cycle = (uint32_t)hal_get_cycles();
     608if( cycle > DEBUG_RPC_PPM_DISPLAY )
     609printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     610__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     611#endif
     612
     613    // call local kernel function
     614    ppm_display();
     615
     616#if DEBUG_RPC_PPM_DISPLAY
     617cycle = (uint32_t)hal_get_cycles();
     618if( cycle > DEBUG_RPC_PPM_DISPLAY )
     619printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     620__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
     621#endif
     622}
    570623
    571624/////////////////////////////////////////////////////////////////////////////////////////
  • trunk/kernel/kern/rpc.h

    r624 r625  
    6262    RPC_PMEM_GET_PAGES            = 0,
    6363    RPC_PMEM_RELEASE_PAGES        = 1,
    64     RPC_UNDEFINED_2               = 2,     
     64    RPC_PPM_DISPLAY               = 2,     
    6565    RPC_PROCESS_MAKE_FORK         = 3,
    6666    RPC_USER_DIR_CREATE           = 4,
     
    200200
    201201/***********************************************************************************
    202  * [2] undefined slot
    203  **********************************************************************************/
      202 * [2] The RPC_PPM_DISPLAY allows any client thread to request that the remote cluster
      203 * identified by the <cxy> argument display the physical memory allocator state.
     204 **********************************************************************************/
     205void rpc_ppm_display_client( cxy_t  cxy );
     206
     207void rpc_ppm_display_server( xptr_t xp );
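
Note: a plausible caller pattern for this new RPC; the local/remote dispatch shown here is an
assumption (not part of this changeset), while ppm_display() taking no argument matches the
server function in rpc.c above:

    // display the physical memory allocator state of cluster <cxy>
    if( cxy == local_cxy ) ppm_display();
    else                   rpc_ppm_display_client( cxy );
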
    204208
    205209/***********************************************************************************
  • trunk/kernel/kern/scheduler.c

    r624 r625  
    180180    sched = &core->scheduler;
    181181
    182     /////////////// scan user threads to handle both ACK and DELETE requests
     182    ////////////////// scan user threads to handle both ACK and DELETE requests
    183183    root = &sched->u_root;
    184184    iter = root->next;
     
    240240            busylock_release( &sched->lock );
    241241
    242 // check th_nr value
    243 assert( (process->th_nr > 0) , "process th_nr cannot be 0\n" );
    244 
    245             // remove thread from process th_tbl[]
    246             process->th_tbl[ltid] = NULL;
    247             count = hal_atomic_add( &process->th_nr , - 1 );
    248  
    249             // release memory allocated for thread descriptor
    250             thread_destroy( thread );
     242            // release memory allocated for thread
     243            count = thread_destroy( thread );
    251244
    252245            hal_fence();
     
    255248uint32_t cycle = (uint32_t)hal_get_cycles();
    256249if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    257 printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
    258 __FUNCTION__ , process->pid , thread->trdid , local_cxy , thread->core->lid , cycle );
     250printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / %d threads / cycle %d\n",
     251__FUNCTION__, process->pid, thread->trdid, local_cxy, thread->core->lid, count, cycle );
    259252#endif
    260253            // destroy process descriptor if last thread
     
    274267    }  // end user threads
    275268
    276     ////// scan kernel threads for DELETE only
     269    ///////////// scan kernel threads for DELETE only
    277270    root = &sched->k_root;
    278271    iter = root->next;
     
    290283
    291284// check process descriptor is local kernel process
    292 assert( ( thread->process == &process_zero ) , "illegal process descriptor\n");
     285assert( ( thread->process == &process_zero ) , "illegal process descriptor");
    293286
    294287            // get thread ltid
     
    325318
    326319// check th_nr value
    327 assert( (process_zero.th_nr > 0) , "kernel process th_nr cannot be 0\n" );
     320assert( (process_zero.th_nr > 0) , "kernel process th_nr cannot be 0" );
    328321
    329322            // remove thread from process th_tbl[]
     
    477470}  // end sched_register_thread()
    478471
    479 //////////////////////////////////////
    480 void sched_yield( const char * cause )
     472//////////////////////////////////////////////////////////////////
     473void sched_yield( const char * cause __attribute__((__unused__)) )
    481474{
    482475    thread_t      * next;
     
    512505// check next thread kernel_stack overflow
    513506assert( (next->signature == THREAD_SIGNATURE),
    514 "kernel stack overflow for thread %x on core[%x,%d] \n", next, local_cxy, lid );
     507"kernel stack overflow for thread %x on core[%x,%d]", next, local_cxy, lid );
    515508
    516509// check next thread attached to same core as the calling thread
    517510assert( (next->core == current->core),
    518 "next core %x != current core %x\n", next->core, current->core );
     511"next core %x != current core %x", next->core, current->core );
    519512
    520513// check next thread not blocked when type != IDLE
    521514assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) ,
    522 "next thread %x (%s) is blocked on core[%x,%d]\n",
     515"next thread %x (%s) is blocked on core[%x,%d]",
    523516next->trdid , thread_type_str(next->type) , local_cxy , lid );
    524517
     
    561554#if (DEBUG_SCHED_YIELD & 1)
    562555// if( sched->trace )
    563 if(uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
     556if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
    564557printk("\n[%s] core[%x,%d] / cause = %s\n"
    565558"      thread %x (%s) (%x,%x) continue / cycle %d\n",
     
    584577    list_entry_t * iter;
    585578    thread_t     * thread;
    586 
    587 // check lid
    588 assert( (lid < LOCAL_CLUSTER->cores_nr),
    589 "illegal core index %d\n", lid);
    590579
    591580    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
     
    644633{
    645634    thread_t     * thread;
    646 
    647 // check cxy
    648 assert( (cluster_is_undefined( cxy ) == false),
    649 "illegal cluster %x\n", cxy );
    650 
    651 assert( (lid < hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) ) ),
    652 "illegal core index %d\n", lid );
    653635
    654636    // get local pointer on target scheduler
  • trunk/kernel/kern/thread.c

    r624 r625  
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner (2016,2017,2018)
     5 *         Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    2929#include <hal_special.h>
    3030#include <hal_remote.h>
     31#include <hal_vmm.h>
    3132#include <memcpy.h>
    3233#include <printk.h>
     
    9697
    9798/////////////////////////////////////////////////////////////////////////////////////
    98 // This static function releases the physical memory for a thread descriptor.
    99 // It is called by the three functions:
    100 // - thread_user_create()
    101 // - thread_user_fork()
    102 // - thread_kernel_create()
    103 /////////////////////////////////////////////////////////////////////////////////////
    104 // @ thread  : pointer on thread descriptor.
    105 /////////////////////////////////////////////////////////////////////////////////////
    106 static void thread_release( thread_t * thread )
    107 {
    108     kmem_req_t   req;
    109 
    110     xptr_t base_xp = ppm_base2page( XPTR(local_cxy , thread ) );
    111 
    112     req.type  = KMEM_PAGE;
    113     req.ptr   = GET_PTR( base_xp );
    114     kmem_free( &req );
    115 }
    116 
    117 /////////////////////////////////////////////////////////////////////////////////////
    11899// This static function initializes a thread descriptor (kernel or user).
    119100// It can be called by the four functions:
     
    122103// - thread_kernel_create()
    123104// - thread_idle_init()
     105// The "type" and "trdid" fields must have been previously set.
    124106// It updates the local DQDT.
    125107/////////////////////////////////////////////////////////////////////////////////////
    126 // @ thread       : pointer on local thread descriptor
    127 // @ process      : pointer on local process descriptor.
    128 // @ type         : thread type.
    129 // @ func         : pointer on thread entry function.
    130 // @ args         : pointer on thread entry function arguments.
    131 // @ core_lid     : target core local index.
    132 // @ u_stack_base : stack base (user thread only)
    133 // @ u_stack_size : stack base (user thread only)
     108// @ thread          : pointer on local thread descriptor
     109// @ process         : pointer on local process descriptor.
     110// @ type            : thread type.
     111// @ trdid           : thread identifier
     112// @ func            : pointer on thread entry function.
     113// @ args            : pointer on thread entry function arguments.
     114// @ core_lid        : target core local index.
     115// @ user_stack_vseg : local pointer on user stack vseg (user thread only)
    134116/////////////////////////////////////////////////////////////////////////////////////
    135117static error_t thread_init( thread_t      * thread,
    136118                            process_t     * process,
    137119                            thread_type_t   type,
     120                            trdid_t         trdid,
    138121                            void          * func,
    139122                            void          * args,
    140123                            lid_t           core_lid,
    141                             intptr_t        u_stack_base,
    142                             uint32_t        u_stack_size )
    143 {
    144     error_t        error;
    145     trdid_t        trdid;      // allocated thread identifier
    146 
    147         cluster_t    * local_cluster = LOCAL_CLUSTER;
     124                            vseg_t        * user_stack_vseg )
     125{
     126
     127// check type and trdid fields initialized
     128assert( (thread->type == type)   , "bad type argument" );
     129assert( (thread->trdid == trdid) , "bad trdid argument" );
    148130
    149131#if DEBUG_THREAD_INIT
     
    152134if( DEBUG_THREAD_INIT < cycle )
    153135printk("\n[%s] thread[%x,%x] enter for thread %x in process %x / cycle %d\n",
    154 __FUNCTION__, this->process->pid, this->trdid, thread, process->pid , cycle );
     136__FUNCTION__, this->process->pid, this->trdid, thread->trdid, process->pid , cycle );
    155137#endif
    156138
     
    159141
    160142        // Initialize new thread descriptor
    161         thread->type            = type;
    162143    thread->quantum         = 0;            // TODO
    163144    thread->ticks_nr        = 0;            // TODO
    164145    thread->time_last_check = 0;            // TODO
    165         thread->core            = &local_cluster->core_tbl[core_lid];
     146        thread->core            = &LOCAL_CLUSTER->core_tbl[core_lid];
    166147        thread->process         = process;
    167 
    168148    thread->busylocks       = 0;
    169149
     
    172152#endif
    173153
    174     thread->u_stack_base    = u_stack_base;
    175     thread->u_stack_size    = u_stack_size;
     154    thread->user_stack_vseg = user_stack_vseg;
    176155    thread->k_stack_base    = (intptr_t)thread + desc_size;
    177156    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE - desc_size;
    178 
    179157    thread->entry_func      = func;         // thread entry point
    180158    thread->entry_args      = args;         // thread function arguments
     
    185163    thread->blocked         = THREAD_BLOCKED_GLOBAL;
    186164
    187     // register new thread in process descriptor, and get a TRDID
    188     error = process_register_thread( process, thread , &trdid );
    189 
    190     if( error )
    191     {
    192         printk("\n[ERROR] in %s : thread %x in process %x cannot get TRDID in cluster %x\n"
    193         "    for thread %s in process %x / cycle %d\n",
    194         __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
    195         local_cxy, thread_type_str(type), process->pid, (uint32_t)hal_get_cycles() );
    196         return EINVAL;
    197     }
    198 
    199     // initialize trdid
    200     thread->trdid           = trdid;
    201 
    202165    // initialize sched list
    203166    list_entry_init( &thread->sched_list );
     
    237200} // end thread_init()
    238201
    239 /////////////////////////////////////////////////////////
     202//////////////////////////////////////////////////
    240203error_t thread_user_create( pid_t             pid,
    241204                            void            * start_func,
     
    246209    error_t        error;
    247210        thread_t     * thread;       // pointer on created thread descriptor
      211    trdid_t        trdid;        // created thread identifier
    248212    process_t    * process;      // pointer to local process descriptor
    249213    lid_t          core_lid;     // selected core local index
    250     vseg_t       * vseg;         // stack vseg
     214    vseg_t       * us_vseg;      // user stack vseg
    251215
    252216assert( (attr != NULL) , "pthread attributes must be defined" );
     
    266230    {
    267231                printk("\n[ERROR] in %s : cannot get process descriptor %x\n",
    268                __FUNCTION__ , pid );
    269         return ENOMEM;
     232        __FUNCTION__ , pid );
     233        return -1;
    270234    }
    271235
     
    284248                printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
    285249            __FUNCTION__ , core_lid );
    286             return EINVAL;
     250            return -1;
    287251        }
    288252    }
     
    298262#endif
    299263
    300     // allocate a stack from local VMM
    301     vseg = vmm_create_vseg( process,
    302                             VSEG_TYPE_STACK,
    303                             0,                 // size unused
    304                             0,                 // length unused
    305                             0,                 // file_offset unused
    306                             0,                 // file_size unused
    307                             XPTR_NULL,         // mapper_xp unused
    308                             local_cxy );
    309 
    310     if( vseg == NULL )
    311     {
    312             printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
    313                 return ENOMEM;
    314     }
    315 
    316 #if( DEBUG_THREAD_USER_CREATE & 1)
    317 if( DEBUG_THREAD_USER_CREATE < cycle )
    318 printk("\n[%s] stack vseg created / vpn_base %x / %d pages\n",
    319 __FUNCTION__, vseg->vpn_base, vseg->vpn_size );
    320 #endif
    321 
    322264    // allocate memory for thread descriptor
    323265    thread = thread_alloc();
     
    325267    if( thread == NULL )
    326268    {
    327             printk("\n[ERROR] in %s : cannot create new thread\n", __FUNCTION__ );
    328         vmm_delete_vseg( process->pid , vseg->min );
    329         return ENOMEM;
     269            printk("\n[ERROR] in %s : cannot create new thread in cluster %x\n",
     270        __FUNCTION__, local_cxy );
     271        return -1;
    330272    }
    331273
     
    336278#endif
    337279
     280    // set type in thread descriptor
     281    thread->type = THREAD_USER;
     282
     283    // register new thread in process descriptor, and get a TRDID
     284    error = process_register_thread( process, thread , &trdid );
     285
     286    if( error )
     287    {
     288        printk("\n[ERROR] in %s : cannot register new thread in process %x\n",
     289        __FUNCTION__, pid );
     290        thread_destroy( thread );
     291        return -1;
     292    }
     293
     294    // set trdid in thread descriptor
     295    thread->trdid = trdid;
     296
     297#if( DEBUG_THREAD_USER_CREATE & 1)
     298if( DEBUG_THREAD_USER_CREATE < cycle )
     299printk("\n[%s] new thread %x registered in process %x\n",
     300__FUNCTION__, trdid, pid );
     301#endif
     302
     303    // allocate a stack from local VMM
     304    us_vseg = vmm_create_vseg( process,
     305                               VSEG_TYPE_STACK,
     306                               LTID_FROM_TRDID( trdid ),
     307                               0,                         // size unused
     308                               0,                         // file_offset unused
     309                               0,                         // file_size unused
     310                               XPTR_NULL,                 // mapper_xp unused
     311                               local_cxy );
     312
     313    if( us_vseg == NULL )
     314    {
     315            printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
     316        process_remove_thread( thread );
     317        thread_destroy( thread );
     318                return -1;
     319    }
     320
     321#if( DEBUG_THREAD_USER_CREATE & 1)
     322if( DEBUG_THREAD_USER_CREATE < cycle )
     323printk("\n[%s] stack vseg created / vpn_base %x / %d pages\n",
     324__FUNCTION__, us_vseg->vpn_base, us_vseg->vpn_size );
     325#endif
     326
    338327    // initialize thread descriptor
    339328    error = thread_init( thread,
    340329                         process,
    341330                         THREAD_USER,
     331                         trdid,
    342332                         start_func,
    343333                         start_arg,
    344334                         core_lid,
    345                          vseg->min,
    346                          vseg->max - vseg->min );
     335                         us_vseg );
    347336    if( error )
    348337    {
    349338            printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
    350         vmm_delete_vseg( process->pid , vseg->min );
    351         thread_release( thread );
    352         return EINVAL;
     339        vmm_remove_vseg( process , us_vseg );
     340        process_remove_thread( thread );
     341        thread_destroy( thread );
     342        return -1;
    353343    }
    354344
    355345#if( DEBUG_THREAD_USER_CREATE & 1)
    356346if( DEBUG_THREAD_USER_CREATE < cycle )
    357 printk("\n[%s] new thread descriptor initialised / trdid %x\n",
    358 __FUNCTION__, thread->trdid );
     347printk("\n[%s] new thread %x in process %x initialised\n",
     348__FUNCTION__, thread->trdid, process->pid );
    359349#endif
    360350
     
    369359    {
    370360            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
    371         vmm_delete_vseg( process->pid , vseg->min );
    372         thread_release( thread );
    373         return ENOMEM;
     361        vmm_remove_vseg( process , us_vseg );
     362        process_remove_thread( thread );
     363        thread_destroy( thread );
     364        return -1;
    374365    }
    375366    hal_cpu_context_init( thread );
     
    379370    {
    380371            printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
    381         vmm_delete_vseg( process->pid , vseg->min );
    382         thread_release( thread );
    383         return ENOMEM;
     372        vmm_remove_vseg( process , us_vseg );
     373        process_remove_thread( thread );
     374        thread_destroy( thread );
     375        return -1;
    384376    }
    385377    hal_fpu_context_init( thread );
     
    410402{
    411403    error_t        error;
    412         thread_t     * child_ptr;        // local pointer on local child thread
     404        thread_t     * child_ptr;        // local pointer on child thread
     405    trdid_t        child_trdid;      // child thread identifier
    413406    lid_t          core_lid;         // selected core local index
    414 
    415407    thread_t     * parent_ptr;       // local pointer on remote parent thread
    416408    cxy_t          parent_cxy;       // parent thread cluster
    417409    process_t    * parent_process;   // local pointer on parent process
    418410    xptr_t         parent_gpt_xp;    // extended pointer on parent thread GPT
    419 
    420     void         * func;             // parent thread entry_func
    421     void         * args;             // parent thread entry_args
    422     intptr_t       base;             // parent thread u_stack_base
    423     uint32_t       size;             // parent thread u_stack_size
    424     uint32_t       flags;            // parent_thread flags
    425     vpn_t          vpn_base;         // parent thread stack vpn_base
    426     vpn_t          vpn_size;         // parent thread stack vpn_size
    427     reg_t        * uzone;            // parent thread pointer on uzone 
    428 
    429     vseg_t       * vseg;             // child thread STACK vseg
     411    void         * parent_func;      // parent thread entry_func
     412    void         * parent_args;      // parent thread entry_args
     413    uint32_t       parent_flags;     // parent_thread flags
     414    vseg_t       * parent_us_vseg;   // parent thread user stack vseg
     415    vseg_t       * child_us_vseg;    // child thread user stack vseg
    430416
    431417#if DEBUG_THREAD_USER_FORK
     
    433419thread_t * this  = CURRENT_THREAD;
    434420if( DEBUG_THREAD_USER_FORK < cycle )
    435 printk("\n[%s] thread[%x,%x] enter / child_process %x / cycle %d\n",
     421printk("\n[%s] thread[%x,%x] enter for child_process %x / cycle %d\n",
    436422__FUNCTION__, this->process->pid, this->trdid, child_process->pid, cycle );
    437423#endif
     
    439425    // select a target core in local cluster
    440426    core_lid = cluster_select_local_core();
     427
     428#if (DEBUG_THREAD_USER_FORK & 1)
     429if( DEBUG_THREAD_USER_FORK < cycle )
     430printk("\n[%s] thread[%x,%x] selected core [%x,%d]\n",
     431__FUNCTION__, this->process->pid, this->trdid, local_cxy, core_lid );
     432#endif
    441433
    442434    // get cluster and local pointer on parent thread descriptor
     
    444436    parent_ptr = GET_PTR( parent_thread_xp );
    445437
    446     // get relevant fields from parent thread
    447     func  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_func    ));
    448     args  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args    ));
    449     base  = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base  ));
    450     size  = (uint32_t)hal_remote_l32 ( XPTR( parent_cxy , &parent_ptr->u_stack_size  ));
    451     flags =           hal_remote_l32 ( XPTR( parent_cxy , &parent_ptr->flags         ));
    452     uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone_current ));
    453 
    454     vpn_base = base >> CONFIG_PPM_PAGE_SHIFT;
    455     vpn_size = size >> CONFIG_PPM_PAGE_SHIFT;
     438    // get relevant infos from parent thread
     439    parent_func    = (void *)  hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->entry_func ));
     440    parent_args    = (void *)  hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->entry_args ));
     441    parent_flags   = (uint32_t)hal_remote_l32( XPTR(parent_cxy,&parent_ptr->flags ));
     442    parent_us_vseg = (vseg_t *)hal_remote_lpt( XPTR(parent_cxy,&parent_ptr->user_stack_vseg ));
    456443
    457444    // get pointer on parent process in parent thread cluster
     
    459446                                                        &parent_ptr->process ) );
    460447 
    461     // get extended pointer on parent GPT in parent thread cluster
     448    // build extended pointer on parent GPT in parent thread cluster
    462449    parent_gpt_xp = XPTR( parent_cxy , &parent_process->vmm.gpt );
     450
     451#if (DEBUG_THREAD_USER_FORK & 1)
     452if( DEBUG_THREAD_USER_FORK < cycle )
     453printk("\n[%s] thread[%x,%x] get parent GPT\n",
     454__FUNCTION__, this->process->pid, this->trdid );
     455#endif
    463456
    464457    // allocate memory for child thread descriptor
    465458    child_ptr = thread_alloc();
     459
    466460    if( child_ptr == NULL )
    467461    {
    468         printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ );
     462        printk("\n[ERROR] in %s : cannot allocate new thread\n",
     463        __FUNCTION__ );
    469464        return -1;
    470465    }
     466
     467#if (DEBUG_THREAD_USER_FORK & 1)
     468if( DEBUG_THREAD_USER_FORK < cycle )
     469printk("\n[%s] thread[%x,%x] allocated new thread descriptor %x\n",
     470__FUNCTION__, this->process->pid, this->trdid, child_ptr );
     471#endif
     472
     473    // set type in thread descriptor
     474    child_ptr->type = THREAD_USER;
     475
     476    // register new thread in process descriptor, and get a TRDID
     477    error = process_register_thread( child_process, child_ptr , &child_trdid );
     478
     479    if( error )
     480    {
     481        printk("\n[ERROR] in %s : cannot register new thread in process %x\n",
     482        __FUNCTION__, child_process->pid );
     483        thread_destroy( child_ptr );
     484        return -1;
     485    }
     486
     487    // set trdid in thread descriptor
     488    child_ptr->trdid = child_trdid;
     489
     490#if (DEBUG_THREAD_USER_FORK & 1)
     491if( DEBUG_THREAD_USER_FORK < cycle )
     492printk("\n[%s] thread[%x,%x] registered child thread %x in child process %x\n",
     493__FUNCTION__, this->process->pid, this->trdid, child_trdid, child_process->pid );
     494#endif
     495
     496    // get an user stack vseg from local VMM allocator
     497    child_us_vseg = vmm_create_vseg( child_process,
     498                                     VSEG_TYPE_STACK,
     499                                     LTID_FROM_TRDID( child_trdid ), 
     500                                     0,                               // size unused
     501                                     0,                               // file_offset unused
     502                                     0,                               // file_size unused
     503                                     XPTR_NULL,                       // mapper_xp unused
     504                                     local_cxy );
     505    if( child_us_vseg == NULL )
     506    {
     507            printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
     508        process_remove_thread( child_ptr );
     509        thread_destroy( child_ptr );
     510        return -1;
     511    }
     512
     513#if (DEBUG_THREAD_USER_FORK & 1)
     514if( DEBUG_THREAD_USER_FORK < cycle )
      515printk("\n[%s] thread[%x,%x] created a user stack vseg / vpn_base %x / %d pages\n",
     516__FUNCTION__, this->process->pid, this->trdid,
     517child_us_vseg->vpn_base, child_us_vseg->vpn_size );
     518#endif
    471519
    472520    // initialize thread descriptor
     
    474522                         child_process,
    475523                         THREAD_USER,
    476                          func,
    477                          args,
     524                         child_trdid,
     525                         parent_func,
     526                         parent_args,
    478527                         core_lid,
    479                          base,
    480                          size );
     528                         child_us_vseg );
    481529    if( error )
    482530    {
    483531            printk("\n[ERROR] in %s : cannot initialize child thread\n", __FUNCTION__ );
    484         thread_release( child_ptr );
    485         return EINVAL;
     532        vmm_remove_vseg( child_process , child_us_vseg );
     533        process_remove_thread( child_ptr );
     534        thread_destroy( child_ptr );
     535        return -1;
    486536    }
    487537
     
    492542#endif
    493543
    494     // return child pointer
    495     *child_thread = child_ptr;
    496 
    497544    // set detached flag if required
    498     if( flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED;
    499 
    500     // update uzone pointer in child thread descriptor
    501     child_ptr->uzone_current = (char *)((intptr_t)uzone +
    502                                         (intptr_t)child_ptr -
    503                                         (intptr_t)parent_ptr );
    504  
    505 
    506     // allocate CPU context for child thread
     545    if( parent_flags & THREAD_FLAG_DETACHED ) child_ptr->flags = THREAD_FLAG_DETACHED;
     546
     547    // allocate a CPU context for child thread
    507548        if( hal_cpu_context_alloc( child_ptr ) )
    508549    {
    509550            printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ );
    510         thread_release( child_ptr );
     551        vmm_remove_vseg( child_process , child_us_vseg );
     552        process_remove_thread( child_ptr );
     553        thread_destroy( child_ptr );
    511554        return -1;
    512555    }
    513556
    514     // allocate FPU context for child thread
     557    // allocate a FPU context for child thread
    515558        if( hal_fpu_context_alloc( child_ptr ) )
    516559    {
    517560            printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ );
    518         thread_release( child_ptr );
     561        vmm_remove_vseg( child_process , child_us_vseg );
     562        process_remove_thread( child_ptr );
     563        thread_destroy( child_ptr );
    519564        return -1;
    520565    }
     
    526571#endif
    527572
    528    // create and initialize STACK vseg
    529     vseg = vseg_alloc();
    530     vseg_init( vseg,
    531                VSEG_TYPE_STACK,
    532                base,
    533                size,
    534                vpn_base,
    535                vpn_size,
    536                0, 0, XPTR_NULL,                         // not a file vseg
    537                local_cxy );
    538 
    539     // register STACK vseg in local child VSL
    540     vmm_attach_vseg_to_vsl( &child_process->vmm , vseg );
    541 
    542 #if (DEBUG_THREAD_USER_FORK & 1)
    543 if( DEBUG_THREAD_USER_FORK < cycle )
    544 printk("\n[%s] thread[%x,%x] created stack vseg for thread %x in process %x\n",
    545 __FUNCTION__, this->process->pid, this->trdid, child_ptr->trdid, child_process->pid );
    546 #endif
    547 
    548     // copy all valid STACK GPT entries   
    549     vpn_t          vpn;
    550     bool_t         mapped;
    551     ppn_t          ppn;
    552     for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
     573    // scan parent GPT, and copy all valid entries
     574    // associated to user stack vseg into child GPT
     575    vpn_t  parent_vpn;
     576    vpn_t  child_vpn;
     577    bool_t mapped;
     578    ppn_t  ppn;
     579    vpn_t  parent_vpn_base = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_base ) );
     580    vpn_t  parent_vpn_size = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_size ) );
     581    vpn_t  child_vpn_base  = child_us_vseg->vpn_base;
     582    for( parent_vpn = parent_vpn_base , child_vpn = child_vpn_base ;
     583         parent_vpn < (parent_vpn_base + parent_vpn_size) ;
     584         parent_vpn++ , child_vpn++ )
    553585    {
    554586        error = hal_gpt_pte_copy( &child_process->vmm.gpt,
     587                                  child_vpn,
    555588                                  parent_gpt_xp,
    556                                   vpn,
     589                                  parent_vpn,
    557590                                  true,                 // set cow
    558591                                  &ppn,
     
    560593        if( error )
    561594        {
    562             vmm_detach_vseg_from_vsl( &child_process->vmm , vseg );
    563             thread_release( child_ptr );
    564595            printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ );
     596            vmm_remove_vseg( child_process , child_us_vseg );
     597            process_remove_thread( child_ptr );
     598            thread_destroy( child_ptr );
    565599            return -1;
    566600        }
    567601
    568         // increment pending forks counter for the page if mapped
     602        // increment pending forks counter for a mapped page
    569603        if( mapped )
    570604        {
     
    574608            page_t * page_ptr = GET_PTR( page_xp );
    575609
    576             // get extended pointers on forks and lock fields
     610            // build extended pointers on forks and lock fields
    577611            xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
    578612            xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
     
    586620            // release lock protecting page
    587621            remote_busylock_release( lock_xp ); 
     622        }
     623    }
    588624
    589625#if (DEBUG_THREAD_USER_FORK & 1)
    590 cycle = (uint32_t)hal_get_cycles();
    591626if( DEBUG_THREAD_USER_FORK < cycle )
    592 printk("\n[%s] thread[%x,%x] copied one PTE to child GPT : vpn %x / forks %d\n",
    593 __FUNCTION__, this->process->pid, this->trdid,
    594 vpn, hal_remote_l32( XPTR( page_cxy , &page_ptr->forks) ) );
    595 #endif
    596 
    597         }
    598     }
    599 
    600     // set COW flag for all mapped entries of STAK vseg in parent thread GPT
     627printk("\n[%s] thread[%x,%x] copied all stack vseg PTEs to child GPT\n",
     628__FUNCTION__, this->process->pid, this->trdid );
     629#endif
     630
     631    // set COW flag for all mapped entries of user stack vseg in parent GPT
    601632    hal_gpt_set_cow( parent_gpt_xp,
    602                      vpn_base,
    603                      vpn_size );
    604  
     633                     parent_vpn_base,
     634                     parent_vpn_size );
     635
     636#if (DEBUG_THREAD_USER_FORK & 1)
     637if( DEBUG_THREAD_USER_FORK < cycle )
     638printk("\n[%s] thread[%x,%x] set the COW flag for stack vseg in parent GPT\n",
     639__FUNCTION__, this->process->pid, this->trdid );
     640#endif
     641
     642    // return child pointer
     643    *child_thread = child_ptr;
     644
    605645#if DEBUG_THREAD_USER_FORK
    606646cycle = (uint32_t)hal_get_cycles();
    607647if( DEBUG_THREAD_USER_FORK < cycle )
    608 printk("\n[%s] thread[%x,%x] exit / child_thread %x / cycle %d\n",
    609 __FUNCTION__, this->process->pid, this->trdid, child_ptr, cycle );
     648printk("\n[%s] thread[%x,%x] exit / created thread[%x,%x] / cycle %d\n",
     649__FUNCTION__, this->process->pid, this->trdid,
     650child_ptr->process->pid, child_ptr->trdid, cycle );
    610651#endif
    611652
     
    660701
    661702    // allocate an user stack vseg for main thread
    662     vseg_t * vseg = vmm_create_vseg( process,
    663                                      VSEG_TYPE_STACK,
    664                                      0,                 // size unused
    665                                      0,                 // length unused
    666                                      0,                 // file_offset unused
    667                                      0,                 // file_size unused
    668                                      XPTR_NULL,         // mapper_xp unused
    669                                      local_cxy );
    670     if( vseg == NULL )
     703    vseg_t * us_vseg = vmm_create_vseg( process,
     704                                        VSEG_TYPE_STACK,
     705                                        LTID_FROM_TRDID( thread->trdid ),
     706                                        0,                 // length unused
     707                                        0,                 // file_offset unused
     708                                        0,                 // file_size unused
     709                                        XPTR_NULL,         // mapper_xp unused
     710                                        local_cxy );
     711    if( us_vseg == NULL )
    671712    {
    672713            printk("\n[ERROR] in %s : cannot create stack vseg for main thread\n", __FUNCTION__ );
     
    675716
    676717    // update user stack in thread descriptor
    677     thread->u_stack_base = vseg->min;
    678     thread->u_stack_size = vseg->max - vseg->min;
     718    thread->user_stack_vseg = us_vseg;
    679719   
    680720    // release FPU ownership if required
     
    710750    error_t        error;
    711751        thread_t     * thread;       // pointer on new thread descriptor
     752    trdid_t        trdid;        // new thread identifier
    712753
    713754    thread_t * this = CURRENT_THREAD;
     
    737778    }
    738779
     780    // set type in thread descriptor
     781    thread->type = type;
     782
     783    // register new thread in local kernel process descriptor, and get a TRDID
     784    error = process_register_thread( &process_zero , thread , &trdid );
     785
     786    if( error )
     787    {
     788        printk("\n[ERROR] in %s : cannot register thread in kernel process\n", __FUNCTION__ );
     789        return -1;
     790    }
     791
     792    // set trdid in thread descriptor
     793    thread->trdid = trdid;
     794
    739795    // initialize thread descriptor
    740796    error = thread_init( thread,
    741797                         &process_zero,
    742798                         type,
     799                         trdid,
    743800                         func,
    744801                         args,
    745802                         core_lid,
    746                          0 , 0 );  // no user stack for a kernel thread
     803                         NULL );  // no user stack for a kernel thread
    747804
    748805    if( error ) // release allocated memory for thread descriptor
    749806    {
    750         printk("\n[ERROR] in %s : thread %x in process %x\n"
    751         "   cannot initialize thread descriptor\n",
    752         __FUNCTION__, this->trdid, this->process->pid );
    753         thread_release( thread );
     807        printk("\n[ERROR] in %s : cannot initialize thread descriptor\n", __FUNCTION__ );
     808        thread_destroy( thread );
    754809        return ENOMEM;
    755810    }
     
    763818        "    cannot create CPU context\n",
    764819        __FUNCTION__, this->trdid, this->process->pid );
    765         thread_release( thread );
     820        thread_destroy( thread );
    766821        return EINVAL;
    767822    }
     
    791846                           lid_t           core_lid )
    792847{
     848    trdid_t trdid;   
     849    error_t error;
    793850
    794851// check arguments
     
    796853assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" );
    797854
     855    // set type in thread descriptor
     856    thread->type = THREAD_IDLE;
     857
     858    // register idle thread in local kernel process descriptor, and get a TRDID
     859    error = process_register_thread( &process_zero , thread , &trdid );
     860
     861assert( (error == 0), "cannot register idle_thread in kernel process" );
     862
     863    // set trdid in thread descriptor
     864    thread->trdid = trdid;
     865
    798866    // initialize thread descriptor
    799     error_t  error = thread_init( thread,
    800                                   &process_zero,
    801                                   type,
    802                                   func,
    803                                   args,
    804                                   core_lid,
    805                                   0 , 0 );   // no user stack for a kernel thread
    806 
    807     assert( (error == 0), "cannot create thread idle" );
     867    error = thread_init( thread,
     868                         &process_zero,
     869                         THREAD_IDLE,
     870                         trdid,
     871                         func,
     872                         args,
     873                         core_lid,
     874                         NULL );   // no user stack for a kernel thread
     875
     876assert( (error == 0), "cannot initialize idle_thread" );
    808877
    809878    // allocate & initialize CPU context if success
    810879    error = hal_cpu_context_alloc( thread );
    811880
    812     assert( (error == 0), "cannot allocate CPU context" );
     881assert( (error == 0), "cannot allocate CPU context" );
    813882
    814883    hal_cpu_context_init( thread );
     
    816885}  // end thread_idle_init()
    817886
    818 ///////////////////////////////////////////////////////////////////////////////////////
    819 // TODO: check that all memory dynamically allocated during thread execution
    820 // has been released => check vmm destroy for MMAP vsegs  [AG]
    821 ///////////////////////////////////////////////////////////////////////////////////////
    822 void thread_destroy( thread_t * thread )
    823 {
    824     reg_t        save_sr;
    825 
    826     process_t  * process    = thread->process;
    827     core_t     * core       = thread->core;
     887////////////////////////////////////////////
     888uint32_t thread_destroy( thread_t * thread )
     889{
     890    reg_t           save_sr;
     891    uint32_t        count;
     892
     893    thread_type_t   type    = thread->type;
     894    process_t     * process = thread->process;
     895    core_t        * core    = thread->core;
    828896
    829897#if DEBUG_THREAD_DESTROY
     
    835903#endif
    836904
    837     // check busylocks counter
     905    // check calling thread busylocks counter
    838906    thread_assert_can_yield( thread , __FUNCTION__ );
    839907
    840     // update intrumentation values
     908    // update target process instrumentation counter
    841909        process->vmm.pgfault_nr += thread->info.pgfault_nr;
    842910
    843     // release memory allocated for CPU context and FPU context
     911    // remove thread from process th_tbl[]
     912    count = process_remove_thread( thread );
     913
     914    // release memory allocated for CPU context and FPU context if required
    844915        hal_cpu_context_destroy( thread );
    845         if ( thread->type == THREAD_USER ) hal_fpu_context_destroy( thread );
     916        hal_fpu_context_destroy( thread );
    846917       
     918    // release user stack vseg (for an user thread only)
     919    if( type == THREAD_USER )  vmm_remove_vseg( process , thread->user_stack_vseg );
     920
    847921    // release FPU ownership if required
    848922        hal_disable_irq( &save_sr );
     
    857931        thread->signature = 0;
    858932
    859     // release memory for thread descriptor
    860     thread_release( thread );
     933    // release memory for thread descriptor (including kernel stack)
     934    kmem_req_t   req;
     935    xptr_t       base_xp = ppm_base2page( XPTR(local_cxy , thread ) );
     936
     937    req.type  = KMEM_PAGE;
     938    req.ptr   = GET_PTR( base_xp );
     939    kmem_free( &req );
    861940
    862941#if DEBUG_THREAD_DESTROY
     
    866945__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
    867946#endif
     947
     948    return count;
    868949
    869950}   // end thread_destroy()
     
    9931074    cxy_t       target_cxy;             // target thread cluster     
    9941075    thread_t  * target_ptr;             // pointer on target thread
      1076    process_t * target_process;         // pointer on target process
     1077    pid_t       target_pid;             // target process identifier
    9951078    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
    9961079    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
     
    10061089    target_ptr      = GET_PTR( target_xp );
    10071090
    1008     // get target thread identifiers, and attached flag
     1091    // get target thread identifier, attached flag, and process PID
    10091092    target_trdid    = hal_remote_l32( XPTR( target_cxy , &target_ptr->trdid ) );
    10101093    target_ltid     = LTID_FROM_TRDID( target_trdid );
    10111094    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
    10121095    target_attached = ( (hal_remote_l32( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0 );
     1096    target_process  = hal_remote_lpt( XPTR( target_cxy , &target_ptr->process ) );
     1097    target_pid      = hal_remote_l32( XPTR( target_cxy , &target_process->pid ) );
     1098
     1099// check target PID
     1100assert( (pid == target_pid),
     1101"unconsistent pid and target_xp arguments" );
    10131102
    10141103    // get killer thread pointers
     
    10271116// must be deleted by the parent process sys_wait() function
    10281117assert( ((CXY_FROM_PID( pid ) != target_cxy) || (target_ltid != 0)),
    1029 "tharget thread cannot be the main thread\n" );
     1118"target thread cannot be the main thread" );
    10301119
    10311120    // check killer thread can yield
     
    11511240void thread_idle_func( void )
    11521241{
     1242
     1243#if DEBUG_THREAD_IDLE
     1244uint32_t cycle;
     1245#endif
     1246
    11531247    while( 1 )
    11541248    {
     
    11611255
    11621256#if DEBUG_THREAD_IDLE
    1163 {
    1164 uint32_t cycle = (uint32_t)hal_get_cycles();
     1257cycle = (uint32_t)hal_get_cycles();
    11651258if( DEBUG_THREAD_IDLE < cycle )
    11661259printk("\n[%s] idle thread on core[%x,%d] goes to sleep / cycle %d\n",
    11671260__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
    1168 }
    11691261#endif
    11701262
     
    11721264
    11731265#if DEBUG_THREAD_IDLE
    1174 {
    1175 uint32_t cycle = (uint32_t)hal_get_cycles();
     1266cycle = (uint32_t)hal_get_cycles();
    11761267if( DEBUG_THREAD_IDLE < cycle )
    11771268printk("\n[%s] idle thread on core[%x,%d] wake up / cycle %d\n",
    11781269__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
    1179 }
    11801270#endif
    11811271
     
    11831273
    11841274#if DEBUG_THREAD_IDLE
    1185 {
    1186 uint32_t cycle = (uint32_t)hal_get_cycles();
     1275cycle = (uint32_t)hal_get_cycles();
    11871276if( DEBUG_THREAD_IDLE < cycle )
    11881277sched_display( CURRENT_THREAD->core->lid );
    1189 }
    11901278#endif     
    11911279        // search a runable thread
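The copy-on-write scheme used above by thread_user_fork() for the user stack can be summarised by the small stand-alone sketch below. It is only an illustration, not kernel code: the PTE and the per-page "forks" counter are reduced to plain C variables, and every name in it (fork_stack, pte_t fields, NB_PAGES) is invented for the example; the real code goes through hal_gpt_pte_copy() with the cow flag, hal_gpt_set_cow(), and a remote busylock, as shown in the diff above.

    #include <stdio.h>
    #include <stdbool.h>

    #define NB_PAGES 8                                   // pages in the modelled stack vseg

    typedef struct { int ppn; bool mapped; bool writable; } pte_t;

    static int forks[64];                                // per physical page "pending forks" counter

    // copy one stack vseg from the parent GPT model to the child GPT model, setting COW on both sides
    static void fork_stack( pte_t * parent, pte_t * child, int nb_pages )
    {
        for( int vpn = 0 ; vpn < nb_pages ; vpn++ )
        {
            if( parent[vpn].mapped == false ) continue;  // unmapped pages are skipped

            child[vpn] = parent[vpn];                    // child maps the same physical page
            child[vpn].writable  = false;                // COW : child side becomes read-only
            parent[vpn].writable = false;                // COW : parent side becomes read-only too
            forks[ parent[vpn].ppn ]++;                  // one more pending fork on this page
        }
    }

    int main( void )
    {
        pte_t parent[NB_PAGES] = { [5]={17,true,true}, [6]={18,true,true}, [7]={19,true,true} };
        pte_t child [NB_PAGES] = { { 0 , false , false } };

        fork_stack( parent, child, NB_PAGES );

        for( int vpn = 0 ; vpn < NB_PAGES ; vpn++ )
        {
            if( child[vpn].mapped )
            printf("vpn %d : ppn %d / forks %d / writable %d\n",
                   vpn, child[vpn].ppn, forks[child[vpn].ppn], child[vpn].writable );
        }
        return 0;
    }

A later write to one of these pages raises a page fault; the COW handler is then expected to give the writer a private copy and to decrement the forks counter, which is why, as stated in the changeset message, the physical pages of a vseg must only be released when this counter is back to zero.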
  • trunk/kernel/kern/thread.h

    r619 r625  
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner (2016,2017,2018)
     5 *         Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    2929#include <shared_syscalls.h>
    3030#include <hal_special.h>
     31#include <hal_kentry.h>
    3132#include <xlist.h>
    3233#include <list.h>
     
    100101{
    101102        uint32_t              pgfault_nr;    /*! cumulated number of page fault           */
    102         uint32_t              sched_nr;      /*! TODO ???  [AG]                           */
    103         uint32_t              u_err_nr;      /*! TODO ???  [AG]                           */
    104         uint32_t              m_err_nr;      /*! TODO ???  [AG]                           */
    105103        cycle_t               last_cycle;    /*! last cycle counter value (date)          */
    106104        cycle_t               usr_cycles;    /*! user execution duration (cycles)         */
     
    121119 *
    122120 * WARNING (1) Don't modify the first 4 fields order, as this order is used by the
    123  *             hal_kentry assembly code for some architectures (TSAR).
      121 *             hal_kentry assembly code for the TSAR architecture.
    124122 *
    125123 * WARNING (2) Most of the thread state is private and accessed only by this thread,
     
    165163    uint32_t          * ack_rsp_count;   /*! pointer on acknowledge response counter  */
    166164
    167         intptr_t            u_stack_base;    /*! user stack base address                  */
    168         uint32_t            u_stack_size;    /*! user stack size (bytes)                  */
     165        vseg_t            * user_stack_vseg; /*! local pointer on user stack vseg         */
    169166
    170167    void              * entry_func;      /*! pointer on entry function                */
     
    248245
    249246/***************************************************************************************
    250  * This function is used by the sys_fork() system call to create the "child" thread
    251  * in the local cluster. It allocates memory for a thread descriptor, and initializes
    252  * it from the "parent" thread descriptor defined by the <parent_thread_xp> argument.
     247 * This function is used by the sys_fork() syscall to create the "child" main thread
     248 * in the local cluster. It is called, generally through the RPC_PROCESS_MAKE_FORK,
     249 * by the process_make_fork() function. It allocates memory from the local cluster
     250 * for a "child" thread descriptor, and initializes it from the "parent" thread
     251 * descriptor defined by the <parent_thread_xp> argument.
    253252 * The new thread is attached to the core that has the lowest load in local cluster.
    254253 * It is registered in the "child" process defined by the <child_process> argument.
     
    259258 * uses physical addressing on some architectures).
    260259 * The CPU and FPU execution contexts are created and linked to the new thread.
    261  * but the actual context copy is NOT done, and must be done by by the sys_fork().
      260 * but the actual context copy is NOT done here: it is done later by the sys_fork() function.
    262261 * The THREAD_BLOCKED_GLOBAL bit is set => the thread must be activated to start.
    263262 ***************************************************************************************
     
    273272/***************************************************************************************
    274273 * This function is called by the process_make_exec() function to re-initialise the
    275  * thread descriptor of the calling thread (that will become the new process main
    276  * thread), and immediately jump to user code without returning to kentry!!!
     274 * calling thread descriptor, that will become the new process main thread.
    277275 * It must be called by the main thread of the calling process.
     276 * - The calling thread TRDID is not modified.
     277 * - The kernel stack (currently in use) is not modified. 
    278278 * - A new user stack vseg is created and initialised.
    279  * - The kernel stack (currently in use) is not modified. 
    280279 * - The function calls the hal_cpu_context_exec() to re-initialize the CPU context
    281  *   an jump to user code. 
      280 *   and the uzone registered in the kernel stack, and jump to user code. 
    282281 ***************************************************************************************
    283282 * @ entry_func : main thread entry point.
     
    329328
    330329/***************************************************************************************
    331  * This low-level function is called by the sched_handle_signals() function to releases
    332  * the physical memory allocated for a thread in a given cluster, when this thread
    333  * is marked for delete. This include the thread descriptor itself, the associated
    334  * CPU and FPU context, and the physical memory allocated for an user thread local stack.
     330 * This low-level function is called by the sched_handle_signals() function when a
     331 * thread is marked for delete. It removes the thread identified by the <thread>
     332 * argument from the process th_tbl[], and releases all physical memory allocated for
      333 * this thread. This includes the thread descriptor itself, the associated CPU and FPU context,
     334 * and the physical memory allocated for an user thread stack.
    335335 ***************************************************************************************
    336336 * @ thread  : pointer on the thread descriptor to release.
    337  * @ return true, if the thread was the last registerd thread in local process.
    338  **************************************************************************************/
    339 void thread_destroy( thread_t * thread );
     337 * @ return the number of threads registered in the process th_tbl[] before deletion.
     338 **************************************************************************************/
     339uint32_t thread_destroy( thread_t * thread );
    340340
    341341/***************************************************************************************
     
    383383 * This function is used by the four sys_thread_cancel(), sys_thread_exit(),
    384384 * sys_kill() and sys_exit() system calls to mark for delete a given thread.
    385  * It set the THREAD_BLOCKED_GLOBAL bit and set the the THREAD_FLAG_REQ_DELETE bit
    386  * in the thread descriptor identified by the <thread_xp> argument, to ask the scheduler
      385 * It sets the THREAD_BLOCKED_GLOBAL bit and the THREAD_FLAG_REQ_DELETE bit in the
     386 * thread descriptor identified by the <thread_xp> argument, to ask the scheduler
    387387 * to asynchronously delete the target thread, at the next scheduling point.
    388  * The calling thread can run in any cluster, as it uses remote accesses, but
    389  * the target thread cannot be the main thread of the process identified by the <pid>
    390  * argument, because the main thread must be deleted by the parent process argument.
     388 * The calling thread can run in any cluster, as it uses remote accesses.
     389 * This function makes a kernel panic if the target thread is the main thread,
      390 * because the main thread deletion will cause the process deletion, and a process
     391 * must be deleted by the parent process, running the wait function.
    391392 * If the target thread is running in "attached" mode, and the <is_forced> argument
     392393 * is false, this function implements the required synchronisation with the joining
  • trunk/kernel/kernel_config.h

    r624 r625  
    2626#define _KERNEL_CONFIG_H_
    2727
    28 #define CONFIG_ALMOS_VERSION           "Version 1.1 / October 2018"
     28#define CONFIG_ALMOS_VERSION           "Version 2.0 / April 2019"
    2929
    3030////////////////////////////////////////////////////////////////////////////////////////////
     
    4040
    4141#define DEBUG_BUSYLOCK                    0
    42 #define DEBUG_BUSYLOCK_PID                0x10001    // thread pid (when detailed debug)
    43 #define DEBUG_BUSYLOCK_TRDID              0x10000    // thread trdid (when detailed debug)
     42#define DEBUG_BUSYLOCK_PID                0x10001    // for busylock detailed debug
     43#define DEBUG_BUSYLOCK_TRDID              0x10000    // for busylock detailed debug
    4444                 
    4545#define DEBUG_CHDEV_CMD_RX                0
     
    9292#define DEBUG_FATFS_UPDATE_DENTRY         0
    9393
     94#define DEBUG_HAL_CONTEXT                 0
    9495#define DEBUG_HAL_EXCEPTIONS              0
    9596#define DEBUG_HAL_GPT_SET_PTE             0
     
    164165
    165166#define DEBUG_SCHED_HANDLE_SIGNALS        2
    166 #define DEBUG_SCHED_YIELD                 0     
     167#define DEBUG_SCHED_YIELD                 0
    167168#define DEBUG_SCHED_RPC_ACTIVATE          0
    168169
     
    186187#define DEBUG_SYS_IS_FG                   0
    187188#define DEBUG_SYS_KILL                    0
    188 #define DEBUG_SYS_OPEN                    0
    189 #define DEBUG_SYS_OPENDIR                 0
    190189#define DEBUG_SYS_MKDIR                   0
    191190#define DEBUG_SYS_MMAP                    0
    192191#define DEBUG_SYS_MUNMAP                  0
    193192#define DEBUG_SYS_MUTEX                   0
     193#define DEBUG_SYS_OPEN                    0
     194#define DEBUG_SYS_OPENDIR                 0
    194195#define DEBUG_SYS_READ                    0
    195196#define DEBUG_SYS_READDIR                 0
     
    230231#define DEBUG_VFS_INODE_CREATE            0
    231232#define DEBUG_VFS_INODE_LOAD_ALL          0
     233#define DEBUG_VFS_KERNEL_MOVE             0
    232234#define DEBUG_VFS_LINK                    0
    233235#define DEBUG_VFS_LOOKUP                  0
    234236#define DEBUG_VFS_LSEEK                   0
    235237#define DEBUG_VFS_MKDIR                   0
    236 #define DEBUG_VFS_NEW_CHILD_INIT          0
     238#define DEBUG_VFS_NEW_DENTRY_INIT         0
    237239#define DEBUG_VFS_OPEN                    0
    238240#define DEBUG_VFS_OPENDIR                 0
    239241#define DEBUG_VFS_STAT                    0
     242#define DEBUG_VFS_USER_MOVE               0
    240243#define DEBUG_VFS_UNLINK                  0
    241244
     
    248251#define DEBUG_VMM_HANDLE_PAGE_FAULT       0
    249252#define DEBUG_VMM_HANDLE_COW              0
    250 #define DEBUG_VMM_INIT                    0
    251253#define DEBUG_VMM_MMAP_ALLOC              0
    252254#define DEBUG_VMM_PAGE_ALLOCATE           0
     255#define DEBUG_VMM_REMOVE_VSEG             0
    253256#define DEBUG_VMM_RESIZE_VSEG             0
    254257#define DEBUG_VMM_SET_COW                 0
    255258#define DEBUG_VMM_UPDATE_PTE              0
     259#define DEBUG_VMM_USER_INIT               0
     260#define DEBUG_VMM_USER_RESET              0
    256261
    257262#define DEBUG_XHTAB                       0
     
    421426#define CONFIG_VMM_ARGS_SIZE          0x000004     // args vseg size         : 16  Kbytes
    422427#define CONFIG_VMM_ENVS_SIZE          0x000008     // envs vseg size         : 32  Kbytes
    423 #define CONFIG_VMM_STACK_SIZE         0x000100     // single stack vseg size : 1   Mbytes
     428#define CONFIG_VMM_STACK_SIZE         0x001000     // single stack vseg size : 16  Mbytes
    424429
    425430////////////////////////////////////////////////////////////////////////////////////////////
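The "16 Mbytes" comment attached to the new CONFIG_VMM_STACK_SIZE value can be checked with the throw-away snippet below. The 4 Kbytes page size is deduced from the args/envs comments just above (0x4 pages for 16 Kbytes), and the "one page left unmapped per slot" detail comes from the vmm_stack_alloc() code later in this changeset.

    #include <stdio.h>

    int main( void )
    {
        unsigned int slot_pages = 0x1000;      // new CONFIG_VMM_STACK_SIZE
        unsigned int page_size  = 4096;        // 4 Kbytes pages (0x4 pages == 16 Kbytes)

        printf("stack slot size   : %u Mbytes\n", (slot_pages * page_size) >> 20 );  // prints 16
        printf("usable stack part : %u pages\n" , slot_pages - 1 );  // first page of the slot stays unmapped
        return 0;
    }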
  • trunk/kernel/libk/elf.c

    r603 r625  
    196196
    197197#if DEBUG_ELF_LOAD
    198 uint32_t cycle = (uint32_t)hal_get_cycles();
    199 if( DEBUG_ELF_LOAD < cycle )
    200 printk("\n[%s] found %s vseg / base %x / size %x\n"
     198uint32_t   cycle = (uint32_t)hal_get_cycles();
     199thread_t * this  = CURRENT_THREAD;
     200if( DEBUG_ELF_LOAD < cycle )
     201printk("\n[%s] thread[%x,%x] found %s vseg / base %x / size %x\n"
    201202"  file_size %x / file_offset %x / mapper_xp %l / cycle %d\n",
    202 __FUNCTION__ , vseg_type_str(vseg->type) , vseg->min , vseg->max - vseg->min ,
      203__FUNCTION__ , this->process->pid, this->trdid,
     204vseg_type_str(vseg->type) , vseg->min , vseg->max - vseg->min ,
    203205vseg->file_size , vseg->file_offset , vseg->mapper_xp );
    204206#endif
  • trunk/kernel/libk/elf.h

    r457 r625  
    228228
    229229/****************************************************************************************
    230  * This function registers in VMM of the process identified by the <process> argument
    231  * the CODE and DATA vsegs defined in the .elf open file descriptor <file_xp>.
     230 * This function registers in the VSL of the process identified by the <process>
     231 * argument the CODE and DATA vsegs defined in the .elf file descriptor <file_xp>.
    232232 * The segments are not loaded in memory.
    233233 * It also registers the process entry point in VMM.
  • trunk/kernel/libk/remote_rwlock.c

    r623 r625  
    251251        thread_t *  thread_ptr = GET_PTR( thread_xp );
    252252
     253printk("\n@@@ in %s : release first waiting writer[%x,%x]\n",
     254__FUNCTION__, thread_cxy, thread_ptr );
     255
    253256        // remove this waiting thread from waiting list
    254257        xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );
  • trunk/kernel/mm/mapper.c

    r624 r625  
    153153
    154154#if DEBUG_MAPPER_GET_PAGE
    155 uint32_t cycle = (uint32_t)hal_get_cycles();
     155vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
     156uint32_t      cycle = (uint32_t)hal_get_cycles();
    156157char          name[CONFIG_VFS_MAX_NAME_LENGTH];
    157 vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
    158 vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
    159 if( DEBUG_MAPPER_GET_PAGE < cycle )
    160 printk("\n[%s] thread [%x,%x] enter for page %d of <%s> / cycle %d\n",
    161 __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
     158if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )  // FAT mapper
     159{
     160    printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n",
     161    __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
     162}
     163if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )  // file mapper
     164{
     165    vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
     166    printk("\n[%s] thread[%x,%x] enter for page %d of <%s> mapper / cycle %d\n",
     167    __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
     168}
    162169#endif
    163170
     
    235242#if DEBUG_MAPPER_GET_PAGE
    236243cycle = (uint32_t)hal_get_cycles();
    237 if( DEBUG_MAPPER_GET_PAGE < cycle )
    238 printk("\n[%s] thread[%x,%x] exit for page %d of <%s> / ppn %x / cycle %d\n",
    239 __FUNCTION__, this->process->pid, this->trdid,
    240 page_id, name, ppm_page2ppn( page_xp ), cycle );
     244if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
     245{
     246    printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x / cycle %d\n",
     247    __FUNCTION__, this->process->pid, this->trdid, page_id,
     248    name, ppm_page2ppn(page_xp), cycle );
     249}
     250if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
     251{
     252    printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper  / ppn %x / cycle %d\n",
     253    __FUNCTION__, this->process->pid, this->trdid, page_id,
     254    ppm_page2ppn(page_xp), cycle );
     255}
    241256#endif
    242257
     
    257272
    258273#if DEBUG_MAPPER_HANDLE_MISS
    259 uint32_t cycle = (uint32_t)hal_get_cycles();
     274uint32_t      cycle = (uint32_t)hal_get_cycles();
    260275char          name[CONFIG_VFS_MAX_NAME_LENGTH];
    261276vfs_inode_t * inode = mapper->inode;
    262 vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
    263 if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    264 printk("\n[%s] enter for page %d in <%s> / cycle %d",
    265 __FUNCTION__, page_id, name, cycle );
    266 if( DEBUG_MAPPER_HANDLE_MISS & 1 )
    267 grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
     277if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
     278{
     279    vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
     280    printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cycle %d",
     281    __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
     282   if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name );
     283}
     284if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
     285{
     286    printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cycle %d",
     287    __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
     288   if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" );
     289}
    268290#endif
    269291
     
    321343#if DEBUG_MAPPER_HANDLE_MISS
    322344cycle = (uint32_t)hal_get_cycles();
    323 if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    324 printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d",
    325 __FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
    326 if( DEBUG_MAPPER_HANDLE_MISS & 1 )
    327 grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
     345if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
     346{
     347    printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d",
     348    __FUNCTION__, this->process->pid, this->trdid,
     349    page_id, name, ppm_page2ppn( *page_xp ), cycle );
     350    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name );
     351}
     352if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
     353{
     354    printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d",
     355    __FUNCTION__, this->process->pid, this->trdid,
     356    page_id, ppm_page2ppn( *page_xp ), cycle );
     357    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" );
     358}
    328359#endif
    329360
     
    482513
    483514#if DEBUG_MAPPER_MOVE_KERNEL
    484 uint32_t   cycle = (uint32_t)hal_get_cycles();
    485 thread_t * this  = CURRENT_THREAD;
     515char          name[CONFIG_VFS_MAX_NAME_LENGTH];
     516uint32_t      cycle  = (uint32_t)hal_get_cycles();
     517thread_t    * this   = CURRENT_THREAD;
     518mapper_t    * mapper = GET_PTR( mapper_xp );
     519vfs_inode_t * inode  = hal_remote_lpt( XPTR( mapper_cxy , &mapper->inode ) );
     520vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
    486521if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    487 printk("\n[%s] thread[%x,%x] enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
    488 __FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
     522printk("\n[%s] thread[%x,%x] enter / %d bytes / offset %d / mapper <%s> / cycle %d\n",
     523__FUNCTION__, this->process->pid, this->trdid, size, file_offset, name, cycle );
    489524#endif
    490525
     
    496531    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    497532    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
    498 
    499 #if (DEBUG_MAPPER_MOVE_KERNEL & 1)
    500 if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    501 printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
    502 #endif
    503533
    504534    // compute source and destination clusters
     
    528558        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
    529559        else                         page_count = CONFIG_PPM_PAGE_SIZE;
    530 
    531 #if (DEBUG_MAPPER_MOVE_KERNEL & 1)
    532 if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    533 printk("\n[%s] page_id = %d / offset = %d / bytes = %d\n",
    534 __FUNCTION__ , page_id , page_offset , page_count );
    535 #endif
    536560
    537561        // get extended pointer on page descriptor
     
    560584#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
    561585if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    562 printk("\n[%s] src_cxy %x / src_ptr %x / dst_cxy %x / dst_ptr %x\n",
    563 __FUNCTION__, src_cxy, src_ptr, dst_cxy, dst_ptr );
     586{
     587    if( to_buffer )
     588    printk("\n[%s] mapper <%s> page %d => buffer(%x,%x) / %d bytes\n",
     589    __FUNCTION__, name, page_id, dst_cxy, dst_ptr, page_count );
     590    else
     591    printk("\n[%s] buffer(%x,%x) => mapper <%s> page %d / %d bytes\n",
     592    __FUNCTION__, src_cxy, src_ptr, name, page_id, page_count );
     593}
    564594#endif
    565595
     
    571601
    572602#if DEBUG_MAPPER_MOVE_KERNEL
    573 cycle = (uint32_t)hal_get_cycles();
     603cycle  = (uint32_t)hal_get_cycles();
    574604if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    575 printk("\n[%s] thread[%x,%x] exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
    576 __FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
     605printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
     606__FUNCTION__, this->process->pid, this->trdid, cycle );
    577607#endif
    578608
     
    662692
    663693    // get pointer on radix tree
    664     rt        = &mapper->rt;
     694    rt = &mapper->rt;
    665695
    666696    // initialise loop variable
     
    675705        if( page == NULL ) break;
    676706
    677 assert( (page->index == found_key ), __FUNCTION__, "wrong page descriptor index" );
    678 assert( (page->order == 0),          __FUNCTION__, "mapper page order must be 0" );
     707assert( (page->index == found_key ), "wrong page descriptor index" );
     708assert( (page->order == 0),          "mapper page order must be 0" );
    679709
    680710        // build extended pointer on page descriptor
     
    730760    char          buffer[4096];   // local buffer
    731761    uint32_t    * tabi;           // pointer on uint32_t to scan buffer
    732     char        * tabc;           // pointer on char to scan buffer
    733762    uint32_t      line;           // line index
    734763    uint32_t      word;           // word index
    735     uint32_t      n;              // char index
    736764    cxy_t         mapper_cxy;     // mapper cluster identifier
    737765    mapper_t    * mapper_ptr;     // mapper local pointer
     
    776804    // display 8 words per line
    777805    tabi = (uint32_t *)buffer;
    778     tabc = (char *)buffer;
    779806    printk("\n***** <%s> first %d bytes of page %d *****\n", name, nbytes, page_id );
    780807    for( line = 0 ; line < (nbytes >> 5) ; line++ )
    781808    {
    782         printk("%X : ", line );
     809        printk("%X : ", line << 5 );
    783810        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] );
    784         printk(" | ");
    785         for( n = 0 ; n < 32 ; n++ ) printk("%c", tabc[(line<<5) + n] );
    786811        printk("\n");
    787812    }
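The per-page "fragment" splitting used by the mapper move functions above (one fragment is the part of the transfer that falls in a single mapper page) can be reproduced with the stand-alone sketch below. It follows the first/last page arithmetic visible in the mapper.c diff; the handling of the first page is not shown in the diff, so the formula used here for it is a plausible reconstruction, and the 4 Kbytes page size is an assumption of the example.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                          // assumed 4 Kbytes mapper pages
    #define PAGE_SIZE  (1u << PAGE_SHIFT)
    #define PAGE_MASK  (PAGE_SIZE - 1)

    // print the per-page fragments covering [file_offset , file_offset + size[
    static void split_in_fragments( uint32_t file_offset, uint32_t size )
    {
        uint32_t min_byte = file_offset;
        uint32_t max_byte = file_offset + size - 1;
        uint32_t first    = min_byte >> PAGE_SHIFT;
        uint32_t last     = max_byte >> PAGE_SHIFT;

        for( uint32_t page_id = first ; page_id <= last ; page_id++ )
        {
            // first byte used in this page
            uint32_t page_offset = (page_id == first) ? (min_byte & PAGE_MASK) : 0;

            // number of bytes used in this page
            uint32_t page_count  = (page_id == last) ? (max_byte & PAGE_MASK) + 1 - page_offset
                                                     : PAGE_SIZE - page_offset;

            printf("page %u : offset %u / bytes %u\n", page_id, page_offset, page_count );
        }
    }

    int main( void )
    {
        split_in_fragments( 4090 , 5000 );         // a transfer crossing two page boundaries
        return 0;
    }

With these arguments the three printed fragments contain 6, 4096 and 898 bytes, i.e. exactly the requested 5000 bytes.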
  • trunk/kernel/mm/mapper.h

    r623 r625  
    123123
    124124/*******************************************************************************************
    125  * This function move data between a remote mapper, dentified by the <mapper_xp> argument,
      125 * This function moves data between a remote mapper, identified by the <mapper_xp> argument,
    126126 * and a distributed user buffer. It can be called by a thread running in any cluster.
    127127 * It is called by the vfs_user_move() to implement sys_read() and sys_write() syscalls.
     
    148148
    149149/********************************************************************************************
    150  * This function move data between a remote mapper and a remote kernel buffer.
    151  * It can be called by a thread running any cluster.
      150 * This function moves data between a remote mapper, identified by the <mapper_xp> argument,
      151 * and a localised remote kernel buffer. It can be called by a thread running in any cluster.
    152152 * If required, the data transfer is split in "fragments", where one fragment contains
    153153 * contiguous bytes in the same mapper page.
     
    215215/*******************************************************************************************
     216216 * This function allows writing a single word to a mapper seen as an array of uint32_t.
    217  * It has bee designed to support remote access tho the FAT mapper of the FATFS.
     217 * It has been designed to support remote access to the FAT mapper of the FATFS.
    218218 * It can be called by any thread running in any cluster.
    219219 * In case of miss, it takes the mapper lock in WRITE_MODE, load the missing
  • trunk/kernel/mm/page.h

    r623 r625  
    5050 *   test/modify the forks counter or the page flags.
    5151 * - The list entry is used to register the page in a free list or in dirty list.
    52  * NOTE: Size is 48 bytes for a 32 bits core.
    53  * TODO : the refcount use is not defined [AG]
     52 *   The refcount is used for page release to KMEM.
     53 * NOTE: the size is 48 bytes for a 32 bits core.
    5454 ************************************************************************************/
    5555
     
    6161    uint32_t          index;          /*! page index in mapper                 (4)  */
    6262        list_entry_t      list;           /*! for both dirty pages and free pages  (8)  */
    63         uint32_t          refcount;       /*! reference counter TODO ??? [AG]      (4)  */
      63        int32_t           refcount;       /*! reference counter for page release   (4)  */
    6464        uint32_t          forks;          /*! number of pending forks              (4)  */
    6565        remote_busylock_t lock;           /*! protect forks or flags modifs        (16) */
  • trunk/kernel/mm/ppm.c

    r611 r625  
    349349}  // end ppm_free_pages()
    350350
    351 ///////////////////////////////
    352 void ppm_print( char * string )
     351////////////////////////
     352void ppm_display( void )
    353353{
    354354        uint32_t       order;
     
    361361        busylock_acquire( &ppm->free_lock );
    362362
    363         printk("\n***  PPM in cluster %x / %s / %d pages ***\n",
    364     local_cxy , string, ppm->pages_nr );
     363        printk("\n***** PPM in cluster %x / %d pages\n", local_cxy , ppm->pages_nr );
    365364
    366365        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
  • trunk/kernel/mm/ppm.h

    r623 r625  
    176176 * string   : character string printed in header
    177177 ****************************************************************************************/
    178 void ppm_print( char * string );
     178void ppm_display( void );
    179179
    180180/*****************************************************************************************
  • trunk/kernel/mm/vmm.c

    r624 r625  
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    55 *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016,2017,2018)
     6 *           Alain Greiner (2016,2017,2018,2019)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    5555extern  process_t  process_zero;      // allocated in cluster.c
    5656
    57 ///////////////////////////////////////
    58 error_t vmm_init( process_t * process )
     57////////////////////////////////////////////////////////////////////////////////////////////
     58// This static function is called by the vmm_create_vseg() function, and implements
     59// the VMM STACK specific allocator.
     60////////////////////////////////////////////////////////////////////////////////////////////
     61// @ vmm      : [in]  pointer on VMM.
     62// @ ltid     : [in]  requested slot == local user thread identifier.
     63// @ vpn_base : [out] first allocated page
     64// @ vpn_size : [out] number of allocated pages
     65////////////////////////////////////////////////////////////////////////////////////////////
     66static void vmm_stack_alloc( vmm_t  * vmm,
     67                             ltid_t   ltid,
     68                             vpn_t  * vpn_base,
     69                             vpn_t  * vpn_size )
    5970{
    60     error_t   error;
     71
     72// check ltid argument
     73assert( (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
     74"slot index %d too large for an user stack vseg", ltid );
     75
     76    // get stack allocator pointer
     77    stack_mgr_t * mgr = &vmm->stack_mgr;
     78
     79    // get lock on stack allocator
     80    busylock_acquire( &mgr->lock );
     81
     82// check requested slot is available
     83assert( (bitmap_state( &mgr->bitmap , ltid ) == false),
     84"slot index %d already allocated", ltid );
     85
     86    // update bitmap
     87    bitmap_set( &mgr->bitmap , ltid );
     88
     89    // release lock on stack allocator
     90    busylock_release( &mgr->lock );
     91
      92    // returns vpn_base and vpn_size (the first page of the slot is left unallocated)
     93    *vpn_base = mgr->vpn_base + ltid * CONFIG_VMM_STACK_SIZE + 1;
     94    *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
     95
     96} // end vmm_stack_alloc()
     97
     98////////////////////////////////////////////////////////////////////////////////////////////
     99// This static function is called by the vmm_remove_vseg() function, and implements
      100// the VMM STACK specific deallocator.
     101////////////////////////////////////////////////////////////////////////////////////////////
     102// @ vmm      : [in] pointer on VMM.
     103// @ vseg     : [in] pointer on released vseg.
     104////////////////////////////////////////////////////////////////////////////////////////////
     105static void vmm_stack_free( vmm_t  * vmm,
     106                            vseg_t * vseg )
     107{
     108    // get stack allocator pointer
     109    stack_mgr_t * mgr = &vmm->stack_mgr;
     110
     111    // compute slot index
     112    uint32_t index = (vseg->vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE;
     113
     114// check index
     115assert( (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
     116"slot index %d too large for an user stack vseg", index );
     117
     118// check released slot is allocated
     119assert( (bitmap_state( &mgr->bitmap , index ) == true),
     120"released slot index %d non allocated", index );
     121
     122    // get lock on stack allocator
     123    busylock_acquire( &mgr->lock );
     124
     125    // update stacks_bitmap
     126    bitmap_clear( &mgr->bitmap , index );
     127
     128    // release lock on stack allocator
     129    busylock_release( &mgr->lock );
     130
     131}  // end vmm_stack_free()
     132
     133////////////////////////////////////////////////////////////////////////////////////////////
     134// This static function is called by the vmm_create_vseg() function, and implements
     135// the VMM MMAP specific allocator.
     136////////////////////////////////////////////////////////////////////////////////////////////
     137// @ vmm      : [in] pointer on VMM.
     138// @ npages   : [in] requested number of pages.
     139// @ vpn_base : [out] first allocated page.
     140// @ vpn_size : [out] actual number of allocated pages.
     141////////////////////////////////////////////////////////////////////////////////////////////
     142static error_t vmm_mmap_alloc( vmm_t * vmm,
     143                               vpn_t   npages,
     144                               vpn_t * vpn_base,
     145                               vpn_t * vpn_size )
     146{
     147    uint32_t   order;
     148    xptr_t     vseg_xp;
     149    vseg_t   * vseg;
     150    vpn_t      base;
     151    vpn_t      size;
     152    vpn_t      free;
     153
     154#if DEBUG_VMM_MMAP_ALLOC
     155thread_t * this = CURRENT_THREAD;
     156uint32_t cycle = (uint32_t)hal_get_cycles();
     157if( DEBUG_VMM_MMAP_ALLOC < cycle )
     158printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
     159__FUNCTION__, this->process->pid, this->trdid, cycle );
     160#endif
     161
     162    // number of allocated pages must be power of 2
     163    // compute actual size and order
     164    size  = POW2_ROUNDUP( npages );
     165    order = bits_log2( size );
     166
     167    // get mmap allocator pointer
     168    mmap_mgr_t * mgr = &vmm->mmap_mgr;
     169
     170    // build extended pointer on root of zombi_list[order]
     171    xptr_t root_xp = XPTR( local_cxy , &mgr->zombi_list[order] );
     172
     173    // take lock protecting zombi_lists
     174    busylock_acquire( &mgr->lock );
     175
     176    // get vseg from zombi_list or from mmap zone
     177    if( xlist_is_empty( root_xp ) )                   // from mmap zone
     178    {
     179        // check overflow
     180        free = mgr->first_free_vpn;
      181        if( (free + size) > mgr->vpn_size ) { busylock_release( &mgr->lock ); return -1; }  // release the lock on error
     182
     183        // update MMAP allocator
     184        mgr->first_free_vpn += size;
     185
     186        // compute base
     187        base = free;
     188    }
     189    else                                              // from zombi_list
     190    {
     191        // get pointer on zombi vseg from zombi_list
     192        vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
     193        vseg    = GET_PTR( vseg_xp );
     194
     195        // remove vseg from free-list
     196        xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
     197
     198        // compute base
     199        base = vseg->vpn_base;
     200    }
     201
     202    // release lock
     203    busylock_release( &mgr->lock );
     204
     205#if DEBUG_VMM_MMAP_ALLOC
     206cycle = (uint32_t)hal_get_cycles();
      207if( DEBUG_VMM_MMAP_ALLOC < cycle )
     208printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
     209__FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
     210#endif
     211
     212    // returns vpn_base, vpn_size
     213    *vpn_base = base;
     214    *vpn_size = size;
     215    return 0;
     216
     217}  // end vmm_mmap_alloc()
     218
     219////////////////////////////////////////////////////////////////////////////////////////////
     220// This static function is called by the vmm_remove_vseg() function, and implements
      221// the VMM MMAP specific deallocator.
     222////////////////////////////////////////////////////////////////////////////////////////////
     223// @ vmm      : [in] pointer on VMM.
     224// @ vseg     : [in] pointer on released vseg.
     225////////////////////////////////////////////////////////////////////////////////////////////
     226static void vmm_mmap_free( vmm_t  * vmm,
     227                           vseg_t * vseg )
     228{
     229    // get pointer on mmap allocator
     230    mmap_mgr_t * mgr = &vmm->mmap_mgr;
     231
     232    // compute zombi_list order
     233    uint32_t order = bits_log2( vseg->vpn_size );
     234
     235    // take lock protecting zombi lists
     236    busylock_acquire( &mgr->lock );
     237
     238    // update relevant zombi_list
     239    xlist_add_first( XPTR( local_cxy , &mgr->zombi_list[order] ),
     240                     XPTR( local_cxy , &vseg->xlist ) );
     241
     242    // release lock
     243    busylock_release( &mgr->lock );
     244
     245}  // end of vmm_mmap_free()
     246
     247////////////////////////////////////////////////////////////////////////////////////////////
     248// This static function registers one vseg in the VSL of a local process descriptor.
     249////////////////////////////////////////////////////////////////////////////////////////////
     250// vmm       : [in] pointer on VMM.
     251// vseg      : [in] pointer on vseg.
     252////////////////////////////////////////////////////////////////////////////////////////////
     253void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
     254                             vseg_t * vseg )
     255{
     256    // update vseg descriptor
     257    vseg->vmm = vmm;
     258
     259    // increment vsegs number
     260    vmm->vsegs_nr++;
     261
     262    // add vseg in vmm list
     263    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
     264                    XPTR( local_cxy , &vseg->xlist ) );
     265
      266}  // end vmm_attach_vseg_to_vsl()
     267
     268////////////////////////////////////////////////////////////////////////////////////////////
     269// This static function removes one vseg from the VSL of a local process descriptor.
     270////////////////////////////////////////////////////////////////////////////////////////////
     271// vmm       : [in] pointer on VMM.
     272// vseg      : [in] pointer on vseg.
     273////////////////////////////////////////////////////////////////////////////////////////////
     274void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
     275                               vseg_t * vseg )
     276{
     277    // update vseg descriptor
     278    vseg->vmm = NULL;
     279
     280    // decrement vsegs number
     281    vmm->vsegs_nr--;
     282
     283    // remove vseg from VSL
     284    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
     285
      286}  // end vmm_detach_vseg_from_vsl()
     287
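Unlike the former versions of these helpers (removed further down), vmm_attach_vseg_to_vsl() and vmm_detach_vseg_from_vsl() no longer take the VSL lock themselves: the caller must hold it. The caller-side pattern, as used by vmm_create_vseg() and vmm_fork_copy() in this patch (illustration only, not part of the patch):

    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );

    remote_rwlock_wr_acquire( lock_xp );       // take the VSL lock in write mode
    vmm_attach_vseg_to_vsl( vmm , vseg );      // register vseg in the VSL
    remote_rwlock_wr_release( lock_xp );       // release the VSL lock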
     288
     289
     290
     291////////////////////////////////////////////
     292error_t vmm_user_init( process_t * process )
     293{
    61294    vseg_t  * vseg_args;
    62295    vseg_t  * vseg_envs;
     
    65298    uint32_t  i;
    66299
    67 #if DEBUG_VMM_INIT
     300#if DEBUG_VMM_USER_INIT
    68301thread_t * this = CURRENT_THREAD;
    69302uint32_t cycle = (uint32_t)hal_get_cycles();
    70 if( DEBUG_VMM_INIT )
     303if( DEBUG_VMM_USER_INIT )
    71304printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
    72305__FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
     
    76309    vmm_t   * vmm = &process->vmm;
    77310
    78     // initialize VSL (empty)
    79     vmm->vsegs_nr = 0;
    80         xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
    81         remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );
    82 
     311// check UTILS zone
    83312assert( ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
    84313         (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) ,
    85314         "UTILS zone too small\n" );
    86315
     316// check STACK zone
    87317assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
    88318(CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
    89319"STACK zone too small\n");
    90320
    91     // register args vseg in VSL
     321    // register "args" vseg in VSL
    92322    base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
    93323    size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
     
    101331                                 XPTR_NULL,     // mapper_xp unused
    102332                                 local_cxy );
    103 
    104333    if( vseg_args == NULL )
    105334    {
     
    110339    vmm->args_vpn_base = base;
    111340
    112     // register the envs vseg in VSL
     341    // register "envs" vseg in VSL
    113342    base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
    114343    size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
     
    122351                                 XPTR_NULL,     // mapper_xp unused
    123352                                 local_cxy );
    124 
    125353    if( vseg_envs == NULL )
    126354    {
     
    130358
    131359    vmm->envs_vpn_base = base;
    132 
    133     // create GPT (empty)
    134     error = hal_gpt_create( &vmm->gpt );
    135 
    136     if( error )
    137     {
    138         printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
    139         return -1;
    140     }
    141 
    142     // initialize GPT lock
    143     remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
    144 
    145     // update process VMM with kernel vsegs as required by the hardware architecture
    146     error = hal_vmm_kernel_update( process );
    147 
    148     if( error )
    149     {
    150         printk("\n[ERROR] in %s : cannot update GPT for kernel vsegs\n", __FUNCTION__ );
    151         return -1;
    152     }
    153360
    154361    // initialize STACK allocator
     
    162369    vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
    163370    busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
    164     for( i = 0 ; i < 32 ; i++ ) list_root_init( &vmm->mmap_mgr.zombi_list[i] );
     371    for( i = 0 ; i < 32 ; i++ )
     372    {
     373        xlist_root_init( XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] ) );
     374    }
    165375
    166376    // initialize instrumentation counters
     
    169379    hal_fence();
    170380
    171 #if DEBUG_VMM_INIT
     381#if DEBUG_VMM_USER_INIT
    172382cycle = (uint32_t)hal_get_cycles();
    173 if( DEBUG_VMM_INIT )
     383if( DEBUG_VMM_USER_INIT )
    174384printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
    175385__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
     
    178388    return 0;
    179389
    180 }  // end vmm_init()
    181 
     390}  // end vmm_user_init()
    182391
    183392//////////////////////////////////////////
    184 void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
    185                              vseg_t * vseg )
     393void vmm_user_reset( process_t * process )
    186394{
    187     // build extended pointer on rwlock protecting VSL
    188     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
    189 
    190     // get rwlock in write mode
    191     remote_rwlock_wr_acquire( lock_xp );
    192 
    193     // update vseg descriptor
    194     vseg->vmm = vmm;
    195 
    196     // increment vsegs number
    197     vmm->vsegs_nr++;
    198 
    199     // add vseg in vmm list
    200     xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
    201                     XPTR( local_cxy , &vseg->xlist ) );
    202 
    203     // release rwlock in write mode
    204     remote_rwlock_wr_release( lock_xp );
    205 }
    206 
    207 ////////////////////////////////////////////
    208 void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
    209                                vseg_t * vseg )
    210 {
    211     // get vseg type
    212     uint32_t type = vseg->type;
    213 
    214     // build extended pointer on rwlock protecting VSL
    215     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
    216 
    217     // get rwlock in write mode
    218     remote_rwlock_wr_acquire( lock_xp );
    219 
    220     // update vseg descriptor
    221     vseg->vmm = NULL;
    222 
    223     // remove vseg from VSL
    224     xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
    225 
    226     // release rwlock in write mode
    227     remote_rwlock_wr_release( lock_xp );
    228 
    229     // release the stack slot to VMM stack allocator if STACK type
    230     if( type == VSEG_TYPE_STACK )
    231     {
    232         // get pointer on stack allocator
    233         stack_mgr_t * mgr = &vmm->stack_mgr;
    234 
    235         // compute slot index
    236         uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE);
    237 
    238         // update stacks_bitmap
    239         busylock_acquire( &mgr->lock );
    240         bitmap_clear( &mgr->bitmap , index );
    241         busylock_release( &mgr->lock );
    242     }
    243 
    244     // release the vseg to VMM mmap allocator if MMAP type
    245     if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) )
    246     {
    247         // get pointer on mmap allocator
    248         mmap_mgr_t * mgr = &vmm->mmap_mgr;
    249 
    250         // compute zombi_list index
    251         uint32_t index = bits_log2( vseg->vpn_size );
    252 
    253         // update zombi_list
    254         busylock_acquire( &mgr->lock );
    255         list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
    256         busylock_release( &mgr->lock );
    257     }
    258 
    259     // release physical memory allocated for vseg if no MMAP and no kernel type
    260     if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) &&
    261         (type != VSEG_TYPE_KCODE) && (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
    262     {
    263         vseg_free( vseg );
    264     }
    265 
    266 }  // end vmm_remove_vseg_from_vsl()
     395    xptr_t       vseg_xp;
     396        vseg_t     * vseg;
     397    vseg_type_t  vseg_type;
     398
     399#if DEBUG_VMM_USER_RESET
     400uint32_t cycle = (uint32_t)hal_get_cycles();
     401thread_t * this = CURRENT_THREAD;
     402if( DEBUG_VMM_USER_RESET < cycle )
     403printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
     404__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
     405#endif
     406
     407#if (DEBUG_VMM_USER_RESET & 1 )
     408if( DEBUG_VMM_USER_RESET < cycle )
     409hal_vmm_display( process , true );
     410#endif
     411
     412    // get pointer on local VMM
     413    vmm_t * vmm = &process->vmm;
     414
     415    // build extended pointer on VSL root and VSL lock
     416    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
     417    xptr_t   lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
     418
     419    // take the VSL lock
     420        remote_rwlock_wr_acquire( lock_xp );
     421
      422    // scan the VSL to delete all non-kernel vsegs
      423    // (we don't use a FOREACH because items are deleted during the scan)
     424    xptr_t   iter_xp;
     425    xptr_t   next_xp;
     426        for( iter_xp = hal_remote_l64( root_xp ) ;
     427         iter_xp != root_xp ;
     428         iter_xp = next_xp )
     429        {
     430        // save extended pointer on next item in xlist
     431        next_xp = hal_remote_l64( iter_xp );
     432
     433        // get pointers on current vseg in VSL
     434        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
     435        vseg      = GET_PTR( vseg_xp );
     436        vseg_type = vseg->type;
     437
     438#if( DEBUG_VMM_USER_RESET & 1 )
     439if( DEBUG_VMM_USER_RESET < cycle )
     440printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n",
     441__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
     442#endif
      443        // delete non-kernel vseg
     444        if( (vseg_type != VSEG_TYPE_KCODE) &&
     445            (vseg_type != VSEG_TYPE_KDATA) &&
     446            (vseg_type != VSEG_TYPE_KDEV ) )
     447        {
     448            // remove vseg from VSL
     449            vmm_remove_vseg( process , vseg );
     450
     451#if( DEBUG_VMM_USER_RESET & 1 )
     452if( DEBUG_VMM_USER_RESET < cycle )
     453printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
     454__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
     455#endif
     456        }
     457        else
     458        {
     459
     460#if( DEBUG_VMM_USER_RESET & 1 )
     461if( DEBUG_VMM_USER_RESET < cycle )
     462printk("\n[%s] keep %s vseg / vpn_base %x / vpn_size %d\n",
     463__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
     464#endif
     465        }
     466        }  // end loop on vsegs in VSL
     467
     468    // release the VSL lock
     469        remote_rwlock_wr_release( lock_xp );
     470
      471// FIXME : the process copies must also be handled...
     472
     473#if DEBUG_VMM_USER_RESET
     474cycle = (uint32_t)hal_get_cycles();
     475if( DEBUG_VMM_USER_RESET < cycle )
     476printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
     477__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
     478#endif
     479
     480}  // end vmm_user_reset()
    267481
    268482////////////////////////////////////////////////
     
    507721    cxy_t       page_cxy;
    508722    xptr_t      forks_xp;       // extended pointer on forks counter in page descriptor
    509     xptr_t      lock_xp;        // extended pointer on lock protecting the forks counter
    510723    xptr_t      parent_root_xp;
    511724    bool_t      mapped;
     
    528741    child_vmm  = &child_process->vmm;
    529742
    530     // get extended pointer on lock protecting the parent VSL
    531     parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsegs_lock );
    532 
    533     // initialize the lock protecting the child VSL
    534     remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ), LOCK_VMM_STACK );
     743    // initialize the locks protecting the child VSL and GPT
     744    remote_rwlock_init( XPTR( local_cxy , &child_vmm->gpt_lock ) , LOCK_VMM_GPT );
     745        remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsl_lock ) , LOCK_VMM_VSL );
    535746
    536747    // initialize the child VSL as empty
     
    538749    child_vmm->vsegs_nr = 0;
    539750
    540     // create the child GPT
     751    // create an empty child GPT
    541752    error = hal_gpt_create( &child_vmm->gpt );
    542 
    543753    if( error )
    544754    {
     
    547757    }
    548758
    549     // build extended pointer on parent VSL
     759    // build extended pointer on parent VSL root and lock
    550760    parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );
     761    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock );
    551762
    552763    // take the lock protecting the parent VSL in read mode
     
    556767    XLIST_FOREACH( parent_root_xp , iter_xp )
    557768    {
    558         // get local and extended pointers on current parent vseg
     769        // get pointers on current parent vseg
    559770        parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    560771        parent_vseg    = GET_PTR( parent_vseg_xp );
     
    587798            vseg_init_from_ref( child_vseg , parent_vseg_xp );
    588799
     800            // build extended pointer on VSL lock
     801            xptr_t lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );
     802 
     803            // take the VSL lock in write mode
     804            remote_rwlock_wr_acquire( lock_xp );
     805
    589806            // register child vseg in child VSL
    590807            vmm_attach_vseg_to_vsl( child_vmm , child_vseg );
     808
     809            // release the VSL lock
     810            remote_rwlock_wr_release( lock_xp );
    591811
    592812#if DEBUG_VMM_FORK_COPY
     
    597817hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
    598818#endif
    599 
    600             // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT
     819            // copy DATA, ANON, REMOTE, FILE parent GPT entries to child GPT
    601820            if( type != VSEG_TYPE_CODE )
    602821            {
    603                 // activate the COW for DATA, MMAP, REMOTE vsegs only
     822                // activate the COW for DATA, ANON, REMOTE vsegs only
    604823                cow = ( type != VSEG_TYPE_FILE );
    605824
     
    611830                {
    612831                    error = hal_gpt_pte_copy( &child_vmm->gpt,
     832                                              vpn,
    613833                                              XPTR( parent_cxy , &parent_vmm->gpt ),
    614834                                              vpn,
     
    677897    child_vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
    678898    child_vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
    679     for( i = 0 ; i < 32 ; i++ ) list_root_init( &child_vmm->mmap_mgr.zombi_list[i] );
     899    for( i = 0 ; i < 32 ; i++ )
     900    {
     901        xlist_root_init( XPTR( local_cxy , &child_vmm->mmap_mgr.zombi_list[i] ) );
     902    }
    680903
    681904    // initialize instrumentation counters
     
    726949    vmm_t  * vmm = &process->vmm;
    727950
    728     // get extended pointer on VSL root and VSL lock
    729     xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
     951    // build extended pointer on VSL root, VSL lock and GPT lock
     952    xptr_t   vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
     953    xptr_t   vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
     954    xptr_t   gpt_lock_xp = XPTR( local_cxy , &vmm->gpt_lock );
     955
     956    // take the VSL lock
     957    remote_rwlock_wr_acquire( vsl_lock_xp );
    730958
    731959    // scan the VSL to delete all registered vsegs
    732     // (don't use a FOREACH for item deletion in xlist)
    733 
    734         while( !xlist_is_empty( root_xp ) )
      960    // (we don't use a FOREACH because items are deleted during the scan)
     961    xptr_t  iter_xp;
     962    xptr_t  next_xp;
     963        for( iter_xp = hal_remote_l64( vsl_root_xp ) ;
     964         iter_xp != vsl_root_xp ;
     965         iter_xp = next_xp )
    735966        {
    736         // get pointer on first vseg in VSL
    737                 vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
    738         vseg    = GET_PTR( vseg_xp );
     967        // save extended pointer on next item in xlist
     968        next_xp = hal_remote_l64( iter_xp );
     969
     970        // get pointers on current vseg in VSL
     971        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
     972        vseg      = GET_PTR( vseg_xp );
    739973
    740974        // delete vseg and release physical pages
    741         vmm_delete_vseg( process->pid , vseg->min );
     975        vmm_remove_vseg( process , vseg );
    742976
    743977#if( DEBUG_VMM_DESTROY & 1 )
     
    749983        }
    750984
    751     // remove all vsegs from zombi_lists in MMAP allocator
     985    // release the VSL lock
     986    remote_rwlock_wr_release( vsl_lock_xp );
     987
     988    // remove all registered MMAP vsegs
     989    // from zombi_lists in MMAP allocator
    752990    uint32_t i;
    753991    for( i = 0 ; i<32 ; i++ )
    754992    {
    755             while( !list_is_empty( &vmm->mmap_mgr.zombi_list[i] ) )
     993        // build extended pointer on zombi_list[i]
     994        xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] );
     995 
     996        // scan zombi_list[i]
     997            while( !xlist_is_empty( root_xp ) )
    756998            {
    757                     vseg = LIST_FIRST( &vmm->mmap_mgr.zombi_list[i] , vseg_t , zlist );
     999                    vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
     1000            vseg    = GET_PTR( vseg_xp );
    7581001
    7591002#if( DEBUG_VMM_DESTROY & 1 )
     
    7651008            vseg->vmm = NULL;
    7661009
    767             // remove vseg from  xlist
     1010            // remove vseg from  zombi_list
    7681011            xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
    7691012
     
    7791022    }
    7801023
     1024    // take the GPT lock
     1025    remote_rwlock_wr_acquire( gpt_lock_xp );
     1026
    7811027    // release memory allocated to the GPT itself
    7821028    hal_gpt_destroy( &vmm->gpt );
     1029
     1030    // release the GPT lock
     1031    remote_rwlock_wr_release( gpt_lock_xp );
    7831032
    7841033#if DEBUG_VMM_DESTROY
     
    8161065}  // end vmm_check_conflict()
    8171066
    818 ////////////////////////////////////////////////////////////////////////////////////////////
    819 // This static function is called by the vmm_create_vseg() function, and implements
    820 // the VMM stack_vseg specific allocator.
    821 ////////////////////////////////////////////////////////////////////////////////////////////
    822 // @ vmm      : pointer on VMM.
    823 // @ vpn_base : (return value) first allocated page
    824 // @ vpn_size : (return value) number of allocated pages
    825 ////////////////////////////////////////////////////////////////////////////////////////////
    826 static error_t vmm_stack_alloc( vmm_t * vmm,
    827                                 vpn_t * vpn_base,
    828                                 vpn_t * vpn_size )
    829 {
    830     // get stack allocator pointer
    831     stack_mgr_t * mgr = &vmm->stack_mgr;
    832 
    833     // get lock on stack allocator
    834     busylock_acquire( &mgr->lock );
    835 
    836     // get first free slot index in bitmap
    837     int32_t index = bitmap_ffc( &mgr->bitmap , 4 );
    838     if( (index < 0) || (index > 31) )
    839     {
    840         busylock_release( &mgr->lock );
    841         return 0xFFFFFFFF;
    842     }
    843 
    844     // update bitmap
    845     bitmap_set( &mgr->bitmap , index );
    846 
    847     // release lock on stack allocator
    848     busylock_release( &mgr->lock );
    849 
    850     // returns vpn_base, vpn_size (one page non allocated)
    851     *vpn_base = mgr->vpn_base + index * CONFIG_VMM_STACK_SIZE + 1;
    852     *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
    853     return 0;
    854 
    855 } // end vmm_stack_alloc()
    856 
    857 ////////////////////////////////////////////////////////////////////////////////////////////
    858 // This static function is called by the vmm_create_vseg() function, and implements
    859 // the VMM MMAP specific allocator.
    860 ////////////////////////////////////////////////////////////////////////////////////////////
    861 // @ vmm      : [in] pointer on VMM.
    862 // @ npages   : [in] requested number of pages.
    863 // @ vpn_base : [out] first allocated page.
    864 // @ vpn_size : [out] actual number of allocated pages.
    865 ////////////////////////////////////////////////////////////////////////////////////////////
    866 static error_t vmm_mmap_alloc( vmm_t * vmm,
    867                                vpn_t   npages,
    868                                vpn_t * vpn_base,
    869                                vpn_t * vpn_size )
    870 {
    871     uint32_t   index;
    872     vseg_t   * vseg;
    873     vpn_t      base;
    874     vpn_t      size;
    875     vpn_t      free;
    876 
    877 #if DEBUG_VMM_MMAP_ALLOC
    878 thread_t * this = CURRENT_THREAD;
    879 uint32_t cycle = (uint32_t)hal_get_cycles();
    880 if( DEBUG_VMM_MMAP_ALLOC < cycle )
    881 printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
    882 __FUNCTION__, this->process->pid, this->trdid, cycle );
    883 #endif
    884 
    885     // vseg size must be power of 2
    886     // compute actual size and index in zombi_list array
    887     size  = POW2_ROUNDUP( npages );
    888     index = bits_log2( size );
    889 
    890     // get mmap allocator pointer
    891     mmap_mgr_t * mgr = &vmm->mmap_mgr;
    892 
    893     // get lock on mmap allocator
    894     busylock_acquire( &mgr->lock );
    895 
    896     // get vseg from zombi_list or from mmap zone
    897     if( list_is_empty( &mgr->zombi_list[index] ) )     // from mmap zone
    898     {
    899         // check overflow
    900         free = mgr->first_free_vpn;
    901         if( (free + size) > mgr->vpn_size ) return -1;
    902 
    903         // update MMAP allocator
    904         mgr->first_free_vpn += size;
    905 
    906         // compute base
    907         base = free;
    908     }
    909     else                                             // from zombi_list
    910     {
    911         // get pointer on zombi vseg from zombi_list
    912         vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist );
    913 
    914         // remove vseg from free-list
    915         list_unlink( &vseg->zlist );
    916 
    917         // compute base
    918         base = vseg->vpn_base;
    919     }
    920 
    921     // release lock on mmap allocator
    922     busylock_release( &mgr->lock );
    923 
    924 #if DEBUG_VMM_MMAP_ALLOC
    925 cycle = (uint32_t)hal_get_cycles();
    926 if( DEBUG_VMM_DESTROY < cycle )
    927 printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
    928 __FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
    929 #endif
    930 
    931     // returns vpn_base, vpn_size
    932     *vpn_base = base;
    933     *vpn_size = size;
    934     return 0;
    935 
    936 }  // end vmm_mmap_alloc()
     1067
    9371068
    9381069////////////////////////////////////////////////
     
    9681099    {
    9691100        // get vpn_base and vpn_size from STACK allocator
    970         error = vmm_stack_alloc( vmm , &vpn_base , &vpn_size );
    971         if( error )
    972         {
    973             printk("\n[ERROR] in %s : no space for stack vseg / process %x in cluster %x\n",
    974             __FUNCTION__ , process->pid , local_cxy );
    975             return NULL;
    976         }
     1101        vmm_stack_alloc( vmm , base , &vpn_base , &vpn_size );
    9771102
    9781103        // compute vseg base and size from vpn_base and vpn_size
     
    10721197               cxy );
    10731198
     1199    // build extended pointer on VSL lock
     1200    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
     1201 
     1202    // take the VSL lock in write mode
     1203    remote_rwlock_wr_acquire( lock_xp );
     1204
    10741205    // attach vseg to VSL
    10751206        vmm_attach_vseg_to_vsl( vmm , vseg );
     1207
     1208    // release the VSL lock
     1209    remote_rwlock_wr_release( lock_xp );
    10761210
    10771211#if DEBUG_VMM_CREATE_VSEG
     
    10861220}  // vmm_create_vseg()
    10871221
    1088 ///////////////////////////////////
    1089 void vmm_delete_vseg( pid_t    pid,
    1090                       intptr_t vaddr )
     1222
     1223//////////////////////////////////////////
     1224void vmm_remove_vseg( process_t * process,
     1225                      vseg_t    * vseg )
    10911226{
    1092     process_t * process;    // local pointer on local process
    1093     vmm_t     * vmm;        // local pointer on local process VMM
    1094     vseg_t    * vseg;       // local pointer on local vseg containing vaddr
    1095     gpt_t     * gpt;        // local pointer on local process GPT
     1227    vmm_t     * vmm;        // local pointer on process VMM
     1228    bool_t      is_ref;     // local process is reference process
     1229    uint32_t    vseg_type;  // vseg type
    10961230    vpn_t       vpn;        // VPN of current PTE
    10971231    vpn_t       vpn_min;    // VPN of first PTE
     
    11031237    cxy_t       page_cxy;   // page descriptor cluster
    11041238    page_t    * page_ptr;   // page descriptor pointer
    1105     xptr_t      forks_xp;   // extended pointer on pending forks counter
    1106     xptr_t      lock_xp;    // extended pointer on lock protecting forks counter
    1107     uint32_t    forks;      // actual number of pendinf forks
    1108     uint32_t    vseg_type;  // vseg type
    1109 
    1110 #if DEBUG_VMM_DELETE_VSEG
    1111 uint32_t   cycle = (uint32_t)hal_get_cycles();
    1112 thread_t * this  = CURRENT_THREAD;
    1113 if( DEBUG_VMM_DELETE_VSEG < cycle )
    1114 printk("\n[%s] thread[%x,%x] enter / process %x / vaddr %x / cycle %d\n",
    1115 __FUNCTION__, this->process->pid, this->trdid, pid, vaddr, cycle );
    1116 #endif
    1117 
    1118     // get local pointer on local process descriptor
    1119     process = cluster_get_local_process_from_pid( pid );
    1120 
    1121     if( process == NULL )
    1122     {
    1123         printk("\n[ERRORR] in %s : cannot get local process descriptor\n",
    1124         __FUNCTION__ );
    1125         return;
    1126     }
    1127 
    1128     // get pointers on local process VMM an GPT
     1239    xptr_t      count_xp;   // extended pointer on page refcount
     1240    uint32_t    count;      // current value of page refcount
     1241
     1242// check arguments
     1243assert( (process != NULL), "process argument is NULL" );
     1244assert( (vseg    != NULL), "vseg argument is NULL" );
     1245
     1246    // compute is_ref
     1247    is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
     1248
     1249    // get pointers on local process VMM
    11291250    vmm = &process->vmm;
    1130     gpt = &process->vmm.gpt;
    1131 
    1132     // get local pointer on vseg containing vaddr
    1133     vseg = vmm_vseg_from_vaddr( vmm , vaddr );
    1134 
    1135     if( vseg == NULL )
    1136     {
    1137         printk("\n[ERRORR] in %s : cannot get vseg descriptor\n",
    1138         __FUNCTION__ );
    1139         return;
    1140     }
    11411251
    11421252    // get relevant vseg infos
     
    11451255    vpn_max   = vpn_min + vseg->vpn_size;
    11461256
    1147     // loop to invalidate all vseg PTEs in GPT
     1257#if DEBUG_VMM_REMOVE_VSEG
     1258uint32_t   cycle = (uint32_t)hal_get_cycles();
     1259thread_t * this  = CURRENT_THREAD;
     1260if( DEBUG_VMM_REMOVE_VSEG < cycle )
     1261printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
     1262__FUNCTION__, this->process->pid, this->trdid,
     1263process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
     1264#endif
     1265
     1266    // loop on PTEs in GPT
    11481267        for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    11491268    {
    1150         // get ppn and attr from GPT entry
    1151         hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );
    1152 
    1153         if( attr & GPT_MAPPED )  // entry is mapped
     1269        // get ppn and attr
     1270        hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn );
     1271
     1272        if( attr & GPT_MAPPED )  // PTE is mapped
    11541273        {
    11551274
    1156 #if( DEBUG_VMM_DELETE_VSEG & 1 )
    1157 if( DEBUG_VMM_DELETE_VSEG < cycle )
    1158 printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) );
     1275#if( DEBUG_VMM_REMOVE_VSEG & 1 )
     1276if( DEBUG_VMM_REMOVE_VSEG < cycle )
     1277printk("- unmap vpn %x / ppn %x / %s" , vpn , ppn, vseg_type_str(vseg_type) );
    11591278#endif
    11601279            // unmap GPT entry in local GPT
    1161             hal_gpt_reset_pte( gpt , vpn );
    1162 
    1163             // the allocated page is not released to for kernel vseg
    1164             if( (vseg_type != VSEG_TYPE_KCODE) &&
    1165                 (vseg_type != VSEG_TYPE_KDATA) &&
    1166                 (vseg_type != VSEG_TYPE_KDEV ) )
     1280            hal_gpt_reset_pte( &vmm->gpt , vpn );
     1281
     1282            // get pointers on physical page descriptor
     1283            page_xp  = ppm_ppn2page( ppn );
     1284            page_cxy = GET_CXY( page_xp );
     1285            page_ptr = GET_PTR( page_xp );
     1286
     1287            // decrement page refcount
     1288            count_xp = XPTR( page_cxy , &page_ptr->refcount );
     1289            count    = hal_remote_atomic_add( count_xp , -1 );
     1290
     1291            // compute the ppn_release condition depending on vseg type
     1292            bool_t ppn_release;
     1293            if( (vseg_type == VSEG_TYPE_FILE)  ||
     1294                (vseg_type == VSEG_TYPE_KCODE) ||
     1295                (vseg_type == VSEG_TYPE_KDATA) ||
     1296                (vseg_type == VSEG_TYPE_KDEV) )           
    11671297            {
    1168                 // get extended pointer on physical page descriptor
    1169                 page_xp  = ppm_ppn2page( ppn );
    1170                 page_cxy = GET_CXY( page_xp );
    1171                 page_ptr = GET_PTR( page_xp );
    1172 
    1173 // FIXME This code must be re-written, as the actual release depends on vseg type,
    1174 // the reference cluster, the page refcount and/or the forks counter...
    1175 
    1176                 // get extended pointers on forks and lock fields
    1177                 forks_xp = XPTR( page_cxy , &page_ptr->forks );
    1178                 lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    1179 
    1180                 // get the lock protecting the page
     1298                // no physical page release for FILE and KERNEL
     1299                ppn_release = false;
     1300            }
     1301            else if( (vseg_type == VSEG_TYPE_CODE)  ||
     1302                     (vseg_type == VSEG_TYPE_STACK) )
     1303            {
     1304                // always release physical page for private vsegs
     1305                ppn_release = true;
     1306            }
     1307            else if( (vseg_type == VSEG_TYPE_ANON)  ||
     1308                     (vseg_type == VSEG_TYPE_REMOTE) )
     1309            {
     1310                // release physical page if reference cluster
     1311                ppn_release = is_ref;
     1312            }
     1313            else if( is_ref )  // vseg_type == DATA in reference cluster
     1314            {
     1315                // get extended pointers on forks and lock field in page descriptor
     1316                xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
     1317                xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
     1318
     1319                // take lock protecting "forks" counter
    11811320                remote_busylock_acquire( lock_xp );
    11821321
    1183                 // get pending forks counter
    1184                 forks = hal_remote_l32( forks_xp );
    1185 
    1186                 if( forks )  // decrement pending forks counter
     1322                // get number of pending forks from page descriptor
     1323                uint32_t forks = hal_remote_l32( forks_xp );
     1324
     1325                // decrement pending forks counter if required
     1326                if( forks )  hal_remote_atomic_add( forks_xp , -1 );
     1327
     1328                // release lock protecting "forks" counter
     1329                remote_busylock_release( lock_xp );
     1330
     1331                // release physical page if forks == 0
     1332                ppn_release = (forks == 0);
     1333            }
     1334            else              // vseg_type == DATA not in reference cluster
     1335            {
     1336                // no physical page release if not in reference cluster
     1337                ppn_release = false;
     1338            }
     1339
     1340            // release physical page to relevant kmem when required
     1341            if( ppn_release )
     1342            {
     1343                if( page_cxy == local_cxy )
    11871344                {
    1188                     // update forks counter
    1189                     hal_remote_atomic_add( forks_xp , -1 );
    1190 
    1191                     // release the lock protecting the page
    1192                     remote_busylock_release( lock_xp );
    1193                 } 
    1194                 else         // release physical page to relevant cluster
     1345                    req.type = KMEM_PAGE;
     1346                    req.ptr  = page_ptr;
     1347                    kmem_free( &req );
     1348                }
     1349                else
    11951350                {
    1196                     // release the lock protecting the page
    1197                     remote_busylock_release( lock_xp );
    1198 
    1199                     // release the page to kmem
    1200                     if( page_cxy == local_cxy )   // local cluster
    1201                     {
    1202                         req.type = KMEM_PAGE;
    1203                         req.ptr  = page_ptr;
    1204                         kmem_free( &req );
    1205                     }
    1206                     else                          // remote cluster
    1207                     {
    1208                         rpc_pmem_release_pages_client( page_cxy , page_ptr );
    1209                     }
    1210 
    1211 #if( DEBUG_VMM_DELETE_VSEG & 1 )
    1212 if( DEBUG_VMM_DELETE_VSEG < cycle )
    1213 printk("- release ppn %x\n", ppn );
    1214 #endif
     1351                    rpc_pmem_release_pages_client( page_cxy , page_ptr );
    12151352                }
    1216 
    12171353            }
     1354
     1355#if( DEBUG_VMM_REMOVE_VSEG & 1 )
     1356if( DEBUG_VMM_REMOVE_VSEG < cycle )
     1357{
     1358    if( ppn_release ) printk(" / released to kmem\n" );
     1359    else              printk("\n");
     1360}
     1361#endif
    12181362        }
    12191363    }
    12201364
    1221     // remove vseg from VSL and release vseg descriptor (if not MMAP)
     1365    // remove vseg from VSL
    12221366    vmm_detach_vseg_from_vsl( vmm , vseg );
    12231367
    1224 #if DEBUG_VMM_DELETE_VSEG
     1368    // release vseg descriptor depending on vseg type
     1369    if( vseg_type == VSEG_TYPE_STACK )
     1370    {
     1371        // release slot to local stack allocator
     1372        vmm_stack_free( vmm , vseg );
     1373
     1374        // release vseg descriptor to local kmem
     1375        vseg_free( vseg );
     1376    }
     1377    else if( (vseg_type == VSEG_TYPE_ANON) ||
     1378             (vseg_type == VSEG_TYPE_FILE) ||
     1379             (vseg_type == VSEG_TYPE_REMOTE) ) 
     1380    {
     1381        // release vseg to local mmap allocator
     1382        vmm_mmap_free( vmm , vseg );
     1383    }
     1384    else
     1385    {
     1386        // release vseg descriptor to local kmem
     1387        vseg_free( vseg );
     1388    }
     1389
     1390#if DEBUG_VMM_REMOVE_VSEG
    12251391cycle = (uint32_t)hal_get_cycles();
    1226 if( DEBUG_VMM_DELETE_VSEG < cycle )
    1227 printk("\n[%s] thread[%x,%x] exit / process %x / vseg %s / base %x / cycle %d\n",
    1228 __FUNCTION__, this->process->pid, this->trdid, pid, vseg_type_str(vseg->type), vaddr, cycle );
    1229 #endif
    1230 
    1231 }  // end vmm_delete_vseg()
     1392if( DEBUG_VMM_REMOVE_VSEG < cycle )
      1393printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n",
     1394__FUNCTION__, this->process->pid, this->trdid,
     1395process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
     1396#endif
     1397
     1398}  // end vmm_remove_vseg()
     1399
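The page-release policy implemented above, which is the point of this changeset, can be restated as a single predicate (illustration only, not part of the patch; the helper name is hypothetical, the logic mirrors the code above):

    static bool_t vseg_ppn_must_be_released( uint32_t vseg_type,
                                             bool_t   is_ref,   // local cluster is the reference
                                             uint32_t forks )   // pending forks counter of the page
    {
        if( (vseg_type == VSEG_TYPE_FILE)  || (vseg_type == VSEG_TYPE_KCODE) ||
            (vseg_type == VSEG_TYPE_KDATA) || (vseg_type == VSEG_TYPE_KDEV) )
            return false;                      // FILE and KERNEL pages are never released
        if( (vseg_type == VSEG_TYPE_CODE)  || (vseg_type == VSEG_TYPE_STACK) )
            return true;                       // private pages are always released
        if( (vseg_type == VSEG_TYPE_ANON)  || (vseg_type == VSEG_TYPE_REMOTE) )
            return is_ref;                     // released only by the reference cluster
        return is_ref && (forks == 0);         // DATA : reference cluster AND no pending forks
    }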
     1400
     1401///////////////////////////////////
     1402void vmm_delete_vseg( pid_t    pid,
     1403                      intptr_t vaddr )
     1404{
     1405    process_t * process;    // local pointer on local process
     1406    vseg_t    * vseg;       // local pointer on local vseg containing vaddr
     1407
     1408    // get local pointer on local process descriptor
     1409    process = cluster_get_local_process_from_pid( pid );
     1410
     1411    if( process == NULL )
     1412    {
     1413        printk("\n[WARNING] in %s : cannot get local process descriptor\n",
     1414        __FUNCTION__ );
     1415        return;
     1416    }
     1417
     1418    // get local pointer on local vseg containing vaddr
     1419    vseg = vmm_vseg_from_vaddr( &process->vmm , vaddr );
     1420
     1421    if( vseg == NULL )
     1422    {
     1423        printk("\n[WARNING] in %s : cannot get vseg descriptor\n",
     1424        __FUNCTION__ );
     1425        return;
     1426    }
     1427
     1428    // call relevant function
     1429    vmm_remove_vseg( process , vseg );
     1430
      1431}  // end vmm_delete_vseg()
     1432
    12321433
    12331434/////////////////////////////////////////////
     
    12351436                              intptr_t   vaddr )
    12361437{
    1237     xptr_t   iter_xp;
    12381438    xptr_t   vseg_xp;
    12391439    vseg_t * vseg;
     1440    xptr_t   iter_xp;
    12401441
    12411442    // get extended pointers on VSL lock and root
    1242     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
     1443    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
    12431444    xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    12441445
     
    12491450    XLIST_FOREACH( root_xp , iter_xp )
    12501451    {
     1452        // get pointers on vseg
    12511453        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    12521454        vseg    = GET_PTR( vseg_xp );
    12531455
     1456        // return success when match
    12541457        if( (vaddr >= vseg->min) && (vaddr < vseg->max) )
    12551458        {
     
    12621465    // return failure
    12631466    remote_rwlock_rd_release( lock_xp );
    1264 
    12651467    return NULL;
    12661468
     
    14621664        vseg_init_from_ref( vseg , vseg_xp );
    14631665
     1666        // build extended pointer on VSL lock
     1667        xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
     1668 
     1669        // take the VSL lock in write mode
     1670        remote_rwlock_wr_acquire( lock_xp );
     1671
    14641672        // register local vseg in local VSL
    14651673        vmm_attach_vseg_to_vsl( vmm , vseg );
     1674 
     1675        // release the VSL lock
     1676        remote_rwlock_wr_release( lock_xp );
    14661677    }   
    14671678
     
    14861697uint32_t   cycle   = (uint32_t)hal_get_cycles();
    14871698thread_t * this    = CURRENT_THREAD;
    1488 xptr_t     this_xp = XPTR( local_cxy , this );
    14891699if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
    14901700printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
     
    17171927    error_t          error;           // value returned by called functions
    17181928
     1929#if DEBUG_VMM_HANDLE_PAGE_FAULT
     1930uint32_t   cycle = (uint32_t)hal_get_cycles();
     1931thread_t * this  = CURRENT_THREAD;
     1932if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
     1933printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
     1934__FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
     1935hal_vmm_display( process , true );
     1936#endif
     1937
    17191938    // get local vseg (access to reference VSL can be required)
    17201939    error = vmm_get_vseg( process,
     
    17231942    if( error )
    17241943    {
    1725         printk("\n[ERROR] in %s : vpn %x in process %x not in a registered vseg\n",
    1726         __FUNCTION__ , vpn , process->pid );
      1944        printk("\n[ERROR] in %s : vpn %x in process %x not in a registered vseg / cycle %d\n",
     1945        __FUNCTION__ , vpn , process->pid, (uint32_t)hal_get_cycles() );
    17271946       
    17281947        return EXCP_USER_ERROR;
    17291948    }
    17301949
    1731  #if DEBUG_VMM_HANDLE_PAGE_FAULT
    1732 uint32_t   cycle = (uint32_t)hal_get_cycles();
    1733 thread_t * this  = CURRENT_THREAD;
     1950#if DEBUG_VMM_HANDLE_PAGE_FAULT
     1951cycle = (uint32_t)hal_get_cycles();
    17341952if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
    1735 printk("\n[%s] threadr[%x,%x] enter for vpn %x / %s / cycle %d\n",
    1736 __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(vseg->type), cycle );
      1953printk("\n[%s] thread[%x,%x] found vseg %s / cycle %d\n",
     1954__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type), cycle );
    17371955#endif
    17381956
     
    19712189    error_t          error;
    19722190
     2191    thread_t * this = CURRENT_THREAD;
     2192
    19732193#if DEBUG_VMM_HANDLE_COW
    19742194uint32_t   cycle   = (uint32_t)hal_get_cycles();
    1975 thread_t * this    = CURRENT_THREAD;
    1976 xptr_t     this_xp = XPTR( local_cxy , this );
    19772195if( DEBUG_VMM_HANDLE_COW < cycle )
    19782196printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
    19792197__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
     2198hal_vmm_display( process , true );
    19802199#endif
    19812200
     
    19912210    if( error )
    19922211    {
    1993         printk("\n[PANIC] in %s : vpn %x in process %x not in a registered vseg\n",
    1994         __FUNCTION__, vpn, process->pid );
      2212        printk("\n[PANIC] in %s : vpn %x in thread[%x,%x] not in a registered vseg\n",
     2213        __FUNCTION__, vpn, process->pid, this->trdid );
    19952214
    19962215        return EXCP_KERNEL_PANIC;
  • trunk/kernel/mm/vmm.h

    r624 r625  
    4848 * Each slot can contain one user stack vseg. The first 4 Kbytes page in the slot is not
    4949 * mapped to detect stack overflow.
    50  * The slot index can be computed form the slot base address, and reversely.
    51  * All allocation / release operations are registered in the stack_bitmap, that completely
    52  * define the STACK zone status.
      50 * In this implementation, the slot index is defined by the user thread LTID.
      51 * All allocated stacks are registered in a bitmap defining the STACK zone state:
      52 * - The allocator checks that the requested slot has not already been allocated, and sets
      53 *   the corresponding bit in the bitmap.
      54 * - The de-allocator function resets the corresponding bit in the bitmap.
    5355 ********************************************************************************************/
    5456
     
    5759    busylock_t     lock;               /*! lock protecting STACK allocator                  */
    5860    vpn_t          vpn_base;           /*! first page of STACK zone                         */
    59     bitmap_t       bitmap;             /*! bit bector of allocated stacks                   */
     61    bitmap_t       bitmap;             /*! bit vector of allocated stacks                   */
    6062}
    6163stack_mgr_t;
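With the LTID-based policy described above, allocation reduces to a bitmap update plus an address computation. A hedged sketch of the allocator body, assuming the new vmm_stack_alloc() receives the thread LTID through the base argument of vmm_create_vseg() (as the call site in vmm.c suggests) and reusing the address formula of the former allocator (illustration only, not part of the patch):

    // allocate the stack slot attached to user thread 'ltid'
    // (the real allocator first checks that this slot is not already allocated)
    busylock_acquire( &mgr->lock );
    bitmap_set( &mgr->bitmap , ltid );         // mark the slot as allocated
    busylock_release( &mgr->lock );

    // the first page of each slot stays unmapped to detect stack overflow
    *vpn_base = mgr->vpn_base + ltid * CONFIG_VMM_STACK_SIZE + 1;
    *vpn_size = CONFIG_VMM_STACK_SIZE - 1;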
     
    8486    vpn_t          vpn_size;           /*! number of pages in MMAP zone                     */
    8587    vpn_t          first_free_vpn;     /*! first free page in MMAP zone                     */
    86     list_entry_t   zombi_list[32];     /*! array of roots of released vsegs lists           */
     88    xlist_entry_t  zombi_list[32];     /*! array of roots of released vsegs lists           */
    8789}
    8890mmap_mgr_t;
     
    109111typedef struct vmm_s
    110112{
    111         remote_rwlock_t  vsegs_lock;         /*! lock protecting the local VSL                  */
     113        remote_rwlock_t  vsl_lock;           /*! lock protecting the local VSL                  */
    112114        xlist_entry_t    vsegs_root;         /*! Virtual Segment List (complete in reference)   */
    113115        uint32_t         vsegs_nr;           /*! total number of local vsegs                    */
     
    132134
    133135/*********************************************************************************************
    134  * This function initialises the virtual memory manager attached to an user process.
      136 * This function makes a partial initialisation of the VMM attached to a user process.
     137 * The GPT must have been previously created, with the hal_gpt_create() function.
     138 * - It registers "args", "envs" vsegs in the VSL.
    135139 * - It initializes the STACK and MMAP allocators.
    136  * - It registers the "kentry", "args", "envs" vsegs in the VSL.
    137  * - It initializes the generic page table, calling the HAL specific hal_gpt_init() function.
    138  * - For TSAR it map all pages for the "kentry" vseg, that must be identity mapping.
    139  ******************************************************a**************************************
    140  * Implementation notes:
     140 * Note:
    141141 * - The "code" and "data" vsegs are registered by the elf_load_process() function.
    142  * - The "stack" vsegs are dynamically created by the thread_user_create() function.
    143  * - The "file", "anon", "remote" vsegs are dynamically created by the mmap() syscall.
     142 * - The "stack" vsegs are dynamically registered by the thread_user_create() function.
     143 * - The "file", "anon", "remote" vsegs are dynamically registered by the mmap() syscall.
    144144 *********************************************************************************************
    145145 * @ process   : pointer on process descriptor
    146146 * @ return 0 if success / return -1 if failure.
    147147 ********************************************************************************************/
    148 error_t vmm_init( struct process_s * process );
    149 
    150 /*********************************************************************************************
    151  * This function displays on TXY0 the list or registered vsegs for a given <process>.
    152  * It must be executed by a thread running in reference cluster.
    153  * If the <mapping> argument is true, it displays for each vseg all mapped PTEs in GPT.
     148error_t vmm_user_init( struct process_s * process );
     149
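Since vmm_user_init() now assumes an already created GPT, and no longer calls hal_gpt_create() or hal_vmm_kernel_update() itself (those calls were removed from the former vmm_init() in vmm.c above), callers are expected to follow roughly this order; a hedged sketch, assuming these steps are now performed by the process initialisation code (illustration only, not part of the patch):

    error_t error;

    error = hal_gpt_create( &process->vmm.gpt );                  // create an empty GPT
    if( error == 0 ) error = hal_vmm_kernel_update( process );    // map the kernel vsegs
    if( error == 0 ) error = vmm_user_init( process );            // partial user VMM init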
     150/*********************************************************************************************
     151 * This function re-initialises the VMM attached to an user process to prepare a new
     152 * call to the vmm_user_init() function after an exec() syscall.
     153 * It removes from the VMM of the process identified by the <process> argument all
     154 * non kernel vsegs (i.e. all user vsegs), by calling the vmm_remove_vseg() function.
     155 * - the vsegs are removed from the VSL.
     156 * - the corresponding GPT entries are removed from the GPT.
     157 * - the physical pages are released to the relevant kmem when they are not shared.
     158 * The VSL and the GPT are not modified for the kernel vsegs.
    154159 *********************************************************************************************
    155160 * @ process   : pointer on process descriptor.
    156  * @ mapping   : detailed mapping if true.
    157  ********************************************************************************************/
    158 void hal_vmm_display( struct process_s * process,
    159                   bool_t             mapping );
     161 ********************************************************************************************/
     162void vmm_user_reset( struct process_s * process );
    160163
    161164/*********************************************************************************************
    162165 * This function is called by the process_make_fork() function. It partially copies
    163166 * the content of a remote parent process VMM to the local child process VMM:
    164  * - all DATA, MMAP, REMOTE vsegs registered in the parent VSL are registered in the child
    165  *   VSL, and all valid GPT entries in parent GPT are copied to the child GPT.
    166  *   The WRITABLE flag is reset and the COW flag is set in child GPT.
    167  * - all CODE vsegs registered in the parent VSL are registered in the child VSL, but the
    168  *   GPT entries are not copied in the chilf GPT, that will be dynamically updated from
     167 * - All DATA, ANON, REMOTE vsegs registered in the parent VSL are registered in the
     168 *   child VSL. All valid PTEs in parent GPT are copied to the child GPT, but the
     169 *   WRITABLE flag is reset and the COW flag is set.
     170 * - All CODE vsegs registered in the parent VSL are registered in the child VSL, but the
     171 *   GPT entries are not copied in the child GPT, and will be dynamically updated from
    169172 *   the .elf file when a page fault is reported.
    170  * - all FILE vsegs registered in the parent VSL are registered in the child VSL, and all
     173 * - All FILE vsegs registered in the parent VSL are registered in the child VSL, and all
    171174 *   valid GPT entries in parent GPT are copied to the child GPT. The COW flag is not set.
    172  * - no STACK vseg is copied from  parent VMM to child VMM, because the child STACK vseg
     175 * - No STACK vseg is copied from  parent VMM to child VMM, because the child stack vseg
    173176 *   must be copied later from the cluster containing the user thread requesting the fork().
     177 * - The KERNEL vsegs required by the target architecture are re-created in the child
     178 *   VMM, from the local kernel process VMM, using the hal_vmm_kernel_update() function.
    174179 *********************************************************************************************
    175180 * @ child_process     : local pointer on local child process descriptor.
     
    196201
    197202/*********************************************************************************************
    198  * This global function modifies a GPT entry identified by the <process> and <vpn>
    199  * arguments in all clusters containing a process copy.
     203 * This function modifies a GPT entry identified by the <process> and <vpn> arguments
     204 * in all clusters containing a process copy.
    200205 * It must be called by a thread running in the reference cluster.
    201206 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies,
     
    240245/*********************************************************************************************
    241246 * This function allocates memory for a vseg descriptor, initialises it, and register it
    242  * in the VMM of the local process descriptor, that must be the reference process.
    243  * For the 'stack", "file", "anon", & "remote" types, it does not use the <base> argument,
    244  * but uses the STACK and MMAP virtual memory allocators.
     247 * in the VSL of the local process descriptor, that must be the reference process.
     248 * - For the FILE, ANON, & REMOTE types, it does not use the <base> and <size> arguments,
     249 *   but uses the specific MMAP virtual memory allocator.
     250 * - For the STACK type, it does not use the <size> argument, and the <base> argument
     251 *   defines the user thread LTID used by the specific STACK virtual memory allocator.
    245252 * It checks collision with all pre-existing vsegs.
    246  * To comply with the "on-demand" paging policy, this function does NOT modify the page table,
     253 * To comply with the "on-demand" paging policy, this function does NOT modify the GPT,
    247254 * and does not allocate physical memory for vseg data.
    248255 * It should be called by a local thread (could be a RPC thread if the client thread is not
    249  * running in the regerence cluster).
     256 * running in the reference cluster).
    250257 *********************************************************************************************
    251258 * @ process     : pointer on local processor descriptor.
    252259 * @ type        : vseg type.
    253  * @ base        : vseg base address (not used for dynamically allocated vsegs).
      260 * @ base        : vseg base address (or user thread ltid for a user stack vseg).
    254261 * @ size        : vseg size (bytes).
    255262 * @ file_offset : offset in file for CODE, DATA, FILE types.
     
    269276
    270277/*********************************************************************************************
    271  * This function removes from the local VMM of a process descriptor identified by the <pid>
    272  * argument a local vseg identified by its base address <vaddr> in user space.
    273  * It can be used for any type of vseg, but must be called by a local thread.
    274  * Use the RPC_VMM_DELETE_VSEG if the client thread is not local.
    275  * It does nothing if the process is not registered in the local cluster.
    276  * It does nothing if the vseg is not registered in the local process VSL.
    277  * - It removes from the local GPT all registered PTEs. If it is executed in the reference
    278  *   cluster, it releases the referenced physical pages, to the relevant kmem allocator,
    279  *   depending on vseg type and the pending forks counter.
    280  * - It removes the vseg from the local VSL, and release the vseg descriptor if not MMAP.
    281  *********************************************************************************************
    282  * @ process  : process identifier.
    283  * @ vaddr    : vseg base address in user space.
     278 * This function removes from the VMM of a process descriptor identified by the <process>
     279 * argument the vseg identified by the <vseg> argument. It can be used for any type of vseg.
     280 * As it uses local pointers, it must be called by a local thread.
     281 * It is called by the vmm_user_reset(), vmm_delete_vseg() and vmm_destroy() functions.
      282 * It triggers a kernel panic if the process is not registered in the local cluster,
     283 * or if the vseg is not registered in the process VSL.
     284 * For all vseg types, the vseg is detached from local VSL, and all associated PTEs are
     285 * unmapped from local GPT. Other actions depend on the vseg type:
     286 * - Regarding the vseg descriptor release:
     287 *   . for ANON and REMOTE, the vseg is not released, but registered in local zombi_list.
     288 *   . for STACK the vseg is released to the local stack allocator.
     289 *   . for all other types, the vseg is released to the local kmem.
     290 * - Regarding the physical pages release:
     291 *   . for KERNEL and FILE, the pages are not released to kmem.
     292 *   . for CODE and STACK, the pages are released to local kmem when they are not COW.
     293 *   . for DATA, ANON and REMOTE, the pages are released to relevant kmem only when
     294 *     the local cluster is the reference cluster.
     295 * The lock protecting the VSL must be taken by the caller.
     296 *********************************************************************************************
     297 * @ process  : local pointer on process.
     298 * @ vseg     : local pointer on vseg.
     299 ********************************************************************************************/
     300void vmm_remove_vseg( struct process_s * process,
     301                      struct vseg_s    * vseg );
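
The per-type policy documented above is easy to mis-read, so here is a minimal sketch (not the kernel implementation) of the physical pages rule, assuming the VSEG_TYPE_* constants from vseg.h and C99 <stdbool.h>:

    #include <stdbool.h>

    // Illustrative only : should vmm_remove_vseg() release the physical pages to kmem ?
    static bool pages_released_to_kmem( uint32_t type, bool local_is_reference, bool page_is_cow )
    {
        // FILE vsegs (and kernel vsegs, not shown here) : pages belong to the
        // file mapper or to the kernel, they are never released here
        if( type == VSEG_TYPE_FILE ) return false;

        // CODE and STACK vsegs : private pages, released unless still marked COW
        if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_STACK) ) return (page_is_cow == false);

        // DATA, ANON and REMOTE vsegs : released only when the local cluster
        // is the reference cluster
        return local_is_reference;
    }

The vseg descriptor itself follows the same per-type logic: ANON and REMOTE descriptors are kept in the local zombi list for reuse by the MMAP allocator, STACK slots are returned to the stack allocator, and all other descriptors are released to the local kmem.
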
     302
     303/*********************************************************************************************
      304 * This function calls the vmm_remove_vseg() function to remove, from the VMM of a local
      305 * process descriptor identified by the <pid> argument, the vseg identified by the <vaddr>
      306 * virtual address in user space.
     307 * Use the RPC_VMM_DELETE_VSEG to remove a vseg from a remote process descriptor.
     308 *********************************************************************************************
     309 * @ pid      : process identifier.
     310 * @ vaddr    : virtual address in user space.
    284311 ********************************************************************************************/
    285312void vmm_delete_vseg( pid_t    pid,
    286313                      intptr_t vaddr );
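
A hedged usage sketch of the rule above (delete locally, or go through the RPC_VMM_DELETE_VSEG service when the target process copy lives in another cluster); the rpc_vmm_delete_vseg_client() name and argument list are assumptions, not taken from this changeset:

    // Illustrative caller-side dispatch
    static void delete_vseg_anywhere( cxy_t target_cxy, pid_t pid, intptr_t vaddr )
    {
        if( target_cxy == local_cxy )
        {
            vmm_delete_vseg( pid , vaddr );                          // local call
        }
        else
        {
            rpc_vmm_delete_vseg_client( target_cxy, pid, vaddr );    // assumed RPC helper
        }
    }
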
    287 
    288 /*********************************************************************************************
    289  * This function insert a new <vseg> descriptor in the VSL identifed by the <vmm> argument.
    290  * and updates the vmm field in the vseg descriptor.
    291  * It takes the lock protecting VSL.
    292  *********************************************************************************************
    293  * @ vmm       : local pointer on local VMM.
    294  * @ vseg      : local pointer on local vseg descriptor.
    295  ********************************************************************************************/
    296 void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
    297                              vseg_t * vseg );
    298 
    299 /*********************************************************************************************
    300  * This function removes a vseg identified by the <vseg> argument from the local VSL
    301  * identified by the <vmm> argument and release the memory allocated to vseg descriptor,
    302  * for all vseg types, BUT the MMAP type (i.e. ANON or REMOTE).
    303  * - If the vseg has not the STACK or MMAP type, it is simply removed from the VSL,
    304  *   and vseg descriptor is released.
    305  * - If the vseg has the STACK type, it is removed from VSL, vseg descriptor is released,
    306  *   and the stack slot is returned to the local VMM_STACK allocator.
    307  * - If the vseg has the MMAP type, it is removed from VSL and is registered in zombi_list
    308  *   of the VMM_MMAP allocator for future reuse. The vseg descriptor is NOT released.
    309  *********************************************************************************************
    310  * @ vmm       : local pointer on local VMM.
    311  * @ vseg      : local pointer on local vseg to be removed.
    312  ********************************************************************************************/
    313 void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
    314                                vseg_t * vseg );
    315314
    316315/*********************************************************************************************
  • trunk/kernel/mm/vseg.c

    r623 r625  
    6161}
    6262
    63 /////////////////////
     63///////////////////////////
    6464vseg_t * vseg_alloc( void )
    6565{
  • trunk/kernel/mm/vseg.h

    r623 r625  
    7070/*******************************************************************************************
    7171 * This structure defines a virtual segment descriptor.
    72  * - The VSL contains only local vsegs, but is implemented as an xlist, because it can be
    73  *   accessed by thread running in a remote cluster.
    74  * - The zombi list is used by the local MMAP allocator. It is implemented as a local list.
     72 * The VSL contains only local vsegs, but is implemented as an xlist, because it can be
     73 * accessed by a thread running in a remote cluster.
     74 * The xlist field is also used to implement the zombi lists used by the MMAP allocator.
    7575 ******************************************************************************************/
    7676
     
    7878{
    7979    xlist_entry_t     xlist;        /*! all vsegs in same VSL                             */
    80     list_entry_t      zlist;        /*! all vsegs in same zombi list                      */
    8180    struct vmm_s    * vmm;          /*! pointer on associated VM manager                  */
    8281    uint32_t          type;         /*! vseg type                                         */
  • trunk/kernel/syscalls/sys_barrier.c

    r624 r625  
    22 * sys_barrier.c - Access a POSIX barrier.
    33 *
    4  * authors       Alain Greiner (2016,2017,2018)
     4 * authors       Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2525#include <hal_special.h>
    2626#include <hal_uspace.h>
     27#include <hal_vmm.h>
    2728#include <errno.h>
    2829#include <thread.h>
     
    5657    process_t * process = this->process;
    5758
     59#if (DEBUG_SYS_BARRIER || CONFIG_INSTRUMENTATION_SYSCALLS)
     60uint64_t     tm_start = hal_get_cycles();
     61#endif
     62
    5863#if DEBUG_SYS_BARRIER
    59 uint64_t   tm_start;
    60 uint64_t   tm_end;
    61 tm_start = hal_get_cycles();
    6264if( DEBUG_SYS_BARRIER < tm_start )
    6365printk("\n[%s] thread[%x,%x] enters for %s / count %d / cycle %d\n",
     
    184186        }  // end switch
    185187
     188    hal_fence();
     189
     190#if (DEBUG_SYS_BARRIER || CONFIG_INSTRUMENTATION_SYSCALLS)
     191uint64_t     tm_end = hal_get_cycles();
     192#endif
     193
    186194#if DEBUG_SYS_BARRIER
    187 tm_end = hal_get_cycles();
    188195if( DEBUG_SYS_BARRIER < tm_end )
    189 printk("\n[%s] thread[%x,%x] exit for %s / cost %d / cycle %d\n",
    190 __FUNCTION__, process->pid, this->trdid, sys_barrier_op_str(operation),
    191 (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
     196printk("\n[%s] thread[%x,%x] exit for %s / cycle %d\n",
     197__FUNCTION__, process->pid, this->trdid, sys_barrier_op_str(operation), (uint32_t)tm_end );
     198#endif
     199
     200#if CONFIG_INSTRUMENTATION_SYSCALLS
     201hal_atomic_add( &syscalls_cumul_cost[SYS_BARRIER] , tm_end - tm_start );
     202hal_atomic_add( &syscalls_occurences[SYS_BARRIER] , 1 );
    192203#endif
    193204
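
The sys_barrier.c hunk above shows the instrumentation pattern that this changeset applies to most syscalls: the cycle counter is sampled when either the per-syscall DEBUG flag or CONFIG_INSTRUMENTATION_SYSCALLS is set, and the cumulative cost / occurrence counters are updated at exit. A generic restatement of that skeleton (SYS_XYZ is a placeholder, not an actual index):

    #if (DEBUG_SYS_XYZ || CONFIG_INSTRUMENTATION_SYSCALLS)
    uint64_t tm_start = hal_get_cycles();                    // sampled once at entry
    #endif

    /* ... syscall body ... */

    #if (DEBUG_SYS_XYZ || CONFIG_INSTRUMENTATION_SYSCALLS)
    uint64_t tm_end = hal_get_cycles();                      // sampled once at exit
    #endif

    #if CONFIG_INSTRUMENTATION_SYSCALLS
    hal_atomic_add( &syscalls_cumul_cost[SYS_XYZ] , tm_end - tm_start );
    hal_atomic_add( &syscalls_occurences[SYS_XYZ] , 1 );
    #endif
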
  • trunk/kernel/syscalls/sys_close.c

    r594 r625  
    3535int sys_close ( uint32_t file_id )
    3636{
    37     error_t     error;
    38     xptr_t      file_xp;
     37    error_t            error;
     38    xptr_t             file_xp;
     39    cxy_t              file_cxy;
     40    vfs_file_t       * file_ptr;
     41    vfs_inode_type_t   file_type;
    3942
    4043        thread_t  * this    = CURRENT_THREAD;
     
    5457        if( file_id >= CONFIG_PROCESS_FILE_MAX_NR )
    5558        {
    56         printk("\n[ERROR] in %s : illegal file descriptor index = %d\n",
    57                __FUNCTION__ , file_id );
     59
     60#if DEBUG_SYSCALLS_ERROR
     61printk("\n[ERROR] in %s : illegal file descriptor index = %d\n",
     62__FUNCTION__ , file_id );
     63#endif
    5864                this->errno = EBADFD;
    5965                return -1;
     
    7379                return -1;
    7480    }
     81
     82    // get file type
     83    file_cxy  = GET_CXY( file_xp );
     84    file_ptr  = GET_PTR( file_xp );
     85    file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) );
     86
     87    if( file_type == INODE_TYPE_DIR )
     88        {
     89
     90#if DEBUG_SYSCALLS_ERROR
     91printk("\n[ERROR] in %s : file descriptor %d is a directory\n",
     92__FUNCTION__ , file_id );
     93#endif
     94                this->errno = EBADFD;
     95                return -1;
     96        }
    7597
    7698    // call the relevant VFS function
  • trunk/kernel/syscalls/sys_display.c

    r624 r625  
    9696            // check string in user space
    9797            error = vmm_get_vseg( process , (intptr_t)arg0 , &vseg );
    98 
    9998            if( error )
    10099            {
     
    110109            // ckeck string length
    111110            length = hal_strlen_from_uspace( string );
    112 
    113111            if( length >= 512 )
    114112            {
     
    150148            // get extended pointer on process PID in cluster CXY
    151149            xptr_t process_xp = cluster_get_process_from_pid_in_cxy( cxy , pid );
    152 
    153150                if( process_xp == XPTR_NULL )
    154151            {
  • trunk/kernel/syscalls/sys_exec.c

    r584 r625  
    22 * sys_exec.c - Kernel function implementing the "exec" system call.
    33 *
    4  * Authors   Alain Greiner (2016,2017)
      4 * Authors   Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    208208#if DEBUG_SYS_EXEC
    209209if( DEBUG_SYS_EXEC < tm_start )
    210 printk("\n[DBG] %s : thread[%x,%x] enter for path <%s> / cycle = %d\n",
     210printk("\n[%s] thread[%x,%x] enter for path <%s> / cycle = %d\n",
    211211__FUNCTION__, pid, this->trdid, exec_info.path, (uint32_t)tm_start );
    212212#endif
     
    256256    }
    257257
    258     assert( false , "we should not execute this code" );
     258    assert( false , "we should never execute this code" );
    259259
    260260    return 0; 
  • trunk/kernel/syscalls/sys_exit.c

    r619 r625  
    22 * sys_exit.c - Kernel function implementing the "exit" system call.
    33 *
    4  * Author    Alain Greiner (2016,2017,2018)
     4 * Author    Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    5353    pid_t       pid     = process->pid;
    5454
     55#if (DEBUG_SYS_EXIT || CONFIG_INSTRUMENTATION_SYSCALLS)
     56uint64_t     tm_start = hal_get_cycles();
     57#endif
     58
    5559#if DEBUG_SYS_EXIT
    56 uint64_t    tm_start;
    57 uint64_t    tm_end;
    58 tm_start = hal_get_cycles();
    5960if( DEBUG_SYS_EXIT < tm_start )
    6061printk("\n[%s] thread[%x,%x] enter / status %x / cycle %d\n",
    61 __FUNCTION__, process->pid, this->trdid , status , (uint32_t)tm_start );
     62__FUNCTION__, pid, this->trdid , status , (uint32_t)tm_start );
    6263#endif
    6364
     
    6667    owner_cxy = GET_CXY( owner_xp );
    6768    owner_ptr = GET_PTR( owner_xp );
    68 
    69 #if (DEBUG_SYS_EXIT & 1)
    70 if( DEBUG_SYS_EXIT < tm_start )
    71 printk("\n[%s] thread[%x,%x] get owner process in cluster %x\n",
    72 __FUNCTION__, process->pid, this->trdid, owner_cxy );
    73 #endif
    7469
    7570    // get local pointer on the main thread
     
    8075    parent_cxy = GET_CXY( parent_xp );
    8176    parent_ptr = GET_PTR( parent_xp );
    82 
    83 #if (DEBUG_SYS_EXIT & 1)
    84 if( DEBUG_SYS_EXIT < tm_start )
    85 printk("\n[%s] thread[%x,%x] get parent process in cluster %x\n",
    86 __FUNCTION__, process->pid, this->trdid, parent_cxy );
    87 #endif
    8877
    8978    // get pointers on the parent process main thread
     
    9685#if( DEBUG_SYS_EXIT & 1)
    9786if( DEBUG_SYS_EXIT < tm_start )
    98 printk("\n[%s] thread[%x,%x] detached process from TXT\n",
    99 __FUNCTION__, process->pid, this->trdid );
     87printk("\n[%s] thread[%x,%x] detached process %x from TXT\n",
     88__FUNCTION__, pid, this->trdid, pid );
    10089#endif
    10190
    10291    // mark for delete all process threads in all clusters,
    10392    // but the main thread and this calling thread
    104     process_sigaction( process->pid , DELETE_ALL_THREADS );
     93    process_sigaction( pid , DELETE_ALL_THREADS );
    10594
    10695#if( DEBUG_SYS_EXIT & 1)
    10796if( DEBUG_SYS_EXIT < tm_start )
    108 printk("\n[%s] thread[%x,%x] deleted all threads but itself\n",
    109 __FUNCTION__, process->pid, this->trdid );
     97printk("\n[%s] thread[%x,%x] deleted all threads in process %x (but itself)\n",
     98__FUNCTION__, pid, this->trdid, pid );
    11099#endif
    111100
     
    116105#if( DEBUG_SYS_EXIT & 1)
    117106if( tm_start > DEBUG_SYS_EXIT )
    118 printk("\n[%u] thread[%x,%x] marked iself for delete\n",
    119 __FUNCTION__, process->pid, this->trdid );
     107printk("\n[%s] thread[%x,%x] marked iself for delete\n",
     108__FUNCTION__, pid, this->trdid );
    120109#endif
    121110        thread_delete( XPTR( local_cxy , this ) , pid , true );
    122111    }
    123112
    124     // block this main thread
     113    // block the main thread
    125114    thread_block( XPTR( owner_cxy , main_ptr ) , THREAD_BLOCKED_GLOBAL );
    126115
    127116#if( DEBUG_SYS_EXIT & 1)
     117trdid_t main_trdid = hal_remote_l32( XPTR( owner_cxy , &main_ptr->trdid ) );
    128118if( tm_start > DEBUG_SYS_EXIT )
    129 printk("\n[%s] thread[%x,%x] blocked main thread\n",
    130 __FUNCTION__, process->pid, this->trdid );
     119printk("\n[%s] thread[%x,%x] blocked main thread[%x,%x]\n",
     120__FUNCTION__, pid, this->trdid, pid, main_trdid );
    131121#endif
    132122
    133     // atomically update owner process descriptor term_state to ask
    134     // the parent process sys_wait() function to delete the main thread
     123    // update term_state in owner process descriptor to ask
     124    // the parent process sys_wait() function to delete the process
    135125    term_state = (status & 0xFF) | PROCESS_TERM_EXIT;
    136126    hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) , term_state );
     
    139129if( tm_start > DEBUG_SYS_EXIT )
    140130printk("\n[%s] thread[%x,%x] set exit status %x in owner process\n",
    141 __FUNCTION__, process->pid, this->trdid, term_state );
     131__FUNCTION__, pid, this->trdid, term_state );
    142132#endif
    143133
     
    148138if( tm_start > DEBUG_SYS_EXIT )
    149139printk("\n[%s] thread[%x,%x] unblocked parent main thread in process %x\n",
    150 __FUNCTION__ , process->pid, this->trdid,
     140__FUNCTION__ , pid, this->trdid,
    151141hal_remote_l32( XPTR( parent_cxy , &parent_ptr->pid) ) );
    152142#endif
     
    154144    hal_fence();
    155145
     146#if (DEBUG_SYS_EXIT || CONFIG_INSTRUMENTATION_SYSCALLS)
     147uint64_t     tm_end = hal_get_cycles();
     148#endif
     149
    156150#if DEBUG_SYS_EXIT
    157 tm_end = hal_get_cycles();
    158151if( DEBUG_SYS_EXIT < tm_end )
    159 printk("\n[%s] thread[%x,%x] exit / status %x / cost = %d / cycle %d\n",
    160 __FUNCTION__, process->pid, this->trdid, status,
    161 (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
     152printk("\n[%s] thread[%x,%x] exit / term_state %x / cycle %d\n",
     153__FUNCTION__, pid, this->trdid, term_state,  (uint32_t)tm_end );
     154#endif
     155
     156#if CONFIG_INSTRUMENTATION_SYSCALLS
     157hal_atomic_add( &syscalls_cumul_cost[SYS_EXIT] , tm_end - tm_start );
     158hal_atomic_add( &syscalls_occurences[SYS_EXIT] , 1 );
    162159#endif
    163160
  • trunk/kernel/syscalls/sys_fork.c

    r594 r625  
    22 * sys_fork.c - Kernel function implementing the "fork" system call.
    33 *
    4  * Authors  Alain Greiner  (2016,2017)
     4 * Authors  Alain Greiner  (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    7373#if DEBUG_SYS_FORK
    7474if( DEBUG_SYS_FORK < tm_start )
    75 printk("\n[DBG] %s : thread[%x,%x] enter / cycle =  %d\n",
     75printk("\n[%s] thread[%x,%x] enter / cycle =  %d\n",
    7676__FUNCTION__, parent_pid, parent_thread_ptr->trdid, (uint32_t)tm_start );
    7777#endif
     
    151151
    152152    // set remote child CPU context from parent_thread register values
      153    // replicate the parent thread kernel stack to the child thread descriptor,
      154    // and finally unblock the child thread.
    153155    hal_cpu_context_fork( XPTR( child_cxy , child_thread_ptr ) );
    154156
    155157    // From this point, both parent and child threads execute the following code,
    156     // but they can be distinguished by the (CURRENT_THREAD,local_cxy) values.
    157     // - parent unblock child, and return child PID to user application.
    158     // - child thread does nothing, and return 0 to user pplication
    159     // The child thread will only execute it when it is unblocked by parent thread.
     158    // but child thread will only execute it after being unblocked by parent thread.
     159    // They can be distinguished by the (CURRENT_THREAD,local_cxy) values.
      160    // - the parent thread returns the child PID to the user application.
      161    // - the child thread returns 0 to the user application.
    160162
    161163    thread_t * current = CURRENT_THREAD;
     
    165167#endif
    166168
     169    if( (current == parent_thread_ptr) && (local_cxy == parent_cxy) )   // parent thread
     170    {
     171
    167172#if DEBUG_SYS_FORK
    168173if( DEBUG_SYS_FORK < tm_end )
    169 printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
    170 __FUNCTION__, current->process->pid, current->trdid, (uint32_t)tm_end );
     174printk("\n[%s] parent thread[%x,%x] exit / child_pid %x / cycle %d\n",
     175__FUNCTION__, current->process->pid, current->trdid, child_pid, (uint32_t)tm_end );
    171176#endif
    172177
    173     if( (current == parent_thread_ptr) && (local_cxy == parent_cxy) )   // parent thread
    174     {
    175         // parent_thread unblock child_thread
    176         thread_unblock( XPTR( child_cxy , child_thread_ptr ) , THREAD_BLOCKED_GLOBAL );
    177 
    178         // only parent contribute to instrumentation
    179 
     178// only parent contribute to instrumentation
    180179#if CONFIG_INSTRUMENTATION_SYSCALLS
    181180hal_atomic_add( &syscalls_cumul_cost[SYS_FORK] , tm_end - tm_start );
     
    186185        else                                                               // child_thread
    187186    {
     187
     188#if DEBUG_SYS_FORK
     189if( DEBUG_SYS_FORK < tm_end )
     190printk("\n[%s] child thread[%x,%x] exit / child_pid %x / cycle %d\n",
     191__FUNCTION__, current->process->pid, current->trdid, child_pid, (uint32_t)tm_end );
     192#endif
     193
    188194        return 0;
    189195    }
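
The parent/child comment rewritten in this hunk describes the classic fork() contract: the parent gets the new process PID, the child gets 0. A minimal user-side illustration, using only standard calls (not code from this repository):

    #include <stdio.h>
    #include <unistd.h>

    int main( void )
    {
        int ret = fork();

        if     ( ret <  0 ) printf( "fork failed\n" );                      // error, still in parent
        else if( ret == 0 ) printf( "child  : my pid is %d\n", getpid() );  // child path
        else                printf( "parent : child pid is %d\n", ret );    // parent path

        return 0;
    }
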
  • trunk/kernel/syscalls/sys_get_config.c

    r624 r625  
    22 * sys_get_config.c - get hardware platform parameters.
    33 *
    4  * Author    Alain Greiner (2016,2017,2018)
     4 * Author    Alain Greiner (2016,2017,2018,2019)
    55 * 
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2424#include <hal_kernel_types.h>
    2525#include <hal_uspace.h>
     26#include <hal_vmm.h>
    2627#include <hal_special.h>
    2728#include <errno.h>
     
    4849    process_t * process = this->process;
    4950
     51#if (DEBUG_SYS_GET_CONFIG || CONFIG_INSTRUMENTATION_SYSCALLS)
     52uint64_t     tm_start = hal_get_cycles();
     53#endif
     54
    5055#if DEBUG_SYS_GET_CONFIG
    51 uint64_t     tm_start;
    52 uint64_t     tm_end;
    5356tm_start = hal_get_cycles();
    5457if( DEBUG_SYS_GET_CONFIG < tm_start )
     
    114117    hal_fence();
    115118
     119#if (DEBUG_SYS_GET_CONFIG || CONFIG_INSTRUMENTATION_SYSCALLS)
     120uint64_t     tm_end = hal_get_cycles();
     121#endif
     122
    116123#if DEBUG_SYS_GET_CONFIG
    117 tm_end = hal_get_cycles();
    118124if( DEBUG_SYS_GET_CONFIG < tm_end )
    119125printk("\n[DBG] %s : thread %x exit / process %x / cost %d / cycle %d\n",
     
    121127#endif
    122128
     129#if CONFIG_INSTRUMENTATION_SYSCALLS
     130hal_atomic_add( &syscalls_cumul_cost[SYS_GET_CONFIG] , tm_end - tm_start );
     131hal_atomic_add( &syscalls_occurences[SYS_GET_CONFIG] , 1 );
     132#endif
     133
    123134        return 0;
    124135
  • trunk/kernel/syscalls/sys_get_core.c

    r624 r625  
    2424#include <hal_kernel_types.h>
    2525#include <hal_uspace.h>
     26#include <hal_vmm.h>
    2627#include <hal_special.h>
    2728#include <errno.h>
  • trunk/kernel/syscalls/sys_get_cycle.c

    r624 r625  
    2424#include <hal_kernel_types.h>
    2525#include <hal_uspace.h>
     26#include <hal_vmm.h>
    2627#include <hal_special.h>
    2728#include <errno.h>
  • trunk/kernel/syscalls/sys_is_fg.c

    r624 r625  
    22 * sys_fg.c - Kernel function implementing the "is_fg" system call.
    33 *
    4  * Author    Alain Greiner (2016,2017,2018)
     4 * Author    Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    2525#include <hal_kernel_types.h>
    2626#include <hal_uspace.h>
     27#include <hal_vmm.h>
    2728#include <hal_special.h>
    2829#include <errno.h>
  • trunk/kernel/syscalls/sys_mmap.c

    r624 r625  
    22 * sys_mmap.c - map files, memory or devices into process virtual address space
    33 *
    4  * Authors       Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *               Alain Greiner (2016,2017,2018)
     4 * Authors       Alain Greiner (2016,2017,2018,2019)
    65 *
    76 * Copyright (c) UPMC Sorbonne Universites
     
    2524#include <hal_kernel_types.h>
    2625#include <hal_uspace.h>
     26#include <hal_vmm.h>
    2727#include <hal_irqmask.h>
    2828#include <shared_syscalls.h>
  • trunk/kernel/syscalls/sys_munmap.c

    r624 r625  
    22 * sys_munmap.c - unmap a mapping from process virtual address space
    33 *
    4  * Authors       Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *               Alain Greiner (2016,2017,2018)
     4 * Authors       Alain Greiner (2016,2017,2018,2019)
    65 *
    76 * Copyright (c) UPMC Sorbonne Universites
     
    2524#include <hal_kernel_types.h>
    2625#include <hal_uspace.h>
     26#include <hal_vmm.h>
    2727#include <hal_irqmask.h>
    2828#include <shared_syscalls.h>
  • trunk/kernel/syscalls/sys_mutex.c

    r624 r625  
    22 * sys_mutex.c - Access a POSIX mutex.
    33 *
    4  * Author    Alain Greiner (2016,2017,2018)
     4 * Author    Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2424#include <hal_kernel_types.h>
    2525#include <hal_special.h>
     26#include <hal_vmm.h>
    2627#include <errno.h>
    2728#include <thread.h>
     
    5657    process_t * process = this->process;
    5758
     59#if (DEBUG_SYS_MUTEX || CONFIG_INSTRUMENTATION_SYSCALLS)
     60uint64_t     tm_start = hal_get_cycles();
     61#endif
     62
    5863#if DEBUG_SYS_MUTEX
    59 uint64_t    tm_start;
    60 uint64_t    tm_end;
    61 tm_start = hal_get_cycles();
    6264if( DEBUG_SYS_MUTEX < tm_start )
    63 printk("\n[DBG] %s : thread %x in process %x enter for %s / cycle %d\n",
     65printk("\n[%s] thread[%x,%x] enter for %s / cycle %d\n",
    6466__FUNCTION__, this->trdid, process->pid, sys_mutex_op_str( operation ), (uint32_t)tm_start );
    6567#endif
     
    221223    hal_fence();
    222224
     225#if (DEBUG_SYS_MUTEX || CONFIG_INSTRUMENTATION_SYSCALLS)
     226uint64_t     tm_end = hal_get_cycles();
     227#endif
     228
    223229#if DEBUG_SYS_MUTEX
    224 tm_end = hal_get_cycles();
    225 if( DEBUG_SYS_MUTEX < tm_start )
    226 printk("\n[DBG] %s : thread %x in process %x exit for %s / cost %d / cycle %d\n",
    227 __FUNCTION__, this->trdid, process->pid, sys_mutex_op_str( operation ),
    228 (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
     230if( DEBUG_SYS_MUTEX < tm_end )
     231printk("\n[%s] thread[%x,%x] exit for %s / cycle %d\n",
     232__FUNCTION__, this->trdid, process->pid, sys_mutex_op_str( operation ), (uint32_t)tm_end );
     233#endif
     234
     235#if CONFIG_INSTRUMENTATION_SYSCALLS
     236hal_atomic_add( &syscalls_cumul_cost[SYS_MUTEX] , tm_end - tm_start );
     237hal_atomic_add( &syscalls_occurences[SYS_MUTEX] , 1 );
    229238#endif
    230239
  • trunk/kernel/syscalls/sys_open.c

    r610 r625  
    22 * sys_open.c - open a file.
    33 *
    4  * Author        Alain Greiner (2016,2017)
     4 * Author        Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/syscalls/sys_opendir.c

    r624 r625  
    22 * sys_opendir.c - Open an user accessible VFS directory.
    33 *
    4  * Author        Alain Greiner (2016,2017,2018)
     4 * Author        Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2525#include <hal_kernel_types.h>
    2626#include <hal_uspace.h>
     27#include <hal_vmm.h>
    2728#include <thread.h>
    2829#include <process.h>
  • trunk/kernel/syscalls/sys_read.c

    r624 r625  
    11/*
    2  * sys_read.c - read bytes from a file
     2 * sys_read.c - Kernel function implementing the "read" system call.
    33 *
    4  * Author     Alain Greiner (2016,2017,2018)
     4 * Author     Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2424#include <kernel_config.h>
    2525#include <hal_kernel_types.h>
     26#include <hal_vmm.h>
    2627#include <hal_uspace.h>
    2728#include <hal_irqmask.h>
  • trunk/kernel/syscalls/sys_readdir.c

    r624 r625  
    2525#include <hal_kernel_types.h>
    2626#include <hal_uspace.h>
     27#include <hal_vmm.h>
    2728#include <errno.h>
    2829#include <thread.h>
  • trunk/kernel/syscalls/sys_thread_exit.c

    r619 r625  
    6464uint64_t     tm_start = hal_get_cycles();
    6565if( DEBUG_SYS_THREAD_EXIT < tm_start )
    66 printk("\n[%s] thread[%x,%x] / main => delete process / cycle %d\n",
     66printk("\n[%s] thread[%x,%x] is main => delete process / cycle %d\n",
    6767__FUNCTION__ , pid , trdid , (uint32_t)tm_start );
    6868#endif
     
    7676uint64_t     tm_start = hal_get_cycles();
    7777if( DEBUG_SYS_THREAD_EXIT < tm_start )
    78 printk("\n[%s] thread[%x,%x] / not main => delete thread / cycle %d\n",
     78printk("\n[%s] thread[%x,%x] is not main => delete thread / cycle %d\n",
    7979__FUNCTION__ , pid , trdid , (uint32_t)tm_start );
    8080#endif
  • trunk/kernel/syscalls/sys_wait.c

    r624 r625  
    22 * sys_wait.c - wait termination or blocking of a child process.
    33 *
    4  * Author    Alain Greiner (2016,2017,2018)
     4 * Author    Alain Greiner (2016,2017,2018,2019)
    55 * 
    66 * Copyright (c) UPMC Sorbonne Universites
     
    5656uint64_t    cycle = hal_get_cycles();
    5757if( DEBUG_SYS_WAIT < cycle )
    58 printk("\n[DBG] %s : thread %x in process %x enter / cycle %d\n",
    59 __FUNCTION__, this, process->pid, (uint32_t)cycle );
     58printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
     59__FUNCTION__, pid, this->trdid, (uint32_t)cycle );
    6060#endif
    6161
     
    6767
    6868#if DEBUG_SYSCALLS_ERROR
    69 printk("\n[ERROR] in %s : status buffer %x unmapped for thread %x in process %x\n",
    70 __FUNCTION__ , (intptr_t)status, this->trdid , process->pid );
     69printk("\n[ERROR] in %s : status buffer %x unmapped for thread[%x,%x]\n",
     70__FUNCTION__ , (intptr_t)status, pid, this->trdid );
    7171hal_vmm_display( process , false );
    7272#endif
     
    8686
    8787#if DEBUG_SYSCALLS_ERROR
    88 printk("\n[ERROR] in %s : calling thread %x is not thread 0 in owner cluster %x\n",
    89 __FUNCTION__ , trdid , owner_cxy );
     88printk("\n[ERROR] in %s : calling thread[%x,%x] is not thread 0 in owner cluster %x\n",
     89__FUNCTION__ , pid, this->trdid , owner_cxy );
    9090#endif
    9191        this->errno = EINVAL;
     
    119119            child_thread = hal_remote_lpt(XPTR( child_cxy , &child_ptr->th_tbl[0] ));
    120120
    121 #if (DEBUG_SYS_WAIT & 1)
    122 cycle = hal_get_cycles();
    123 if( DEBUG_SYS_WAIT < cycle )
    124 printk("\n[DBG] %s : thread %x in process %x check child %x / state %x\n",
    125 __FUNCTION__, this, process->pid, child_pid, child_state );
    126 #endif
    127121            // test if this child process is terminated,
    128122            // but termination not yet reported to parent process
     
    148142if( DEBUG_SYS_WAIT < cycle )
    149143{
    150     if     ( child_state & PROCESS_TERM_EXIT )
    151         printk("\n[DBG] %s : thread %x in process %x exit / child %x exit / cycle %d\n",
    152         __FUNCTION__, this, process->pid, child_pid, (uint32_t)cycle );
     144    if( child_state & PROCESS_TERM_EXIT )
     145        printk("\n[%s] thread[%x,%x] exit : child process %x terminated / cycle %d\n",
     146        __FUNCTION__, pid, this->trdid, child_pid, (uint32_t)cycle );
    153147    if( child_state & PROCESS_TERM_KILL )
    154         printk("\n[DBG] %s : thread %x in process %x exit / child %x killed / cycle %d\n",
    155         __FUNCTION__, this, process->pid, child_pid, (uint32_t)cycle );
     148        printk("\n[%s] thread[%x,%x] exit : child process %x killed / cycle %d\n",
     149        __FUNCTION__, pid, this->trdid, child_pid, (uint32_t)cycle );
    156150    if( child_state & PROCESS_TERM_STOP )
    157         printk("\n[DBG] %s : thread %x in process %x exit / child %x stopped / cycle %d\n",
    158         __FUNCTION__, this, process->pid, child_pid, (uint32_t)cycle );
     151        printk("\n[%s] thread[%x,%x] exit : child process %x stopped / cycle %d\n",
     152        __FUNCTION__, pid, this->trdid, child_pid, (uint32_t)cycle );
    159153}
    160154#endif
     
    165159        }  // end loop on children
    166160       
    167         // we execute this code when no child terminated:
      161        // we execute this code when no child state change has been detected
    168162        // - release the lock protecting children list,
    169163        // - block on the WAIT condition
     
    179173cycle = hal_get_cycles();
    180174if( DEBUG_SYS_WAIT < cycle )
    181 printk("\n[DBG] %s : thread %x in process %x block & deschedule / cycle %d\n",
    182 __FUNCTION__, this, process->pid, (uint32_t)cycle );
     175printk("\n[%s] thread[%x,%x] block & deschedule / cycle %d\n",
     176__FUNCTION__, pid, this->trdid, (uint32_t)cycle );
    183177#endif
    184178
     
    189183cycle = hal_get_cycles();
    190184if( DEBUG_SYS_WAIT < cycle )
    191 printk("\n[DBG] %s : thread %x in process %x unblock & resume / cycle %d\n",
    192 __FUNCTION__, this, process->pid, (uint32_t)cycle );
     185printk("\n[%s] thread[%x,%x] resume / cycle %d\n",
     186__FUNCTION__, pid, this->trdid, (uint32_t)cycle );
    193187#endif
    194188
  • trunk/kernel/syscalls/sys_write.c

    r624 r625  
    22 * sys_write.c - Kernel function implementing the "write" system call.
    33 *
    4  * Author        Alain Greiner (2016,2017,2018)
     4 * Author        Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    7676
    7777#if DEBUG_SYS_WRITE
    78 tm_start = hal_get_cycles();
    7978if( DEBUG_SYS_WRITE < tm_start )
    80 printk("\n[%s] thread[%x,%x] enter / vaddr %x / count %d / cycle %d\n",
     79printk("\n[%s] thread[%x,%x] enter / vaddr %x / %d bytes / cycle %d\n",
    8180__FUNCTION__, process->pid, this->trdid, vaddr, count, (uint32_t)tm_start );
    8281#endif
     
    140139    hal_enable_irq( &save_sr );
    141140
    142    // action depend on file type
    143     if( file_type == INODE_TYPE_FILE )  // write to file mapper
      141    // action depends on file type
     142    if( file_type == INODE_TYPE_FILE )  // write to a file mapper
    144143    {
    145144        // check file writable
     
    180179        xptr_t inode_xp = XPTR( file_cxy , inode_ptr );
    181180        vfs_inode_update_size( inode_xp , file_offset + count );
    182 
    183181    }
    184182    else if( file_type == INODE_TYPE_DEV )  // write to TXT device
  • trunk/libs/libalmosmkh/almosmkh.h

    r623 r625  
    135135 * It can be called by any thread running in any cluster.
    136136 ***************************************************************************************
     137 * @ cxy      : [in] target cluster identifier.
    137138 * @ pid      : [in] process identifier.
    138139 * @ return 0 if success / return -1 if illegal argument.
  • trunk/libs/mini-libc/stdio.c

    r624 r625  
    403403   
    404404    // check stream valid
    405     if( stream->key != VALID_OPEN_FILE ) return EOF;
     405    if( stream->key != VALID_OPEN_FILE )
     406    {
     407        printf("\n[error in %s] stream %x non registered\n", __FUNCTION__, stream );
     408        return -1;
     409    }
    406410
    407411    va_start( args, format );
     
    409413    va_end( args );
    410414
     415    // check format
    411416    if ( count < 0 )
    412417    {
    413         display_string( "fprintf : xprintf failure" );
     418        printf("\n[error in %s] unsupported format %s\n", __FUNCTION__, format );
    414419        return -1;
    415420    }
    416     else
    417     {
    418         // get file descriptor from file pointer
    419         fd = stream->fd;
    420 
    421         // set terminating NUL
    422         string[count] = 0;
    423 
    424 printf("\n[%s] fd = %d for string : %s\n", __FUNCTION__, fd, string );
    425 
    426 idbg();
    427 
    428         // copy string to file
    429         writen = write( fd , &string , count );
    430 
    431         if( writen != count )
    432         {
    433             display_string( "fprintf : write failure" );
    434             return -1;
    435         }
    436 
    437 idbg();
    438 
    439         return writen;
    440     }
     421
     422    // get file descriptor from file pointer
     423    fd = stream->fd;
     424
     425    // set terminating NUL
     426    string[count] = 0;
     427
     428    // copy string to file
     429    writen = write( fd , &string , count );
     430
     431    // check write
      432    if( writen != count )
      433    {
      434        printf("\n[error in %s] cannot write to stream %x\n", __FUNCTION__, stream );
     435        return -1;
     436    }
     437
     438    return writen;
     439
    441440}  // end fprintf()
    442441
  • trunk/libs/mini-libc/unistd.c

    r589 r625  
    22 * unistd.c - User level <unistd> library implementation.
    33 *
    4  * Author     Alain Greiner (2016,2017,2018)
     4 * Author     Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
  • trunk/params-hard.mk

    r624 r625  
    55Y_SIZE    = 2
    66NB_PROCS  = 1
    7 NB_TTYS   = 3
     7NB_TTYS   = 2
    88IOC_TYPE  = IOC_BDV
    99TXT_TYPE  = TXT_TTY
  • trunk/user/init/init.c

    r623 r625  
    7878        {
    7979            // INIT display CHILD[i] process PID
    80             snprintf( string , 64 , "[init] created KSH[%d] / pid = %x", i , ret_fork );
     80            snprintf( string , 64 , "[init] (pid 0x1) created ksh[%d] (pid %x)", i , ret_fork );
    8181            display_string( string );
    8282
  • trunk/user/ksh/ksh.c

    r624 r625  
    5454#define LOG_DEPTH      (32)     // max number of registered commands
    5555#define MAX_ARGS           (32)     // max number of arguments in a command
     56#define PATH_MAX_SIZE  (256)    // max number of characters in a pathname
    5657
    5758#define DEBUG_MAIN          0
    5859#define DEBUG_INTER         0
    59 #define DEBUG_PARSE         0
     60#define DEBUG_EXECUTE       0
    6061#define DEBUG_CMD_CAT       0
    6162#define DEBUG_CMD_CP        0
     
    9091//////////////////////////////////////////////////////////////////////////////////////////
    9192
    92 ksh_cmd_t       command[];                // array of supported commands
    93 
    94 log_entry_t     log_entries[LOG_DEPTH];   // array of registered commands
    95 
    96 unsigned int    ptw;                      // write pointer in log_entries[]
    97 unsigned int    ptr;                      // read pointer in log_entries[]
    98 
    99 pthread_attr_t  attr;                     // interactive thread attributes
    100 
    101 sem_t           semaphore;                // block interactive thread when zero
    102 
    103 pthread_t       trdid;                    // interactive thread identifier
     93ksh_cmd_t       command[];                  // array of supported commands
     94
     95log_entry_t     log_entries[LOG_DEPTH];     // array of registered commands
     96
     97unsigned int    ptw;                        // write pointer in log_entries[]
     98unsigned int    ptr;                        // read pointer in log_entries[]
     99
     100pthread_attr_t  attr;                       // interactive thread attributes
     101
     102sem_t           semaphore;                  // block interactive thread when zero
     103
     104pthread_t       trdid;                      // interactive thread identifier
     105
     106char            pathname[PATH_MAX_SIZE];    // pathname for a file
     107
     108char            pathnew[PATH_MAX_SIZE];     // used by the rename command
    104109 
    105110//////////////////////////////////////////////////////////////////////////////////////////
     
    110115static void cmd_cat( int argc , char **argv )
    111116{
    112         char         * path;
    113117    struct stat    st;
    114118    int            fd;
     
    128132    }
    129133
    130     path = argv[1];
     134    strcpy( pathname , argv[1] );
    131135
    132136    // open the file
    133     fd = open( path , O_RDONLY , 0 );
     137    fd = open( pathname , O_RDONLY , 0 );
    134138    if (fd < 0)
    135139    {
    136             printf("  error: cannot open file <%s>\n", path);
     140            printf("  error: cannot open file <%s>\n", pathname );
    137141
    138142        sem_post( &semaphore );
     
    141145
    142146#if DEBUG_CMD_CAT
    143 snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, path );
     147snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, pathname );
    144148display_string( string );
    145149#endif
    146150
    147151    // get file stats
    148     if ( stat( path , &st ) == -1)
    149     {
    150             printf("  error: cannot stat <%s>\n", path);
     152    if ( stat( pathname , &st ) == -1)
     153    {
     154            printf("  error: cannot stat <%s>\n", pathname );
    151155
    152156            close(fd);
     
    157161        if ( S_ISDIR(st.st_mode) )
    158162    {
    159             printf("  error: <%s> is a directory\n", path);
     163            printf("  error: <%s> is a directory\n", pathname );
    160164
    161165            close(fd);
     
    174178    if( size == 0 )
    175179    {
    176             printf("  error: size = 0 for <%s>\n", path);
     180            printf("  error: size = 0 for <%s>\n", pathname );
    177181
    178182            close(fd);
     
    186190    if ( buf == NULL )
    187191    {
    188             printf("  error: cannot map file <%s>\n", path );
     192            printf("  error: cannot map file <%s>\n", pathname );
    189193
    190194            close(fd);
     
    196200snprintf( string , 64 , "[ksh] %s : maped file %d to buffer %x", __FUNCTION__, fd , buf );
    197201display_string( string );
    198 // unsigned int pid = getpid();
    199 // unsigned int cxy = pid >> 16;
    200 // display_vmm( cxy , pid );
    201202#endif
    202203
     
    207208    if( munmap( buf , size ) )
    208209    {
    209             printf("  error: cannot unmap file <%s>\n", path );
     210            printf("  error: cannot unmap file <%s>\n", pathname );
    210211    }
    211212
     
    213214snprintf( string , 64 , "[ksh] %s : unmaped file %d from buffer %x", __FUNCTION__, fd , buf );
    214215display_string( string );
    215 // display_vmm( cxy , pid );
    216216#endif
    217217
     
    219219        if( close( fd ) )
    220220    {
    221             printf("  error: cannot close file <%s>\n", path );
     221            printf("  error: cannot close file <%s>\n", pathname );
    222222    }
    223223
     
    230230static void cmd_cd( int argc , char **argv )
    231231{
    232         char * path;
    233 
    234232        if (argc != 2)
    235233    {
     
    238236    else
    239237    {
    240             path = argv[1];
     238            strcpy( pathname , argv[1] );
    241239
    242240        // call the relevant syscall
    243         if( chdir( path ) )
    244         {
    245             printf("  error: cannot found <%s> directory\n", path );
     241        if( chdir( pathname ) )
     242        {
     243            printf("  error: cannot found <%s> directory\n", pathname );
    246244        }
    247245    }
     
    257255        int          src_fd;
    258256    int          dst_fd;
    259         char       * srcpath;
    260     char       * dstpath;
    261257        int          size;          // source file size
    262258        int          bytes;         // number of transfered bytes
     
    276272        }
    277273
    278     srcpath = argv[1];
    279     dstpath = argv[2];
    280 
    281274    // open the src file
    282     src_fd = open( srcpath , O_RDONLY , 0 );
     275    strcpy( pathname , argv[1] );
     276    src_fd = open( pathname , O_RDONLY , 0 );
    283277
    284278    if ( src_fd < 0 )
    285279    {
    286280        dst_fd = -1;
    287             printf("  error: cannot open <%s>\n", srcpath );
     281            printf("  error: cannot open <%s>\n", argv[1] );
    288282            goto cmd_cp_exit;
    289283    }
    290284
    291285#if DEBUG_CMD_CP
    292 snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, srcpath );
     286snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, argv[1] );
    293287display_string( string );
    294288#endif
    295289
    296290    // get file stats
    297     if ( stat( srcpath , &st ) )
     291    if ( stat( pathname , &st ) )
    298292    {
    299293        dst_fd = -1;
    300             printf("  error: cannot stat <%s>\n", srcpath);
     294            printf("  error: cannot stat <%s>\n", argv[1] );
    301295            goto cmd_cp_exit;
    302296    }
    303297
    304298#if DEBUG_CMD_CP
    305 snprintf( string , 64 , "[ksh] %s : got stats for %s", __FUNCTION__, srcpath );
     299snprintf( string , 64 , "[ksh] %s : got stats for %s", __FUNCTION__, argv[1] );
    306300display_string( string );
    307301#endif
     
    310304    {
    311305        dst_fd = -1;
    312                 printf("  error: <%s> is a directory\n", srcpath);
     306                printf("  error: <%s> is a directory\n", argv[1] );
    313307                goto cmd_cp_exit;
    314308        }
     
    318312
    319313        // open the dst file
    320         dst_fd = open( dstpath , O_CREAT|O_TRUNC|O_RDWR , 0 );
     314    strcpy( pathname , argv[2] );
     315        dst_fd = open( pathname , O_CREAT|O_TRUNC|O_RDWR , 0 );
    321316
    322317        if ( dst_fd < 0 )
    323318    {
    324                 printf("  error: cannot open <%s>\n", dstpath );
     319                printf("  error: cannot open <%s>\n", argv[2] );
    325320                goto cmd_cp_exit;
    326321        }
    327322
    328323#if DEBUG_CMD_CP
    329 snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, dstpath );
    330 display_string( string );
    331 #endif
    332 
    333         if ( stat( dstpath , &st ) )
    334     {
    335                 printf("  error: cannot stat <%s>\n", dstpath );
     324snprintf( string , 64 , "[ksh] %s : file %s open", __FUNCTION__, argv[2] );
     325display_string( string );
     326#endif
     327
     328        if ( stat( pathname , &st ) )
     329    {
     330                printf("  error: cannot stat <%s>\n", argv[2] );
    336331                goto cmd_cp_exit;
    337332        }
    338333
    339334#if DEBUG_CMD_CP
    340 snprintf( string , 64 , "[ksh] %s : got stats for %s", __FUNCTION__, dstpath );
     335snprintf( string , 64 , "[ksh] %s : got stats for %s", __FUNCTION__, argv[2] );
    341336display_string( string );
    342337#endif
     
    344339        if ( S_ISDIR(st.st_mode ) )
    345340    {
    346                 printf("  error: <%s> is a directory\n", dstpath );
     341                printf("  error: <%s> is a directory\n", argv[2] );
    347342                goto cmd_cp_exit;
    348343        }
     
    357352                if ( read( src_fd , buf , len ) != len )
    358353        {
    359                         printf("  error: cannot read from file <%s>\n", srcpath);
     354                        printf("  error: cannot read from file <%s>\n", argv[1] );
    360355                        goto cmd_cp_exit;
    361356                }
    362357
    363358#if DEBUG_CMD_CP
    364 snprintf( string , 64 , "[ksh] %s : read %d bytes from %s", __FUNCTION__, len, srcpath );
     359snprintf( string , 64 , "[ksh] %s : read %d bytes from %s", __FUNCTION__, len, argv[1] );
    365360display_string( string );
    366361#endif
     
    369364                if ( write( dst_fd , buf , len ) != len )
    370365        {
    371                         printf("  error: cannot write to file <%s>\n", dstpath);
     366                        printf("  error: cannot write to file <%s>\n", argv[2] );
    372367                        goto cmd_cp_exit;
    373368                }
    374369
    375370#if DEBUG_CMD_CP
    376 snprintf( string , 64 , "[ksh] %s : write %d bytes to %s", __FUNCTION__, len, dstpath );
     371snprintf( string , 64 , "[ksh] %s : write %d bytes to %s", __FUNCTION__, len, argv[2] );
    377372display_string( string );
    378373#endif
     
    662657        int                  ret_exec;           // return value from exec
    663658    unsigned int         ksh_pid;            // KSH process PID
    664         char               * pathname;           // path to .elf file
    665659    unsigned int         background;         // background execution if non zero
    666660    unsigned int         placement;          // placement specified if non zero
     
    677671    else
    678672    {
    679             pathname = argv[1];
     673            strcpy( pathname , argv[1] );
    680674
    681675        if( argc == 2 )
     
    707701        }
    708702
    709 /*
    710         // take semaphore to block the interactive thread
    711         if ( sem_wait( &semaphore ) )
    712         {
    713             printf("\n[ksh error] cannot found semafore\n" );
    714             exit( 1 );
    715         }
    716 */
    717703        // get KSH process PID
    718704        ksh_pid = getpid();
     
    767753display_string( string );
    768754#endif
      755            // when the new process is launched in background, the KSH process
      756            // takes the TXT ownership, and releases the semaphore to get the next command.
      757            // Otherwise, the child process keeps the TXT ownership, and the semaphore will
      758            // be released by the KSH main thread when the child process exits
    769759
    770760            if( background )    //  KSH must keep TXT ownership
     
    776766                sem_post( &semaphore );
    777767            }
    778             else                // KSH loosed TXT ownership
    779             {
    780                 // semaphore will be released by the KSH main thread
    781                 // when the loaded process exit
    782             }
    783768        }
    784769    }
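
The comment introduced above describes the TXT-ownership protocol: in background mode KSH takes the terminal back and immediately releases the semaphore; in foreground mode the child keeps the terminal and the KSH main thread posts the semaphore only when the child exits. A condensed, hedged restatement (the fg() helper is an assumption, the actual code may use a different call):

    if( background )               // KSH keeps the terminal
    {
        fg( ksh_pid );             // assumed helper : give TXT ownership back to KSH
        sem_post( &semaphore );    // the interactive thread can read the next command
    }
    else                           // the child process keeps the terminal
    {
        // nothing to do here : the KSH main thread will post the semaphore
        // when it detects the child process termination
    }
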
     
    812797static void cmd_ls( int argc , char **argv )
    813798{
    814         char           * pathname = NULL;
    815799    struct dirent  * entry;
    816800    DIR            * dir;
     
    830814        // get target directory path
    831815        if ( argc == 1 ) strcpy( pathname , "." );
    832         else             pathname = argv[1];
     816        else             strcpy( pathname , argv[1] );
    833817
    834818        // open target directory
     
    874858static void cmd_mkdir( int argc , char **argv )
    875859{
    876         char * pathname;
    877 
    878860        if (argc != 2)
    879861    {
     
    882864    else
    883865    {
    884         pathname = argv[1];
     866        strcpy( pathname , argv[1] );
    885867
    886868        mkdir( pathname , 0x777 );
     
    895877static void cmd_mv( int argc , char **argv )
    896878{
    897         char * old_path;
    898     char * new_path;
    899 
    900879        if (argc != 3)
    901880    {
     
    904883    else
    905884    {
    906         old_path = argv[1];
    907         new_path = argv[2];
     885        strcpy( pathname , argv[1] );
     886        strcpy( pathnew  , argv[2] );
    908887
    909888        // call the relevant syscall
    910         if( rename( old_path , new_path ) )
    911         {
    912             printf("  error: unable to rename <%s> to <%s>\n", old_path, new_path );
     889        if( rename( pathname , pathnew ) )
     890        {
     891            printf("  error: unable to rename <%s> to <%s>\n", pathname , pathnew );
    913892        }
    914893    }
     
    967946static void cmd_pwd( int argc , char **argv )
    968947{
    969         char buf[1024];
    970 
    971948        if (argc != 1)
    972949    {
     
    975952    else
    976953    {
    977         if ( getcwd( buf , 1024 ) )
     954        if ( getcwd( pathname , PATH_MAX_SIZE ) )
    978955        {
    979956                    printf("  error: unable to get current directory\n");
     
    981958        else
    982959        {
    983                     printf("%s\n", buf);
     960                    printf("%s\n", pathname );
    984961            }
    985962    }
     
    993970static void cmd_rm( int argc , char **argv )
    994971{
    995         char * pathname;
    996 
    997972        if (argc != 2)
    998973    {
     
    1001976    else
    1002977    {
    1003             pathname = argv[1];
     978            strcpy( pathname , argv[1] );
    1004979
    1005980        if ( unlink( pathname ) )
     
    1018993{
    1019994    // same as cmd_rm()
    1020         cmd_rm(argc, argv);
     995        cmd_rm (argc , argv );
    1021996}
    1022997
     
    11031078// This function analyses one command (with arguments), executes it, and returns.
    11041079////////////////////////////////////////////////////////////////////////////////////
    1105 static void __attribute__ ((noinline)) parse( char * buf )
     1080static void __attribute__ ((noinline)) execute( char * buf )
    11061081{
    11071082        int    argc = 0;
     
    11101085        int    len = strlen(buf);
    11111086
    1112 #if DEBUG_PARSE
    1113 char string[64];
    1114 snprintf( string , 64 , "\n[ksh] %s : <%s>", __FUNCTION__ , buf );
    1115 display_string( string );
     1087#if DEBUG_EXECUTE
     1088printf("\n[ksh] %s : command <%s>\n",
     1089__FUNCTION__ , buf );
    11161090#endif
    11171091
     
    11341108        }
    11351109
    1136 #if DEBUG_PARSE
    1137 snprintf( string , 64 , "\n[ksh] %s : argc = %d for <%s>", __FUNCTION__ , argc , argv[0] );
    1138 display_string( string );
    1139 #endif
    1140 
    1141     // analyse command type
    1142         if (argc > 0)
    1143     {
    1144                 int found = 0;
    1145 
    1146                 // try to match typed command
    1147                 for ( i = 0 ; command[i].name ; i++ )
    1148         {
    1149                         if (strcmp(argv[0], command[i].name) == 0)
    1150             {
    1151                                 command[i].fn(argc, argv);
    1152                                 found = 1;
    1153                                 break;
    1154                         }
     1110    // check command
     1111        if (argc == 0)
     1112    {
     1113        // release semaphore to get next command
     1114        sem_post( &semaphore );
     1115    }
     1116
     1117#if DEBUG_EXECUTE
     1118printf("\n[ksh] %s : argc %d / arg0 %s / arg1 %s\n",
     1119__FUNCTION__ , argc , argv[0], argv[1] );
     1120#endif
     1121
     1122    // scan the list of commands to match typed command
     1123    int found = 0;
     1124    for ( i = 0 ; (command[i].name != NULL) && (found == 0) ; i++ )
     1125    {
     1126        if (strcmp(argv[0], command[i].name) == 0)
     1127        {
     1128                        command[i].fn(argc, argv);
     1129                        found = 1;
    11551130                }
    1156 
    1157                 if (!found)  // undefined command
    1158         {
    1159                         printf("  error : undefined command <%s>\n", argv[0]);
    1160 
    1161             // release semaphore to get next command
    1162             sem_post( &semaphore );
    1163                 }
    1164         }
    1165 }  // end parse()
     1131    }
     1132
     1133    // check undefined command
     1134        if (!found)
     1135    {   
     1136        printf("  error : undefined command <%s>\n", argv[0]);
     1137
     1138        // release semaphore to get next command
     1139        sem_post( &semaphore );
     1140        }
     1141}  // end execute()
    11661142
    11671143///////////////////////////////
     
    11771153
    11781154#if DEBUG_INTER
    1179 char string[64];
    1180 #endif
    1181 
    1182 /* To lauch one command without interactive mode
     1155char string[128];
     1156#endif
     1157
     1158/*
     1159// To lauch one or several commands without interactive mode
     1160
     1161// 1. first command
    11831162if( sem_wait( &semaphore ) )
    11841163{
     
    11881167else
    11891168{
    1190     printf("\n[ksh] load bin/user/sort.elf\n");
     1169    printf("\n[ksh] load bin/user/pgcd.elf\n");
    11911170}
    11921171
    1193 strcpy( cmd , "load bin/user/sort.elf" );
    1194 parse( cmd );
     1172strcpy( cmd , "load bin/user/pgcd.elf" );
     1173execute( cmd );
     1174
     1175// 2. second command
     1176if( sem_wait( &semaphore ) )
     1177{
     1178    printf("\n[ksh error] cannot found semafore\n" );
     1179    exit( 1 );
     1180}
     1181else
     1182{
     1183    printf("\n[ksh] ls home\n");
     1184}
     1185
     1186strcpy( cmd , "ls home" );
     1187execute( cmd );
     1188
     1189// end non-interactive mode
    11951190*/
    11961191
     
    12271222        {
    12281223            // initialize command buffer
    1229             memset( cmd, 0x20 , sizeof(cmd) );   // TODO useful ?
     1224            // memset( cmd , 0x20 , sizeof(cmd) );   // TODO useful ?
    12301225            count       = 0;
    12311226            state       = NORMAL;
     
    12341229#if DEBUG_INTER
    12351230unsigned int pid = getpid();
    1236 snprintf( string , 64 , "\n[ksh] %s : request a new command", __FUNCTION__ );
     1231snprintf( string , 128 , "\n[ksh] %s : request a new command", __FUNCTION__ );
    12371232display_string( string );
    12381233#endif
     
    12631258                                            cmd[count] = 0;
    12641259                        count++;
    1265 
    1266                         // register command in log
     1260#if DEBUG_INTER
     1261snprintf( string , 128 , "[ksh] %s : get command <%s> / &log = %x / ptw = %d / &ptw = %x",
     1262__FUNCTION__, cmd , log_entries[ptw].buf , ptw , &ptw );
     1263display_string( string );
     1264display_vmm( 0 , 2 );
     1265#endif
     1266                        // register command in log_entries[] array
    12671267                                            strncpy( log_entries[ptw].buf , cmd , count );
    12681268                                            log_entries[ptw].count = count;
     
    12711271
    12721272#if DEBUG_INTER
    1273 snprintf( string , 64 , "[ksh] %s : parse and execute <%s>", __FUNCTION__, cmd );
     1273snprintf( string , 128 , "[ksh] %s : execute <%s>", __FUNCTION__, cmd );
    12741274display_string( string );
    12751275#endif
     
    12771277                        putchar( c );
    12781278
    1279                                             // call parser to analyse and execute command
    1280                                             parse( cmd );
     1279                                            // execute command
     1280                                            execute( cmd );
    12811281                                    }
    12821282                    else                         // no command registered
     
    13911391
    13921392#if DEBUG_INTER
    1393 snprintf( string , 64 , "\n[ksh] %s : complete <%s> command", __FUNCTION__, cmd );
     1393snprintf( string , 128 , "\n[ksh] %s : complete <%s> command", __FUNCTION__, cmd );
    13941394display_string( string );
    13951395#endif
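In the interactive loop of this file, each validated command is saved into the log_entries[] array before being executed, with ptw used as the write pointer (see the strncpy() hunk above). The sketch below illustrates that command-history mechanism in isolation; the LOG_DEPTH and CMD_MAX values and the helper name log_register_command() are assumptions made for the example, not the actual ksh.c definitions.

    #include <string.h>

    #define LOG_DEPTH   32       // number of history entries (assumed)
    #define CMD_MAX     128      // maximum command length (assumed)

    // one saved command, mirroring the log_entries[] usage above
    typedef struct log_entry_s
    {
        char         buf[CMD_MAX];
        unsigned int count;
    } log_entry_t;

    static log_entry_t  log_entries[LOG_DEPTH];
    static unsigned int ptw;     // write pointer into the circular history

    // hypothetical helper : register one validated command in the history
    static void log_register_command( const char * cmd , unsigned int count )
    {
        strncpy( log_entries[ptw].buf , cmd , CMD_MAX - 1 );
        log_entries[ptw].buf[CMD_MAX - 1] = 0;                     // keep the entry terminated
        log_entries[ptw].count = (count < CMD_MAX) ? count : CMD_MAX - 1;
        ptw = (ptw + 1) % LOG_DEPTH;                               // wrap around when full
    }

A separate read pointer walking backwards through the same array is the natural way to replay saved commands on the arrow keys, but that part of ksh.c is not touched by this changeset.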
  • trunk/user/pgcd/pgcd.c

    r580 r625  
    2424    get_core( &cxy , &lid );
    2525
    26     printf( "\n\n[PGCD] starts on core[%x,%d] / cycle %d\n",
     26    printf( "\n\n[pgcd] starts on core[%x,%d] / cycle %d\n",
    2727    cxy , lid , (unsigned int)cycle );
    2828
  • trunk/user/sort/sort.c

    r624 r625  
    2929#include <hal_macros.h>
    3030
    31 #define ARRAY_LENGTH        1024       // number of items
     31#define ARRAY_LENGTH        256        // number of items
    3232#define MAX_THREADS         1024       // 16 * 16 * 4
    3333
     
    412412#endif
    413413
    414 #if CHECK_RESULT   
    415 int    success = 1;
    416 int*   res_array = ( (total_threads ==   2) ||
    417                      (total_threads ==   8) ||
    418                      (total_threads ==  32) ||
    419                      (total_threads == 128) ||
    420                      (total_threads == 512) ) ? array1 : array0;
    421 
    422 for( n=0 ; n<(ARRAY_LENGTH-2) ; n++ )
    423 {
    424     if ( res_array[n] > res_array[n+1] )
    425     {
    426         printf("\n[sort] array[%d] = %d > array[%d] = %d\n",
    427         n , res_array[n] , n+1 , res_array[n+1] );
    428         success = 0;
    429         break;
    430     }
    431 }
    432 
    433 if ( success ) printf("\n[sort] success\n");
    434 else           printf("\n[sort] failure\n");
     414#if CHECK_RESULT
     415   
     416    int    success = 1;
     417    int *  res_array = ( (total_threads ==   2) ||
     418                         (total_threads ==   8) ||
     419                         (total_threads ==  32) ||
     420                         (total_threads == 128) ||
     421                         (total_threads == 512) ) ? array1 : array0;
     422
     423    for( n=0 ; n<(ARRAY_LENGTH-2) ; n++ )
     424    {
     425        if ( res_array[n] > res_array[n+1] )
     426        {
     427            printf("\n[sort] array[%d] = %d > array[%d] = %d\n",
     428            n , res_array[n] , n+1 , res_array[n+1] );
     429            success = 0;
     430            break;
     431        }
     432    }
     433
     434    if ( success ) printf("\n[sort] success\n");
     435    else           printf("\n[sort] failure\n");
     436
    435437#endif
    436438
    437439#if INSTRUMENTATION
    438 char   name[64];
    439 char   path[128];
    440 
    441 // build a file name from n_items / n_clusters / n_cores
    442 if( USE_DQT_BARRIER ) snprintf( name , 64 , "sort_dqt_%d_%d_%d",
    443                       ARRAY_LENGTH, x_size * y_size, ncores );
    444 else                  snprintf( name , 64 , "sort_smp_%d_%d_%d",
    445                       ARRAY_LENGTH, x_size * y_size, ncores );
    446 
    447 // build file pathname
    448 snprintf( path , 128 , "home/%s" , name );
    449 
    450 // compute results
    451 unsigned int sequencial = (unsigned int)(seq_end_cycle - start_cycle);
    452 unsigned int parallel   = (unsigned int)(para_end_cycle - seq_end_cycle);
    453 
    454 // display results on process terminal
    455 printf("\n----- %s -----\n"
    456        " - sequencial : %d cycles\n"
    457        " - parallel   : %d cycles\n",
    458        name, sequencial, parallel );
    459 
    460 // open file
    461 FILE * stream = fopen( path , NULL );
    462 if( stream == NULL )
    463 {
    464     printf("\n[sort error] cannot open instrumentation file <%s>\n", name );
    465     exit(0);
    466 }
    467 
    468 // register results to file
    469 int ret = fprintf( stream , "\n----- %s -----\n"
    470                             " - sequencial : %d cycles\n"
    471                             " - parallel   : %d cycles\n", name, sequencial, parallel );
    472 if( ret < 0 )
    473 {
    474     printf("\n[sort error] cannot write to instrumentation file <%s>\n", name );
    475     exit(0);
    476 }
    477 
    478 // close instrumentation file
    479 if( fclose( stream ) )
    480 {
    481     printf("\n[sort error] cannot close instrumentation file <%s>\n", name );
    482     exit(0);
    483 }
     440
     441    char   name[64];
     442    char   path[128];
     443
     444    // build a file name from n_items / n_clusters / n_cores
     445    if( USE_DQT_BARRIER ) snprintf( name , 64 , "sort_dqt_%d_%d_%d",
     446                          ARRAY_LENGTH, x_size * y_size, ncores );
     447    else                  snprintf( name , 64 , "sort_smp_%d_%d_%d",
     448                          ARRAY_LENGTH, x_size * y_size, ncores );
     449
     450    // build file pathname
     451    snprintf( path , 128 , "home/%s" , name );
     452
     453    // compute results
     454    unsigned int sequencial = (unsigned int)(seq_end_cycle - start_cycle);
     455    unsigned int parallel   = (unsigned int)(para_end_cycle - seq_end_cycle);
     456
     457    // display results on process terminal
     458    printf("\n----- %s -----\n"
     459           " - sequencial : %d cycles\n"
     460           " - parallel   : %d cycles\n",
     461           name, sequencial, parallel );
     462
     463    // open file
     464    FILE * stream = fopen( path , NULL );
     465    if( stream == NULL )
     466    {
     467        printf("\n[sort error] cannot open instrumentation file <%s>\n", name );
     468        exit(0);
     469    }
     470
      471    printf("\n[sort] file %s successfully opened\n", path);
     472
     473    // register results to file
     474    int ret = fprintf( stream , "\n----- %s -----\n"
     475                                " - sequencial : %d cycles\n"
     476                                " - parallel   : %d cycles\n", name, sequencial, parallel );
     477    if( ret < 0 )
     478    {
     479        printf("\n[sort error] cannot write to instrumentation file <%s>\n", name );
     480        exit(0);
     481    }
     482
     483    printf("\n[sort] file %s successfully written\n", path);
     484
     485    // close instrumentation file
     486
     487    if( fclose( stream ) )
     488    {
     489        printf("\n[sort error] cannot close the file <%s>\n", name );
     490        exit(0);
     491    }
     492
     493    printf("\n[sort] file %s successfully closed\n", path);
     494
    484495#endif
    485496
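The CHECK_RESULT block added above selects the buffer holding the final result from an explicit list of thread counts (2, 8, 32, 128, 512). These are exactly the powers of two with an odd exponent, which is consistent with a merge sort that ping-pongs between array0 and array1 on each merge pass: log2(total_threads) passes are performed, and an odd number of passes leaves the sorted data in array1. The fragment below expresses that selection as a computation instead of a list; it is only an illustration of the assumed buffer alternation, not code from the changeset, and select_result_buffer() is a hypothetical name.

    // select the buffer holding the sorted result, assuming one buffer
    // swap per merge pass and log2(total_threads) merge passes in total
    static int * select_result_buffer( unsigned int total_threads ,
                                       int *        array0 ,
                                       int *        array1 )
    {
        unsigned int passes = 0;
        while( total_threads > 1 )          // passes = log2(total_threads)
        {
            total_threads >>= 1;
            passes++;
        }
        return ( passes & 1 ) ? array1 : array0;   // odd pass count -> array1
    }

With total_threads equal to 2, 8, 32, 128 or 512 the pass count is odd and the function returns array1, which matches the explicit test added in sort.c; the other powers of two (4, 16, 64, 256, 1024) return array0.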