Changeset 583


Ignore:
Timestamp:
Nov 1, 2018, 12:10:42 PM (6 years ago)
Author:
alain
Message:

Improve signals.

Location:
trunk/kernel/kern
Files:
16 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/kern/chdev.c

    r581 r583  
    6363    switch ( func_type )
    6464    {
    65     case DEV_FUNC_RAM: return "RAM";
    66     case DEV_FUNC_ROM: return "ROM";
    67     case DEV_FUNC_FBF: return "FBF";
    68     case DEV_FUNC_IOB: return "IOB";
    69     case DEV_FUNC_IOC: return "IOC";
    70     case DEV_FUNC_MMC: return "MMC";
    71     case DEV_FUNC_DMA: return "DMA";
    72     case DEV_FUNC_NIC: return "NIC";
    73     case DEV_FUNC_TIM: return "TIM";
    74     case DEV_FUNC_TXT: return "TXT";
    75     case DEV_FUNC_ICU: return "ICU";
    76     case DEV_FUNC_PIC: return "PIC";
    77     default:           return "undefined";
     65        case DEV_FUNC_RAM: return "RAM";
     66        case DEV_FUNC_ROM: return "ROM";
     67        case DEV_FUNC_FBF: return "FBF";
     68        case DEV_FUNC_IOB: return "IOB";
     69        case DEV_FUNC_IOC: return "IOC";
     70        case DEV_FUNC_MMC: return "MMC";
     71        case DEV_FUNC_DMA: return "DMA";
     72        case DEV_FUNC_NIC: return "NIC";
     73        case DEV_FUNC_TIM: return "TIM";
     74        case DEV_FUNC_TXT: return "TXT";
     75        case DEV_FUNC_ICU: return "ICU";
     76        case DEV_FUNC_PIC: return "PIC";
     77        default:           return "undefined";
    7878    }
    7979}
  • trunk/kernel/kern/cluster.c

    r582 r583  
    272272////////////////////////////////////////////////////////////////////////////////////
    273273
    274 /////////////////////////////////
     274///////////////////////////////////////
    275275lid_t cluster_select_local_core( void )
    276276{
     
    680680}  // end cluster_process_copies_unlink()
    681681
    682 ///////////////////////////////////////////
    683 void cluster_processes_display( cxy_t cxy )
     682////////////////////////////////////////////
     683void cluster_processes_display( cxy_t   cxy,
     684                                bool_t  owned )
    684685{
    685686    xptr_t        root_xp;
     
    687688    xptr_t        iter_xp;
    688689    xptr_t        process_xp;
     690    process_t   * process_ptr;
     691    cxy_t         process_cxy;
     692    pid_t         pid;
    689693    cxy_t         txt0_cxy;
    690694    chdev_t     * txt0_ptr;
     
    692696    xptr_t        txt0_lock_xp;
    693697
    694     assert( (cluster_is_undefined( cxy ) == false),
    695     "illegal cluster index" );
     698assert( (cluster_is_undefined( cxy ) == false), "illegal cluster index" );
    696699
    697700    // get extended pointer on root and lock for local process list in cluster
     
    720723    XLIST_FOREACH( root_xp , iter_xp )
    721724    {
    722         process_xp = XLIST_ELEMENT( iter_xp , process_t , local_list );
    723         process_display( process_xp );
     725        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
     726        process_ptr = GET_PTR( process_xp );
     727        process_cxy = GET_CXY( process_xp );
     728
     729        // get process PID
     730        pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
     731
     732        if( owned )  // display only user & owned processes
     733        {
     734            if( (CXY_FROM_PID( pid ) == cxy) && (LPID_FROM_PID( pid ) != 0) )
     735            {
     736                process_display( process_xp );
     737            }
     738        }
     739        else         // display all local processes
     740        {
     741            process_display( process_xp );
     742        }
    724743    }
    725744
  • trunk/kernel/kern/cluster.h

    r582 r583  
    309309 ******************************************************************************************
    310310 * @ cxy   : cluster identifier.
    311  *****************************************************************************************/
    312 void cluster_processes_display( cxy_t cxy );
     311 * @ owned : only owned process if non zero.
     312 *****************************************************************************************/
     313void cluster_processes_display( cxy_t   cxy,
     314                                bool_t  owned );
    313315
    314316/******************************************************************************************
  • trunk/kernel/kern/do_syscall.c

    r527 r583  
    3535// This function should never be called...
    3636///////////////////////////////////////////////////////////////////////////////////////
    37 static int sys_undefined( void )
     37int sys_undefined( void )
    3838{
    3939    assert( false , "undefined system call" );
     
    4343///////////////////////////////////////////////////////////////////////////////////////
    4444// This array of pointers define the kernel functions implementing the syscalls.
    45 // It must be kept consistent with the enum in "shared_syscalls.h" file.
     45// It must be kept consistent with the enum in "syscalls_numbers.h" file.
    4646///////////////////////////////////////////////////////////////////////////////////////
    4747
     
    9898    sys_get_cycle,          // 42
    9999    sys_display,            // 43
    100     sys_undefined,          // 44
     100    sys_place_fork,         // 44
    101101    sys_thread_sleep,       // 45
    102102    sys_thread_wakeup,      // 46
     
    106106};
    107107
    108 ////////////////////////////////////
     108////////////////////////////////////////////
    109109const char * syscall_str( syscalls_t index )
    110110{
    111     switch (index) {
     111    switch (index)
     112    {
    112113    case SYS_THREAD_EXIT:                  return "THREAD_EXIT";      // 0
    113114    case SYS_THREAD_YIELD:                 return "THREAD_YIELD";     // 1
     
    158159    case SYS_GET_CYCLE:                    return "GET_CYCLE";        // 42
    159160    case SYS_DISPLAY:                      return "DISPLAY";          // 43
     161    case SYS_PLACE_FORK:                   return "PLACE_FORK";       // 44
    160162    case SYS_THREAD_SLEEP:                 return "THREAD_SLEEP";     // 45
    161163    case SYS_THREAD_WAKEUP:                return "THREAD_WAKEUP";    // 46
     
    163165    case SYS_FG:                           return "FG";               // 48
    164166    case SYS_IS_FG:                        return "IS_FG";            // 49
    165 
    166     case SYS_UNDEFINED:
    167167    default:                               return "undefined";
    168   }
     168    }
    169169}
    170170
  • trunk/kernel/kern/dqdt.c

    r582 r583  
    2828#include <hal_atomic.h>
    2929#include <hal_remote.h>
     30#include <thread.h>
    3031#include <printk.h>
    3132#include <chdev.h>
     
    5455
    5556    // display node content
    56         nolock_printk("- level %d in cluster %x (node %x) : threads = %x / pages = %x\n",
    57     node.level, GET_CXY( node_xp ), GET_PTR( node_xp ), node.threads, node.pages );
     57        nolock_printk("- level %d / cluster %x : threads = %x / pages = %x / clusters %d / cores %d\n",
     58    node.level, GET_CXY( node_xp ), node.threads, node.pages, node.clusters, node.cores );
    5859
    5960    // recursive call on children if node is not terminal
     
    102103// This static function initializes recursively, from top to bottom, the quad-tree
    103104// infrastructure. The DQDT nodes are allocated as global variables in each local
    104 //  cluster manager. At each level in the quad-tree, this function initializes the
    105 // parent DQDT node in the cluster identified by the <cxy> and <level> arguments.
    106 // A each level, it selects in each child macro-cluster the precise cluster where
    107 // will be placed the the subtree root node, and call recursively itself to
    108 // initialize the child node in this cluster.
     105// cluster manager. At each level in the quad-tree, this function initializes the
     106// node identified by the <cxy> and <level> arguments, selects in each child
     107// macro-cluster the precise cluster where will be placed the subtree root node,
     108// and call recursively itself to initialize the child node in the selected cluster.
    109109///////////////////////////////////////////////////////////////////////////////////////
    110110// @ node cxy  : cluster containing the node to initialize
     
    124124    uint32_t node_base_y;    // associated macro_cluster y coordinate
    125125    uint32_t half;           // associated macro-cluster half size
    126 
    127     // get remote node cluster coordinates
     126    uint32_t cores;          // number of cores in macro cluster
     127    uint32_t clusters;       // number of clusters in macro cluster
     128
     129    // get node cluster coordinates
    128130    node_x = HAL_X_FROM_CXY( node_cxy );
    129131    node_y = HAL_Y_FROM_CXY( node_cxy );
     
    140142    cluster_t * cluster = LOCAL_CLUSTER;
    141143
    142     // get local pointer on remote node to be initialized
    143     dqdt_node_t * node  = &cluster->dqdt_tbl[level];
     144    // build local and extended pointer on node to be initialized
     145    dqdt_node_t * node_ptr = &cluster->dqdt_tbl[level];
     146    xptr_t        node_xp  = XPTR( node_cxy , node_ptr );
    144147
    145148#if DEBUG_DQDT_INIT
    146149printk("\n[DBG] %s : cxy(%d,%d) / level %d / mask %x / half %d / ptr %x\n",
    147 __FUNCTION__, node_x, node_y, level, mask, half, node );
     150__FUNCTION__, node_x, node_y, level, mask, half, node_ptr );
    148151#endif
    149152 
    150153    // make remote node default initialisation
    151     hal_remote_memset( XPTR( node_cxy , node ) , 0 , sizeof( dqdt_node_t ) );
     154    hal_remote_memset( node_xp , 0 , sizeof( dqdt_node_t ) );
     155
     156    // initialize <parent> field
     157    hal_remote_s64( XPTR( node_cxy , &node_ptr->parent ) , parent_xp );
     158
     159    // initialize <level> field
     160    hal_remote_s32( XPTR( node_cxy , &node_ptr->level ) , level );
    152161
    153162    // recursive initialisation
    154     if( level == 0 )                      // terminal case
     163    if( level == 0 )                      // terminal case : cluster
    155164    {
    156         // update parent field
    157         hal_remote_s64( XPTR( node_cxy , &node->parent ) , parent_xp );
     165        // initialize <clusters> field in node
     166        hal_remote_s32( XPTR( node_cxy , &node_ptr->clusters ) , 1 );
     167
     168        // initialize <cores> field in node
     169        cores = hal_remote_l32( XPTR ( node_cxy , &cluster->cores_nr ) );
     170        hal_remote_s32( XPTR( node_cxy , &node_ptr->cores ) , cores );
    158171    }
    159     else                                  // non terminal
     172    else                                  // non terminal : macro-cluster
    160173    {
    161         uint32_t x;
    162         uint32_t y;
    163         cxy_t    cxy;
    164         bool_t   found;
    165 
    166         // update <level> in remote node
    167         hal_remote_s32( XPTR( node_cxy , &node->level ) , level );
    168 
    169         // try to find a valid cluster in child[0][0] macro-cluster
     174        bool_t        found;
     175        uint32_t      x;
     176        uint32_t      y;
     177        cxy_t         child_cxy;
     178        xptr_t        child_xp;
     179        dqdt_node_t * child_ptr =  &cluster->dqdt_tbl[level-1];
     180
     181        // search an active cluster in child[0][0] macro-cluster
    170182        found = false;
    171183        for( x = node_base_x ;
     
    175187            (y < (node_base_y + half)) && (found == false) ; y++ )
    176188            {
    177                 cxy = HAL_CXY_FROM_XY( x , y );
    178                 if( cluster_is_active( cxy ) )
     189                child_cxy = HAL_CXY_FROM_XY( x , y );
     190
     191                if( cluster_is_active( child_cxy ) )
    179192                {
    180                     // update <child[0][0]> in remote inode
    181                     hal_remote_s64( XPTR( node_cxy , &node->children[0][0] ),
    182                                     XPTR( cxy , &cluster->dqdt_tbl[level - 1] ) );
    183 
    184                     // udate <arity> in remote node
    185                     hal_remote_atomic_add( XPTR( node_cxy , &node->arity ) , 1 );
    186 
    187                     // initialize recursively child[0][0] node
    188                     dqdt_recursive_build( cxy , level-1 , XPTR( node_cxy , node ) );
     193                    // initialize recursively selected child[0][0] node
     194                    dqdt_recursive_build( child_cxy , level-1 , node_xp );
     195
     196                    // build extended pointer on child[0][0] node
     197                    child_xp = XPTR( child_cxy , child_ptr );
     198
     199                    // update <cores> field in node
     200                    cores = hal_remote_l32( XPTR ( child_cxy , &child_ptr->cores ) );
     201                    hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->cores ) , cores );
     202
     203                    // update <clusters> field in node
     204                    clusters = hal_remote_l32( XPTR ( child_cxy , &child_ptr->clusters ) );
     205                    hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->clusters ) , clusters );
     206
     207                    // update <child[0][0]> field in node
     208                    hal_remote_s64( XPTR( node_cxy , &node_ptr->children[0][0] ), child_xp );
     209
     210                    // udate <arity> field in node
     211                    hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->arity ) , 1 );
    189212   
    190213                    // exit loops
     
    194217        }
    195218
    196         // try to find a valid cluster in child[0][1] macro-cluster
     219        // search an active cluster in child[0][1] macro-cluster
    197220        found = false;
    198221        for( x = node_base_x ;
     
    200223        {
    201224            for( y = (node_base_y + half) ;
    202             (y < (node_base_y + (half<<2))) && (found == false) ; y++ )
     225            (y < (node_base_y + (half<<1))) && (found == false) ; y++ )
    203226            {
    204                 cxy = HAL_CXY_FROM_XY( x , y );
    205                 if( cluster_is_active( cxy ) )
     227                child_cxy = HAL_CXY_FROM_XY( x , y );
     228
     229                if( cluster_is_active( child_cxy ) )
    206230                {
    207                     // update <child[0][1]> in remote inode
    208                     hal_remote_s64( XPTR( node_cxy , &node->children[0][1] ),
    209                                     XPTR( cxy , &cluster->dqdt_tbl[level - 1] ) );
    210 
    211                     // udate <arity> in remote node
    212                     hal_remote_atomic_add( XPTR( node_cxy , &node->arity ) , 1 );
    213 
    214                     // initialize recursively child[0][1] node
    215                     dqdt_recursive_build( cxy , level-1 , XPTR( node_cxy , node ) );
     231                    // initialize recursively selected child[0][1] node
     232                    dqdt_recursive_build( child_cxy , level-1 , node_xp );
     233
     234                    // build extended pointer on child[0][1] node
     235                    child_xp = XPTR( child_cxy , child_ptr );
     236
     237                    // update <cores> field in node
     238                    cores = hal_remote_l32( XPTR ( child_cxy , &child_ptr->cores ) );
     239                    hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->cores ) , cores );
     240
     241                    // update <clusters> field in node
     242                    clusters = hal_remote_l32( XPTR ( child_cxy , &child_ptr->clusters ) );
     243                    hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->clusters ) , clusters );
     244
     245                    // update <child[0][1]> field in node
     246                    hal_remote_s64( XPTR( node_cxy , &node_ptr->children[0][1] ), child_xp );
     247
     248                    // udate <arity> field in node
     249                    hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->arity ) , 1 );
    216250   
    217251                    // exit loops
     
    220254            }
    221255        }
    222            
    223         // try to find a valid cluster in child[1][0] macro-cluster
     256
     257        // search an active cluster in child[1][0] macro-cluster
    224258        found = false;
    225         for( x = (node_base_x + half) ;
     259        for( x = (node_base_x +half) ;
    226260        (x < (node_base_x + (half<<1))) && (found == false) ; x++ )
    227261        {
     
    229263            (y < (node_base_y + half)) && (found == false) ; y++ )
    230264            {
    231                 cxy = HAL_CXY_FROM_XY( x , y );
    232                 if( cluster_is_active( cxy ) )
     265                child_cxy = HAL_CXY_FROM_XY( x , y );
     266
     267                if( cluster_is_active( child_cxy ) )
    233268                {
    234                     // update <child[1][0]> in remote inode
    235                     hal_remote_s64( XPTR( node_cxy , &node->children[1][0] ),
    236                                     XPTR( cxy , &cluster->dqdt_tbl[level - 1] ) );
    237 
    238                     // udate <arity> in remote node
    239                     hal_remote_atomic_add( XPTR( node_cxy , &node->arity ) , 1 );
    240 
    241                     // initialize recursively child[1][0] node
    242                     dqdt_recursive_build( cxy , level-1 , XPTR( node_cxy , node ) );
     269                    // initialize recursively selected child[1][0] node
     270                    dqdt_recursive_build( child_cxy , level-1 , node_xp );
     271
     272                    // build extended pointer on child[1][0] node
     273                    child_xp = XPTR( child_cxy , child_ptr );
     274
     275                    // update <cores> field in node
     276                    cores = hal_remote_l32( XPTR ( child_cxy , &child_ptr->cores ) );
     277                    hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->cores ) , cores );
     278
     279                    // update <clusters> field in node
     280                    clusters = hal_remote_l32( XPTR ( child_cxy , &child_ptr->clusters ) );
     281                    hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->clusters ) , clusters );
     282
     283                    // update <child[1][0]> field in node
     284                    hal_remote_s64( XPTR( node_cxy , &node_ptr->children[1][0] ), child_xp );
     285
     286                    // udate <arity> field in node
     287                    hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->arity ) , 1 );
    243288   
    244289                    // exit loops
     
    248293        }
    249294
    250         // try to find a valid cluster in child[1][1] macro-cluster
     295        // search an active cluster in child[1][1] macro-cluster
    251296        found = false;
    252297        for( x = (node_base_x + half) ;
     
    254299        {
    255300            for( y = (node_base_y + half) ;
    256             (y < (node_base_y + (half<<2))) && (found == false) ; y++ )
     301            (y < (node_base_y + (half<<1))) && (found == false) ; y++ )
    257302            {
    258                 cxy = HAL_CXY_FROM_XY( x , y );
    259                 if( cluster_is_active( cxy ) )
     303                child_cxy = HAL_CXY_FROM_XY( x , y );
     304
     305                if( cluster_is_active( child_cxy ) )
    260306                {
    261                     // update <child[1][1]> in remote inode
    262                     hal_remote_s64( XPTR( node_cxy , &node->children[1][1] ),
    263                                     XPTR( cxy , &cluster->dqdt_tbl[level - 1] ) );
    264 
    265                     // udate <arity> in remote node
    266                     hal_remote_atomic_add( XPTR( node_cxy , &node->arity ) , 1 );
    267 
    268                     // initialize recursively child[1][1] node
    269                     dqdt_recursive_build( cxy , level-1 , XPTR( node_cxy , node ) );
     307                    // initialize recursively selected child[1][1] node
     308                    dqdt_recursive_build( child_cxy , level-1 , node_xp );
     309
     310                    // build extended pointer on child[1][1] node
     311                    child_xp = XPTR( child_cxy , child_ptr );
     312
     313                    // update <cores> field in node
     314                    cores = hal_remote_l32( XPTR ( child_cxy , &child_ptr->cores ) );
     315                    hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->cores ) , cores );
     316
     317                    // update <clusters> field in node
     318                    clusters = hal_remote_l32( XPTR ( child_cxy , &child_ptr->clusters ) );
     319                    hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->clusters ) , clusters );
     320
     321                    // update <child[1][1]> field in node
     322                    hal_remote_s64( XPTR( node_cxy , &node_ptr->children[1][1] ), child_xp );
     323
     324                    // udate <arity> field in node
     325                    hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->arity ) , 1 );
    270326   
    271327                    // exit loops
     
    311367}  // end dqdt_init()
    312368
     369
    313370///////////////////////////////////////////////////////////////////////////
    314 // This recursive function is called by the dqdt_update_threads() function.
    315 // It traverses the quad tree from clusters to root.
    316 ///////////////////////////////////////////////////////////////////////////
    317 // @ node       : extended pointer on current node
    318 // @ increment  : number of threads variation
    319 ///////////////////////////////////////////////////////////////////////////
    320 static void dqdt_propagate_threads( xptr_t  node,
    321                                     int32_t increment )
    322 {
    323     // get current node cluster identifier and local pointer
    324     cxy_t         cxy = GET_CXY( node );
    325     dqdt_node_t * ptr = GET_PTR( node );
    326 
    327     // update current node threads number
    328     hal_remote_atomic_add( XPTR( cxy , &ptr->threads ) , increment );
    329 
    330     // get extended pointer on parent node
    331     xptr_t parent = (xptr_t)hal_remote_l64( XPTR( cxy , &ptr->parent ) );
    332 
    333     // propagate if required
    334     if ( parent != XPTR_NULL ) dqdt_propagate_threads( parent, increment );
    335 }
    336 
    337 ///////////////////////////////////////////////////////////////////////////
    338 // This recursive function is called by the dqdt_update_pages() function.
     371// This recursive function is called by both the dqdt_increment_pages()
     372// and by the dqdt_decrement_pages() functions.
    339373// It traverses the quad tree from clusters to root.
    340374///////////////////////////////////////////////////////////////////////////
     
    349383    dqdt_node_t * ptr = GET_PTR( node );
    350384
    351     // update current node threads number
     385    // update current node pages number
    352386    hal_remote_atomic_add( XPTR( cxy , &ptr->pages ) , increment );
    353387
     
    359393}
    360394
    361 /////////////////////////////////////////////
    362 void dqdt_update_threads( int32_t increment )
     395///////////////////////////////////////////
     396void dqdt_increment_pages( uint32_t order )
    363397{
    364398        cluster_t   * cluster = LOCAL_CLUSTER;
     
    366400
    367401    // update DQDT node level 0
    368     hal_atomic_add( &node->threads , increment );
     402    hal_atomic_add( &node->pages , (1 << order) );
    369403
    370404    // propagate to DQDT upper levels
    371     if( node->parent != XPTR_NULL ) dqdt_propagate_threads( node->parent , increment );
     405    if( node->parent != XPTR_NULL ) dqdt_propagate_pages( node->parent , (1 << order) );
     406
     407#if DEBUG_DQDT_UPDATE_PAGES
     408uint32_t cycle = hal_get_cycles();
     409if( cycle > DEBUG_DQDT_UPDATE_PAGES )
     410printk("\n[DBG] %s : thread %x in process %x / %x pages in cluster %x / cycle %d\n",
     411__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, node->pages, local_cxy, cycle );
     412#endif
     413
    372414}
    373415
    374416///////////////////////////////////////////
    375 void dqdt_update_pages( int32_t increment )
     417void dqdt_decrement_pages( uint32_t order )
    376418{
    377419        cluster_t   * cluster = LOCAL_CLUSTER;
     
    379421
    380422    // update DQDT node level 0
    381     hal_atomic_add( &node->pages , increment );
     423    hal_atomic_add( &node->pages , -(1 << order) );
    382424
    383425    // propagate to DQDT upper levels
    384     if( node->parent != XPTR_NULL ) dqdt_propagate_pages( node->parent , increment );
    385 }
    386 
    387 ////////////////////////////////////////////////////////////////////////////////
     426    if( node->parent != XPTR_NULL ) dqdt_propagate_pages( node->parent , -(1 << order) );
     427
     428#if DEBUG_DQDT_UPDATE_PAGES
     429uint32_t cycle = hal_get_cycles();
     430if( cycle > DEBUG_DQDT_UPDATE_PAGES )
     431printk("\n[DBG] %s : thread %x in process %x / %x pages in cluster %x / cycle %d\n",
     432__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, node->pages, local_cxy, cycle );
     433#endif
     434
     435}
     436
     437
     438
     439///////////////////////////////////////////////////////////////////////////
     440// This recursive function is called by both the dqdt_increment_threads()
     441// and by the dqdt_decrement_threads functions.
     442// It traverses the quad tree from clusters to root.
     443///////////////////////////////////////////////////////////////////////////
     444// @ node       : extended pointer on current node
     445// @ increment  : number of pages variation
     446///////////////////////////////////////////////////////////////////////////
     447static void dqdt_propagate_threads( xptr_t  node,
     448                                    int32_t increment )
     449{
     450    // get current node cluster identifier and local pointer
     451    cxy_t         cxy = GET_CXY( node );
     452    dqdt_node_t * ptr = GET_PTR( node );
     453
     454    // update current node threads number
     455    hal_remote_atomic_add( XPTR( cxy , &ptr->threads ) , increment );
     456
     457    // get extended pointer on parent node
     458    xptr_t parent = (xptr_t)hal_remote_l64( XPTR( cxy , &ptr->parent ) );
     459
     460    // propagate if required
     461    if ( parent != XPTR_NULL ) dqdt_propagate_threads( parent, increment );
     462}
     463
     464///////////////////////////////////
     465void dqdt_increment_threads( void )
     466{
     467        cluster_t   * cluster = LOCAL_CLUSTER;
     468    dqdt_node_t * node    = &cluster->dqdt_tbl[0];
     469
     470    // update DQDT node level 0
     471    hal_atomic_add( &node->threads , 1 );
     472
     473    // propagate to DQDT upper levels
     474    if( node->parent != XPTR_NULL ) dqdt_propagate_threads( node->parent , 1 );
     475
     476#if DEBUG_DQDT_UPDATE_THREADS
     477uint32_t cycle = hal_get_cycles();
     478if( cycle > DEBUG_DQDT_UPDATE_THREADS )
     479printk("\n[DBG] %s : thread %x in process %x / %d threads in cluster %x / cycle %d\n",
     480__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, node->threads, local_cxy, cycle );
     481#endif
     482
     483}
     484
     485///////////////////////////////////
     486void dqdt_decrement_threads( void )
     487{
     488        cluster_t   * cluster = LOCAL_CLUSTER;
     489    dqdt_node_t * node    = &cluster->dqdt_tbl[0];
     490
     491    // update DQDT node level 0
     492    hal_atomic_add( &node->threads , -1 );
     493
     494    // propagate to DQDT upper levels
     495    if( node->parent != XPTR_NULL ) dqdt_propagate_threads( node->parent , -1 );
     496
     497#if DEBUG_DQDT_UPDATE_THREADS
     498uint32_t cycle = hal_get_cycles();
     499if( cycle > DEBUG_DQDT_UPDATE_THREADS )
     500printk("\n[DBG] %s : thread %x in process %x / %d threads in cluster %x / cycle %d\n",
     501__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, node->threads, local_cxy, cycle );
     502#endif
     503
     504}
     505
     506
     507/////////////////////////////////////////////////////////////////////////////////////
    388508// This recursive function is called by both the dqdt_get_cluster_for_process()
    389 // and by the dqdt_get_cluster_for_memory() functions to select the cluster
    390 // with smallest number of thread, or smallest number of allocated pages.
     509// and by the dqdt_get_cluster_for_memory() functions to select the cluster with the
     510// smallest number of threads per core, or the smallest number of pages per cluster.
    391511// It traverses the quad tree from root to clusters.
    392 ///////////////////////////////////////////////////////////////////////////////
     512/////////////////////////////////////////////////////////////////////////////////////
    393513static cxy_t dqdt_select_cluster( xptr_t node,
    394514                                  bool_t for_memory )
     
    422542                cxy_t         cxy  = GET_CXY( child_xp );
    423543                dqdt_node_t * ptr  = GET_PTR( child_xp );
    424                 if( for_memory ) load = hal_remote_l32( XPTR( cxy , &ptr->pages ) );
    425                 else             load = hal_remote_l32( XPTR( cxy , &ptr->threads ) );
    426                 if( load < load_min )
     544
     545                // compute average load  for each child
     546                if( for_memory )
     547                {
     548                    load = hal_remote_l32( XPTR( cxy , &ptr->pages ) ) /
     549                           hal_remote_l32( XPTR( cxy , &ptr->clusters ) );
     550                }
     551                else
     552                {
     553                    load = hal_remote_l32( XPTR( cxy , &ptr->threads ) ) /
     554                           hal_remote_l32( XPTR( cxy , &ptr->cores ) );
     555                }
     556
     557                // select children with smallest load
     558                if( load <= load_min )
    427559                {
    428560                    load_min = load;
     
    436568    // select the child with the lowest load
    437569    return dqdt_select_cluster( node_copy.children[select_x][select_y], for_memory );
    438 }
     570
     571}  // end dqdt_select_cluster()
     572
    439573
    440574//////////////////////////////////////////
     
    442576{
    443577    // call recursive function
    444     return dqdt_select_cluster( LOCAL_CLUSTER->dqdt_root_xp , false );
     578    cxy_t cxy = dqdt_select_cluster( LOCAL_CLUSTER->dqdt_root_xp , false );
     579
     580#if DEBUG_DQDT_SELECT_FOR_PROCESS
     581uint32_t cycle = hal_get_cycles();
     582if( cycle > DEBUG_DQDT_SELECT_FOR_PROCESS )
     583printk("\n[DBG] %s : thread %x in process %x select cluster %x / cycle %d\n",
     584__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, cxy, cycle );
     585#endif
     586
     587    return cxy;
    445588}
    446589
     
    449592{
    450593    // call recursive function
    451     return dqdt_select_cluster( LOCAL_CLUSTER->dqdt_root_xp , true );
    452 }
    453 
     594    cxy_t cxy = dqdt_select_cluster( LOCAL_CLUSTER->dqdt_root_xp , true );
     595
     596#if DEBUG_DQDT_SELECT_FOR_MEMORY
     597uint32_t cycle = hal_get_cycles();
     598if( cycle > DEBUG_DQDT_SELECT_FOR_MEMORY )
     599printk("\n[DBG] %s : thread %x in process %x select cluster %x / cycle %d\n",
     600__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, cxy, cycle );
     601#endif
     602
     603    return cxy;
     604}
     605
  • trunk/kernel/kern/dqdt.h

    r582 r583  
    7272typedef struct dqdt_node_s
    7373{
    74         uint32_t            level;               // node level
    75         uint32_t            arity;               // actual children number in this node
    76     uint32_t            threads;             // current number of threads in subtree
    77     uint32_t            pages;               // current number of pages in subtree
    78         xptr_t              parent;              // extended pointer on parent node
    79         xptr_t              children[2][2];      // extended pointers on children nodes
     74        uint32_t      level;            /*! node level                                     */
     75        uint32_t      arity;            /*! actual children number in this node            */
     76    uint32_t      threads;          /*! current number of threads in macro-cluster     */
     77    uint32_t      pages;            /*! current number of pages in macro-cluster       */
     78    uint32_t      cores;            /*! number of active cores in macro-cluster       */
     79    uint32_t      clusters;         /*! number of active clusters in macro-cluster    */
     80        xptr_t        parent;           /*! extended pointer on parent node                */
     81        xptr_t        children[2][2];   /*! extended pointers on children nodes            */
    8082}
    8183dqdt_node_t;
     
    9597
    9698/****************************************************************************************
    97  * This local function updates the total number of threads in level 0 DQDT node,
    98  * and propagates the variation to the DQDT upper levels.
    99  * It should be called on each thread creation or destruction.
    100  ****************************************************************************************
    101  * @ increment : increment (can be positive or negative)
     99 * These local functions update the total number of threads in level 0 DQDT node,
     100 * and immediately propagate the variation to the DQDT upper levels.
     101 * They are called on each thread creation or destruction.
    102102 ***************************************************************************************/
    103 void dqdt_update_threads( int32_t  increment );
     103void dqdt_increment_threads( void );
     104void dqdt_decrement_threads( void );
    104105
    105106/****************************************************************************************
    106107 * This local function updates the total number of pages in level 0 DQDT node,
    107  * and propagates the variation to the DQDT upper levels.
    108  * It should be called on each physical memory page allocation or release.
     108 * and immediately propagates the variation to the DQDT upper levels.
     109 * They are called by PPM on each physical memory page allocation or release.
    109110 ****************************************************************************************
    110  * @ increment : increment (can be positive or negative)
     111 * @ order   : ln2( number of small pages )
    111112 ***************************************************************************************/
    112 void dqdt_update_pages( int32_t increment );
     113void dqdt_increment_pages( uint32_t order );
     114void dqdt_decrement_pages( uint32_t order );
    113115
    114116/****************************************************************************************
  • trunk/kernel/kern/kernel_init.c

    r582 r583  
    174174    "VFS_FILE",              // 32
    175175    "VMM_VSL",               // 33
     176    "VMM_GPT",               // 34
    176177};       
    177178
     
    967968
    968969#if DEBUG_KERNEL_INIT
    969 // if( (core_lid ==  0) & (local_cxy == 0) )
     970if( (core_lid ==  0) & (local_cxy == 0) )
    970971printk("\n[DBG] %s : exit barrier 0 : TXT0 initialized / sr %x / cycle %d\n",
    971972__FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
  • trunk/kernel/kern/printk.c

    r564 r583  
    385385
    386386////////////////////////////////////
    387 void panic( const char * file_name,
    388             const char * function_name,
     387void panic( const char * function_name,
    389388            uint32_t     line,
    390389            cycle_t      cycle,
     
    407406
    408407    // print generic infos
    409     nolock_printk(
    410             "\n[PANIC] in %s: line %d | function %s | cycle %d\n"
    411             "core[%x,%d] | thread %x (%x) in process %x (%x)\n",
    412             file_name, line, function_name, (uint32_t) cycle,
    413             local_cxy, current->core->lid,
    414             current->trdid, current,
    415             current->process->pid, current->process );
     408    nolock_printk("\n[PANIC] in %s: line %d | cycle %d\n"
     409                  "core[%x,%d] | thread %x (%x) | process %x (%x)\n",
     410                  function_name, line, (uint32_t)cycle,
     411                  local_cxy, current->core->lid,
     412                  current->trdid, current,
     413                  current->process->pid, current->process );
    416414
    417415    // call kernel_printf to print format
  • trunk/kernel/kern/printk.h

    r564 r583  
    9898 * See assert macro documentation for information about printed information.
    9999 *********************************************************************************/
    100 void panic( const char * file_name,
    101             const char * function_name,
     100void panic( const char * function_name,
    102101            uint32_t     line,
    103102            cycle_t      cycle,
     
    110109 * Actually used to debug the kernel.
    111110 *
    112  * Information printed by assert:
    113  * Current running thread:
    114  *   - thread descriptior adress
    115  *   - thread id (trdid)
    116  *
    117  * Current Process:
    118  *   - Process descriptor adress
    119  *   - Process id (pid)
    120  *
    121  * Current Core:
    122  *   - local cluster position (local_cxy)
    123  *   - local core id (lid)
    124  *
    125  * File name (__FILE__) and were the assert is invoked.
    126  * And the assert message.
    127  *
    128  * Cycle: before the assert branchment.
    129  * Note: cycle may change due to compiler optimisation.
    130  *
    131  * Exemple:
    132  * assert( my_ptr != NULL, "my_ptr should not be NULL")
     111 * Extra information printed by assert:
     112 * - Current thread, process, and core
     113 * - Function name / line number in file / cycle
    133114 **********************************************************************************
    134115 * @ condition     : condition that must be true.
     
    141122    if ( ( expr ) == false )                                                      \
    142123    {                                                                             \
    143         panic( __FILE__, __FUNCTION__,                                            \
    144                __line_at_expansion, __assert_cycle,                               \
     124        panic( __FUNCTION__,                                                      \
     125               __line_at_expansion,                                               \
     126               __assert_cycle,                                                    \
    145127               ( format ), ##__VA_ARGS__ );                                       \
    146128    }                                                                             \
  • trunk/kernel/kern/process.c

    r581 r583  
    6868//////////////////////////////////////////////////////////////////////////////////////////
    6969
    70 ///////////////////////////
     70/////////////////////////////////
    7171process_t * process_alloc( void )
    7272{
     
    463463}  // end process_destroy()
    464464
    465 /////////////////////////////////////////////////
     465///////////////////////////////////////////////////////////////////
    466466const char * process_action_str( process_sigactions_t action_type )
    467467{
    468   switch ( action_type ) {
    469   case BLOCK_ALL_THREADS:   return "BLOCK";
    470   case UNBLOCK_ALL_THREADS: return "UNBLOCK";
    471   case DELETE_ALL_THREADS:  return "DELETE";
    472   default:                  return "undefined";
    473   }
     468    switch ( action_type )
     469    {
     470        case BLOCK_ALL_THREADS:   return "BLOCK";
     471        case UNBLOCK_ALL_THREADS: return "UNBLOCK";
     472        case DELETE_ALL_THREADS:  return "DELETE";
     473        default:                  return "undefined";
     474    }
    474475}
    475476
     
    499500    remote_nr = 0;
    500501
    501 // check calling thread can yield
    502 assert( (client->busylocks == 0),
    503 "cannot yield : busylocks = %d\n", client->busylocks );
     502    // check calling thread can yield
     503    thread_assert_can_yield( client , __FUNCTION__ );
    504504
    505505#if DEBUG_PROCESS_SIGACTION
    506506uint32_t cycle = (uint32_t)hal_get_cycles();
    507507if( DEBUG_PROCESS_SIGACTION < cycle )
    508 printk("\n[DBG] %s : thread %x in process %x enter to %s process %x / cycle %d\n",
    509 __FUNCTION__ , client->trdid, client->process->pid,
     508printk("\n[DBG] %s : thread[%x,%x] enter to %s process %x / cycle %d\n",
     509__FUNCTION__ , client->process->pid, client->trdid,
    510510process_action_str( type ) , pid , cycle );
    511511#endif
     
    522522    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
    523523
    524     // check action type
    525     assert( ((type == DELETE_ALL_THREADS ) ||
    526              (type == BLOCK_ALL_THREADS )  ||
    527              (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
     524// check action type
     525assert( ((type == DELETE_ALL_THREADS ) ||
     526         (type == BLOCK_ALL_THREADS )  ||
     527         (type == UNBLOCK_ALL_THREADS )), "illegal action type" );
    528528             
    529 
    530529    // The client thread send parallel RPCs to all remote clusters containing
    531530    // target process copies, wait all responses, and then handles directly
     
    576575#if DEBUG_PROCESS_SIGACTION
    577576if( DEBUG_PROCESS_SIGACTION < cycle )
    578 printk("\n[DBG] %s : thread %x in process %x handles remote process %x in cluster %x\n",
    579 __FUNCTION__, client->trdid, client->process->pid, pid , process_cxy );
     577printk("\n[DBG] %s : thread[%x,%x] send RPC to cluster %x for process %x\n",
     578__FUNCTION__, client->process->pid, client->trdid, process_cxy, pid );
    580579#endif
    581580            // call RPC in target cluster
     
    608607#if DEBUG_PROCESS_SIGACTION
    609608if( DEBUG_PROCESS_SIGACTION < cycle )
    610 printk("\n[DBG] %s : thread %x in process %x handles local process %x in cluster %x\n",
    611 __FUNCTION__, client->trdid, client->process->pid, pid , local_cxy );
     609printk("\n[DBG] %s : thread[%x,%x] handles local process %x in cluster %x\n",
     610__FUNCTION__, client->process->pid, client->trdid, pid , local_cxy );
    612611#endif
    613612        if     (type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp );
    614         else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local , client_xp );
     613        else if(type == BLOCK_ALL_THREADS   ) process_block_threads  ( local );
    615614        else if(type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );
    616615    }
     
    619618cycle = (uint32_t)hal_get_cycles();
    620619if( DEBUG_PROCESS_SIGACTION < cycle )
    621 printk("\n[DBG] %s : thread %x in process %x exit after %s process %x / cycle %d\n",
    622 __FUNCTION__, client->trdid, client->process->pid,
     620printk("\n[DBG] %s : thread[%x,%x] exit after %s process %x / cycle %d\n",
     621__FUNCTION__, client->process->pid, client->trdid,
    623622process_action_str( type ), pid, cycle );
    624623#endif
     
    627626
    628627/////////////////////////////////////////////////
    629 void process_block_threads( process_t * process,
    630                             xptr_t      client_xp )
     628void process_block_threads( process_t * process )
    631629{
    632630    thread_t          * target;         // pointer on target thread
     
    644642uint32_t cycle = (uint32_t)hal_get_cycles();
    645643if( DEBUG_PROCESS_SIGACTION < cycle )
    646 printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n",
    647 __FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle );
     644printk("\n[DBG] %s : thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
     645__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
    648646#endif
    649647
    650648// check target process is an user process
    651 assert( ( process->pid != 0 ),
    652 "target process must be an user process" );
    653 
    654     // get target process owner cluster
     649assert( (LPID_FROM_PID( process->pid ) != 0 ), "target process must be an user process" );
     650
     651    // get target process cluster
    655652    owner_cxy = CXY_FROM_PID( process->pid );
    656653
     
    668665            count++;
    669666
    670             // main thread and client thread should not be blocked
    671             if( ((ltid != 0) || (owner_cxy != local_cxy)) &&         // not main thread
    672                 (client_xp) != XPTR( local_cxy , target ) )          // not client thread
     667            // set the global blocked bit in target thread descriptor.
     668            thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
     669 
     670            // - if the calling thread and the target thread are running on the same core,
     671            //   we don't need confirmation from scheduler,
     672            // - if the calling thread and the target thread are not running on the same
     673            //   core, we ask the target scheduler to acknowledge the blocking
     674            //   to be sure that the target thread is not running.
     675           
     676            if( this->core->lid != target->core->lid )
    673677            {
    674                 // set the global blocked bit in target thread descriptor.
    675                 thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
    676  
    677                 // - if the calling thread and the target thread are on the same core,
    678                 //   we don't need confirmation from scheduler,
    679                 // - if the calling thread and the target thread are not running on the same
    680                 //   core, we ask the target scheduler to acknowlege the blocking
    681                 //   to be sure that the target thread is not running.
    682            
    683                 if( this->core->lid != target->core->lid )
    684                 {
    685                     // increment responses counter
    686                     hal_atomic_add( (void*)&ack_count , 1 );
    687 
    688                     // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
    689                     thread_set_req_ack( target , (uint32_t *)&ack_count );
    690 
    691                     // force scheduling on target thread
    692                     dev_pic_send_ipi( local_cxy , target->core->lid );
    693                 }
     678                // increment responses counter
     679                hal_atomic_add( (void*)&ack_count , 1 );
     680
     681                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
     682                thread_set_req_ack( target , (uint32_t *)&ack_count );
     683
     684                // force scheduling on target thread
     685                dev_pic_send_ipi( local_cxy , target->core->lid );
    694686            }
    695687        }
     
    713705cycle = (uint32_t)hal_get_cycles();
    714706if( DEBUG_PROCESS_SIGACTION < cycle )
    715 printk("\n[DBG] %s : thread %x in process %x exit for process %x in cluster %x / cycle %d\n",
     707printk("\n[DBG] %s : thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
    716708__FUNCTION__, this, this->process->pid, pid, local_cxy , cycle );
    717709#endif
     
    740732uint32_t cycle = (uint32_t)hal_get_cycles();
    741733if( DEBUG_PROCESS_SIGACTION < cycle )
    742 printk("\n[DBG] %s : thread %x n process %x enter for process %x in cluster %x / cycle %d\n",
    743 __FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle );
     734printk("\n[DBG] %s : thread[%x,%x] enter in cluster %x for process %x / cycle %d\n",
     735__FUNCTION__, this->process->pid, this->trdid, local_cxy, process->pid, cycle );
    744736#endif
    745737
    746738// check target process is an user process
    747 assert( ( process->pid != 0 ),
     739assert( ( LPID_FROM_PID( process->pid ) != 0 ),
    748740"target process must be an user process" );
    749741
    750742    // get lock protecting process th_tbl[]
    751     rwlock_rd_acquire( &process->th_lock );
     743    rwlock_wr_acquire( &process->th_lock );
    752744
    753745    // loop on target process local threads                       
     
    773765
    774766    // release lock protecting process th_tbl[]
    775     rwlock_rd_release( &process->th_lock );
     767    rwlock_wr_release( &process->th_lock );
    776768
    777769#if DEBUG_PROCESS_SIGACTION
    778770cycle = (uint32_t)hal_get_cycles();
    779771if( DEBUG_PROCESS_SIGACTION < cycle )
    780 printk("\n[DBG] %s : thread %x in process %x exit for process %x in cluster %x / cycle %d\n",
    781 __FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle );
     772printk("\n[DBG] %s : thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
     773__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
    782774#endif
    783775
     
    799791uint32_t cycle = (uint32_t)hal_get_cycles();
    800792if( DEBUG_PROCESS_SIGACTION < cycle )
    801 printk("\n[DBG] %s : thread %x in process %x enter for process %x in cluster %x / cycle %d\n",
    802 __FUNCTION__, this->trdid, this->process->pid, pid, local_cxy , cycle );
     793printk("\n[DBG] %s : thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
     794__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy , cycle );
    803795#endif
    804796
     
    831823cycle = (uint32_t)hal_get_cycles();
    832824if( DEBUG_PROCESS_SIGACTION < cycle )
    833 printk("\n[DBG] %s : thread %x in process %x exit for process %x in cluster %x / cycle %d\n",
    834 __FUNCTION__, this->trdid, this->process->pid, pid, local_cxy, cycle );
     825printk("\n[DBG] %s : thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
     826__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
    835827#endif
    836828
     
    850842uint32_t cycle = (uint32_t)hal_get_cycles();
    851843if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
    852 printk("\n[DBG] %s : thread %x in cluster %x enter for process %x in cluster %x / cycle %d\n",
    853 __FUNCTION__, this->trdid, this->process->pid, pid, local_cxy, cycle );
     844printk("\n[DBG] %s : thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
     845__FUNCTION__, this->process->pid, this->trdid, pid, local_cxy, cycle );
    854846#endif
    855847
     
    897889cycle = (uint32_t)hal_get_cycles();
    898890if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle )
    899 printk("\n[DBG] %s : thread %x in cluster %x exit in cluster %x / process %x / cycle %d\n",
    900 __FUNCTION__, this->trdid, this->process->pid, local_cxy, process_ptr, cycle );
     891printk("\n[DBG] %s : thread[%x,%x] exit in cluster %x / process %x / cycle %d\n",
     892__FUNCTION__, this->process->pid, this->trdid, local_cxy, process_ptr, cycle );
    901893#endif
    902894
     
    11091101    if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
    11101102
    1111     // scan kth_tbl
     1103    // scan th_tbl
    11121104    for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
    11131105    {
     
    11271119        // returns trdid
    11281120        *trdid = TRDID( local_cxy , ltid );
    1129     }
    1130 
    1131     // get the lock protecting th_tbl for all threads
    1132     // but the idle thread executing kernel_init (cannot yield)
     1121
     1122// if( LPID_FROM_PID( process->pid ) == 0 )
     1123// printk("\n@@@ %s : allocate ltid %d for a thread %s in cluster %x\n",
     1124// __FUNCTION__, ltid, thread_type_str( thread->type), local_cxy );
     1125
     1126    }
     1127
     1128    // release the lock protecting th_tbl
    11331129    if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
    11341130
     
    11411137{
    11421138    uint32_t count;  // number of threads in local process descriptor
    1143 
    1144 // check argument
    1145 assert( (thread != NULL) , "thread argument is NULL" );
    11461139
    11471140    process_t * process = thread->process;
     
    11501143    ltid_t  ltid = LTID_FROM_TRDID( thread->trdid );
    11511144   
    1152     // the lock depends on thread user/kernel type, because we cannot
    1153     // use a descheduling policy for the lock protecting the kth_tbl
    1154 
    11551145    // get the lock protecting th_tbl[]
    11561146    rwlock_wr_acquire( &process->th_lock );
    11571147
    1158     // get number of kernel threads
     1148    // get number of threads
    11591149    count = process->th_nr;
    11601150
     1151// check thread
     1152assert( (thread != NULL) , "thread argument is NULL" );
     1153
    11611154// check th_nr value
    1162 assert( (count > 0) , "process kth_nr cannot be 0\n" );
     1155assert( (count > 0) , "process th_nr cannot be 0\n" );
    11631156
    11641157    // remove thread from th_tbl[]
     
    11661159    process->th_nr = count-1;
    11671160
    1168     // release lock protecting kth_tbl
     1161// if( LPID_FROM_PID( process->pid ) == 0 )
     1162// printk("\n@@@ %s : release ltid %d for a thread %s in cluster %x\n",
     1163// __FUNCTION__, ltid, thread_type_str( thread->type), local_cxy );
     1164
     1165    // release lock protecting th_tbl
    11691166    rwlock_wr_release( &process->th_lock );
    11701167
     
    12031200
    12041201#if DEBUG_PROCESS_MAKE_FORK
    1205 uint32_t cycle = (uint32_t)hal_get_cycles();
     1202uint32_t cycle   = (uint32_t)hal_get_cycles();
     1203thread_t * this  = CURRENT_THREAD;
     1204trdid_t    trdid = this->trdid;
     1205pid_t      pid   = this->process->pid;
    12061206if( DEBUG_PROCESS_MAKE_FORK < cycle )
    1207 printk("\n[DBG] %s : thread %x in process %x enter / cluster %x / cycle %d\n",
    1208 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, local_cxy, cycle );
     1207printk("\n[DBG] %s : thread[%x,%x] enter / cluster %x / cycle %d\n",
     1208__FUNCTION__, pid, trdid, local_cxy, cycle );
    12091209#endif
    12101210
     
    12311231cycle = (uint32_t)hal_get_cycles();
    12321232if( DEBUG_PROCESS_MAKE_FORK < cycle )
    1233 printk("\n[DBG] %s : thread %x in process %x allocated process %x / cycle %d\n",
    1234 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, new_pid, cycle );
     1233printk("\n[DBG] %s : thread[%x,%x] allocated process %x / cycle %d\n",
     1234__FUNCTION__, pid, trdid, new_pid, cycle );
    12351235#endif
    12361236
     
    12431243cycle = (uint32_t)hal_get_cycles();
    12441244if( DEBUG_PROCESS_MAKE_FORK < cycle )
    1245 printk("\n[DBG] %s : thread %x in process %x initialized child_process %x / cycle %d\n",
    1246 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, new_pid, cycle );
     1245printk("\n[DBG] %s : thread[%x,%x] initialized child_process %x / cycle %d\n",
     1246__FUNCTION__, pid, trdid, new_pid, cycle );
    12471247#endif
    12481248
     
    12631263cycle = (uint32_t)hal_get_cycles();
    12641264if( DEBUG_PROCESS_MAKE_FORK < cycle )
    1265 printk("\n[DBG] %s : thread %x in process %x copied VMM from parent %x to child %x / cycle %d\n",
    1266 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
    1267 parent_pid, new_pid, cycle );
     1265printk("\n[DBG] %s : thread[%x,%x] copied VMM from parent to child / cycle %d\n",
     1266__FUNCTION__, pid, trdid, cycle );
    12681267#endif
    12691268
     
    12771276cycle = (uint32_t)hal_get_cycles();
    12781277if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    1279 printk("\n[DBG] %s : thread %x in process %x / child takes TXT ownership / cycle %d\n",
    1280 __FUNCTION__ , CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, cycle );
     1278printk("\n[DBG] %s : thread[%x,%x] / child takes TXT ownership / cycle %d\n",
     1279__FUNCTION__ , pid, trdid, cycle );
    12811280#endif
    12821281
     
    13061305cycle = (uint32_t)hal_get_cycles();
    13071306if( DEBUG_PROCESS_MAKE_FORK < cycle )
    1308 printk("\n[DBG] %s : thread %x in process %x created main thread %x / cycle %d\n",
    1309 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, thread, cycle );
     1307printk("\n[DBG] %s : thread[%x,%x] created main thread %x / cycle %d\n",
     1308__FUNCTION__, pid, trdid, thread, cycle );
    13101309#endif
    13111310
     
    13281327cycle = (uint32_t)hal_get_cycles();
    13291328if( DEBUG_PROCESS_MAKE_FORK < cycle )
    1330 printk("\n[DBG] %s : thread %x in process %x set COW in parent and child / cycle %d\n",
    1331 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, cycle );
     1329printk("\n[DBG] %s : thread[%x,%x] set COW in parent and child / cycle %d\n",
     1330__FUNCTION__, pid, trdid, cycle );
    13321331#endif
    13331332
     
    13501349cycle = (uint32_t)hal_get_cycles();
    13511350if( DEBUG_PROCESS_MAKE_FORK < cycle )
    1352 printk("\n[DBG] %s : thread %x in process %x exit / created process %x / cycle %d\n",
    1353 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, new_pid, cycle );
     1351printk("\n[DBG] %s : thread[%x,%x] exit / created process %x / cycle %d\n",
     1352__FUNCTION__, pid, trdid, new_pid, cycle );
    13541353#endif
    13551354
     
    13841383uint32_t cycle = (uint32_t)hal_get_cycles();
    13851384if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    1386 printk("\n[DBG] %s : thread %x in process %x enters / path %s / cycle %d\n",
    1387 __FUNCTION__, thread->trdid, pid, path, cycle );
     1385printk("\n[DBG] %s : thread[%x,%x] enters for %s / cycle %d\n",
     1386__FUNCTION__, pid, thread->trdid, path, cycle );
    13881387#endif
    13891388
     
    14061405cycle = (uint32_t)hal_get_cycles();
    14071406if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    1408 printk("\n[DBG] %s : thread %x in process %x opened file <%s> / cycle %d\n",
    1409 __FUNCTION__, thread->trdid, pid, path, cycle );
     1407printk("\n[DBG] %s : thread[%x,%x] opened file <%s> / cycle %d\n",
     1408__FUNCTION__, pid, thread->trdid, path, cycle );
    14101409#endif
    14111410
     
    14161415cycle = (uint32_t)hal_get_cycles();
    14171416if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    1418 printk("\n[DBG] %s : thread %x in process %x deleted all threads / cycle %d\n",
    1419 __FUNCTION__, thread->trdid, pid, cycle );
     1417printk("\n[DBG] %s : thread[%x,%x] deleted all threads / cycle %d\n",
     1418__FUNCTION__, pid, thread->trdid, cycle );
    14201419#endif
    14211420
     
    14261425cycle = (uint32_t)hal_get_cycles();
    14271426if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    1428 printk("\n[DBG] %s : thread %x in process %x reset VMM / cycle %d\n",
    1429 __FUNCTION__, thread->trdid, pid, cycle );
     1427printk("\n[DBG] %s : thread[%x,%x] reset VMM / cycle %d\n",
     1428__FUNCTION__, pid, thread->trdid, cycle );
    14301429#endif
    14311430
     
    14431442cycle = (uint32_t)hal_get_cycles();
    14441443if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    1445 printk("\n[DBG] %s : thread %x in process %x / kentry/args/envs vsegs registered / cycle %d\n",
    1446 __FUNCTION__, thread->trdid, pid, cycle );
     1444printk("\n[DBG] %s : thread[%x,%x] / kentry/args/envs vsegs registered / cycle %d\n",
     1445__FUNCTION__, pid, thread->trdid, cycle );
    14471446#endif
    14481447
     
    14611460cycle = (uint32_t)hal_get_cycles();
    14621461if( DEBUG_PROCESS_MAKE_EXEC < cycle )
    1463 printk("\n[DBG] %s : thread %x in process %x / code/data vsegs registered / cycle %d\n",
    1464 __FUNCTION__, thread->trdid, pid, cycle );
     1462printk("\n[DBG] %s : thread[%x,%x] / code/data vsegs registered / cycle %d\n",
     1463__FUNCTION__, pid, thread->trdid, cycle );
    14651464#endif
    14661465
  • trunk/kernel/kern/process.h

    r564 r583  
    312312
    313313/*********************************************************************************************
    314  * This function blocks all threads for a given <process> in the local cluster.
    315  * It scan the list of local thread, and sets the THREAD_BLOCKED_GLOBAL bit for all
    316  * threads, BUT the main thread (thread 0 in owner cluster), and the client thread
    317  * identified by the <client_xp> argument. It request the relevant schedulers to acknowledge
    318  * the blocking, using IPI if required, and returns only when all blockable threads
    319  * in cluster are actually blocked.
    320  * The threads are not detached from the scheduler, and not detached from the local process.
    321  *********************************************************************************************
    322  * @ process     : pointer on the target process descriptor.
    323  * @ client_xp   : extended pointer on the client thread that should not be blocked.
    324  ********************************************************************************************/
    325 void process_block_threads( process_t * process,
    326                             xptr_t      client_xp );
    327 
    328 /*********************************************************************************************
    329  * This function marks for deletion all threads for a given <process> in the local cluster.
     314 * This function marks for delete all threads for a given <process> in the local cluster.
    330315 * It scan the list of local thread, and sets the THREAD_FLAG_REQ_DELETE bit for all
    331316 * threads, BUT the main thread (thread 0 in owner cluster), and the client thread
    332317 * identified by the <client_xp> argument.
    333  * The actual deletion will be done by the scheduler at the next scheduling point.
     318 * The actual delete will be done by the scheduler at the next scheduling point.
    334319 *********************************************************************************************
    335320 * @ process     : pointer on the process descriptor.
     
    337322 ********************************************************************************************/
    338323void process_delete_threads( process_t * process,
    339                             xptr_t       client_xp );
     324                             xptr_t      client_xp );
     325
     326/*********************************************************************************************
     327 * This function blocks all threads for a given <process> in the local cluster.
     328 * It scan the list of local thread, and sets the THREAD_BLOCKED_GLOBAL bit for all threads.
     329 * It request the relevant schedulers to acknowledge the blocking, using IPI if required,
     330 * and returns only when all threads in cluster are actually blocked.
     331 * The threads are not detached from the scheduler, and not detached from the local process.
     332 *********************************************************************************************
     333 * @ process     : pointer on the target process descriptor.
     334 ********************************************************************************************/
     335void process_block_threads( process_t * process );
    340336
    341337/*********************************************************************************************
     
    498494 * @ process  : pointer on the local process descriptor.
    499495 * @ thread   : pointer on new thread to be registered.
    500  * @ trdid    : [out] address of buffer for allocated trdid.
     496 * @ trdid    : [out] buffer for allocated trdid.
    501497 * @ returns 0 if success / returns non zero if no slot available.
    502498 ********************************************************************************************/
     
    504500                                 struct thread_s * thread,
    505501                                 trdid_t         * trdid );
    506 
    507 /*********************************************************************************************
    508  * This function atomically removes a thread registration from the local process descriptor
    509  * th_tbl[] array, using the relevant lock, depending on the kernel/user type.
    510  *********************************************************************************************
    511  * @ thread   : local pointer on thread to be removed.
    512  * @ return true if the removed thread was the last registered thread.
    513  ********************************************************************************************/
    514 bool_t process_remove_thread( struct thread_s * thread );
    515502
    516503
  • trunk/kernel/kern/rpc.c

    r581 r583  
    7272
    7373    &rpc_vmm_get_vseg_server,           // 20
    74     &rpc_vmm_get_pte_server,            // 21
     74    &rpc_vmm_global_update_pte_server,  // 21
    7575    &rpc_kcm_alloc_server,              // 22
    7676    &rpc_kcm_free_server,               // 23
     
    108108
    109109    "GET_VSEG",               // 20
    110     "GET_PTE",                // 21
     110    "GLOBAL_UPDATE_PTE",                // 21
    111111    "KCM_ALLOC",              // 22
    112112    "KCM_FREE",               // 23
     
    126126
    127127/***************************************************************************************/
    128 /************ Generic functions supporting RPCs : client side **************************/
     128/************ Generic function supporting RPCs : client side ***************************/
    129129/***************************************************************************************/
    130130
     
    145145    // RPCs executed by the IDLE thread during kernel_init do not deschedule
    146146    if( this->type != THREAD_IDLE ) thread_assert_can_yield( this , __FUNCTION__ );
    147 
    148 #if DEBUG_RPC_CLIENT_GENERIC
    149 uint32_t cycle = (uint32_t)hal_get_cycles();
    150 if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    151 printk("\n[DBG] %s : thread %x in process %x enter for rpc %s / server_cxy %x / cycle %d\n",
    152 __FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], server_cxy, cycle );
    153 #endif
    154147
    155148    // select a server_core : use client core index if possible / core 0 otherwise
     
    163156    }
    164157
    165     // register client_thread pointer and client_core lid in RPC descriptor
     158    // register client_thread and client_core in RPC descriptor
    166159    rpc->thread = this;
    167160    rpc->lid    = client_core_lid;
     
    193186
    194187#if DEBUG_RPC_CLIENT_GENERIC
    195 cycle = (uint32_t)hal_get_cycles();
     188uint32_t cycle = (uint32_t)hal_get_cycles();
    196189uint32_t items = remote_fifo_items( rpc_fifo_xp );
    197190if( DEBUG_RPC_CLIENT_GENERIC < cycle )
    198 printk("\n[DBG] %s : thread %x in process %x / rpc %s / items %d / cycle %d\n",
    199 __FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], items, cycle );
     191printk("\n[DBG] %s : thread %x in process %x / rpc %s / server[%x,%d] / items %d / cycle %d\n",
     192__FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index],
     193server_cxy, server_core_lid, items, cycle );
    200194#endif
    201195       
     
    17521746
    17531747/////////////////////////////////////////////////////////////////////////////////////////
    1754 // [21]          Marshaling functions attached to RPC_VMM_GET_PTE  (blocking)
    1755 /////////////////////////////////////////////////////////////////////////////////////////
    1756 
    1757 ////////////////////////////////////////////
    1758 void rpc_vmm_get_pte_client( cxy_t       cxy,   
    1759                              process_t * process,  // in
    1760                              vpn_t       vpn,      // in
    1761                              bool_t      cow,      // in
    1762                              uint32_t  * attr,     // out
    1763                              ppn_t     * ppn,      // out
    1764                              error_t   * error )   // out
    1765 {
    1766 #if DEBUG_RPC_VMM_GET_PTE
    1767 thread_t * this = CURRENT_THREAD;
    1768 uint32_t cycle = (uint32_t)hal_get_cycles();
    1769 if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1748// [21]    Marshaling functions attached to RPC_VMM_GLOBAL_UPDATE_PTE  (blocking)
     1749/////////////////////////////////////////////////////////////////////////////////////////
     1750
     1751///////////////////////////////////////////////////////
     1752void rpc_vmm_global_update_pte_client( cxy_t       cxy,   
     1753                                       process_t * process,  // in
     1754                                       vpn_t       vpn,      // in
     1755                                       uint32_t    attr,     // in
     1756                                       ppn_t       ppn )     // in
     1757{
     1758#if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
     1759thread_t * this = CURRENT_THREAD;
     1760uint32_t cycle = (uint32_t)hal_get_cycles();
     1761if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
    17701762printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
    17711763__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
     
    17761768    // initialise RPC descriptor header
    17771769    rpc_desc_t  rpc;
    1778     rpc.index    = RPC_VMM_GET_PTE;
     1770    rpc.index    = RPC_VMM_GLOBAL_UPDATE_PTE;
    17791771    rpc.blocking = true;
    17801772    rpc.responses = 1;
     
    17831775    rpc.args[0] = (uint64_t)(intptr_t)process;
    17841776    rpc.args[1] = (uint64_t)vpn;
    1785     rpc.args[2] = (uint64_t)cow;
     1777    rpc.args[2] = (uint64_t)attr;
     1778    rpc.args[3] = (uint64_t)ppn;
    17861779
    17871780    // register RPC request in remote RPC fifo
    17881781    rpc_send( cxy , &rpc );
    17891782
    1790     // get output argument from rpc descriptor
    1791     *attr  = (uint32_t)rpc.args[3];
    1792     *ppn   = (ppn_t)rpc.args[4];
    1793     *error = (error_t)rpc.args[5];
    1794 
    1795 #if DEBUG_RPC_VMM_GET_PTE
    1796 cycle = (uint32_t)hal_get_cycles();
    1797 if( cycle > DEBUG_RPC_VMM_GET_PTE )
    1798 printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
    1799 __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
    1800 #endif
    1801 }
    1802 
    1803 ////////////////////////////////////////
    1804 void rpc_vmm_get_pte_server( xptr_t xp )
    1805 {
    1806 #if DEBUG_RPC_VMM_GET_PTE
    1807 thread_t * this = CURRENT_THREAD;
    1808 uint32_t cycle = (uint32_t)hal_get_cycles();
    1809 if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1783#if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
     1784cycle = (uint32_t)hal_get_cycles();
     1785if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
     1786printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
     1787__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
     1788#endif
     1789}
     1790
     1791//////////////////////////////////////////////////
     1792void rpc_vmm_global_update_pte_server( xptr_t xp )
     1793{
     1794#if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
     1795thread_t * this = CURRENT_THREAD;
     1796uint32_t cycle = (uint32_t)hal_get_cycles();
     1797if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
    18101798printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
    18111799__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
     
    18141802    process_t   * process;
    18151803    vpn_t         vpn;
    1816     bool_t        cow;
    18171804    uint32_t      attr;
    18181805    ppn_t         ppn;
    1819     error_t       error;
    18201806
    18211807    // get client cluster identifier and pointer on RPC descriptor
     
    18261812    process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    18271813    vpn     = (vpn_t)                hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
    1828     cow     = (bool_t)               hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
     1814    attr    = (uint32_t)             hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
     1815    ppn     = (ppn_t)                hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
    18291816   
    18301817    // call local kernel function
    1831     error = vmm_get_pte( process , vpn , cow , &attr , &ppn );
    1832 
    1833     // set output argument "attr" & "ppn" to client RPC descriptor
    1834     hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)attr );
    1835     hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)ppn );
    1836     hal_remote_s64( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );
    1837 
    1838 #if DEBUG_RPC_VMM_GET_PTE
    1839 cycle = (uint32_t)hal_get_cycles();
    1840 if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1818    vmm_global_update_pte( process , vpn , attr , ppn );
     1819
     1820#if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
     1821cycle = (uint32_t)hal_get_cycles();
     1822if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
    18411823printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
    18421824__FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
  • trunk/kernel/kern/rpc.h

    r564 r583  
    8282
    8383    RPC_VMM_GET_VSEG           = 20,
    84     RPC_VMM_GET_PTE            = 21,
     84    RPC_VMM_GLOBAL_UPDATE_PTE  = 21,
    8585    RPC_KCM_ALLOC              = 22,
    8686    RPC_KCM_FREE               = 23,
     
    149149
    150150/***********************************************************************************
    151  * This function contains the infinite loop executed by a RPC thread,
    152  * to handle all pending RPCs registered in the RPC fifo attached to a given core.
     151 * This function contains the infinite loop executed by a RPC server thread,
     152 * to handle pending RPCs registered in the RPC fifo attached to a given core.
     153 * In each iteration in this loop, it try to handle one RPC request:
     154 * - it tries to take the RPC FIFO ownership,
     155 * - it consumes one request when the FIFO is not empty,
     156 * - it releases the FIFO ownership,
     157 * - it execute the requested service,
     158 * - it unblock and send an IPI to the client thread,
     159 * - it suicides if the number of RPC threads for this core is to large,
     160 * - it block on IDLE and deschedule otherwise. 
    153161 **********************************************************************************/
    154162void rpc_thread_func( void );
     
    483491
    484492/***********************************************************************************
    485  * [21] The RPC_VMM_GET_PTE returns in the <ppn> and <attr> arguments the PTE value
    486  * for a given <vpn> in a given <process> (page_fault or copy_on_write event).
    487  * The server cluster is supposed to be the reference cluster, and the vseg
    488  * containing the VPN must be registered in the reference VMM.
    489  * It returns an error if physical memory cannot be allocated for the missing PTE2,
    490  * or for the missing page itself.
     493 * [21] The RPC_VMM_GLOBAL_UPDATE_PTE can be used by a thread that is not running
     494 * in reference cluster, to ask the reference cluster to update a specific entry,
     495 * identified by the <vpn> argument in all GPT copies of a process identified by
     496 * the <process> argument, using the values defined by <attr> and <ppn> arguments.
     497 * The server cluster is supposed to be the reference cluster.
     498 * It does not return any error code as the called function vmm_global_update_pte()
     499 * cannot fail.
    491500 ***********************************************************************************
    492501 * @ cxy     : server cluster identifier.
    493  * @ process : [in]   pointer on process descriptor in server cluster.
    494  * @ vaddr   : [in]   virtual address to be searched.
    495  * @ cow     : [in]   "copy_on_write" event if true / "page_fault" event if false.
    496  * @ attr    : [out]  address of buffer for attributes.
    497  * @ ppn     : [out]  address of buffer for PPN.
    498  * @ error   : [out]  address of buffer for error code.
    499  **********************************************************************************/
    500 void rpc_vmm_get_pte_client( cxy_t              cxy,
    501                              struct process_s * process,
    502                              vpn_t              vpn,
    503                              bool_t             cow,
    504                              uint32_t         * attr,
    505                              ppn_t            * ppn,
    506                              error_t          * error );
    507 
    508 void rpc_vmm_get_pte_server( xptr_t xp );
     502 * @ process : [in]  pointer on process descriptor in server cluster.
     503 * @ vpn     : [in]  virtual address to be searched.
     504 * @ attr    : [in]  PTE attributes.
     505 * @ ppn     : [it]  PTE PPN.
     506 **********************************************************************************/
     507void rpc_vmm_global_update_pte_client( cxy_t              cxy,
     508                                       struct process_s * process,
     509                                       vpn_t              vpn,
     510                                       uint32_t           attr,
     511                                       ppn_t              ppn );
     512
     513void rpc_vmm_global_update_pte_server( xptr_t xp );
    509514
    510515/***********************************************************************************
  • trunk/kernel/kern/scheduler.c

    r582 r583  
    4040
    4141extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
     42extern process_t            process_zero;       // allocated in kernel_init.c
    4243
    4344///////////////////////////////////////////////////////////////////////////////////////////
     
    8384
    8485// check kernel threads list
    85 assert( (count < sched->k_threads_nr),
    86 "bad kernel threads list" );
     86assert( (count < sched->k_threads_nr), "bad kernel threads list" );
    8787
    8888            // get next entry in kernel list
     
    118118
    119119// check user threads list
    120 assert( (count < sched->u_threads_nr),
    121 "bad user threads list" );
     120assert( (count < sched->u_threads_nr), "bad user threads list" );
    122121
    123122            // get next entry in user list
     
    146145
    147146////////////////////////////////////////////////////////////////////////////////////////////
    148 // This static function is the only function that can remove a thread from the scheduler.
     147// This static function is the only function that can actually delete a thread.
    149148// It is private, because it is called by the sched_yield() public function.
    150149// It scan all threads attached to a given scheduler, and executes the relevant
    151 // actions for pending requests:
     150// actions for two types of pending requests:
    152151// - REQ_ACK : it checks that target thread is blocked, decrements the response counter
    153152//   to acknowledge the client thread, and reset the pending request.
    154 // - REQ_DELETE : it detach the target thread from parent if attached, detach it from
    155 //   the process, remove it from scheduler, release memory allocated to thread descriptor,
    156 //   and destroy the process descriptor it the target thread was the last thread.
     153// - REQ_DELETE : it removes the target thread from the process th_tbl[], remove it
     154//   from the scheduler list, and release the memory allocated to thread descriptor.
     155//   For an user thread, it destroys the process descriptor it the target thread is
     156//   the last thread in the local process descriptor.
     157//
     158// Implementation note:
     159// We use a while to scan the threads in scheduler lists, because some threads can
     160// be destroyed, and we want not use a LIST_FOREACH()
    157161////////////////////////////////////////////////////////////////////////////////////////////
    158162// @ core    : local pointer on the core descriptor.
     
    166170    process_t    * process;
    167171    scheduler_t  * sched;
    168     bool_t         last;
     172    uint32_t       threads_nr;   // number of threads in scheduler list
     173    ltid_t         ltid;         // thread local index
     174    uint32_t       count;        // number of threads in local process
    169175
    170176    // get pointer on scheduler
    171177    sched = &core->scheduler;
    172178
    173     // get pointer on user threads root
     179    ////// scan user threads to handle both ACK and DELETE requests
    174180    root = &sched->u_root;
    175 
    176     // We use a while to scan the user threads, to control the iterator increment,
    177     // because some threads will be destroyed, and we want not use a LIST_FOREACH()
    178 
    179     // initialise list iterator
    180181    iter = root->next;
    181 
    182     // scan all user threads
    183182    while( iter != root )
    184183    {
     
    210209            process = thread->process;
    211210
    212                 // release FPU if required
    213                 if( thread->core->fpu_owner == thread )  thread->core->fpu_owner = NULL;
    214 
    215             // take lock protecting sheduler state
     211            // get thread ltid
     212            ltid = LTID_FROM_TRDID( thread->trdid);
     213
     214            // take the lock protecting th_tbl[]
     215            rwlock_wr_acquire( &process->th_lock );
     216
     217            // take the lock protecting sheduler state
    216218            busylock_acquire( &sched->lock );
    217219
    218220            // update scheduler state
    219             uint32_t threads_nr = sched->u_threads_nr;
     221            threads_nr = sched->u_threads_nr;
    220222            sched->u_threads_nr = threads_nr - 1;
    221223            list_unlink( &thread->sched_list );
     
    236238            }
    237239
    238             // release lock protecting scheduler state
     240            // release the lock protecting sheduler state
    239241            busylock_release( &sched->lock );
    240242
    241             // delete thread descriptor
    242             last = thread_destroy( thread );
     243            // get number of threads in local process
     244            count = process->th_nr;
     245
     246// check th_nr value
     247assert( (count > 0) , "process th_nr cannot be 0\n" );
     248
     249            // remove thread from process th_tbl[]
     250            process->th_tbl[ltid] = NULL;
     251            process->th_nr = count - 1;
     252 
     253            // release the lock protecting th_tbl[]
     254            rwlock_wr_release( &process->th_lock );
     255
     256            // release memory allocated for thread descriptor
     257            thread_destroy( thread );
    243258
    244259#if DEBUG_SCHED_HANDLE_SIGNALS
    245260uint32_t cycle = (uint32_t)hal_get_cycles();
    246261if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    247 printk("\n[DBG] %s : thread %x in process %x on core[%x,%d] deleted / cycle %d\n",
    248 __FUNCTION__ , thread->trdid , process->pid , local_cxy , thread->core->lid , cycle );
     262printk("\n[DBG] %s : thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
     263__FUNCTION__ , process->pid , thread->trdid , local_cxy , thread->core->lid , cycle );
    249264#endif
    250             // destroy process descriptor if no more threads
    251             if( last )
     265            // destroy process descriptor if last thread
     266            if( count == 1 )
    252267            {
    253268                // delete process   
     
    262277            }
    263278        }
     279    }  // end user threads
     280
     281    ////// scan kernel threads for DELETE only
     282    root = &sched->k_root;
     283    iter = root->next;
     284    while( iter != root )
     285    {
     286        // get pointer on thread
     287        thread = LIST_ELEMENT( iter , thread_t , sched_list );
     288
     289        // increment iterator
     290        iter = iter->next;
     291
     292        // handle REQ_DELETE only if target thread != calling thread
     293        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
     294        {
     295
     296// check process descriptor is local kernel process
     297assert( ( thread->process == &process_zero ) , "illegal process descriptor\n");
     298
     299            // get thread ltid
     300            ltid = LTID_FROM_TRDID( thread->trdid);
     301
     302            // take the lock protecting th_tbl[]
     303            rwlock_wr_acquire( &process_zero.th_lock );
     304
     305            // take lock protecting sheduler state
     306            busylock_acquire( &sched->lock );
     307
     308            // update scheduler state
     309            threads_nr = sched->k_threads_nr;
     310            sched->k_threads_nr = threads_nr - 1;
     311            list_unlink( &thread->sched_list );
     312            if( sched->k_last == &thread->sched_list )
     313            {
     314                if( threads_nr == 1 )
     315                {
     316                    sched->k_last = NULL;
     317                }
     318                else if( sched->k_root.next == &thread->sched_list )
     319                {
     320                    sched->k_last = sched->k_root.pred;
     321                }
     322                else
     323                {
     324                    sched->k_last = sched->k_root.next;
     325                }
     326            }
     327
     328            // release lock protecting scheduler state
     329            busylock_release( &sched->lock );
     330
     331            // get number of threads in local kernel process
     332            count = process_zero.th_nr;
     333
     334// check th_nr value
     335assert( (count > 0) , "kernel process th_nr cannot be 0\n" );
     336
     337            // remove thread from process th_tbl[]
     338            process_zero.th_tbl[ltid] = NULL;
     339            process_zero.th_nr = count - 1;
     340 
     341            // release the lock protecting th_tbl[]
     342            rwlock_wr_release( &process_zero.th_lock );
     343
     344            // delete thread descriptor
     345            thread_destroy( thread );
     346
     347#if DEBUG_SCHED_HANDLE_SIGNALS
     348uint32_t cycle = (uint32_t)hal_get_cycles();
     349if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
     350printk("\n[DBG] %s : thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
     351__FUNCTION__ , process_zero.pid , thread->trdid , local_cxy , thread->core->lid , cycle );
     352#endif
     353        }
    264354    }
    265355} // end sched_handle_signals()
     
    268358// This static function is called by the sched_yield function when the RFC_FIFO
    269359// associated to the core is not empty.
    270 // It checks if it exists an idle (blocked) RPC thread for this core, and unblock
    271 // it if found. It creates a new RPC thread if no idle RPC thread is found.
     360// It search an idle RPC thread for this core, and unblock it if found.
     361// It creates a new RPC thread if no idle RPC thread is found.
    272362////////////////////////////////////////////////////////////////////////////////////////////
    273363// @ sched   : local pointer on scheduler.
     
    285375    {
    286376        thread = LIST_ELEMENT( iter , thread_t , sched_list );
    287         if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
    288         {
    289             // exit loop
     377
     378        if( (thread->type == THREAD_RPC) &&
     379            (thread->blocked == THREAD_BLOCKED_IDLE ) )
     380        {
    290381            found = true;
    291382            break;
     
    303394        if ( error )
    304395        {
    305             printk("\n[WARNING] in %s : no memory to create a RPC thread in cluster %x\n",
     396            printk("\n[ERROR] in %s : no memory to create a RPC thread in cluster %x\n",
    306397            __FUNCTION__, local_cxy );
    307398        }
     
    317408uint32_t cycle = (uint32_t)hal_get_cycles();
    318409if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
    319 printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / cycle %d\n",
    320 __FUNCTION__, thread->trdid, local_cxy, lid, cycle );
     410printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / total %d / cycle %d\n",
     411__FUNCTION__, thread->trdid, local_cxy, lid, LOCAL_CLUSTER->rpc_threads[lid], cycle );
    321412#endif
    322413        }
     
    476567        busylock_release( &sched->lock );
    477568
    478 #if DEBUG_SCHED_YIELD
     569#if (DEBUG_SCHED_YIELD & 1)
    479570if( sched->trace )
    480571printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
     
    519610    remote_busylock_acquire( lock_xp );
    520611
    521     nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
    522     local_cxy , core->lid, sched->current, (uint32_t)hal_get_cycles() );
     612    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
     613    local_cxy , core->lid, sched->current, LOCAL_CLUSTER->rpc_threads[lid],
     614    (uint32_t)hal_get_cycles() );
    523615
    524616    // display kernel threads
     
    564656"illegal cluster %x\n", cxy );
    565657
    566 // check lid
    567658assert( (lid < hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) ) ),
    568659"illegal core index %d\n", lid );
     
    590681    remote_busylock_acquire( lock_xp );
    591682
     683    // get rpc_threads
     684    uint32_t rpcs = hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->rpc_threads[lid] ) );
     685 
    592686    // display header
    593     nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
    594     cxy , lid, current, (uint32_t)hal_get_cycles() );
     687    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
     688    cxy , lid, current, rpcs, (uint32_t)hal_get_cycles() );
    595689
    596690    // display kernel threads
  • trunk/kernel/kern/thread.c

    r581 r583  
    224224
    225225        // update DQDT
    226     dqdt_update_threads( 1 );
     226    dqdt_increment_threads();
    227227
    228228#if DEBUG_THREAD_INIT
     
    768768    hal_cpu_context_init( thread );
    769769
     770    // set THREAD_BLOCKED_IDLE for DEV threads
     771    if( type == THREAD_DEV ) thread->blocked |= THREAD_BLOCKED_IDLE;
    770772
    771773#if DEBUG_THREAD_KERNEL_CREATE
     
    815817///////////////////////////////////////////////////////////////////////////////////////
    816818// TODO: check that all memory dynamically allocated during thread execution
    817 // has been released, using a cache of mmap requests. [AG]
     819// has been released => check vmm destroy for MMAP vsegs [AG]
    818820///////////////////////////////////////////////////////////////////////////////////////
    819 bool_t thread_destroy( thread_t * thread )
     821void thread_destroy( thread_t * thread )
    820822{
    821823    reg_t        save_sr;
    822     bool_t       last_thread;
    823824
    824825    process_t  * process    = thread->process;
     
    826827
    827828#if DEBUG_THREAD_DESTROY
    828 uint32_t cycle = (uint32_t)hal_get_cycles();
     829uint32_t   cycle = (uint32_t)hal_get_cycles();
     830thread_t * this  = CURRENT_THREAD;
    829831if( DEBUG_THREAD_DESTROY < cycle )
    830 printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n",
    831 __FUNCTION__, CURRENT_THREAD, thread->trdid, process->pid, cycle );
    832 #endif
    833 
    834 // check busylocks counter
    835 assert( (thread->busylocks == 0) ,
    836 "busylock not released for thread %x in process %x", thread->trdid, process->pid );
     832printk("\n[DBG] %s : thread[%x,%x] enter to destroy thread[%x,%x] / cycle %d\n",
     833__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
     834#endif
     835
     836    // check busylocks counter
     837    thread_assert_can_yield( thread , __FUNCTION__ );
    837838
    838839    // update intrumentation values
     
    852853        hal_restore_irq( save_sr );
    853854
    854     // remove thread from process th_tbl[]
    855     last_thread = process_remove_thread( thread );
    856        
    857     // update DQDT
    858     dqdt_update_threads( -1 );
    859 
    860855    // invalidate thread descriptor
    861856        thread->signature = 0;
     
    867862cycle = (uint32_t)hal_get_cycles();
    868863if( DEBUG_THREAD_DESTROY < cycle )
    869 printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / last %d / cycle %d\n",
    870 __FUNCTION__, CURRENT_THREAD, thread->trdid, process->pid, last_thread / cycle );
    871 #endif
    872 
    873     return last_thread;
     864printk("\n[DBG] %s : thread[%x,%x] exit / destroyed thread[%x,%x] / cycle %d\n",
     865__FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
     866#endif
    874867
    875868}   // end thread_destroy()
     
    10231016uint32_t cycle  = (uint32_t)hal_get_cycles();
    10241017if( DEBUG_THREAD_DELETE < cycle )
    1025 printk("\n[DBG] %s : thread %x in process %x enters / target thread %x / cycle %d\n",
    1026 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid, target_ptr->trdid, cycle );
    1027 #endif
    1028 
    1029 // check killer thread can yield
    1030 assert( (killer_ptr->busylocks == 0),
    1031 "cannot yield : busylocks = %d\n", killer_ptr->busylocks );
     1018printk("\n[DBG] %s : killer[%x,%x] enters / target[%x,%x] / cycle %d\n",
     1019__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid, 
     1020target_ptr->process->pid, target_ptr->trdid, cycle );
     1021#endif
    10321022
    10331023// check target thread is not the main thread, because the main thread
     
    10361026"target thread cannot be the main thread\n" );
    10371027
    1038     // block the target thread
    1039     thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
    1040 
    1041     // synchronize with the joining thread if attached
    1042     if( target_attached && (is_forced == false) )
    1043     {
    1044 
    1045 #if (DEBUG_THREAD_DELETE & 1)
    1046 if( DEBUG_THREAD_DELETE < cycle )
    1047 printk("\n[DBG] %s : thread %x in process %x / target thread is attached\n",
    1048 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
    1049 #endif
     1028    // check killer thread can yield
     1029    thread_assert_can_yield( killer_ptr , __FUNCTION__ );
     1030
     1031    // if the target thread is attached, we must synchronize with the joining thread
     1032    // before blocking and marking the target thread for delete.
     1033
     1034    if( target_attached && (is_forced == false) ) // synchronize with joining thread
     1035    {
    10501036        // build extended pointers on target thread join fields
    10511037        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
     
    10611047        target_join_done = ((hal_remote_l32( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
    10621048   
    1063         if( target_join_done )  // joining thread arrived first => unblock the joining thread
     1049        if( target_join_done )                     // joining thread arrived first
    10641050        {
    1065 
    1066 #if (DEBUG_THREAD_DELETE & 1)
    1067 if( DEBUG_THREAD_DELETE < cycle )
    1068 printk("\n[DBG] %s : thread %x in process %x / joining thread arrived first\n",
    1069 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
    1070 #endif
    10711051            // get extended pointer on joining thread
    10721052            joining_xp  = (xptr_t)hal_remote_l64( target_join_xp_xp );
     
    10831063            remote_busylock_release( target_join_lock_xp );
    10841064
     1065            // block the target thread
     1066            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
     1067
    10851068            // set the REQ_DELETE flag in target thread descriptor
    10861069            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
    10871070
    1088             // restore IRQs
     1071            // exit critical section
    10891072            hal_restore_irq( save_sr );
    1090         }
    1091         else                // killer thread arrived first => register flags and deschedule
    1092         {
    1093 
    1094 #if (DEBUG_THREAD_DELETE & 1)
    1095 if( DEBUG_THREAD_DELETE < cycle )
    1096 printk("\n[DBG] %s : thread %x in process %x / killer thread arrived first\n",
    1097 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
    1098 #endif
    1099             // set the kill_done flag in target thread
    1100             hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
    1101 
    1102             // block this thread on BLOCKED_JOIN
    1103             thread_block( killer_xp , THREAD_BLOCKED_JOIN );
    1104 
    1105             // set extended pointer on killer thread in target thread
    1106             hal_remote_s64( target_join_xp_xp , killer_xp );
    1107 
    1108             // release the join_lock in target thread descriptor
    1109             remote_busylock_release( target_join_lock_xp );
    1110 
    1111 #if (DEBUG_THREAD_DELETE & 1)
    1112 if( DEBUG_THREAD_DELETE < cycle )
    1113 printk("\n[DBG] %s : thread %x in process %x / killer thread deschedule\n",
    1114 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
    1115 #endif
    1116             // deschedule
    1117             sched_yield( "killer thread wait joining thread" );
    1118 
    1119 #if (DEBUG_THREAD_DELETE & 1)
    1120 if( DEBUG_THREAD_DELETE < cycle )
    1121 printk("\n[DBG] %s : thread %x in process %x / killer thread resume\n",
    1122 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
    1123 #endif
    1124             // set the REQ_DELETE flag in target thread descriptor
    1125             hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
    1126 
    1127             // restore IRQs
    1128             hal_restore_irq( save_sr );
    1129         }
    1130     }
    1131     else                                                   // target thread not attached
    1132     {
    1133         // set the REQ_DELETE flag in target thread descriptor
    1134         hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
    1135     }
    11361073
    11371074#if DEBUG_THREAD_DELETE
    11381075cycle  = (uint32_t)hal_get_cycles;
    11391076if( DEBUG_THREAD_DELETE < cycle )
    1140 printk("\n[DBG] %s : thread %x in process %x exit / target thread %x / cycle %d\n",
    1141 __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid, target_ptr->trdid, cycle );
    1142 #endif
     1077printk("\n[DBG] %s : killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
     1078__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
     1079target_ptr->process->pid, target_ptr->trdid, cycle );
     1080#endif
     1081
     1082        }
     1083        else                                      // killer thread arrived first
     1084        {
     1085            // set the kill_done flag in target thread
     1086            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
     1087
     1088            // block this thread on BLOCKED_JOIN
     1089            thread_block( killer_xp , THREAD_BLOCKED_JOIN );
     1090
     1091            // set extended pointer on killer thread in target thread
     1092            hal_remote_s64( target_join_xp_xp , killer_xp );
     1093
     1094            // release the join_lock in target thread descriptor
     1095            remote_busylock_release( target_join_lock_xp );
     1096
     1097#if DEBUG_THREAD_DELETE
     1098cycle  = (uint32_t)hal_get_cycles;
     1099if( DEBUG_THREAD_DELETE < cycle )
     1100printk("\n[DBG] %s : killer[%x,%x] deschedules / target[%x,%x] not completed / cycle %d\n",
     1101__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
     1102target_ptr->process->pid, target_ptr->trdid, cycle );
     1103#endif
     1104            // deschedule
     1105            sched_yield( "killer thread wait joining thread" );
     1106
     1107            // block the target thread
     1108            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
     1109
     1110            // set the REQ_DELETE flag in target thread descriptor
     1111            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
     1112
     1113            // exit critical section
     1114            hal_restore_irq( save_sr );
     1115
     1116#if DEBUG_THREAD_DELETE
     1117cycle  = (uint32_t)hal_get_cycles;
     1118if( DEBUG_THREAD_DELETE < cycle )
     1119printk("\n[DBG] %s : killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
     1120__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
     1121target_ptr->process->pid, target_ptr->trdid, cycle );
     1122#endif
     1123
     1124        }
     1125    }
     1126    else                     // no synchronization with joining thread required
     1127    {
     1128        // block the target thread
     1129        thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
     1130
     1131        // set the REQ_DELETE flag in target thread descriptor
     1132        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
     1133
     1134#if DEBUG_THREAD_DELETE
     1135cycle  = (uint32_t)hal_get_cycles;
     1136if( DEBUG_THREAD_DELETE < cycle )
     1137printk("\n[DBG] %s : killer[%x,%x] exit / target [%x,%x] marked / no join / cycle %d\n",
     1138__FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
     1139target_ptr->process->pid, target_ptr->trdid, cycle );
     1140#endif
     1141
     1142    }
    11431143
    11441144}  // end thread_delete()
     
    11551155
    11561156        // force core to low-power mode (optional)
    1157         if( CONFIG_THREAD_IDLE_MODE_SLEEP )
     1157        if( CONFIG_SCHED_IDLE_MODE_SLEEP )
    11581158        {
    11591159
     
    13541354#if DEBUG_BUSYLOCK
    13551355
    1356 // get root of list of taken busylocks
     1356// scan list of busylocks
     1357xptr_t    iter_xp;
    13571358xptr_t    root_xp  = XPTR( local_cxy , &thread->busylocks_root );
    1358 xptr_t    iter_xp;
    1359 
    1360 // scan list of busylocks
    13611359XLIST_FOREACH( root_xp , iter_xp )
    13621360{
  • trunk/kernel/kern/thread.h

    r580 r583  
    7272#define THREAD_FLAG_JOIN_DONE    0x0002  /*! Parent thread made a join request        */
    7373#define THREAD_FLAG_KILL_DONE    0x0004  /*! This thread received a kill request      */
    74 #define THREAD_FLAG_SCHED        0x0008  /*! Scheduling required for this thread      */
    7574#define THREAD_FLAG_REQ_ACK      0x0010  /*! Acknowledge required from scheduler      */
    7675#define THREAD_FLAG_REQ_DELETE   0x0020  /*! Destruction required from scheduler      */
     
    334333 * is marked for delete. This include the thread descriptor itself, the associated
    335334 * CPU and FPU context, and the physical memory allocated for an user thread local stack.
    336  * The destroyed thread is removed from the local process th_tbl[] array, and returns
    337  * true when the destroyed thread was the last thread registered in process.
    338335 ***************************************************************************************
    339336 * @ thread  : pointer on the thread descriptor to release.
    340337 * @ return true, if the thread was the last registerd thread in local process.
    341338 **************************************************************************************/
    342 bool_t thread_destroy( thread_t * thread );
     339void thread_destroy( thread_t * thread );
    343340
    344341/***************************************************************************************
     
    390387 * to asynchronously delete the target thread, at the next scheduling point.
    391388 * The calling thread can run in any cluster, as it uses remote accesses, but
    392  * the target thread cannot be the main thread of the process identified by the <pid>,
    393  * because the main thread must be deleted by the parent process argument.
     389 * the target thread cannot be the main thread of the process identified by the <pid>
     390 * argument, because the main thread must be deleted by the parent process.
    394391 * If the target thread is running in "attached" mode, and the <is_forced> argument
    395392 * is false, this function implements the required sychronisation with the joining
    396  * thread, blocking the calling thread until the pthread_join() syscall is executed.
     393 * thread, blocking the killer thread until the pthread_join() syscall is executed
     394 * by the joining thread.
    397395 ***************************************************************************************
    398396 * @ thread_xp   : extended pointer on the target thread.
Note: See TracChangeset for help on using the changeset viewer.