Changeset 624 for trunk/kernel


Timestamp: Mar 12, 2019, 1:37:38 PM (6 years ago)
Author: alain
Message:

Fix several bugs to use the instruction MMU in kernel mode
instead of the instruction address extension register,
and remove the "kentry" segment.

This version is running on the "tsar_generic_iob" platform.

One interesting bug: the cp0_ebase register defining the kernel entry point
(for interrupts, exceptions and syscalls) must be initialized
early in kernel_init(), because the VFS initialisation done by
kernel_init() uses RPCs, and RPCs use Inter-Processor Interrupts (IPIs).
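
As a hedged illustration of this ordering constraint, the fragment below simply
reassembles the STEP 3 hunks added to kernel_init() by this changeset (see the
kernel/kern/kernel_init.c diff below); it is a simplified sketch of the relevant
ordering, not the complete function:

    // all cores update the register(s) defining the kernel entry points
    // (cp0_ebase on TSAR-MIPS32) for interrupts, exceptions and syscalls;
    // this must be done before the VFS initialisation, because the VFS
    // initialisation uses RPCs, and RPCs require Inter-Processor Interrupts
    hal_set_kentry();

    // all cores initialize the idle thread descriptor
    thread_idle_init( thread,
                      THREAD_IDLE,
                      &thread_idle_func,
                      NULL,
                      core_lid );

    // core[0] initializes the process_zero descriptor,
    // including the kernel VMM (both GPT and VSL)
    if( core_lid == 0 ) process_zero_create( &process_zero , info );

    // ... the later steps (VFS root, DEVFS, process_init creation)
    // can now safely use RPCs and IPIs ...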

Location: trunk/kernel
Files: 35 edited

  • trunk/kernel/fs/devfs.c

    r623 r624  
    263263            if( chdev_cxy != local_cxy )
    264264            {
    265                 printk("\d[PANIC] in %s : illegal DMA chdev in cluster %x\n",
     265                printk("\n[PANIC] in %s : illegal DMA chdev in cluster %x\n",
    266266                __FUNCTION__, local_cxy );
    267267                hal_core_sleep();
  • trunk/kernel/kern/kernel_init.c

    r623 r624  
    9797process_t            process_zero                            CONFIG_CACHE_LINE_ALIGNED;
    9898
    99 // This variable defines extended pointers on the distributed chdevs
     99// This variable defines a set of extended pointers on the distributed chdevs
    100100__attribute__((section(".kdata")))
    101101chdev_directory_t    chdev_dir                               CONFIG_CACHE_LINE_ALIGNED;
     
    125125vfs_ctx_t            fs_context[FS_TYPES_NR]                 CONFIG_CACHE_LINE_ALIGNED;
    126126
    127 // kernel_init is the entry point defined in hal/tsar_mips32/kernel.ld
    128 // It is used by the bootloader to tranfer control to kernel.
    129 extern void kernel_init( boot_info_t * info );
    130 
    131127// This array is used for debug, and describes the kernel locks usage,
    132128// It must be kept consistent with the defines in kernel_config.h file.
     129__attribute__((section(".kdata")))
    133130char * lock_type_str[] =
    134131{
     
    229226
    230227// intrumentation variables : cumulated costs per syscall type in cluster
     228
     229#if CONFIG_INSTRUMENTATION_SYSCALLS
     230__attribute__((section(".kdata")))
    231231uint32_t   syscalls_cumul_cost[SYSCALLS_NR];
    232232
    233 // intrumentation variables : number of syscalls per syscal type in cluster
     233__attribute__((section(".kdata")))
    234234uint32_t   syscalls_occurences[SYSCALLS_NR];
     235#endif
    235236
    236237///////////////////////////////////////////////////////////////////////////////////////////
     
    978979#if DEBUG_KERNEL_INIT
    979980if( (core_lid ==  0) & (local_cxy == 0) )
    980 printk("\n[%s] : exit barrier 1 : TXT0 initialized / cycle %d\n",
     981printk("\n[%s] exit barrier 1 : TXT0 initialized / cycle %d\n",
    981982__FUNCTION__, (uint32_t)hal_get_cycles() );
    982983#endif
     
    10111012#if DEBUG_KERNEL_INIT
    10121013if( (core_lid ==  0) & (local_cxy == 0) )
    1013 printk("\n[%s] : exit barrier 2 : cluster manager initialized / cycle %d\n",
     1014printk("\n[%s] exit barrier 2 : cluster manager initialized / cycle %d\n",
    10141015__FUNCTION__, (uint32_t)hal_get_cycles() );
    10151016#endif
    10161017
    10171018    /////////////////////////////////////////////////////////////////////////////////
    1018     // STEP 3 : core[0] initializes the process_zero descriptor,
     1019    // STEP 3 : all cores initialize the idle thread descriptor.
     1020    //          core[0] initializes the process_zero descriptor,
    10191021    //          including the kernel VMM (both GPT and VSL)
    10201022    /////////////////////////////////////////////////////////////////////////////////
     
    10241026    core    = &cluster->core_tbl[core_lid];
    10251027
     1028    // all cores update the register(s) defining the kernel
     1029    // entry points for interrupts, exceptions and syscalls,
     1030    // this must be done before VFS initialisation, because
     1031    // kernel_init() uses RPCs requiring IPIs...
     1032    hal_set_kentry();
     1033
     1034    // all cores initialize the idle thread descriptor
     1035    thread_idle_init( thread,
     1036                      THREAD_IDLE,
     1037                      &thread_idle_func,
     1038                      NULL,
     1039                      core_lid );
     1040
    10261041    // core[0] initializes the process_zero descriptor,
    10271042    if( core_lid == 0 ) process_zero_create( &process_zero , info );
     
    10351050#if DEBUG_KERNEL_INIT
    10361051if( (core_lid ==  0) & (local_cxy == 0) )
    1037 printk("\n[%s] : exit barrier 3 : kernel processs initialized / cycle %d\n",
     1052printk("\n[%s] exit barrier 3 : kernel processs initialized / cycle %d\n",
    10381053__FUNCTION__, (uint32_t)hal_get_cycles() );
    10391054#endif
     
    10581073#if DEBUG_KERNEL_INIT
    10591074if( (core_lid ==  0) & (local_cxy == 0) )
    1060 printk("\n[%s] : exit barrier 4 : MMU and IOPIC initialized / cycle %d\n",
     1075printk("\n[%s] exit barrier 4 : MMU and IOPIC initialized / cycle %d\n",
    10611076__FUNCTION__, (uint32_t)hal_get_cycles() );
    10621077#endif
     
    10911106#if DEBUG_KERNEL_INIT
    10921107if( (core_lid ==  0) & (local_cxy == 0) )
    1093 printk("\n[%s] : exit barrier 5 : all chdevs initialised / cycle %d\n",
     1108printk("\n[%s] exit barrier 5 : chdevs initialised / cycle %d\n",
    10941109__FUNCTION__, (uint32_t)hal_get_cycles() );
    10951110#endif
     
    11011116   
    11021117    /////////////////////////////////////////////////////////////////////////////////
    1103     // STEP 6 : All cores enable IPI (Inter Procesor Interrupt),
    1104     //          Alh cores initialize IDLE thread.
    1105     //          Only core[0] in cluster[0] creates the VFS root inode.
     1118    // STEP 6 : all cores enable IPI (Inter Procesor Interrupt),
     1119    //          all cores unblock the idle thread, and register it in scheduler.
     1120    //          core[0] in cluster[0] creates the VFS root inode.
    11061121    //          It access the boot device to initialize the file system context.
    11071122    /////////////////////////////////////////////////////////////////////////////////
     
    11111126    hal_enable_irq( &status );
    11121127
    1113     // all cores initialize the idle thread descriptor
    1114     thread_idle_init( thread,
    1115                       THREAD_IDLE,
    1116                       &thread_idle_func,
    1117                       NULL,
    1118                       core_lid );
    1119 
    1120     // all cores unblock idle thread, and register it in scheduler
     1128    // all cores unblock the idle thread, and register it in scheduler
    11211129    thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
    11221130    core->scheduler.idle = thread;
     
    11711179            cxy_t         vfs_root_cxy = GET_CXY( vfs_root_inode_xp );
    11721180            vfs_inode_t * vfs_root_ptr = GET_PTR( vfs_root_inode_xp );
    1173             hal_remote_s32( XPTR( vfs_root_cxy , &vfs_root_ptr->extend ), INODE_TYPE_DIR );
     1181            hal_remote_s32( XPTR( vfs_root_cxy , &vfs_root_ptr->type ), INODE_TYPE_DIR );
    11741182            hal_remote_spt( XPTR( vfs_root_cxy , &vfs_root_ptr->extend ),
    11751183                            (void*)(intptr_t)root_dir_cluster );
     
    12081216#if DEBUG_KERNEL_INIT
    12091217if( (core_lid ==  0) & (local_cxy == 0) )
    1210 printk("\n[%s] : exit barrier 6 : VFS root (%x,%x) in cluster 0 / cycle %d\n",
     1218printk("\n[%s] exit barrier 6 : VFS root (%x,%x) in cluster 0 / cycle %d\n",
    12111219__FUNCTION__, GET_CXY(process_zero.vfs_root_xp),
    12121220GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() );
     
    12781286#if DEBUG_KERNEL_INIT
    12791287if( (core_lid ==  0) & (local_cxy == 1) )
    1280 printk("\n[%s] : exit barrier 7 : VFS root (%x,%x) in cluster 1 / cycle %d\n",
     1288printk("\n[%s] exit barrier 7 : VFS root (%x,%x) in cluster 1 / cycle %d\n",
    12811289__FUNCTION__, GET_CXY(process_zero.vfs_root_xp),
    12821290GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() );
     
    13281336#if DEBUG_KERNEL_INIT
    13291337if( (core_lid ==  0) & (local_cxy == 0) )
    1330 printk("\n[%s] : exit barrier 8 : DEVFS root initialized in cluster 0 / cycle %d\n",
     1338printk("\n[%s] exit barrier 8 : DEVFS root initialized in cluster 0 / cycle %d\n",
    13311339__FUNCTION__, (uint32_t)hal_get_cycles() );
    13321340#endif
    13331341
    13341342    /////////////////////////////////////////////////////////////////////////////////
    1335     // STEP 9 : All core[0]s complete in parallel the DEVFS initialization.
     1343    // STEP 9 : In all clusters in parallel, core[0] completes DEVFS initialization.
    13361344    //          Each core[0] get the "dev" and "external" extended pointers from
    1337     //          values stored in cluster 0.
    1338     //          Then each core[0] in cluster(i) creates the DEVFS "internal" directory,
    1339     //          and creates the pseudo-files for all chdevs in cluster (i).
     1345    //          values stored in cluster(0), creates the DEVFS "internal" directory,
     1346    //          and creates the pseudo-files for all chdevs in local cluster.
    13401347    /////////////////////////////////////////////////////////////////////////////////
    13411348
     
    13651372#if DEBUG_KERNEL_INIT
    13661373if( (core_lid ==  0) & (local_cxy == 0) )
    1367 printk("\n[%s] : exit barrier 9 : DEVFS initialized in cluster 0 / cycle %d\n",
     1374printk("\n[%s] exit barrier 9 : DEVFS initialized in cluster 0 / cycle %d\n",
    13681375__FUNCTION__, (uint32_t)hal_get_cycles() );
    13691376#endif
     
    13841391       process_init_create();
    13851392    }
     1393
     1394#if DEBUG_KERNEL_INIT
     1395if( (core_lid ==  0) & (local_cxy == 0) )
     1396printk("\n[%s] exit barrier 10 : process_init created in cluster 0 / cycle %d\n",
     1397__FUNCTION__, (uint32_t)hal_get_cycles() );
     1398#endif
    13861399
    13871400#if (DEBUG_KERNEL_INIT & 1)
     
    14441457#endif
    14451458
    1446     // each core updates the register(s) definig the kernel
    1447     // entry points for interrupts, exceptions and syscalls...
    1448     hal_set_kentry();
    1449 
    14501459    // each core activates its private TICK IRQ
    14511460    dev_pic_enable_timer( CONFIG_SCHED_TICK_MS_PERIOD );
  • trunk/kernel/kern/process.c

    r623 r624  
    12091209
    12101210// check th_nr value
    1211 assert( (count > 0) , "process th_nr cannot be 0\n" );
     1211assert( (count > 0) , "process th_nr cannot be 0" );
    12121212
    12131213    // remove thread from th_tbl[]
     
    12491249// check parent process is the reference process
    12501250assert( (parent_process_xp == ref_xp ) ,
    1251 "parent process must be the reference process\n" );
     1251"parent process must be the reference process" );
    12521252
    12531253#if DEBUG_PROCESS_MAKE_FORK
     
    13521352// check main thread LTID
    13531353assert( (LTID_FROM_TRDID(thread->trdid) == 0) ,
    1354 "main thread must have LTID == 0\n" );
     1354"main thread must have LTID == 0" );
    13551355
    13561356#if( DEBUG_PROCESS_MAKE_FORK & 1 )
     
    15521552#endif
    15531553
     1554    // get pointer on VMM
     1555    vmm_t * vmm = &process->vmm;
     1556
    15541557    // get PID from local cluster manager for this kernel process
    15551558    error = cluster_pid_alloc( process , &pid );
     
    15711574    process->term_state = 0;
    15721575
    1573     // initialise kernel GPT and VSL, depending on architecture
    1574     hal_vmm_kernel_init( info );
     1576    // initilise VSL as empty
     1577    vmm->vsegs_nr = 0;
     1578        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
     1579        remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );
     1580
     1581    // initialise GPT as empty
     1582    error = hal_gpt_create( &vmm->gpt );
     1583
     1584    if( error )
     1585    {
     1586        printk("\n[PANIC] in %s : cannot create empty GPT\n", __FUNCTION__ );
     1587        hal_core_sleep();
     1588    }
     1589
     1590    // initialize GPT lock
     1591    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
     1592   
     1593    // create kernel vsegs in GPT and VSL, as required by the hardware architecture
     1594    error = hal_vmm_kernel_init( info );
     1595
     1596    if( error )
     1597    {
     1598        printk("\n[PANIC] in %s : cannot create kernel vsegs in VMM\n", __FUNCTION__ );
     1599        hal_core_sleep();
     1600    }
    15751601
    15761602    // reset th_tbl[] array and associated fields
     
    16291655// check memory allocator
    16301656assert( (process != NULL),
    1631 "no memory for process descriptor in cluster %x\n", local_cxy  );
     1657"no memory for process descriptor in cluster %x", local_cxy  );
    16321658
    16331659    // set the CWD and VFS_ROOT fields in process descriptor
     
    16401666// check PID allocator
    16411667assert( (error == 0),
    1642 "cannot allocate PID in cluster %x\n", local_cxy );
     1668"cannot allocate PID in cluster %x", local_cxy );
    16431669
    16441670// check PID value
    16451671assert( (pid == 1) ,
    1646 "process INIT must be first process in cluster 0\n" );
     1672"process INIT must be first process in cluster 0" );
    16471673
    16481674    // initialize process descriptor / parent is local process_zero
     
    16691695
    16701696assert( (error == 0),
    1671 "failed to open file <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1697"failed to open file <%s>", CONFIG_PROCESS_INIT_PATH );
    16721698
    16731699#if(DEBUG_PROCESS_INIT_CREATE & 1)
     
    16821708
    16831709assert( (error == 0),
    1684 "cannot access .elf file <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1710"cannot access .elf file <%s>", CONFIG_PROCESS_INIT_PATH );
    16851711
    16861712#if(DEBUG_PROCESS_INIT_CREATE & 1)
     
    17261752
    17271753assert( (error == 0),
    1728 "cannot create main thread for <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1754"cannot create main thread for <%s>", CONFIG_PROCESS_INIT_PATH );
    17291755
    17301756assert( (thread->trdid == 0),
    1731 "main thread must have index 0 for <%s>\n", CONFIG_PROCESS_INIT_PATH );
     1757"main thread must have index 0 for <%s>", CONFIG_PROCESS_INIT_PATH );
    17321758
    17331759#if(DEBUG_PROCESS_INIT_CREATE & 1)
     
    18161842
    18171843        assert( (txt_file_xp != XPTR_NULL) ,
    1818         "process must be attached to one TXT terminal\n" );
     1844        "process must be attached to one TXT terminal" );
    18191845
    18201846        // get TXT_RX chdev pointers
     
    20122038    // check owner cluster
    20132039    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
    2014     "process descriptor not in owner cluster\n" );
     2040    "process descriptor not in owner cluster" );
    20152041
    20162042    // get extended pointer on stdin pseudo file
     
    20672093    // check owner cluster
    20682094    assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
    2069     "process descriptor not in owner cluster\n" );
     2095    "process descriptor not in owner cluster" );
    20702096
    20712097    // get extended pointer on stdin pseudo file
     
    21982224pid_t process_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );
    21992225assert( (process_cxy == CXY_FROM_PID( process_pid )) ,
    2200 "process descriptor not in owner cluster\n" );
     2226"process descriptor not in owner cluster" );
    22012227
    22022228    // get extended pointer on stdin pseudo file
  • trunk/kernel/kern/rpc.c

    r623 r624  
    8181    &rpc_vmm_create_vseg_server,           // 27
    8282    &rpc_vmm_set_cow_server,               // 28
    83     &rpc_vmm_display_server,               // 29
     83    &rpc_hal_vmm_display_server,               // 29
    8484};
    8585
     
    27292729
    27302730/////////////////////////////////////////////
    2731 void rpc_vmm_display_client( cxy_t       cxy,
     2731void rpc_hal_vmm_display_client( cxy_t       cxy,
    27322732                             process_t * process,
    27332733                             bool_t      detailed )
     
    27652765
    27662766////////////////////////////////////////
    2767 void rpc_vmm_display_server( xptr_t xp )
     2767void rpc_hal_vmm_display_server( xptr_t xp )
    27682768{
    27692769#if DEBUG_RPC_VMM_DISPLAY
     
    27872787   
    27882788    // call local kernel function
    2789     vmm_display( process , detailed );
     2789    hal_vmm_display( process , detailed );
    27902790
    27912791#if DEBUG_RPC_VMM_DISPLAY
  • trunk/kernel/kern/rpc.h

    r623 r624  
    683683 * @ detailed    : [in]  detailed display if true.
    684684 **********************************************************************************/
    685 void rpc_vmm_display_client( cxy_t              cxy,
     685void rpc_hal_vmm_display_client( cxy_t              cxy,
    686686                             struct process_s * process,
    687687                             bool_t             detailed );
    688688
    689 void rpc_vmm_display_server( xptr_t xp );
     689void rpc_hal_vmm_display_server( xptr_t xp );
    690690
    691691
  • trunk/kernel/kern/scheduler.c

    r619 r624  
    488488 
    489489#if (DEBUG_SCHED_YIELD & 0x1)
    490 if( sched->trace )
     490// if( sched->trace )
     491if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
    491492sched_display( lid );
    492493#endif
     
    541542
    542543#if DEBUG_SCHED_YIELD
    543 if( sched->trace )
     544// if( sched->trace )
     545if( (uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
    544546printk("\n[%s] core[%x,%d] / cause = %s\n"
    545547"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
     
    558560
    559561#if (DEBUG_SCHED_YIELD & 1)
    560 if( sched->trace )
     562// if( sched->trace )
     563if(uint32_t)hal_get_cycles() > DEBUG_SCHED_YIELD )
    561564printk("\n[%s] core[%x,%d] / cause = %s\n"
    562565"      thread %x (%s) (%x,%x) continue / cycle %d\n",
     
    601604
    602605    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
    603     local_cxy , core->lid, sched->current, LOCAL_CLUSTER->rpc_threads[lid],
     606    local_cxy , lid, sched->current, LOCAL_CLUSTER->rpc_threads[lid],
    604607    (uint32_t)hal_get_cycles() );
    605608
  • trunk/kernel/kern/thread.c

    r623 r624  
    389389printk("\n[%s] CPU & FPU contexts created\n",
    390390__FUNCTION__, thread->trdid );
    391 vmm_display( process , true );
     391hal_vmm_display( process , true );
    392392#endif
    393393
     
    689689printk("\n[%s] thread[%x,%x] set CPU context & jump to user code / cycle %d\n",
    690690__FUNCTION__, process->pid, thread->trdid, cycle );
    691 vmm_display( process , true );
     691hal_vmm_display( process , true );
    692692#endif
    693693
     
    13521352        "hold %d busylock(s) / cycle %d\n",
    13531353        func_str, thread->process->pid, thread->trdid,
    1354         thread->busylocks, (uint32_t)hal_get_cycles() );
     1354        thread->busylocks - 1, (uint32_t)hal_get_cycles() );
    13551355
    13561356#if DEBUG_BUSYLOCK
  • trunk/kernel/kernel_config.h

    r623 r624  
    4040
    4141#define DEBUG_BUSYLOCK                    0
    42 #define DEBUG_BUSYLOCK_THREAD_XP          0x0ULL  // selected thread xptr
     42#define DEBUG_BUSYLOCK_PID                0x10001    // thread pid (when detailed debug)
     43#define DEBUG_BUSYLOCK_TRDID              0x10000    // thread trdid (when detailed debug)
    4344                 
    4445#define DEBUG_CHDEV_CMD_RX                0
     
    8485#define DEBUG_FATFS_MOVE_PAGE             0
    8586#define DEBUG_FATFS_NEW_DENTRY            0
    86 #define DEBUG_FATFS_RELEASE_INODE         1
     87#define DEBUG_FATFS_RELEASE_INODE         0
    8788#define DEBUG_FATFS_REMOVE_DENTRY         0
    8889#define DEBUG_FATFS_SYNC_FAT              0
     
    9192#define DEBUG_FATFS_UPDATE_DENTRY         0
    9293
     94#define DEBUG_HAL_EXCEPTIONS              0
    9395#define DEBUG_HAL_GPT_SET_PTE             0
    9496#define DEBUG_HAL_GPT_COPY                0
    9597#define DEBUG_HAL_GPT_CREATE              0
    9698#define DEBUG_HAL_GPT_DESTROY             0
    97 #define DEBUG_HAL_USPACE                  0
     99#define DEBUG_HAL_IOC_RX                  0
     100#define DEBUG_HAL_IOC_TX                  0
     101#define DEBUG_HAL_IRQS                    0
    98102#define DEBUG_HAL_KENTRY                  0
    99 #define DEBUG_HAL_EXCEPTIONS              0
    100 #define DEBUG_HAL_IRQS                    0
    101103#define DEBUG_HAL_TXT_RX                  0
    102104#define DEBUG_HAL_TXT_TX                  0
    103 #define DEBUG_HAL_IOC_RX                  0
    104 #define DEBUG_HAL_IOC_TX                  0
     105#define DEBUG_HAL_USPACE                  0
     106#define DEBUG_HAL_VMM                     0
    105107
    106108#define DEBUG_KCM                         0
     
    162164
    163165#define DEBUG_SCHED_HANDLE_SIGNALS        2
    164 #define DEBUG_SCHED_YIELD                 2    // must be activated by the trace() syscall
     166#define DEBUG_SCHED_YIELD                 0     
    165167#define DEBUG_SCHED_RPC_ACTIVATE          0
    166168
     
    236238#define DEBUG_VFS_OPENDIR                 0
    237239#define DEBUG_VFS_STAT                    0
    238 #define DEBUG_VFS_UNLINK                  1
     240#define DEBUG_VFS_UNLINK                  0
    239241
    240242#define DEBUG_VMM_CREATE_VSEG             0
     
    407409
    408410////////////////////////////////////////////////////////////////////////////////////////////
    409 //                USER SPACE SEGMENTATION / all values are numbers of pages
     411//             32 bits  USER SPACE SEGMENTATION / all values are numbers of pages
    410412////////////////////////////////////////////////////////////////////////////////////////////
    411413
    412414#define CONFIG_VMM_VSPACE_SIZE        0x100000     // virtual space          : 4   Gbytes
    413415
    414 #define CONFIG_VMM_KENTRY_BASE        0x000004     // UTILS zone base        : 16  Kbytes
     416#define CONFIG_VMM_UTILS_BASE         0x000200     // UTILS zone base        : 2   Mbytes
    415417#define CONFIG_VMM_ELF_BASE           0x000400     // ELF zone base          : 4   Mbytes
    416418#define CONFIG_VMM_HEAP_BASE          0x002000     // HEAP zone base         : 32  Mbytes
    417419#define CONFIG_VMM_STACK_BASE         0x0C0000     // STACK zone base        : 3   Gbytes
    418420
    419 #define CONFIG_VMM_KENTRY_SIZE        0x000004     // kentry vseg size       : 16  Kbytes
    420421#define CONFIG_VMM_ARGS_SIZE          0x000004     // args vseg size         : 16  Kbytes
    421422#define CONFIG_VMM_ENVS_SIZE          0x000008     // envs vseg size         : 32  Kbytes
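
    The byte sizes quoted in the comments above follow from the page counts,
    assuming 4 Kbyte pages (CONFIG_PPM_PAGE_SHIFT == 12, an assumption here);
    the small stand-alone program below, using local copies of the new
    kernel_config.h values, reproduces the arithmetic done in vmm.c
    (base = page_count << CONFIG_PPM_PAGE_SHIFT):

        #include <stdio.h>

        #define PAGE_SHIFT        12           // assumed: 4 Kbyte pages
        #define VMM_UTILS_BASE    0x000200u    // page counts from kernel_config.h (r624)
        #define VMM_ELF_BASE      0x000400u
        #define VMM_HEAP_BASE     0x002000u
        #define VMM_STACK_BASE    0x0C0000u

        int main( void )
        {
            // 0x000200 << 12 = 0x00200000 = 2  Mbytes
            // 0x000400 << 12 = 0x00400000 = 4  Mbytes
            // 0x002000 << 12 = 0x02000000 = 32 Mbytes
            // 0x0C0000 << 12 = 0xC0000000 = 3  Gbytes
            printf( "UTILS base : 0x%08x\n", VMM_UTILS_BASE << PAGE_SHIFT );
            printf( "ELF   base : 0x%08x\n", VMM_ELF_BASE   << PAGE_SHIFT );
            printf( "HEAP  base : 0x%08x\n", VMM_HEAP_BASE  << PAGE_SHIFT );
            printf( "STACK base : 0x%08x\n", VMM_STACK_BASE << PAGE_SHIFT );
            return 0;
        }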
  • trunk/kernel/libk/busylock.c

    r600 r624  
    22 * busylock.c - local kernel-busy waiting lock implementation.
    33 *
    4  * Authors     Alain Greiner (2016,2017,2018)
     4 * Authors     Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    7676
    7777#if DEBUG_BUSYLOCK
    78 if( (lock->type != LOCK_CHDEV_TXT0) &&
    79     ((uint32_t)hal_get_cycles() > DEBUG_BUSYLOCK) )
     78if( lock->type != LOCK_CHDEV_TXT0 )
    8079{
     80    // update thread list of busylocks
    8181    xptr_t root_xp = XPTR( local_cxy , &this->busylocks_root );
    82 
    83     // update thread list of busylocks
    8482    xlist_add_last( root_xp , XPTR( local_cxy , &lock->xlist ) );
    8583}
    8684#endif
    8785
    88 #if( DEBUG_BUSYLOCK && DEBUG_BUSYLOCK_THREAD_XP )
     86#if( DEBUG_BUSYLOCK & 1 )
    8987if( (lock->type != LOCK_CHDEV_TXT0) &&
    90     (XPTR( local_cxy , this ) == DEBUG_BUSYLOCK_THREAD_XP) )
     88    (this->process->pid == DEBUG_BUSYLOCK_PID) &&
     89    (this->trdid == DEBUG_BUSYLOCK_TRDID) )
    9190{
    92     // get cluster and local pointer of target thread
    93     cxy_t      thread_cxy = GET_CXY( DEBUG_BUSYLOCK_THREAD_XP );
    94     thread_t * thread_ptr = GET_PTR( DEBUG_BUSYLOCK_THREAD_XP );
    95 
    96     // display message on kernel TXT0
    9791    printk("\n[%s] thread[%x,%x] ACQUIRE lock %s\n",
    9892    __FUNCTION__, this->process->pid, this->trdid, lock_type_str[lock->type] );
     
    120114
    121115#if DEBUG_BUSYLOCK
    122 if( (lock->type != LOCK_CHDEV_TXT0) &&
    123     ((uint32_t)hal_get_cycles() > DEBUG_BUSYLOCK) )
     116if( lock->type != LOCK_CHDEV_TXT0 )
    124117{
    125118    // remove lock from thread list of busylocks
     
    128121#endif
    129122
    130 #if( DEBUG_BUSYLOCK && DEBUG_BUSYLOCK_THREAD_XP )
     123#if( DEBUG_BUSYLOCK & 1 )
    131124if( (lock->type != LOCK_CHDEV_TXT0) &&
    132     (XPTR( local_cxy , this ) == DEBUG_BUSYLOCK_THREAD_XP) )
     125    (this->process->pid == DEBUG_BUSYLOCK_PID) &&
     126    (this->trdid == DEBUG_BUSYLOCK_TRDID) )
    133127{
    134     // get cluster and local pointer of target thread
    135     cxy_t      thread_cxy = GET_CXY( DEBUG_BUSYLOCK_THREAD_XP );
    136     thread_t * thread_ptr = GET_PTR( DEBUG_BUSYLOCK_THREAD_XP );
    137 
    138     // display message on kernel TXT0
    139128    printk("\n[%s] thread[%x,%x] RELEASE lock %s\n",
    140129    __FUNCTION__, this->process->pid, this->trdid, lock_type_str[lock->type] );
  • trunk/kernel/libk/busylock.h

    r623 r624  
    22 * busylock.h: local kernel busy-waiting lock definition.     
    33 *
    4  * Authors  Alain Greiner (2016,2017,2018)
     4 * Authors  Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3434 * a shared object located in a given cluster, made by thread(s) running in same cluster.
    3535 * It uses a busy waiting policy when the lock is taken by another thread, and should
    36  * be used to execute very short actions, such as accessing basic allocators, or higher
     36 * be used to execute very short actions, such as accessing basic allocators or higher
    3737 * level synchronisation objects (barriers, queuelocks, or rwlocks).
    38  * WARNING: a thread cannot yield when it is owning a busylock.
    3938 *
    4039 * - To acquire the lock, we use a ticket policy to avoid starvation: the calling thread
     
    4241 *   value  until current == ticket.
    4342 *
    44  * - To release the lock, the owner thread increments the "current" value,
    45  *   decrements its busylocks counter.
     43 * - To release the lock, the owner thread increments the "current" value.
    4644 *
    47  * - When a thread takes a busylock, it enters a critical section: the busylock_acquire()
     45 * - When a thread takes a busylock, it enters a critical section: the acquire()
    4846 *   function disables the IRQs, takes the lock, increments the thread busylocks counter,
    49  *   and save the SR in lock descriptor and returns.
     47 *   save the SR in lock descriptor and returns.
    5048 *
    51  * - The busylock_release() function releases the lock, decrements the thread busylock
    52  *   counter, restores the SR to exit the critical section, and returns
     49 * - The release() function releases the lock, decrements the thread busylock
     50 *   counter, restores the SR to exit the critical section, and returns.
    5351 *
    54  * - If a thread owning a busylock (local or remote) tries to deschedule, the scheduler
    55  *   signals a kernel panic.
     52 * WARNING: a thread cannot yield when it is holding a busylock (local or remote).
     53 *
     54 * This rule is checked by all functions containing a thread_yield() AND by the scheduler,
     55 * thanks to the busylocks counter stored in the calling thread descriptor.
     56 * 1) all functions call "thread_assert_can_yield()" before calling "thread_yield()".
     57 * 2) The scheduler checks that the calling thread does not hold any busylock.
     58 * In case of violation the core goes to sleep after a [PANIC] message on TXT0.
    5659 ******************************************************************************************/
    5760
    5861/*******************************************************************************************
    5962 * This structure defines a busylock.
    60  * The <type> and <xlist> fields are used for debug.
    61  * The type defines the lock usage as detailed in the kernel_config.h file.
     63 * The <xlist> field is only used when DEBUG_BUSYLOCK is set.
     64 * The <type> field defines the lock usage as detailed in the kernel_config.h file.
    6265******************************************************************************************/
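
    The ticket policy documented in the busylock.h header above can be
    summarised by the user-space sketch below. All names are hypothetical
    (sketch_* prefix), and the kernel-specific parts (IRQ masking, SR saving,
    the per-thread busylocks counter) are only indicated by comments; this is
    not the ALMOS-MKH implementation:

        #include <stdint.h>

        // illustrative ticket-based busy-waiting lock
        typedef struct sketch_busylock_s
        {
            volatile uint32_t ticket;     // next ticket to allocate
            volatile uint32_t current;    // ticket currently owning the lock
            uint32_t          type;       // lock usage, for debug
        } sketch_busylock_t;

        static void sketch_busylock_acquire( sketch_busylock_t * lock )
        {
            // kernel version: disable IRQs and save the SR in the lock descriptor

            // take a ticket with an atomic fetch-and-add
            uint32_t my_ticket = __sync_fetch_and_add( &lock->ticket , 1 );

            // busy-wait until current == ticket
            while( lock->current != my_ticket ) { /* poll */ }

            // kernel version: increment the calling thread busylocks counter
        }

        static void sketch_busylock_release( sketch_busylock_t * lock )
        {
            // kernel version: decrement the thread busylocks counter

            // pass the lock to the next waiting ticket
            __sync_fetch_and_add( &lock->current , 1 );

            // kernel version: restore the saved SR to exit the critical section
        }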
    6366
  • trunk/kernel/libk/remote_busylock.c

    r619 r624  
    22 * remote_busylock.c - remote kernel busy-waiting lock implementation.
    33 *
    4  * Authors     Alain Greiner (2016,2017,2018)
     4 * Authors     Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    8787#if DEBUG_BUSYLOCK
    8888uint32_t type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->type ) );
    89 if( (type != LOCK_CHDEV_TXT0) &&
    90     ((uint32_t)hal_get_cycles() > DEBUG_BUSYLOCK) )
     89if( type != LOCK_CHDEV_TXT0 )
    9190{
     91    // update thread list of busyslocks
    9292    xptr_t root_xp = XPTR( local_cxy , &this->busylocks_root );
    93 
    94     // update thread list of busyslocks
    9593    xlist_add_last( root_xp , XPTR( lock_cxy  , &lock_ptr->xlist ) );
    9694}
    9795#endif
    9896
    99 #if( DEBUG_BUSYLOCK && DEBUG_BUSYLOCK_THREAD_XP )
     97#if( DEBUG_BUSYLOCK & 1 )
    10098if( (type != LOCK_CHDEV_TXT0) &&
    101     (XPTR( local_cxy , this ) == DEBUG_BUSYLOCK_THREAD_XP) )
     99    (this->process->pid == DEBUG_BUSYLOCK_PID) &&
     100    (this->trdid == DEBUG_BUSYLOCK_TRDID) )
    102101{
    103102    printk("\n[%s] thread[%x,%x] ACQUIRE lock %s\n",
     
    131130#if DEBUG_BUSYLOCK
    132131uint32_t type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->type ) );
    133 if( (type != LOCK_CHDEV_TXT0) &&
    134     (XPTR( local_cxy , this ) == DEBUG_BUSYLOCK_THREAD_XP) &&
    135     ((uint32_t)hal_get_cycles() > DEBUG_BUSYLOCK) )
     132if( type != LOCK_CHDEV_TXT0 )
    136133{
    137134    // remove lock from thread list of busyslocks
     
    140137#endif
    141138
    142 #if (DEBUG_BUSYLOCK && DEBUG_BUSYLOCK_THREAD_XP )
     139#if( DEBUG_BUSYLOCK & 1 )
    143140if( (type != LOCK_CHDEV_TXT0) &&
    144     (XPTR( local_cxy , this ) == DEBUG_BUSYLOCK_THREAD_XP) )
     141    (this->process->pid == DEBUG_BUSYLOCK_PID) &&
     142    (this->trdid == DEBUG_BUSYLOCK_TRDID) )
    145143{
    146144    printk("\n[%s] thread[%x,%x] RELEASE lock %s\n",
  • trunk/kernel/libk/remote_busylock.h

    r619 r624  
    22 * remote_busylock.h: remote kernel busy-waiting lock definition.     
    33 *
    4  * Authors  Alain Greiner (2016,2017,2018)
     4 * Authors  Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    3737 * higher level synchronisation objects, such as remote_queuelock and remote_rwlock.
    3838 *
    39  * WARNING: a thread cannot yield when it is owning a busylock (local or remote).
    40  *
    4139 * - To acquire the lock, we use a ticket policy to avoid starvation: the calling thread
    4240 *   makes an atomic increment on a "ticket" allocator, and keep polling the "current"
    4341 *   value  until current == ticket.
    4442 *
    45  * - To release the lock, the owner thread increments the "current" value,
    46  *   decrements its busylocks counter.
     43 * - To release the lock, the owner thread increments the "current" value.
    4744 *
    48  * - When a thread takes a busylock, it enters a critical section: the busylock_acquire()
     45 * - When a thread takes a busylock, it enters a critical section: the acquire()
    4946 *   function disables the IRQs, takes the lock, increments the thread busylocks counter,
    50  *    save the SR in the lock descriptor and returns.
     47 *   save the SR in the lock descriptor and returns.
    5148 *
    52  * - The busylock_release() function decrements the thread busylock counter,
    53  *   restores the SR to exit the critical section, and returns
     49 * - The release() function releases the lock, decrements the thread busylock
     50 *   counter, restores the SR to exit the critical section, and returns.
    5451 *
    55  * - If a thread owning a busylock (local or remote) tries to deschedule, the scheduler
    56  *   signals a kernel panic.
     52 * WARNING: a thread cannot yield when it is holding a busylock (local or remote).
     53 *
     54 * This rule is checked by all functions containing a thread_yield() AND by the scheduler,
     55 * thanks to the busylocks counter stored in the calling thread descriptor.
     56 * 1) all functions call "thread_assert_can_yield()" before calling "thread_yield()".
     57 * 2) The scheduler checks that the calling thread does not hold any busylock.
     58 * In case of violation the core goes to sleep after a [PANIC] message on TXT0.
    5759 ******************************************************************************************/
    5860
  • trunk/kernel/mm/mapper.c

    r623 r624  
    440440            ppm_page_do_dirty( page_xp );
    441441            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
    442 
    443 putb(" in mapper_move_user()" , map_ptr , page_count );
    444 
    445442        }
    446443
  • trunk/kernel/mm/vmm.c

    r623 r624  
    7676    vmm_t   * vmm = &process->vmm;
    7777
    78     // initialize local list of vsegs
     78    // initialize VSL (empty)
    7979    vmm->vsegs_nr = 0;
    8080        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
    8181        remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );
    8282
    83 assert( ((CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE)
    84 <= CONFIG_VMM_ELF_BASE) , "UTILS zone too small\n" );
    85 
    86 assert( (CONFIG_THREADS_MAX_PER_CLUSTER <= 32) ,
    87 "no more than 32 threads per cluster for a single process\n");
     83assert( ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
     84         (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) ,
     85         "UTILS zone too small\n" );
    8886
    8987assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
     
    9290
    9391    // register args vseg in VSL
    94     base = (CONFIG_VMM_KENTRY_BASE +
    95             CONFIG_VMM_KENTRY_SIZE ) << CONFIG_PPM_PAGE_SHIFT;
     92    base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
    9693    size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
    9794
     
    114111
    115112    // register the envs vseg in VSL
    116     base = (CONFIG_VMM_KENTRY_BASE +
    117             CONFIG_VMM_KENTRY_SIZE +
    118             CONFIG_VMM_ARGS_SIZE   ) << CONFIG_PPM_PAGE_SHIFT;
     113    base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
    119114    size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
    120115
     
    148143    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
    149144
    150     // update process VMM with kernel vsegs
     145    // update process VMM with kernel vsegs as required by the hardware architecture
    151146    error = hal_vmm_kernel_update( process );
    152147
     
    185180}  // end vmm_init()
    186181
    187 //////////////////////////////////////
    188 void vmm_display( process_t * process,
    189                   bool_t      mapping )
    190 {
    191     vmm_t * vmm = &process->vmm;
    192     gpt_t * gpt = &vmm->gpt;
    193 
    194     printk("\n***** VSL and GPT(%x) for process %x in cluster %x\n\n",
    195     process->vmm.gpt.ptr , process->pid , local_cxy );
    196 
    197     // get lock protecting the VSL and the GPT
    198     remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->vsegs_lock ) );
    199     remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->gpt_lock ) );
    200 
    201     // scan the list of vsegs
    202     xptr_t         root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    203     xptr_t         iter_xp;
    204     xptr_t         vseg_xp;
    205     vseg_t       * vseg;
    206     XLIST_FOREACH( root_xp , iter_xp )
    207     {
    208         vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    209         vseg    = GET_PTR( vseg_xp );
    210 
    211         printk(" - %s : base = %X / size = %X / npages = %d\n",
    212         vseg_type_str( vseg->type ) , vseg->min , vseg->max - vseg->min , vseg->vpn_size );
    213 
    214         if( mapping )
    215         {
    216             vpn_t    vpn;
    217             ppn_t    ppn;
    218             uint32_t attr;
    219             vpn_t    base = vseg->vpn_base;
    220             vpn_t    size = vseg->vpn_size;
    221             for( vpn = base ; vpn < (base+size) ; vpn++ )
    222             {
    223                 hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );
    224                 if( attr & GPT_MAPPED )
    225                 {
    226                     printk("    . vpn = %X / attr = %X / ppn = %X\n", vpn , attr , ppn );
    227                 }
    228             }
    229         }
    230     }
    231 
    232     // release the locks
    233     remote_rwlock_rd_release( XPTR( local_cxy , &vmm->vsegs_lock ) );
    234     remote_rwlock_rd_release( XPTR( local_cxy , &vmm->gpt_lock ) );
    235 
    236 }  // vmm_display()
    237182
    238183//////////////////////////////////////////
     
    248193    // update vseg descriptor
    249194    vseg->vmm = vmm;
     195
     196    // increment vsegs number
     197    vmm->vsegs_nr++;
    250198
    251199    // add vseg in vmm list
     
    735683
    736684    // copy base addresses from parent VMM to child VMM
    737     child_vmm->kent_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->kent_vpn_base));
    738685    child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base));
    739686    child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base));
     
    773720#if (DEBUG_VMM_DESTROY & 1 )
    774721if( DEBUG_VMM_DESTROY < cycle )
    775 vmm_display( process , true );
     722hal_vmm_display( process , true );
    776723#endif
    777724
     
    785732    // (don't use a FOREACH for item deletion in xlist)
    786733
    787 uint32_t count = 0;
    788 
    789         while( !xlist_is_empty( root_xp ) && (count < 10 ) )
     734        while( !xlist_is_empty( root_xp ) )
    790735        {
    791736        // get pointer on first vseg in VSL
     
    801746__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
    802747#endif
    803 
    804 count++;
    805748
    806749        }
     
    10931036    // check collisions
    10941037    vseg = vmm_check_conflict( process , vpn_base , vpn_size );
     1038
    10951039    if( vseg != NULL )
    10961040    {
     
    11621106    xptr_t      lock_xp;    // extended pointer on lock protecting forks counter
    11631107    uint32_t    forks;      // actual number of pendinf forks
    1164     uint32_t    type;       // vseg type
     1108    uint32_t    vseg_type;  // vseg type
    11651109
    11661110#if DEBUG_VMM_DELETE_VSEG
     
    11971141
    11981142    // get relevant vseg infos
    1199     type    = vseg->type;
    1200     vpn_min = vseg->vpn_base;
    1201     vpn_max = vpn_min + vseg->vpn_size;
     1143    vseg_type = vseg->type;
     1144    vpn_min   = vseg->vpn_base;
     1145    vpn_max   = vpn_min + vseg->vpn_size;
    12021146
    12031147    // loop to invalidate all vseg PTEs in GPT
    12041148        for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    12051149    {
    1206         // get GPT entry
     1150        // get ppn and attr from GPT entry
    12071151        hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );
    12081152
     
    12171161            hal_gpt_reset_pte( gpt , vpn );
    12181162
    1219             // the allocated page is not released to KMEM for kernel vseg
    1220             if( (type != VSEG_TYPE_KCODE) &&
    1221                 (type != VSEG_TYPE_KDATA) &&
    1222                 (type != VSEG_TYPE_KDEV ) )
     1163            // the allocated page is not released to for kernel vseg
     1164            if( (vseg_type != VSEG_TYPE_KCODE) &&
     1165                (vseg_type != VSEG_TYPE_KDATA) &&
     1166                (vseg_type != VSEG_TYPE_KDEV ) )
    12231167            {
    1224 
    1225 // FIXME This code must be completely re-written, as the actual release must depend on
    1226 // - the vseg type
    1227 // - the reference cluster
    1228 // - the page refcount and/or the forks counter
    1229 
    12301168                // get extended pointer on physical page descriptor
    12311169                page_xp  = ppm_ppn2page( ppn );
     
    12331171                page_ptr = GET_PTR( page_xp );
    12341172
     1173// FIXME This code must be re-written, as the actual release depends on vseg type,
     1174// the reference cluster, the page refcount and/or the forks counter...
     1175
    12351176                // get extended pointers on forks and lock fields
    12361177                forks_xp = XPTR( page_cxy , &page_ptr->forks );
     
    12451186                if( forks )  // decrement pending forks counter
    12461187                {
     1188                    // update forks counter
    12471189                    hal_remote_atomic_add( forks_xp , -1 );
     1190
     1191                    // release the lock protecting the page
     1192                    remote_busylock_release( lock_xp );
    12481193                } 
    12491194                else         // release physical page to relevant cluster
    12501195                {
     1196                    // release the lock protecting the page
     1197                    remote_busylock_release( lock_xp );
     1198
     1199                    // release the page to kmem
    12511200                    if( page_cxy == local_cxy )   // local cluster
    12521201                    {
     
    12661215                }
    12671216
    1268                 // release the lock protecting the page
    1269                 remote_busylock_release( lock_xp );
    12701217            }
    12711218        }
  • trunk/kernel/mm/vmm.h

    r623 r624  
    121121        uint32_t         pgfault_nr;         /*! page fault counter (instrumentation)           */
    122122
    123     vpn_t            kent_vpn_base;      /*! kentry vseg first page                         */
    124123    vpn_t            args_vpn_base;      /*! args vseg first page                           */
    125     vpn_t            envs_vpn_base;      /*! envs zone first page                           */
    126     vpn_t            heap_vpn_base;      /*! envs zone first page                           */
    127         vpn_t            code_vpn_base;      /*! code zone first page                           */
    128         vpn_t            data_vpn_base;      /*! data zone first page                           */
     124    vpn_t            envs_vpn_base;      /*! envs vseg first page                           */
     125        vpn_t            code_vpn_base;      /*! code vseg first page                           */
     126        vpn_t            data_vpn_base;      /*! data vseg first page                           */
     127    vpn_t            heap_vpn_base;      /*! heap zone first page                           */
    129128
    130129        intptr_t         entry_point;        /*! main thread entry point                        */
     
    157156 * @ mapping   : detailed mapping if true.
    158157 ********************************************************************************************/
    159 void vmm_display( struct process_s * process,
     158void hal_vmm_display( struct process_s * process,
    160159                  bool_t             mapping );
    161160
  • trunk/kernel/syscalls/sys_barrier.c

    r619 r624  
    7474printk("\n[ERROR] in %s : unmapped barrier %x / thread %x / process %x\n",
    7575__FUNCTION__ , vaddr , this->trdid , process->pid );
    76 vmm_display( process , false );
     76hal_vmm_display( process , false );
    7777#endif
    7878        this->errno = error;
     
    9595printk("\n[ERROR] in %s : unmapped barrier attributes %x / thread %x / process %x\n",
    9696__FUNCTION__ , attr , this->trdid , process->pid );
    97 vmm_display( process , false );
     97hal_vmm_display( process , false );
    9898#endif
    9999                    this->errno = EINVAL;
  • trunk/kernel/syscalls/sys_condvar.c

    r566 r624  
    22 * sys_condvar.c - Access a POSIX condvar.
    33 *
    4  * Author    Alain Greiner  (2016,2017,2018)
     4 * Author    Alain Greiner  (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2424#include <hal_kernel_types.h>
    2525#include <hal_special.h>
     26#include <hal_vmm.h>
    2627#include <errno.h>
    2728#include <thread.h>
     
    7576printk("\n[ERROR] in %s : unmapped condvar %x / thread %x / process %x\n",
    7677__FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid );
    77 vmm_display( process , false );
     78hal_vmm_display( process , false );
    7879#endif
    7980        this->errno = error;
  • trunk/kernel/syscalls/sys_display.c

    r623 r624  
    22 * sys_display.c - display the current state of a kernel structure on TXT0
    33 *
    4  * Author    Alain Greiner (2016,2017,2018)
     4 * Author    Alain Greiner (2016,2017,2018, 2019)
    55 * 
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2424#include <hal_kernel_types.h>
    2525#include <hal_uspace.h>
     26#include <hal_vmm.h>
    2627#include <errno.h>
    2728#include <vmm.h>
     
    167168            if( cxy == local_cxy )
    168169            {
    169                     vmm_display( process , true );
     170                    hal_vmm_display( process , true );
    170171            }
    171172            else
    172173            {
    173                 rpc_vmm_display_client( cxy , process , true );
     174                rpc_hal_vmm_display_client( cxy , process , true );
    174175            }
    175176
  • trunk/kernel/syscalls/sys_get_config.c

    r566 r624  
    6666printk("\n[ERROR] in %s : x_size buffer unmapped / thread %x / process %x\n",
    6767__FUNCTION__ , (intptr_t)x_size , this->trdid , process->pid );
    68 vmm_display( process , false );
     68hal_vmm_display( process , false );
    6969#endif
    7070        this->errno = EINVAL;
     
    8181printk("\n[ERROR] in %s : y_size buffer unmapped / thread %x / process %x\n",
    8282__FUNCTION__ , (intptr_t)y_size , this->trdid , process->pid );
    83 vmm_display( process , false );
     83hal_vmm_display( process , false );
    8484#endif
    8585        this->errno = EINVAL;
     
    9696printk("\n[ERROR] in %s : ncores buffer unmapped / thread %x / process %x\n",
    9797__FUNCTION__ , (intptr_t)ncores , this->trdid , process->pid );
    98 vmm_display( process , false );
     98hal_vmm_display( process , false );
    9999#endif
    100100        this->errno = EINVAL;
  • trunk/kernel/syscalls/sys_get_core.c

    r506 r624  
    5555printk("\n[ERROR] in %s : cxy buffer unmapped %x / thread %x / process %x\n",
    5656__FUNCTION__ , (intptr_t)cxy , this->trdid , process->pid );
    57 vmm_display( process , false );
     57hal_vmm_display( process , false );
    5858#endif
    5959        this->errno = EFAULT;
     
    7070printk("\n[ERROR] in %s : lid buffer unmapped %x / thread %x / process %x\n",
    7171__FUNCTION__ , (intptr_t)lid , this->trdid , process->pid );
    72 vmm_display( process , false );
     72hal_vmm_display( process , false );
    7373#endif
    7474        this->errno = EFAULT;
  • trunk/kernel/syscalls/sys_get_cycle.c

    r506 r624  
    5353printk("\n[ERROR] in %s : user buffer unmapped %x / thread %x / process %x\n",
    5454__FUNCTION__ , (intptr_t)cycle , this->trdid , process->pid );
    55 vmm_display( process , false );
     55hal_vmm_display( process , false );
    5656#endif
    5757        this->errno = EFAULT;
  • trunk/kernel/syscalls/sys_is_fg.c

    r566 r624  
    6767printk("\n[ERROR] in %s : unmapped owner buffer %x / thread %x in process %x\n",
    6868__FUNCTION__ , (intptr_t)is_fg, this->trdid, process->pid );
    69 vmm_display( process , false );
     69hal_vmm_display( process , false );
    7070#endif
    7171         this->errno = EINVAL;
  • trunk/kernel/syscalls/sys_kill.c

    r594 r624  
    7474tm_start = hal_get_cycles();
    7575if( DEBUG_SYS_KILL < tm_start )
    76 printk("\n[%s] thread[%x,%x] enter / process %x / %s / cycle %d\n",
    77 __FUNCTION__, this->process->pid, this->trdid, pid,
    78 sig_type_str(sig_id), (uint32_t)tm_start );
     76printk("\n[%s] thread[%x,%x] enter : %s to process %x / cycle %d\n",
     77__FUNCTION__, this->process->pid, this->trdid,
     78sig_type_str(sig_id), pid, (uint32_t)tm_start );
    7979#endif
    8080
     
    8686#if (DEBUG_SYS_KILL & 1)
    8787if( DEBUG_SYS_KILL < tm_start )
    88 printk("\n[%s] thread[%x,%x] get owner process %x in cluster %x\n",
     88printk("\n[%s] thread[%x,%x] get target process descriptor %x in owner cluster %x\n",
    8989__FUNCTION__ , this->process->pid, this->trdid, owner_ptr, owner_cxy );
    9090#endif
     
    108108#if (DEBUG_SYS_KILL & 1)
    109109if( DEBUG_SYS_KILL < tm_start )
    110 printk("\n[%x] thread[%x,%x] get parent process %x in cluster %x\n",
     110printk("\n[%s] thread[%x,%x] get parent process descriptor %x in cluster %x\n",
    111111__FUNCTION__ , this->process->pid, this->trdid, parent_ptr, parent_cxy );
    112112#endif
     
    128128            process_sigaction( pid , BLOCK_ALL_THREADS );
    129129
     130#if (DEBUG_SYS_KILL & 1)
     131if( DEBUG_SYS_KILL < tm_start )
     132printk("\n[%s] thread[%x,%x] blocked all threads of process %x\n",
     133__FUNCTION__ , this->process->pid, this->trdid, pid );
     134#endif
    130135            // atomically update owner process termination state
    131136            hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
     
    136141
    137142            // calling thread deschedules when it is itself a target thread
    138             if( this->process->pid == pid ) sched_yield("block itself");
     143            if( this->process->pid == pid )
     144            {
     145
     146#if (DEBUG_SYS_KILL & 1)
     147if( DEBUG_SYS_KILL < tm_start )
     148printk("\n[%s] thread[%x,%x] is a target thread => deschedule\n",
     149__FUNCTION__ , this->process->pid, this->trdid );
     150#endif
     151                sched_yield("block itself");
     152            }
    139153
    140154            break;
  • trunk/kernel/syscalls/sys_mmap.c

    r623 r624  
    7070printk("\n[ERROR] in %s : thread[%x,%x] / mmap attributes unmapped %x\n",
    7171__FUNCTION__ , process->pid, this->trdid, (intptr_t)attr );
    72 vmm_display( process , false );
     72hal_vmm_display( process , false );
    7373#endif
    7474                this->errno = EINVAL;
  • trunk/kernel/syscalls/sys_munmap.c

    r623 r624  
    6767printk("\n[ERROR] in %s : thread[%x,%x] / user buffer unmapped %x\n",
    6868__FUNCTION__ , process->pid, this->trdid, (intptr_t)vaddr );
    69 vmm_display( process , false );
     69hal_vmm_display( process , false );
    7070#endif
    7171                this->errno = EINVAL;
  • trunk/kernel/syscalls/sys_mutex.c

    r566 r624  
    7474printk("\n[ERROR] in %s : mutex unmapped %x / thread %x / process %x\n",
    7575__FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid );
    76 vmm_display( process , false );
     76hal_vmm_display( process , false );
    7777#endif
    7878        this->errno = error;
  • trunk/kernel/syscalls/sys_opendir.c

    r614 r624  
    6666printk("\n[ERROR] in %s / thread[%x,%x] : DIR buffer %x unmapped\n",
    6767__FUNCTION__ , process->pid , this->trdid, dirp );
    68 vmm_display( process , false );
     68hal_vmm_display( process , false );
    6969#endif
    7070                this->errno = EINVAL;
  • trunk/kernel/syscalls/sys_read.c

    r610 r624  
    107107printk("\n[ERROR] in %s : thread[%x,%x] user buffer unmapped %x\n",
    108108__FUNCTION__ , process->pid, this->trdid, (intptr_t)vaddr );
    109 vmm_display( process , false );
     109hal_vmm_display( process , false );
    110110#endif
    111111                this->errno = EINVAL;
  • trunk/kernel/syscalls/sys_readdir.c

    r612 r624  
    6969printk("\n[ERROR] in %s / thread[%x,%x] : user buffer %x unmapped\n",
    7070__FUNCTION__ , process->pid , this->trdid, buffer );
    71 vmm_display( process , false );
     71hal_vmm_display( process , false );
    7272#endif
    7373                this->errno = EINVAL;
  • trunk/kernel/syscalls/sys_sem.c

    r566 r624  
    22 * sys_sem.c - Acces a POSIX unamed semaphore.
    33 *
    4  * Authors     Alain Greiner (2016,2017,2018)
     4 * Authors     Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2424#include <hal_kernel_types.h>
    2525#include <hal_uspace.h>
     26#include <hal_vmm.h>
    2627#include <shared_semaphore.h>
    2728#include <errno.h>
     
    7475printk("\n[ERROR] in %s : unmapped semaphore pointer %x / thread %x in process %x / cycle %d\n",
    7576__FUNCTION__ , (intptr_t)vaddr, this->trdid, process->pid, (uint32_t)hal_get_cycles() );
    76 vmm_display( process , false );
     77hal_vmm_display( process , false );
    7778#endif
    7879        this->errno = EINVAL;
     
    112113printk("\n[ERROR] in %s GETVALUE: unmapped buffer %x / thread %x in process %x / cycle %d\n",
    113114__FUNCTION__ , (intptr_t)current_value, this->trdid, process->pid, (uint32_t)hal_get_cycles() );
    114 vmm_display( process , false );
     115hal_vmm_display( process , false );
    115116#endif
    116117                this->errno = EINVAL;
     
    154155printk("\n[ERROR] in %s WAIT: semaphore %x not found / thread %x in process %x / cycle %d\n",
    155156__FUNCTION__ , (intptr_t)vaddr, this->trdid, process->pid, (uint32_t)hal_get_cycles() );
    156 vmm_display( process , true );
     157hal_vmm_display( process , true );
    157158#endif
    158159                this->errno = EINVAL;
  • trunk/kernel/syscalls/sys_stat.c

    r610 r624  
    22 * sys_stat.c - kernel function implementing the "stat" syscall.
    33 *
    4  * Author    Alain Greiner  (2016,2017,2018)
     4 * Author    Alain Greiner  (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2424#include <hal_kernel_types.h>
    2525#include <hal_uspace.h>
     26#include <hal_vmm.h>
    2627#include <hal_special.h>
    2728#include <errno.h>
     
    6162printk("\n[ERROR] in %s / thread[%x,%x] : stat structure %x unmapped\n",
    6263__FUNCTION__ , process->pid , this->trdid, u_stat );
    63 vmm_display( process , false );
     64hal_vmm_display( process , false );
    6465#endif
    6566                this->errno = EINVAL;
  • trunk/kernel/syscalls/sys_thread_create.c

    r619 r624  
    22 * sys_thread_create.c - creates a new user thread
    33 *
    4  * Author     Alain Greiner (2016,2017,2018)
     4 * Author     Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2525#include <hal_kernel_types.h>
    2626#include <hal_uspace.h>
     27#include <hal_vmm.h>
    2728#include <printk.h>
    2829#include <errno.h>
     
    8283printk("\n[ERROR] in %s : thread[%x,%x] / trdid buffer %x unmapped %x\n",
    8384__FUNCTION__, process->pid, parent->trdid, (intptr_t)trdid_ptr );
    84 vmm_display( process , false );
     85hal_vmm_display( process , false );
    8586#endif
    8687                parent->errno = EINVAL;
     
    99100printk("\n[ERROR] in %s : thread[%x,%x] / user_attr buffer unmapped %x\n",
    100101__FUNCTION__, process->pid, parent->trdid, (intptr_t)user_attr );
    101 vmm_display( process , false );
     102hal_vmm_display( process , false );
    102103#endif
    103104                    parent->errno = EINVAL;
     
    117118printk("\n[ERROR] in %s : thread[%x,%x] / start_func unmapped %x\n",
    118119__FUNCTION__, process->pid, parent->trdid, (intptr_t)start_func );
    119 vmm_display( process , false );
     120hal_vmm_display( process , false );
    120121#endif
    121122        parent->errno = EINVAL;
     
    134135printk("\n[ERROR] in %s : thread[%x,%x] / start_args buffer unmapped %x\n",
    135136__FUNCTION__, process->pid, parent->trdid, (intptr_t)start_args );
    136 vmm_display( process , false );
     137hal_vmm_display( process , false );
    137138#endif
    138139                    parent->errno = EINVAL;
  • trunk/kernel/syscalls/sys_timeofday.c

    r506 r624  
    22 * sys_timeofday.c - Get current time
    33 *
    4  * Author    Alain Greiner (2016,2017,2018)
     4 * Author    Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2424#include <hal_kernel_types.h>
    2525#include <hal_uspace.h>
     26#include <hal_vmm.h>
    2627#include <thread.h>
    2728#include <printk.h>
     
    7071printk("\n[ERROR] in %s : user buffer tz unmapped / thread %x / process %x\n",
    7172__FUNCTION__ , (intptr_t)tz , this->trdid , process->pid );
    72 vmm_display( process , false );
     73hal_vmm_display( process , false );
    7374#endif
    7475        this->errno = EINVAL;
  • trunk/kernel/syscalls/sys_wait.c

    r566 r624  
    2525#include <hal_uspace.h>
    2626#include <hal_irqmask.h>
     27#include <hal_vmm.h>
    2728#include <remote_queuelock.h>
    2829#include <core.h>
     
    6869printk("\n[ERROR] in %s : status buffer %x unmapped for thread %x in process %x\n",
    6970__FUNCTION__ , (intptr_t)status, this->trdid , process->pid );
    70 vmm_display( process , false );
     71hal_vmm_display( process , false );
    7172#endif
    7273        this->errno = EINVAL;
  • trunk/kernel/syscalls/sys_write.c

    r623 r624  
    2424#include <kernel_config.h>
    2525#include <hal_kernel_types.h>
     26#include <hal_vmm.h>
    2627#include <hal_uspace.h>
    2728#include <hal_irqmask.h>
     
    106107printk("\n[ERROR] in %s : thread[%x,%x] user buffer unmapped %x\n",
    107108__FUNCTION__ , process->pid, this->trdid, (intptr_t)vaddr );
    108 vmm_display( process , false );
     109hal_vmm_display( process , false );
    109110#endif
    110111                this->errno = EINVAL;