Changeset 437

- Timestamp: Mar 28, 2018, 2:40:29 PM (7 years ago)
- Location: trunk
- Files: 1 added, 50 edited

Legend (for the per-file diffs below):
- unmarked line : unmodified
- + : added
- - : removed
- ... : unmodified lines skipped
trunk/Makefile.tsar
(r435 → r437)

        build/kernel/syscalls/sys_closedir.o \
        build/kernel/syscalls/sys_getcwd.o   \
+       build/kernel/syscalls/sys_isatty.o   \
        build/kernel/syscalls/sys_alarm.o    \
        build/kernel/syscalls/sys_rmdir.o
trunk/hal/tsar_mips32/core/hal_exception.c
(r435 → r437)

  //////////////////////////////////////////////////////////////////////////////////////////
- // This function is called when an MMU exception has been detected.
+ // This function is called when an MMU exception has been detected (IBE / DBE).
  // It get the relevant exception arguments from the MMU.
  // It signal a fatal error in case of illegal access. In case of page unmapped
  ...
  //////////////////////////////////////////////////////////////////////////////////////////
  error_t hal_mmu_exception( thread_t * this,
+                            uint32_t   excPC,
                             bool_t     is_ins )
  {
  ...
      default:   // this is a kernel error => panic
      {
-         assert( false , __FUNCTION__ , "thread %x / excp_code = %x / vaddr = %x\n",
-         this->trdid , excp_code, bad_vaddr );
+         assert( false , __FUNCTION__ , "thread %x / epc %x / %s / vaddr = %x\n",
+         this, excPC, hal_mmu_exception_str(excp_code) , bad_vaddr );

          return EXCP_KERNEL_PANIC;
  ...
      error_t  error;
      uint32_t excCode;      // 4 bits XCODE from CP0_CR
+     uint32_t excPC;        // fauty instruction address

      // get pointer on faulty thread uzone
  ...
      uzone = (uint32_t *)CURRENT_THREAD->uzone_current;

-     // get 4 bits XCODE from CP0_CR register
+     // get XCODE and EPC from UZONE
      excCode = (uzone[UZ_CR] >> 2) & 0xF;
+     excPC   = uzone[UZ_EPC];

  #if CONFIG_DEBUG_HAL_EXCEPTIONS
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_HAL_EXCEPTIONS < cycle )
- printk("\n[DBG] %s : thread %x on core[%x,%d] enter / process %x / xcode %x / cycle %d\n",
- __FUNCTION__, this, local_cxy, this->core->lid, this->process->pid, excCode, cycle );
+ printk("\n[DBG] %s : thread %x enter / core[%x,%d] / pid %x / epc %x / xcode %x / cycle %d\n",
+ __FUNCTION__, this, local_cxy, this->core->lid, this->process->pid, excPC, excCode, cycle );
  #endif
  ...
      case XCODE_DBE:   // can be non fatal
      {
-         error = hal_mmu_exception( this , false );           // data MMU exception
+         error = hal_mmu_exception( this , excPC , false );   // data MMU exception
          break;
      }
      case XCODE_IBE:   // can be non fatal
      {
-         error = hal_mmu_exception( this , true );            // ins MMU exception
+         error = hal_mmu_exception( this , excPC , true );    // ins MMU exception
          break;
      }
  ...
  cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_HAL_EXCEPTIONS < cycle )
- printk("\n[DBG] %s : thread %x on core[%x,%d] exit / process %x / xcode %x / cycle %d\n",
- __FUNCTION__, this, local_cxy, this->core->lid, this->process->pid, excCode, cycle );
+ printk("\n[DBG] %s : thread %x exit / core[%x,%d] / pid %x / epc %x / xcode %x / cycle %d\n",
+ __FUNCTION__, this, local_cxy, this->core->lid, this->process->pid, excPC, excCode, cycle );
  #endif
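The conditional blocks added above follow the debug convention that this changeset applies throughout the kernel: each CONFIG_DEBUG_* symbol is both a compile-time switch and an activation date expressed in cycles. A minimal sketch of that convention, with a hypothetical switch name and threshold value (only hal_get_cycles(), printk() and the test shape are taken from the code above):

    // Hedged sketch, not code from this changeset. CONFIG_DEBUG_FOO is a
    // hypothetical switch: value 0 compiles the trace out entirely, a value
    // N > 0 keeps the trace silent until the cycle counter passes N.
    #define CONFIG_DEBUG_FOO  1000000

    #if CONFIG_DEBUG_FOO
    uint32_t cycle = (uint32_t)hal_get_cycles();
    if( CONFIG_DEBUG_FOO < cycle )        // only trace after the threshold
        printk("\n[DBG] %s : enter / cycle %d\n", __FUNCTION__, cycle );
    #endif
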
trunk/hal/tsar_mips32/drivers/soclib_bdv.c
(r436 → r437)

   * soclib_bdv.c - soclib simple block device driver implementation.
   *
-  * Author  Alain Greiner (2016)
+  * Author  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
  ...
      buf_xp = (xptr_t)hal_remote_lwd( XPTR( th_cxy , &th_ptr->ioc_cmd.buf_xp ) );
      ioc_xp = (xptr_t)hal_remote_lwd( XPTR( th_cxy , &th_ptr->ioc_cmd.dev_xp ) );
+
+ #if CONFIG_DEBUG_HAL_IOC_RX
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( (CONFIG_DEBUG_HAL_IOC_RX < cycle) && (cmd_type != IOC_WRITE) )
+ printk("\n[DBG] %s : thread %x enter for RX / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , cycle );
+ #endif
+
+ #if CONFIG_DEBUG_HAL_IOC_TX
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( (CONFIG_DEBUG_HAL_IOC_TX < cycle) && (cmd_type == IOC_WRITE) )
+ printk("\n[DBG] %s : thread %x enter for TX / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , cycle );
+ #endif

      // get IOC device cluster and local pointer
  ...
      }

+ #if CONFIG_DEBUG_HAL_IOC_RX
+ cycle = (uint32_t)hal_get_cycles();
+ if( (CONFIG_DEBUG_HAL_IOC_RX < cycle) && (cmd_type != TXT_WRITE) )
+ printk("\n[DBG] %s : thread %x exit after RX / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , cycle );
+ #endif
+
+ #if CONFIG_DEBUG_HAL_IOC_TX
+ cycle = (uint32_t)hal_get_cycles();
+ if( (CONFIG_DEBUG_HAL_IOC_TX < cycle) && (cmd_type == TXT_WRITE) )
+ printk("\n[DBG] %s : thread %x exit after TX / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , cycle );
+ #endif
+
  }  // end soclib_bdv_cmd()

  ...
  void __attribute__ ((noinline)) soclib_bdv_isr( chdev_t * chdev )
  {
+     error_t error = 0;
+
      // get extended pointer on client thread
      xptr_t root = XPTR( local_cxy , &chdev->wait_root );
  ...
      thread_t * client_ptr = (thread_t *)GET_PTR( client_xp );

+     // get command type
+     uint32_t cmd_type = hal_remote_lw( XPTR( client_cxy , &client_ptr->ioc_cmd.type ) );
+
      // get SOCLIB_BDV device cluster and local pointer
      cxy_t bdv_cxy = GET_CXY( chdev->base );
  ...
      uint32_t status = hal_remote_lw( XPTR( bdv_cxy , bdv_ptr + BDV_STATUS_REG ) );

+     if( cmd_type == IOC_READ )
+     {
+         error = (status != BDV_READ_SUCCESS);
+
+ #if CONFIG_DEBUG_HAL_IOC_RX
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_HAL_IOC_RX < cycle )
+ printk("\n[DBG] %s : IOC_IRQ / RX transfer / client %x / server %x / cycle %d\n",
+ __FUNCTION__, client_ptr , chdev->server , cycle );
+ #endif
+
+     }
+     else if( cmd_type == IOC_WRITE )
+     {
+         error = (status != BDV_WRITE_SUCCESS);
+
+ #if CONFIG_DEBUG_HAL_IOC_TX
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_HAL_IOC_TX < cycle )
+ printk("\n[DBG] %s : IOC_IRQ / RX transfer / client %x / server %x / cycle %d\n",
+ __FUNCTION__, client_ptr , chdev->server , cycle );
+ #endif
+
+     }
+     else
+     {
+         assert( false , __FUNCTION__ , "IOC_SYNC_READ should not use IRQ" );
+     }
+
      // set operation status in command
-     if((status != BDV_READ_SUCCESS) && (status != BDV_WRITE_SUCCESS))
-     {
-         hal_remote_sw( XPTR( client_cxy , &client_ptr->ioc_cmd.error ) , 1 );
-     }
-     else
-     {
-         hal_remote_sw( XPTR( client_cxy , &client_ptr->ioc_cmd.error ) , 0 );
-     }
+     hal_remote_sw( XPTR( client_cxy , &client_ptr->ioc_cmd.error ) , error );

      // unblock server thread
      thread_unblock( server_xp , THREAD_BLOCKED_ISR );

-     // unblock client thread
-     thread_unblock( client_xp , THREAD_BLOCKED_IO );
-
  }  // end soclib_bdv_isr()
trunk/hal/tsar_mips32/drivers/soclib_bdv.h
(r75 → r437)

   * soclib_bdv.h - SOCLIB_BDV (simple block device) driver definition.
   *
-  * Author  Alain Greiner
+  * Author  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
trunk/hal/tsar_mips32/drivers/soclib_hba.c
(r436 → r437)

   * soclib_hba.c - soclib AHCI block device driver implementation.
   *
-  * Author  Alain Greiner (2016)
+  * Author  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
  ...
          // unblock client thread
          thread_unblock( client_xp , THREAD_BLOCKED_IO );
-
-         ioc_dmsg("INFO in %s : thread %x at cycle %d\n",
-         __FUNCTION__ , hal_remote_lw( XPTR( client_cxy , &client_ptr->trdid ) ) ,
-         hal_get_cycles() );
      }
  }
trunk/hal/tsar_mips32/drivers/soclib_hba.h
(r75 → r437)

   * soclib_hba.h - soclib AHCI block device driver definition.
   *
-  * Author  Alain Greiner (2016)
+  * Author  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/devices/dev_dma.c
(r408 → r437)

   * dev_dma.c - DMA (Interrupt Controler Unit) generic device API implementation.
   *
-  * Authors  Alain Greiner (2017)
+  * Authors  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
  ...
      thread_t * this = CURRENT_THREAD;

-     dma_dmsg("\n[DBG] %s : enters for thread %x / dst = %l / src = %l / size = %x\n",
-     __FUNCTION__ , this->trdid , dst_xp , src_xp , size );
+ #if CONGIG_DEBUG_DEV_DMA
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONGIG_DEBUG_DEV_DMA < cycle )
+ printk("\n[DBG] %s : thread %x enters / dst %l / src %l / size = %x\n",
+ __FUNCTION__ , this, dst_xp, src_xp, size );
+ #endif

      // select DMA channel corresponding to core lid
  ...
      chdev_register_command( dev_xp );

-     dma_dmsg("\n[DBG] %s : completes for thread %x / error = %d\n",
-     __FUNCTION__ , this->trdid , this->dma_cmd.error );
+ #if CONGIG_DEBUG_DEV_DMA
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONGIG_DEBUG_DEV_DMA < cycle )
+ printk("\n[DBG] %s : thread %x exit / dst %l / src %l / size = %x\n",
+ __FUNCTION__ , this, dst_xp, src_xp, size );
+ #endif

      // return I/O operation status from calling thread descriptor
trunk/kernel/devices/dev_dma.h
(r14 → r437)

   * dev_dma.h - DMA (Direct Memory Access) generic device API definition.
   *
-  * Authors  Alain Greiner (2017)
+  * Authors  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/devices/dev_fbf.c
(r422 → r437)

   * dev_fbf.c - FBF (Block Device Controler) generic device API implementation.
   *
-  * Author  Alain Greiner (2016)
+  * Author  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
  ...
      error = vmm_v2p_translate( CONFIG_KERNEL_IDENTITY_MAP , buffer , &buf_paddr );

-     if( error )
-     {
-         printk("\n[ERROR] in %s : cannot translate vaddr = %p in process %x\n",
-         __FUNCTION__ , buffer , this->process->pid );
-         return EINVAL;
-     }
-
-     fbf_dmsg("\n[DBG] %s : thread %x in process %x / vaddr = %p / paddr = %l\n",
-     __FUNCTION__ , this->trdid , this->process->pid , buffer , buf_paddr );
+     // check buffer is mapped
+     assert( (error == 0) , __FUNCTION__ ,
+     "cannot translate vaddr = %p in process %x\n", buffer, this->process->pid );

      // get extended pointer on FBF chdev descriptor
  ...
      // check offset and length versus FBF size
-     if( (offset + length) > (width * height) )
-     {
-         printk("\n[ERROR] in %s : offset = %d / length = %d / width = %d / height = %d\n",
-         __FUNCTION__ , offset , length , width , height );
-         return EINVAL;
-     }
+     assert( ((offset + length) <= (width * height)) , __FUNCTION__ ,
+     "offset %d / length %d / width %d / height %d\n", offset, length, width, height );

      // compute extended pointers on frame buffer and memory buffer
  ...
                       uint32_t offset )
  {
+
+ #if CONFIG_DEBUG_DEV_FBF_RX
+ uint32_t cycle = (uint32_t)hal_get_cycle();
+ if( CONFIG_DEBUG_DEV_FBF_RX < cycle )
+ printk("\n[DBG] %s : thread %x enter / process %x / vaddr %x / size %x\n",
+ __FUNCTION__ , this, this->process->pid , buffer , buf_paddr );
+ #endif
+
      return dev_fbf_access( false , buffer , length , offset );
+
+ #if CONFIG_DEBUG_DEV_FBF_RX
+ cycle = (uint32_t)hal_get_cycle();
+ if( CONFIG_DEBUG_DEV_FBF_RX < cycle )
+ printk("\n[DBG] %s : thread %x exit / process %x / vaddr %x / size %x\n",
+ __FUNCTION__ , this, this->process->pid , buffer , buf_paddr );
+ #endif
+
  }
  ...
                        uint32_t offset )
  {
+
+ #if CONFIG_DEBUG_DEV_FBF_TX
+ uint32_t cycle = (uint32_t)hal_get_cycle();
+ if( CONFIG_DEBUG_DEV_FBF_TX < cycle )
+ printk("\n[DBG] %s : thread %x enter / process %x / vaddr %x / size %x\n",
+ __FUNCTION__ , this, this->process->pid , buffer , buf_paddr );
+ #endif
+
      return dev_fbf_access( true , buffer , length , offset );
+
+ #if CONFIG_DEBUG_DEV_FBF_RX
+ cycle = (uint32_t)hal_get_cycle();
+ if( CONFIG_DEBUG_DEV_FBF_RX < cycle )
+ printk("\n[DBG] %s : thread %x exit / process %x / vaddr %x / size %x\n",
+ __FUNCTION__ , this, this->process->pid , buffer , buf_paddr );
+ #endif
+
  }
trunk/kernel/devices/dev_ioc.c
(r408 → r437)

   * dev_ioc.c - IOC (Block Device Controler) generic device API implementation.
   *
-  * Author  Alain Greiner (2016)
+  * Author  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
  ...
      thread_t * this = CURRENT_THREAD;      // pointer on client thread

-     ioc_dmsg("\n[DBG] %s : thread %x in process %x"
-     " for lba = %x / buffer = %x / at cycle %d\n",
-     __FUNCTION__ , this->trdid , this->process->pid ,
-     lba , (intptr_t)buffer , hal_get_cycles() );
-
      // software L2/L3 cache coherence for memory buffer
      if( chdev_dir.iob )
  ...
      chdev_register_command( dev_xp );

-     ioc_dmsg("\n[DBG] in %s : thread %x in process %x"
-     " completes / error = %d / at cycle %d\n",
-     __FUNCTION__ , this->trdid , this->process->pid ,
-     this->ioc_cmd.error , hal_get_cycles() );
-
      // return I/O operation status
      return this->ioc_cmd.error;
  ...
                     uint32_t count )
  {
+
+ #if CONFIG_DEBUG_DEV_IOC_RX
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_IOC_RX < cycle )
+ printk("\n[DBG] %s : thread %x enters / lba %x / buffer %x / cycle %d\n",
+ __FUNCTION__ , this, lba, buffer, cycle );
+ #endif
+
      return dev_ioc_access( IOC_READ , buffer , lba , count );
+
+ #if CONFIG_DEBUG_DEV_IOC_RX
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_IOC_RX < cycle )
+ printk("\n[DBG] %s : thread %x exit / lba %x / buffer %x / cycle %d\n",
+ __FUNCTION__ , this, lba, buffer, cycle );
+ #endif
+
  }
  ...
                      uint32_t count )
  {
+
+ #if CONFIG_DEBUG_DEV_IOC_TX
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_IOC_TX < cycle )
+ printk("\n[DBG] %s : thread %x enters / lba %x / buffer %x / cycle %d\n",
+ __FUNCTION__ , this, lba, buffer, cycle );
+ #endif
+
      return dev_ioc_access( IOC_WRITE , buffer , lba , count );
+
+ #if CONFIG_DEBUG_DEV_IOC_TX
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_IOC_TX < cycle )
+ printk("\n[DBG] %s : thread %x exit / lba %x / buffer %x / cycle %d\n",
+ __FUNCTION__ , this, lba, buffer, cycle );
+ #endif
+
  }
  ...
      thread_t * this = CURRENT_THREAD;

-     ioc_dmsg("\n[DBG] %s : core[%x,%d] enter for %d blocks / lba = %x / cycle %d\n",
-     __FUNCTION__ , local_cxy , this->core->lid , count , lba , hal_time_stamp() );
+ #if CONFIG_DEBUG_DEV_IOC_RX
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_IOC_RX < cycle )
+ printk("\n[DBG] %s : thread %x enters / lba %x / buffer %x / cycle %d\n",
+ __FUNCTION__ , this, lba, buffer, cycle );
+ #endif

      // software L2/L3 cache coherence for memory buffer
  ...
      dev_pic_enable_irq( lid , ioc_xp );

-     ioc_dmsg("\n[DBG] %s : core[%x,%d] exit / error = %d / cycle %d\n",
-     __FUNCTION__ , local_cxy , this->core->lid , this->ioc_cmd.error , hal_time_stamp() );
+ #if CONFIG_DEBUG_DEV_IOC_RX
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_IOC_RX < cycle )
+ printk("\n[DBG] %s : thread %x exit / lba %x / buffer %x / cycle %d\n",
+ __FUNCTION__ , this, lba, buffer, cycle );
+ #endif

      // return I/O operation status from calling thread descriptor
trunk/kernel/devices/dev_ioc.h
(r246 → r437)

   * dev_ioc.h - IOC (Block Device Controler) generic device API definition.
   *
-  * Author  Alain Greiner (2016)
+  * Author  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/devices/dev_mmc.c
(r407 → r437)

   * dev_mmc.c - MMC (Memory Cache Controler) generic device API implementation.
   *
-  * Author  Alain Greiner (2016)
+  * Author  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
  ...
      thread_t * this = CURRENT_THREAD;

-     mmc_dmsg("\n[DBG] %s enters for thread %x in process %x / buf_xp = %l\n",
-     __FUNCTION__ , this->trdid , this->process->pid , buf_xp );
+ #if CONFIG_DEBUG_DEV_MMC
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_MMC < cycle )
+ printk("\n[DBG] %s : thread %x enters / process %x / buf_xp = %l\n",
+ __FUNCTION__, this, this->process->pid , buf_xp );
+ #endif

      // get buffer cluster and local pointer
  ...
      error = dev_mmc_access( this );

-     mmc_dmsg("\n[DBG] %s completes for thread %x in process %x / error = %d\n",
-     __FUNCTION__ , this->trdid , this->process->pid , error );
+ #if CONFIG_DEBUG_DEV_MMC
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_MMC < cycle )
+ printk("\n[DBG] %s : thread %x exit / process %x / buf_xp = %l\n",
+ __FUNCTION__, this, this->process->pid , buf_xp );
+ #endif

      return error;
  ...
      thread_t * this = CURRENT_THREAD;

-     mmc_dmsg("\n[DBG] %s enters for thread %x in process %x / buf_xp = %l\n",
-     __FUNCTION__ , this->trdid , this->process->pid , buf_xp );
+ #if CONFIG_DEBUG_DEV_MMC
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_MMC < cycle )
+ printk("\n[DBG] %s : thread %x enters / process %x / buf_xp = %l\n",
+ __FUNCTION__, this, this->process->pid , buf_xp );
+ #endif

      // get buffer cluster and local pointer
  ...
      error = dev_mmc_access( this );

-     mmc_dmsg("\n[DBG] %s completes for thread %x in process %x / error = %d\n",
-     __FUNCTION__ , this->trdid , this->process->pid , error );
+ #if CONFIG_DEBUG_DEV_MMC
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_MMC < cycle )
+ printk("\n[DBG] %s : thread %x exit / process %x / buf_xp = %l\n",
+ __FUNCTION__, this, this->process->pid , buf_xp );
+ #endif

      return error;
trunk/kernel/devices/dev_mmc.h
(r23 → r437)

   * dev_mmc.h - MMC (Generic L2 cache controller) device API definition.
   *
-  * Authors  Alain Greiner (2016)
+  * Authors  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/devices/dev_nic.c
(r436 → r437)

   * dev_nic.c - NIC (Network Controler) generic device API implementation.
   *
-  * Author  Alain Greiner (2016,2017)
+  * Author  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
  ...
      core_t * core = thread_ptr->core;

-     nic_dmsg("\n[DBG] %s enters for NIC-RX thread on core %d in cluster %x\n",
-     __FUNCTION__ , core->lid , local_cxy );
+ #if CONFIG_DEBUG_DEV_NIC_RX
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_NIC_RX < cycle )
+ printk("\n[DBG] %s : thread %x enters for packet %x in cluster %x\n",
+ __FUNCTION__ , thread_ptr , pkd , local_cxy );
+ #endif

      // get pointer on NIC-RX chdev descriptor
  ...
      pkd->length = thread_ptr->nic_cmd.length;

-     nic_dmsg("\n[DBG] %s exit for NIC-RX thread on core %d in cluster %x\n",
-     __FUNCTION__ , core->lid , local_cxy );
+ #if CONFIG_DEBUG_DEV_NIC_RX
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_NIC_RX < cycle )
+ printk("\n[DBG] %s : thread %x exit for packet %x in cluster %x\n",
+ __FUNCTION__ , thread_ptr , pkd , local_cxy );
+ #endif

      return 0;
  ...
      core_t * core = thread_ptr->core;

-     nic_dmsg("\n[DBG] %s enters for NIC-RX thread on core %d in cluster %x\n",
-     __FUNCTION__ , core->lid , local_cxy );
+ #if CONFIG_DEBUG_DEV_NIC_RX
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_NIC_RX < cycle )
+ printk("\n[DBG] %s : thread %x enters for packet %x in cluster %x\n",
+ __FUNCTION__ , thread_ptr , pkd , local_cxy );
+ #endif

      // get pointer on NIC-TX chdev descriptor
  ...
      if( error ) return error;

-     nic_dmsg("\n[DBG] %s exit for NIC-TX thread on core %d in cluster %x\n",
-     __FUNCTION__ , core->lid , local_cxy );
+ #if CONFIG_DEBUG_DEV_NIC_RX
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_NIC_RX < cycle )
+ printk("\n[DBG] %s : thread %x exit for packet %x in cluster %x\n",
+ __FUNCTION__ , thread_ptr , pkd , local_cxy );
+ #endif

      return 0;
trunk/kernel/devices/dev_nic.h
(r407 → r437)

   * dev_nic.h - NIC (Network Controler) generic device API definition.
   *
-  * Author  Alain Greiner (2016)
+  * Author  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/devices/dev_pic.c
(r422 → r437)

   * dev_pic.c - PIC (External Interrupt Controler) generic device API implementation.
   *
-  * Authors  Alain Greiner (2016)
+  * Authors  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
  ...
  {

-     irq_dmsg("\n[DBG] %s : core = [%x,%d] / src_chdev_cxy = %x / src_chdev_ptr = %x\n",
-     __FUNCTION__ , local_cxy , lid , GET_CXY(src_chdev_xp) , GET_PTR(src_chdev_xp) );
+ #if CONFIG_DEBUG_DEV_PIC
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_PIC < cycle )
+ printk("\n[DBG] %s : core[%x,%d] / src_chdev_cxy %x / src_chdev_ptr %x / cycle %d\n",
+ __FUNCTION__, local_cxy, lid, GET_CXY(src_chdev_xp), GET_PTR(src_chdev_xp), cycle );
+ #endif

      // get pointer on PIC chdev
  ...
  {

-     irq_dmsg("\n[DBG] %s : core = [%x,%d] / src_chdev_cxy = %x / src_chdev_ptr = %x\n",
-     __FUNCTION__ , local_cxy , lid , GET_CXY(src_chdev_xp) , GET_PTR(src_chdev_xp) );
+ #if CONFIG_DEBUG_DEV_PIC
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_PIC < cycle )
+ printk("\n[DBG] %s : core[%x,%d] / src_chdev_cxy %x / src_chdev_ptr %x / cycle %d\n",
+ __FUNCTION__, local_cxy, lid, GET_CXY(src_chdev_xp), GET_PTR(src_chdev_xp), cycle );
+ #endif

      // get pointer on PIC chdev
  ...
  {

-     irq_dmsg("\n[DBG] %s : core = [%x,%d] / period = %d\n",
-     __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , period );
+ #if CONFIG_DEBUG_DEV_PIC
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_PIC < cycle )
+ printk("\n[DBG] %s : core[%x,%d] / period %d / cycle %d\n",
+ __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , period, cycle );
+ #endif

      // get pointer on PIC chdev
  ...
  {

-     irq_dmsg("\n[DBG] %s : core = [%x,%d] / cycle %d\n",
-     __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_time_stamp() );
+ #if CONFIG_DEBUG_DEV_PIC
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_PIC < cycle )
+ printk("\n[DBG] %s : core[%x,%d] / cycle %d\n",
+ __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , cycle );
+ #endif

      // get pointer on PIC chdev
  ...
  {

-     irq_dmsg("\n[DBG] %s : src_core = [%x,%d] / dst_core = [%x,%d] / cycle %d\n",
-     __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cxy, lid, hal_time_stamp() );
+ #if CONFIG_DEBUG_DEV_PIC
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_PIC < cycle )
+ printk("\n[DBG] %s : src_core[%x,%d] / dst_core[%x,%d] / cycle %d\n",
+ __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cxy, lid, cycle );
+ #endif

      // get pointer on PIC chdev
  ...
  {

-     irq_dmsg("\n[DBG] %s : core = [%x,%d] / cycle %d\n",
-     __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, hal_time_stamp() );
+ #if CONFIG_DEBUG_DEV_PIC
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_PIC < cycle )
+ printk("\n[DBG] %s : core[%x,%d] / cycle %d\n",
+ __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
+ #endif

      // get pointer on PIC chdev
trunk/kernel/devices/dev_pic.h
(r407 → r437)

   * dev_pic.h - PIC (Programmable Interrupt Controler) generic device API definition.
   *
-  * Authors  Alain Greiner (2016)
+  * Authors  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/fs/devfs.c
(r435 → r437)

  }

- ///////////////////////////////////////////////////
+ /////////////////////////////////////////////////
  void devfs_global_init( xptr_t   parent_inode_xp,
                          xptr_t * devfs_dev_inode_xp,
  ...

  assert( (error == 0) , __FUNCTION__ , "cannot create <dev>\n" );

+ #if( CONFIG_DEBUG_DEVFS_INIT & 1 )
+ if( CONFIG_DEBUG_DEVFS_INIT < cycle )
+ printk("\n[DBG] %s : created <dev> inode at cycle %d\n", __FUNCTION__, cycle );
+ #endif

  // create DEVFS "external" inode in cluster IO
trunk/kernel/fs/vfs.c
(r436 → r437)

   *
   * Author  Mohamed Lamine Karaoui (2015)
-  *         Alain Greiner (2016,2017)
+  *         Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
  ...
      if( length >= CONFIG_VFS_MAX_NAME_LENGTH )
      {
-         printk("\n[ERROR] in %s : name too long\n", __FUNCTION__ );
+
+ #if CONFIG_DEBUG_SYSCALLS_ERROR
+ printk("\n[ERROR] in %s : name <name> too long\n", __FUNCTION__ , name );
+ #endif
          return EINVAL;
      }
  ...
      if( dentry == NULL )
      {
-         printk("\n[ERROR] in %s : cannot allocate dentry descriptor\n", __FUNCTION__ );
+
+ #if CONFIG_DEBUG_SYSCALLS_ERROR
+ printk("\n[ERROR] in %s : cannot allocate dentry\n", __FUNCTION__ );
+ #endif
          return ENOMEM;
      }
  ...
      strcpy( dentry->name , name );

+ #if( CONFIG_DEBUG_VFS_DENTRY_CREATE & 1 )
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_DENTRY_CREATE < cycle )
+ printk("\n[DBG] %s : dentry initialised\n", __FUNCTION__ );
+ #endif
+
      // register dentry in hash table rooted in parent inode
      xhtab_insert( XPTR( local_cxy , &parent->children ),
                    name,
                    XPTR( local_cxy , &dentry->list ) );
+
+ #if( CONFIG_DEBUG_VFS_DENTRY_CREATE & 1 )
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_DENTRY_CREATE < cycle )
+ printk("\n[DBG] %s : dentry registerd in htab\n", __FUNCTION__ );
+ #endif

      // return extended pointer on dentry
  ...
  #if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
- if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
+ if( (CONFIG_DEBUG_VFS_ADD_CHILD < cycle) && (error == 0) )
  printk("\n[DBG] %s : dentry <%s> created in cluster %x\n", __FUNCTION__, name, local_cxy );
  #endif
  ...
  #if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
- if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
+ if( (CONFIG_DEBUG_VFS_ADD_CHILD < cycle) && (error == 0) )
  printk("\n[DBG] %s : dentry <%s> created in cluster %x\n", __FUNCTION__, name, parent_cxy );
  #endif
  ...
      if( error )
      {
-         printk("\n[ERROR] in %s : cannot create dentry in cluster %x\n",
-         __FUNCTION__ , parent_cxy );
+         printk("\n[ERROR] in %s : cannot create dentry <%s> in cluster %x\n",
+         __FUNCTION__ , name , parent_cxy );
          return ENOMEM;
      }
  ...
  __FUNCTION__ , GET_PTR(inode_xp) , local_cxy );
  #endif
-
- vfs_dmsg("\n[DBG] %s : inode %x created in local cluster %x\n",
- __FUNCTION__ , GET_PTR(inode_xp) , local_cxy );

  }
trunk/kernel/fs/vfs.h
(r430 → r437)

   *
   * Author  Mohamed Lamine Karaoui (2014,2015)
-  *         Alain Greiner (2016,2017)
+  *         Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/kern/chdev.c
(r436 → r437)

      thread_t * this = CURRENT_THREAD;

- #if CONFIG_DEBUG_CHDEV_REGISTER_COMMAND
- uint32_t cycle = (uint32_t)hal_get_cycles();
- if( CONFIG_DEBUG_CHDEV_REGISTER_COMMAND < cycle )
- printk("\n[DBG] %s : client_thread %x (%s) enter / cycle %d\n",
- __FUNCTION__, this, thread_type_str(this->type) , cycle );
- #endif
-
      // get device descriptor cluster and local pointer
      cxy_t     chdev_cxy = GET_CXY( chdev_xp );
      chdev_t * chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
+
+ #if (CONFIG_DEBUG_CHDEV_CMD_RX || CONFIG_DEBUG_CHDEV_CMD_TX)
+ bool_t is_rx = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->is_rx ) );
+ #endif
+
+ #if CONFIG_DEBUG_CHDEV_CMD_RX
+ uint32_t rx_cycle = (uint32_t)hal_get_cycles();
+ if( (is_rx) && (CONFIG_DEBUG_CHDEV_CMD_RX < rx_cycle) )
+ printk("\n[DBG] %s : client_thread %x (%s) enter for RX / cycle %d\n",
+ __FUNCTION__, this, thread_type_str(this->type) , rx_cycle );
+ #endif
+
+ #if CONFIG_DEBUG_CHDEV_CMD_TX
+ uint32_t tx_cycle = (uint32_t)hal_get_cycles();
+ if( (is_rx == 0) && (CONFIG_DEBUG_CHDEV_CMD_TX < tx_cycle) )
+ printk("\n[DBG] %s : client_thread %x (%s) enter for TX / cycle %d\n",
+ __FUNCTION__, this, thread_type_str(this->type) , tx_cycle );
+ #endif

      // build extended pointers on client thread xlist and device root
  ...
      hal_restore_irq( save_sr );

- #if CONFIG_DEBUG_CHDEV_REGISTER_COMMAND
- cycle = (uint32_t)hal_get_cycles();
- if( CONFIG_DEBUG_CHDEV_REGISTER_COMMAND < cycle )
- printk("\n[DBG] %s : client_thread %x (%s) exit / cycle %d\n",
- __FUNCTION__, this, thread_type_str(this->type) , cycle );
+ #if CONFIG_DEBUG_CHDEV_CMD_RX
+ rx_cycle = (uint32_t)hal_get_cycles();
+ if( (is_rx) && (CONFIG_DEBUG_CHDEV_CMD_RX < rx_cycle) )
+ printk("\n[DBG] %s : client_thread %x (%s) exit for RX / cycle %d\n",
+ __FUNCTION__, this, thread_type_str(this->type) , rx_cycle );
+ #endif
+
+ #if CONFIG_DEBUG_CHDEV_CMD_TX
+ tx_cycle = (uint32_t)hal_get_cycles();
+ if( (is_rx == 0) && (CONFIG_DEBUG_CHDEV_CMD_TX < tx_cycle) )
+ printk("\n[DBG] %s : client_thread %x (%s) exit for TX / cycle %d\n",
+ __FUNCTION__, this, thread_type_str(this->type) , tx_cycle );
  #endif

  ...
      server = CURRENT_THREAD;

- #if CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER
- uint32_t cycle = (uint32_t)hal_get_cycles();
- if( CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER < cycle )
- printk("\n[DBG] %s : server_thread %x enter / chdev = %x / cycle %d\n",
- __FUNCTION__ , server , chdev , cycle );
- #endif
-
      // get root and lock on command queue
      root_xp = XPTR( local_cxy , &chdev->wait_root );
      lock_xp = XPTR( local_cxy , &chdev->wait_lock );
  ...
      else                         // waiting queue not empty
      {
-
- #if (CONFIG_DEBUG_SYS_READ & 1)
- enter_chdev_server_read = (uint32_t)hal_get_cycles();
- #endif
-
- #if (CONFIG_DEBUG_SYS_WRITE & 1)
- enter_chdev_server_write = (uint32_t)hal_get_cycles();
- #endif
-
          // release lock
          remote_spinlock_unlock( lock_xp );
  ...
          client_cxy = GET_CXY( client_xp );
          client_ptr = (thread_t *)GET_PTR( client_xp );
+
+ #if CONFIG_DEBUG_CHDEV_SERVER_RX
+ uint32_t rx_cycle = (uint32_t)hal_get_cycles();
+ if( (chdev->is_rx) && (CONFIG_DEBUG_CHDEV_SERVER_RX < rx_cycle) )
+ printk("\n[DBG] %s : server_thread %x start RX / client %x / cycle %d\n",
+ __FUNCTION__ , server , client_ptr , rx_cycle );
+ #endif
+
+ #if CONFIG_DEBUG_CHDEV_SERVER_TX
+ uint32_t tx_cycle = (uint32_t)hal_get_cycles();
+ if( (chdev->is_rx == 0) && (CONFIG_DEBUG_CHDEV_SERVER_TX < tx_cycle) )
+ printk("\n[DBG] %s : server_thread %x start TX / client %x / cycle %d\n",
+ __FUNCTION__ , server , client_ptr , tx_cycle );
+ #endif
+
+ #if (CONFIG_DEBUG_SYS_READ & 1)
+ enter_chdev_server_read = (uint32_t)hal_get_cycles();
+ #endif
+
+ #if (CONFIG_DEBUG_SYS_WRITE & 1)
+ enter_chdev_server_write = (uint32_t)hal_get_cycles();
+ #endif

          // call driver command function to execute I/O operation
  ...
          thread_unblock( client_xp , THREAD_BLOCKED_IO );

- #if CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER
- cycle = (uint32_t)hal_get_cycles();
- if( CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER < cycle )
- printk("\n[DBG] %s : server_thread %x complete operation for client %x / cycle %d\n",
- __FUNCTION__ , server , client_ptr , cycle );
+ #if CONFIG_DEBUG_CHDEV_SERVER_RX
+ rx_cycle = (uint32_t)hal_get_cycles();
+ if( (chdev->is_rx) && (CONFIG_DEBUG_CHDEV_SERVER_RX < rx_cycle) )
+ printk("\n[DBG] %s : server_thread %x completes RX / client %x / cycle %d\n",
+ __FUNCTION__ , server , client_ptr , rx_cycle );
+ #endif
+
+ #if CONFIG_DEBUG_CHDEV_SERVER_TX
+ tx_cycle = (uint32_t)hal_get_cycles();
+ if( (chdev->is_rx == 0) && (CONFIG_DEBUG_CHDEV_SERVER_TX < tx_cycle) )
+ printk("\n[DBG] %s : server_thread %x completes TX / client %x / cycle %d\n",
+ __FUNCTION__ , server , client_ptr , tx_cycle );
  #endif
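The instrumentation above replaces the single CONFIG_DEBUG_CHDEV_REGISTER_COMMAND and CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER switches with per-direction RX / TX switches, selected at run time from the chdev's is_rx field; because the chdev descriptor can live in another cluster, that field is fetched through an extended pointer. A condensed sketch of the selection logic, using names from the diff above (the surrounding function body is omitted):

    // Sketch only: per-direction trace selection as introduced above.
    #if (CONFIG_DEBUG_CHDEV_CMD_RX || CONFIG_DEBUG_CHDEV_CMD_TX)
    // remote read: the chdev descriptor may reside in another cluster
    bool_t is_rx = hal_remote_lw( XPTR( chdev_cxy , &chdev_ptr->is_rx ) );
    #endif

    #if CONFIG_DEBUG_CHDEV_CMD_RX
    uint32_t rx_cycle = (uint32_t)hal_get_cycles();
    if( is_rx && (CONFIG_DEBUG_CHDEV_CMD_RX < rx_cycle) )
        printk("\n[DBG] %s : enter for RX / cycle %d\n", __FUNCTION__, rx_cycle );
    #endif

    #if CONFIG_DEBUG_CHDEV_CMD_TX
    uint32_t tx_cycle = (uint32_t)hal_get_cycles();
    if( (is_rx == 0) && (CONFIG_DEBUG_CHDEV_CMD_TX < tx_cycle) )
        printk("\n[DBG] %s : enter for TX / cycle %d\n", __FUNCTION__, tx_cycle );
    #endif
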
trunk/kernel/kern/cluster.c
(r436 → r437)

   * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
   *         Mohamed Lamine Karaoui (2015)
-  *         Alain Greiner (2016,2017)
+  *         Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
  ...
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
- printk("\n[DBG] %s enters for cluster %x / cycle %d\n",
- __FUNCTION__ , local_cxy , cycle );
+ printk("\n[DBG] %s : thread %x enters for cluster %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
  #endif
  ...
      }

- #if CONFIG_DEBUG_CLUSTER_INIT
+ #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
  cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
- cluster_dmsg("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
+ printk("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
  __FUNCTION__ , local_cxy , cycle );
  #endif
  ...
      khm_init( &cluster->khm );

-     cluster_dmsg("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
-     __FUNCTION__ , local_cxy , hal_get_cycles() );
+ #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
+ printk("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
+ __FUNCTION__ , local_cxy , hal_get_cycles() );
+ #endif

      // initialises embedded KCM
      kcm_init( &cluster->kcm , KMEM_KCM );

-     cluster_dmsg("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
-     __FUNCTION__ , local_cxy , hal_get_cycles() );
+ #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
+ printk("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
+ __FUNCTION__ , local_cxy , hal_get_cycles() );
+ #endif

      // initialises all cores descriptors
  ...
      }

- #if CONFIG_DEBUG_CLUSTER_INIT
+ #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
  cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
- cluster_dmsg("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
+ printk("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
  __FUNCTION__ , local_cxy , cycle );
  #endif
  ...
      cluster->rpc_threads = 0;

- cluster_dmsg("\n[DBG] %s : RPC fifo inialized in cluster %x at cycle %d\n",
+ #if( CONFIG_DEBUG_CLUSTER_INIT & 1 )
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
+ printk("\n[DBG] %s : RPC fifo inialized in cluster %x at cycle %d\n",
  __FUNCTION__ , local_cxy , hal_get_cycles() );
+ #endif

      // initialise pref_tbl[] in process manager
  ...
  cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
- cluster_dmsg("\n[DBG] %s Process Manager initialized in cluster %x / cycle %d\n",
- __FUNCTION__ , local_cxy , cycle );
+ printk("\n[DBG] %s , thread %x exit for cluster %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle );
  #endif
trunk/kernel/kern/cluster.h
(r433 → r437)

   * authors  Ghassan Almaless (2008,2009,2010,2011,2012)
   *          Mohamed Lamine Karaoui (2015)
-  *          Alain Greiner (2016,2017)
+  *          Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/kern/do_syscall.c
(r428 → r437)

      sys_closedir,           // 25
      sys_getcwd,             // 26
-     sys_undefined,          // 27
+     sys_isatty,             // 27
      sys_alarm,              // 28
      sys_rmdir,              // 29
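Slot 27 of the syscall vector, previously sys_undefined, now dispatches to sys_isatty (presumably implemented by the one file this changeset adds, which is not shown in this listing). From user space this backs the standard POSIX isatty() call; the snippet below is an illustration of that user-side API only, not code from the changeset:

    /* User-space illustration: standard POSIX isatty(), which the new
     * sys_isatty slot is expected to back. Hypothetical usage, not ALMOS-MKH code. */
    #include <unistd.h>
    #include <stdio.h>

    int main( void )
    {
        if( isatty( STDOUT_FILENO ) ) printf( "stdout is a terminal\n" );
        else                          printf( "stdout is not a terminal\n" );
        return 0;
    }
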
trunk/kernel/kern/dqdt.c
(r406 → r437)

   * dqdt.c - Distributed Quaternary Decision Tree implementation.
   *
-  * Author : Alain Greiner (2016)
+  * Author : Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/kern/dqdt.h
(r19 → r437)

   * kern/dqdt.h - Distributed Quad Decision Tree
   *
-  * Author : Alain Greiner (2016)
+  * Author : Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
  ...
   *
   * - If both Y_SIZE and Y_SIZE are larger than 1, it makes the assumption that
-  *   the cluster topology is a 2D mesh. The [X,Y] coordinates of a cluster are
+  *   the clusters topology is a 2D mesh. The [X,Y] coordinates of a cluster are
   *   obtained from the CXY identifier using the following rules :
   *   X = CXY >> Y_WIDTH   /   Y = CXY & ((1<<Y_WIDTH)-1)
  ...

- /****************************************************************************************
-  * This recursive function displays usage information for all DQDT nodes in the subtree
-  * defined by the node argument. It traverses the quadtree from root to bottom.
-  ****************************************************************************************
-  * @ node_xp   : extended pointer on a DQDT node.
-  ***************************************************************************************/
- void dqdt_global_print( xptr_t node_xp );
-
- /****************************************************************************************
-  * This function displays summary usage information in a given DQDT local node.
-  ****************************************************************************************
-  * @ node   : local pointer on a DQDT node.
-  ***************************************************************************************/
- void dqdt_local_print( dqdt_node_t * node );
-
  /****************************************************************************************
   * This recursive function traverses the DQDT quad-tree from bottom to root, to propagate
   * the change in the threads number and allocated pages number in a leaf cluster,
  ...
  cxy_t dqdt_get_cluster_for_memory();

+ /****************************************************************************************
+  * This recursive function displays usage information for all DQDT nodes in the subtree
+  * defined by the node argument. It traverses the quadtree from root to bottom.
+  ****************************************************************************************
+  * @ node_xp   : extended pointer on a DQDT node.
+  ***************************************************************************************/
+ void dqdt_global_print( xptr_t node_xp );
+
+ /****************************************************************************************
+  * This function displays summary usage information in a given DQDT local node.
+  ****************************************************************************************
+  * @ node   : local pointer on a DQDT node.
+  ***************************************************************************************/
+ void dqdt_local_print( dqdt_node_t * node );
+

  #endif  /* _DQDT_H_ */
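The comment quoted above gives the coordinate rule X = CXY >> Y_WIDTH and Y = CXY & ((1<<Y_WIDTH)-1). A small worked example makes the rule concrete; Y_WIDTH = 4 and the identifier below are illustrative assumptions, not values taken from this changeset:

    // Illustrative decomposition of a cluster identifier into 2D mesh
    // coordinates, following the rule in the dqdt.h comment above.
    #include <stdio.h>
    #include <stdint.h>

    #define Y_WIDTH 4                               // assumed mesh parameter

    int main( void )
    {
        uint32_t cxy = 0x25;                        // hypothetical cluster identifier
        uint32_t x   = cxy >> Y_WIDTH;              // 0x25 >> 4  = 2
        uint32_t y   = cxy & ((1 << Y_WIDTH) - 1);  // 0x25 & 0xF = 5
        printf( "CXY %x -> X = %u , Y = %u\n", cxy, x, y );
        return 0;
    }
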
trunk/kernel/kern/kernel_init.c
(r436 → r437)

  }

- #if( CONFIG_KINIT_DEBUG & 0x1 )
- if( hal_time_stamp() > CONFIG_KINIT_DEBUG )
+ #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )
+ if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )
  printk("\n[DBG] %s : created MMC in cluster %x / chdev = %x\n",
  __FUNCTION__ , local_cxy , chdev_ptr );
  ...
  chdev_dir.dma[channel] = XPTR( local_cxy , chdev_ptr );

- #if( CONFIG_KINIT_DEBUG & 0x1 )
- if( hal_time_stamp() > CONFIG_KINIT_DEBUG )
+ #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )
+ if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )
  printk("\n[DBG] %s : created DMA[%d] in cluster %x / chdev = %x\n",
  __FUNCTION__ , channel , local_cxy , chdev_ptr );
  ...
  }

- #if( CONFIG_KINIT_DEBUG & 0x1 )
- if( hal_time_stamp() > CONFIG_KINIT_DEBUG )
+ #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )
+ if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )
  printk("\n[DBG] %s : create chdev %s / channel = %d / rx = %d / cluster %x / chdev = %x\n",
  __FUNCTION__ , chdev_func_str( func ), channel , rx , local_cxy , chdev );
  ...
  }

- #if( CONFIG_KINIT_DEBUG & 0x1 )
- if( hal_time_stamp() > CONFIG_KINIT_DEBUG )
+ #if( CONFIG_DEBUG_KERNEL_INIT & 0x1 )
+ if( hal_time_stamp() > CONFIG_DEBUG_KERNEL_INIT )
  {
  printk("\n[DBG] %s created PIC chdev in cluster %x at cycle %d\n",
  ...
  thread->core = &LOCAL_CLUSTER->core_tbl[core_lid];

- #if CONFIG_LOCKS_DEBUG
+ // each core initializes the idle thread lists of locks
  list_root_init( &thread->locks_root );
  xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
- #endif
+ thread->local_locks  = 0;
+ thread->remote_locks = 0;

  // CP0 in I/O cluster initialises TXT0 chdev descriptor
  ...
                 (info->x_size * info->y_size) );
  barrier_wait( &local_barrier , info->cores_nr );

- if( (core_lid == 0) && (local_cxy == 0) )
- kinit_dmsg("\n[DBG] %s : exit barrier 0 : TXT0 initialized / cycle %d\n",
- __FUNCTION__, hal_time_stamp() );
  /////////////////////////////////////////////////////////////////////////////////
+
+ #if CONFIG_DEBUG_KERNEL_INIT
+ if( (core_lid == 0) && (local_cxy == 0) )
+ printk("\n[DBG] %s : exit barrier 0 : TXT0 initialized / cycle %d\n",
+ __FUNCTION__, (uint32_t)hal_get_cycles() );
+ #endif

  /////////////////////////////////////////////////////////////////////////////
  ...
  /////////////////////////////////////////////////////////////////////////////////

- if( (core_lid == 0) && (local_cxy == 0) )
- kinit_dmsg("\n[DBG] %s : exit barrier 1 : clusters initialised / cycle %d\n",
- __FUNCTION__, hal_time_stamp() );
+ #if CONFIG_DEBUG_KERNEL_INIT
+ if( (core_lid == 0) && (local_cxy == 0) )
+ printk("\n[DBG] %s : exit barrier 1 : clusters initialised / cycle %d\n",
+ __FUNCTION__, (uint32_t)hal_get_cycles() );
+ #endif

  /////////////////////////////////////////////////////////////////////////////////
  ...
  ////////////////////////////////////////////////////////////////////////////////

- if( (core_lid == 0) && (local_cxy == 0) )
- kinit_dmsg("\n[DBG] %s : exit barrier 2 : PIC initialised / cycle %d\n",
- __FUNCTION__, hal_time_stamp() );
+ #if CONFIG_DEBUG_KERNEL_INIT
+ if( (core_lid == 0) && (local_cxy == 0) )
+ printk("\n[DBG] %s : exit barrier 2 : PIC initialised / cycle %d\n",
+ __FUNCTION__, (uint32_t)hal_get_cycles() );
+ #endif

  ////////////////////////////////////////////////////////////////////////////////
  ...
  /////////////////////////////////////////////////////////////////////////////////

- if( (core_lid == 0) && (local_cxy == 0) )
- kinit_dmsg("\n[DBG] %s : exit barrier 3 : all chdev initialised / cycle %d\n",
- __FUNCTION__, hal_time_stamp());
-
+ #if CONFIG_DEBUG_KERNEL_INIT
+ if( (core_lid == 0) && (local_cxy == 0) )
+ printk("\n[DBG] %s : exit barrier 3 : all chdev initialised / cycle %d\n",
+ __FUNCTION__, (uint32_t)hal_get_cycles() );
+ #endif
+
+ #if( CONFIG_DEBUG_KERNEL_INIT & 1 )
+ chdev_dir_display();
+ #endif
+
  /////////////////////////////////////////////////////////////////////////////////
  // STEP 4 : All cores enable IPI (Inter Procesor Interrupt),
  ...
  /////////////////////////////////////////////////////////////////////////////////

- #if CONFIG_KINIT_DEBUG
- chdev_dir_display();
- #endif
-
  // All cores enable the shared IPI channel
  dev_pic_enable_ipi();
  ...
  core->scheduler.idle = thread;

- #if CONFIG_KINIT_DEBUG
+ #if( CONFIG_DEBUG_KERNEL_INIT & 1 )
  sched_display( core_lid );
  #endif
  ...
  /////////////////////////////////////////////////////////////////////////////////

- if( (core_lid == 0) && (local_cxy == 0) )
- kinit_dmsg("\n[DBG] %s : exit barrier 4 : VFS_root = %l in cluster 0 / cycle %d\n",
- __FUNCTION__, vfs_root_inode_xp , hal_time_stamp());
+ #if CONFIG_DEBUG_KERNEL_INIT
+ if( (core_lid == 0) && (local_cxy == 0) )
+ printk("\n[DBG] %s : exit barrier 4 : VFS_root = %l in cluster 0 / cycle %d\n",
+ __FUNCTION__, vfs_root_inode_xp , (uint32_t)hal_get_cycles());
+ #endif

  /////////////////////////////////////////////////////////////////////////////////
  ...
  /////////////////////////////////////////////////////////////////////////////////

- if( (core_lid == 0) && (local_cxy == 0) )
- kinit_dmsg("\n[DBG] %s : exit barrier 5 : VFS_root = %l in cluster IO / cycle %d\n",
- __FUNCTION__, vfs_root_inode_xp , hal_time_stamp() );
+ #if CONFIG_DEBUG_KERNEL_INIT
+ if( (core_lid == 0) && (local_cxy == io_cxy) )
+ printk("\n[DBG] %s : exit barrier 5 : VFS_root = %l in cluster %x / cycle %d\n",
+ __FUNCTION__, vfs_root_inode_xp , io_cxy , (uint32_t)hal_get_cycles());
+ #endif

  /////////////////////////////////////////////////////////////////////////////////
  ...
  /////////////////////////////////////////////////////////////////////////////////

- if( (core_lid == 0) && (local_cxy == 0) )
- kinit_dmsg("\n[DBG] %s : exit barrier 6 : dev_root = %l in cluster IO / cycle %d\n",
- __FUNCTION__, devfs_dev_inode_xp , hal_time_stamp() );
+ #if CONFIG_DEBUG_KERNEL_INIT
+ if( (core_lid == 0) && (local_cxy == io_cxy) )
+ printk("\n[DBG] %s : exit barrier 6 : dev_root = %l in cluster %x / cycle %d\n",
+ __FUNCTION__, devfs_dev_inode_xp , io_cxy , (uint32_t)hal_get_cycles() );
+ #endif

  /////////////////////////////////////////////////////////////////////////////////
  ...
  /////////////////////////////////////////////////////////////////////////////////

+ #if CONFIG_DEBUG_KERNEL_INIT
+ if( (core_lid == 0) && (local_cxy == 0) )
+ printk("\n[DBG] %s : exit barrier 7 : dev_root = %l in cluster 0 / cycle %d\n",
+ __FUNCTION__, devfs_dev_inode_xp , (uint32_t)hal_get_cycles() );
+ #endif
+
+ /////////////////////////////////////////////////////////////////////////////////
+ // STEP 8 : CP0 in cluster 0 creates the first user process (process_init)
+ /////////////////////////////////////////////////////////////////////////////////
+
  if( (core_lid == 0) && (local_cxy == 0) )
- kinit_dmsg("\n[DBG] %s : exit barrier 7 : dev_root = %l in cluster 0 / cycle %d\n",
- __FUNCTION__, devfs_dev_inode_xp , hal_time_stamp() );
-
- /////////////////////////////////////////////////////////////////////////////////
- // STEP 8 : CP0 in cluster 0 creates the first user process (process_init)
- /////////////////////////////////////////////////////////////////////////////////
-
- if( (core_lid == 0) && (local_cxy == 0) )
  {

- #if CONFIG_KINIT_DEBUG
+ #if( CONFIG_DEBUG_KERNEL_INIT & 1 )
  vfs_display( vfs_root_inode_xp );
  #endif
  ...
  /////////////////////////////////////////////////////////////////////////////////

- if( (core_lid == 0) && (local_cxy == 0) )
- kinit_dmsg("\n[DBG] %s : exit barrier 8 : process init created / cycle %d\n",
- __FUNCTION__ , hal_time_stamp() );
+ #if CONFIG_DEBUG_KERNEL_INIT
+ if( (core_lid == 0) && (local_cxy == 0) )
+ printk("\n[DBG] %s : exit barrier 8 : process init created / cycle %d\n",
+ __FUNCTION__ , (uint32_t)hal_get_cycles() );
+ #endif

  /////////////////////////////////////////////////////////////////////////////////
  ...
  print_banner( (info->x_size * info->y_size) , info->cores_nr );

- #if CONFIG_KINIT_DEBUG
-
- printk("\n\n***** memory fooprint for main kernel objects\n\n"
+ #if( CONFIG_DEBUG_KERNEL_INIT & 1 )
+ printk("\n\n***** memory fooprint for main kernel objects\n\n"
  " - thread descriptor  : %d bytes\n"
  " - process descriptor : %d bytes\n"
trunk/kernel/kern/printk.c
(r428 → r437)

   * printk.c - Kernel Log & debug messages API implementation.
   *
-  * authors  Alain Greiner (2016)
+  * authors  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/kern/printk.h
(r428 → r437)

   * printk.h - Kernel Log & debug messages API definition.
   *
-  * authors  Alain Greiner (2016)
+  * authors  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
  ...

- ///////////////////////////////////////////////////////////////////////////////////
- // Conditional debug macros
- ///////////////////////////////////////////////////////////////////////////////////
+
+ /* deprecated march 2018 [AG]

  #if CONFIG_CHDEV_DEBUG
  ...
  #endif

+ */
+
  #endif  // _PRINTK_H
trunk/kernel/kern/process.c
(r436 → r437)

      xptr_t children_lock_xp;

+     pid_t pid = process->pid;
+
      assert( (process->th_nr == 0) , __FUNCTION__ ,
-     "process %x in cluster %x has still active threads", process->pid , local_cxy );
+     "process %x in cluster %x has still active threads", pid , local_cxy );

  #if CONFIG_DEBUG_PROCESS_DESTROY
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_PROCESS_DESTROY )
  printk("\n[DBG] %s : thread %x enter to destroy process %x (pid = %x) / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD , process, process->pid , cycle );
+ __FUNCTION__ , CURRENT_THREAD , process, pid , cycle );
+ #endif
+
+ #if CONFIG_DEBUG_PROCESS_DESTROY
+ if( CONFIG_DEBUG_PROCESS_DESTROY & 1 )
+ cluster_processes_display( CXY_FROM_PID( pid ) );
  #endif
  ...

      // remove process from children_list if process is in owner cluster
-     if( CXY_FROM_PID( process->pid ) == local_cxy )
+     if( CXY_FROM_PID( pid ) == local_cxy )
      {
          // get pointers on parent process
  ...

      // release the process PID to cluster manager
-     cluster_pid_release( process->pid );
+     cluster_pid_release( pid );

      // FIXME close all open files and update dirty [AG]
  ...
  if( CONFIG_DEBUG_PROCESS_DESTROY )
  printk("\n[DBG] %s : thread %x exit / destroyed process %x (pid = %x) / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD , process, process->pid, cycle );
+ __FUNCTION__ , CURRENT_THREAD , process, pid, cycle );
  #endif
trunk/kernel/kern/rpc.c
(r436 → r437)

  /*
-  * rpc.c - RPC related operations implementation.
+  * rpc.c - RPC operations implementation.
   *
-  * Author  Alain Greiner (2016,2017)
+  * Author  Alain Greiner (2016,2017,2018)
   *
   * Copyright (c) UPMC Sorbonne Universites
  ...
  if( cycle > CONFIG_DEBUG_RPC_MARSHALING )                              \
  printk("\n[DBG] %s : enter thread %x on core[%x,%d] / cycle %d\n",     \
- __FUNCTION__ , CURRENT_THREAD->trdid, local_cxy, CURRENT_THREAD->core->lid , cycle );
+ __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );

  #define RPC_DEBUG_EXIT                                                 \
  ...
  if( cycle > CONFIG_DEBUG_RPC_MARSHALING )                              \
  printk("\n[DBG] %s : exit thread %x on core[%x,%d] / cycle %d\n",      \
- __FUNCTION__ , CURRENT_THREAD->trdid, local_cxy, CURRENT_THREAD->core->lid , cycle );
+ __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );

  #else
  ...
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_RPC_SEND < cycle )
- printk("\n[DBG] %s : thread %x enter for rpc[%d] / rpc_ptr %x / cycle %d\n",
- __FUNCTION__, CURRENT_THREAD, rpc->index, rpc, cycle );
+ printk("\n[DBG] %s : thread %x in cluster %x enter for rpc[%d] / rpc_ptr %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, rpc, cycle );
  #endif
  ...
  cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_RPC_SEND < cycle )
- printk("\n[DBG] %s : thread %x busy waiting / rpc[%d] / server = %x / cycle %d\n",
- __FUNCTION__, CURRENT_THREAD, rpc->index , server_cxy, cycle );
+ printk("\n[DBG] %s : thread %x in cluster %x busy waiting / rpc[%d] / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index , cycle );
  #endif
  ...
  cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_RPC_SEND < cycle )
- printk("\n[DBG] %s : thread % resume / rpc[%d] / cycle %d\n",
- __FUNCTION__, CURRENT_THREAD, rpc->index, cycle );
+ printk("\n[DBG] %s : thread % in cluster %x resume / rpc[%d] / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, cycle );
  #endif
  }
  ...
  cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_RPC_SEND < cycle )
- printk("\n[DBG] %s : thread %x block & deschedule / rpc[%d] / server = %x / cycle %d\n",
- __FUNCTION__, CURRENT_THREAD, rpc->index , server_cxy, cycle );
+ printk("\n[DBG] %s : thread %x in cluster %x deschedule / rpc[%d] / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index , cycle );
  #endif
  thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );
  ...
  cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_RPC_SEND < cycle )
- printk("\n[DBG] %s : thread % resume / rpcr[%d] / cycle %d\n",
- __FUNCTION__, CURRENT_THREAD, rpc->index, cycle );
+ printk("\n[DBG] %s : thread % in cluster %x resume / rpcr[%d] / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, local_cxy, rpc->index, cycle );
  #endif
  }
  ...
  cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_RPC_SEND < cycle )
- printk("\n[DBG] %s : non blocking rpc[%d] => thread return / cycle %d\n",
+ printk("\n[DBG] %s : non blocking rpc[%d] => thread %x return / cycle %d\n",
  __FUNCTION__, rpc->index, CURRENT_THREAD, cycle );
  #endif
  ...
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_RPC_SERVER < cycle )
- printk("\n[DBG] %s : RPC thread %x takes RPC fifo ownership / cluster %x / cycle %d\n",
+ printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n",
  __FUNCTION__, this, local_cxy, cycle );
  #endif
  ...
  desc_ptr = GET_PTR( desc_xp );

- index    = desc_ptr->index;
- blocking = desc_ptr->blocking;
+ index    = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->index ) );
+ blocking = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->blocking ) );

  #if CONFIG_DEBUG_RPC_SERVER
  cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_RPC_SERVER < cycle )
- printk("\n[DBG] %s : RPC thread %x got rpc[%d] / rpc_ptr %x / cycle %d\n",
- __FUNCTION__, this, index, desc_ptr, cycle );
+ printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_ptr %x / cycle %d\n",
+ __FUNCTION__, this, local_cxy, index, desc_ptr, cycle );
  #endif
  // call the relevant server function
  ...
  cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_RPC_SERVER < cycle )
- printk("\n[DBG] %s : RPC thread %x completes rpc %d in cluster %x / cycle %d\n",
- __FUNCTION__, this, index, local_cxy, cycle );
+ printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n",
+ __FUNCTION__, this, local_cxy, index, cycle );
  #endif
  // increment handled RPCs counter
  ...
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_RPC_SERVER < cycle )
- printk("\n[DBG] %s : RPC thread %x suicides in cluster %x / cycle %d\n",
+ printk("\n[DBG] %s : RPC thread %x in cluster %x suicides / cycle %d\n",
  __FUNCTION__, this, local_cxy, cycle );
  #endif
  ...
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_RPC_SERVER < cycle )
- printk("\n[DBG] %s : RPC thread %x deschedules in cluster %x / cycle %d\n",
+ printk("\n[DBG] %s : RPC thread %x in cluster %x deschedules / cycle %d\n",
  __FUNCTION__, this, local_cxy, cycle );
  #endif
  ...
  cycle = (uint32_t)hal_get_cycles();
  if( CONFIG_DEBUG_RPC_SERVER < cycle )
- printk("\n[DBG] %s : RPC thread %x resumes in cluster %x / cycle %d\n",
+ printk("\n[DBG] %s : RPC thread %x in cluster %x resumes / cycle %d\n",
  __FUNCTION__, this, local_cxy, cycle );
  #endif
  ...
                            page_t  ** page )      // out
  {
- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
  ...
  *page = (page_t *)(intptr_t)rpc.args[1];

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }

  void rpc_pmem_get_pages_server( xptr_t xp )
  {
- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  // get client cluster identifier and pointer on RPC descriptor
  ...
  hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }
  ...
                                page_t * page )      // out
  {
- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
  ...
  rpc_send( cxy , &rpc );

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }

  void rpc_pmem_release_pages_server( xptr_t xp )
  {
- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  // get client cluster identifier and pointer on RPC descriptor
  ...
  kmem_free( &req );

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }
  ...
                                 error_t * error )    // out
  {
- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
  ...
  *error = (error_t)rpc.args[4];

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }

  void rpc_process_make_fork_server( xptr_t xp )
  {
- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  xptr_t ref_process_xp;         // extended pointer on reference parent process
  ...
  hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }
  ...
                                  error_t * error )    // out
  {
- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
  ...
  *error = (error_t)rpc.args[5];

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }

  void rpc_thread_user_create_server( xptr_t xp )
  {
- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  pthread_attr_t * attr_ptr;    // pointer on attributes structure in client cluster
  ...
  hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }
  ...
                                    error_t * error )    // out
  {
- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
  ...
  *error = (error_t)rpc.args[4];

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }

  void rpc_thread_kernel_create_server( xptr_t xp )
  {
- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  thread_t * thread_ptr;    // local pointer on thread descriptor
  xptr_t     thread_xp;     // extended pointer on thread descriptor
  ...
  hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)thread_xp );

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }
  ...
                            error_t * error )    // out
  {
- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
  ...
  *error = (error_t)rpc.args[9];

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }
  ...
  error_t error;

- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  // get client cluster identifier and pointer on RPC descriptor
  cxy_t client_cxy = GET_CXY( xp );
  ...
  hal_remote_swd( XPTR( client_cxy , &desc->args[9] ) , (uint64_t)error );

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }
  ...
                                struct vfs_inode_s * inode )
  {
- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
  ...
  rpc_send( cxy , &rpc );

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }
  ...
  {
  vfs_inode_t * inode;

- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );

  // get client cluster identifier and pointer on RPC descriptor
  ...
  vfs_inode_destroy( inode );

- rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
- CURRENT_THREAD->core->lid , hal_time_stamp() );
  }
  ...
                           error_t * error )    // out
  {
- rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] /
cycle %d\n", 1118 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1119 CURRENT_THREAD->core->lid , hal_time_stamp() ); 1025 RPC_DEBUG_ENTER 1120 1026 1121 1027 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); … … 1139 1045 *error = (error_t)rpc.args[4]; 1140 1046 1141 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1142 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1143 CURRENT_THREAD->core->lid , hal_time_stamp() ); 1047 RPC_DEBUG_EXIT 1144 1048 } 1145 1049 … … 1153 1057 error_t error; 1154 1058 1059 RPC_DEBUG_ENTER 1060 1155 1061 char name_copy[CONFIG_VFS_MAX_NAME_LENGTH]; 1156 1157 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1158 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1159 CURRENT_THREAD->core->lid , hal_time_stamp() );1160 1062 1161 1063 // get client cluster identifier and pointer on RPC descriptor … … 1181 1083 hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error ); 1182 1084 1183 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 1184 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 1185 CURRENT_THREAD->core->lid , hal_time_stamp() ); 1085 RPC_DEBUG_EXIT 1186 1086 } 1187 1087 … … 1195 1095 vfs_dentry_t * dentry ) 1196 1096 { 1197 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1198 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1199 CURRENT_THREAD->core->lid , hal_time_stamp() );1200 1201 1097 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1202 1098 … … 1213 1109 rpc_send( cxy , &rpc ); 1214 1110 1215 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1216 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1217 CURRENT_THREAD->core->lid , hal_time_stamp() );1218 1111 } 1219 1112 … … 1222 1115 { 1223 1116 vfs_dentry_t * dentry; 1224 1225 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1226 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1227 CURRENT_THREAD->core->lid , hal_time_stamp() );1228 1117 1229 1118 // get client cluster identifier and pointer on RPC descriptor … … 1237 1126 vfs_dentry_destroy( dentry ); 1238 1127 1239 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1240 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1241 CURRENT_THREAD->core->lid , hal_time_stamp() );1242 1128 } 1243 1129 … … 1254 1140 error_t * error ) // out 1255 1141 { 1256 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1257 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1258 CURRENT_THREAD->core->lid , hal_time_stamp() );1259 1260 1142 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1261 1143 … … 1277 1159 *error = (error_t)rpc.args[3]; 1278 1160 1279 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1280 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1281 CURRENT_THREAD->core->lid , hal_time_stamp() );1282 1161 } 1283 1162 … … 1289 1168 xptr_t file_xp; 1290 1169 error_t error; 1291 1292 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1293 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1294 CURRENT_THREAD->core->lid , hal_time_stamp() );1295 1170 1296 1171 // get client cluster identifier and pointer on RPC descriptor … … 1311 1186 hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1312 1187 1313 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1314 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1315 
CURRENT_THREAD->core->lid , hal_time_stamp() );1316 1188 } 1317 1189 … … 1324 1196 vfs_file_t * file ) 1325 1197 { 1326 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1327 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1328 CURRENT_THREAD->core->lid , hal_time_stamp() );1329 1330 1198 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1331 1199 … … 1342 1210 rpc_send( cxy , &rpc ); 1343 1211 1344 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1345 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1346 CURRENT_THREAD->core->lid , hal_time_stamp() );1347 1212 } 1348 1213 … … 1351 1216 { 1352 1217 vfs_file_t * file; 1353 1354 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1355 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1356 CURRENT_THREAD->core->lid , hal_time_stamp() );1357 1218 1358 1219 // get client cluster identifier and pointer on RPC descriptor … … 1366 1227 vfs_file_destroy( file ); 1367 1228 1368 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1369 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1370 CURRENT_THREAD->core->lid , hal_time_stamp() );1371 1229 } 1372 1230 … … 1382 1240 error_t * error ) // out 1383 1241 { 1384 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1385 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1386 CURRENT_THREAD->core->lid , hal_time_stamp() );1387 1388 1242 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1389 1243 … … 1405 1259 *error = (error_t)rpc.args[3]; 1406 1260 1407 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1408 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1409 CURRENT_THREAD->core->lid , hal_time_stamp() );1410 1261 } 1411 1262 … … 1420 1271 char name_copy[CONFIG_VFS_MAX_NAME_LENGTH]; 1421 1272 1422 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1423 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1424 CURRENT_THREAD->core->lid , hal_time_stamp() );1425 1426 1273 // get client cluster identifier and pointer on RPC descriptor 1427 1274 cxy_t client_cxy = GET_CXY( xp ); … … 1443 1290 hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1444 1291 1445 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1446 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1447 CURRENT_THREAD->core->lid , hal_time_stamp() );1448 1292 } 1449 1293 … … 1457 1301 error_t * error ) // out 1458 1302 { 1459 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1460 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1461 CURRENT_THREAD->core->lid , hal_time_stamp() );1462 1463 1303 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1464 1304 … … 1478 1318 *error = (error_t)rpc.args[1]; 1479 1319 1480 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1481 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1482 CURRENT_THREAD->core->lid , hal_time_stamp() );1483 1320 } 1484 1321 … … 1489 1326 vfs_inode_t * inode; 1490 1327 1491 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1492 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1493 CURRENT_THREAD->core->lid , hal_time_stamp() );1494 1495 1328 // get client cluster identifier and pointer on RPC descriptor 1496 1329 cxy_t client_cxy = GET_CXY( xp ); … … 1506 1339 hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 1507 1340 1508 rpc_dmsg("\n[DBG] %s : exit / 
thread %x on core[%x,%d] / cycle %d\n",1509 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1510 CURRENT_THREAD->core->lid , hal_time_stamp() );1511 1341 } 1512 1342 … … 1523 1353 error_t * error ) // out 1524 1354 { 1525 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1526 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1527 CURRENT_THREAD->core->lid , hal_time_stamp() );1528 1529 1355 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1530 1356 … … 1547 1373 *error = (error_t)rpc.args[4]; 1548 1374 1549 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1550 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1551 CURRENT_THREAD->core->lid , hal_time_stamp() );1552 1375 } 1553 1376 … … 1561 1384 error_t error; 1562 1385 1563 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1564 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1565 CURRENT_THREAD->core->lid , hal_time_stamp() );1566 1567 1386 // get client cluster identifier and pointer on RPC descriptor 1568 1387 cxy_t client_cxy = GET_CXY( xp ); … … 1581 1400 hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error ); 1582 1401 1583 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1584 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1585 CURRENT_THREAD->core->lid , hal_time_stamp() );1586 1402 } 1587 1403 … … 1597 1413 error_t * error ) // out 1598 1414 { 1599 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1600 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1601 CURRENT_THREAD->core->lid , hal_time_stamp() );1602 1603 1415 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1604 1416 … … 1620 1432 *error = (error_t)rpc.args[3]; 1621 1433 1622 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1623 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1624 CURRENT_THREAD->core->lid , hal_time_stamp() );1625 1434 } 1626 1435 … … 1633 1442 xptr_t vseg_xp; 1634 1443 error_t error; 1635 1636 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1637 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1638 CURRENT_THREAD->core->lid , hal_time_stamp() );1639 1444 1640 1445 // get client cluster identifier and pointer on RPC descriptor … … 1654 1459 hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1655 1460 1656 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1657 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1658 CURRENT_THREAD->core->lid , hal_time_stamp() );1659 1461 } 1660 1462 … … 1673 1475 error_t * error ) // out 1674 1476 { 1675 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1676 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1677 CURRENT_THREAD->core->lid , hal_time_stamp() );1678 1679 1477 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1680 1478 … … 1698 1496 *error = (error_t)rpc.args[5]; 1699 1497 1700 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1701 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1702 CURRENT_THREAD->core->lid , hal_time_stamp() );1703 1498 } 1704 1499 … … 1712 1507 ppn_t ppn; 1713 1508 error_t error; 1714 1715 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1716 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1717 CURRENT_THREAD->core->lid , hal_time_stamp() );1718 1509 1719 1510 // get client cluster identifier and pointer on RPC descriptor … … 1734 1525 
hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error ); 1735 1526 1736 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1737 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1738 CURRENT_THREAD->core->lid , hal_time_stamp() );1739 1527 } 1740 1528 … … 1748 1536 xptr_t * buf_xp ) // out 1749 1537 { 1750 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1751 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1752 CURRENT_THREAD->core->lid , hal_time_stamp() );1753 1754 1538 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1755 1539 … … 1769 1553 *buf_xp = (xptr_t)rpc.args[1]; 1770 1554 1771 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1772 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1773 CURRENT_THREAD->core->lid , hal_time_stamp() );1774 1555 } 1775 1556 … … 1777 1558 void rpc_kcm_alloc_server( xptr_t xp ) 1778 1559 { 1779 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1780 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1781 CURRENT_THREAD->core->lid , hal_time_stamp() );1782 1783 1560 // get client cluster identifier and pointer on RPC descriptor 1784 1561 cxy_t client_cxy = GET_CXY( xp ); … … 1798 1575 hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)buf_xp ); 1799 1576 1800 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1801 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1802 CURRENT_THREAD->core->lid , hal_time_stamp() );1803 1577 } 1804 1578 … … 1812 1586 uint32_t kmem_type ) // in 1813 1587 { 1814 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1815 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1816 CURRENT_THREAD->core->lid , hal_time_stamp() );1817 1818 1588 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1819 1589 … … 1831 1601 rpc_send( cxy , &rpc ); 1832 1602 1833 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1834 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1835 CURRENT_THREAD->core->lid , hal_time_stamp() );1836 1603 } 1837 1604 … … 1839 1606 void rpc_kcm_free_server( xptr_t xp ) 1840 1607 { 1841 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1842 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1843 CURRENT_THREAD->core->lid , hal_time_stamp() );1844 1845 1608 // get client cluster identifier and pointer on RPC descriptor 1846 1609 cxy_t client_cxy = GET_CXY( xp ); … … 1857 1620 kmem_free( &req ); 1858 1621 1859 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1860 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1861 CURRENT_THREAD->core->lid , hal_time_stamp() );1862 1622 } 1863 1623 … … 1876 1636 error_t * error ) // out 1877 1637 { 1878 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1879 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1880 CURRENT_THREAD->core->lid , hal_time_stamp() );1881 1882 1638 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1883 1639 … … 1902 1658 *error = (error_t)rpc.args[6]; 1903 1659 1904 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1905 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1906 CURRENT_THREAD->core->lid , hal_time_stamp() );1907 1660 } 1908 1661 … … 1918 1671 uint32_t size; 1919 1672 error_t error; 1920 1921 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1922 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1923 
CURRENT_THREAD->core->lid , hal_time_stamp() );1924 1673 1925 1674 // get client cluster identifier and pointer on RPC descriptor … … 1959 1708 hal_remote_swd( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error ); 1960 1709 1961 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1962 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1963 CURRENT_THREAD->core->lid , hal_time_stamp() );1964 1710 } 1965 1711 … … 1974 1720 page_t ** page ) // out 1975 1721 { 1976 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",1977 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,1978 CURRENT_THREAD->core->lid , hal_time_stamp() );1979 1980 1722 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1981 1723 … … 1996 1738 *page = (page_t *)(intptr_t)rpc.args[2]; 1997 1739 1998 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",1999 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2000 CURRENT_THREAD->core->lid , hal_time_stamp() );2001 1740 } 2002 1741 … … 2004 1743 void rpc_mapper_get_page_server( xptr_t xp ) 2005 1744 { 2006 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",2007 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2008 CURRENT_THREAD->core->lid , hal_time_stamp() );2009 2010 1745 // get client cluster identifier and pointer on RPC descriptor 2011 1746 cxy_t cxy = GET_CXY( xp ); … … 2022 1757 hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page ); 2023 1758 2024 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",2025 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2026 CURRENT_THREAD->core->lid , hal_time_stamp() );2027 1759 } 2028 1760 … … 2043 1775 struct vseg_s ** vseg ) 2044 1776 { 2045 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",2046 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2047 CURRENT_THREAD->core->lid , hal_time_stamp() );2048 2049 1777 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 2050 1778 … … 2071 1799 *vseg = (vseg_t *)(intptr_t)rpc.args[8]; 2072 1800 2073 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",2074 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2075 CURRENT_THREAD->core->lid , hal_time_stamp() );2076 1801 } 2077 1802 … … 2079 1804 void rpc_vmm_create_vseg_server( xptr_t xp ) 2080 1805 { 2081 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",2082 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2083 CURRENT_THREAD->core->lid , hal_time_stamp() );2084 2085 1806 // get client cluster identifier and pointer on RPC descriptor 2086 1807 cxy_t cxy = GET_CXY( xp ); … … 2110 1831 hal_remote_swd( XPTR( cxy , &desc->args[8] ) , (uint64_t)(intptr_t)vseg ); 2111 1832 2112 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",2113 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2114 CURRENT_THREAD->core->lid , hal_time_stamp() );2115 1833 } 2116 1834 … … 2123 1841 lid_t lid) 2124 1842 { 2125 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",2126 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2127 CURRENT_THREAD->core->lid , hal_time_stamp() );2128 2129 1843 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 2130 1844 … … 2141 1855 rpc_send( cxy , &rpc ); 2142 1856 2143 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",2144 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2145 CURRENT_THREAD->core->lid , hal_time_stamp() );2146 1857 } 2147 1858 … … 2149 1860 
void rpc_sched_display_server( xptr_t xp ) 2150 1861 { 2151 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",2152 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2153 CURRENT_THREAD->core->lid , hal_time_stamp() );2154 2155 1862 // get client cluster identifier and pointer on RPC descriptor 2156 1863 cxy_t cxy = GET_CXY( xp ); … … 2163 1870 sched_display( lid ); 2164 1871 2165 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",2166 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2167 CURRENT_THREAD->core->lid , hal_time_stamp() );2168 1872 } 2169 1873 … … 2176 1880 process_t * process ) 2177 1881 { 2178 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",2179 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2180 CURRENT_THREAD->core->lid , hal_time_stamp() );2181 2182 1882 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 2183 1883 … … 2194 1894 rpc_send( cxy , &rpc ); 2195 1895 2196 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",2197 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2198 CURRENT_THREAD->core->lid , hal_time_stamp() );2199 1896 } 2200 1897 … … 2202 1899 void rpc_vmm_set_cow_server( xptr_t xp ) 2203 1900 { 2204 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",2205 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2206 CURRENT_THREAD->core->lid , hal_time_stamp() );2207 2208 1901 process_t * process; 2209 1902 … … 2218 1911 vmm_set_cow( process ); 2219 1912 2220 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",2221 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2222 CURRENT_THREAD->core->lid , hal_time_stamp() );2223 1913 } 2224 1914 … … 2232 1922 bool_t detailed ) 2233 1923 { 2234 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",2235 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2236 CURRENT_THREAD->core->lid , hal_time_stamp() );2237 2238 1924 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 2239 1925 … … 2251 1937 rpc_send( cxy , &rpc ); 2252 1938 2253 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",2254 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2255 CURRENT_THREAD->core->lid , hal_time_stamp() );2256 1939 } 2257 1940 … … 2259 1942 void rpc_vmm_display_server( xptr_t xp ) 2260 1943 { 2261 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",2262 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,2263 CURRENT_THREAD->core->lid , hal_time_stamp() );2264 2265 1944 process_t * process; 2266 1945 bool_t detailed; … … 2277 1956 vmm_display( process , detailed ); 2278 1957 2279 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 2280 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 2281 CURRENT_THREAD->core->lid , hal_time_stamp() ); 2282 } 2283 2284 1958 } 1959 1960 -
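One functional change in the rpc.c server loop above is that the fields of the RPC descriptor, which lives in the client cluster, are now fetched through the remote-access primitives (hal_remote_lw on an XPTR) instead of being dereferenced directly. The sketch below models an extended pointer as an explicit (cluster, local pointer) pair; XPTR / GET_CXY / GET_PTR and hal_remote_lw() are simplified stand-ins, not the kernel's real declarations, and the "remote" read is a plain load in this single-address-space demo.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t cxy_t;

typedef struct xptr_s            // stand-in extended pointer
{
    cxy_t  cxy;                  // cluster identifier
    void * ptr;                  // pointer valid inside that cluster
} xptr_t;

#define XPTR( cxy , ptr )   ((xptr_t){ (cxy) , (void *)(ptr) })
#define GET_CXY( xp )       ((xp).cxy)
#define GET_PTR( xp )       ((xp).ptr)

// stand-in remote read : here a plain load, on the real hardware it targets
// the physical memory of the cluster identified by the cxy field
static uint32_t hal_remote_lw( xptr_t xp ) { return *(uint32_t *)GET_PTR( xp ); }

typedef struct rpc_desc_s
{
    uint32_t index;              // RPC service index
    uint32_t blocking;           // non-zero if the client waits for completion
} rpc_desc_t;

int main( void )
{
    rpc_desc_t desc    = { .index = 7 , .blocking = 1 };  // stands for a descriptor built by the client
    xptr_t     desc_xp = XPTR( 0x3 , &desc );             // as extracted from the RPC FIFO

    cxy_t        desc_cxy = GET_CXY( desc_xp );
    rpc_desc_t * desc_ptr = GET_PTR( desc_xp );

    // server side : each field is read through the remote accessor
    uint32_t index    = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->index ) );
    uint32_t blocking = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->blocking ) );

    printf("rpc[%u] / blocking %u\n", index, blocking );
    return 0;
}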
trunk/kernel/kern/rpc.h
r436 r437 2 2 * rpc.h - RPC (Remote Procedure Call) operations definition. 3 3 * 4 * Author Alain Greiner (2016,2017) 4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/kern/scheduler.c
r436 r437 97 97 list_entry_t * current; 98 98 list_entry_t * last; 99 list_entry_t * root; 100 bool_t done; 99 101 100 102 // take lock protecting sheduler lists 101 103 spinlock_lock( &sched->lock ); 102 104 103 // first loop: scan the kernel threads list if not empty105 // first : scan the kernel threads list if not empty 104 106 if( list_is_empty( &sched->k_root ) == false ) 105 107 { 108 root = &sched->k_root; 106 109 last = sched->k_last; 107 current = sched->k_last; 108 do 110 current = last; 111 done = false; 112 113 while( done == false ) 109 114 { 110 115 // get next entry in kernel list 111 current = list_next( &sched->k_root , current ); 116 current = current->next; 117 118 // check exit condition 119 if( current == last ) done = true; 112 120 113 121 // skip the root that does not contain a thread 114 if( current == NULL ) current = sched->k_root.next;122 if( current == root ) continue; 115 123 116 124 // get thread pointer for this entry … … 120 128 switch( thread->type ) 121 129 { 122 case THREAD_IDLE: // skip IDLE thread 123 break; 124 125 case THREAD_RPC: // RPC thread if non blocked and FIFO non-empty 130 case THREAD_RPC: // if non blocked and RPC FIFO non-empty 126 131 if( (thread->blocked == 0) && 127 132 (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) ) … … 132 137 break; 133 138 134 default: // DEV threadif non blocked and waiting queue non empty139 case THREAD_DEV: // if non blocked and waiting queue non empty 135 140 if( (thread->blocked == 0) && 136 141 (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) ) … … 140 145 } 141 146 break; 142 } // end switch type 143 } 144 while( current != last ); 145 } 146 147 // second loop : scan the user threads list if not empty 147 148 default: 149 break; 150 } 151 } // end loop on kernel threads 152 } // end if kernel threads 153 154 // second : scan the user threads list if not empty 148 155 if( list_is_empty( &sched->u_root ) == false ) 149 156 { 157 root = &sched->u_root; 150 158 last = sched->u_last; 151 current = sched->u_last; 152 do 159 current = last; 160 done = false; 161 162 while( done == false ) 153 163 { 154 164 // get next entry in user list 155 current = list_next( &sched->u_root , current ); 165 current = current->next; 166 167 // check exit condition 168 if( current == last ) done = true; 156 169 157 170 // skip the root that does not contain a thread 158 if( current == NULL ) current = sched->u_root.next;171 if( current == root ) continue; 159 172 160 173 // get thread pointer for this entry … … 167 180 return thread; 168 181 } 169 } 170 while( current != last ); 171 } 172 173 // third : return idle thread if no runnable thread 182 } // end loop on user threads 183 } // end if user threads 184 185 // third : return idle thread if no other runnable thread 174 186 spinlock_unlock( &sched->lock ); 175 187 return sched->idle; … … 180 192 void sched_handle_signals( core_t * core ) 181 193 { 194 182 195 list_entry_t * iter; 183 196 thread_t * thread; … … 214 227 process = thread->process; 215 228 229 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS 230 uint32_t cycle = (uint32_t)hal_get_cycles(); 231 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 232 printk("\n[DBG] %s : thread %x in proces %x must be deleted / cycle %d\n", 233 __FUNCTION__ , thread , process->pid , cycle ); 234 #endif 216 235 // release FPU if required 217 236 if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL; … … 232 251 233 252 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS 234 uint32_tcycle = 
(uint32_t)hal_get_cycles();253 cycle = (uint32_t)hal_get_cycles(); 235 254 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 236 printk("\n[DBG] %s : thread %x deleted thread %x/ cycle %d\n",237 __FUNCTION__ , CURRENT_THREAD , thread , cycle );255 printk("\n[DBG] %s : thread %x in process %x has been deleted / cycle %d\n", 256 __FUNCTION__ , thread , process->pid , cycle ); 238 257 #endif 239 258 // destroy process descriptor if no more threads … … 246 265 cycle = (uint32_t)hal_get_cycles(); 247 266 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 248 printk("\n[DBG] %s : thread %x deleted process %x/ cycle %d\n",249 __FUNCTION__ , CURRENT_THREAD , process, cycle );267 printk("\n[DBG] %s : process %x has been deleted / cycle %d\n", 268 __FUNCTION__ , process->pid , cycle ); 250 269 #endif 251 270 … … 374 393 remote_spinlock_lock_busy( lock_xp , &save_sr ); 375 394 376 nolock_printk("\n***** scheduler state for core[%x,%d] / cycle %d / current = (%x,%x)\n", 377 local_cxy , core->lid, (uint32_t)hal_get_cycles(), 378 sched->current->process->pid , sched->current->trdid ); 395 nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n", 396 local_cxy , core->lid, sched->current, (uint32_t)hal_get_cycles() ); 379 397 380 398 // display kernel threads … … 390 408 else 391 409 { 392 nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X 410 nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n", 393 411 thread_type_str( thread->type ), thread->process->pid, thread->trdid, 394 thread, thread->blocked, thread->flags 412 thread, thread->blocked, thread->flags ); 395 413 } 396 414 } -
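The sched_select() rewrite in scheduler.c above replaces the old do/while walk (which relied on list_next() returning NULL) with an explicit circular scan: start right after the last served entry, skip the root sentinel with "continue", and stop after one full turn using a "done" flag. Below is a self-contained sketch of that control flow; the types and the runnability test are simplified stand-ins for the kernel's scheduler structures.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct list_entry_s { struct list_entry_s * next; struct list_entry_s * pred; } list_entry_t;

typedef struct thread_s
{
    list_entry_t sched_list;     // entry in the scheduler list
    unsigned int blocked;        // 0 means runnable
    const char * name;
} thread_t;

#define LIST_ELEMENT( ptr , type , member ) \
    ((type *)((char *)(ptr) - offsetof( type , member )))

static thread_t * select_sketch( list_entry_t * root , list_entry_t * last )
{
    list_entry_t * current = last;
    bool           done    = false;

    while( done == false )
    {
        current = current->next;              // next entry in the circular list

        if( current == last ) done = true;    // one complete turn has been made

        if( current == root ) continue;       // the root carries no thread

        thread_t * t = LIST_ELEMENT( current , thread_t , sched_list );
        if( t->blocked == 0 ) return t;       // first runnable thread wins
    }
    return NULL;                              // caller then falls back to the idle thread
}

int main( void )
{
    list_entry_t root;
    thread_t a = { .blocked = 1 , .name = "A" };
    thread_t b = { .blocked = 0 , .name = "B" };

    // build root -> a -> b -> root by hand
    root.next         = &a.sched_list;  root.pred         = &b.sched_list;
    a.sched_list.next = &b.sched_list;  a.sched_list.pred = &root;
    b.sched_list.next = &root;          b.sched_list.pred = &a.sched_list;

    thread_t * t = select_sketch( &root , &a.sched_list );   // last served was A
    printf("selected %s\n", t ? t->name : "idle" );
    return 0;
}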
trunk/kernel/kern/scheduler.h
r436 r437 40 40 typedef struct scheduler_s 41 41 { 42 spinlock_t lock; /*! readlock protecting lists of threads */ 42 spinlock_t lock; /*! lock protecting lists of threads */ 43 43 uint16_t u_threads_nr; /*! total number of attached user threads */ 44 44 uint16_t k_threads_nr; /*! total number of attached kernel threads */ -
trunk/kernel/kern/thread.h
r436 r437 150 150 xptr_t parent; /*! extended pointer on parent thread */ 151 151 152 uint32_t local_locks; /*! number of local locks owned by thread */ 153 uint32_t remote_locks; /*! number of remote locks owned by thread */ 154 155 152 remote_spinlock_t join_lock; /*! lock protecting the join/exit */ 156 153 xptr_t join_xp; /*! joining/killer thread extended pointer */ … … 197 194 list_entry_t locks_root; /*! root of list of locks taken */ 198 195 xlist_entry_t xlocks_root; /*! root of xlist of remote locks taken */ 196 uint32_t local_locks; /*! number of local locks owned by thread */ 197 uint32_t remote_locks; /*! number of remote locks owned by thread */ 199 198 200 199 thread_info_t info; /*! embedded thread_info_t */ -
trunk/kernel/libk/list.h
r24 r437 97 97 #define LIST_LAST( root , type , member ) \ 98 98 LIST_ELEMENT( (root)->pred , type , member ) 99 100 /***************************************************************************101 * This function returns the pointer on the next list_entry_t.102 ***************************************************************************103 * @ root : pointer on the list root.104 * @ current : pointer on the current list_entry_t.105 * @ returns pointer on next entry if success.106 * returns NULL if list empty or next is the root.107 **************************************************************************/108 static inline list_entry_t * list_next( list_entry_t * root,109 list_entry_t * current )110 {111 if((root == root->next) || (current->next == root)) return NULL;112 113 return current->next;114 }115 116 /***************************************************************************117 * This function returns the pointer on the previous list_entry_t.118 ***************************************************************************119 * @ root : pointer on the list root.120 * @ current : pointer on the current list_entry.121 * @ returns pointer on previous entry if success.122 * returns NULL if list empty or previous is the root.123 **************************************************************************/124 static inline list_entry_t * list_pred( list_entry_t * root,125 list_entry_t * current )126 {127 if((root == root->next) || (current->pred == root))128 return NULL;129 130 return current->pred;131 }132 99 133 100 /*************************************************************************** … … 211 178 list_entry_t * entry ) 212 179 { 213 list_add_first( root->pred , entry ); 180 list_entry_t * pred_entry; 181 list_entry_t * next_entry; 182 183 pred_entry = root->pred; 184 next_entry = root; 185 186 entry->next = next_entry; 187 entry->pred = pred_entry; 188 189 pred_entry->next = entry; 190 next_entry->pred = entry; 214 191 } 215 192 -
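The list.h change above drops the list_next() / list_pred() helpers and reimplements list_add_last() with an explicit four-pointer update, instead of delegating to list_add_first( root->pred , entry ). The following self-contained sketch reproduces that insertion on a circular doubly-linked list with a root sentinel; the structure layout is a simplified stand-in for the kernel's list_entry_t.

#include <stdio.h>

typedef struct list_entry_s
{
    struct list_entry_s * next;
    struct list_entry_s * pred;
} list_entry_t;

static inline void list_root_init( list_entry_t * root )
{
    root->next = root;
    root->pred = root;
}

// insert <entry> just before the root, i.e. at the tail of the list
static inline void list_add_last( list_entry_t * root , list_entry_t * entry )
{
    list_entry_t * pred_entry = root->pred;   // current tail
    list_entry_t * next_entry = root;         // new entry sits just before the root

    entry->next = next_entry;
    entry->pred = pred_entry;

    pred_entry->next = entry;
    next_entry->pred = entry;
}

int main( void )
{
    list_entry_t root, a, b;

    list_root_init( &root );
    list_add_last( &root , &a );
    list_add_last( &root , &b );

    // expected order after the two insertions : root -> a -> b -> root
    printf("first is a : %d / last is b : %d\n",
           root.next == &a , root.pred == &b );
    return 0;
}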
trunk/kernel/mm/kcm.c
r435 r437 3 3 * 4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017 )5 * Alain Greiner (2016,2017,2018) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 239 239 uint32_t blocks_nr = (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE) / block_size; 240 240 kcm->blocks_nr = blocks_nr; 241 242 kcm_dmsg("\n[DBG] %s : KCM %s initialised / block_size = %d / blocks_nr = %d\n",243 __FUNCTION__ , kmem_type_str( type ) , kcm->block_size , kcm->blocks_nr );244 241 } 245 242 -
trunk/kernel/mm/kcm.h
r188 r437 3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016) 5 * Alain Greiner (2016,2017,2018) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/mm/ppm.c
r433 r437 54 54 55 55 cxy_t page_cxy = GET_CXY( page_xp ); 56 page_t * page_ptr = (page_t *)GET_PTR( page_xp );56 page_t * page_ptr = GET_PTR( page_xp ); 57 57 58 58 void * base_ptr = ppm->vaddr_base + … … 69 69 70 70 cxy_t base_cxy = GET_CXY( base_xp ); 71 void * base_ptr = (void *)GET_PTR( base_xp );71 void * base_ptr = GET_PTR( base_xp ); 72 72 73 73 page_t * page_ptr = ppm->pages_tbl + … … 86 86 87 87 cxy_t page_cxy = GET_CXY( page_xp ); 88 page_t * page_ptr = (page_t *)GET_PTR( page_xp );88 page_t * page_ptr = GET_PTR( page_xp ); 89 89 90 90 paddr_t paddr = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT ); 91 91 92 return paddr >> CONFIG_PPM_PAGE_SHIFT;92 return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT); 93 93 94 94 } // end hal_page2ppn() … … 97 97 inline xptr_t ppm_ppn2page( ppn_t ppn ) 98 98 { 99 ppm_t * ppm= &LOCAL_CLUSTER->ppm;100 101 paddr_t paddr = ppn<< CONFIG_PPM_PAGE_SHIFT;102 103 cxy_t page_cxy= CXY_FROM_PADDR( paddr );104 lpa_t page_lpa= LPA_FROM_PADDR( paddr );105 106 return XPTR( page_cxy , &ppm->pages_tbl[page_lpa>>CONFIG_PPM_PAGE_SHIFT] );99 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 100 101 paddr_t paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT; 102 103 cxy_t cxy = CXY_FROM_PADDR( paddr ); 104 lpa_t lpa = LPA_FROM_PADDR( paddr ); 105 106 return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] ); 107 107 108 108 } // end hal_ppn2page … … 113 113 inline xptr_t ppm_ppn2base( ppn_t ppn ) 114 114 { 115 ppm_t * ppm 115 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 116 116 117 paddr_t paddr = ppn << CONFIG_PPM_PAGE_SHIFT; 118 119 cxy_t page_cxy = CXY_FROM_PADDR( paddr ); 120 lpa_t page_lpa = LPA_FROM_PADDR( paddr ); 121 122 void * base_ptr = (void *)ppm->vaddr_base + (page_lpa & ~CONFIG_PPM_PAGE_SHIFT); 123 124 return XPTR( page_cxy , base_ptr ); 117 paddr_t paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT; 118 119 cxy_t cxy = CXY_FROM_PADDR( paddr ); 120 lpa_t lpa = LPA_FROM_PADDR( paddr ); 121 122 return XPTR( cxy , (void *)ppm->vaddr_base + lpa ); 125 123 126 124 } // end ppm_ppn2base() … … 132 130 133 131 cxy_t base_cxy = GET_CXY( base_xp ); 134 void * base_ptr = (void *)GET_PTR( base_xp );132 void * base_ptr = GET_PTR( base_xp ); 135 133 136 134 paddr_t paddr = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) ); 137 135 138 return paddr >> CONFIG_PPM_PAGE_SHIFT;136 return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT); 139 137 140 138 } // end ppm_base2ppn() -
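The ppm.c change above casts the 32-bit PPN to the wide physical-address type before shifting (((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT). Without the cast the shift is performed in 32-bit arithmetic and the high-order bits of the physical address are lost as soon as the frame sits above 4 GB. The small program below demonstrates the difference; the page-shift value and the PPN are illustrative, not the kernel's configuration.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t paddr_t;

#define PAGE_SHIFT  12

int main( void )
{
    uint32_t ppn = 0x00500000;                      // a PPN whose frame lies above 4 GB

    paddr_t wrong = ppn << PAGE_SHIFT;              // shift done in 32 bits : high bits are lost (result 0)
    paddr_t right = ((paddr_t)ppn) << PAGE_SHIFT;   // shift done in 64 bits : 0x500000000

    printf("wrong = 0x%llx / right = 0x%llx\n",
           (unsigned long long)wrong, (unsigned long long)right );
    return 0;
}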
trunk/kernel/mm/vmm.c
r435 r437 673 673 #endif 674 674 675 #if (CONFIG_DEBUG_VMM_DESTROY & 1 ) 676 vmm_display( process , true ); 677 #endif 678 675 679 // get pointer on local VMM 676 680 vmm_t * vmm = &process->vmm; … … 690 694 vseg = GET_PTR( vseg_xp ); 691 695 692 // unmap rand release physical pages if required) 696 #if( CONFIG_DEBUG_VMM_DESTROY & 1 ) 697 if( CONFIG_DEBUG_VMM_DESTROY < cycle ) 698 printk("\n[DBG] %s : %s / vpn_base %x / vpn_size %d\n", 699 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); 700 #endif 701 702 // unmap and release physical pages 693 703 vmm_unmap_vseg( process , vseg ); 694 704 … … 1120 1130 if( attr & GPT_MAPPED ) // entry is mapped 1121 1131 { 1132 1133 #if( CONFIG_DEBUG_VMM_UNMAP_VSEG & 1 ) 1134 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle ) 1135 printk("- vpn %x / ppn %x\n" , vpn , ppn ); 1136 #endif 1137 1122 1138 // check small page 1123 1139 assert( (attr & GPT_SMALL) , __FUNCTION__ , … … 1140 1156 // FIXME lock the physical page 1141 1157 1142 // get extended pointer on pending forks counter1143 forks_xp = XPTR( page_cxy , &page_ptr->forks );1144 1145 1158 // get pending forks counter 1146 count = hal_remote_lw( forks_xp);1159 count = hal_remote_lw( XPTR( page_cxy , &page_ptr->forks ) ); 1147 1160 1148 1161 if( count ) // decrement pending forks counter 1149 1162 { 1163 forks_xp = XPTR( page_cxy , &page_ptr->forks ); 1150 1164 hal_remote_atomic_add( forks_xp , -1 ); 1151 1165 } -
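The vmm_unmap_vseg() change above reads the pending-forks counter of the physical page through the remote accessor and only builds the extended pointer and decrements it when the counter is non zero. The sketch below shows that bookkeeping in isolation: a page shared with forked children is kept until the last reference goes away. The remote accessors are local simulations, and the real code also handles the copy-on-write and dirty-page synchronisation that the sketch leaves out.

#include <stdio.h>
#include <stdint.h>

typedef struct page_s
{
    uint32_t forks;      // number of forked children still sharing this frame
} page_t;

// stand-ins for hal_remote_lw() / hal_remote_atomic_add() on a local page
static uint32_t remote_read( page_t * p )          { return p->forks; }
static void     remote_add ( page_t * p , int v )  { p->forks += (uint32_t)v; }

static void unmap_page_sketch( page_t * page )
{
    uint32_t count = remote_read( page );

    if( count )                 // a forked child still references the frame
    {
        remote_add( page , -1 );
        printf("page kept / remaining forks = %u\n", page->forks );
    }
    else                        // last user : the frame can be released
    {
        printf("page released\n");
    }
}

int main( void )
{
    page_t page = { .forks = 2 };
    unmap_page_sketch( &page );   // kept, forks = 1
    unmap_page_sketch( &page );   // kept, forks = 0
    unmap_page_sketch( &page );   // released
    return 0;
}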
trunk/kernel/mm/vmm.h
r433 r437 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011, 2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017) 6 * Alain Greiner (2016,2017,2018) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/syscalls/shared_syscalls.h
r435 r437 61 61 SYS_CLOSEDIR = 25, 62 62 SYS_GETCWD = 26, 63 SYS_UNDEFINED_27 = 27, /// 63 SYS_ISATTY = 27, 64 64 SYS_ALARM = 28, 65 65 SYS_RMDIR = 29, -
trunk/kernel/syscalls/sys_munmap.c
r410 r437 3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017 )5 * Alain Greiner (2016,2017,2018) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 40 40 error_t error; 41 41 42 uint32_t tm_start;43 uint32_t tm_end;44 45 tm_start = hal_get_cycles();46 47 42 thread_t * this = CURRENT_THREAD; 48 43 process_t * process = this->process; 44 45 #if CONFIG_DEBUG_SYS_MUNMAP 46 uint64_t tm_start; 47 uint64_t tm_end; 48 tm_start = hal_get_cycles(); 49 if( CONFIG_DEBUG_SYS_MUNMAP < tm_start ) 50 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n" 51 __FUNCTION__ , this, process->pid, (uint32_t)tm_start ); 52 #endif 49 53 50 54 // call relevant kernel function … … 53 57 if ( error ) 54 58 { 55 printk("\n[ERROR] in %s : cannot remove mapping\n", __FUNCTION__ ); 59 60 #if CONFIG_DEBUG_SYSCALLS_ERROR 61 printk("\n[ERROR] in %s : cannot remove mapping\n", __FUNCTION__ ); 62 #endif 56 63 this->errno = EINVAL; 57 64 return -1; 58 65 } 59 66 60 tm_end = hal_get_cycles(); 67 #if CONFIG_DEBUG_SYS_MUNMAP 68 tm_end = hal_get_cycles(); 69 if( CONFIG_DEBUG_SYS_MUNMAP < tm_start ) 70 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n" 71 __FUNCTION__ , this, process->pid, (uint32_t)tm_end ); 72 #endif 61 73 62 syscall_dmsg("\n[DBG] %s : core[%x,%d] removed vseg in process %x / cycle %d\n" 63 " base = %x / size = %x / cost = %d\n", 64 __FUNCTION__, local_cxy , this->core->lid , process->pid , tm_start , 65 vaddr , size , tm_end - tm_start ); 74 return 0; 66 75 67 return 0; 76 } // end sys_munmap() 68 77 69 } // end sys_mmap()70 -
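The sys_munmap.c rewrite above illustrates the instrumentation pattern applied to the syscalls in this changeset: a cycle stamp at entry and exit gated by a CONFIG_DEBUG_SYS_* threshold, and error messages gated by CONFIG_DEBUG_SYSCALLS_ERROR. The sketch below shows the shape of that pattern with stand-in primitives; note that it writes the comma between the format string and __FUNCTION__ explicitly, which the patched printk() lines appear to omit.

#include <stdio.h>
#include <stdint.h>

#define CONFIG_DEBUG_SYS_MUNMAP       1      // non zero : trace entry/exit after that cycle
#define CONFIG_DEBUG_SYSCALLS_ERROR   1      // non zero : report syscall errors

static uint64_t hal_get_cycles( void ) { static uint64_t c = 0; return c += 1000; }
#define printk printf

int sys_munmap_sketch( void * vaddr , uint32_t size )
{
#if CONFIG_DEBUG_SYS_MUNMAP
    uint64_t tm_start = hal_get_cycles();
    if( CONFIG_DEBUG_SYS_MUNMAP < tm_start )
        printk("\n[DBG] %s : enter / vaddr %p / size %x / cycle %d\n",
               __FUNCTION__, vaddr, size, (uint32_t)tm_start );
#endif

    int error = (size == 0);                 // placeholder for the real vmm work

    if( error )
    {
#if CONFIG_DEBUG_SYSCALLS_ERROR
        printk("\n[ERROR] in %s : cannot remove mapping\n", __FUNCTION__ );
#endif
        return -1;
    }

#if CONFIG_DEBUG_SYS_MUNMAP
    uint64_t tm_end = hal_get_cycles();
    if( CONFIG_DEBUG_SYS_MUNMAP < tm_end )
        printk("\n[DBG] %s : exit / cycle %d\n", __FUNCTION__, (uint32_t)tm_end );
#endif
    return 0;
}

int main( void ) { return sys_munmap_sketch( (void *)0x1000 , 0x2000 ); }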
trunk/kernel/syscalls/sys_thread_create.c
r407 r437 2 2 * sys_thread_create.c - creates a new user thread 3 3 * 4 * Author Alain Greiner (2016,2017 )4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 57 57 error_t error; 58 58 59 uint32_t tm_start;60 uint32_t tm_end;61 62 tm_start = hal_get_cycles();63 64 59 // get parent thead pointer, extended pointer, and process 65 60 parent = CURRENT_THREAD; … … 67 62 process = parent->process; 68 63 64 #if CONFIG_DEBUG_SYS_THREAD_CREATE 65 uint64_t tm_start; 66 uint64_t tm_end; 67 tm_start = hal_get_cycles(); 68 if( CONFIG_DEBUG_SYS_THREAD_CREATE < tm_start ) 69 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n" 70 __FUNCTION__ , parent , process->pid, (uint32_t)tm_start ); 71 #endif 72 69 73 // check user_attr in user space & copy to kernel space 70 74 if( user_attr != NULL ) … … 74 78 if( error ) 75 79 { 76 printk("\n[ERROR] in %s : user_attr unmapped\n", __FUNCTION__ ); 80 81 #if CONFIG_DEBUG_SYSCALLS_ERROR 82 printk("\n[ERROR] in %s : user_attr unmapped\n", __FUNCTION__ ); 83 #endif 77 84 parent->errno = EINVAL; 78 85 return -1; … … 87 94 if( error ) 88 95 { 89 printk("\n[ERROR] in %s : start_func unmapped\n", __FUNCTION__ ); 96 97 #if CONFIG_DEBUG_SYSCALLS_ERROR 98 printk("\n[ERROR] in %s : start_func unmapped\n", __FUNCTION__ ); 99 #endif 90 100 parent->errno = EINVAL; 91 101 return -1; … … 97 107 if( error ) 98 108 { 99 printk("\n[ERROR] in %s : start_arg unmapped\n", __FUNCTION__ ); 109 110 #if CONFIG_DEBUG_SYSCALLS_ERROR 111 printk("\n[ERROR] in %s : start_arg unmapped\n", __FUNCTION__ ); 112 #endif 100 113 parent->errno = EINVAL; 101 114 return -1; … … 110 123 if( cluster_is_undefined( kern_attr.cxy ) ) 111 124 { 112 printk("\n[ERROR] in %s : illegal target cluster = %x\n", 113 __FUNCTION__ , kern_attr.cxy ); 125 126 #if CONFIG_DEBUG_SYSCALLS_ERROR 127 printk("\n[ERROR] in %s : illegal target cluster = %x\n", __FUNCTION__ , kern_attr.cxy ); 128 #endif 114 129 parent->errno = EINVAL; 115 130 return -1; … … 158 173 if( error ) 159 174 { 160 printk("\n[ERROR] in %s : cannot create thread\n", __FUNCTION__ ); 175 176 #if CONFIG_DEBUG_SYSCALLS_ERROR 177 printk("\n[ERROR] in %s : cannot create thread\n", __FUNCTION__ ); 178 #endif 161 179 return ENOMEM; 162 180 } … … 178 196 hal_fence(); 179 197 180 tm_end = hal_get_cycles(); 181 182 syscall_dmsg("\n[DBG] %s : core[%x,%d] created thread %x for process %x / cycle %d\n" 183 " cluster %x / cost = %d cycles\n", 184 __FUNCTION__ , local_cxy , parent->core->lid , trdid , process->pid , tm_end ,185 target_cxy , tm_end - tm_start ); 198 #if CONFIG_DEBUG_SYS_THREAD_CREATE 199 tm_end = hal_get_cycles(); 200 if( CONFIG_DEBUG_SYS_THREAD_CREATE < tm_end ) 201 printk("\n[DBG] %s : thread %x created thread %x for process %x in cluster %x / cycle %d\n" 202 __FUNCTION__, parent, child_ptr, process->pid, target_cxy, (uint32_t)tm_end ); 203 #endif 186 204 187 205 return 0; -
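sys_thread_create() keeps its defensive structure: every user-space pointer (attributes, entry function, argument) is checked against the calling process address space before being copied or used, and the syscall fails with EINVAL otherwise. The sketch below shows that validate-then-copy discipline with hypothetical helpers (user_range_mapped, copy_from_user); the real kernel relies on its VMM lookup and copy-from-user-space primitives, and reports the failure through errno rather than a negative errno value.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define EINVAL 22

typedef struct pthread_attr_sketch_s
{
    uint32_t cxy;            // requested target cluster
} pthread_attr_sketch_t;

// stand-in : report whether [addr , addr+size) is mapped in user space
static int user_range_mapped( const void * addr , uint32_t size )
{
    (void)size;
    return addr != NULL;     // placeholder check only
}

// stand-in for a copy-from-user-space primitive
static int copy_from_user( void * k , const void * u , uint32_t size )
{
    if( ! user_range_mapped( u , size ) ) return -1;
    memcpy( k , u , size );
    return 0;
}

int thread_create_sketch( const pthread_attr_sketch_t * user_attr )
{
    pthread_attr_sketch_t kern_attr;

    if( user_attr != NULL )
    {
        if( copy_from_user( &kern_attr , user_attr , sizeof(kern_attr) ) )
        {
            printf("[ERROR] user_attr unmapped\n");
            return -EINVAL;          // never touch an unmapped user buffer
        }
        printf("target cluster = %x\n", kern_attr.cxy );
    }
    return 0;                        // NULL attributes mean default placement
}

int main( void )
{
    pthread_attr_sketch_t attr = { .cxy = 1 };
    thread_create_sketch( &attr );
    thread_create_sketch( NULL );
    return 0;
}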
trunk/kernel/syscalls/syscalls.h
r436 r437 356 356 357 357 /****************************************************************************************** 358 * [27] This slot is not used. 359 *****************************************************************************************/ 358 * [27] This function tests whether a given file descriptor identified by the <file_id> 359 * argument is an open file descriptor referring to a terminal. 360 ****************************************************************************************** 361 * @ file_id : file descriptor index 362 * @ return 1 if it is a TXT device / return 0 if it is not a TXT device. 363 *****************************************************************************************/ 364 int sys_isatty( uint32_t file_id ); 360 365 361 366 /****************************************************************************************** -
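From user space the new entry follows the standard isatty() convention: a plain predicate on an open file descriptor. The program below relies only on POSIX behaviour and can exercise the new kernel path once the libc wrapper maps isatty() onto SYS_ISATTY.

#include <stdio.h>
#include <unistd.h>

int main( void )
{
    // stdout is a TXT terminal when the shell runs interactively,
    // and a regular file or pipe when the output is redirected
    if( isatty( STDOUT_FILENO ) )
        printf("stdout is a terminal\n");
    else
        printf("stdout is not a terminal\n");

    return 0;
}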
trunk/kernel_config.h
r436 r437 38 38 39 39 40 #define CONFIG_DEBUG_CHDEV_REGISTER_COMMAND 0 41 #define CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER 0 40 #define CONFIG_DEBUG_CHDEV_CMD_RX 0 41 #define CONFIG_DEBUG_CHDEV_CMD_TX 0 42 #define CONFIG_DEBUG_CHDEV_SERVER_RX 0 43 #define CONFIG_DEBUG_CHDEV_SERVER_TX 0 42 44 43 45 #define CONFIG_DEBUG_CLUSTER_INIT 0 … … 46 48 #define CONFIG_DEBUG_DEV_TXT_RX 0 47 49 #define CONFIG_DEBUG_DEV_TXT_TX 0 48 #define CONFIG_DEBUG_DEV_IOC 0 50 #define CONFIG_DEBUG_DEV_IOC_RX 0 51 #define CONFIG_DEBUG_DEV_IOC_TX 0 49 52 #define CONFIG_DEBUG_DEV_NIC_RX 0 50 53 #define CONFIG_DEBUG_DEV_NIC_RX 0 51 #define CONFIG_DEBUG_DEV_FBF 0 54 #define CONFIG_DEBUG_DEV_FBF_RX 0 55 #define CONFIG_DEBUG_DEV_FBF_TX 0 56 #define CONFIG_DEBUG_DEV_DMA 0 52 57 #define CONFIG_DEBUG_DEV_MMC 0 58 #define CONFIG_DEBUG_DEV_PIC 0 53 59 54 60 #define CONFIG_DEBUG_DEVFS_INIT 0 … … 62 68 63 69 #define CONFIG_DEBUG_HAL_KENTRY 0 70 #define CONFIG_DEBUG_HAL_EXCEPTIONS 0 71 #define CONFIG_DEBUG_HAL_IRQS 0 64 72 #define CONFIG_DEBUG_HAL_TXT_RX 0 65 73 #define CONFIG_DEBUG_HAL_TXT_TX 0 66 #define CONFIG_DEBUG_HAL_ EXCEPTIONS067 #define CONFIG_DEBUG_HAL_I RQS 074 #define CONFIG_DEBUG_HAL_IOC_RX 0 75 #define CONFIG_DEBUG_HAL_IOC_TX 0 68 76 69 77 #define CONFIG_DEBUG_KCM 0 … … 83 91 #define CONFIG_DEBUG_PROCESS_DESTROY 0 84 92 #define CONFIG_DEBUG_PROCESS_INIT_CREATE 0 85 #define CONFIG_DEBUG_PROCESS_MAKE_EXEC 086 #define CONFIG_DEBUG_PROCESS_MAKE_FORK 093 #define CONFIG_DEBUG_PROCESS_MAKE_EXEC 1 94 #define CONFIG_DEBUG_PROCESS_MAKE_FORK 1 87 95 #define CONFIG_DEBUG_PROCESS_REFERENCE_INIT 0 88 96 #define CONFIG_DEBUG_PROCESS_SIGACTION 0 … … 100 108 101 109 #define CONFIG_DEBUG_SYS_DISPLAY 0 102 #define CONFIG_DEBUG_SYS_EXEC 0110 #define CONFIG_DEBUG_SYS_EXEC 1 103 111 #define CONFIG_DEBUG_SYS_EXIT 0 104 112 #define CONFIG_DEBUG_SYS_FG 0 105 #define CONFIG_DEBUG_SYS_FORK 0113 #define CONFIG_DEBUG_SYS_FORK 1 106 114 #define CONFIG_DEBUG_SYS_GET_CONFIG 0 107 #define CONFIG_DEBUG_SYS_KILL 0 115 #define CONFIG_DEBUG_SYS_ISATTY 0 116 #define CONFIG_DEBUG_SYS_KILL 1 108 117 #define CONFIG_DEBUG_SYS_MMAP 0 109 118 #define CONFIG_DEBUG_SYS_READ 0 -
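The CONFIG_DEBUG_* values split per channel above play two roles: 0 removes the trace at compile time through the surrounding #if, and a non-zero value N only enables the trace once the cycle counter has passed N, which keeps the boot phase quiet. A minimal illustration with a stand-in cycle counter:

#include <stdio.h>
#include <stdint.h>

#define CONFIG_DEBUG_SYS_FORK  20000    // start tracing after cycle 20000 / 0 disables

static uint32_t cycle_counter = 0;
static uint32_t hal_get_cycles( void ) { return cycle_counter; }

static void trace_point( void )
{
#if CONFIG_DEBUG_SYS_FORK
    uint32_t cycle = hal_get_cycles();
    if( CONFIG_DEBUG_SYS_FORK < cycle )
        printf("[DBG] trace at cycle %u\n", cycle );
#endif
}

int main( void )
{
    cycle_counter = 10000; trace_point();   // silent : before the threshold
    cycle_counter = 30000; trace_point();   // printed : after the threshold
    return 0;
}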
trunk/libs/malloc.c
r426 r437 76 76 // This static function display the current state of the allocator in cluster <cxy>. 77 77 //////////////////////////////////////////////////////////////////////////////////////////// 78 79 #if 0 78 80 static void display_free_array( unsigned int cxy ) 79 81 { … … 98 100 } 99 101 } // end display_free_array() 102 #endif 100 103 101 104 -
trunk/params.mk
r435 r437 3 3 ARCH = /users/alain/soc/tsar-trunk-svn-2013/platforms/tsar_generic_iob 4 4 X_SIZE = 1 5 Y_SIZE = 1 5 Y_SIZE = 2 6 6 NB_PROCS = 1 7 7 NB_TTYS = 3 -
trunk/user/init/init.c
r436 r437 41 41 if( ret_fork < 0 ) // error in fork 42 42 { 43 // INIT display error message on TXT0 terminal44 snprintf( string , 64 , "INIT cannot fork child[%d] " , i );43 // INIT display error message 44 snprintf( string , 64 , "INIT cannot fork child[%d] => suicide" , i ); 45 45 display_string( string ); 46 46 … … 55 55 if ( ret_exec ) // error in exec 56 56 { 57 // CHILD[i] display error message on TXT0 terminal57 // CHILD[i] display error message 58 58 snprintf( string , 64 , 59 59 "CHILD[%d] cannot exec KSH[%d] / ret_exec = %d" , i , i , ret_exec ); … … 68 68 } 69 69 } 70 71 // display processes and threads in clusters 0 & 1 72 display_cluster_processes( 0 ); 73 display_sched( 0 , 0 ); 74 display_cluster_processes( 1 ); 75 display_sched( 1 , 0 ); 70 76 71 77 // This loop detects the termination of the KSH[i] processes, … … 88 94 if( WIFSIGNALED( status ) || WIFEXITED( status ) ) // killed => recreate it 89 95 { 90 // display string to report unexpectedKSH process termination96 // display string to report KSH process termination 91 97 snprintf( string , 64 , "KSH process %x terminated => recreate KSH", rcv_pid ); 92 98 display_string( string ); … … 97 103 if( ret_fork < 0 ) // error in fork 98 104 { 99 // INIT display error message on TXT0 terminal100 snprintf( string , 64 , "INIT cannot fork child ");105 // INIT display error message 106 snprintf( string , 64 , "INIT cannot fork child => suicide"); 101 107 display_string( string ); 102 108 -
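The init.c changes above refine the supervision loop: fork each KSH once at boot, then block in wait() and re-create any KSH whose termination is reported, with clearer messages when fork or exec fails. The sketch below reproduces the shape of that loop with plain POSIX calls (fork / execl / wait) and a generic shell path; the real init uses the almos-mkh syscall wrappers and launches its own ksh binary.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

static pid_t spawn_shell( void )
{
    pid_t pid = fork();

    if( pid < 0 )                     // fork failed : nothing left to supervise
    {
        fprintf( stderr , "init cannot fork child => exit\n" );
        exit( EXIT_FAILURE );
    }

    if( pid == 0 )                    // child : become the shell
    {
        execl( "/bin/sh" , "sh" , (char *)NULL );
        fprintf( stderr , "child cannot exec shell\n" );
        exit( EXIT_FAILURE );
    }
    return pid;                       // parent : remember the child pid
}

int main( void )
{
    spawn_shell();

    while( 1 )                        // supervision loop
    {
        int   status;
        pid_t pid = wait( &status );  // blocks until a child terminates

        if( pid < 0 ) continue;       // no child left

        if( WIFEXITED( status ) || WIFSIGNALED( status ) )
        {
            fprintf( stderr , "shell %d terminated => recreate\n" , (int)pid );
            spawn_shell();
        }
    }
    return 0;
}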
trunk/user/ksh/ksh.c
r436 r437 715 715 // @@@ 716 716 717 char string[64];718 719 717 while (1) 720 718 {