Changeset 433 for trunk/kernel
Timestamp:
    Feb 14, 2018, 3:40:19 PM
Location:
    trunk/kernel
Files:
    38 edited
Legend:
    Unchanged lines are shown indented as context, added lines are prefixed
    with "+", removed lines with "-", and "…" marks elided unchanged code.
trunk/kernel/devices/dev_txt.c
r422 → r433

      thread_t * this = CURRENT_THREAD;

-     txt_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) enters / cycle %d\n",
-              __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type), hal_time_stamp() );
+ #if CONFIG_DEBUG_DEV_TXT
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_TXT < cycle )
+ printk("\n[DBG] %s : thread %x enters / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD , cycle );
+ #endif

      // check channel argument
…
      chdev_register_command( dev_xp );

-     txt_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) exit / cycle %d\n",
-              __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type), hal_time_stamp() );
+ #if CONFIG_DEBUG_DEV_TXT
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEV_TXT < cycle )
+ printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD , cycle );
+ #endif

      // return I/O operation status from calling thread descriptor
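This hunk shows the template applied across the 38 files of this changeset: each
`*_dmsg()` macro is replaced by a per-function CONFIG_DEBUG_* constant that acts both
as a compile-time switch and as a start date in cycles. A standalone sketch of the
policy follows; the threshold value and the fake_get_cycles() stub are illustrative
stand-ins, not kernel code:

    #include <stdio.h>
    #include <stdint.h>

    #define CONFIG_DEBUG_DEV_TXT  1000        /* hypothetical: trace only after cycle 1000 */

    /* stand-in for the kernel's hal_get_cycles() */
    static uint32_t fake_get_cycles( void ) { return 1500; }

    int main( void )
    {
    #if CONFIG_DEBUG_DEV_TXT                  /* a value of 0 removes the trace at compile time */
        uint32_t cycle = fake_get_cycles();
        if( CONFIG_DEBUG_DEV_TXT < cycle )    /* stay silent before the start date */
            printf("\n[DBG] %s : enters / cycle %u\n", __func__, (unsigned)cycle );
    #endif
        return 0;
    }

A threshold of 0 compiles the trace out entirely, while a large value silences the
noisy early-boot phase without losing later traces.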
trunk/kernel/fs/devfs.c
r430 → r433

      #include <printk.h>
      #include <chdev.h>
+ #include <thread.h>
      #include <dev_txt.h>
      #include <cluster.h>
…
      error_t error;

-     devfs_dmsg("\n[DBG] %s : enter in cluster %x\n",
-                __FUNCTION__ , local_cxy );
+ #if CONFIG_DEBUG_DEVFS_INIT
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEVFS_INIT < cycle )
+ printk("\n[DBG] %s : thread %x enter at cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , cycle );
+ #endif

      // creates DEVFS "dev" inode in cluster IO
…
      assert( (error == 0) , __FUNCTION__ , "cannot create <dev>\n" );

-     devfs_dmsg("\n[DBG] %s : <dev> created in cluster %x\n",
-                __FUNCTION__ , local_cxy );
-
      // create DEVFS "external" inode in cluster IO
      error = vfs_add_child_in_parent( LOCAL_CLUSTER->io_cxy,
…
      assert( (error == 0) , __FUNCTION__ , "cannot create <external>\n" );

-     devfs_dmsg("\n[DBG] %s : <external> created in cluster %x\n",
-                __FUNCTION__ , local_cxy );
+ #if CONFIG_DEBUG_DEVFS_INIT
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEVFS_INIT < cycle )
+ printk("\n[DBG] %s : thread %x exit at cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , cycle );
+ #endif
+
  }
…
      xptr_t   inode_xp;
      uint32_t channel;
+
+ #if CONFIG_DEBUG_DEVFS_INIT
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEVFS_INIT < cycle )
+ printk("\n[DBG] %s : thread %x enter at cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , cycle );
+ #endif

      // create "internal" directory linked to "dev"
…
      devfs_internal_inode_xp );

-     devfs_dmsg("\n[DBG] %s : created <%s> inode in cluster %x\n",
-                __FUNCTION__ , node_name , local_cxy );
-
      // create MMC chdev inode
      chdev_xp = chdev_dir.mmc[local_cxy];
…
      GET_PTR( chdev_xp ),
      &inode_xp );
-
-     devfs_dmsg("\n[DBG] %s : created <%s> inode in cluster %x\n",
-                __FUNCTION__ , chdev_ptr->name , local_cxy );
-
      }

…    (the identical trace removal follows each of the remaining chdev inode
      creations in devfs_local_init(), after every "&inode_xp );" call)    …

      }
      }
      }
+
+ #if CONFIG_DEBUG_DEVFS_INIT
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEVFS_INIT < cycle )
+ printk("\n[DBG] %s : thread %x exit at cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , cycle );
+ #endif
+
  } // end devfs_local_init()
…
      char k_buf[CONFIG_TXT_KBUF_SIZE];  // local kernel buffer

-     devfs_dmsg("\n[DBG] %s enter / cycle %d\n",
-                __FUNCTION__ , hal_time_stamp() );
+ #if CONFIG_DEBUG_DEVFS_MOVE
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEVFS_MOVE < cycle )
+ printk("\n[DBG] %s : thread %x enter / to_mem %d / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , to_buffer , cycle );
+ #endif

      #if CONFIG_READ_DEBUG
…
      if( error )
      {
-
-         devfs_dmsg("\n[DBG] %s exit error / cycle %d\n",
-                    __FUNCTION__ , hal_time_stamp() );
-
          return -1;
      }
…
      }

+ #if CONFIG_DEBUG_DEVFS_MOVE
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEVFS_MOVE < cycle )
+ printk("\n[DBG] %s : thread %x exit / to_mem %d / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , to_buffer / cycle );
+ #endif
+
      #if CONFIG_READ_DEBUG
      exit_devfs_move = hal_time_stamp();
      #endif
-
-     devfs_dmsg("\n[DBG] %s exit success / size = %d / cycle %d\n",
-                __FUNCTION__ , size , hal_time_stamp() );
-
      return size;
      }
…
      if( error )
      {
-
-         devfs_dmsg("\n[DBG] %s exit error / cycle %d\n",
-                    __FUNCTION__ , hal_time_stamp() );
-
          return -1;
      }
      else
      {

-         devfs_dmsg("\n[DBG] %s exit success / size = %d / cycle %d\n",
-                    __FUNCTION__ , size , hal_time_stamp() );
+ #if CONFIG_DEBUG_DEVFS_MOVE
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_DEVFS_MOVE < cycle )
+ printk("\n[DBG] %s : thread %x exit / to_mem %d / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , to_buffer / cycle );
+ #endif

          return size;
…
          return -1;
      }
+
  } // end devfs_user_move()
trunk/kernel/fs/vfs.c
r430 → r433

      error_t error;

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] enter / dentry = %x in cluster %x\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, GET_PTR(dentry_xp), GET_CXY(dentry_xp) );
+ #if CONFIG_DEBUG_VFS_INODE_CREATE
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_INODE_CREATE < cycle )
+ printk("\n[DBG] %s : thread %x enter / dentry = %x in cluster %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, GET_PTR(dentry_xp), GET_CXY(dentry_xp), cycle );
+ #endif

      // check fs type and get pointer on context
…
      remote_spinlock_init( XPTR( local_cxy , &inode->main_lock ) );

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] exit / inode = %x in cluster %x\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, inode , local_cxy );
+ #if CONFIG_DEBUG_VFS_INODE_CREATE
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_INODE_CREATE < cycle )
+ printk("\n[DBG] %s : thread %x exit / inode = %x in cluster %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, inode, local_cxy, cycle );
+ #endif

      // return extended pointer on inode
…
      xptr_t child_xp )
  {
-     vfs_dmsg("\n[DBG] %s : core[%x,%d] enter for <%s> / cycle %d\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , name , hal_time_stamp() );
+
+ #if CONFIG_DEBUG_VFS_INODE_LOAD
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_INODE_LOAD < cycle )
+ printk("\n[DBG] %s : thread %x enter for <%s> / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD , name , cycle );
+ #endif

      error_t error = 0;
…
      }

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] exit for <%s> / cycle %d\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , name , hal_time_stamp() );
+ #if CONFIG_DEBUG_VFS_INODE_LOAD
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_INODE_LOAD < cycle )
+ printk("\n[DBG] %s : thread %x exit for <%s> / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD , name , cycle );
+ #endif

      return error;
…
      kmem_req_t req;    // request to kernel memory allocator

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] enter for <%s> / parent inode = %x / cycle %d\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, name, parent, hal_time_stamp() );
+ #if CONFIG_DEBUG_VFS_DENTRY_CREATE
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_DENTRY_CREATE < cycle )
+ printk("\n[DBG] %s : thread %x enter for <%s> / parent_inode %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD , name , parent , cycle );
+ #endif

      // get pointer on context
…
      *dentry_xp = XPTR( local_cxy , dentry );

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] exit for <%s> / dentry = %x in cluster %x / cycle %d\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, name, dentry, local_cxy , hal_time_stamp() );
+ #if CONFIG_DEBUG_VFS_DENTRY_CREATE
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_DENTRY_CREATE < cycle )
+ printk("\n[DBG] %s : thread %x exit for <%s> / dentry %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD , name , dentry , cycle );
+ #endif

      return 0;
…
      uint32_t file_id;    // created file descriptor index in reference fd_array

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] enter for <%s> / cycle %d\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path, (uint32_t)hal_time_stamp() );
+ #if CONFIG_DEBUG_VFS_OPEN
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_OPEN < cycle )
+ printk("\n[DBG] %s : thread %x enter for <%s> / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, path, cycle );
+ #endif

      // compute lookup working mode
…
      inode_ptr = (vfs_inode_t *)GET_PTR( inode_xp );

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] found inode for <%s> in cluster %x / cycle %d\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path, inode_cxy , (uint32_t)hal_time_stamp() );
-
      // create a new file descriptor in cluster containing inode
      if( inode_cxy == local_cxy )    // target cluster is local
…
      if( error ) return error;

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] exit for <%s> / file = %x in cluster %x / cycle %d\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path,
-              GET_PTR(file_xp), GET_CXY(file_xp), hal_time_stamp() );
+ #if CONFIG_DEBUG_VFS_OPEN
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_OPEN < cycle )
+ printk("\n[DBG] %s : thread %x exit for <%s> / file %x in cluster %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, path, GET_PTR(file_xp), GET_CXY(file_xp), cycle );
+ #endif

      // success
…
      uint32_t size )
  {
-     assert( ( file_xp != XPTR_NULL ) , __FUNCTION__ ,
-     "file_xp == XPTR_NULL" );
+     assert( ( file_xp != XPTR_NULL ) , __FUNCTION__ , "file_xp == XPTR_NULL" );

      cxy_t file_cxy;    // remote file descriptor cluster
…
      process = this->process;

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] enter for <%s> / cycle %d\n",
-              __FUNCTION__ , local_cxy , this->core->lid , pathname , hal_time_stamp() );
+ #if CONFIG_DEBUG_VFS_LOOKUP
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
+ printk("\n[DBG] %s : thread %x enter for <%s> / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, path, cycle );
+ #endif

      // get extended pointer on first inode to search
…
      vfs_get_name_from_path( current , name , &next , &last );

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] look for <%s> / last = %d\n",
-              __FUNCTION__ , local_cxy , this->core->lid , name , last );
+ #if (CONFIG_DEBUG_VFS_LOOKUP & 1)
+ if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
+ printk("\n[DBG] %s : look for <%s> / last = %d\n", __FUNCTION__ , name , last );
+ #endif

      // search a child dentry matching name in parent inode
…
      {

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] miss <%s> => load it\n",
-              __FUNCTION__ , local_cxy , this->core->lid , name );
+ #if (CONFIG_DEBUG_VFS_LOOKUP & 1)
+ if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
+ printk("\n[DBG] %s : miss <%s> => load it\n", __FUNCTION__ , name );
+ #endif

      // release lock on parent inode
…
      vfs_inode_lock( parent_xp );

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] created node <%s>\n",
-              __FUNCTION__ , local_cxy , this->core->lid , name );
+ #if (CONFIG_DEBUG_VFS_LOOKUP & 1)
+ if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
+ printk("\n[DBG] %s : created node <%s>\n", __FUNCTION__ , name );
+ #endif

      }

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] found <%s> / inode = %x in cluster %x\n",
-              __FUNCTION__ , local_cxy , this->core->lid , name , GET_PTR(child_xp) , GET_CXY(child_xp) );
+ #if (CONFIG_DEBUG_VFS_LOOKUP & 1)
+ if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
+ printk("\n[DBG] %s : found <%s> / inode %x in cluster %x\n",
+ __FUNCTION__ , name , GET_PTR(child_xp) , GET_CXY(child_xp) );
+ #endif

      // TODO check access rights here [AG]
…
      vfs_inode_unlock( parent_xp );

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] exit for <%s> / inode = %x in cluster %x\n",
-              __FUNCTION__,local_cxy,this->core->lid,pathname,GET_PTR(child_xp),GET_CXY(child_xp) );
+ #if CONFIG_DEBUG_VFS_LOOKUP
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_LOOKUP < cycle )
+ printk("\n[DBG] %s : thread %x exit for <%s> / inode %x in cluster %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, path, GET_PTR(child_xp), GET_CXY(child_xp), cycle );
+ #endif

      // return searched pointer
…
      // we use two variables "index" and "count" because the buffer
      // is written in decreasing index order (from leaf to root)
-     // TODO : handle conflict with a concurrent rename [AG]
-     // TODO: handle synchro in the loop [AG]
+     // TODO  : handle conflict with a concurrent rename [AG]
+     // FIXME : handle synchro in the loop [AG]

      // set the NUL character in buffer / initialise buffer index and count
…
      parent_ptr = (vfs_inode_t *)GET_PTR( parent_xp );

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] enter for <%s> / child_cxy = %x / parent_cxy = %x\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , name , child_cxy , parent_cxy );
+ #if CONFIG_DEBUG_VFS_ADD_CHILD
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
+ printk("\n[DBG] %s : thread %x enter for <%s> / child_cxy = %x / parent_cxy = %x\n",
+ __FUNCTION__ , CURRENT_THREAD , name , child_cxy , parent_cxy );
+ #endif

      // 1. create dentry
…
      &dentry_xp );

-     vfs_dmsg("\n[DBG] %s : dentry <%s> created in local cluster %x\n",
-              __FUNCTION__ , name , local_cxy );
+ #if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
+ if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
+ printk("\n[DBG] %s : dentry <%s> created in cluster %x\n", __FUNCTION__, name, local_cxy );
+ #endif

      }
…
      &error );

-     vfs_dmsg("\n[DBG] %s : dentry <%s> created in remote cluster %x\n",
-              __FUNCTION__ , name , parent_cxy );
+ #if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
+ if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
+ printk("\n[DBG] %s : dentry <%s> created in cluster %x\n", __FUNCTION__, name, parent_cxy );
+ #endif

      }
…
      gid,
      &inode_xp );
+
+ #if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
+ if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
+ printk("\n[DBG] %s : inode <%x> created in cluster %x\n",
+ __FUNCTION__ , GET_PTR(inode_xp) , local_cxy );
+ #endif

      vfs_dmsg("\n[DBG] %s : inode %x created in local cluster %x\n",
…
      &error );

-     vfs_dmsg("\n[DBG] %s : inode %x created in remote cluster %x\n",
+ #if (CONFIG_DEBUG_VFS_ADD_CHILD & 1)
+ if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
+ printk("\n[DBG] %s : inode <%s> created in cluster %x\n",
      __FUNCTION__ , GET_PTR(inode_xp) , child_cxy );
+ #endif

      }
…
      hal_remote_swd( XPTR( dentry_cxy , &dentry_ptr->child_xp ) , inode_xp );

-     vfs_dmsg("\n[DBG] %s : exit in cluster %x for <%s>\n",
-              __FUNCTION__ , local_cxy , name );
+ #if CONFIG_DEBUG_VFS_ADD_CHILD
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_ADD_CHILD < cycle )
+ printk("\n[DBG] %s : thread %x exit for <%s>\n",
+ __FUNCTION__ , CURRENT_THREAD , name );
+ #endif

      // success : return extended pointer on child inode
…
      assert( (mapper != NULL) , __FUNCTION__ , "no mapper for page\n" );

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] enters for page %d / mapper = %x / inode = %x\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid , page->index , mapper, mapper->inode );
+ #if CONFIG_DEBUG_VFS_MAPPER_MOVE
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_MAPPER_MOVE < cycle )
+ printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / inode %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, page->index, mapper, mapper->inode, cycle );
+ #endif

      // get FS type
…
      }

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] exit for page %d / mapper = %x / inode = %x\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, page->index, mapper, mapper->inode );
+ #if CONFIG_DEBUG_VFS_MAPPER_MOVE
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_MAPPER_MOVE < cycle )
+ printk("\n[DBG] %s : thread %x exit for page %d / mapper %x / inode %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, page->index, mapper, mapper->inode, cycle );
+ #endif

      return error;
…
      assert( (mapper != NULL) , __FUNCTION__ , "mapper pointer is NULL\n" );

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] enter for inode %x in cluster %x/ cycle %d\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, inode , local_cxy , hal_time_stamp() );
+ #if CONFIG_DEBUG_VFS_MAPPER_LOAD
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_MAPPER_MOVE < cycle )
+ printk("\n[DBG] %s : thread %x enter for inode %x in cluster %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, inode, local_cxy, cycle );
+ #endif

      // compute number of pages
…
      }

-     vfs_dmsg("\n[DBG] %s : core[%x,%d] exit for inode %x in cluster %x / cycle %d\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, inode , local_cxy , hal_time_stamp() );
+ #if CONFIG_DEBUG_VFS_MAPPER_LOAD
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VFS_MAPPER_MOVE < cycle )
+ printk("\n[DBG] %s : thread %x exit for inode %x in cluster %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, inode, local_cxy, cycle );
+ #endif

      return 0;
trunk/kernel/kern/chdev.c
r428 → r433

      thread_t * this = CURRENT_THREAD;

-     chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) enter / cycle %d\n",
-                __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() );
+ #if CONFIG_DEBUG_CHDEV_REGISTER_COMMAND
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_CHDEV_REGISTER_COMMAND < cycle )
+ printk("\n[DBG] %s : client_thread %x (%s) enter / cycle %d\n",
+ __FUNCTION__, this, thread_type_str(this->type) , cycle );
+ #endif

      // get device descriptor cluster and local pointer
…
      // get local pointer on server thread
      server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) );
-
-     chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) / server_cxy %x / server_ptr %x / server_type %\n",
-                __FUNCTION__, local_cxy, this->core->lid, server_cxy, server_ptr,
-                thread_type_str( hal_remote_lw( XPTR( server_cxy , &server_ptr->type) ) ) );

      // build extended pointer on chdev lock protecting queue
…
      if( different ) dev_pic_send_ipi( chdev_cxy , lid );

-     chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) deschedules / cycle %d\n",
-                __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() );
+ #if CONFIG_DEBUG_CHDEV_REGISTER_COMMAND
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_CHDEV_REGISTER_COMMAND < cycle )
+ printk("\n[DBG] %s : client_thread %x (%s) exit / cycle %d\n",
+ __FUNCTION__, this, thread_type_str(this->type) , cycle );
+ #endif

      // deschedule
…
      sched_yield("blocked on I/O");

-     chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) resumes / cycle %d\n",
-                __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() );
-
      // exit critical section
      hal_restore_irq( save_sr );
+
+ #if CONFIG_DEBUG_CHDEV_REGISTER_COMMAND
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_CHDEV_REGISTER_COMMAND < cycle )
+ printk("\n[DBG] %s : client_thread %x (%s) resumes / cycle %d\n",
+ __FUNCTION__, this, thread_type_str(this->type) , cycle );
+ #endif

      #if CONFIG_READ_DEBUG
…
      server = CURRENT_THREAD;

-     chdev_dmsg("\n[DBG] %s : enter / server = %x / chdev = %x / cycle %d\n",
-                __FUNCTION__ , server , chdev , hal_time_stamp() );
+ #if CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER < cycle )
+ printk("\n[DBG] %s : server_thread %x enter / chdev = %x / cycle %d\n",
+ __FUNCTION__ , server , chdev , cycle );
+ #endif

      root_xp = XPTR( local_cxy , &chdev->wait_root );
…
      thread_unblock( client_xp , THREAD_BLOCKED_IO );

-     chdev_dmsg("\n[DBG] %s : thread %x complete operation for client %x / cycle %d\n",
-                __FUNCTION__ , server , client_ptr , hal_time_stamp() );
+ #if CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER < cycle )
+ printk("\n[DBG] %s : server_thread %x complete operation for client %x / cycle %d\n",
+ __FUNCTION__ , server , client_ptr , cycle );
+ #endif

      #if CONFIG_READ_DEBUG
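The traces above bracket the chdev client/server handshake: the client registers its
command with chdev_register_command(), deschedules with sched_yield("blocked on I/O"),
and resumes when the server thread completes the operation and calls
thread_unblock(client_xp, THREAD_BLOCKED_IO). Below is a userspace analogy of that
protocol, using pthread primitives in place of the kernel scheduler; all names are
stand-ins, not kernel APIs:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static bool pending = false;    /* a command has been registered */
    static bool done    = false;    /* the server has completed it   */

    static void * server( void * arg )
    {
        (void)arg;
        pthread_mutex_lock( &lock );
        while( !pending ) pthread_cond_wait( &cond , &lock );  /* wait for a command */
        printf("[server] perform I/O operation\n");
        done = true;                                           /* complete + wake client */
        pthread_cond_broadcast( &cond );
        pthread_mutex_unlock( &lock );
        return NULL;
    }

    int main( void )
    {
        pthread_t th;
        pthread_create( &th , NULL , server , NULL );

        pthread_mutex_lock( &lock );
        pending = true;                                        /* register the command */
        pthread_cond_broadcast( &cond );
        while( !done ) pthread_cond_wait( &cond , &lock );     /* "deschedule" until woken */
        pthread_mutex_unlock( &lock );

        printf("[client] resumes after I/O completion\n");
        pthread_join( th , NULL );
        return 0;
    }

As in the kernel path, the client re-checks its completion flag in a loop after
waking, so a spurious wakeup cannot make it return before the server has finished.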
trunk/kernel/kern/cluster.c
r428 → r433

      spinlock_init( &cluster->kcm_lock );

-     cluster_dmsg("\n[DBG] %s for cluster %x enters\n",
-                  __FUNCTION__ , local_cxy );
+ #if CONFIG_DEBUG_CLUSTER_INIT
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
+ printk("\n[DBG] %s enters for cluster %x / cycle %d\n",
+ __FUNCTION__ , local_cxy , cycle );
+ #endif

      // initialises DQDT
…
      }

-     cluster_dmsg("\n[DBG] %s : PPM initialized in cluster %x at cycle %d\n",
-                  __FUNCTION__ , local_cxy , hal_get_cycles() );
+ #if CONFIG_DEBUG_CLUSTER_INIT
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
+ cluster_dmsg("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n",
+ __FUNCTION__ , local_cxy , cycle );
+ #endif

      // initialises embedded KHM
…
      }

-     cluster_dmsg("\n[DBG] %s : cores initialized in cluster %x at cycle %d\n",
-                  __FUNCTION__ , local_cxy , hal_get_cycles() );
+ #if CONFIG_DEBUG_CLUSTER_INIT
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
+ cluster_dmsg("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n",
+ __FUNCTION__ , local_cxy , cycle );
+ #endif

      // initialises RPC fifo
…
      }

-     cluster_dmsg("\n[DBG] %s Process Manager initialized in cluster %x at cycle %d\n",
-                  __FUNCTION__ , local_cxy , hal_get_cycles() );
+ #if CONFIG_DEBUG_CLUSTER_INIT
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_CLUSTER_INIT < cycle )
+ cluster_dmsg("\n[DBG] %s Process Manager initialized in cluster %x / cycle %d\n",
+ __FUNCTION__ , local_cxy , cycle );
+ #endif

      hal_fence();
…
      ////////////////////////////////////////////////////////////////////////////////////
      // Process related functions
      ////////////////////////////////////////////////////////////////////////////////////
+
+
+ //////////////////////////////////////////////////////
+ xptr_t cluster_get_owner_process_from_pid( pid_t pid )
+ {
+     xptr_t      root_xp;       // xptr on root of list of processes in owner cluster
+     xptr_t      lock_xp;       // xptrr on lock protecting this list
+     xptr_t      iter_xp;       // iterator
+     xptr_t      current_xp;    // xptr on current process descriptor
+     process_t * current_ptr;   // local pointer on current process
+     pid_t       current_pid;   // current process identifier
+     bool_t      found;
+
+     cluster_t * cluster = LOCAL_CLUSTER;
+
+     // get owner cluster and lpid
+     cxy_t owner_cxy = CXY_FROM_PID( pid );
+
+     // get lock & root of list of process in owner cluster
+     root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root );
+     lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock );
+
+     // take the lock protecting the list of processes
+     remote_spinlock_lock( lock_xp );
+
+     // scan list of processes in owner cluster
+     found = false;
+     XLIST_FOREACH( root_xp , iter_xp )
+     {
+         current_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
+         current_ptr = GET_PTR( current_xp );
+         current_pid = hal_remote_lw( XPTR( owner_cxy , &current_ptr->pid ) );
+
+         if( current_pid == pid )
+         {
+             found = true;
+             break;
+         }
+     }
+
+     // release the lock protecting the list of processes
+     remote_spinlock_unlock( lock_xp );
+
+     // return extended pointer on process descriptor in owner cluster
+     if( found ) return current_xp;
+     else        return XPTR_NULL;
+ }

      //////////////////////////////////////////////////////////
…
      // skip one line
-     printk("\n");
+     printk("\n***** processes in cluster %x / cycle %d\n", cxy , (uint32_t)hal_get_cycles() );

      // loop on all reference processes in cluster cxy
trunk/kernel/kern/cluster.h
r428 → r433

      /******************************************************************************************
+ * This function returns an extended pointer on the process descriptor in owner cluster
+ * from the process PID. This PID can be be different from the calling process PID.
+ * It can be called by any thread running in any cluster,
+ ******************************************************************************************
+ * @ pid    : process identifier.
+ * @ return extended pointer on owner process if found / XPTR_NULL if not found.
+ *****************************************************************************************/
+ xptr_t cluster_get_owner_process_from_pid( pid_t pid );
+
+ /******************************************************************************************
       * This function returns an extended pointer on the reference process descriptor
       * from the process PID. This PID can be be different from the calling process PID.
…
       * @ pid    : process identifier.
-      * @ return extended pointer on reference process if success / return XPTR_NULL if error.
+      * @ return extended pointer on reference process if found / XPTR_NULL if not found.
       *****************************************************************************************/
      xptr_t cluster_get_reference_process_from_pid( pid_t pid );
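The new cluster_get_owner_process_from_pid() declared above scans the owner cluster's
process list under its remote spinlock and returns XPTR_NULL on a miss; the owner
cluster is derived from the PID itself (CXY_FROM_PID), which is why it can be called
from any cluster. A simplified standalone model of that scan, where a plain singly
linked list stands in for the kernel's xlist/xptr_t machinery and the lock is elided:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef struct process_sim
    {
        uint32_t             pid;
        struct process_sim * next;
    } process_sim_t;

    /* model of cluster_get_owner_process_from_pid(): the kernel version takes
     * remote_spinlock_lock() around this loop and returns XPTR_NULL on a miss */
    static process_sim_t * get_owner_process_from_pid( process_sim_t * root,
                                                       uint32_t        pid )
    {
        process_sim_t * p;
        for( p = root ; p != NULL ; p = p->next )
        {
            if( p->pid == pid ) return p;   // found in owner cluster list
        }
        return NULL;                        // not found
    }

    int main( void )
    {
        process_sim_t p2 = { 0x20002 , NULL };
        process_sim_t p1 = { 0x20001 , &p2  };
        process_sim_t * found = get_owner_process_from_pid( &p1 , 0x20002 );
        printf( "found process with pid %x\n",
                (unsigned)( found ? found->pid : 0 ) );
        return 0;
    }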
trunk/kernel/kern/core.c
r409 → r433

      }

- /* deprecated 14/08/2017 [AG]
- ////////////////////////////////////
- void core_time_update( core_t * core )
- {
-     uint32_t elapsed;
-     uint32_t ticks_nr   = core->ticks_nr;
-     uint64_t cycles     = core->cycles;
-     uint32_t time_stamp = core->time_stamp;
-     uint32_t time_now   = hal_get_cycles();
-
-     // compute number of elapsed cycles taking into account 32 bits register wrap
-     if( time_now < time_stamp ) elapsed = (0xFFFFFFFF - time_stamp) + time_now;
-     else                        elapsed = time_now - time_stamp;
-
-     cycles  += elapsed;
-     ticks_nr = elapsed / core->ticks_period;
-
-     core->time_stamp = time_now;
-     core->cycles     = cycles + elapsed;
-     core->ticks_nr   = ticks_nr + (elapsed / core->ticks_period);
-     hal_fence();
- }
- */
-
      ////////////////////////////////
      void core_clock( core_t * core )
…
      hal_fence();

- #if CONFIG_SHOW_CPU_USAGE
-     printk(INFO, "INFO: core %d in cluster %x : busy_percent = %d / cumulated_usage = %d\n",
-            core->lid, local_cxy , busy_percent , usage );
- #endif
-
      core->ticks_nr = 0;
      idle->ticks_nr = 0;
trunk/kernel/kern/process.c
r428 r433 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017 )6 * Alain Greiner (2016,2017,2018) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 124 124 model_pid = hal_remote_lw( XPTR( model_cxy , &model_ptr->pid ) ); 125 125 126 process_dmsg("\n[DBG] %s : core[%x,%d] enters / pid = %x / ppid = %x / model_pid = %x\n", 127 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid , parent_pid , model_pid ); 126 #if CONFIG_DEBUG_PROCESS_REFERENCE_INIT 127 uint32_t cycle = (uint32_t)hal_get_cycles(); 128 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT ) 129 printk("\n[DBG] %s : thread %x enter / pid = %x / ppid = %x / model_pid = %x / cycle %d\n", 130 __FUNCTION__ , CURRENT_THREAD , pid , parent_pid , model_pid , cycle ); 131 #endif 128 132 129 133 // initialize PID, REF_XP, PARENT_XP, and STATE 130 process->pid = pid;131 process->ref_xp = XPTR( local_cxy , process );132 process->parent_xp = parent_xp;133 process-> state = PROCESS_STATE_RUNNING;134 process->pid = pid; 135 process->ref_xp = XPTR( local_cxy , process ); 136 process->parent_xp = parent_xp; 137 process->term_state = 0; 134 138 135 139 // initialize vmm as empty … … 137 141 assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n" ); 138 142 139 process_dmsg("\n[DBG] %s : core[%x,%d] / vmm inialised as empty for process %x\n", 140 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid ); 143 #if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1) 144 cycle = (uint32_t)hal_get_cycles(); 145 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT ) 146 printk("\n[DBG] %s : thread %x / vmm empty for process %x / cycle %d\n", 147 __FUNCTION__ , CURRENT_THREAD , pid , cycle ); 148 #endif 141 149 142 150 // initialize fd_array as empty … … 224 232 remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) ); 225 233 226 process_dmsg("\n[DBG] %s : core[%x,%d] / fd array initialised for process %x\n", 227 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid ); 234 #if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1) 235 cycle = (uint32_t)hal_get_cycles(); 236 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT ) 237 printk("\n[DBG] %s : thread %x / fd_array for process %x / cycle %d\n", 238 __FUNCTION__ , CURRENT_THREAD , pid , cycle ); 239 #endif 228 240 229 241 // reset children list root … … 260 272 hal_fence(); 261 273 262 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n", 263 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid ); 274 #if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1) 275 cycle = (uint32_t)hal_get_cycles(); 276 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT ) 277 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n", 278 __FUNCTION__ , CURRENT_THREAD , pid , cycle ); 279 #endif 264 280 265 281 } // process_reference_init() … … 276 292 277 293 // initialize PID, REF_XP, PARENT_XP, and STATE 278 local_process->pid = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->pid ) ); 279 local_process->parent_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->parent_xp ) ); 280 local_process->ref_xp = reference_process_xp; 281 local_process->state = PROCESS_STATE_RUNNING; 282 283 process_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n", 284 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , local_process->pid ); 294 local_process->pid = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->pid ) ); 295 local_process->parent_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->parent_xp ) ); 296 local_process->ref_xp = reference_process_xp; 297 local_process->term_state = 0; 298 299 #if 
CONFIG_DEBUG_PROCESS_COPY_INIT 300 uint32_t cycle = (uint32_t)hal_get_cycles(); 301 if( CONFIG_DEBUG_PROCESS_COPY_INIT ) 302 printk("\n[DBG] %s : thread %x enter for process %x\n", 303 __FUNCTION__ , CURRENT_THREAD , local_process->pid ); 304 #endif 285 305 286 306 // reset local process vmm … … 327 347 hal_fence(); 328 348 329 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n", 330 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , local_process->pid ); 349 #if CONFIG_DEBUG_PROCESS_COPY_INIT 350 cycle = (uint32_t)hal_get_cycles(); 351 if( CONFIG_DEBUG_PROCESS_COPY_INIT ) 352 printk("\n[DBG] %s : thread %x exit for process %x\n", 353 __FUNCTION__ , CURRENT_THREAD , local_process->pid ); 354 #endif 331 355 332 356 return 0; … … 347 371 "process %x in cluster %x has still active threads", process->pid , local_cxy ); 348 372 349 process_dmsg("\n[DBG] %s : core[%x,%d] enter for process %x\n", 350 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid ); 373 #if CONFIG_DEBUG_PROCESS_DESTROY 374 uint32_t cycle = (uint32_t)hal_get_cycles(); 375 if( CONFIG_DEBUG_PROCESS_DESTROY ) 376 printk("\n[DBG] %s : thread %x enter to destroy process %x (pid = %x) / cycle %d\n", 377 __FUNCTION__ , CURRENT_THREAD , process, process->pid , cycle ); 378 #endif 351 379 352 380 // get local process manager pointer … … 386 414 xlist_unlink( XPTR( local_cxy , &process->children_list ) ); 387 415 remote_spinlock_unlock( children_lock_xp ); 388 389 // get extende pointer on parent main thread390 parent_thread_xp = XPTR( parent_cxy ,391 hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->th_tbl[1] )));392 393 // unblock parent process main thread394 thread_unblock( parent_thread_xp , THREAD_BLOCKED_WAIT );395 416 } 396 417 … … 411 432 process_free( process ); 412 433 413 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n", 414 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid ); 434 #if CONFIG_DEBUG_PROCESS_DESTROY 435 cycle = (uint32_t)hal_get_cycles(); 436 if( CONFIG_DEBUG_PROCESS_DESTROY ) 437 printk("\n[DBG] %s : thread %x exit / destroyed process %x (pid = %x) / cycle %d\n", 438 __FUNCTION__ , CURRENT_THREAD , process, process->pid, cycle ); 439 #endif 415 440 416 441 } // end process_destroy() … … 440 465 uint32_t responses; // number of remote process copies 441 466 uint32_t rsp_count; // used to assert number of copies 442 443 467 rpc_desc_t rpc; // rpc descriptor allocated in stack 444 468 445 process_dmsg("\n[DBG] %s : enter to %s process %x in cluster %x\n", 446 __FUNCTION__ , process_action_str( action_type ) , process->pid , local_cxy ); 469 #if CONFIG_DEBUG_PROCESS_SIGACTION 470 uint32_t cycle = (uint32_t)hal_get_cycles(); 471 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 472 printk("\n[DBG] %s : thread %x enter to %s process %x in cluster %x / cycle %d\n", 473 __FUNCTION__ , CURRENT_THREAD, process_action_str( action_type ) , 474 process->pid , local_cxy , cycle ); 475 #endif 447 476 448 477 thread_t * client = CURRENT_THREAD; 449 xptr_t client_xp = XPTR( local_cxy , client );450 478 451 479 // get local pointer on local cluster manager … … 492 520 { 493 521 494 process_dmsg("\n[DBG] %s : send RPC to remote cluster %x\n", 495 __FUNCTION__ , process_cxy ); 522 #if CONFIG_DEBUG_PROCESS_SIGACTION 523 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 524 printk("\n[DBG] %s : send RPC to remote cluster %x\n", __FUNCTION__ , process_cxy ); 525 #endif 496 526 497 527 rpc.args[0] = (uint64_t)action_type; … … 517 547 } 518 548 519 process_dmsg("\n[DBG] %s : make 
action in owner cluster %x\n", 520 __FUNCTION__ , local_cxy ); 521 549 #if CONFIG_DEBUG_PROCESS_SIGACTION 550 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 551 printk("\n[DBG] %s : make action in owner cluster %x\n", __FUNCTION__ , local_cxy ); 552 #endif 522 553 523 554 // call directly the relevant function in local owner cluster 524 if (action_type == DELETE_ALL_THREADS ) process_delete_threads ( process , client_xp ); 525 else if (action_type == BLOCK_ALL_THREADS ) process_block_threads ( process , client_xp ); 526 else if (action_type == UNBLOCK_ALL_THREADS ) process_unblock_threads( process ); 527 528 process_dmsg("\n[DBG] %s : exit after %s process %x in cluster %x\n", 529 __FUNCTION__ , process_action_str( action_type ) , process->pid , local_cxy ); 555 if (action_type == DELETE_ALL_THREADS ) process_delete_threads ( process ); 556 else if (action_type == BLOCK_ALL_THREADS ) process_block_threads ( process ); 557 else if (action_type == UNBLOCK_ALL_THREADS ) process_unblock_threads( process ); 558 559 #if CONFIG_DEBUG_PROCESS_SIGACTION 560 cycle = (uint32_t)hal_get_cycles(); 561 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 562 printk("\n[DBG] %s : thread %x exit after %s process %x in cluster %x / cycle %d\n", 563 __FUNCTION__ , CURRENT_THREAD, process_action_str( action_type ) , 564 process->pid , local_cxy , cycle ); 565 #endif 530 566 531 567 } // end process_sigaction() 532 568 533 //////////////////////////////////////////////// 534 void process_block_threads( process_t * process, 535 xptr_t client_xp ) 569 ///////////////////////////////////////////////// 570 void process_block_threads( process_t * process ) 536 571 { 537 572 thread_t * target; // pointer on target thread 573 thread_t * this; // pointer on calling thread 538 574 uint32_t ltid; // index in process th_tbl 539 thread_t * requester; // requesting thread pointer540 575 uint32_t count; // requests counter 541 576 volatile uint32_t rsp_count; // responses counter 542 577 543 578 // get calling thread pointer 544 requester = CURRENT_THREAD; 545 546 sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n", 547 __FUNCTION__ , process->pid , local_cxy ); 579 this = CURRENT_THREAD; 580 581 #if CONFIG_DEBUG_PROCESS_SIGACTION 582 uint32_t cycle = (uint32_t)hal_get_cycles(); 583 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 584 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n", 585 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 586 #endif 548 587 549 588 // get lock protecting process th_tbl[] … … 559 598 target = process->th_tbl[ltid]; 560 599 600 assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" ); 601 561 602 if( target != NULL ) // thread found 562 603 { 563 604 count++; 564 605 565 // - if the target thread is the client thread, we do nothing,566 // and we simply decrement the responses counter.567 606 // - if the calling thread and the target thread are on the same core, 568 607 // we block the target thread, we don't need confirmation from scheduler, … … 572 611 // to be sure that the target thread is not running. 573 612 574 if( XPTR( local_cxy , target ) == client_xp ) 575 { 576 // decrement responses counter 577 hal_atomic_add( (void *)&rsp_count , -1 ); 578 } 579 else if( requester->core->lid == target->core->lid ) 613 if( this->core->lid == target->core->lid ) 580 614 { 581 615 // set the global blocked bit in target thread descriptor. 
… … 612 646 } 613 647 614 sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n", 615 __FUNCTION__ , process->pid , local_cxy , count ); 648 #if CONFIG_DEBUG_PROCESS_SIGACTION 649 cycle = (uint32_t)hal_get_cycles(); 650 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 651 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n", 652 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 653 #endif 616 654 617 655 } // end process_block_threads() … … 621 659 { 622 660 thread_t * target; // pointer on target thead 661 thread_t * this; // pointer on calling thread 623 662 uint32_t ltid; // index in process th_tbl 624 663 uint32_t count; // requests counter 625 664 626 sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n", 627 __FUNCTION__ , process->pid , local_cxy ); 665 // get calling thread pointer 666 this = CURRENT_THREAD; 667 668 #if CONFIG_DEBUG_PROCESS_SIGACTION 669 uint32_t cycle = (uint32_t)hal_get_cycles(); 670 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 671 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n", 672 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 673 #endif 628 674 629 675 // get lock protecting process th_tbl[] … … 636 682 target = process->th_tbl[ltid]; 637 683 684 assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" ); 685 638 686 if( target != NULL ) // thread found 639 687 { … … 648 696 spinlock_unlock( &process->th_lock ); 649 697 650 sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n", 651 __FUNCTION__ , process->pid , local_cxy , count ); 698 #if CONFIG_DEBUG_PROCESS_SIGACTION 699 cycle = (uint32_t)hal_get_cycles(); 700 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 701 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n", 702 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 703 #endif 652 704 653 705 } // end process_unblock_threads() 654 706 655 ///////////////////////////////////////////////// 656 void process_delete_threads( process_t * process, 657 xptr_t client_xp ) 707 ////////////////////////////////////////////////// 708 void process_delete_threads( process_t * process ) 658 709 { 659 710 thread_t * target; // pointer on target thread 711 thread_t * this; // pointer on calling thread 660 712 uint32_t ltid; // index in process th_tbl 661 713 uint32_t count; // request counter 662 663 sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x at cycle %d\n", 664 __FUNCTION__ , process->pid , local_cxy , (uint32_t)hal_get_cycles() ); 714 cxy_t owner_cxy; // owner cluster identifier 715 716 // get calling thread pointer 717 this = CURRENT_THREAD; 718 owner_cxy = CXY_FROM_PID( process->pid ); 719 720 #if CONFIG_DEBUG_PROCESS_SIGACTION 721 uint32_t cycle = (uint32_t)hal_get_cycles(); 722 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 723 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n", 724 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 725 #endif 665 726 666 727 // get lock protecting process th_tbl[] … … 673 734 target = process->th_tbl[ltid]; 674 735 675 if( target != NULL ) // thread found 736 assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" ); 737 738 if( target != NULL ) // thread found 676 739 { 677 740 count++; 678 679 // delete only if the target is not the client680 if( XPTR( local_cxy , target ) != client_xp )681 { 741 742 // the main thread should not be deleted 743 if( 
(owner_cxy != local_cxy) || (ltid != 0) ) 744 { 682 745 hal_atomic_or( &target->flags , THREAD_FLAG_REQ_DELETE ); 683 746 } … … 688 751 spinlock_unlock( &process->th_lock ); 689 752 690 sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x at cycle %d\n", 691 __FUNCTION__ , process->pid , local_cxy , (uint32_t)hal_get_cycles() ); 753 #if CONFIG_DEBUG_PROCESS_SIGACTION 754 cycle = (uint32_t)hal_get_cycles(); 755 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 756 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n", 757 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 758 #endif 692 759 693 760 } // end process_delete_threads() … … 988 1055 "parent process must be the reference process\n" ); 989 1056 990 fork_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n", 991 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid , (uint32_t)hal_get_cycles() ); 1057 #if CONFIG_DEBUG_PROCESS_MAKE_FORK 1058 uint32_t cycle = (uint32_t)hal_get_cycles(); 1059 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle ) 1060 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 1061 __FUNCTION__, CURRENT_THREAD, parent_pid, cycle ); 1062 #endif 992 1063 993 1064 // allocate a process descriptor … … 999 1070 return -1; 1000 1071 } 1001 1002 fork_dmsg("\n[DBG] %s : core[%x,%d] created child process %x at cycle %d\n",1003 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process, (uint32_t)hal_get_cycles() );1004 1072 1005 1073 // allocate a child PID from local cluster … … 1012 1080 return -1; 1013 1081 } 1014 1015 fork_dmsg("\n[DBG] %s : core[%x, %d] child process PID = %x at cycle %d\n",1016 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, new_pid , (uint32_t)hal_get_cycles() );1017 1082 1018 1083 // initializes child process descriptor from parent process descriptor … … 1022 1087 parent_process_xp ); 1023 1088 1024 fork_dmsg("\n[DBG] %s : core[%x, %d] child process initialised at cycle %d\n", 1025 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() ); 1089 #if CONFIG_DEBUG_PROCESS_MAKE_FORK 1090 cycle = (uint32_t)hal_get_cycles(); 1091 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle ) 1092 printk("\n[DBG] %s : thread %x created child_process %x / child_pid %x / cycle %d\n", 1093 __FUNCTION__, CURRENT_THREAD, process, new_pid, cycle ); 1094 #endif 1026 1095 1027 1096 // copy VMM from parent descriptor to child descriptor … … 1037 1106 } 1038 1107 1039 fork_dmsg("\n[DBG] %s : core[%x, %d] child process VMM copied at cycle %d\n", 1040 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() ); 1108 #if CONFIG_DEBUG_PROCESS_MAKE_FORK 1109 cycle = (uint32_t)hal_get_cycles(); 1110 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle ) 1111 printk("\n[DBG] %s : thread %x copied VMM from parent %x to child %x / cycle %d\n", 1112 __FUNCTION__ , CURRENT_THREAD , parent_pid, new_pid, cycle ); 1113 #endif 1041 1114 1042 1115 // update extended pointer on .elf file … … 1059 1132 assert( (thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" ); 1060 1133 1061 fork_dmsg("\n[DBG] %s : core[%x,%d] child thread created at cycle %d\n", 1062 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() ); 1063 1064 // update parent process GPT to set Copy_On_Write for shared data vsegs 1134 #if CONFIG_DEBUG_PROCESS_MAKE_FORK 1135 cycle = (uint32_t)hal_get_cycles(); 1136 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle ) 1137 printk("\n[DBG] %s : thread %x created child thread %x / cycle %d\n", 1138 __FUNCTION__ , CURRENT_THREAD, 
thread, cycle ); 1139 #endif 1140 1141 // set Copy_On_Write flag in parent process GPT 1065 1142 // this includes all replicated GPT copies 1066 1143 if( parent_process_cxy == local_cxy ) // reference is local … … 1074 1151 } 1075 1152 1076 fork_dmsg("\n[DBG] %s : core[%x,%d] COW set in parent_process at cycle %d\n", 1077 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() ); 1153 // set Copy_On_Write flag in child process GPT 1154 vmm_set_cow( process ); 1155 1156 #if CONFIG_DEBUG_PROCESS_MAKE_FORK 1157 cycle = (uint32_t)hal_get_cycles(); 1158 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle ) 1159 printk("\n[DBG] %s : thread %x set COW in parent and child / cycle %d\n", 1160 __FUNCTION__ , CURRENT_THREAD, cycle ); 1161 #endif 1078 1162 1079 1163 // get extended pointers on parent children_root, children_lock and children_nr … … 1092 1176 *child_pid = new_pid; 1093 1177 1094 1095 fork_dmsg("\n[DBG] %s : core[%x,%d] exit at cycle %d\n", 1096 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() ); 1178 #if CONFIG_DEBUG_PROCESS_MAKE_FORK 1179 cycle = (uint32_t)hal_get_cycles(); 1180 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle ) 1181 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1182 __FUNCTION__, CURRENT_THREAD, cycle ); 1183 #endif 1097 1184 1098 1185 return 0; … … 1105 1192 { 1106 1193 char * path; // pathname to .elf file 1107 pid_t pid; // old_process PID given to new_process1108 pid_t temp_pid; // temporary PID given to old_process1194 pid_t pid; // old_process PID / given to new_process 1195 pid_t temp_pid; // temporary PID / given to old_process 1109 1196 process_t * old_process; // local pointer on old process 1197 thread_t * old_thread; // local pointer on old thread 1110 1198 process_t * new_process; // local pointer on new process 1111 thread_t * new_thread; // local pointer on main thread 1112 pthread_attr_t attr; // main thread attributes 1199 thread_t * new_thread; // local pointer on new thread 1200 xptr_t parent_xp; // extended pointer on parent process 1201 pthread_attr_t attr; // new thread attributes 1113 1202 lid_t lid; // selected core local index 1114 1203 error_t error; 1115 1204 1116 // get .elf pathname and PID from exec_info 1205 // get old_thread / old_process / PID / parent_xp 1206 old_thread = CURRENT_THREAD; 1207 old_process = old_thread->process; 1208 pid = old_process->pid; 1209 parent_xp = old_process->parent_xp; 1210 1211 // get .elf pathname from exec_info 1117 1212 path = exec_info->path; 1118 pid = exec_info->pid;1119 1213 1120 1214 // this function must be executed by a thread running in owner cluster 1121 1215 assert( (CXY_FROM_PID( pid ) == local_cxy), __FUNCTION__, 1122 "local cluster %x is not owner for process %x\n", local_cxy, pid ); 1123 1124 exec_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x / %s / cycle %d\n", 1125 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path, (uint32_t)hal_get_cycles() ); 1126 1127 // get old_process local pointer 1128 old_process = (process_t *)cluster_get_local_process_from_pid( pid ); 1129 1130 if( old_process == NULL ) 1131 { 1132 printk("\n[ERROR] in %s : cannot get old process descriptor\n", __FUNCTION__ ); 1133 return -1; 1134 } 1216 "local_cluster must be owner_cluster\n" ); 1217 1218 assert( (LTID_FROM_TRDID( old_thread->trdid ) == 0) , __FUNCTION__, 1219 "must be called by the main thread\n" ); 1220 1221 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC 1222 uint32_t cycle = (uint32_t)hal_get_cycles(); 1223 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle ) 1224 
printk("\n[DBG] %s : thread %x enters for process %x / %s / cycle %d\n", 1225 __FUNCTION__, old_thread, pid, path, cycle ); 1226 #endif 1135 1227 1136 1228 // allocate memory for new_process descriptor … … 1144 1236 } 1145 1237 1146 // get a newPID for old_process1238 // get a temporary PID for old_process 1147 1239 error = cluster_pid_alloc( old_process , &temp_pid ); 1148 1240 if( error ) … … 1154 1246 } 1155 1247 1156 // request blocking for all threads in old_process (but the calling thread) 1157 process_sigaction( old_process , BLOCK_ALL_THREADS ); 1158 1159 // request destruction for all threads in old_process (but the calling thread) 1160 process_sigaction( old_process , DELETE_ALL_THREADS ); 1161 1162 exec_dmsg("\n[DBG] %s : core[%x,%d] marked old threads for destruction / cycle %d\n", 1163 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid , (uint32_t)hal_get_cycles() ); 1164 1165 // set new PID to old_process 1248 // set temporary PID to old_process 1166 1249 old_process->pid = temp_pid; 1167 1250 … … 1169 1252 process_reference_init( new_process, 1170 1253 pid, 1171 old_process->parent_xp,// parent_process_xp1172 XPTR(local_cxy , old_process) ); // model_process _xp1254 parent_xp, // parent_process_xp 1255 XPTR(local_cxy , old_process) ); // model_process 1173 1256 1174 1257 // give TXT ownership to new_process 1175 1258 process_txt_set_ownership( XPTR( local_cxy , new_process ) ); 1176 1259 1177 exec_dmsg("\n[DBG] %s : core[%x,%d] initialised new process %x / cycle %d \n", 1178 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, new_process, (uint32_t)hal_get_cycles() ); 1260 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC 1261 cycle = (uint32_t)hal_get_cycles(); 1262 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle ) 1263 printk("\n[DBG] %s : thread %x created new process %x / cycle %d \n", 1264 __FUNCTION__ , old_thread , new_process , cycle ); 1265 #endif 1179 1266 1180 1267 // register code & data vsegs as well as entry-point in new process VMM, … … 1188 1275 } 1189 1276 1190 exec_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered in new process %x / cycle %d\n", 1191 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, new_process, (uint32_t)hal_get_cycles() ); 1277 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC 1278 cycle = (uint32_t)hal_get_cycles(); 1279 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle ) 1280 printk("\n[DBG] %s : thread %x registered code/data vsegs in new process %x / cycle %d\n", 1281 __FUNCTION__, old_thread , new_process->pid , cycle ); 1282 #endif 1192 1283 1193 1284 // select a core in local cluster to execute the main thread … … 1216 1307 assert( (new_thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" ); 1217 1308 1218 exec_dmsg("\n[DBG] %s : core[%x,%d] created new_process main thread / cycle %d\n", 1219 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() ); 1220 1221 // get pointers on parent process 1222 xptr_t parent_xp = new_process->parent_xp; 1309 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC 1310 cycle = (uint32_t)hal_get_cycles(); 1311 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle ) 1312 printk("\n[DBG] %s : thread %x created new_process main thread %x / cycle %d\n", 1313 __FUNCTION__ , old_thread , new_thread , cycle ); 1314 #endif 1315 1316 // get cluster and local pointer on parent process 1223 1317 process_t * parent_ptr = GET_PTR( parent_xp ); 1224 1318 cxy_t parent_cxy = GET_CXY( parent_xp ); … … 1235 1329 remote_spinlock_unlock( lock_xp ); 1236 1330 1237 exec_dmsg("\n[DBG] %s : core[%x,%d] updated parent process children list / cycle 
%d\n",1238 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );1239 1240 // block and mark calling thread for deletion1241 // only when it is an user thread1242 thread_t * this = CURRENT_THREAD;1243 if( this->type == THREAD_USER )1244 {1245 thread_block( this , THREAD_BLOCKED_GLOBAL );1246 hal_atomic_or( &this->flags , THREAD_FLAG_REQ_DELETE );1247 }1248 1249 1331 // activate new thread 1250 1332 thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL ); 1251 1333 1334 // request old_thread destruction => old_process destruction 1335 thread_block( old_thread , THREAD_BLOCKED_GLOBAL ); 1336 hal_atomic_or( &old_thread->flags , THREAD_FLAG_REQ_DELETE ); 1337 1252 1338 hal_fence(); 1253 1339 1254 exec_dmsg("\n[DBG] %s : core[%x,%d] exit for path = %s / cycle %d\n", 1255 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path , (uint32_t)hal_get_cycles() ); 1256 1340 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC 1341 cycle = (uint32_t)hal_get_cycles(); 1342 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle ) 1343 printk("\n[DBG] %s : old_thread %x blocked / new_thread %x activated / cycle %d\n", 1344 __FUNCTION__ , old_thread , new_thread , cycle ); 1345 #endif 1346 1257 1347 return 0; 1258 1348 1259 1349 } // end process_make_exec() 1260 1350 1261 /////////////////////////////////////// 1262 void process_make_kill( pid_t pid, 1263 uint32_t sig_id ) 1264 { 1265 // this function must be executed by a thread running in owner cluster 1266 assert( (CXY_FROM_PID( pid ) == local_cxy) , __FUNCTION__ , 1267 "must execute in owner cluster" ); 1268 1351 //////////////////////////////////////////// 1352 void process_make_kill( process_t * process, 1353 bool_t is_exit, 1354 uint32_t exit_status ) 1355 { 1269 1356 thread_t * this = CURRENT_THREAD; 1270 1357 1271 kill_dmsg("\n[DBG] %s : core[%x,%d] enter / process %x / sig %d\n", 1272 __FUNCTION__, local_cxy, this->core->lid, pid , sig_id ); 1273 1274 // get pointer on local target process descriptor 1275 process_t * process = process_get_local_copy( pid ); 1276 1277 // does nothing if process does not exist 1278 if( process == NULL ) 1279 { 1280 printk("\n[WARNING] %s : process %x does not exist => do nothing\n", 1281 __FUNCTION__ , pid ); 1282 return; 1283 } 1284 1285 // analyse signal type 1286 switch( sig_id ) 1287 { 1288 case SIGSTOP: 1289 { 1290 // block all threads in all clusters 1291 process_sigaction( process , BLOCK_ALL_THREADS ); 1292 1293 // remove TXT ownership to target process 1294 process_txt_reset_ownership( XPTR( local_cxy , process ) ); 1295 } 1296 break; 1297 case SIGCONT: // unblock all threads in all clusters 1298 { 1299 process_sigaction( process , UNBLOCK_ALL_THREADS ); 1300 } 1301 break; 1302 case SIGKILL: // block all threads, then delete all threads 1303 { 1304 // block all threads in all clusters 1305 process_sigaction( process , BLOCK_ALL_THREADS ); 1306 1307 // remove TXT ownership to target process 1308 process_txt_reset_ownership( XPTR( local_cxy , process ) ); 1309 1310 // delete all threads (but the calling thread) 1311 process_sigaction( process , DELETE_ALL_THREADS ); 1312 1313 // delete the calling thread if required 1314 if( CURRENT_THREAD->process == process ) 1315 { 1316 // set REQ_DELETE flag 1317 hal_atomic_or( &this->flags , THREAD_FLAG_REQ_DELETE ); 1318 1319 // deschedule 1320 sched_yield( "suicide after kill" ); 1321 } 1322 } 1323 break; 1324 } 1325 1326 kill_dmsg("\n[DBG] %s : core[%x,%d] exit / process %x / sig %d \n", 1327 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid , 
sig_id ); 1358 assert( (CXY_FROM_PID( process->pid ) == local_cxy) , __FUNCTION__ , 1359 "must be executed in process owner cluster\n" ); 1360 1361 assert( ( this->type == THREAD_RPC ) , __FUNCTION__ , 1362 "must be executed by an RPC thread\n" ); 1363 1364 #if CONFIG_DEBUG_PROCESS_MAKE_KILL 1365 uint32_t cycle = (uint32_t)hal_get_cycles(); 1366 if( CONFIG_DEBUG_PROCESS_MAKE_KILL < cycle ) 1367 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 1368 __FUNCTION__, this , process->pid , cycle ); 1369 #endif 1370 1371 // register exit_status in owner process descriptor 1372 if( is_exit ) process->term_state = exit_status; 1373 1374 // atomically update owner process descriptor flags 1375 if( is_exit ) hal_atomic_or( &process->term_state , PROCESS_FLAG_EXIT ); 1376 else hal_atomic_or( &process->term_state , PROCESS_FLAG_KILL ); 1377 1378 // remove TXT ownership from owner process descriptor 1379 process_txt_reset_ownership( XPTR( local_cxy , process ) ); 1380 1381 // block all process threads in all clusters 1382 process_sigaction( process , BLOCK_ALL_THREADS ); 1383 1384 // mark all process threads in all clusters for delete 1385 process_sigaction( process , DELETE_ALL_THREADS ); 1386 1387 /* unused if sys_wait deschedules without blocking [AG] 1388 1389 // get cluster and pointers on reference parent process 1390 xptr_t parent_xp = process->parent_xp; 1391 process_t * parent_ptr = GET_PTR( parent_xp ); 1392 cxy_t parent_cxy = GET_CXY( parent_xp ); 1393 1394 // get local pointer on parent main thread 1395 thread_t * main_ptr = hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->th_tbl[0] ) ); 1396 1397 // reset THREAD_BLOCKED_WAIT bit in parent process main thread 1398 thread_unblock( XPTR( parent_cxy , main_ptr ) , THREAD_BLOCKED_WAIT ); 1399 */ 1400 1401 #if CONFIG_DEBUG_PROCESS_MAKE_KILL 1402 cycle = (uint32_t)hal_get_cycles(); 1403 if( CONFIG_DEBUG_PROCESS_MAKE_KILL < cycle ) 1404 printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n", 1405 __FUNCTION__, this, process->pid , cycle ); 1406 #endif 1328 1407 1329 1408 } // end process_make_kill() 1330 1331 /////////////////////////////////////////1332 void process_make_exit( pid_t pid,1333 uint32_t status )1334 {1335 // this function must be executed by a thread running in owner cluster1336 assert( (CXY_FROM_PID( pid ) == local_cxy) , __FUNCTION__ ,1337 "must execute in owner cluster" );1338 1339 // get pointer on local process descriptor1340 process_t * process = process_get_local_copy( pid );1341 1342 // does nothing if process does not exist1343 if( process == NULL )1344 {1345 printk("\n[WARNING] %s : process %x does not exist => do nothing\n",1346 __FUNCTION__ , pid );1347 return;1348 }1349 1350 // block all threads in all clusters (but the calling thread)1351 process_sigaction( process , BLOCK_ALL_THREADS );1352 1353 // delete all threads in all clusters (but the calling thread)1354 process_sigaction( process , DELETE_ALL_THREADS );1355 1356 // delete the calling thread1357 hal_atomic_or( &CURRENT_THREAD->flags , THREAD_FLAG_REQ_DELETE );1358 1359 // deschedule1360 sched_yield( "suicide after exit" );1361 1362 } // end process_make_exit()1363 1409 1364 1410 /////////////////////////////////////////////// … 1366 1412 { 1367 1413 1368 process_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n", 1369 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() ); 1414 #if CONFIG_DEBUG_PROCESS_ZERO_CREATE 1415 uint32_t cycle = (uint32_t)hal_get_cycles(); 1416 if( CONFIG_DEBUG_PROCESS_ZERO_CREATE < 
cycle ) 1417 printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1418 #endif 1370 1419 1371 1420 // initialize PID, REF_XP, PARENT_XP, and STATE 1372 process->pid = 0;1373 process->ref_xp = XPTR( local_cxy , process );1374 process->parent_xp = XPTR_NULL;1375 process-> state = PROCESS_STATE_RUNNING;1421 process->pid = 0; 1422 process->ref_xp = XPTR( local_cxy , process ); 1423 process->parent_xp = XPTR_NULL; 1424 process->term_state = 0; 1376 1425 1377 1426 // reset th_tbl[] array as empty … … 1391 1440 hal_fence(); 1392 1441 1393 process_dmsg("\n[DBG] %s : core[%x,%d] exit at cycle %d\n", 1394 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , (uint32_t)hal_get_cycles() ); 1442 #if CONFIG_DEBUG_PROCESS_ZERO_CREATE 1443 cycle = (uint32_t)hal_get_cycles(); 1444 if( CONFIG_DEBUG_PROCESS_ZERO_CREATE < cycle ) 1445 printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1446 #endif 1395 1447 1396 1448 } // end process_zero_init() … … 1406 1458 error_t error; 1407 1459 1408 process_dmsg("\n[DBG] %s : core[%x,%d] enters at cycle %d\n", 1409 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid ); 1460 #if CONFIG_DEBUG_PROCESS_INIT_CREATE 1461 uint32_t cycle = (uint32_t)hal_get_cycles(); 1462 if( CONFIG_DEBUG_PROCESS_INIT_CREATE < cycle ) 1463 printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1464 #endif 1410 1465 1411 1466 // allocates memory for process descriptor from local cluster … … 1434 1489 XPTR( local_cxy , &process_zero ), // parent 1435 1490 XPTR( local_cxy , &process_zero ) ); // model 1436 1437 process_dmsg("\n[DBG] %s : core[%x,%d] / initialisation done\n",1438 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid );1439 1491 1440 1492 // register "code" and "data" vsegs as well as entry-point … … 1446 1498 process_destroy( process ); 1447 1499 } 1448 1449 process_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered / path = %s\n",1450 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, CONFIG_PROCESS_INIT_PATH );1451 1500 1452 1501 // get extended pointers on process_zero children_root, children_lock … … 1489 1538 hal_fence(); 1490 1539 1491 process_dmsg("\n[DBG] %s : core[%x,%d] exit / main thread = %x\n", 1492 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, thread ); 1540 #if CONFIG_DEBUG_PROCESS_INIT_CREATE 1541 cycle = (uint32_t)hal_get_cycles(); 1542 if( CONFIG_DEBUG_PROCESS_INIT_CREATE < cycle ) 1543 printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1544 #endif 1493 1545 1494 1546 } // end process_init_create() 1495 1496 //////////////////////////////////////////1497 char * process_state_str( uint32_t state )1498 {1499 if ( state == PROCESS_STATE_RUNNING ) return "RUNNING";1500 else if( state == PROCESS_STATE_KILLED ) return "KILLED";1501 else if( state == PROCESS_STATE_EXITED ) return "EXITED";1502 else return "undefined";1503 }1504 1547 1505 1548 ///////////////////////////////////////// … … 1542 1585 // get PID and state 1543 1586 pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) ); 1544 state = hal_remote_lw( XPTR( process_cxy , &process_ptr-> state ) );1587 state = hal_remote_lw( XPTR( process_cxy , &process_ptr->term_state ) ); 1545 1588 1546 1589 // get PPID … … 1577 1620 if( owner_xp == process_xp ) 1578 1621 { 1579 printk("PID %X | PPID %X | %s\t| %s (FG) | %X | %d | %s\n",1580 pid, ppid, process_state_str(state), txt_name, process_ptr, th_nr, elf_name );1622 printk("PID %X | PPID %X | STS %X | %s (FG) | %X 
| %d | %s\n", 1623 pid, ppid, state, txt_name, process_ptr, th_nr, elf_name ); 1581 1624 } 1582 1625 else 1583 1626 { 1584 printk("PID %X | PPID %X | %s\t| %s (BG) | %X | %d | %s\n",1585 pid, ppid, process_state_str(state), txt_name, process_ptr, th_nr, elf_name );1627 printk("PID %X | PPID %X | STS %X | %s (BG) | %X | %d | %s\n", 1628 pid, ppid, state, txt_name, process_ptr, th_nr, elf_name ); 1586 1629 } 1587 1630 } // end process_display() … … 1632 1675 xptr_t lock_xp; // extended pointer on list lock in chdev 1633 1676 1634 process_dmsg("\n[DBG] %s : core[%x,%d] enter for process %x at cycle\n", 1635 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, (uint32_t)hal_get_cycles() ); 1677 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH 1678 uint32_t cycle = (uint32_t)hal_get_cycles(); 1679 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle ) 1680 printk("\n[DBG] %s : thread %x enter for process %x / txt_id = %d / cycle %d\n", 1681 __FUNCTION__, CURRENT_THREAD, process->pid, txt_id, cycle ); 1682 #endif 1636 1683 1637 1684 // check process is reference … … 1657 1704 remote_spinlock_unlock( lock_xp ); 1658 1705 1659 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x at cycle\n", 1660 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, (uint32_t)hal_get_cycles() ); 1706 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH 1707 cycle = (uint32_t)hal_get_cycles(); 1708 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle ) 1709 printk("\n[DBG] %s : thread %x exit for process %x / txt_id = %d / cycle %d\n", 1710 __FUNCTION__, CURRENT_THREAD, process->pid, txt_id , cycle ); 1711 #endif 1661 1712 1662 1713 } // end process_txt_attach() … … 1670 1721 xptr_t lock_xp; // extended pointer on list lock in chdev 1671 1722 1672 process_dmsg("\n[DBG] %s : core[%x,%d] enter for process %x at cycle\n", 1673 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, (uint32_t)hal_get_cycles() ); 1723 #if CONFIG_DEBUG_PROCESS_TXT_DETACH 1724 uint32_t cycle = (uint32_t)hal_get_cycles(); 1725 if( CONFIG_DEBUG_PROCESS_TXT_DETACH < cycle ) 1726 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 1727 __FUNCTION__, CURRENT_THREAD, process->pid , cycle ); 1728 #endif 1674 1729 1675 1730 // check process is reference … … 1690 1745 remote_spinlock_unlock( lock_xp ); 1691 1746 1692 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x at cycle %d\n", 1693 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, (uint32_t)hal_get_cycles() ); 1747 #if CONFIG_DEBUG_PROCESS_TXT_DETACH 1748 cycle = (uint32_t)hal_get_cycles(); 1749 if( CONFIG_DEBUG_PROCESS_TXT_DETACH < cycle ) 1750 printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n", 1751 __FUNCTION__, CURRENT_THREAD, process->pid, cycle ); 1752 #endif 1694 1753 1695 1754 } // end process_txt_detach() … … 1732 1791 xptr_t file_xp; // extended pointer on TXT_RX pseudo file 1733 1792 xptr_t txt_xp; // extended pointer on TXT_RX chdev 1734 chdev_t * txt_ptr; 1735 cxy_t txt_cxy; 1793 chdev_t * txt_ptr; // local pointer on TXT_RX chdev 1794 cxy_t txt_cxy; // cluster of TXT_RX chdev 1795 uint32_t txt_id; // TXT_RX channel 1736 1796 xptr_t owner_xp; // extended pointer on current TXT_RX owner 1737 1797 xptr_t root_xp; // extended pointer on root of attached process list 1738 1798 xptr_t iter_xp; // iterator for xlist 1739 1799 xptr_t current_xp; // extended pointer on current process 1740 process_t * current_ptr; 1741 cxy_t current_cxy; 1742 pid_t ppid; 1800 process_t * current_ptr; // local pointer on current process 1801 cxy_t 
current_cxy; // cluster for current process 1802 pid_t ppid; // parent process identifier for current process 1743 1803 1744 1804 // get cluster and local pointer on process … … 1752 1812 txt_xp = chdev_from_file( file_xp ); 1753 1813 txt_cxy = GET_CXY( txt_xp ); 1754 txt_ptr = (chdev_t *)GET_PTR( txt_xp );1755 1756 // get extended pointer on TXT_RX owner 1814 txt_ptr = GET_PTR( txt_xp ); 1815 1816 // get extended pointer on TXT_RX owner and TXT channel 1757 1817 owner_xp = hal_remote_lwd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) ); 1818 txt_id = hal_remote_lw ( XPTR( txt_cxy , &txt_ptr->channel ) ); 1758 1819 1759 1820 // transfer ownership to KSH if required 1760 if( owner_xp == process_xp)1821 if( (owner_xp == process_xp) && (txt_id > 0) ) 1761 1822 { 1762 1823 // get extended pointer on root of list of attached processes … … 1782 1843 } 1783 1844 } 1784 } 1785 1786 assert( false , __FUNCTION__ , "KSH process not found" ); 1787 1845 1846 assert( false , __FUNCTION__ , "KSH process not found" ); 1847 } 1788 1848 } // end process_txt_reset_ownership() 1789 1849 -
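Note on the trace pattern: throughout this changeset the old *_dmsg() macros are replaced by the same open-coded sequence: read the current cycle with hal_get_cycles(), compare it against a per-function CONFIG_DEBUG_* threshold, and call printk() only past that threshold. A minimal sketch folding this idiom into one helper macro, assuming GNU C variadic macros and the kernel's own printk() and hal_get_cycles(); the DEBUG_TRACE name itself is hypothetical and not part of the changeset:

    // hypothetical helper equivalent to the open-coded pattern above
    #define DEBUG_TRACE( threshold , fmt , ... )                         \
    do {                                                                 \
        uint32_t _cycle = (uint32_t)hal_get_cycles();                    \
        if( (threshold) < _cycle )                                       \
            printk("\n[DBG] %s : " fmt " / cycle %d\n",                  \
                   __FUNCTION__ , ##__VA_ARGS__ , _cycle );              \
    } while(0)

    // would reproduce the process_make_exec() entry trace above as:
    // DEBUG_TRACE( CONFIG_DEBUG_PROCESS_MAKE_EXEC ,
    //              "thread %x enters for process %x" , old_thread , pid );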
trunk/kernel/kern/process.h
r428 r433 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017 )6 * Alain Greiner (2016,2017,2018) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … 65 65 66 66 /********************************************************************************************* 67 * This enum defines the process states for ALMOS_MKH. 68 ********************************************************************************************/ 69 70 enum process_states 71 { 72 PROCESS_STATE_RUNNING = 0, /*! process is executing */ 73 PROCESS_STATE_STOPPED = 1, /*! process has been stopped by a signal */ 74 PROCESS_STATE_KILLED = 2, /*! process has been killed by a signal */ 75 PROCESS_STATE_EXITED = 3, /*! process terminated with an exit */ 76 }; 67 * The termination state is a 32 bits word: 68 * - the 8 LSB bits contain the user defined exit status 69 * - the 24 other bits contain the flags defined below 70 ********************************************************************************************/ 71 72 #define PROCESS_FLAG_BLOCK 0x100 /*! process received a SIGSTOP signal */ 73 #define PROCESS_FLAG_KILL 0x200 /*! process terminated by a sys_kill() */ 74 #define PROCESS_FLAG_EXIT 0x400 /*! process terminated by a sys_exit() */ 75 #define PROCESS_FLAG_WAIT 0x800 /*! parent process executed successfully a sys_wait() */ 77 76 78 77 /********************************************************************************************* … 118 117 * 6) The <local_list>, <copies_list>, <th_tbl>, <th_nr>, <th_lock> fields 119 118 * are defined in all process descriptors copies. 120 119 * 7) The termination <flags> and <exit_status> are only defined in the reference cluster. 120 ********************************************************************************************/ 121 121 … 155 155 remote_spinlock_t sync_lock; /*! lock protecting sem,mutex,barrier,condvar lists */ 156 156 157 uint32_t state; /*! RUNNING / STOPPED / KILLED / EXITED*/157 uint32_t term_state; /*! termination status (flags & exit status) */ 158 158 159 159 bool_t txt_owner; /*! current TXT owner */ … 168 168 typedef struct exec_info_s 169 169 { 170 pid_t pid; /*! process identifier (both parent and child) */171 172 170 char path[CONFIG_VFS_MAX_PATH_LENGTH]; /*! .elf file path */ 173 171 … 276 274 277 275 /********************************************************************************************* 278 * This function returns a printable string defining the process state.279 *********************************************************************************************280 * @ state : RUNNING / BLOCKED / EXITED / KILLED281 * @ return a string pointer.282 ********************************************************************************************/283 char * process_state_str( uint32_t state );284 285 /*********************************************************************************************286 276 * This debug function displays on the kernel terminal TXT0 detailed information on a 287 277 * reference process identified by the <process_xp> argument. … 324 314 325 315 /********************************************************************************************* 326 * This function blocks all threads (but the client thread defined by the <client_xp>327 * argument) for a given <process> in a given cluster.316 * This function blocks all threads for a given <process> in a given cluster. 317 * The calling thread cannot be a target thread. 
328 318 * It loops on all local threads of the process, set the THREAD_BLOCKED_GLOBAL bit, 329 319 * and request the relevant schedulers to acknowledge the blocking, using IPI if required. … 332 322 ********************************************************************************************* 333 323 * @ process : pointer on the target process descriptor. 334 * @ client_xp : extended pointer on the client thread, that should not be blocked. 335 ********************************************************************************************/ 336 void process_block_threads( process_t * process, 337 xptr_t client_xp ); 324 ********************************************************************************************/ 325 void process_block_threads( process_t * process ); 338 326 339 327 /********************************************************************************************* … 345 333 346 334 /********************************************************************************************* 347 * This function delete all threads, (but the client thread defined by the <client_xp> 348 * argument) for a given <process> in a given cluster. 335 * This function marks for deletion all threads - but one - for a given <process> 336 * in a given cluster. The main thread in owner cluster is NOT marked. 337 * It will be marked for deletion by the parent process sys_wait(). 338 * The calling thread cannot be a target thread. 349 339 * It loops on all local threads of the process, and set the THREAD_FLAG_REQ_DELETE bit. 350 340 * For each marked thread, the following actions will be done by the scheduler at the next … 357 347 ********************************************************************************* 358 348 * @ process : pointer on the process descriptor. 359 * @ client_xp : extended pointer on the client thread, that should not be deleted. 360 ********************************************************************************************/ 361 void process_delete_threads( process_t * process, 362 xptr_t client_xp ); 349 ********************************************************************************************/ 350 void process_delete_threads( process_t * process ); 363 351 364 352 /********************************************************************************************* … 396 384 * associated "child" thread descriptor in the local cluster. This function can involve 397 385 * up to three different clusters : 398 * - the local (child) cluster can be any cluster defined by the sys_fork function.386 * - the child (local) cluster can be any cluster defined by the sys_fork function. 399 387 * - the parent cluster must be the reference cluster for the parent process. 400 388 * - the client cluster containing the thread requesting the fork can be any cluster. … 416 404 417 405 /********************************************************************************************* 418 * This function implement the "exit" system call, and is called by the sys_exit() function. 419 * It must be executed by a thread running in the calling process owner cluster. 420 * It uses twice the multicast RPC_PROCESS_SIGNAL to first block all process threads 421 * in all clusters, and then delete all threads and process descriptors. 422 ********************************************************************************* 423 * @ pid : process identifier. 424 * @ status : exit return value. 
425 ********************************************************************************************/ 426 void process_make_exit( pid_t pid, 427 uint32_t status ); 428 429 /********************************************************************************************* 430 * This function implement the "kill" system call, and is called by the sys_kill() function. 431 * It must be executed by a thread running in the target process owner cluster. 432 * Only the SIGKILL, SIGSTOP, and SIGCONT signals are supported. 433 * User defined handlers are not supported. 434 * It uses once or twice the multicast RPC_PROCESS_SIGNAL to block, unblock or delete 435 * all process threads in all clusters, and then delete process descriptors. 436 ********************************************************************************************* 437 * @ pid : process identifier. 438 * @ sig_id : signal type. 439 ********************************************************************************************/ 440 void process_make_kill( pid_t pid, 441 uint32_t sig_id ); 406 * This function is called by both the sys_kill() and sys_exit() system calls. 407 * It must be executed by an RPC thread running in the target process owner cluster. 408 * It uses twice the process_sigaction() function: 409 * - first, to block all target process threads, in all clusters. 410 * - second, to delete all target process threads in all clusters. 411 * Finally, it synchronizes with the parent process sys_wait() function that MUST be called 412 * by the parent process main thread. 413 ********************************************************************************************* 414 * @ process : pointer on process descriptor in owner cluster. 415 * @ is_exit : true when called by sys_exit() / false when called by sys_kill(). 416 * @ exit_status : exit status, when called by sys_exit(). 417 ********************************************************************************************/ 418 void process_make_kill( process_t * process, 419 bool_t is_exit, 420 uint32_t exit_status ); 442 421 443 422 -
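Note on the term_state encoding: the comment block above fixes the layout (8 LSB bits = user defined exit status, upper bits = PROCESS_FLAG_*). A minimal sketch of accessors assuming exactly that layout; these helpers are illustrative and not part of the changeset:

    // extract the user defined exit status (8 LSB bits)
    static inline uint32_t process_term_status( uint32_t term_state )
    {
        return (term_state & 0xFF);
    }

    // check whether the process terminated through sys_exit()
    static inline bool_t process_term_is_exit( uint32_t term_state )
    {
        return ((term_state & PROCESS_FLAG_EXIT) != 0);
    }

A sys_wait() implementation could, for instance, report process_term_status() to the parent once it observes PROCESS_FLAG_EXIT or PROCESS_FLAG_KILL.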
trunk/kernel/kern/rpc.c
r428 r433 42 42 #include <rpc.h> 43 43 44 45 ///////////////////////////////////////////////////////////////////////////////////////// 46 // Debug macros for marshalling functions 47 ///////////////////////////////////////////////////////////////////////////////////////// 48 49 #if CONFIG_DEBUG_RPC_MARSHALING 50 51 #define RPC_DEBUG_ENTER \ 52 uint32_t cycle = (uint32_t)hal_get_cycles(); \ 53 if( cycle > CONFIG_DEBUG_RPC_MARSHALING ) \ 54 printk("\n[DBG] %s : enter thread %x on core[%x,%d] / cycle %d\n", \ 55 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, CURRENT_THREAD->core->lid , cycle ); 56 57 #define RPC_DEBUG_EXIT \ 58 cycle = (uint32_t)hal_get_cycles(); \ 59 if( cycle > CONFIG_DEBUG_RPC_MARSHALING ) \ 60 printk("\n[DBG] %s : exit thread %x on core[%x,%d] / cycle %d\n", \ 61 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, CURRENT_THREAD->core->lid , cycle ); 62 63 #else 64 65 #define RPC_DEBUG_ENTER 66 67 #define RPC_DEBUG_EXIT 68 69 #endif 70 44 71 ///////////////////////////////////////////////////////////////////////////////////////// 45 72 // array of function pointers (must be consistent with enum in rpc.h) … … 50 77 &rpc_pmem_get_pages_server, // 0 51 78 &rpc_pmem_release_pages_server, // 1 52 &rpc_ process_make_exec_server, // 279 &rpc_undefined, // 2 unused slot 53 80 &rpc_process_make_fork_server, // 3 54 &rpc_ process_make_exit_server, // 481 &rpc_undefined, // 4 unused slot 55 82 &rpc_process_make_kill_server, // 5 56 83 &rpc_thread_user_create_server, // 6 … … 68 95 &rpc_vfs_mapper_load_all_server, // 17 69 96 &rpc_fatfs_get_cluster_server, // 18 70 &rpc_undefined, // 19 97 &rpc_undefined, // 19 unused slot 71 98 72 99 &rpc_vmm_get_vseg_server, // 20 … … 497 524 498 525 ///////////////////////////////////////////////////////////////////////////////////////// 499 // [2] Marshaling functions attached to RPC_PROCESS_MAKE_EXEC (blocking) 500 ///////////////////////////////////////////////////////////////////////////////////////// 501 502 ///////////////////////////////////////////////////// 503 void rpc_process_make_exec_client( cxy_t cxy, 504 exec_info_t * info, // in 505 error_t * error ) // out 506 { 507 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 508 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 509 CURRENT_THREAD->core->lid , hal_time_stamp() ); 510 511 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 512 513 // initialise RPC descriptor header 514 rpc_desc_t rpc; 515 rpc.index = RPC_PROCESS_MAKE_EXEC; 516 rpc.response = 1; 517 rpc.blocking = true; 518 519 // set input arguments in RPC descriptor 520 rpc.args[0] = (uint64_t)(intptr_t)info; 521 522 // register RPC request in remote RPC fifo (blocking function) 523 rpc_send( cxy , &rpc ); 524 525 // get output arguments from RPC descriptor 526 *error = (error_t)rpc.args[1]; 527 528 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 529 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 530 CURRENT_THREAD->core->lid , hal_time_stamp() ); 531 } 532 533 ////////////////////////////////////////////// 534 void rpc_process_make_exec_server( xptr_t xp ) 535 { 536 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 537 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 538 CURRENT_THREAD->core->lid , hal_time_stamp() ); 539 540 exec_info_t * ptr; // local pointer on remote exec_info structure 541 exec_info_t info; // local copy of exec_info structure 542 error_t error; // local error error status 543 544 // get client cluster 
identifier and pointer on RPC descriptor 545 cxy_t client_cxy = (cxy_t)GET_CXY( xp ); 546 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 547 548 // get pointer on exec_info structure in client cluster from RPC descriptor 549 ptr = (exec_info_t*)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 550 551 // copy exec_info structure from client buffer to server buffer 552 hal_remote_memcpy( XPTR( client_cxy , ptr ), 553 XPTR( local_cxy , &info ), 554 sizeof(exec_info_t) ); 555 556 // call local kernel function 557 error = process_make_exec( &info ); 558 559 // set output argument into client RPC descriptor 560 hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 561 562 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 563 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 564 CURRENT_THREAD->core->lid , hal_time_stamp() ); 565 } 526 // [2] undefined slot 527 ///////////////////////////////////////////////////////////////////////////////////////// 566 528 567 529 ///////////////////////////////////////////////////////////////////////////////////////// … … 644 606 645 607 ///////////////////////////////////////////////////////////////////////////////////////// 646 // [4] Marshaling functions attached to RPC_PROCESS_MAKE_EXIT (blocking) 608 // [4] undefined slot 609 ///////////////////////////////////////////////////////////////////////////////////////// 610 611 ///////////////////////////////////////////////////////////////////////////////////////// 612 // [5] Marshaling functions attached to RPC_PROCESS_MAKE_KILL (blocking) 647 613 ///////////////////////////////////////////////////////////////////////////////////////// 648 614 649 615 /////////////////////////////////////////////////// 650 void rpc_process_make_exit_client( cxy_t cxy, 651 pid_t pid, 616 void rpc_process_make_kill_client( cxy_t cxy, 617 process_t * process, 618 bool_t is_exit, 652 619 uint32_t status ) 653 620 { … … 656 623 CURRENT_THREAD->core->lid , hal_time_stamp() ); 657 624 658 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 659 660 // initialise RPC descriptor header 661 rpc_desc_t rpc; 662 rpc.index = RPC_PROCESS_MAKE_EXIT; 625 // initialise RPC descriptor header 626 rpc_desc_t rpc; 627 rpc.index = RPC_PROCESS_MAKE_KILL; 663 628 rpc.response = 1; 664 629 rpc.blocking = true; 665 630 666 631 // set input arguments in RPC descriptor 667 rpc.args[0] = (uint64_t)pid; 668 rpc.args[1] = (uint64_t)status; 632 rpc.args[0] = (uint64_t)(intptr_t)process; 633 rpc.args[1] = (uint64_t)is_exit; 634 rpc.args[2] = (uint64_t)status; 669 635 670 636 // register RPC request in remote RPC fifo (blocking function) … … 677 643 678 644 ////////////////////////////////////////////// 679 void rpc_process_make_ exit_server( xptr_t xp )645 void rpc_process_make_kill_server( xptr_t xp ) 680 646 { 681 647 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", … … 683 649 CURRENT_THREAD->core->lid , hal_time_stamp() ); 684 650 685 pid_t pid; 686 uint32_t status; 651 process_t * process; 652 bool_t is_exit; 653 uint32_t status; 687 654 688 655 // get client cluster identifier and pointer on RPC descriptor … … 691 658 692 659 // get arguments from RPC descriptor 693 pid = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 694 status = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) ); 660 process = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 661 is_exit = (bool_t) hal_remote_lwd( XPTR( 
client_cxy , &desc->args[1] ) ); 662 status = (uint32_t) hal_remote_lwd( XPTR( client_cxy , &desc->args[2] ) ); 695 663 696 664 // call local kernel function 697 process_make_ exit( pid, status );665 process_make_kill( process , is_exit , status ); 698 666 699 667 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", … … 703 671 704 672 ///////////////////////////////////////////////////////////////////////////////////////// 705 // [5] Marshaling functions attached to RPC_PROCESS_MAKE_KILL (blocking) 706 ///////////////////////////////////////////////////////////////////////////////////////// 707 708 /////////////////////////////////////////////////// 709 void rpc_process_make_kill_client( cxy_t cxy, 710 pid_t pid, 711 uint32_t sig_id ) 712 { 713 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 714 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 715 CURRENT_THREAD->core->lid , hal_time_stamp() ); 716 717 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 718 719 // initialise RPC descriptor header 720 rpc_desc_t rpc; 721 rpc.index = RPC_PROCESS_MAKE_KILL; 722 rpc.response = 1; 723 rpc.blocking = true; 724 725 // set input arguments in RPC descriptor 726 rpc.args[0] = (uint64_t)pid; 727 rpc.args[1] = (uint64_t)sig_id; 728 729 // register RPC request in remote RPC fifo (blocking function) 730 rpc_send( cxy , &rpc ); 731 732 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 733 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 734 CURRENT_THREAD->core->lid , hal_time_stamp() ); 735 } 736 737 ////////////////////////////////////////////// 738 void rpc_process_make_kill_server( xptr_t xp ) 739 { 740 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 741 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 742 CURRENT_THREAD->core->lid , hal_time_stamp() ); 743 744 pid_t pid; 745 uint32_t sig_id; 746 747 // get client cluster identifier and pointer on RPC descriptor 748 cxy_t client_cxy = (cxy_t)GET_CXY( xp ); 749 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 750 751 // get arguments from RPC descriptor 752 pid = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 753 sig_id = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) ); 754 755 // call local kernel function 756 process_make_exit( pid , sig_id ); 757 758 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 759 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 760 CURRENT_THREAD->core->lid , hal_time_stamp() ); 761 } 762 763 ///////////////////////////////////////////////////////////////////////////////////////// 764 // [6] Marshaling functions attached to RPC_THREAD_USER_CREATE (blocking) 673 // [6] Marshaling functions attached to RPC_THREAD_USER_CREATE (blocking) 765 674 ///////////////////////////////////////////////////////////////////////////////////////// 766 675 … … 1036 945 1037 946 // call relevant kernel function 1038 if (action == DELETE_ALL_THREADS ) process_delete_threads ( process , client_xp);1039 else if (action == BLOCK_ALL_THREADS ) process_block_threads ( process , client_xp);1040 else if (action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process 947 if (action == DELETE_ALL_THREADS ) process_delete_threads ( process ); 948 else if (action == BLOCK_ALL_THREADS ) process_block_threads ( process ); 949 else if (action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process ); 1041 950 1042 951 // decrement the responses counter in RPC descriptor, -
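Note on the marshaling skeleton: the rewritten RPC_PROCESS_MAKE_KILL functions above follow the blocking client/server pattern shared by all RPCs in this file. A condensed sketch for a hypothetical one-argument RPC_FOO (the RPC_FOO slot name, the args[] layout and the foo() kernel function are assumptions for illustration only):

    // client side : marshal the argument, post the request, wait for completion
    void rpc_foo_client( cxy_t cxy , uint32_t arg )
    {
        rpc_desc_t rpc;
        rpc.index    = RPC_FOO;          // slot in the rpc_server[] array
        rpc.response = 1;                // one expected response
        rpc.blocking = true;             // client thread blocks until served
        rpc.args[0]  = (uint64_t)arg;    // input argument
        rpc_send( cxy , &rpc );          // register in remote RPC fifo (blocking)
    }

    // server side : unmarshal the argument from the client descriptor, do the work
    void rpc_foo_server( xptr_t xp )
    {
        cxy_t        client_cxy = GET_CXY( xp );
        rpc_desc_t * desc       = GET_PTR( xp );
        uint32_t     arg = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
        foo( arg );                      // call the local kernel function
    }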
trunk/kernel/kern/rpc.h
r428 r433 62 62 RPC_PMEM_GET_PAGES = 0, 63 63 RPC_PMEM_RELEASE_PAGES = 1, 64 RPC_ PROCESS_MAKE_EXEC= 2,64 RPC_UNDEFINED_2 = 2, 65 65 RPC_PROCESS_MAKE_FORK = 3, 66 RPC_ PROCESS_MAKE_EXIT= 4,66 RPC_UNDEFINED_4 = 4, 67 67 RPC_PROCESS_MAKE_KILL = 5, 68 68 RPC_THREAD_USER_CREATE = 6, … … 80 80 RPC_VFS_MAPPER_LOAD_ALL = 17, 81 81 RPC_FATFS_GET_CLUSTER = 18, 82 RPC_UNDEFINED_19 = 19, 82 83 83 84 RPC_VMM_GET_VSEG = 20, … … 210 211 211 212 /*********************************************************************************** 212 * [2] The RPC_PROCESS_MAKE_EXEC creates a new process descriptor, from an existing 213 * process descriptor in a remote server cluster. This server cluster must be 214 * the owner cluster for the existing process. The new process descriptor is 215 * initialized from informations found in the <exec_info> structure. 216 * A new main thread descriptor is created in the server cluster. 217 * All copies of the old process descriptor and all old threads are destroyed. 218 *********************************************************************************** 219 * @ cxy : server cluster identifier. 220 * @ process : [in] local pointer on the exec_info structure in client cluster. 221 * @ error : [out] error status (0 if success). 222 **********************************************************************************/ 223 void rpc_process_make_exec_client( cxy_t cxy, 224 struct exec_info_s * info, 225 error_t * error ); 226 227 void rpc_process_make_exec_server( xptr_t xp ); 213 * [2] undefined slot 214 **********************************************************************************/ 228 215 229 216 /*********************************************************************************** … … 251 238 252 239 /*********************************************************************************** 253 * [4] The RPC_PROCESS_MAKE_EXIT can be called by any thread to request the owner 254 * cluster to execute the process_make_exit() function for the target process. 255 *********************************************************************************** 256 * @ cxy : owner cluster identifier. 257 * @ pid : target process identifier. 258 * @ status : calling process exit status. 259 **********************************************************************************/ 260 void rpc_process_make_exit_client( cxy_t cxy, 261 pid_t pid, 262 uint32_t status ); 263 264 void rpc_process_make_exit_server( xptr_t xp ); 240 * [4] undefined slot 241 **********************************************************************************/ 265 242 266 243 /*********************************************************************************** … … 269 246 *********************************************************************************** 270 247 * @ cxy : owner cluster identifier. 271 * @ pid : target process identifier. 272 * @ seg_id : signal type (only SIGKILL / SIGSTOP / SIGCONT are supported). 248 * @ process : pointer on process in owner cluster. 
249 * @ is_exit : true if called by sys_exit() / false if called by sys_kill() 250 * @ status : exit status (only when called by sys_exit()) 251 **********************************************************************************/ 274 void rpc_process_make_kill_client( cxy_t cxy, 275 pid_t pid, 276 uint32_t seg_id ); 253 struct process_s * process, 254 bool_t is_exit, 255 uint32_t status ); 277 256 278 257 void rpc_process_make_kill_server( xptr_t xp ); … 517 496 518 497 /*********************************************************************************** 498 * [19] undefined slot 499 **********************************************************************************/ 500 501 /*********************************************************************************** 519 502 * [20] The RPC_VMM_GET_VSEG returns an extended pointer 520 503 * on the vseg containing a given virtual address in a given process. -
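Note on usage: process_make_kill() now asserts that it runs on an RPC thread in the owner cluster, and the new client above no longer rejects a local target cluster, so the call can be delegated uniformly. A hedged sketch of the expected dispatch from a sys_kill()-like caller (the surrounding syscall logic is assumed; only the two functions declared above are from this changeset):

    // delegate the kill to an RPC thread of the owner cluster,
    // which may be the local cluster itself
    cxy_t owner_cxy = CXY_FROM_PID( pid );
    rpc_process_make_kill_client( owner_cxy , process_ptr , false , 0 );

where process_ptr must be valid in the owner cluster, as required by the argument comment above.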
trunk/kernel/kern/scheduler.c
r428 r433 178 178 179 179 /////////////////////////////////////////// 180 void sched_handle_ requests( core_t * core )180 void sched_handle_signals( core_t * core ) 181 181 { 182 182 list_entry_t * iter; … … 231 231 thread_destroy( thread ); 232 232 233 sched_dmsg("\n[DBG] %s : thread %x deleted thread %x / cycle %d\n", 234 __FUNCTION__ , CURRENT_THREAD , thread , (uint32_t)hal_get_cycles() ); 235 233 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS 234 uint32_t cycle = (uint32_t)hal_get_cycles(); 235 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 236 printk("\n[DBG] %s : thread %x deleted thread %x / cycle %d\n", 237 __FUNCTION__ , CURRENT_THREAD , thread , cycle ); 238 #endif 236 239 // destroy process descriptor if no more threads 237 240 if( process->th_nr == 0 ) … … 240 243 process_destroy( process ); 241 244 242 sched_dmsg("\n[DBG] %s : thread %x deleted process %x / cycle %d\n", 243 __FUNCTION__ , CURRENT_THREAD , process , (uint32_t)hal_get_cycles() ); 245 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS 246 cycle = (uint32_t)hal_get_cycles(); 247 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 248 printk("\n[DBG] %s : thread %x deleted process %x / cycle %d\n", 249 __FUNCTION__ , CURRENT_THREAD , process , cycle ); 250 #endif 244 251 245 252 } … … 251 258 spinlock_unlock( &sched->lock ); 252 259 253 } // end sched_handle_ requests()260 } // end sched_handle_signals() 254 261 255 262 //////////////////////////////// … … 261 268 scheduler_t * sched = &core->scheduler; 262 269 263 #if( CONFIG_SCHED_DEBUG & 0x1 ) 264 if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( core->lid ); 270 #if (CONFIG_DEBUG_SCHED_YIELD & 0x1) 271 if( CONFIG_DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() ) 272 sched_display( core->lid ); 265 273 #endif 266 274 … … 291 299 { 292 300 293 sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n" 301 #if CONFIG_DEBUG_SCHED_YIELD 302 uint32_t cycle = (uint32_t)hal_get_cycles(); 303 if( CONFIG_DEBUG_SCHED_YIELD < cycle ) 304 printk("\n[DBG] %s : core[%x,%d] / cause = %s\n" 294 305 " thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n", 295 306 __FUNCTION__, local_cxy, core->lid, cause, 296 307 current, thread_type_str(current->type), current->process->pid, current->trdid, 297 next , thread_type_str(next->type) , next->process->pid , next->trdid,298 (uint32_t)hal_get_cycles() ); 308 next , thread_type_str(next->type) , next->process->pid , next->trdid , cycle ); 309 #endif 299 310 300 311 // update scheduler … … 316 327 { 317 328 318 #if( CONFIG_ SCHED_DEBUG& 0x1 )319 if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) 320 printk("\n[DBG] %s : core[%x,%d] / cause = %s\n" 321 "thread %x (%s) (%x,%x) continue / cycle %d\n",329 #if( CONFIG_DEBUG_SCHED_YIELD & 0x1 ) 330 uint32_t cycle = (uint32_t)hal_get_cycles(); 331 if( CONFIG_DEBUG_SCHED_YIELD < cycle ) 332 printk("\n[DBG] %s : core[%x,%d] / cause = %s / thread %x (%s) (%x,%x) continue / cycle %d\n", 322 333 __FUNCTION__, local_cxy, core->lid, cause, 323 current, thread_type_str(current->type), current->process->pid, current->trdid, 324 (uint32_t)hal_get_cycles() ); 334 current, thread_type_str(current->type), current->process->pid, current->trdid, cycle ); 325 335 #endif 326 336 … … 328 338 329 339 // handle pending requests for all threads executing on this core. 330 sched_handle_ requests( core );340 sched_handle_signals( core ); 331 341 332 342 // exit critical section / restore SR from next thread context -
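Note on the delete path: sched_handle_signals() (renamed from sched_handle_requests()) is where a thread flagged THREAD_FLAG_REQ_DELETE is actually reclaimed, on the next yield of its core. A condensed sketch of that path, as visible in the hunks above (list traversal, locking, and the unlink of the thread from its process are elided / assumed):

    // executed under the scheduler lock, for each attached thread
    if( thread->flags & THREAD_FLAG_REQ_DELETE )
    {
        process_t * process = thread->process;
        thread_destroy( thread );                 // release the thread descriptor
        if( process->th_nr == 0 )                 // last thread is gone
            process_destroy( process );           // release the process descriptor
    }

This is the consumer side of the idiom used by process_make_exec() and process_make_kill() above: block the target thread, set THREAD_FLAG_REQ_DELETE, and let its scheduler perform the destruction.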
trunk/kernel/kern/scheduler.h
r428 r433 91 91 * @ core : local pointer on the core descriptor. 92 92 ********************************************************************************************/ 93 void sched_handle_ requests( struct core_s * core );93 void sched_handle_signals( struct core_s * core ); 94 94 95 95 /********************************************************************************************* -
trunk/kernel/kern/thread.c
r428 r433 227 227 assert( (attr != NULL) , __FUNCTION__, "pthread attributes must be defined" ); 228 228 229 thread_dmsg("\n[DBG] %s : core[%x,%d] enter for process %x\n", 230 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid ); 229 #if CONFIG_DEBUG_THREAD_USER_CREATE 230 uint32_t cycle = (uint32_t)hal_get_cycles(); 231 if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle ) 232 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n", 233 __FUNCTION__, CURRENT_THREAD, pid , cycle ); 234 #endif 231 235 232 236 // get process descriptor local copy 233 237 process = process_get_local_copy( pid ); 234 235 238 if( process == NULL ) 236 239 { … 326 329 dqdt_local_update_threads( 1 ); 327 330 328 thread_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x / trdid = %x / core = %d\n", 329 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, thread->trdid, core_lid ); 331 #if CONFIG_DEBUG_THREAD_USER_CREATE 332 cycle = (uint32_t)hal_get_cycles(); 333 if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle ) 334 printk("\n[DBG] %s : thread %x exit / process %x / new_thread %x / core %d / cycle %d\n", 335 __FUNCTION__, CURRENT_THREAD, pid, thread, core_lid, cycle ); 336 #endif 330 337 331 338 *new_thread = thread; … 359 366 vseg_t * vseg; // child thread STACK vseg 360 367 361 thread_dmsg("\n[DBG] %s : core[%x,%d] enters at cycle %d\n", 362 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() ); 368 #if CONFIG_DEBUG_THREAD_USER_FORK 369 uint32_t cycle = (uint32_t)hal_get_cycles(); 370 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle ) 371 printk("\n[DBG] %s : thread %x enter / child_process %x / cycle %d\n", 372 __FUNCTION__, CURRENT_THREAD, child_process->pid, cycle ); 373 #endif 363 374 364 375 // select a target core in local cluster … 474 485 } 475 486 476 // increment p age descriptor fork_nr for the referencedpage if mapped487 // increment pending forks counter for the page if mapped 477 488 if( mapped ) 478 489 { … 480 491 cxy_t page_cxy = GET_CXY( page_xp ); 481 492 page_t * page_ptr = (page_t *)GET_PTR( page_xp ); 482 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , 1 ); 483 484 thread_dmsg("\n[DBG] %s : core[%x,%d] copied PTE to child GPT : vpn %x\n", 485 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ); 493 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 ); 494 495 #if (CONFIG_DEBUG_THREAD_USER_FORK & 1) 496 cycle = (uint32_t)hal_get_cycles(); 497 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle ) 498 printk("\n[DBG] %s : thread %x copied stack PTE to child GPT : vpn %x\n", 499 __FUNCTION__, CURRENT_THREAD, vpn ); 500 #endif 486 501 487 502 } 488 503 } 489 504 490 // set COW flag for STAK vseg in parent thread GPT 491 hal_gpt_flip_cow( true, // set cow 492 parent_gpt_xp, 493 vpn_base, 494 vpn_size ); 505 // set COW flag for all mapped entries of STACK vseg in parent thread GPT 506 hal_gpt_set_cow( parent_gpt_xp, 507 vpn_base, 508 vpn_size ); 495 509 496 510 // update DQDT for child thread 497 511 dqdt_local_update_threads( 1 ); 498 512 499 thread_dmsg("\n[DBG] %s : core[%x,%d] exit / created main thread %x for process %x\n", 500 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, child_ptr->trdid, child_process->pid ); 513 #if CONFIG_DEBUG_THREAD_USER_FORK 514 cycle = (uint32_t)hal_get_cycles(); 515 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle ) 516 printk("\n[DBG] %s : thread %x exit / child_process %x / child_thread %x / cycle %d\n", 517 __FUNCTION__, CURRENT_THREAD, child_process->pid, child_ptr, cycle ); 518 #endif 501 519 
502 520 return 0; … 514 532 thread_t * thread; // pointer on new thread descriptor 515 533 516 thread_dmsg("\n[DBG] %s : core[%x,%d] enters / type %s / cycle %d\n",517 __FUNCTION__ , local_cxy , core_lid , thread_type_str( type ) , hal_time_stamp() );518 519 534 assert( ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) , 520 535 __FUNCTION__ , "illegal thread type" ); 521 536 522 537 assert( (core_lid < LOCAL_CLUSTER->cores_nr) , 523 538 __FUNCTION__ , "illegal core_lid" ); 539 540 #if CONFIG_DEBUG_THREAD_KERNEL_CREATE 541 uint32_t cycle = (uint32_t)hal_get_cycles(); 542 if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle ) 543 printk("\n[DBG] %s : thread %x enter / requested_type %s / cycle %d\n", 544 __FUNCTION__, CURRENT_THREAD, thread_type_str(type), cycle ); 545 #endif 524 546 525 547 // allocate memory for new thread descriptor … 549 571 dqdt_local_update_threads( 1 ); 550 572 551 thread_dmsg("\n[DBG] %s : core = [%x,%d] exit / trdid = %x / type %s / cycle %d\n", 552 __FUNCTION__, local_cxy, core_lid, thread->trdid, thread_type_str(type), hal_time_stamp() ); 573 #if CONFIG_DEBUG_THREAD_KERNEL_CREATE 574 cycle = (uint32_t)hal_get_cycles(); 575 if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle ) 576 printk("\n[DBG] %s : thread %x exit / new_thread %x / type %s / cycle %d\n", 577 __FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle ); 578 #endif 553 579 554 580 *new_thread = thread; … 589 615 void thread_destroy( thread_t * thread ) 590 616 { 591 uint32_t tm_start;592 uint32_t tm_end;593 617 reg_t save_sr; 594 618 596 620 core_t * core = thread->core; 597 621 598 thread_dmsg("\n[DBG] %s : enters for thread %x in process %x / type = %s\n", 599 __FUNCTION__ , thread->trdid , process->pid , thread_type_str( thread->type ) ); 622 #if CONFIG_DEBUG_THREAD_DESTROY 623 uint32_t cycle = (uint32_t)hal_get_cycles(); 624 if( CONFIG_DEBUG_THREAD_DESTROY < cycle ) 625 printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n", 626 __FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle ); 627 #endif 600 628 601 629 assert( (thread->children_nr == 0) , __FUNCTION__ , "still attached children" ); … 604 632 605 633 assert( (thread->remote_locks == 0) , __FUNCTION__ , "all remote locks not released" ); 606 607 tm_start = hal_get_cycles();608 634 609 635 // update instrumentation values … 635 661 thread_release( thread ); 636 662 637 tm_end = hal_get_cycles(); 638 639 thread_dmsg("\n[DBG] %s : exit for thread %x in process %x / duration = %d\n", 640 __FUNCTION__, thread->trdid , process->pid , tm_end - tm_start ); 663 #if CONFIG_DEBUG_THREAD_DESTROY 664 cycle = (uint32_t)hal_get_cycles(); 665 if( CONFIG_DEBUG_THREAD_DESTROY < cycle ) 666 printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / cycle %d\n", 667 __FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle ); 668 #endif 641 669 642 670 } // end thread_destroy() … 779 807 hal_fence(); 780 808 809 #if CONFIG_DEBUG_THREAD_BLOCK 810 uint32_t cycle = (uint32_t)hal_get_cycles(); 811 if( CONFIG_DEBUG_THREAD_BLOCK < cycle ) 812 printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / state %x / cycle %d\n", 813 __FUNCTION__ , CURRENT_THREAD , thread , cause , thread->blocked , cycle ); 814 #endif 815 781 816 } // end thread_block() 782 817 783 ///////////////////////////////////////// 784 uint32_t thread_unblock( xptr_t thread ,818 //////////////////////////////////////////// 819 uint32_t thread_unblock( xptr_t thread_xp, 785 820 uint32_t cause ) 786 821 {
787 822 // get thread cluster and local pointer 788 cxy_t cxy = GET_CXY( thread );789 thread_t * ptr = (thread_t *)GET_PTR( thread);823 cxy_t cxy = GET_CXY( thread_xp ); 824 thread_t * ptr = GET_PTR( thread_xp ); 790 825 791 826 // reset blocking cause … 793 828 hal_fence(); 794 829 830 #if CONFIG_DEBUG_THREAD_BLOCK 831 uint32_t cycle = (uint32_t)hal_get_cycles(); 832 if( CONFIG_DEBUG_THREAD_BLOCK < cycle ) 833 printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / state %x / cycle %d\n", 834 __FUNCTION__ , CURRENT_THREAD , ptr , cause , ptr->blocked , cycle ); 835 #endif 836 795 837 // return a non zero value if the cause bit is modified 796 838 return( previous & cause ); … 805 847 thread_t * killer = CURRENT_THREAD; 806 848 807 thread_dmsg("\n[DBG] %s : killer thread %x enter for target thread %x\n", 808 __FUNCTION__, local_cxy, killer->trdid , target->trdid ); 849 #if CONFIG_DEBUG_THREAD_KILL 850 uint32_t cycle = (uint32_t)hal_get_cycles(); 851 if( CONFIG_DEBUG_THREAD_KILL < cycle ) 852 printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n", 853 __FUNCTION__, killer, target, cycle ); 854 #endif 809 855 810 856 // set the global blocked bit in target thread descriptor. … 835 881 hal_atomic_or( &target->flags , THREAD_FLAG_REQ_DELETE ); 836 882 837 thread_dmsg("\n[DBG] %s : killer thread %x exit for target thread %x\n", 838 __FUNCTION__, local_cxy, killer->trdid , target->trdid ); 883 #if CONFIG_DEBUG_THREAD_KILL 884 cycle = (uint32_t)hal_get_cycles(); 885 if( CONFIG_DEBUG_THREAD_KILL < cycle ) 886 printk("\n[DBG] %s : thread %x exit for target thread %x / cycle %d\n", 887 __FUNCTION__, killer, target, cycle ); 888 #endif 839 889 840 890 } // end thread_kill() … 851 901 { 852 902 853 idle_dmsg("\n[DBG] %s : core[%x][%d] goes to sleep at cycle %d\n", 854 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() ); 903 #if CONFIG_DEBUG_THREAD_IDLE 904 uint32_t cycle = (uint32_t)hal_get_cycles(); 905 thread_t * this = CURRENT_THREAD; 906 if( CONFIG_DEBUG_THREAD_IDLE < cycle ) 907 printk("\n[DBG] %s : idle thread %x on core[%x,%d] goes to sleep / cycle %d\n", 908 __FUNCTION__, this, local_cxy, this->core->lid, cycle ); 909 #endif 855 910 856 911 hal_core_sleep(); 857 912 858 idle_dmsg("\n[DBG] %s : core[%x][%d] wake up at cycle %d\n", 859 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() ); 913 #if CONFIG_DEBUG_THREAD_IDLE 914 cycle = (uint32_t)hal_get_cycles(); 915 if( CONFIG_DEBUG_THREAD_IDLE < cycle ) 916 printk("\n[DBG] %s : idle thread %x on core[%x,%d] wake up / cycle %d\n", 917 __FUNCTION__, this, local_cxy, this->core->lid, cycle ); 918 #endif 860 919 861 920 } -
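Note on thread_unblock(): the function still returns (previous & cause), i.e. a non-zero value only when the cause bit was effectively reset. A short usage sketch (the caller context is assumed):

    // wake the target only if it was actually blocked on WAIT;
    // a zero return means the bit was not set and nothing changed
    if( thread_unblock( XPTR( cxy , thread_ptr ) , THREAD_BLOCKED_WAIT ) == 0 )
    {
        // spurious wakeup request : the target was not waiting
    }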
trunk/kernel/libk/elf.c
r407 r433 201 201 vfs_file_count_up( file_xp ); 202 202 203 elf_dmsg("\n[DBG] %s : found %s vseg / base = %x / size = %x\n" 204 " file_size = %x / file_offset = %x / mapper_xp = %l\n", 205 __FUNCTION__ , vseg_type_str(vseg->type) , vseg->min , vseg->max - vseg->min , 206 vseg->file_size , vseg->file_offset , vseg->mapper_xp ); 203 #if CONFIG_DEBUG_ELF_LOAD 204 uint32_t cycle = (uint32_t)hal_get_cycles(); 205 if( CONFIG_DEBUG_ELF_LOAD < cycle ) 206 printk("\n[DBG] %s : found %s vseg / base %x / size %x\n" 207 " file_size %x / file_offset %x / mapper_xp %l / cycle %d\n", 208 __FUNCTION__ , vseg_type_str(vseg->type) , vseg->min , vseg->max - vseg->min , 209 vseg->file_size , vseg->file_offset , vseg->mapper_xp , cycle ); 210 #endif 211 207 212 } … 223 228 error_t error; 224 229 225 elf_dmsg("\n[DBG] %s : core[%x,%d] enter for <%s>\n", 226 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pathname ); 230 #if CONFIG_DEBUG_ELF_LOAD 231 uint32_t cycle = (uint32_t)hal_get_cycles(); 232 if( CONFIG_DEBUG_ELF_LOAD < cycle ) 233 printk("\n[DBG] %s : thread %x enter for <%s> / cycle %d\n", 234 __FUNCTION__, CURRENT_THREAD, pathname, cycle ); 235 #endif 227 236 228 237 // avoid GCC warning … 243 252 } 244 253 245 elf_dmsg("\n[DBG] %s : open file <%s>\n", __FUNCTION__ , pathname ); 254 #if (CONFIG_DEBUG_ELF_LOAD & 1) 255 if( CONFIG_DEBUG_ELF_LOAD < cycle ) 256 printk("\n[DBG] %s : open file <%s>\n", __FUNCTION__, pathname ); 257 #endif 246 258 247 259 // load header in local buffer … 256 268 } 257 269 258 elf_dmsg("\n[DBG] %s : loaded elf header for %s\n", __FUNCTION__ , pathname ); 270 #if (CONFIG_DEBUG_ELF_LOAD & 1) 271 if( CONFIG_DEBUG_ELF_LOAD < cycle ) 272 printk("\n[DBG] %s : loaded elf header for %s\n", __FUNCTION__ , pathname ); 273 #endif 259 274 260 275 if( header.e_phnum == 0 ) … 293 308 } 294 309 295 elf_dmsg("\n[DBG] %s : segments array allocated for %s\n", __FUNCTION__ , pathname ); 310 #if (CONFIG_DEBUG_ELF_LOAD & 1) 311 if( CONFIG_DEBUG_ELF_LOAD < cycle ) 312 printk("\n[DBG] %s : segments array allocated for %s\n", __FUNCTION__ , pathname ); 313 #endif 296 314 297 315 // load seg descriptors array to local buffer … 310 328 } 311 329 312 elf_dmsg("\n[DBG] %s loaded segments descriptors for %s \n", __FUNCTION__ , pathname ); 330 #if (CONFIG_DEBUG_ELF_LOAD & 1) 331 if( CONFIG_DEBUG_ELF_LOAD < cycle ) 332 printk("\n[DBG] %s loaded segments descriptors for %s \n", __FUNCTION__ , pathname ); 333 #endif 313 334 314 335 // register loadable segments in process VMM … 335 356 kmem_free(&req); 336 357 337 elf_dmsg("\n[DBG] %s : core[%x,%d] exit for <%s> / entry_point = %x\n", 338 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pathname , header.e_entry ); 358 #if CONFIG_DEBUG_ELF_LOAD 359 cycle = (uint32_t)hal_get_cycles(); 360 if( CONFIG_DEBUG_ELF_LOAD < cycle ) 361 printk("\n[DBG] %s : thread %x exit for <%s> / entry_point %x / cycle %d\n", 362 __FUNCTION__, CURRENT_THREAD, pathname, header.e_entry, cycle ); 363 #endif 339 364 340 365 return 0; -
trunk/kernel/libk/remote_rwlock.c
r423 r433 41 41 hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->count ) , 0 ); 42 42 43 #if CONFIG_ LOCKS_DEBUG43 #if CONFIG_DEBUG_LOCKS 44 44 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL ); 45 45 xlist_entry_init( XPTR( lock_cxy , &lock_ptr->list ) ); … … 86 86 thread_ptr->remote_locks++; 87 87 88 #if CONFIG_ LOCKS_DEBUG88 #if CONFIG_DEBUG_LOCKS 89 89 xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) , 90 90 XPTR( lock_cxy , &lock_ptr->list ) ); … … 126 126 thread_ptr->remote_locks--; 127 127 128 #if CONFIG_ LOCKS_DEBUG128 #if CONFIG_DEBUG_LOCKS 129 129 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) ); 130 130 #endif … … 176 176 } 177 177 178 #if CONFIG_ LOCKS_DEBUG178 #if CONFIG_DEBUG_LOCKS 179 179 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , 180 180 XPTR( local_cxy , thread_ptr ) ); -
trunk/kernel/libk/remote_spinlock.c
r423 r433 39 39 hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 ); 40 40 41 #if CONFIG_ LOCKS_DEBUG41 #if CONFIG_DEBUG_LOCKS 42 42 hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL ); 43 43 xlist_entry_init( XPTR( cxy , &ptr->list ) ); … … 76 76 thread_ptr->remote_locks++; 77 77 78 #if CONFIG_ LOCKS_DEBUG78 #if CONFIG_DEBUG_LOCKS 79 79 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , 80 80 XPTR( local_cxy , thread_ptr) ); … … 121 121 thread_ptr->remote_locks++; 122 122 123 #if CONFIG_ LOCKS_DEBUG123 #if CONFIG_DEBUG_LOCKS 124 124 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , 125 125 XPTR( local_cxy , thread_ptr) ); … … 144 144 thread_t * thread_ptr = CURRENT_THREAD; 145 145 146 #if CONFIG_ LOCKS_DEBUG146 #if CONFIG_DEBUG_LOCKS 147 147 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL ); 148 148 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) ); … … 197 197 thread_ptr->remote_locks++; 198 198 199 #if CONFIG_ LOCKS_DEBUG199 #if CONFIG_DEBUG_LOCKS 200 200 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , 201 201 XPTR( local_cxy , thread_ptr) ); … … 218 218 thread_t * thread_ptr = CURRENT_THREAD; 219 219 220 #if CONFIG_ LOCKS_DEBUG220 #if CONFIG_DEBUG_LOCKS 221 221 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL ); 222 222 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) ); -
trunk/kernel/libk/remote_spinlock.h
r409 r433 72 72 * This function releases a remote busy_waiting spinlock. 73 73 * It restores the CPU SR state. 74 * It decrements the calling thread locks count. 74 75 ******************************************************************************************* 75 76 * @ lock_xp : extended pointer on remote spinlock. … … 100 101 /*************************************************************************************** 101 102 * This function releases a remote spinlock. 103 * It decrements the calling thread locks count. 102 104 *************************************************************************************** 103 105 * @ lock_xp : extended pointer on the remote spinlock -
trunk/kernel/libk/rwlock.c
r409 r433 38 38 lock->count = 0; 39 39 40 #if CONFIG_ LOCKS_DEBUG40 #if CONFIG_DEBUG_LOCKS 41 41 lock->owner = NULL; 42 42 list_entry_init( &lock->list ); … … 70 70 this->local_locks++; 71 71 72 #if CONFIG_ LOCKS_DEBUG72 #if CONFIG_DEBUG_LOCKS 73 73 list_add_first( &this->locks_root , &lock->list ); 74 74 #endif … … 98 98 this->local_locks--; 99 99 100 #if CONFIG_ LOCKS_DEBUG100 #if CONFIG_DEBUG_LOCKS 101 101 list_unlink( &lock->list ); 102 102 #endif … … 138 138 this->local_locks++; 139 139 140 #if CONFIG_ LOCKS_DEBUG140 #if CONFIG_DEBUG_LOCKS 141 141 lock->owner = this; 142 142 list_add_first( &this->locks_root , &lock->list ); … … 157 157 hal_disable_irq( &mode ); 158 158 159 #if CONFIG_ LOCKS_DEBUG159 #if CONFIG_DEBUG_LOCKS 160 160 lock->owner = NULL; 161 161 list_unlink( &lock->list ); -
trunk/kernel/libk/spinlock.c
r409 r433 38 38 lock->taken = 0; 39 39 40 #if CONFIG_ LOCKS_DEBUG40 #if CONFIG_DEBUG_LOCKS 41 41 lock->owner = NULL; 42 42 list_entry_init( &lock->list ); … … 71 71 this->local_locks++; 72 72 73 #if CONFIG_ LOCKS_DEBUG73 #if CONFIG_DEBUG_LOCKS 74 74 lock->owner = this; 75 75 list_add_first( &this->locks_root , &lock->list ); … … 86 86 thread_t * this = CURRENT_THREAD;; 87 87 88 #if CONFIG_ LOCKS_DEBUG88 #if CONFIG_DEBUG_LOCKS 89 89 lock->owner = NULL; 90 90 list_unlink( &lock->list ); … … 132 132 this->local_locks++; 133 133 134 #if CONFIG_ LOCKS_DEBUG134 #if CONFIG_DEBUG_LOCKS 135 135 lock->owner = this; 136 136 list_add_first( &this->locks_root , &lock->list ); … … 162 162 this->local_locks++; 163 163 164 #if CONFIG_ LOCKS_DEBUG164 #if CONFIG_DEBUG_LOCKS 165 165 lock->owner = this; 166 166 list_add_first( &this->locks_root , &lock->list ); … … 177 177 thread_t * this = CURRENT_THREAD; 178 178 179 #if CONFIG_ LOCKS_DEBUG179 #if CONFIG_DEBUG_LOCKS 180 180 lock->owner = NULL; 181 181 list_unlink( &lock->list ); -
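Note on the lock debug fields: the CONFIG_LOCKS_DEBUG -> CONFIG_DEBUG_LOCKS rename in the lock files above aligns the lock bookkeeping with the new CONFIG_DEBUG_* naming scheme. The guarded fields (owner, list) record which thread holds each lock; a debug build could exploit them, for instance with the following hypothetical check on release (not part of this changeset):

    #if CONFIG_DEBUG_LOCKS
    // detect a release attempted by a thread that does not own the lock
    assert( (lock->owner == CURRENT_THREAD) , __FUNCTION__ ,
            "spinlock released by a non-owner thread" );
    #endif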
trunk/kernel/mm/kcm.c
r407 r433 47 47 kcm_page_t * kcm_page ) 48 48 { 49 kcm_dmsg("\n[DBG] %s : enters for %s / page %x / count = %d / active = %d\n", 50 __FUNCTION__ , kmem_type_str( kcm->type ) , 51 (intptr_t)kcm_page , kcm_page->count , kcm_page->active ); 49 50 #if CONFIG_DEBUG_KCM_ALLOC 51 uint32_t cycle = (uint32_t)hal_get_cycles(); 52 if( CONFIG_DEBUG_KCM_ALLOC < cycle ) 53 printk("\n[DBG] %s : thread %x enters for %s / page %x / count %d / active %d\n", 54 __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) , 55 (intptr_t)kcm_page , kcm_page->count , kcm_page->active ); 56 #endif 52 57 53 58 assert( kcm_page->active , __FUNCTION__ , "kcm_page should be active" ); … … 80 85 + (index * kcm->block_size) ); 81 86 82 kcm_dmsg("\n[DBG] %s : allocated one block %s / ptr = %p / page = %x / count = %d\n", 83 __FUNCTION__ , kmem_type_str( kcm->type ) , ptr , 84 (intptr_t)kcm_page , kcm_page->count ); 87 #if CONFIG_DEBUG_KCM_ALLOC 88 cycle = (uint32_t)hal_get_cycles(); 89 if( CONFIG_DEBUG_KCM_ALLOC < cycle ) 90 printk("\n[DBG] %s : thread %x exit / type %s / ptr %p / page %x / count %d\n", 91 __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) , ptr , 92 (intptr_t)kcm_page , kcm_page->count ); 93 #endif 85 94 86 95 return ptr; … … 300 309 kcm->active_pages_nr ++; 301 310 kcm_page->active = 1; 302 303 kcm_dmsg("\n[DBG] %s : enters for type %s at cycle %d / new page = %x / count = %d\n",304 __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() ,305 (intptr_t)kcm_page , kcm_page->count );306 307 311 } 308 312 else // get first page from active list … … 310 314 // get page pointer from active list 311 315 kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list ); 312 313 kcm_dmsg("\n[DBG] %s : enters for type %s at cycle %d / page = %x / count = %d\n",314 __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() ,315 (intptr_t)kcm_page , kcm_page->count );316 316 } 317 317 -
trunk/kernel/mm/kmem.c
r429 r433 198 198 if( type == KMEM_PAGE ) // PPM allocator 199 199 { 200 201 #if CONFIG_DEBUG_KMEM_ALLOC 202 if( CONFIG_DEBUG_KMEM_ALLOC < (uint32_t)hal_get_cycles() ) 203 printk("\n[DBG] in %s : thread %x enter for %d page(s)\n", 204 __FUNCTION__ , CURRENT_THREAD , 1<<size ); 205 #endif 206 200 207 // allocate the number of requested pages 201 208 ptr = (void *)ppm_alloc_pages( size ); … … 213 220 __FUNCTION__, local_cxy , kmem_type_str( type ) , 214 221 (intptr_t)ptr , (intptr_t)ppm_page2base( ptr ) ); 222 223 #if CONFIG_DEBUG_KMEM_ALLOC 224 if( CONFIG_DEBUG_KMEM_ALLOC < (uint32_t)hal_get_cycles() ) 225 printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x\n", 226 __FUNCTION__ , CURRENT_THREAD , 1<<size , ppm_page2ppn( XPTR( local_cxy , ptr ) ) ); 227 #endif 228 215 229 } 216 230 else if( type == KMEM_GENERIC ) // KHM allocator -
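The same enter/exit guard is repeated in every function instrumented by this changeset (dev_txt, kcm, kmem, ppm, vmm, syscalls). It could be factored as below. This is a sketch, not committed code, and it relies on the compiler eliminating the body when the constant is 0, where the committed code uses an explicit #if.

/* Sketch only: the cycle-gated trace pattern of this changeset, factored
 * as a variadic macro. Each CONFIG_DEBUG_* constant is both a switch
 * (0 = disabled) and an activation date: the message prints only once
 * the cycle counter has passed the configured value. */
#define DEBUG_TRACE( flag , fmt , ... )                             \
do                                                                  \
{                                                                   \
    if( (flag) && ((uint32_t)(flag) < (uint32_t)hal_get_cycles()) ) \
    {                                                               \
        printk("\n[DBG] %s : " fmt , __FUNCTION__ , __VA_ARGS__ );  \
    }                                                               \
}                                                                   \
while( 0 )

/* equivalent to the kmem_alloc() instrumentation above:
 *    DEBUG_TRACE( CONFIG_DEBUG_KMEM_ALLOC ,
 *                 "thread %x enter for %d page(s)\n" ,
 *                 CURRENT_THREAD , 1<<size );                      */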
trunk/kernel/mm/page.c
r408 r433
47  47       page->index    = 0;
48  48       page->refcount = 0;
49           page->fork_nr  = 0;
    49       page->forks    = 0;
50  50
51  51       spinlock_init( &page->lock );
-
trunk/kernel/mm/page.h
r408 r433
56  56   * This structure defines a physical page descriptor.
57  57   * Size is 64 bytes for a 32 bits core...
    58   * TODO : the list of waiting threads seems to be unused [AG]
    59   * TODO : the spinlock use has to be clarified [AG]
58  60   ************************************************************************************/
59  61
… …
67  69       xlist_entry_t  wait_root;     /*! root of list of waiting threads    (16) */
68  70       uint32_t       refcount;      /*! reference counter                   (4) */
69           uint32_t       fork_nr;       /*! number of pending forks             (4) */
70           spinlock_t     lock;          /*! only used to set the PG_LOCKED flag (16) */
    71       uint32_t       forks;         /*! number of pending forks             (4) */
    72       spinlock_t     lock;          /*! To Be Defined [AG]                  (16) */
71  73   }
72  74   page_t;
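The rename from fork_nr to forks goes with a change of discipline: the counter is now always updated with remote atomic accesses, from several places in vmm.c below (vmm_fork_copy, vmm_set_cow, vmm_unmap_vseg, vmm_get_pte). A condensed sketch of its life cycle, assembled from those hunks rather than quoted verbatim; each stage belongs to a different call site.

/* Sketch (not committed code): life cycle of page->forks, which counts
 * the child GPTs still sharing this physical page in copy-on-write mode. */
static void forks_counter_example( ppn_t ppn )
{
    xptr_t   page_xp  = ppm_ppn2page( ppn );       // page descriptor (extended)
    cxy_t    page_cxy = GET_CXY( page_xp );        // owning cluster
    page_t * page_ptr = GET_PTR( page_xp );        // local pointer
    xptr_t   forks_xp = XPTR( page_cxy , &page_ptr->forks );

    // fork time (vmm_fork_copy / vmm_set_cow) : one more sharer
    hal_remote_atomic_add( forks_xp , 1 );

    // COW fault time (vmm_get_pte) : one sharer received a private copy
    hal_remote_atomic_add( forks_xp , -1 );

    // unmap time (vmm_unmap_vseg) : free the frame only when unshared
    if( hal_remote_lw( forks_xp ) )
        hal_remote_atomic_add( forks_xp , -1 );
    // else : release the page with kmem_free() or an RPC, as in the hunk
}
-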
trunk/kernel/mm/ppm.c
r407 r433 193 193 list_add_first( &ppm->free_pages_root[current_order] , ¤t->list ); 194 194 ppm->free_pages_nr[current_order] ++; 195 } 195 196 } // end ppm_free_pages_nolock() 196 197 197 198 //////////////////////////////////////////// … … 201 202 page_t * remaining_block; 202 203 uint32_t current_size; 204 205 #if CONFIG_DEBUG_PPM_ALLOC_PAGES 206 uint32_t cycle = (uint32_t)hal_get_cycles(); 207 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle ) 208 printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n", 209 __FUNCTION__ , CURRENT_THREAD , 1<<order, cycle ); 210 #endif 211 212 #if(CONFIG_DEBUG_PPM_ALLOC_PAGES & 0x1) 213 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle ) 214 ppm_print(); 215 #endif 203 216 204 217 ppm_t * ppm = &LOCAL_CLUSTER->ppm; … … 208 221 209 222 page_t * block = NULL; 210 211 ppm_dmsg("\n[DBG] %s : enters / order = %d\n",212 __FUNCTION__ , order );213 223 214 224 // take lock protecting free lists … … 231 241 spinlock_unlock( &ppm->free_lock ); 232 242 243 #if CONFIG_DEBUG_PPM_ALLOC_PAGES 244 cycle = (uint32_t)hal_get_cycles(); 245 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle ) 246 printk("\n[DBG] in %s : thread %x cannot allocate %d page(s) at cycle %d\n", 247 __FUNCTION__ , CURRENT_THREAD , 1<<order, cycle ); 248 #endif 249 233 250 return NULL; 234 251 } … … 260 277 spinlock_unlock( &ppm->free_lock ); 261 278 262 ppm_dmsg("\n[DBG] %s : base = %x / order = %d\n", 263 __FUNCTION__ , (uint32_t)ppm_page2base( block ) , order ); 279 #if CONFIG_DEBUG_PPM_ALLOC_PAGES 280 cycle = (uint32_t)hal_get_cycles(); 281 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle ) 282 printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x / cycle %d\n", 283 __FUNCTION__, CURRENT_THREAD, 1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle ); 284 #endif 264 285 265 286 return block; 266 } 287 288 } // end ppm_alloc_pages() 267 289 268 290 … … 272 294 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 273 295 296 #if CONFIG_DEBUG_PPM_FREE_PAGES 297 uint32_t cycle = (uint32_t)hal_get_cycles(); 298 if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle ) 299 printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n", 300 __FUNCTION__ , CURRENT_THREAD , 1<<page->order , cycle ); 301 #endif 302 303 #if(CONFIG_DEBUG_PPM_FREE_PAGES & 0x1) 304 if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle ) 305 ppm_print(); 306 #endif 307 274 308 // get lock protecting free_pages[] array 275 309 spinlock_lock( &ppm->free_lock ); … … 279 313 // release lock protecting free_pages[] array 280 314 spinlock_unlock( &ppm->free_lock ); 315 316 #if CONFIG_DEBUG_PPM_FREE_PAGES 317 cycle = (uint32_t)hal_get_cycles(); 318 if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle ) 319 printk("\n[DBG] in %s : thread %x exit / %d page(s) released / ppn = %x / cycle %d\n", 320 __FUNCTION__, CURRENT_THREAD, 1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle ); 321 #endif 322 281 323 } 282 324 283 //////////////////////////// 284 void ppm_print( ppm_t * ppm, 285 char * string ) 325 //////////////// 326 void ppm_print() 286 327 { 287 328 uint32_t order; … … 289 330 page_t * page; 290 331 332 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 333 291 334 // get lock protecting free lists 292 335 spinlock_lock( &ppm->free_lock ); 293 336 294 printk("\n*** PPM in cluster %x : %d pages / &pages_tbl = %x / vaddr_base = %x ***\n", 295 local_cxy , ppm->pages_nr , (intptr_t)ppm->pages_tbl , (intptr_t)ppm->vaddr_base ); 337 printk("\n*** PPM in cluster %x : %d pages ***\n", local_cxy , ppm->pages_nr ); 296 338 297 339 for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ ) 298 340 { 
299 printk("- order = %d / free_pages = %d [",341 printk("- order = %d / free_pages = %d\t: ", 300 342 order , ppm->free_pages_nr[order] ); 301 343 … … 303 345 { 304 346 page = LIST_ELEMENT( iter , page_t , list ); 305 printk("% d," , page - ppm->pages_tbl );347 printk("%x," , page - ppm->pages_tbl ); 306 348 } 307 349 308 printk(" ]\n", NULL);350 printk("\n"); 309 351 } 310 352 -
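Note the API change: ppm_print() lost both arguments and now always displays the local cluster allocator, so existing call sites must be updated. A sketch of the new usage; the "after boot" context string in the old-style call is illustrative, not quoted from a real call site.

/* Sketch: updated call sites for the new ppm_print() prototype.
 *   old : ppm_print( &LOCAL_CLUSTER->ppm , "after boot" );
 *   new : the local cluster PPM is selected implicitly.     */
void ppm_debug_dump_example( void )
{
    ppm_print();    // dumps all free lists of the local cluster PPM
}

As wired in the hunks above, the same dump can also be triggered on every allocation or release by setting bit 0 of the CONFIG_DEBUG_PPM_ALLOC_PAGES / CONFIG_DEBUG_PPM_FREE_PAGES activation values.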
trunk/kernel/mm/ppm.h
r409 r433
52  52   * from the "kernel_heap" section.
53  53   * This low-level allocator implements the buddy algorithm: an allocated block is
54       * an integer number n of 4 Kbytes pages, and n (called order) is a power of 2.
    54   * an integer number n of 4 small pages, and n (called order) is a power of 2.
… …
163 163
164 164  /*****************************************************************************************
165       * This function prints the PPM allocator status.
166       *****************************************************************************************
167       * @ ppm      : pointer on PPM allocator.
168       * @ string   : define context of display.
    165   * This function prints the PPM allocator status in the calling thread cluster.
169 166   ****************************************************************************************/
170      void ppm_print( ppm_t * ppm,
171                      char * string );
    167  void ppm_print();
172 168
173 169  /*****************************************************************************************
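As the header comment says, ppm_alloc_pages() takes an order (log2 of the number of small pages), not a byte count. A minimal usage sketch, using only functions visible in this changeset (ppm_alloc_pages, ppm_page2base, ppm_free_pages):

/* Sketch: allocate and release a 4-page contiguous kernel buffer with
 * the buddy allocator. 1<<order small pages are returned; the kernel
 * virtual base address is obtained with ppm_page2base(). */
page_t * page = ppm_alloc_pages( 2 );          // order 2 => 4 pages
if( page != NULL )
{
    void * base = ppm_page2base( page );       // buffer base address
    /* ... use the 4 * CONFIG_PPM_PAGE_SIZE bytes at base ... */
    ppm_free_pages( page );                    // back to the free lists
}
-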
trunk/kernel/mm/vmm.c
r429 r433 63 63 intptr_t size; 64 64 65 vmm_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n", 66 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid ); 65 #if CONFIG_DEBUG_VMM_INIT 66 uint32_t cycle = (uint32_t)hal_get_cycles(); 67 if( CONFIG_DEBUG_VMM_INIT ) 68 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 69 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle ); 70 #endif 67 71 68 72 // get pointer on VMM … … 179 183 hal_fence(); 180 184 181 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x / entry_point = %x\n", 182 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , 183 process->pid , process->vmm.entry_point ); 185 #if CONFIG_DEBUG_VMM_INIT 186 cycle = (uint32_t)hal_get_cycles(); 187 if( CONFIG_DEBUG_VMM_INIT ) 188 printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n", 189 __FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle ); 190 #endif 184 191 185 192 return 0; … … 211 218 { 212 219 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 213 vseg = (vseg_t *)GET_PTR( vseg_xp );220 vseg = GET_PTR( vseg_xp ); 214 221 215 222 printk(" - %s : base = %X / size = %X / npages = %d\n", … … 239 246 } // vmm_display() 240 247 241 /////////////////////i//////////////////// 242 void vmm_ update_pte( process_t * process,243 vpn_t vpn,244 uint32_t attr,245 ppn_t ppn )248 /////////////////////i////////////////////////// 249 void vmm_global_update_pte( process_t * process, 250 vpn_t vpn, 251 uint32_t attr, 252 ppn_t ppn ) 246 253 { 247 254 … … 258 265 cxy_t owner_cxy; 259 266 lpid_t owner_lpid; 267 268 #if CONFIG_DEBUG_VMM_UPDATE_PTE 269 uint32_t cycle = (uint32_t)hal_get_cycles(); 270 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle ) 271 printk("\n[DBG] %s : thread %x enter for process %x / vpn %x / cycle %d\n", 272 __FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle ); 273 #endif 274 275 // check cluster is reference 276 assert( (GET_CXY( process->ref_xp ) == local_cxy) , __FUNCTION__, 277 "not called in reference cluster\n"); 260 278 261 279 // get extended pointer on root of process copies xlist in owner cluster … … 271 289 // get cluster and local pointer on remote process 272 290 remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); 273 remote_process_ptr = (process_t *)GET_PTR( remote_process_xp );291 remote_process_ptr = GET_PTR( remote_process_xp ); 274 292 remote_process_cxy = GET_CXY( remote_process_xp ); 293 294 #if (CONFIG_DEBUG_VMM_UPDATE_PTE & 0x1) 295 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle ) 296 printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n", 297 __FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy ); 298 #endif 275 299 276 300 // get extended pointer on remote gpt 277 301 remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt ); 278 302 279 hal_gpt_update_pte( remote_gpt_xp, 280 vpn, 281 attr, 282 ppn ); 303 // update remote GPT 304 hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn ); 283 305 } 284 } // end vmm_update_pte() 306 307 #if CONFIG_DEBUG_VMM_UPDATE_PTE 308 cycle = (uint32_t)hal_get_cycles(); 309 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle ) 310 printk("\n[DBG] %s : thread %x exit for process %x / vpn %x / cycle %d\n", 311 __FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle ); 312 #endif 313 314 } // end vmm_global_update_pte() 285 315 286 316 /////////////////////////////////////// … … 308 338 lpid_t owner_lpid; 309 339 310 vmm_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n", 
311 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid ); 340 #if CONFIG_DEBUG_VMM_SET_COW 341 uint32_t cycle = (uint32_t)hal_get_cycles(); 342 if( CONFIG_DEBUG_VMM_SET_COW < cycle ) 343 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 344 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle ); 345 #endif 312 346 313 347 // check cluster is reference … … 333 367 // get cluster and local pointer on remote process 334 368 remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); 335 remote_process_ptr = (process_t *)GET_PTR( remote_process_xp );369 remote_process_ptr = GET_PTR( remote_process_xp ); 336 370 remote_process_cxy = GET_CXY( remote_process_xp ); 337 371 338 vmm_dmsg("\n[DBG] %s : core[%x,%d] handling process %x in cluster %x\n", 339 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid , remote_process_cxy ); 372 #if (CONFIG_DEBUG_VMM_SET_COW &0x1) 373 if( CONFIG_DEBUG_VMM_SET_COW < cycle ) 374 printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n", 375 __FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy ); 376 #endif 340 377 341 378 // get extended pointer on remote gpt … … 347 384 // get pointer on vseg 348 385 vseg_xp = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist ); 349 vseg = (vseg_t *)GET_PTR( vseg_xp );386 vseg = GET_PTR( vseg_xp ); 350 387 351 388 assert( (GET_CXY( vseg_xp ) == local_cxy) , __FUNCTION__, … … 357 394 vpn_t vpn_size = vseg->vpn_size; 358 395 359 vmm_dmsg("\n[DBG] %s : core[%x,%d] handling vseg %s / vpn_base = %x / vpn_size = %x\n", 360 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vseg_type_str(type), vpn_base, vpn_size ); 361 362 // set COW flag on the remote GPT depending on vseg type 396 #if (CONFIG_DEBUG_VMM_SET_COW & 0x1) 397 if( CONFIG_DEBUG_VMM_SET_COW < cycle ) 398 printk("\n[DBG] %s : thread %x handling vseg %s / vpn_base = %x / vpn_size = %x\n", 399 __FUNCTION__, CURRENT_THREAD , vseg_type_str(type), vpn_base, vpn_size ); 400 #endif 401 // only DATA, ANON and REMOTE vsegs 363 402 if( (type == VSEG_TYPE_DATA) || 364 403 (type == VSEG_TYPE_ANON) || 365 404 (type == VSEG_TYPE_REMOTE) ) 366 405 { 367 hal_gpt_flip_cow( true, // set_cow 368 remote_gpt_xp, 369 vpn_base, 370 vpn_size ); 371 } 372 } // en loop on vsegs 406 vpn_t vpn; 407 uint32_t attr; 408 ppn_t ppn; 409 xptr_t page_xp; 410 cxy_t page_cxy; 411 page_t * page_ptr; 412 xptr_t forks_xp; 413 414 // update flags in remote GPT 415 hal_gpt_set_cow( remote_gpt_xp, 416 vpn_base, 417 vpn_size ); 418 419 // atomically increment pending forks counter in physical pages, 420 // for all vseg pages that are mapped in reference cluster 421 if( remote_process_cxy == local_cxy ) 422 { 423 // the reference GPT is the local GPT 424 gpt_t * gpt = GET_PTR( remote_gpt_xp ); 425 426 // scan all pages in vseg 427 for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ ) 428 { 429 // get page attributes and PPN from reference GPT 430 hal_gpt_get_pte( gpt , vpn , &attr , &ppn ); 431 432 // atomically update pending forks counter if page is mapped 433 if( attr & GPT_MAPPED ) 434 { 435 page_xp = ppm_ppn2page( ppn ); 436 page_cxy = GET_CXY( page_xp ); 437 page_ptr = GET_PTR( page_xp ); 438 forks_xp = XPTR( page_cxy , &page_ptr->forks ); 439 hal_remote_atomic_add( forks_xp , 1 ); 440 } 441 } // end loop on vpn 442 } // end if local 443 } // end if vseg type 444 } // end loop on vsegs 373 445 } // end loop on process copies 374 446 375 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n", 376 __FUNCTION__ , 
local_cxy , CURRENT_THREAD->core->lid , process->pid ); 447 #if CONFIG_DEBUG_VMM_SET_COW 448 cycle = (uint32_t)hal_get_cycles(); 449 if( CONFIG_DEBUG_VMM_SET_COW < cycle ) 450 printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n", 451 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle ); 452 #endif 377 453 378 454 } // end vmm_set-cow() … … 404 480 ppn_t ppn; 405 481 406 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter\n", 407 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ); 482 #if CONFIG_DEBUG_VMM_FORK_COPY 483 uint32_t cycle = (uint32_t)hal_get_cycles(); 484 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle ) 485 printk("\n[DBG] %s : thread %x enter / cycle %d\n", 486 __FUNCTION__ , CURRENT_THREAD, cycle ); 487 #endif 408 488 409 489 // get parent process cluster and local pointer 410 490 parent_cxy = GET_CXY( parent_process_xp ); 411 parent_process = (process_t *)GET_PTR( parent_process_xp );491 parent_process = GET_PTR( parent_process_xp ); 412 492 413 493 // get local pointers on parent and child VMM … … 445 525 // get local and extended pointers on current parent vseg 446 526 parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 447 parent_vseg = (vseg_t *)GET_PTR( parent_vseg_xp );527 parent_vseg = GET_PTR( parent_vseg_xp ); 448 528 449 529 // get vseg type 450 530 type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) ); 451 531 452 453 vmm_dmsg("\n[DBG] %s : core[%x,%d] found parent vseg %s / vpn_base = %x\n", 454 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vseg_type_str(type), 455 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) ); 532 #if CONFIG_DEBUG_VMM_FORK_COPY 533 cycle = (uint32_t)hal_get_cycles(); 534 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle ) 535 printk("\n[DBG] %s : thread %x found parent vseg %s / vpn_base = %x / cycle %d\n", 536 __FUNCTION__ , CURRENT_THREAD, vseg_type_str(type), 537 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle ); 538 #endif 456 539 457 540 // all parent vsegs - but STACK - must be copied in child VSL … … 473 556 vseg_attach( child_vmm , child_vseg ); 474 557 475 vmm_dmsg("\n[DBG] %s : core[%x,%d] copied to child VSL : vseg %s / vpn_base = %x\n", 476 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vseg_type_str(type), 477 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) ); 558 #if CONFIG_DEBUG_VMM_FORK_COPY 559 cycle = (uint32_t)hal_get_cycles(); 560 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle ) 561 printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n", 562 __FUNCTION__ , CURRENT_THREAD , vseg_type_str(type), 563 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle ); 564 #endif 478 565 479 566 // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT … … 502 589 } 503 590 504 // increment p age descriptor fork_nr for the referencedpage if mapped591 // increment pending forks counter in page if mapped 505 592 if( mapped ) 506 593 { 507 594 page_xp = ppm_ppn2page( ppn ); 508 595 page_cxy = GET_CXY( page_xp ); 509 page_ptr = (page_t *)GET_PTR( page_xp ); 510 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , 1 ); 511 512 vmm_dmsg("\n[DBG] %s : core[%x,%d] copied to child GPT : vpn %x\n", 513 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ); 596 page_ptr = GET_PTR( page_xp ); 597 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 ); 598 599 #if CONFIG_DEBUG_VMM_FORK_COPY 600 cycle = (uint32_t)hal_get_cycles(); 601 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle ) 602 printk("\n[DBG] %s : thread %x 
copied vpn %x to child GPT / cycle %d\n", 603 __FUNCTION__ , CURRENT_THREAD , vpn , cycle ); 604 #endif 514 605 515 606 } … … 558 649 hal_fence(); 559 650 651 #if CONFIG_DEBUG_VMM_FORK_COPY 652 cycle = (uint32_t)hal_get_cycles(); 653 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle ) 654 printk("\n[DBG] %s : thread %x exit successfully / cycle %d\n", 655 __FUNCTION__ , CURRENT_THREAD , cycle ); 656 #endif 657 560 658 return 0; 561 659 … … 568 666 vseg_t * vseg; 569 667 570 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter\n", 571 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ); 572 573 // get pointer on VMM 668 #if CONFIG_DEBUG_VMM_DESTROY 669 uint32_t cycle = (uint32_t)hal_get_cycles(); 670 if( CONFIG_DEBUG_VMM_DESTROY < cycle ) 671 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 672 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle ); 673 #endif 674 675 // get pointer on local VMM 574 676 vmm_t * vmm = &process->vmm; 575 677 … … 586 688 // get pointer on first vseg in VSL 587 689 vseg_xp = XLIST_FIRST_ELEMENT( root_xp , vseg_t , xlist ); 588 vseg = (vseg_t *)GET_PTR( vseg_xp );589 590 // unmap and release all pages690 vseg = GET_PTR( vseg_xp ); 691 692 // unmap rand release physical pages if required) 591 693 vmm_unmap_vseg( process , vseg ); 592 694 … … 598 700 } 599 701 600 // release lock 702 // release lock protecting VSL 601 703 remote_rwlock_wr_unlock( lock_xp ); 602 704 … … 616 718 hal_gpt_destroy( &vmm->gpt ); 617 719 618 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit\n", 619 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ); 720 #if CONFIG_DEBUG_VMM_DESTROY 721 cycle = (uint32_t)hal_get_cycles(); 722 if( CONFIG_DEBUG_VMM_DESTROY < cycle ) 723 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 724 __FUNCTION__ , CURRENT_THREAD , cycle ); 725 #endif 620 726 621 727 } // end vmm_destroy() … … 637 743 { 638 744 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 639 vseg = (vseg_t *)GET_PTR( vseg_xp );745 vseg = GET_PTR( vseg_xp ); 640 746 641 747 if( ((vpn_base + vpn_size) > vseg->vpn_base) && … … 766 872 error_t error; 767 873 768 vmm_dmsg("\n[DBG] %s : core[%x,%d] enters / process %x / base %x / size %x / %s / cxy = %x\n", 769 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , 770 process->pid , base , size , vseg_type_str(type) , cxy ); 874 #if CONFIG_DEBUG_VMM_CREATE_VSEG 875 uint32_t cycle = (uint32_t)hal_get_cycles(); 876 if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle ) 877 printk("\n[DBG] %s : thread %x enter / process %x / base %x / size %x / %s / cxy %x / cycle %d\n", 878 __FUNCTION__, CURRENT_THREAD, process->pid, base, size, vseg_type_str(type), cxy, cycle ); 879 #endif 771 880 772 881 // get pointer on VMM … … 854 963 remote_rwlock_wr_unlock( lock_xp ); 855 964 856 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit / process %x / base %x / size %x / type %s\n", 857 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , 858 process->pid , base , size , vseg_type_str(type) ); 965 #if CONFIG_DEBUG_VMM_CREATE_VSEG 966 cycle = (uint32_t)hal_get_cycles(); 967 if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle ) 968 printk("\n[DBG] %s : thread %x exit / process %x / %s / cxy %x / cycle %d\n", 969 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str(type), cxy, cycle ); 970 #endif 859 971 860 972 return vseg; … … 985 1097 cxy_t page_cxy; // page descriptor cluster 986 1098 page_t * page_ptr; // page descriptor pointer 987 988 vmm_dmsg("\n[DBG] %s : core[%x, %d] enter / process %x / vseg %s / base %x / cycle %d\n", 989 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid , 
990 vseg_type_str( vseg->type ), vseg->vpn_base, (uint32_t)hal_get_cycles() ); 991 992 // get pointer on process GPT 1099 xptr_t forks_xp; // extended pointer on pending forks counter 1100 uint32_t count; // actual number of pendinf forks 1101 1102 #if CONFIG_DEBUG_VMM_UNMAP_VSEG 1103 uint32_t cycle = (uint32_t)hal_get_cycles(); 1104 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle ) 1105 printk("\n[DBG] %s : thread %x enter / process %x / vseg %s / base %x / cycle %d\n", 1106 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle ); 1107 #endif 1108 1109 // get pointer on local GPT 993 1110 gpt_t * gpt = &process->vmm.gpt; 994 1111 … … 1007 1124 "an user vseg must use small pages" ); 1008 1125 1009 // unmap GPT entry 1126 // unmap GPT entry in all GPT copies 1010 1127 hal_gpt_reset_pte( gpt , vpn ); 1011 1128 1012 // release memory if not identity mapped 1013 if( (vseg->flags & VSEG_IDENT) == 0 ) 1129 // handle pending forks counter if 1130 // 1) not identity mapped 1131 // 2) running in reference cluster 1132 if( ((vseg->flags & VSEG_IDENT) == 0) && 1133 (GET_CXY( process->ref_xp ) == local_cxy) ) 1014 1134 { 1015 // get extended pointer on p age descriptor1135 // get extended pointer on physical page descriptor 1016 1136 page_xp = ppm_ppn2page( ppn ); 1017 1137 page_cxy = GET_CXY( page_xp ); 1018 page_ptr = (page_t *)GET_PTR( page_xp ); 1019 1020 // release physical page to relevant cluster 1021 if( page_cxy == local_cxy ) // local cluster 1138 page_ptr = GET_PTR( page_xp ); 1139 1140 // FIXME lock the physical page 1141 1142 // get extended pointer on pending forks counter 1143 forks_xp = XPTR( page_cxy , &page_ptr->forks ); 1144 1145 // get pending forks counter 1146 count = hal_remote_lw( forks_xp ); 1147 1148 if( count ) // decrement pending forks counter 1022 1149 { 1023 req.type = KMEM_PAGE; 1024 req.ptr = page_ptr; 1025 kmem_free( &req ); 1150 hal_remote_atomic_add( forks_xp , -1 ); 1151 } 1152 else // release physical page to relevant cluster 1153 { 1154 if( page_cxy == local_cxy ) // local cluster 1155 { 1156 req.type = KMEM_PAGE; 1157 req.ptr = page_ptr; 1158 kmem_free( &req ); 1159 } 1160 else // remote cluster 1161 { 1162 rpc_pmem_release_pages_client( page_cxy , page_ptr ); 1163 } 1026 1164 } 1027 else // remote cluster 1028 { 1029 rpc_pmem_release_pages_client( page_cxy , page_ptr ); 1030 } 1165 1166 // FIXME unlock the physical page 1031 1167 } 1032 1168 } 1033 1169 } 1170 1171 #if CONFIG_DEBUG_VMM_UNMAP_VSEG 1172 cycle = (uint32_t)hal_get_cycles(); 1173 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle ) 1174 printk("\n[DBG] %s : thread %x exit / process %x / vseg %s / base %x / cycle %d\n", 1175 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle ); 1176 #endif 1177 1034 1178 } // end vmm_unmap_vseg() 1035 1179 … … 1061 1205 { 1062 1206 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 1063 vseg = (vseg_t *)GET_PTR( vseg_xp );1207 vseg = GET_PTR( vseg_xp ); 1064 1208 if( (vaddr >= vseg->min) && (vaddr < vseg->max) ) 1065 1209 { … … 1185 1329 // get cluster and local pointer on reference process 1186 1330 cxy_t ref_cxy = GET_CXY( ref_xp ); 1187 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );1331 process_t * ref_ptr = GET_PTR( ref_xp ); 1188 1332 1189 1333 if( local_cxy == ref_cxy ) return -1; // local cluster is the reference … … 1224 1368 vpn_t vpn ) 1225 1369 { 1370 1371 #if CONFIG_DEBUG_VMM_ALLOCATE_PAGE 1372 if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) 1373 printk("\n[DBG] 
in %s : thread %x enter for vpn %x\n", 1374 __FUNCTION__ , CURRENT_THREAD, vpn ); 1375 #endif 1376 1226 1377 // compute target cluster 1227 1378 page_t * page_ptr; … … 1262 1413 } 1263 1414 1415 #if CONFIG_DEBUG_VMM_ALLOCATE_PAGE 1416 if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) 1417 printk("\n[DBG] in %s : thread %x exit for vpn = %d / ppn = %x\n", 1418 __FUNCTION__ , CURRENT_THREAD, vpn, ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) ); 1419 #endif 1420 1264 1421 if( page_ptr == NULL ) return XPTR_NULL; 1265 1422 else return XPTR( page_cxy , page_ptr ); … … 1281 1438 index = vpn - vseg->vpn_base; 1282 1439 1283 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn = %x / type = %s / index = %d\n", 1284 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, vseg_type_str(type), index ); 1440 #if CONFIG_DEBUG_VMM_GET_ONE_PPN 1441 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1442 printk("\n[DBG] %s : thread %x enter for vpn = %x / type = %s / index = %d\n", 1443 __FUNCTION__, CURRENT_THREAD, vpn, vseg_type_str(type), index ); 1444 #endif 1285 1445 1286 1446 // FILE type : get the physical page from the file mapper … … 1295 1455 // get mapper cluster and local pointer 1296 1456 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 1297 mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp );1457 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 1298 1458 1299 1459 // get page descriptor from mapper … … 1316 1476 else 1317 1477 { 1318 // allocate physical page1478 // allocate one physical page 1319 1479 page_xp = vmm_page_allocate( vseg , vpn ); 1320 1480 … … 1322 1482 1323 1483 // initialise missing page from .elf file mapper for DATA and CODE types 1324 // => the mapper_xp field is an extended pointer on the .elf file mapper1484 // (the vseg->mapper_xp field is an extended pointer on the .elf file mapper) 1325 1485 if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) ) 1326 1486 { … … 1333 1493 // get mapper cluster and local pointer 1334 1494 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 1335 mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp );1495 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 1336 1496 1337 1497 // compute missing page offset in vseg … … 1341 1501 uint32_t elf_offset = vseg->file_offset + offset; 1342 1502 1343 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / elf_offset = %x\n", 1344 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, elf_offset ); 1503 #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1) 1504 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1505 printk("\n[DBG] %s : thread %x for vpn = %x / elf_offset = %x\n", 1506 __FUNCTION__, CURRENT_THREAD, vpn, elf_offset ); 1507 #endif 1345 1508 1346 1509 // compute extended pointer on page base … … 1352 1515 if( file_size < offset ) // missing page fully in BSS 1353 1516 { 1354 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / fully in BSS\n", 1355 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn ); 1517 1518 #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1) 1519 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1520 printk("\n[DBG] %s : thread%x for vpn = %x / fully in BSS\n", 1521 __FUNCTION__, CURRENT_THREAD, vpn ); 1522 #endif 1356 1523 1357 1524 if( GET_CXY( page_xp ) == local_cxy ) … … 1367 1534 { 1368 1535 1369 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / fully in mapper\n", 1370 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn ); 1536 #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1) 1537 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1538 
printk("\n[DBG] %s : thread %x, for vpn = %x / fully in mapper\n", 1539 __FUNCTION__, CURRENT_THREAD, vpn ); 1540 #endif 1371 1541 1372 1542 if( mapper_cxy == local_cxy ) … … 1396 1566 { 1397 1567 1398 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / both mapper & BSS\n" 1399 " %d bytes from mapper / %d bytes from BSS\n", 1400 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, 1568 #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1) 1569 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1570 printk("\n[DBG] %s : thread %x for vpn = %x / both mapper & BSS\n" 1571 " %d bytes from mapper / %d bytes from BSS\n", 1572 __FUNCTION__, CURRENT_THREAD, vpn, 1401 1573 file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size ); 1402 1574 #endif 1403 1575 // initialize mapper part 1404 1576 if( mapper_cxy == local_cxy ) … … 1441 1613 *ppn = ppm_page2ppn( page_xp ); 1442 1614 1443 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for vpn = %x / ppn = %x\n", 1444 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , *ppn ); 1615 #if CONFIG_DEBUG_VMM_GET_ONE_PPN 1616 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1617 printk("\n[DBG] %s : thread %x exit for vpn = %x / ppn = %x\n", 1618 __FUNCTION__ , CURRENT_THREAD , vpn , *ppn ); 1619 #endif 1445 1620 1446 1621 return 0; … … 1455 1630 ppn_t * ppn ) 1456 1631 { 1457 vseg_t * vseg; // pointer onvseg containing VPN1632 vseg_t * vseg; // vseg containing VPN 1458 1633 ppn_t old_ppn; // current PTE_PPN 1459 1634 uint32_t old_attr; // current PTE_ATTR … … 1466 1641 "not called in the reference cluster\n" ); 1467 1642 1468 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn = %x in process %x / cow = %d\n", 1469 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , process->pid , cow ); 1643 #if CONFIG_DEBUG_VMM_GET_PTE 1644 uint32_t cycle = (uint32_t)hal_get_cycles(); 1645 if( CONFIG_DEBUG_VMM_GET_PTE > cycle ) 1646 printk("\n[DBG] %s : thread %x enter for vpn = %x / process %x / cow = %d / cycle %d\n", 1647 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cow , cycle ); 1648 #endif 1470 1649 1471 1650 // get VMM pointer 1472 1651 vmm_t * vmm = &process->vmm; 1473 1652 1474 // get vseg pointer from ref VSL1653 // get vseg pointer from reference VSL 1475 1654 error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg ); 1476 1655 … … 1482 1661 } 1483 1662 1484 vmm_dmsg("\n[DBG] %s : core[%x,%d] found vseg %s / vpn_base = %x / vpn_size = %x\n", 1485 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , 1486 vseg_type_str(vseg->type) , vseg->vpn_base , vseg->vpn_size ); 1663 #if CONFIG_DEBUG_VMM_GET_PTE 1664 cycle = (uint32_t)hal_get_cycles(); 1665 if( CONFIG_DEBUG_VMM_GET_PTE < cycle ) 1666 printk("\n[DBG] %s : thread %x found vseg %s / vpn_base = %x / vpn_size = %x\n", 1667 __FUNCTION__, CURRENT_THREAD, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size ); 1668 #endif 1487 1669 1488 1670 // access GPT to get current PTE attributes and PPN … … 1493 1675 // clusters containing a copy, and return the new_ppn and new_attr 1494 1676 1495 if( cow ) ////////////// copy_on_write request///////////1677 if( cow ) /////////////////////////// copy_on_write request ////////////////////// 1496 1678 { 1497 1679 assert( (old_attr & GPT_MAPPED) , __FUNCTION__ , 1498 1680 "PTE must be mapped for a copy-on-write exception\n" ); 1499 1681 1500 excp_dmsg("\n[DBG] %s : core[%x,%d] handling COW for vpn %x\n", 1501 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ); 1502 1503 // get extended pointer, cluster and local 
pointer on page descriptor 1682 #if CONFIG_DEBUG_VMM_GET_PTE 1683 cycle = (uint32_t)hal_get_cycles(); 1684 if( CONFIG_DEBUG_VMM_GET_PTE < cycle ) 1685 printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n", 1686 __FUNCTION__, CURRENT_THREAD, vpn, process->pid ); 1687 #endif 1688 1689 // get extended pointer, cluster and local pointer on physical page descriptor 1504 1690 xptr_t page_xp = ppm_ppn2page( old_ppn ); 1505 1691 cxy_t page_cxy = GET_CXY( page_xp ); 1506 page_t * page_ptr = (page_t *)GET_PTR( page_xp );1692 page_t * page_ptr = GET_PTR( page_xp ); 1507 1693 1508 1694 // get number of pending forks in page descriptor 1509 uint32_t count = hal_remote_lw( XPTR( page_cxy , &page_ptr->fork_nr) );1510 1511 if( count ) // pending fork => allocate a new page, copy it, reset COW1695 uint32_t forks = hal_remote_lw( XPTR( page_cxy , &page_ptr->forks ) ); 1696 1697 if( forks ) // pending fork => allocate a new page, copy old to new 1512 1698 { 1513 1699 // allocate a new physical page … … 1539 1725 1540 1726 // update GPT[vpn] for all GPT copies 1541 // to maintain coherence of copies 1542 vmm_update_pte( process, 1543 vpn, 1544 new_attr, 1545 new_ppn ); 1546 1547 // decrement fork_nr in page descriptor 1548 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , -1 ); 1549 } 1550 else /////////////// page_fault request /////////// 1727 vmm_global_update_pte( process, vpn, new_attr, new_ppn ); 1728 1729 // decrement pending forks counter in page descriptor 1730 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 ); 1731 } 1732 else ////////////////////////////////// page_fault request //////////////////////// 1551 1733 { 1552 1734 if( (old_attr & GPT_MAPPED) == 0 ) // true page_fault => map it 1553 1735 { 1554 1736 1555 excp_dmsg("\n[DBG] %s : core[%x,%d] handling page fault for vpn %x\n", 1556 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ); 1737 #if CONFIG_DEBUG_VMM_GET_PTE 1738 cycle = (uint32_t)hal_get_cycles(); 1739 if( CONFIG_DEBUG_VMM_GET_PTE < cycle ) 1740 printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n", 1741 __FUNCTION__, CURRENT_THREAD, vpn, process->pid ); 1742 #endif 1557 1743 1558 1744 // allocate new_ppn, depending on vseg type … … 1592 1778 } 1593 1779 1594 excp_dmsg("\n[DBG] %s : core[%x,%d] update GPT for vpn %x / ppn = %x / attr = %x\n", 1595 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , new_ppn , new_attr ); 1596 1597 // retur success 1780 #if CONFIG_DEBUG_VMM_GET_PTE 1781 cycle = (uint32_t)hal_get_cycles(); 1782 if( CONFIG_DEBUG_VMM_GET_PTE < cycle ) 1783 printk("\n[DBG] %s : thread,%x exit for vpn %x in process %x / ppn = %x / attr = %x / cycle %d\n", 1784 __FUNCTION__, CURRENT_THREAD, vpn, process->pid, new_ppn, new_attr, cycle ); 1785 #endif 1786 1787 // return success 1598 1788 *ppn = new_ppn; 1599 1789 *attr = new_attr; … … 1612 1802 // get reference process cluster and local pointer 1613 1803 cxy_t ref_cxy = GET_CXY( process->ref_xp ); 1614 process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );1804 process_t * ref_ptr = GET_PTR( process->ref_xp ); 1615 1805 1616 1806 // get missing PTE attributes and PPN from reference cluster … … 1651 1841 vpn_t vpn ) 1652 1842 { 1653 uint32_t attr; // missingpage attributes1654 ppn_t ppn; // missingpage PPN1843 uint32_t attr; // page attributes 1844 ppn_t ppn; // page PPN 1655 1845 error_t error; 1656 1846 1847 1657 1848 // get reference process cluster and local pointer 1658 1849 cxy_t ref_cxy = GET_CXY( process->ref_xp ); 1659 
process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );1850 process_t * ref_ptr = GET_PTR( process->ref_xp ); 1660 1851 1661 1852 // get new PTE attributes and PPN from reference cluster … … 1722 1913 { 1723 1914 cxy_t ref_cxy = GET_CXY( process->ref_xp ); 1724 process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );1915 process_t * ref_ptr = GET_PTR( process->ref_xp ); 1725 1916 rpc_vmm_get_pte_client( ref_cxy , ref_ptr , vpn , false , &attr , &ppn , &error ); 1726 1917 } -
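The key change in vmm_get_pte() is that the copy decision for a COW fault is now driven by the pending forks counter. Condensed sketch of that branch: locking, error paths and the page-copy primitive are elided, and the GPT_WRITABLE flag name is an assumption, since only GPT_MAPPED and GPT_COW appear in the hunks above.

/* Sketch of the COW branch of vmm_get_pte() above (not verbatim). */
uint32_t new_attr;
ppn_t    new_ppn;

xptr_t   page_xp  = ppm_ppn2page( old_ppn );        // old page descriptor
cxy_t    page_cxy = GET_CXY( page_xp );
page_t * page_ptr = GET_PTR( page_xp );
uint32_t forks    = hal_remote_lw( XPTR( page_cxy , &page_ptr->forks ) );

if( forks )                  // page still shared : build a private copy
{
    xptr_t new_xp = vmm_page_allocate( vseg , vpn );   // possibly remote
    new_ppn = ppm_page2ppn( new_xp );

    /* copy the old page content into the new page (elided) */

    // one sharer less on the old page
    hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 );
}
else                         // last sharer : simply reuse the old page
{
    new_ppn = old_ppn;
}

// restore write access and clear COW in the reference GPT and all copies
new_attr = (old_attr | GPT_WRITABLE) & ~GPT_COW;
vmm_global_update_pte( process , vpn , new_attr , new_ppn );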
trunk/kernel/mm/vmm.h
r429 r433 99 99 * a remote_rwlock, because it can be accessed by a thread running in a remote cluster. 100 100 * An exemple is the vmm_fork_copy() function. 101 * 2. In most c usters, the VSL and GPT are only partial copies of the reference VSL and GPT101 * 2. In most clusters, the VSL and GPT are only partial copies of the reference VSL and GPT 102 102 * structures, stored in the reference cluster. 103 103 ********************************************************************************************/ … … 155 155 156 156 /********************************************************************************************* 157 * This function is called by the process_ fork_create() function. It partially copies157 * This function is called by the process_make_fork() function. It partially copies 158 158 * the content of a remote parent process VMM to the local child process VMM: 159 159 * - all DATA, MMAP, REMOTE vsegs registered in the parent VSL are registered in the child … … 176 176 177 177 /********************************************************************************************* 178 * This function is called by the process_make_fork() function to handlethe fork syscall.178 * This function is called by the process_make_fork() function executing the fork syscall. 179 179 * It set the COW flag, and reset the WRITABLE flag of all GPT entries of the DATA, MMAP, 180 180 * and REMOTE vsegs of a process identified by the <process> argument. 181 181 * It must be called by a thread running in the reference cluster, that contains the complete 182 * list of vsegs. Use the rpc_vmm_set_cow_client() when the calling thread client is remote.182 * VSL and GPT (use the rpc_vmm_set_cow_client() when the calling thread client is remote). 183 183 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies, 184 184 * using the list of copies stored in the owner process, and using remote_write accesses to 185 * update the remote GPTs. It cannot fail, as only mapped entries in GPT copies are updated. 185 * update the remote GPTs. It atomically increment the pending_fork counter, in all involved 186 * physical page descriptors. It cannot fail, as only mapped entries in GPTs are updated. 186 187 ********************************************************************************************* 187 188 * @ process : local pointer on local reference process descriptor. … … 190 191 191 192 /********************************************************************************************* 192 * This function is called by the vmm_get_pte() function in case of COW exception.193 * It modifies both the PPN an the attributes for a GPT entry identified by the <process>194 * and <vpn> arguments.193 * This global function modifies a GPT entry identified by the <process> and <vpn> 194 * arguments in all clusters containing a process copy. 195 * It must be called by a thread running in the reference cluster. 195 196 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies, 196 197 * using the list of copies stored in the owner process, and using remote_write accesses to … … 202 203 * @ ppn : PTE / physical page index. 
203 204 ********************************************************************************************/ 204 void vmm_update_pte( struct process_s * process, 205 vpn_t vpn, 206 uint32_t attr, 207 ppn_t ppn ); 208 209 /********************************************************************************************* 210 * This function scan the list of vsegs registered in the VSL of the process 211 * identified by the <process> argument, and for each vseg: 212 * - it unmap from the GPT and releases all mapped pages in vseg. 213 * - it removes the vseg from the process VSL. 214 * - It releases the memory allocated to the vseg descriptor. 205 void vmm_global_update_pte( struct process_s * process, 206 vpn_t vpn, 207 uint32_t attr, 208 ppn_t ppn ); 209 210 /********************************************************************************************* 211 * This function unmaps from the local GPT all mapped PTEs of a vseg identified by the 212 * <process> and <vseg> arguments. It can be used for any type of vseg. 213 * If this function is executed in the reference cluster, it handles for each referenced 214 * physical pages the pending forks counter : 215 * - if counter is non-zero, it decrements it. 216 * - if counter is zero, it releases the physical page to local kmem allocator. 217 ********************************************************************************************* 218 * @ process : pointer on process descriptor. 219 * @ vseg : pointer on the vseg to be unmapped. 220 ********************************************************************************************/ 221 void vmm_unmap_vseg( struct process_s * process, 222 vseg_t * vseg ); 223 224 /********************************************************************************************* 225 * This function deletes, in the local cluster, all vsegs registered in the VSL 226 * of the process identified by the <process> argument. For each vseg: 227 * - it unmaps all vseg PTEs from the GPT (release the physical pages when required). 228 * - it removes the vseg from the local VSL. 229 * - it releases the memory allocated to the local vseg descriptors. 215 230 * Finally, it releases the memory allocated to the GPT itself. 216 231 ********************************************************************************************* … … 291 306 292 307 /********************************************************************************************* 293 * This function unmaps all mapped PTEs of a given vseg, from the generic page table294 * associated to a given process descriptor, and releases the physical memory allocated295 * to all mapped GPT entries. It can be used for any type of vseg.296 *********************************************************************************************297 * @ process : pointer on process descriptor.298 * @ vseg : pointer on the vseg to be unmapped.299 ********************************************************************************************/300 void vmm_unmap_vseg( struct process_s * process,301 vseg_t * vseg );302 303 /*********************************************************************************************304 308 * This function removes a given region (defined by a base address and a size) from 305 309 * the VMM of a given process descriptor. 
This can modify the number of vsegs: … … 340 344 /********************************************************************************************* 341 345 * This function is called by the generic exception handler when a page-fault event 342 * has been detected in a given cluster.346 * has been detected for a given process in a given cluster. 343 347 * - If the local cluster is the reference, it call directly the vmm_get_pte() function. 344 348 * - If the local cluster is not the reference cluster, it send a RPC_VMM_GET_PTE … … 355 359 /********************************************************************************************* 356 360 * This function is called by the generic exception handler when a copy-on-write event 357 * has been detected in a given cluster. 358 * - If the local cluster is the reference, it call directly the vmm_get_pte() function. 359 * - If the local cluster is not the reference cluster, it send a RPC_VMM_GET_PTE 360 * to the reference cluster to get the missing PTE attributes and PPN, 361 * and update the local page table. 361 * has been detected for a given process in a given cluster. 362 * It takes the lock protecting the physical page, and test the pending forks counter. 363 * If no pending fork: 364 * - it reset the COW flag and set the WRITE flag in the reference GPT entry, and in all 365 * the GPT copies 366 367 * If there is a pending forkon the 368 * - It get the involved vseg pointer. 369 * - It allocates a new physical page from the cluster defined by the vseg type. 370 * - It copies the old physical page content to the new physical page. 371 * - It decrements the pending_fork counter in old physical page descriptor. 372 362 373 ********************************************************************************************* 363 374 * @ process : pointer on process descriptor. … … 369 380 370 381 /********************************************************************************************* 371 * This function is called when a new PTE (GPT entry) is required because a "page-fault", 372 * or "copy-on_write" event has been detected for a given <vpn> in a given <process>. 373 * The <cow> argument defines the type of event to be handled. 382 * This function handle both the "page-fault" and "copy-on_write" events for a given <vpn> 383 * in a given <process>. The <cow> argument defines the type of event to be handled. 374 384 * This function must be called by a thread running in reference cluster, and the vseg 375 * containing the searched VPN should be registered in the reference VMM. 376 * - for an actual page-fault, it allocates the missing physical page from the target cluster 377 * defined by the vseg type, initialize it, and update the reference page table. 385 * containing the searched VPN must be registered in the reference VMM. 386 * - for an page-fault, it allocates the missing physical page from the target cluster 387 * defined by the vseg type, initializes it, and updates the reference GPT, but not 388 * the copies GPT, that will be updated on demand. 378 389 * - for a copy-on-write, it allocates a new physical page from the target cluster, 379 * initialise it from the old physical page, and update the reference page table. 380 * In both cases, it calls the RPC_PMEM_GET_PAGES to get the new physical page if the 381 * target cluster is not the reference cluster. 390 * initialise it from the old physical page, and updates the reference GPT and all 391 * the GPT copies, for coherence. 
392 * In both cases, it calls the RPC_PMEM_GET_PAGES to get the new physical page when 393 * the target cluster is not the reference cluster. 382 394 * It returns in the <attr> and <ppn> arguments the accessed or modified PTE. 383 395 ********************************************************************************************* … … 400 412 * (Physical Page Number) associated to a missing page defined by the <vpn> argument. 401 413 * - For the FILE type, it returns directly the physical page from the file mapper. 402 * - For the CODE and DATA types, it allocates a new ph sical page from the cluster defined414 * - For the CODE and DATA types, it allocates a new physical page from the cluster defined 403 415 * by the <vseg->cxy> field, or by the <vpn> MSB bits for a distributed vseg, 404 416 * and initialize this page from the .elf file mapper. -
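Taken together, the comments above describe the VMM side of a fork. The sketch below shows how process_make_fork() is expected to chain these calls; the argument order of vmm_fork_copy() and the assumption that the caller runs in the parent reference cluster are inferred from the descriptions above, not quoted from the committed caller.

/* Sketch (not committed code): VMM side of a fork, as described above. */
error_t vmm_fork_sketch( xptr_t parent_xp , process_t * child )
{
    // 1. copy parent VSL/GPT into the local child VMM; for each mapped
    //    page of a DATA / ANON / REMOTE vseg this increments page->forks
    if( vmm_fork_copy( child , parent_xp ) ) return -1;

    // 2. in the reference cluster, set COW and clear WRITABLE in the
    //    parent GPT and in all its copies, so that the next write faults
    vmm_set_cow( GET_PTR( parent_xp ) );   // valid only if parent is local

    return 0;
}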
trunk/kernel/syscalls/sys_display.c
r421 r433 25 25 #include <hal_uspace.h> 26 26 #include <errno.h> 27 #include <vmm.h> 27 28 #include <cluster.h> 28 29 #include <thread.h> 29 30 #include <process.h> 31 #include <string.h> 30 32 31 33 … … 35 37 reg_t arg1 ) 36 38 { 39 // get thread, process and core 40 thread_t * this = CURRENT_THREAD; 41 process_t * process = this->process; 42 core_t * core = this->core; 43 44 #if CONFIG_DEBUG_SYS_DISPLAY 45 uint64_t tm_start; 46 uint64_t tm_end; 47 tm_start = hal_get_cycles(); 48 if( CONFIG_DEBUG_SYS_DISPLAY < tm_start ) 49 printk("\n[DBG] %s : thread %d enter / process %x / cycle = %d\n", 50 __FUNCTION__, this, process->pid, (uint32_t)tm_start ); 51 #endif 52 37 53 if( type == DISPLAY_STRING ) 38 54 { 39 55 paddr_t paddr; 40 56 char kbuf[256]; 57 uint32_t length; 41 58 42 59 char * string = (char *)arg0; 43 60 44 61 // check string in user space 45 if( vmm_v2p_translate( false , string , &paddr ) ) return -1; 62 if( vmm_v2p_translate( false , string , &paddr ) ) 63 { 64 printk("\n[ERROR] in %s : string buffer %x unmapped\n", 65 __FUNCTION__ , string ); 66 return -1; 67 } 46 68 47 69 // ckeck string length 48 if( hal_strlen_from_uspace( string ) >= 256 ) return -1; 70 length = hal_strlen_from_uspace( string ); 71 if( length >= 256 ) 72 { 73 printk("\n[ERROR] in %s : string length %d too large\n", 74 __FUNCTION__ , length ); 75 return -1; 76 } 49 77 50 78 // copy string in kernel space 51 79 hal_strcpy_from_uspace( kbuf , string , 256 ); 52 53 // get thread, process and core54 thread_t * this = CURRENT_THREAD;55 process_t * process = this->process;56 core_t * core = this->core;57 80 58 81 // print message on TXT0 kernel terminal … … 60 83 this->trdid , process->pid , local_cxy, core->lid , 61 84 (uint32_t)hal_get_cycles() , kbuf ); 62 63 return 0;64 85 } 65 86 else if( type == DISPLAY_VMM ) … … 70 91 xptr_t process_xp = cluster_get_reference_process_from_pid( pid ); 71 92 72 if( process_xp == XPTR_NULL ) return -1; 93 if( process_xp == XPTR_NULL ) 94 { 95 printk("\n[ERROR] in %s : undefined PID %x\n", 96 __FUNCTION__ , pid ); 97 return -1; 98 } 73 99 74 100 // get cluster and local pointer on process … … 85 111 rpc_vmm_display_client( process_cxy , process_ptr , true ); 86 112 } 87 88 return 0;89 113 } 90 114 else if( type == DISPLAY_SCHED ) … … 94 118 95 119 // check cluster argument 96 if( cluster_is_undefined( cxy ) ) return -1; 120 if( cluster_is_undefined( cxy ) ) 121 { 122 printk("\n[ERROR] in %s : undefined cluster identifier %x\n", 123 __FUNCTION__ , cxy ); 124 return -1; 125 } 97 126 98 127 // check core argument 99 if( lid >= LOCAL_CLUSTER->cores_nr ) return -1; 128 if( lid >= LOCAL_CLUSTER->cores_nr ) 129 { 130 printk("\n[ERROR] in %s : undefined local index %d\n", 131 __FUNCTION__ , lid ); 132 return -1; 133 } 100 134 101 // call kernel function102 135 if( cxy == local_cxy ) 103 136 { … … 108 141 rpc_sched_display_client( cxy , lid ); 109 142 } 110 111 return 0;112 143 } 113 144 else if( type == DISPLAY_PROCESS ) … … 116 147 117 148 // check cluster argument 118 if( cluster_is_undefined( cxy ) ) return -1; 149 if( cluster_is_undefined( cxy ) ) 150 { 151 printk("\n[ERROR] in %s : undefined cluster identifier %x\n", 152 __FUNCTION__ , cxy ); 153 return -1; 154 } 119 155 120 // call kernel function121 156 cluster_processes_display( cxy ); 122 123 return 0;124 157 } 125 158 else if( type == DISPLAY_VFS ) … … 128 161 process_t * process = CURRENT_THREAD->process; 129 162 vfs_display( process->vfs_root_xp ); 130 131 return 0;132 163 } 133 164 else if( type == DISPLAY_CHDEV ) 134 165 
{ 135 // call kernel function136 166 chdev_dir_display(); 167 } 168 else 169 { 170 printk("\n[ERROR] in %s : undefined display type %x\n", 171 __FUNCTION__ , type ); 172 return -1; 173 } 137 174 138 return 0; 139 } 140 else return -1; 175 #if CONFIG_DEBUG_SYS_DISPLAY 176 tm_end = hal_get_cycles(); 177 if( CONFIG_DEBUG_SYS_DISPLAY < tm_end ) 178 printk("\n[DBG] %s : thread %x exit / process %x / cost = %d / cycle %d\n", 179 __FUNCTION__, this, process->pid, (uint32_t)(tm_end - tm_start) , (uint32_t)tm_end ); 180 #endif 141 181 142 } // end sys_get_sched() 182 return 0; 183 184 } // end sys_display() -
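The DISPLAY_STRING path now reports precise errors instead of silently returning -1. Its user-pointer checks follow a reusable pattern, summarized below; the helper itself is not part of the changeset, but the three calls are exactly those used in the hunk.

/* Sketch of the user-pointer validation pattern used by DISPLAY_STRING
 * above (the 256-byte kernel buffer size matches the committed code). */
static error_t copy_user_string( char * ustr , char * kbuf , uint32_t max )
{
    paddr_t paddr;

    // the string must be mapped in the calling process address space
    if( vmm_v2p_translate( false , ustr , &paddr ) ) return -1;

    // it must fit in the kernel buffer, terminator included
    if( hal_strlen_from_uspace( ustr ) >= max ) return -1;

    // only then is it copied to kernel space
    hal_strcpy_from_uspace( kbuf , ustr , max );

    return 0;
}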
trunk/kernel/syscalls/sys_exec.c
r421 r433 149 149 ///////////////////////////////////////////////////////////////////////////////////////// 150 150 // Implementation note: 151 // This function build an exec_info_t structure containing all informations 151 // This function must be called by the main thread (thread 0 in owner cluster). 152 // IT build an exec_info_t structure containing all informations 152 153 // required to initialize the new process descriptor and the associated thread. 153 // It includes the process PID (unchanged), main() arguments,environment variables,154 // It includes the process main() arguments, the environment variables, 154 155 // and the pathname to the new process .elf file. 155 156 // It calls the process_exec_get_strings() functions to copy the main() arguments and … … 169 170 error_t error; 170 171 171 // get parent processpid172 // get calling thread, process, & pid 172 173 thread_t * this = CURRENT_THREAD; 173 174 process_t * process = this->process; 174 175 pid_t pid = process->pid; 175 176 176 #if CONFIG_SYSCALL_DEBUG 177 assert( (CXY_FROM_PID( pid ) == local_cxy) , __FUNCTION__ , 178 "must be called in the owner cluster\n"); 179 180 assert( (LTID_FROM_TRDID( this->trdid ) == 0) , __FUNCTION__ , 181 "must be called by the main thread\n"); 182 183 assert( (args == NULL) , __FUNCTION__ , 184 "args not supported yet\n" ); 185 186 assert( (envs == NULL) , __FUNCTION__ , 187 "args not supported yet\n" ); 188 189 // get owner cluster 190 191 // check pathname length 192 if( hal_strlen_from_uspace( pathname ) >= CONFIG_VFS_MAX_PATH_LENGTH ) 193 { 194 195 #if CONFIG_DEBUG_SYSCALLS_ERROR 196 printk("\n[ERROR] in %s : pathname too long\n", __FUNCTION__ ); 197 #endif 198 this->errno = ENFILE; 199 return -1; 200 } 201 202 // copy pathname in exec_info structure (kernel space) 203 hal_strcpy_from_uspace( exec_info.path , pathname , CONFIG_VFS_MAX_PATH_LENGTH ); 204 205 #if CONFIG_DEBUG_SYS_EXEC 177 206 uint64_t tm_start; 178 207 uint64_t tm_end; 179 208 tm_start = hal_get_cycles(); 180 printk("\n[DBG] %s : core[%x,%d] enter / process %x / cycle = %d\n", 181 __FUNCTION__, local_cxy, this->core->lid, pid, (uint32_t)tm_start ); 182 #endif 183 184 // get owner cluster 185 cxy_t owner_cxy = CXY_FROM_PID( pid ); 186 187 // check pathname length 188 if( hal_strlen_from_uspace( pathname ) >= CONFIG_VFS_MAX_PATH_LENGTH ) 189 { 190 printk("\n[ERROR] in %s : pathname too long\n", __FUNCTION__ ); 191 this->errno = ENFILE; 192 return -1; 193 } 194 195 // copy pathname in exec_info structure (kernel space) 196 hal_strcpy_from_uspace( exec_info.path , pathname , CONFIG_VFS_MAX_PATH_LENGTH ); 197 198 // check args argument 199 assert( (args == NULL) , __FUNCTION__ , 200 "args not supported yet\n" ); 201 202 // check envs argument 203 assert( (envs == NULL) , __FUNCTION__ , 204 "args not supported yet\n" ); 209 if( CONFIG_DEBUG_SYS_EXEC < tm_start ) 210 printk("\n[DBG] %s : thread %x enter / process %x / path %s / cycle = %d\n", 211 __FUNCTION__, this, pid, exec_info.path, (uint32_t)tm_start ); 212 #endif 205 213 206 214 // check and store args in exec_info structure if required … … 209 217 if( process_exec_get_strings( &exec_info , true , args ) ) 210 218 { 211 printk("\n[ERROR] in %s : cannot access args\n", __FUNCTION__ ); 219 220 #if CONFIG_DEBUG_SYSCALLS_ERROR 221 printk("\n[ERROR] in %s : cannot access args\n", __FUNCTION__ ); 222 #endif 212 223 this->errno = error; 213 224 return -1; … … 220 231 if( process_exec_get_strings( &exec_info , false , envs ) ) 221 232 { 222 printk("\n[ERROR] in %s : cannot access 
envs\n", __FUNCTION__ ); 233 234 #if CONFIG_DEBUG_SYCALLS_ERROR 235 printk("\n[ERROR] in %s : cannot access envs\n", __FUNCTION__ ); 236 #endif 223 237 this->errno = error; 224 238 return -1; … … 226 240 } 227 241 228 // register PID in exec_info 229 exec_info.pid = pid; 230 231 // call process_make_exec (local or remote) 232 if( owner_cxy == local_cxy ) 233 { 234 error = process_make_exec( &exec_info ); 235 } 236 else 237 { 238 rpc_process_make_exec_client( owner_cxy, 239 &exec_info, 240 &error ); 241 } 242 // call relevant kernel function 243 error = process_make_exec( &exec_info ); 242 244 243 245 if( error ) 244 246 { 245 printk("\n[ERROR] in %s : cannot create new process %x in cluster %x\n", 246 __FUNCTION__, pid, owner_cxy ); 247 248 #if CONFIG_DEBUG_SYSCALLS_ERROR 249 printk("\n[ERROR] in %s : cannot create process %x in cluster %x\n", 250 __FUNCTION__, pid, CXY_FROM_PID( pid ); 251 #endif 247 252 this->errno = error; 248 253 return -1; 249 254 } 250 255 251 #if CONFIG_ SYSCALL_DEBUG256 #if CONFIG_DEBUG_SYS_EXEC 252 257 tm_end = hal_get_cycles(); 253 printk("\n[DBG] %s : core[%x,%d] exit / process %x / path = %s / cost = %d / cycle %d\n", 254 __FUNCTION__, local_cxy, this->core->lid, pid, exec_info.path, 255 (uint32_t)(tm_end - tm_start) , (uint32_t)tm_end ); 256 #endif 257 258 return 0; 258 if( CONFIG_DEBUG_SYS_EXEC < tm_end ) 259 printk("\n[DBG] %s : thread %x exit / process %x / cost = %d / cycle %d\n", 260 __FUNCTION__, this, pid, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end ); 261 #endif 262 263 // deschedule <=> old thread suicide because the BLOCKED_GLOBAL 264 // and the FLAG_REQ_DELETE have been set by process_make_exec() 265 sched_yield( "old process suicide in sys_exec()" ); 266 267 assert( false , __FUNCTION__ , "This code should not be executed\n" ); 268 269 return 0; 259 270 260 271 } // end sys_exec() -
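Behavioural note: since this changeset a successful exec never returns. sys_exec() now ends with a sched_yield() once the old main thread has been blocked and marked for deletion, and the assert documents that the code after it is unreachable. Callers must therefore treat any return from the exec wrapper as an error, as in the classic idiom below (user-level sketch; the wrapper name execve() is an assumption, not taken from this changeset).

/* User-level sketch: any return from exec means failure. */
if( fork() == 0 )                       // child process
{
    execve( "/bin/init" , NULL , NULL );

    // reached only if the kernel refused the exec (errno is set)
    exit( EXIT_FAILURE );
}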
-
trunk/kernel/syscalls/sys_exit.c
r416 r433
36 36 int sys_exit( uint32_t status )
37 37 {
38 uint32_t save_sr; // required to enable IRQs
38 reg_t save_sr; // required to enable IRQs
39 39
40 thread_t * this = CURRENT_THREAD;
41 pid_t pid = this->process->pid;
40 thread_t * this = CURRENT_THREAD;
41 process_t * process = this->process;
42 pid_t pid = process->pid;
42 43
43 #if CONFIG_SYSCALL_DEBUG
44 #if CONFIG_DEBUG_SYS_EXIT
44 45 uint64_t tm_start;
45 46 uint64_t tm_end;
46 47 tm_start = hal_get_cycles();
47 printk("\n[DBG] %s : core[%x,%d] enter / process %x / status %x / cycle %d\n",
48 __FUNCTION__ , local_cxy , this->core->lid , pid , status , (uint32_t)tm_start );
48 if( CONFIG_DEBUG_SYS_EXIT < tm_start )
49 printk("\n[DBG] %s : thread %x enter / process %x / status %x / cycle %d\n",
50 __FUNCTION__ , this, pid , status , (uint32_t)tm_start );
49 51 #endif
50 52
51 // get owner process cluster
52 cxy_t owner_cxy = CXY_FROM_PID( pid );
53 // get cluster and pointers on process in owner cluster
54 xptr_t owner_xp = cluster_get_owner_process_from_pid( pid );
55 cxy_t owner_cxy = GET_CXY( owner_xp );
56 process_t * owner_ptr = GET_PTR( owner_xp );
57
58 assert( (owner_xp != XPTR_NULL) , __FUNCTION__ , "owner_xp cannot be NULL\n" );
53 59
54 60 // enable IRQs
55 61 hal_enable_irq( &save_sr );
56 62
57 // execute process_make_exit() function in owner cluster
58 if( local_cxy == owner_cxy ) // owner is local
59 {
60 process_make_exit( pid , status );
61 }
62 else // owner is remote
63 {
64 rpc_process_make_exit_client( owner_cxy, pid , status );
65 }
63 // the process_make_kill() function must be executed
64 // by an RPC thread in reference cluster
65 rpc_process_make_kill_client( owner_cxy, owner_ptr, true , status );
66 66
67 67 // restore IRQs
… …
70 70 hal_fence();
71 71
72 #if CONFIG_SYSCALL_DEBUG
72 #if CONFIG_DEBUG_SYS_EXIT
73 73 tm_end = hal_get_cycles();
74 printk("\n[DBG] %s : core[%x,%d] exit / process %x / status %x / cost = %d\n",
75 __FUNCTION__ , local_cxy , this->core->lid , pid , status , (uint32_t)(tm_end - tm_start) );
74 if( CONFIG_DEBUG_SYS_EXIT < tm_end )
75 printk("\n[DBG] %s : thread %x exit / process %x / status %x / cost = %d / cycle %d\n",
76 __FUNCTION__, this, pid, status, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
77 #endif
77 78
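Since sys_exit() now funnels through rpc_process_make_kill_client() in the owner cluster, an exit() from any thread terminates the whole process. A sketch of the user-visible behaviour, assuming standard pthread and libc wrappers (hypothetical user code, not part of the changeset):

    #include <pthread.h>
    #include <stdlib.h>
    #include <unistd.h>

    void * worker( void * arg )
    {
        exit( 3 );          // terminates the whole process, not just this thread
        return NULL;        // never reached
    }

    int main( void )
    {
        pthread_t t;
        pthread_create( &t , NULL , worker , NULL );
        for( ;; ) pause();  // never completes : the worker's exit() ends the process
    }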
-
trunk/kernel/syscalls/sys_fork.c
r416 r433
63 63 parent_pid = parent_process_ptr->pid;
64 64
65 #if CONFIG_SYSCALL_DEBUG
65 #if CONFIG_DEBUG_SYS_FORK
66 66 uint64_t tm_start;
67 67 uint64_t tm_end;
68 68 tm_start = hal_get_cycles();
69 printk("\n[DBG] %s : core[%x,%d] enter / process %x / cycle = %d\n",
70 __FUNCTION__, local_cxy, parent_thread_ptr->core->lid, parent_pid,
71 (uint32_t)tm_start );
69 if( CONFIG_DEBUG_SYS_FORK < tm_start )
70 printk("\n[DBG] %s : thread %x enter / parent %x / cycle = %d\n",
71 __FUNCTION__, parent_thread_ptr, parent_pid, (uint32_t)tm_start );
72 72 #endif
73 73
… …
148 148 thread_unblock( XPTR( target_cxy , child_thread_ptr ) , THREAD_BLOCKED_GLOBAL );
149 149
150 #if CONFIG_SYSCALL_DEBUG
150 #if CONFIG_DEBUG_SYS_FORK
151 151 tm_end = hal_get_cycles();
152 printk("\n[DBG] %s : core[%x,%d] parent_process %x exit / cost = %d\n",
153 __FUNCTION__, local_cxy, parent_thread_ptr->core->lid, parent_pid,
154 (uint32_t)(tm_end - tm_start));
152 if( CONFIG_DEBUG_SYS_FORK < tm_end )
153 printk("\n[DBG] %s : parent_thread %x exit / cost = %d / cycle %d\n",
154 __FUNCTION__ , parent_thread_ptr, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
155 155
156 156
… …
160 160 {
161 161
162 #if CONFIG_SYSCALL_DEBUG
162 #if CONFIG_DEBUG_SYS_FORK
163 163 tm_end = hal_get_cycles();
164 printk("\n[DBG] %s : core[%x,%d] child process %x exit / cost = %d\n",
165 __FUNCTION__, local_cxy, parent_thread_ptr->core->lid, child_pid,
166 (uint32_t)(tm_end - tm_start));
164 if( CONFIG_DEBUG_SYS_FORK < tm_end )
165 printk("\n[DBG] %s : child_thread %x exit / cost = %d / cycle %d\n",
166 __FUNCTION__ , child_thread_ptr, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
167 167
168 168
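For reference, the two debug messages above correspond to the two return paths of fork(): the parent observes the child PID, the child observes 0. A minimal usage sketch of this standard POSIX contract (hypothetical user code):

    #include <unistd.h>
    #include <stdio.h>

    int main( void )
    {
        pid_t pid = fork();

        if     ( pid == 0 ) printf("child : fork() returned 0\n");
        else if( pid >  0 ) printf("parent: child pid = %d\n", (int)pid);
        else                printf("fork failed\n");

        return 0;
    }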
-
trunk/kernel/syscalls/sys_kill.c
r421 r433
38 38 {
39 39 uint32_t save_sr; // required to enable IRQs
40 xptr_t process_xp; // extended pointer on target reference process
41 cxy_t process_cxy; // target process cluster
42 process_t * process_ptr; // local pointer on target process
40 xptr_t owner_xp; // extended pointer on target reference process
41 cxy_t owner_cxy; // target process cluster
42 process_t * owner_ptr; // local pointer on target process
43 43 xptr_t parent_xp; // extended pointer on parent process
44 44 cxy_t parent_cxy; // parent process cluster
45 45 process_t * parent_ptr; // local pointer on parent process
46 46 pid_t ppid; // parent process PID
47 uint32_t retval; // return value for the switch
47 48
48 49 thread_t * this = CURRENT_THREAD;
49 50
50 #if CONFIG_SYSCALL_DEBUG
51 #if CONFIG_DEBUG_SYS_KILL
51 52 uint64_t tm_start;
52 53 uint64_t tm_end;
53 54 tm_start = hal_get_cycles();
54 printk("\n[DBG] %s : core[%x,%d] enter / process %x / sig %d / cycle %d\n",
55 __FUNCTION__ , local_cxy , this->core->lid , pid, sig_id, (uint32_t)tm_start );
55 if( CONFIG_DEBUG_SYS_KILL < tm_start )
56 printk("\n[DBG] %s : thread %x enter / process %x / sig %d / cycle %d\n",
57 __FUNCTION__ , this, pid, sig_id, (uint32_t)tm_start );
56 58 #endif
57 59
58 // get cluster and pointers on reference process
59 process_xp = cluster_get_reference_process_from_pid( pid );
60 process_cxy = GET_CXY( process_xp );
61 process_ptr = (process_t *)GET_PTR( process_xp );
60 // get cluster and pointers on owner process
61 owner_xp = cluster_get_owner_process_from_pid( pid );
62 owner_cxy = GET_CXY( owner_xp );
63 owner_ptr = GET_PTR( owner_xp );
62 64
63 65 // check process existence
64 if( process_xp == XPTR_NULL )
66 if( owner_xp == XPTR_NULL )
65 67 {
66 syscall_dmsg("\n[ERROR] in %s : process %x not found\n",
67 __FUNCTION__ , pid );
68
69 syscall_dmsg("\n[ERROR] in %s : process %x not found\n", __FUNCTION__ , pid );
70
68 71 this->errno = EINVAL;
69 72 return -1;
… …
71 74
72 75 // get parent process PID
73 parent_xp = hal_remote_lwd( XPTR( process_cxy , &process_ptr->parent_xp ) );
76 parent_xp = hal_remote_lwd( XPTR( owner_cxy , &owner_ptr->parent_xp ) );
74 77 parent_cxy = GET_CXY( parent_xp );
75 78 parent_ptr = GET_PTR( parent_xp );
… …
79 82 if( ppid < 2 )
80 83 {
81 84
87 // does nothing if sig_id == 0
88 if( sig_id == 0 ) return 0;
89
90 // check sig_id
91 if( (sig_id != SIGSTOP) && (sig_id != SIGCONT) && (sig_id != SIGKILL) )
92 {
93 syscall_dmsg("\n[ERROR] in %s : illegal signal type for process %x\n",
94 __FUNCTION__ , sig_id , pid );
85 syscall_dmsg("\n[ERROR] in %s : process %x cannot be killed\n", __FUNCTION__ , pid );
86
95 87 this->errno = EINVAL;
96 88 return -1;
… …
100 92 hal_enable_irq( &save_sr );
101 93
102 // execute process_make_kill() function in owner cluster
103 if( local_cxy == process_cxy ) // owner cluster is local
94 // analyse signal type
95 // supported values are : 0, SIGSTOP, SIGCONT, SIGKILL
96 switch( sig_id )
104 97 {
105 process_make_kill( pid , sig_id );
98 case 0 :
99 {
100 // does nothing
101 retval = 0;
102 break;
103 }
104 case SIGSTOP:
105 {
106 // remove TXT ownership from target process
107 process_txt_reset_ownership( owner_xp );
108
109 // block all threads in all clusters
110 process_sigaction( owner_ptr , BLOCK_ALL_THREADS );
111
112 // atomically update reference process termination state
113 hal_remote_atomic_or( XPTR( owner_cxy , &owner_ptr->term_state ) ,
114 PROCESS_FLAG_BLOCK );
115
116 retval = 0;
117 break;
118 }
119 case SIGCONT:
120 {
121 // unblock all threads in all clusters
122 process_sigaction( owner_ptr , UNBLOCK_ALL_THREADS );
123
124 // atomically update reference process termination state
125 hal_remote_atomic_and( XPTR( owner_cxy , &owner_ptr->term_state ) ,
126 ~PROCESS_FLAG_BLOCK );
127 retval = 0;
128 break;
129 }
130 break;
131 case SIGKILL:
132 {
133 // the process_make_kill() function must be executed
134 // by an RPC thread in process owner cluster
135 // It deletes all target process threads in all clusters,
136 // and updates the process termination state
137 rpc_process_make_kill_client( owner_cxy , owner_ptr , false , 0 );
138
139 retval = 0;
140 break;
141 }
142 default:
143 {
144
145 syscall_dmsg("\n[ERROR] in %s : illegal signal type %d for process %x\n",
146 __FUNCTION__ , sig_id , pid );
147
148 this->errno = EINVAL;
149 retval = -1;
150 break;
151 }
106 152 }
107 else // owner cluster is remote
108 {
109 rpc_process_make_kill_client( process_cxy , pid , sig_id );
110 }
111 153
112 154 // restore IRQs
… …
115 157 hal_fence();
116 158
117 #if CONFIG_SYSCALL_DEBUG
159 #if CONFIG_DEBUG_SYS_KILL
118 160 tm_end = hal_get_cycles();
119 printk("\n[DBG] %s : core[%x,%d] exit / process %x / sig %d / cost = %d\n",
120 __FUNCTION__ , local_cxy , this->core->lid , pid, sig_id, (uint32_t)(tm_end - tm_start) );
161 if( CONFIG_DEBUG_SYS_KILL < tm_end )
162 printk("\n[DBG] %s : thread %x exit / process %x / sig %d / cost = %d / cycle %d\n",
163 __FUNCTION__ , this, pid, sig_id, (uint32_t)(tm_end - tm_start), (uint32_t)tm_end );
121 164 #endif
122
123 return 0;
165
166 return retval;
124 167
125 168 } // end sys_kill()
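The switch above accepts exactly four values: 0 (an existence probe), SIGSTOP, SIGCONT and SIGKILL. A user-side sketch exercising all four, assuming the usual kill() libc wrapper (the helper name is hypothetical, not part of the changeset):

    #include <sys/types.h>
    #include <signal.h>

    int stop_resume_kill( pid_t pid )
    {
        if( kill( pid , 0 ) )       return -1;  // sig 0 : checks that the process exists
        if( kill( pid , SIGSTOP ) ) return -1;  // blocks all its threads in all clusters
        if( kill( pid , SIGCONT ) ) return -1;  // unblocks them again
        return kill( pid , SIGKILL );           // marks the process for deletion
    }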
-
trunk/kernel/syscalls/sys_read.c
r421 r433
65 65
66 66 #if CONFIG_READ_DEBUG
67 enter_sys_read = (uint32_t)tm_start;
68 #endif
69
70 thread_t * this = CURRENT_THREAD;
71 process_t * process = this->process;
72
73 #if CONFIG_DEBUG_SYS_READ
67 74 uint64_t tm_start;
68 75 uint64_t tm_end;
69 76 tm_start = hal_get_cycles();
70 #endif
71
72 #if CONFIG_READ_DEBUG
73 enter_sys_read = (uint32_t)tm_start;
74 #endif
75
76 thread_t * this = CURRENT_THREAD;
77 process_t * process = this->process;
78
77 if( CONFIG_DEBUG_SYS_READ < tm_start )
78 printk("\n[DBG] %s : thread %x enter / process %x / vaddr = %x / count %d / cycle %d\n",
79 __FUNCTION__, this, process->pid, vaddr, count, (uint32_t)tm_start );
80 #endif
81
79 82 // check file_id argument
80 83 if( file_id >= CONFIG_PROCESS_FILE_MAX_NR )
… …
188 191 hal_fence();
189 192
190 #if CONFIG_READ_DEBUG
193 #if CONFIG_DEBUG_SYS_READ
191 194 tm_end = hal_get_cycles();
192 printk("\n[DBG] %s : core[%x,%d] / thread %x in process %x / cycle %d\n"
195 if( CONFIG_DEBUG_SYS_READ < tm_end )
196 printk("\n[DBG] %s : thread %x / process %x / cycle %d\n"
193 197 "nbytes = %d / first byte = %c / file_id = %d / cost = %d\n",
194 198 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid ,
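Since sys_read() can legitimately return fewer bytes than requested, callers are expected to loop. A hedged user-side sketch over the standard read() wrapper (the helper name is hypothetical):

    #include <unistd.h>

    int read_all( int fd , char * buf , int count )
    {
        int done = 0;
        while( done < count )
        {
            int n = read( fd , buf + done , count - done );
            if( n <  0 ) return -1;   // syscall failed, errno has been set
            if( n == 0 ) break;       // end of file
            done += n;
        }
        return done;                  // number of bytes actually read
    }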
-
trunk/kernel/syscalls/sys_thread_exit.c
r409 r433
87 87 thread_block( this , THREAD_BLOCKED_JOIN );
88 88
89 // release the lock protecting the flags
89 // release the lock protecting the join
90 90 remote_spinlock_unlock( XPTR( local_cxy, &this->join_lock ) );
91 91
-
trunk/kernel/syscalls/sys_wait.c
r421 r433
41 41 pid_t child_pid;
42 42 int child_state;
43 thread_t * child_thread;
43 44
44 45 thread_t * this = CURRENT_THREAD;
45 46 process_t * process = this->process;
47 pid_t pid = process->pid;
46 48
47 #if CONFIG_SYSCALL_DEBUG
49 #if CONFIG_DEBUG_SYS_WAIT
48 50 uint64_t tm_start;
49 51 uint64_t tm_end;
50 52 tm_start = hal_get_cycles();
51 printk("\n[DBG] %s : core[%x,%d] enter / process %x / cycle %d\n",
52 __FUNCTION__ , local_cxy , this->core->lid , process->pid, (uint32_t)tm_start );
53 if( CONFIG_DEBUG_SYS_WAIT < tm_start )
54 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
55 __FUNCTION__, this, process->pid, (uint32_t)tm_start );
53 56 #endif
54 57
… …
64 67 }
65 68
66 // get cluster and local pointer on reference process
67 xptr_t ref_xp = process->ref_xp;
68 cxy_t ref_cxy = GET_CXY( ref_xp );
69 process_t * ref_ptr = GET_PTR( ref_xp );
69 // get process owner cluster
70 cxy_t owner_cxy = CXY_FROM_PID( pid );
70 71
71 // get extended pointer on children list root
72 xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->children_root );
72 // This function must be executed in owner cluster
73 assert( (owner_cxy == local_cxy) , __FUNCTION__ ,
74 "calling thread must execute in owner cluster" );
73 75
74 // get extended pointer on lock protecting the children list
75 xptr_t lock_xp = XPTR( ref_cxy , &ref_ptr->children_lock );
76 // This function must be executed by the main thread
77 assert( (process->th_tbl[0] == this) , __FUNCTION__ ,
78 "this function must be executed by the main thread" );
79
80 // get extended pointer on children list root and lock
81 xptr_t children_root_xp = XPTR( owner_cxy , &process->children_root );
82 xptr_t children_lock_xp = XPTR( owner_cxy , &process->children_lock );
76 83
77 84 // exit this blocking loop only when a child process changes state
78 85 while( 1 )
79 86 {
80 // get lock
81 remote_spinlock_lock( lock_xp );
87 // get lock protecting children list
88 remote_spinlock_lock( children_lock_xp );
82 89
83 // scan the list of child process
84 XLIST_FOREACH( root_xp , iter_xp )
90 // scan the list of owner child process
91 XLIST_FOREACH( children_root_xp , iter_xp )
85 92 {
86 // get child process cluster and local pointer
93 // get child process owner cluster and local pointer
87 94 child_xp = XLIST_ELEMENT( iter_xp , process_t , children_list );
88 95 child_ptr = GET_PTR( child_xp );
89 96 child_cxy = GET_CXY( child_xp );
90 97
91 // get the child PID
98 // get term_state from child owner process
92 child_pid = (int)hal_remote_lw( XPTR( child_cxy , &child_ptr->pid ));
99 child_state = (int)hal_remote_lw ( XPTR(child_cxy,&child_ptr->term_state));
93 100
94 // get the child process state
95 child_state = hal_remote_lw( XPTR( child_cxy , &child_ptr->state ) );
101 // test if child process is terminated,
102 // but termination not yet reported to parent process
103 if( ((child_state & PROCESS_FLAG_EXIT) ||
104 (child_state & PROCESS_FLAG_KILL) ||
105 (child_state & PROCESS_FLAG_BLOCK)) &&
106 ((child_state & PROCESS_FLAG_WAIT) == 0) )
107 {
108 // get pointer on main thread and PID from child owner process
109 child_pid = (pid_t) hal_remote_lw ( XPTR(child_cxy,&child_ptr->pid));
110 child_thread = (thread_t *)hal_remote_lpt( XPTR(child_cxy,&child_ptr->th_tbl[0]));
96 111
97 // check child process state
98 if( child_state != PROCESS_STATE_RUNNING )
99 {
100 // release lock
101 remote_spinlock_unlock( lock_xp );
112 // set the PROCESS_FLAG_WAIT in owner child descriptor
113 hal_remote_atomic_or( XPTR( child_cxy , &child_ptr->term_state ),
114 PROCESS_FLAG_WAIT );
102 115
103 #if CONFIG_SYSCALL_DEBUG
116 // set the THREAD_FLAG_REQ_DELETE in child main thread
117 hal_remote_atomic_or( XPTR( child_cxy , &child_thread->flags ) ,
118 THREAD_FLAG_REQ_DELETE );
119
120 #if CONFIG_DEBUG_SYS_WAIT
104 121 tm_end = hal_get_cycles();
105 printk("\n[DBG] %s : core[%x,%d] exit / process %x / cost = %d\n",
106 __FUNCTION__ , local_cxy, this->core->lid, process->pid, (uint32_t)(tm_end - tm_start) );
122 if( CONFIG_DEBUG_SYS_WAIT < tm_end )
123 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n",
124 __FUNCTION__, this, process->pid, (uint32_t)tm_end );
107 125 #endif
108 126
109 // return relevant info to process
110 hal_copy_to_uspace( status , &child_state , sizeof(int) );
111 return child_pid;
127 // return relevant info to calling parent process
128 hal_copy_to_uspace( status , &child_state , sizeof(int) );
129 return child_pid;
112 130 }
113 131 }
114 132
115 133 // release lock
116 remote_spinlock_unlock( lock_xp );
134 remote_spinlock_unlock( children_lock_xp );
117 135
118 // block the calling thread until a child process change state
119 thread_block( this , THREAD_BLOCKED_WAIT );
120 sched_yield( "wait child termination" );
136 // deschedule without blocking
137 sched_yield( "parent wait children termination" );
121 138
122 139
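The loop above never blocks: it repeatedly scans the children list and calls sched_yield() until some child has a reportable termination state, so from user space the call still appears blocking. A sketch of the parent side, assuming the POSIX wait() wrapper and the status macros documented in syscalls.h below (hypothetical user code):

    #include <sys/wait.h>
    #include <stdio.h>

    void reap_children( void )
    {
        int   status;
        pid_t pid;

        while( (pid = wait( &status )) > 0 )
        {
            if     ( WIFEXITED( status ) )
                printf("child %d exited with %d\n", (int)pid, WEXITSTATUS( status ));
            else if( WIFSIGNALED( status ) )
                printf("child %d killed by a signal\n", (int)pid);
            else if( WIFSTOPPED( status ) )
                printf("child %d stopped\n", (int)pid);
        }
    }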
-
trunk/kernel/syscalls/sys_write.c
r421 r433
46 46 reg_t save_sr; // required to enable IRQs during syscall
47 47
48 #if CONFIG_WRITE_DEBUG
48 thread_t * this = CURRENT_THREAD;
49 process_t * process = this->process;
50
51 #if CONFIG_DEBUG_SYS_WRITE
49 52 uint32_t tm_start;
50 53 uint32_t tm_end;
51 54 tm_start = hal_get_cycles();
55 if( CONFIG_DEBUG_SYS_WRITE < tm_start )
56 printk("\n[DBG] %s : thread %x / process %x / vaddr %x / count %d / cycle %d\n",
57 __FUNCTION__, this, process->pid, vaddr, count, (uint32_t)tm_start );
52 58 #endif
53
54 thread_t * this = CURRENT_THREAD;
55 process_t * process = this->process;
56 59
57 60 // check file_id argument
58 61 if( file_id >= CONFIG_PROCESS_FILE_MAX_NR )
59 62 {
60 printk("\n[ERROR] in %s : illegal file descriptor index\n", __FUNCTION__ );
63
64 #if CONFIG_DEBUG_SYSCALLS_ERROR
65 printk("\n[ERROR] in %s : illegal file descriptor index\n", __FUNCTION__ );
66 #endif
61 67 this->errno = EBADFD;
62 68 return -1;
… …
68 74 if ( error )
69 75 {
70 printk("\n[ERROR] in %s : user buffer unmapped = %x\n",
71 __FUNCTION__ , (intptr_t)vaddr );
76
77 #if CONFIG_DEBUG_SYSCALLS_ERROR
78 printk("\n[ERROR] in %s : user buffer unmapped = %x\n", __FUNCTION__ , (intptr_t)vaddr );
79 #endif
72 80 this->errno = EINVAL;
73 81 return -1;
… …
82 90 if( file_xp == XPTR_NULL )
83 91 {
84 printk("\n[ERROR] in %s : undefined file descriptor index = %d in process %x\n",
85 __FUNCTION__ , file_id , process->pid );
92
93 #if CONFIG_DEBUG_SYSCALLS_ERROR
94 printk("\n[ERROR] in %s : undefined file descriptor index = %d in process %x\n",
95 __FUNCTION__ , file_id , process->pid );
96 #endif
86 97 this->errno = EBADFD;
87 98 return -1;
… …
103 114 if( (attr & FD_ATTR_WRITE_ENABLE) == 0 )
104 115 {
105 printk("\n[ERROR] in %s : file %d not writable in process %x\n",
106 __FUNCTION__ , file_id , process->pid );
116
117 #if CONFIG_DEBUG_SYSCALLS_ERROR
118 printk("\n[ERROR] in %s : file %d not writable in process %x\n",
119 __FUNCTION__ , file_id , process->pid );
120 #endif
107 121 this->errno = EBADFD;
108 122 return -1;
… …
131 145 if( nbytes != count )
132 146 {
133 printk("\n[ERROR] in %s cannot write data to file %d in process %x\n",
134 __FUNCTION__ , file_id , process->pid );
147
148 #if CONFIG_DEBUG_SYSCALLS_ERROR
149 printk("\n[ERROR] in %s cannot write data to file %d in process %x\n",
150 __FUNCTION__ , file_id , process->pid );
151 #endif
135 152 this->errno = error;
136 153 return -1;
… …
142 159 hal_fence();
143 160
144 #if CONFIG_WRITE_DEBUG
161 #if CONFIG_DEBUG_SYS_WRITE
145 162 tm_end = hal_get_cycles();
146 printk("\n[DBG] %s : core[%x,%d] / thread %x in process %x / cycle %d\n"
163 if( CONFIG_DEBUG_SYS_WRITE < tm_end )
164 printk("\n[DBG] %s : thread %x in process %x / cycle %d\n"
147 165 "nbytes = %d / first byte = %c / file_id = %d / cost = %d\n",
148 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid ,
149 (uint32_t)tm_start , nbytes , *((char *)(intptr_t)paddr) , file_id ,
150 (uint32_t)(tm_end - tm_start) );
166 __FUNCTION__, this, process->pid, (uint32_t)tm_start,
167 nbytes, *((char *)(intptr_t)paddr) , file_id , (uint32_t)(tm_end - tm_start) );
151 168 #endif
152 169
153 #if (CONFIG_WRITE_DEBUG & 0x1)
154 printk("\n@@@@@@@@@@@@ timing to write character %c\n"
155 " - enter_sys_write = %d\n"
156 " - exit_sys_write = %d\n",
157 *((char *)(intptr_t)paddr) , (uint32_t)tm_start , (uint32_t)tm_end );
158 #endif
159
160 170 return nbytes;
161 171
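The kernel treats a partial write (nbytes != count) as an error here, but portable callers usually retry anyway. A hedged user-side sketch over the standard write() wrapper (the helper name is hypothetical):

    #include <unistd.h>

    int write_all( int fd , const char * buf , int count )
    {
        int done = 0;
        while( done < count )
        {
            int n = write( fd , buf + done , count - done );
            if( n < 0 ) return -1;    // syscall failed, errno has been set
            done += n;
        }
        return done;                  // all count bytes were written
    }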
-
trunk/kernel/syscalls/syscalls.h
r421 r433
171 171 /******************************************************************************************
172 172 * [10] This function implements the exit system call terminating a POSIX process.
173 * In the present implementation, this function actually implements _exit():
174 * - it does not flush open output streams.
175 * - it does not close open streams.
173 176 ******************************************************************************************
174 177 * @ status : termination status (not used in present implementation).
… …
421 424
422 425 /******************************************************************************************
423 * [34] This function implements the "kill" system call.
426 * [34] This function implements the "kill" system call on the kernel side.
424 427 * It registers the signal defined by the <sig_id> argument in all thread descriptors
425 428 * of a target process identified by the <pid> argument. This is done in all clusters
… …
432 435 ******************************************************************************************
433 436 * @ pid : target process identifier.
434 * @ sig_id : index defining the signal type (from 1 to 31).
437 * @ sig_id : index defining the signal type.
435 438 * @ return 0 if success / returns -1 if failure.
436 439 *****************************************************************************************/
… …
439 442
440 443 /******************************************************************************************
441 * [35] This function implements the "getpid" system call.
444 * [35] This function implements the "getpid" system call on the kernel side.
442 445 ******************************************************************************************
443 446 * @ returns the process PID for the calling thread.
… …
446 449
447 450 /******************************************************************************************
448 * [36] This function implement the "fork" system call.
449 * The calling process descriptor (parent process), and the associated thread descriptor are
450 * replicated in the same cluster as the calling thread, but the new process (child process)
451 * is registered in another target cluster, that is the new process owner.
452 * The child process and the associated main thread will be migrated to the target cluster
453 * later, when the child process makes an "exec" or any other system call... TODO [AG]
451 * [36] This function implements the "fork" system call on the kernel side.
452 * The calling process descriptor (parent process), and the associated thread descriptor
453 * are replicated in a - likely - remote cluster, that becomes the child process owner.
454 * The child process gets a new PID, and is linked to the parent PID. The child process
455 * inherits from its parent the memory image, and all open files (including the TXT).
456 * The child process becomes the TXT terminal owner.
454 457 * The target cluster depends on the "fork_user" flag and "fork_cxy" variable that can be
455 458 * stored in the calling thread descriptor by the specific fork_place() system call.
456 * If not, the sys_fork() function makes a query to the DQDT to select the target cluster.
459 * If not, the kernel function makes a query to the DQDT to select the target cluster.
457 460 ******************************************************************************************
458 461 * @ if success, returns child process PID to parent, and returns 0 to child.
… …
462 465
463 466 /******************************************************************************************
464 * [37] This function implement the "exec" system call, that creates a new process
465 * descriptor.
466 * It is executed in the client cluster, but the new process descriptor and the main
467 * thread are created in a server cluster, that is generally another cluster.
468 * - if the server_cluster is the client cluster, it calls directly the process_make_exec()
469 * function to create a new process, and launch a new thread in local cluster.
470 * - if the target_cluster is remote, it calls the rpc_process_exec_client() to execute
471 * process_make_exec() on the remote cluster.
472 * In both case this function build an exec_info_t structure containing all informations
473 * required to build the new process descriptor and the associated thread.
474 * Finally, the calling process and thread are deleted.
467 * [37] This function implements the "exec" system call on the kernel side.
468 * It creates, in the same cluster as the calling thread, a new process descriptor,
469 * and a new associated main thread descriptor, executing a new memory image defined
470 * by the <filename> argument. This new process inherits from the old process the PID
471 * and the PPID, as well as all open files (including the TXT).
472 * The old process descriptor, and all its threads, are blocked and marked for deletion.
473 * Therefore the exec syscall does not return to the calling thread in case of success.
474 * This function builds an exec_info_t structure containing the new process arguments,
475 * as defined by the <argv> argument, and the new process environment variables,
476 * as defined by the <envp> argument.
477 * TODO : the <argv> and <envp> arguments are not supported yet (both must be NULL).
475 478 ******************************************************************************************
476 479 * @ filename : string pointer on .elf filename (pointer in user space)
477 480 * @ argv : array of strings on process arguments (pointers in user space)
478 481 * @ envp : array of strings on environment variables (pointers in user space)
479 * @ returns 0 if success / returns -1 if failure.
482 * @ does not return if success / returns -1 if failure.
480 483 *****************************************************************************************/
481 484 int sys_exec( char * filename,
… …
495 498
496 499 /******************************************************************************************
497 * [39] This blocking function wait a change of a child process state. A change can be:
498 * - a termination of child following a child exit.
499 * - a termination of child following a SIGKILL signal.
500 * [39] This blocking function waits a change of a child process state, that can be:
501 * - a termination of child following a process_make_exit().
502 * - a termination of child following a process_make_kill().
500 503 * - a blocking of child following a SIGSTOP signal.
501 * It returns the PID of the involved child process, after storing in the memory slot
502 * pointed by the <status> argument relevant information on the child state change.
504 * In case of a multi-thread process, this function must be called by the main thread
505 * running in the reference cluster.
506 * When a change has been observed, it returns the PID of the child process, and stores
507 * in the <status> argument relevant information on the child state change.
503 508 * The following macros can be used to extract information from status:
504 509 * - WIFEXITED(status) : is true if the child process terminated with an exit().
… …
506 511 * - WIFSTOPPED(status) : is true if the child process is stopped by a signal.
507 512 * - WEXITSTATUS(status) : returns the low-order 8 bits of the exit() argument.
508 * A status of 0 indicates a normal termination.
509 513 * If a parent process terminates without waiting for all child processes to terminate,
510 514 * the remaining child processes are attached to the init process.
511 ******************************************************************************************
512 * @ status : pointer on the child PID status.
513 * @ return child PID if success / return -1 if failure.
515 * WARNING: negative values for the <pid> argument are not supported.
516 ******************************************************************************************
517 * @ searched_pid : searched child process identifier.
518 * @ status : [out] child termination status.
519 * @ return child PID if success / return -1 if searched PID not found.
514 520 *****************************************************************************************/
515 521 int sys_wait( uint32_t * status );
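Taken together, the contracts of [36], [37] and [39] give the usual spawn pattern: fork in the parent, exec in the child (no return on success), wait in the parent. A combined sketch under the same assumptions as above (POSIX-like libc wrappers, argv/envp forced to NULL in this revision, hypothetical helper name):

    #include <unistd.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    int spawn_and_wait( char * path )
    {
        int   status;
        pid_t pid = fork();

        if( pid < 0 ) return -1;                 // fork failed

        if( pid == 0 )                           // child side
        {
            execve( path , NULL , NULL );        // argv / envp must be NULL here
            exit( -1 );                          // reached only if exec failed
        }

        if( wait( &status ) != pid ) return -1;  // parent reaps this child
        return WIFEXITED( status ) ? WEXITSTATUS( status ) : -1;
    }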