- Timestamp: Jan 13, 2021, 12:36:17 AM (4 years ago)
- Location: trunk/kernel
- Files: 1 deleted, 75 edited
trunk/kernel/Makefile
r675 r683

  build/mm/page.o \
  build/mm/kcm.o \
- build/mm/khm.o \
  build/mm/mapper.o \
  build/mm/kmem.o
…
  build/syscalls/sys_wait.o

- SYS_OBJS_4 = build/syscalls/sys_get_config.o \
- build/syscalls/sys_get_core_id.o \
- build/syscalls/sys_get_cycle.o \
+ SYS_OBJS_4 = build/syscalls/sys_get.o \
  build/syscalls/sys_display.o \
  build/syscalls/sys_place_fork.o \
…
  build/syscalls/sys_trace.o \
  build/syscalls/sys_fg.o \
- build/syscalls/sys_is_fg.o
+ build/syscalls/sys_is_fg.o \
+ build/syscalls/sys_fbf.o

  SYS_OBJS_5 = build/syscalls/sys_exit.o \
  build/syscalls/sys_sync.o \
  build/syscalls/sys_fsync.o \
- build/syscalls/sys_get_best_core.o \
- build/syscalls/sys_get_nb_cores.o \
- build/syscalls/sys_get_thread_info.o \
- build/syscalls/sys_fbf.o \
  build/syscalls/sys_socket.o
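The SYS_OBJS_4 / SYS_OBJS_5 edits above fold six per-query syscall objects (sys_get_config.o, sys_get_core_id.o, sys_get_cycle.o, sys_get_best_core.o, sys_get_nb_cores.o, sys_get_thread_info.o) into a single sys_get.o, and move sys_fbf.o up into SYS_OBJS_4. A consolidation like this typically replaces several kernel entry points with one syscall dispatching on an operation code. The sketch below shows that shape only; every name, code and signature in it is an illustrative assumption, not the actual ALMOS-MKH sys_get() interface.

#include <stdint.h>

// hypothetical operation codes for a consolidated sys_get() entry point
typedef enum
{
    GET_CONFIG      = 0,   // hardware configuration
    GET_CORE_ID     = 1,   // calling core identifier
    GET_CYCLE       = 2,   // current cycle counter
    GET_BEST_CORE   = 3,   // best core for thread placement
    GET_NB_CORES    = 4,   // number of cores in a cluster
    GET_THREAD_INFO = 5,   // calling thread information
}
get_op_t;

// stub handlers standing in for the six former sys_get_*() implementations
static int get_config     ( intptr_t a0 , intptr_t a1 ) { (void)a0; (void)a1; return 0; }
static int get_core_id    ( intptr_t a0 , intptr_t a1 ) { (void)a0; (void)a1; return 0; }
static int get_cycle      ( intptr_t a0 , intptr_t a1 ) { (void)a0; (void)a1; return 0; }
static int get_best_core  ( intptr_t a0 , intptr_t a1 ) { (void)a0; (void)a1; return 0; }
static int get_nb_cores   ( intptr_t a0 , intptr_t a1 ) { (void)a0; (void)a1; return 0; }
static int get_thread_info( intptr_t a0 , intptr_t a1 ) { (void)a0; (void)a1; return 0; }

// single entry point: decode the operation code, then delegate
int sys_get( uint32_t operation , intptr_t a0 , intptr_t a1 )
{
    switch( operation )
    {
        case GET_CONFIG      : return get_config     ( a0 , a1 );
        case GET_CORE_ID     : return get_core_id    ( a0 , a1 );
        case GET_CYCLE       : return get_cycle      ( a0 , a1 );
        case GET_BEST_CORE   : return get_best_core  ( a0 , a1 );
        case GET_NB_CORES    : return get_nb_cores   ( a0 , a1 );
        case GET_THREAD_INFO : return get_thread_info( a0 , a1 );
        default              : return -1;   // undefined operation code
    }
}

One entry point with an operation code keeps the syscall table stable when new queries are added, at the cost of one extra decode step per call.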
trunk/kernel/devices/dev_fbf.c
r674 r683

  intptr_t * user_buffer )
  {
- kmem_req_t req;
  fbf_window_t * window;   // window descriptor (created in local cluster)
  vseg_t * vseg;   // vseg descriptor (created in reference cluster)
…

  // allocate memory for the window descriptor in local cluster
- req.type = KMEM_KCM;
- req.order = bits_log2( sizeof(fbf_window_t) );
- req.flags = AF_ZERO | AF_KERNEL;
- window = kmem_alloc( &req );
+ window = kmem_alloc( bits_log2(sizeof(fbf_window_t)) , AF_ZERO );

  if( window == NULL )
…
  printk("\n[ERROR] in %s / thread[%x,%x] cannot create vseg in reference cluster\n",
  __FUNCTION__, process->pid, this->trdid );
- req.ptr = (void *)window;
- kmem_free( &req );
+ kmem_free( window , bits_log2(sizeof(fbf_window_t)) );
  return -1;
  }
…
  printk("\n[ERROR] in %s / thread[%x,%x] cannot allocate buffer for window\n",
  __FUNCTION__, process->pid, this->trdid );
- req.ptr = (void *)window;
- kmem_free( &req );
+ kmem_free( window , bits_log2(sizeof(fbf_window_t)) );
  vmm_remove_vseg( process , vseg );
  return -1;
…
  error_t dev_fbf_delete_window( uint32_t wid )
  {
- kmem_req_t req;
-
  thread_t * this = CURRENT_THREAD;
  process_t * process = this->process;
…

  // 8. release memory allocated for window descriptor
- req.type = KMEM_KCM;
- req.ptr = window_ptr;
- kmem_remote_free( window_cxy , &req );
+ kmem_remote_free( window_cxy , window_ptr , bits_log2(sizeof(fbf_window_t)) );

  // 9. release the associated vseg
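Every dev_fbf.c hunk above is a call-site update for the same kernel-memory API change: the kmem_req_t request descriptor (with its type / order / flags / ptr fields) disappears, and kmem_alloc(), kmem_free() and kmem_remote_free() now take the allocation order and the flags directly. A minimal sketch of the new caller pattern follows; the types and stub implementations are assumptions made so it compiles outside the kernel, with prototypes inferred from the calls visible above.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define AF_ZERO  0x0001                 // zero-fill flag (value assumed)

// smallest order such that (1 << order) covers n bytes (stub)
static uint32_t bits_log2( uint32_t n )
{
    uint32_t order = 0;
    while( (1u << order) < n ) order++;
    return order;
}

// signatures inferred from the call sites in this changeset;
// these stubs emulate them with the C library allocator
static void * kmem_alloc( uint32_t order , uint32_t flags )
{
    void * ptr = malloc( 1u << order );
    if( (ptr != NULL) && (flags & AF_ZERO) ) memset( ptr , 0 , 1u << order );
    return ptr;
}
static void kmem_free( void * ptr , uint32_t order )
{
    (void)order;        // the real allocator needs it to find the right cache
    free( ptr );
}

typedef struct fbf_window_s { uint32_t wid; } fbf_window_t;   // stub descriptor

int example( void )
{
    // allocate a zeroed descriptor: the size is passed as an order (log2 bytes)
    fbf_window_t * window = kmem_alloc( bits_log2( sizeof(fbf_window_t) ) , AF_ZERO );
    if( window == NULL ) return -1;

    // ... use the descriptor ...

    // release: the caller must pass the same order used at allocation,
    // since no request descriptor records it anymore
    kmem_free( window , bits_log2( sizeof(fbf_window_t) ) );
    return 0;
}

The remote variant seen in step 8 of dev_fbf_delete_window() follows the same pattern, with the target cluster identifier as first argument: kmem_remote_free( cxy , ptr , order ).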
trunk/kernel/devices/dev_nic.c
r674 r683 1 2 1 /* 3 2 * dev_nic.c - NIC (Network Controler) generic device API implementation. … … 46 45 void dev_nic_init( chdev_t * chdev ) 47 46 { 47 48 assert( __FUNCTION__ , (chdev->func == DEV_FUNC_NIC) , 49 "bad func value"); 50 48 51 thread_t * new_thread; 49 52 error_t error; … … 74 77 75 78 // build pointer on server function 76 void * func = is_rx ? &dev_nic_rx_server : &dev_nic_tx_server;79 void * server_func = is_rx ? &dev_nic_rx_server : &dev_nic_tx_server; 77 80 78 81 // create server thread 79 82 error = thread_kernel_create( &new_thread, 80 83 THREAD_DEV, 81 func,84 server_func, 82 85 chdev, 83 86 lid ); … … 120 123 thread_t * this = CURRENT_THREAD; 121 124 122 xptr_t dev_xp = chdev_dir.nic_tx[0]; 125 // get cluster and local pointer fo the nic_tx[0] chdev 126 xptr_t dev_xp = chdev_dir.nic_tx[0]; 123 127 chdev_t * dev_ptr = GET_PTR( dev_xp ); 124 125 if( dev_xp == XPTR_NULL ) return -1; 128 cxy_t dev_cxy = GET_CXY( dev_xp ); 129 130 if( dev_xp == XPTR_NULL ) 131 { 132 133 #if DEBUG_DEV_NIC_ERROR 134 printk("\n[ERROR] in %s : nic_tx[0] chdev undefined in chdev_dir of cluster %x\n", 135 __FUNCTION__, local_cxy ); 136 #endif 137 return -1; 138 } 126 139 127 140 // set command arguments in client thread descriptor … … 131 144 this->nic_cmd.type = NIC_CMD_GET_KEY; 132 145 146 // get cmd function pointer from nic_tx[0] chdev descriptor 147 dev_cmd_t * cmd = hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd )); 148 133 149 // call driver 134 dev_ptr->cmd( XPTR( local_cxy , this ) );135 136 // get "status"150 cmd( XPTR( local_cxy , this ) ); 151 152 // return command status 137 153 return this->nic_cmd.status; 138 } 154 155 } // end dev_nic_get_key() 139 156 140 157 ////////////////////////////////////////// … … 146 163 if( channel >= LOCAL_CLUSTER->nb_nic_channels ) return -1; 147 164 148 xptr_t dev_xp = chdev_dir.nic_tx[0]; 165 // get cluster and local pointer fo the nic_tx[channel] chdev 166 xptr_t dev_xp = chdev_dir.nic_tx[channel]; 149 167 chdev_t * dev_ptr = GET_PTR( dev_xp ); 150 151 if( dev_xp == XPTR_NULL ) return -1; 168 cxy_t dev_cxy = GET_CXY( dev_xp ); 169 170 if( dev_xp == XPTR_NULL ) 171 { 172 173 #if DEBUG_DEV_NIC_ERROR 174 printk("\n[ERROR] in %s : nic_tx[%d] chdev undefined in chdev_dir of cluster %x\n", 175 __FUNCTION__, channel, local_cxy ); 176 #endif 177 return -1; 178 } 152 179 153 180 // set command arguments in client thread descriptor … … 157 184 this->nic_cmd.status = run; 158 185 186 // get cmd function pointer from nic_tx[channel] chdev descriptor 187 dev_cmd_t * cmd = hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd )); 188 159 189 // call driver 160 dev_ptr->cmd( XPTR( local_cxy , this ) );190 cmd( XPTR( local_cxy , this ) ); 161 191 162 192 // return "error" 163 193 return this->nic_cmd.error; 164 } 194 195 } // end dev_nic_set_run() 165 196 166 197 ////////////////////////////////// … … 169 200 thread_t * this = CURRENT_THREAD; 170 201 202 // get cluster and local pointer fo the nic_tx[0] chdev 171 203 xptr_t dev_xp = chdev_dir.nic_tx[0]; 172 204 chdev_t * dev_ptr = GET_PTR( dev_xp ); 205 cxy_t dev_cxy = GET_CXY( dev_xp ); 173 206 174 if( dev_xp == XPTR_NULL ) return -1; 207 if( dev_xp == XPTR_NULL ) 208 { 209 210 #if DEBUG_DEV_NIC_ERROR 211 printk("\n[ERROR] in %s : nic_tx[0] chdev undefined in chdev_dir of cluster %x\n", 212 __FUNCTION__, local_cxy ); 213 #endif 214 return -1; 215 } 175 216 176 217 // set command arguments in client thread descriptor … … 178 219 this->nic_cmd.type = NIC_CMD_GET_INSTRU; 179 220 221 // get cmd function pointer from 
nic_tx[0] chdev descriptor 222 dev_cmd_t * cmd = hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd )); 223 180 224 // call driver 181 dev_ptr->cmd( XPTR( local_cxy , this ) );225 cmd( XPTR( local_cxy , this ) ); 182 226 183 227 // return "error" 184 228 return this->nic_cmd.error; 185 } 229 230 } // end dev_nic_get_instru() 231 186 232 187 233 //////////////////////////////////// … … 190 236 thread_t * this = CURRENT_THREAD; 191 237 238 // get cluster and local pointer fo the nic_tx[0] chdev 192 239 xptr_t dev_xp = chdev_dir.nic_tx[0]; 193 240 chdev_t * dev_ptr = GET_PTR( dev_xp ); 241 cxy_t dev_cxy = GET_CXY( dev_xp ); 194 242 195 if( dev_xp == XPTR_NULL ) return -1; 243 if( dev_xp == XPTR_NULL ) 244 { 245 246 #if DEBUG_DEV_NIC_ERROR 247 printk("\n[ERROR] in %s : nic_tx[0] chdev undefined in chdev_dir of cluster %x\n", 248 __FUNCTION__, local_cxy ); 249 #endif 250 return -1; 251 } 196 252 197 253 // set command arguments in client thread descriptor … … 199 255 this->nic_cmd.type = NIC_CMD_GET_INSTRU; 200 256 257 // get cmd function pointer from nic_tx[0] chdev descriptor 258 dev_cmd_t * cmd = hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd )); 259 201 260 // call driver 202 dev_ptr->cmd( XPTR( local_cxy , this ) );261 cmd( XPTR( local_cxy , this ) ); 203 262 204 263 // return "error" 205 264 return this->nic_cmd.error; 206 } 265 266 } // end dev_nic_clear_instru() 207 267 208 268 … … 261 321 262 322 //////////////////////////////////////////////////////////////////////////////////////// 263 // This static function computes the checksum for an UDP packet defined by 264 // the <buffer> and <size> arguments. 323 // This static function computes the checksum for a TCP segment or an UDP packet, 324 // defined by the <buffer> and <length> arguments. 325 // It includes the "pseudo header "defined by the <src_ip_addr>, <dst_ip_addr>, and 326 // <tcp_length> arguments, and by the UDP/TCP protocol code. 265 327 //////////////////////////////////////////////////////////////////////////////////////// 266 // @ buffer : [in] pointer on UDP packet base. 267 // @ size : [in] number of bytes in this packet (including header). 328 // @ buffer : [in] pointer on buffer containing the TCP segment or UDP packet. 329 // @ length : [in] number of bytes in this packet/segment (including header). 330 // @ src_ip_addr : [in] source IP address (pseudo header). 331 // @ dst_ip_addr : [in] destination IP address (pseudo header). 332 // @ is_tcp : [in] TCP if true / UDP if false (pseudo header). 
268 333 // @ return the checksum value on 16 bits 269 334 //////////////////////////////////////////////////////////////////////////////////////// 270 static uint16_t dev_nic_udp_checksum( uint8_t * buffer, 271 uint32_t size ) 272 { 273 uint32_t i; 274 uint32_t carry; 275 uint32_t cs; // 32 bits accumulator 276 uint16_t * buf; 277 uint32_t max; // number of uint16_t in packet 278 279 // compute max & buf 280 buf = (uint16_t *)buffer; 281 max = size >> 1; 282 283 // extend buffer[] if required 284 if( size & 1 ) 285 { 286 max++; 287 buffer[size] = 0; 288 } 289 290 // compute checksum for UDP packet 291 for( i = 0 , cs = 0 ; i < size ; i++ ) cs += buf[i]; 292 293 // handle carry 294 carry = (cs >> 16); 295 if( carry ) 296 { 297 cs += carry; 298 carry = (cs >> 16); 299 if( carry ) cs += carry; 300 } 301 302 // one's complement 303 return ~cs; 304 } 305 306 //////////////////////////////////////////////////////////////////////////////////////// 307 // This static function computes the checksum for a TCP segment defined by the <buffer> 308 // and <size> arguments. It includes the pseudo header defined by the <src_ip_addr>, 309 // <dst_ip_addr>, <size> arguments, and by the TCP_PROTOCOL code. 310 //////////////////////////////////////////////////////////////////////////////////////// 311 // @ buffer : [in] pointer on TCP segment base. 312 // @ tcp_length : [in] number of bytes in this TCP segment (including header). 313 // @ src_ip_addr : [in] source IP address (pseudo header) 314 // @ dst_ip_addr : [in] destination IP address (pseudo header) 315 // @ return the checksum value on 16 bits 316 //////////////////////////////////////////////////////////////////////////////////////// 317 static uint16_t dev_nic_tcp_checksum( uint8_t * buffer, 318 uint32_t tcp_length, 319 uint32_t src_ip_addr, 320 uint32_t dst_ip_addr ) 335 static uint16_t dev_nic_tcp_udp_checksum( uint8_t * buffer, 336 uint32_t length, 337 uint32_t src_ip_addr, 338 uint32_t dst_ip_addr, 339 bool_t is_tcp ) 321 340 { 322 341 uint32_t i; … … 324 343 uint32_t cs; // 32 bits accumulator 325 344 uint16_t * buf; 326 uint32_t max; // number of uint16_t in segment 345 uint32_t max; // number of uint16_t in segment/paket 327 346 328 347 // compute max & buf 329 348 buf = (uint16_t *)buffer; 330 max = tcp_length >> 1;349 max = length >> 1; 331 350 332 351 // extend buffer[] if required 333 if( tcp_length & 1 )352 if( length & 1 ) 334 353 { 335 354 max++; 336 buffer[ tcp_length] = 0;355 buffer[length] = 0; 337 356 } 338 357 339 358 // compute checksum for TCP segment 340 for( i = 0 , cs = 0 ; i < tcp_length; i++ ) cs += buf[i];359 for( i = 0 , cs = 0 ; i < max ; i++ ) cs += buf[i]; 341 360 342 361 // complete checksum for pseudo-header 343 cs += src_ip_addr; 344 cs += dst_ip_addr; 345 cs += PROTOCOL_TCP; 346 cs += tcp_length; 362 cs += (src_ip_addr & 0xFFFF); 363 cs += (src_ip_addr >> 16 ); 364 cs += (dst_ip_addr & 0xFFFF); 365 cs += (dst_ip_addr >> 16 ); 366 cs += length; 367 cs += (is_tcp ? PROTOCOL_TCP : PROTOCOL_UDP); 347 368 348 369 // handle carry … … 360 381 361 382 /////////////////////////////////////////////////////////////////////////////////////////// 362 // This static function is called by the NIC_TX orNIC_RX server threads to unblock383 // This static function is called by the NIC_TX and NIC_RX server threads to unblock 363 384 // the TX client thread after completion (success or error) of a TX command registered 364 // in a socket identified by the <socket_xp> argument. 
The <status> argument defines365 // the command success/failure status: a null value signals a success, a non-null value366 // signals a failure. For all commands, it copies the status value in the tx_sts field,367 // and print an errormessage on TXT0 in case of failure.385 // in a socket identified by the <socket_xp> argument. 386 // The <status> argument defines the command success/failure status. 387 // For all commands, it copies the status value in the tx_sts field, and print an error 388 // message on TXT0 in case of failure. 368 389 /////////////////////////////////////////////////////////////////////////////////////////// 369 390 // @ socket_xp : [in] extended pointer on socket … … 377 398 cxy_t socket_cxy = GET_CXY( socket_xp ); 378 399 379 if( status != CMD_STS_SUCCESS)400 if( (status != CMD_STS_SUCCESS) && (status != CMD_STS_EOF) ) 380 401 { 381 402 uint32_t sock_state = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->state )); … … 400 421 } // end dev_nic_unblock_tx_client() 401 422 402 /////////////////////////////////////////////////////////////////////////////////////////// 403 // This static function is called by the NIC_TX or NIC_RX server threads to unblock 423 424 /////////////////////////////////////////////////////////////////////////////////////////// 425 // Functions called by the NIC_RX server thread 426 /////////////////////////////////////////////////////////////////////////////////////////// 427 428 /////////////////////////////////////////////////////////////////////////////////////////// 429 // This static function is called by the NIC_RX server threads to unblock 404 430 // the RX client thread after completion (success or error) of an RX command registered 405 // in a socket identified by the <socket_xp> argument. The <status> argument defines406 // the command success/failure status: a null value signals a success, a non-null value407 // signals a failure. For all commands, it copies the status value in the rx_sts field,408 // and print an errormessage on TXT0 in case of failure.431 // in a socket identified by the <socket_xp> argument. 432 // The <status> argument defines the command success/failure status. 433 // For all commands, it copies the status value in the rx_sts field, and print an error 434 // message on TXT0 in case of failure. 
409 435 /////////////////////////////////////////////////////////////////////////////////////////// 410 436 // @ socket_xp : [in] extended pointer on socket … … 418 444 cxy_t socket_cxy = GET_CXY( socket_xp ); 419 445 420 if( status != CMD_STS_SUCCESS)446 if( (status != CMD_STS_SUCCESS) && (status != CMD_STS_EOF) ) 421 447 { 422 448 uint32_t sock_state = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->state )); … … 440 466 441 467 } // end dev_nic_unblock_rx_client() 442 443 ///////////////////////////////////////////////////////////////////////////////////////////444 // Functions called by the NIC_RX server thread445 ///////////////////////////////////////////////////////////////////////////////////////////446 468 447 469 /////////////////////////////////////////////////////////////////////////////////////////// … … 553 575 554 576 return 0; 555 } 577 578 } // end dev_nic_rx_check_ip() 556 579 557 580 /////////////////////////////////////////////////////////////////////////////////////////// … … 595 618 xptr_t socket_rbuf_xp; // extended pointer on socket rx_buf 596 619 xptr_t socket_lock_xp; // extended pointer on socket lock 597 xptr_t socket_client_xp; // extended pointer on socket rx_client field 598 xptr_t client_xp; // extended pointer on client thread descriptor 620 xptr_t socket_rx_client; // socket rx_client thread 621 bool_t socket_rx_valid; // socket rx_command valid 622 uint32_t socket_rx_cmd; // socket rx_command type 599 623 uint32_t payload; // number of bytes in payload 600 624 uint32_t status; // number of bytes in rx_buf … … 602 626 uint32_t moved_bytes; // number of bytes actually moved to rx_buf 603 627 628 #if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR 629 thread_t * this = CURRENT_THREAD; 630 uint32_t cycle = (uint32_t)hal_get_cycles(); 631 #endif 632 633 #if DEBUG_DEV_NIC_RX 634 uint32_t fdid; 635 uint32_t pid; 636 if( DEBUG_DEV_NIC_RX < cycle ) 637 printk("\n[%s] thread[%x,%x] enter / channel %d / plen %d / cycle %d\n", 638 __FUNCTION__, this->process->pid, this->trdid, chdev->channel, k_length, cycle ); 639 if( (DEBUG_DEV_NIC_RX < cycle) && (DEBUG_DEV_NIC_RX & 1)) 640 putb("64 first bytes in k_buf" , k_buf , 64 ); 641 #endif 642 604 643 // build extended pointers on list of sockets attached to NIC_RX chdev 605 644 root_xp = XPTR( local_cxy , &chdev->wait_root ); 606 645 lock_xp = XPTR( local_cxy , &chdev->wait_lock ); 607 646 608 // compute UDP packet checksum 609 checksum = dev_nic_udp_checksum( k_buf , k_length ); 610 611 // get checksum from received packet header 647 // extract checksum from received UDP packet header 612 648 pkt_checksum = ((uint16_t)k_buf[6] << 8) | (uint16_t)k_buf[7]; 613 649 650 // reset checksum field 651 k_buf[6] = 0; 652 k_buf[7] = 0; 653 654 // compute checksum from received UDP packet 655 checksum = dev_nic_tcp_udp_checksum( k_buf, 656 k_length, 657 pkt_src_addr, 658 pkt_dst_addr, 659 false ); // is_not_tcp 614 660 // discard corrupted packet 615 if( pkt_checksum != checksum ) return; 661 if( pkt_checksum != checksum ) 662 { 663 664 #if DEBUG_DEV_NIC_ERROR 665 printk("\n[WARNING] in %s : thread[%x,%x] discard corrupted packet on channel %d / cycle %d\n" 666 " expected checksum %x / received checksum %x\n", 667 __FUNCTION__, this->process->pid, this->trdid, chdev->channel, cycle, 668 (uint32_t)checksum, (uint32_t)pkt_checksum ); 669 #endif 670 return; 671 } 616 672 617 673 // get src_port and dst_port from UDP header … … 619 675 uint32_t pkt_dst_port = ((uint32_t)k_buf[2] << 8) | (uint32_t)k_buf[3]; 620 676 621 // discard unexpected 
packet622 if( xlist_is_empty( root_xp ) ) return;623 624 677 // take the lock protecting the sockets list 625 678 remote_busylock_acquire( lock_xp ); … … 658 711 else match_socket = local_match; 659 712 660 // exit loop when socket found 661 if( match_socket ) break; 713 // exit loop if matching 714 if( match_socket ) 715 { 716 717 #if DEBUG_DEV_NIC_RX 718 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) ); 719 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) ); 720 if( DEBUG_DEV_NIC_RX < cycle ) 721 printk("\n[%s] thread[%x,%x] found matching UDP socket[%d,%d] / state %s\n", 722 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) ); 723 #endif 724 break; 725 } 662 726 } 663 727 … … 666 730 667 731 // discard unexpected packet 668 if( match_socket == false ) return; 669 670 // build extended pointers on various socket fields 732 if( match_socket == false ) 733 { 734 735 #if DEBUG_DEV_NIC_ERROR 736 printk("\n[WARNING] in %s : thread[%x,%s] discard unexpected packet on channel %d / cycle %d\n", 737 __FUNCTION__, this->process->pid, this->trdid, chdev->channel, cycle ); 738 #endif 739 return; 740 } 741 742 // build extended pointers on socket.rx_buf and socket.lock 671 743 socket_rbuf_xp = XPTR( socket_cxy , &socket_ptr->rx_buf ); 672 744 socket_lock_xp = XPTR( socket_cxy , &socket_ptr->lock ); 673 socket_client_xp = XPTR( socket_cxy , &socket_ptr->rx_client );674 745 675 746 // take the lock protecting the socket … … 678 749 // get status & space from rx_buf 679 750 status = remote_buf_status( socket_rbuf_xp ); 680 space = CONFIG_SOCK_RX_BUF_SIZE - status; 681 682 // get client thread 683 client_xp = hal_remote_l64( socket_client_xp ); 751 space = (1 << CONFIG_SOCK_RX_BUF_ORDER) - status; 752 753 // get socket rx_client, rx_valid and rx_cmd values 754 socket_rx_client = hal_remote_l64( XPTR( socket_cxy , &socket_ptr->rx_client ) ); 755 socket_rx_valid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->rx_valid ) ); 756 socket_rx_cmd = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->rx_cmd ) ); 684 757 685 758 // get number of bytes in payload … … 691 764 // move payload from kernel buffer to socket rx_buf 692 765 remote_buf_put_from_kernel( socket_rbuf_xp, 693 k_buf + UDP_HEAD_LEN, 694 moved_bytes ); 695 // unblock client thread 696 if( client_xp != XPTR_NULL ) 697 { 698 thread_unblock( client_xp , THREAD_BLOCKED_IO ); 766 k_buf + UDP_HEAD_LEN, 767 moved_bytes ); 768 #if DEBUG_DEV_NIC_RX 769 if( DEBUG_DEV_NIC_RX < cycle ) 770 printk("\n[%s] thread[%x,%x] for socket[%d,%d] move %d bytes to rx_buf / buf_sts %d\n", 771 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 772 moved_bytes, remote_buf_status(socket_rbuf_xp), moved_bytes ); 773 #endif 774 775 // signal client thread if pending RECV command 776 if( (socket_rx_valid == true) && (socket_rx_cmd == CMD_RX_RECV) ) 777 { 778 // reset rx_valid 779 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->rx_valid), false ); 780 781 // report success to RX client thread 782 dev_nic_unblock_rx_client( socket_xp , CMD_STS_SUCCESS ); 783 784 #if DEBUG_DEV_NIC_RX 785 if( DEBUG_DEV_NIC_RX < cycle ) 786 printk("\n[%s] thread[%x,%x] for UDP socket[%x,%d] / unblock client thread\n", 787 __FUNCTION__, this->process->pid, this->trdid, pid, fdid ); 788 #endif 789 790 } 791 else 792 { 793 794 #if DEBUG_DEV_NIC_RX 795 if( DEBUG_DEV_NIC_RX < cycle ) 796 printk("\n[%s] thread[%x,%x] for socket[%x,%d] / no client thread\n" 797 " rx_valid %d / rx_cmd %s\n", 798 __FUNCTION__, this->process->pid, 
this->trdid, pid, fdid, 799 socket_rx_valid , socket_cmd_type_str(socket_rx_cmd) ); 800 #endif 801 699 802 } 700 803 … … 707 810 // This static function is called by the dev_nic_rx_server() function to handle one RX 708 811 // TCP segment contained in a kernel buffer defined by the <k_buf> & <k_length> arguments. 709 // The <seg_remote_addr> and <seg_local_addr> arguments are obtained from the received 710 // IP packet header. It the received segment doesn't match any connected socket attached 711 // to the selected chdev[k], or any listening socket waiting connection, or if the segment 712 // is corrupted, this segment is discarded. 713 // If required by the TCP flags, it registers an R2T request in the socket R2T queue 714 // to implement the TCP handcheck for close and connect. 812 // The <seg_remote_addr> and <seg_local_addr> arguments have been extracted from the IP 813 // IP header. The local and remote ports are obtained from the TCP header. 814 // It the received segment doesn't match any connected socket attached to the selected 815 // <chdev>, or any listening socket waiting connection, or if the segment is corrupted, 816 // the segment is discarded. This function implement the TCP error recovery protocol, 817 // as specified by the RFC. Depending on both the socket state, and the segment header: 818 // - it register data in the RX buffer, 819 // - it update the socket state and TCB, 820 // - it register acknolegce requests in the R2T queue, 821 // - it register connection requests in the CRQ queue, 715 822 /////////////////////////////////////////////////////////////////////////////////////////// 716 823 // Implementation note: … … 724 831 // the SYN, FIN, ACK and RST flags. It updates the socket state when required, moves 725 832 // data to the rx_buf when possible, and return. It takes the lock protecting the socket, 726 // because a nconnected socket is accessed by both the NIC_TX and NIC_RX server threads.833 // because a connected socket is accessed by both the NIC_TX and NIC_RX server threads. 727 834 // 4) If no matching connected socket has been found, it scans the list of listening 728 835 // sockets to find a matching listening socket. 
… … 760 867 bool_t socket_tx_valid; // TX command valid 761 868 uint32_t socket_tx_cmd; // TX command type 762 uint32_t socket_tx_todo; // number of TX bytes not sent yet763 869 uint32_t socket_tx_nxt; // next byte to send in TX stream 764 870 uint32_t socket_tx_una; // first unacknowledged byte in TX stream 871 uint32_t socket_tx_len; // number of bytes in tx_buf 872 uint32_t socket_tx_ack; // number of acknowledged bytes in tx_buf 765 873 bool_t socket_rx_valid; // RX command valid 766 874 uint32_t socket_rx_cmd; // TX command type … … 804 912 uint32_t seg_data_len = k_length - seg_hlen; // number of bytes in payload 805 913 806 #if DEBUG_DEV_NIC_RX 807 thread_t * this = CURRENT_THREAD; 808 uint32_t cycle; 914 uint32_t seg_data_dup; // number of duplicated bytes in payload 915 uint32_t seg_data_new; // number of new bytes in payload 916 917 #if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR 809 918 uint32_t fdid; 810 919 pid_t pid; 811 #endif 812 813 #if DEBUG_DEV_NIC_RX 814 cycle = (uint32_t)hal_get_cycles(); 815 if( cycle > DEBUG_DEV_NIC_RX ) 920 thread_t * this = CURRENT_THREAD; 921 uint32_t cycle = (uint32_t)hal_get_cycles(); 922 #endif 923 924 #if DEBUG_DEV_NIC_RX 925 if( DEBUG_DEV_NIC_RX < cycle ) 816 926 printk("\n[%s] thread[%x,%x] enters / tcp_length %d / tcp_flags %x / cycle %d\n", 817 927 __FUNCTION__, this->process->pid, this->trdid, k_length, seg_flags , cycle ); 818 928 #endif 819 929 820 // compute and check TCP checksum930 // reset checksum field 821 931 k_buf[16] = 0; 822 932 k_buf[17] = 0; 823 checksum = dev_nic_tcp_checksum( k_buf, 824 k_length, 825 seg_remote_addr, 826 seg_local_addr ); 827 933 934 // compute TCP checksum 935 checksum = dev_nic_tcp_udp_checksum( k_buf, 936 k_length, 937 seg_remote_addr, 938 seg_local_addr, 939 true ); // is_tcp 828 940 // discard segment if corrupted 829 941 if( seg_checksum != checksum ) 830 942 { 831 943 832 #if DEBUG_DEV_NIC_RX 833 if( cycle > DEBUG_DEV_NIC_RX ) 834 printk("\n[%s] thread[%x,%x] tcp checksum failure : received %x / computed %x\n", 835 __FUNCTION__, this->process->pid, this->trdid, seg_checksum, checksum ); 944 #if DEBUG_DEV_NIC_ERROR 945 printk("\n[WARNING] in %s : thread[%x,%x] / checksum failure on channel %d / cycle %d\n", 946 __FUNCTION__, this->process->pid, this->trdid, chdev->channel, cycle ); 836 947 #endif 837 948 return; 838 949 } 839 950 840 // scan list of attached sockets to find a matching TCP socket841 attached_match = false;842 843 951 // build extended pointer on xlist of sockets attached to NIC_RX chdev 844 952 root_xp = XPTR( local_cxy , &chdev->wait_root ); 845 953 lock_xp = XPTR( local_cxy , &chdev->wait_lock ); 846 954 955 attached_match = false; 956 847 957 // take the lock protecting the list of attached sockets 848 958 remote_busylock_acquire( lock_xp ); 849 959 960 // scan list of attached sockets to find a matching TCP socket 850 961 XLIST_FOREACH( root_xp , iter_xp ) 851 962 { … … 878 989 { 879 990 880 #if DEBUG_DEV_NIC_RX 881 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) ); 882 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) ); 883 if( cycle > DEBUG_DEV_NIC_RX ) 884 printk("\n[%s] thread[%x,%x] matching attached socket[%d,%d] / state %s\n", 885 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) ); 991 #if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR 992 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) ); 993 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) ); 994 #endif 995 996 #if DEBUG_DEV_NIC_RX 997 if( 
DEBUG_DEV_NIC_RX < cycle ) 998 printk("\n[%s] matching attached TCP socket[%d,%d] / state %s\n", 999 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 886 1000 #endif 887 1001 break; … … 912 1026 socket_tx_valid = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_valid )); 913 1027 socket_tx_cmd = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_cmd )); 914 socket_tx_todo = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_todo ));915 1028 socket_tx_nxt = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_nxt )); 916 1029 socket_tx_una = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_una )); 1030 socket_tx_ack = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_ack )); 1031 socket_tx_len = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_len )); 917 1032 918 1033 socket_rx_valid = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->rx_valid )); … … 926 1041 { 927 1042 //////////////////////// 928 case TCP_STATE_SYN_SENT: // TCP client waiting for SYN-ACK in connect handshake1043 case TCP_STATE_SYN_SENT: // TCP client waiting for SYN-ACK 929 1044 { 930 // [1] check ACK flag1045 // [1] & [2] check ACK and RST 931 1046 if( seg_ack_set ) 932 1047 { 933 if( seg_ack_num != TCP_ISS_CLIENT + 1 ) // bad ACK => report error 1048 bool_t ack_ok = (seg_ack_num == (CONFIG_SOCK_ISS_CLIENT + 1) ); 1049 1050 if( seg_rst_set && ack_ok ) 934 1051 { 935 1052 936 1053 #if DEBUG_DEV_NIC_RX 937 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : expect ack_num %x / get %x\n", 938 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 939 socket_state_str(socket_state), TCP_ISS_CLIENT + 1, seg_ack_num ); 940 #endif 941 // make an RST request to R2T queue 1054 if( DEBUG_DEV_NIC_RX < cycle ) 1055 printk("\n[%s] socket[%x,%d] %s RST received from remote TCP => close\n", 1056 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1057 #endif 1058 // report RST to local TCP client thread 1059 dev_nic_unblock_tx_client( socket_xp , CMD_STS_RST ); 1060 1061 // update socket state 1062 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1063 TCP_STATE_BOUND ); 1064 break; 1065 } 1066 1067 if( seg_rst_set && (ack_ok == false) ) 1068 { 1069 1070 #if DEBUG_DEV_NIC_ERROR 1071 printk("\n[ERROR] in %s : socket[%x,%d] %s RST but expect ack_num %x != rcvd %x => discard\n", 1072 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1073 CONFIG_SOCK_ISS_CLIENT + 1, seg_ack_num ); 1074 #endif 1075 break; 1076 } 1077 1078 if( (seg_rst_set == false) && (ack_ok == false) ) 1079 { 1080 1081 #if DEBUG_DEV_NIC_ERROR 1082 printk("\n[ERROR] in %s : socket[%x,%d] %s expected ack_num %x != rcvd %x => send RST\n", 1083 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1084 CONFIG_SOCK_ISS_CLIENT + 1, seg_ack_num ); 1085 #endif 1086 // send RST to remote TCP 942 1087 socket_put_r2t_request( socket_r2tq_xp, 943 1088 TCP_FLAG_RST, 944 1089 chdev->channel ); 945 946 // report error to local TX client thread947 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADACK );948 949 1090 break; 950 1091 } 951 1092 } 952 1093 953 // [2] check RST flag // receive RST => report error954 if( seg_rst_set )955 {956 957 #if DEBUG_DEV_NIC_RX958 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received RST flag\n",959 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) );960 #endif961 // update socket state962 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ),963 TCP_STATE_BOUND );964 965 // signal error to local TX client thread966 dev_nic_unblock_tx_client( socket_xp , CMD_STS_RST );967 968 
break;969 }970 971 1094 // [3] handle security & precedence TODO ... someday 972 1095 … … 976 1099 977 1100 #if DEBUG_DEV_NIC_RX 978 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received expected SYN-ACK\n", 979 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1101 if( DEBUG_DEV_NIC_RX < cycle ) 1102 printk("\n[%s] socket[%x,%d] %s : received expected SYN-ACK\n", 1103 __FUNCTION__, pid, fdid , socket_state_str(socket_state) ); 980 1104 #endif 981 1105 // set socket.tx_una … … 999 1123 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 1000 1124 } 1001 else // received SYN without ACK => client becomesserver1125 else // SYN without ACK => TCP client becomes a TCP server 1002 1126 { 1003 1127 1004 1128 #if DEBUG_DEV_NIC_RX 1005 printk("\n[%s] thread[%x,%x] for socket[%x,%d] %s : received SYN-ACK => become server\n", 1006 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1129 if( DEBUG_DEV_NIC_RX < cycle ) 1130 printk("\n[%s] socket[%x,%d] %s : received SYN without ACK => send a SYN_ACK\n", 1131 __FUNCTION__, pid, fdid , socket_state_str(socket_state) ); 1007 1132 #endif 1008 1133 // update socket.state 1009 hal_remote_s32( XPTR(socket_cxy,&socket_ptr->state), TCP_STATE_SYN_RCVD ); 1134 hal_remote_s32( XPTR(socket_cxy,&socket_ptr->state), 1135 TCP_STATE_SYN_RCVD ); 1010 1136 1011 1137 // set socket.tx_nxt 1012 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_nxt), TCP_ISS_SERVER ); 1138 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_nxt), 1139 CONFIG_SOCK_ISS_SERVER ); 1013 1140 1014 1141 // set socket.rx_nxt to seg_seq_num + 1 1015 1142 hal_remote_s32( XPTR(socket_cxy,&socket_ptr->rx_nxt), seg_seq_num + 1 ); 1016 1143 1017 // make a SYN.ACK request to R2T queue1144 // send SYN.ACK to remote TCP 1018 1145 socket_put_r2t_request( socket_r2tq_xp, 1019 1146 TCP_FLAG_SYN | TCP_FLAG_ACK, … … 1021 1148 } 1022 1149 break; 1023 } 1024 //////////////////////// 1025 case TCP_STATE_SYN_RCVD: // TCP server waiting last ACK in connect handshake 1026 { 1027 // [1] check sequence number 1028 if( seg_seq_num != socket_rx_nxt ) // unexpected SEQ_NUM => discard 1029 { 1030 1031 #if DEBUG_DEV_NIC_RX 1032 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : expect seq_num %x / get %x\n", 1033 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 1034 socket_state_str(socket_state), socket_rx_nxt, seg_seq_num ); 1035 #endif 1036 // discard segment without reporting 1037 break; 1038 } 1039 1040 // [2] handle RST flag // received RST => report error 1041 if( seg_rst_set ) 1042 { 1043 1044 #if DEBUG_DEV_NIC_RX 1045 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received RST flag\n", 1046 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1047 #endif 1048 // update socket state 1049 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), TCP_STATE_BOUND ); 1050 1051 // report error to local TX client thread 1052 dev_nic_unblock_tx_client( socket_xp , CMD_STS_RST ); 1053 1054 break; 1055 } 1056 1057 // [3] handle security & precedence TODO ... 
someday 1058 1059 // [4] handle SYN flag 1060 if( seg_syn_set ) // received SYN => discard 1061 { 1062 1063 #if DEBUG_DEV_NIC_RX 1064 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received SYN flag\n", 1065 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1066 #endif 1067 // discard segment without reporting 1068 break; 1069 } 1070 1071 // [5] handle ACK flag 1072 if( seg_ack_set == false ) // missing ACK => discard 1073 { 1074 1075 #if DEBUG_DEV_NIC_RX 1076 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : no ACK in TCP segment\n", 1077 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1078 #endif 1079 // discard segment without reporting 1080 break; 1081 } 1082 else if( seg_ack_num != (TCP_ISS_SERVER + 1) ) // unacceptable ACK 1083 { 1084 1085 #if DEBUG_DEV_NIC_RX 1086 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : expect ack_num %x / get %x\n", 1087 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 1088 socket_state_str(socket_state), TCP_ISS_SERVER + 1, seg_ack_num ); 1089 #endif 1090 1091 // register an RST request to R2TQ for remote TCP client 1092 socket_put_r2t_request( socket_r2tq_xp, 1093 TCP_FLAG_RST, 1094 chdev->channel ); 1095 1096 // report error to local TX client thread 1097 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADACK ); 1098 } 1099 else // acceptable ACK 1100 { 1101 1102 #if DEBUG_DEV_NIC_RX 1103 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received expected ACK\n", 1104 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1105 #endif 1106 // set socket.tx_una 1107 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_una), seg_ack_num ); 1108 1109 // update socket.state 1110 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1111 TCP_STATE_ESTAB ); 1112 1113 // report success to local TX client thread 1114 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 1115 } 1116 break; 1117 } 1118 ///////////////////// 1150 } // end state SYN_SENT 1151 1152 //////////////////////// all "connected" states 1153 case TCP_STATE_SYN_RCVD: 1119 1154 case TCP_STATE_ESTAB: 1120 1155 case TCP_STATE_FIN_WAIT1: … … 1125 1160 case TCP_STATE_TIME_WAIT: 1126 1161 { 1127 // [1] check sequence number : out_of_order segments not accepted 1128 if( seg_seq_num != socket_rx_nxt ) 1162 // [1] check SEQ_NUM 1163 // - we accept duplicate segments (i.e. seq_num < rx_next) 1164 // - we don't accept out of order segment (i.e. 
seq_num_num > rx_next) 1165 // => seq_num must be in window [rx_nxt - rx_win , rx_nxt] 1166 1167 bool_t seq_ok = is_in_window( seg_seq_num, 1168 (socket_rx_nxt - socket_rx_wnd), 1169 socket_rx_nxt ); 1170 1171 if( seq_ok == false ) // SEQ_NUM not acceptable 1129 1172 { 1130 1131 #if DEBUG_DEV_NIC_RX 1132 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : illegal SEQ_NUM %x / expected %x\n", 1133 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 1134 socket_state_str(socket_state), seg_seq_num, socket_rx_nxt ); 1135 #endif 1136 // discard segment 1137 break; 1138 } 1139 1140 // check all bytes in window when the payload exist 1141 // TODO : we could accept bytes that are in window, 1142 // but this implementation reject all bytes in segment 1143 if( seg_data_len > 0 ) 1144 { 1145 // compute min & max acceptable sequence numbers 1146 uint32_t seq_min = socket_rx_nxt; 1147 uint32_t seq_max = socket_rx_nxt + socket_rx_wnd - 1; 1148 1149 // compute sequence number for last byte in segment 1150 uint32_t seg_seq_last = seg_seq_num + seg_data_len - 1; 1151 1152 if( is_in_window( seg_seq_last, seq_min, seq_max ) == false ) 1173 if( seg_rst_set ) 1153 1174 { 1154 1175 1155 #if DEBUG_DEV_NIC_RX 1156 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : last SEQ_NUM %x not in [%x,%x]\n", 1157 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 1158 socket_state_str(socket_state), seg_seq_last, seq_min, seq_max ); 1159 #endif 1160 // discard segment 1176 #if DEBUG_DEV_NIC_ERROR 1177 printk("\n[ERROR] in %s : socket[%x,%d] %s expect seq_num %x != rcvd %x and RST => discard\n", 1178 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1179 CONFIG_SOCK_ISS_CLIENT + 1, seg_seq_num ); 1180 #endif 1161 1181 break; 1162 1182 } 1163 } 1164 1165 // [2] handle RST flag 1166 if( seg_rst_set ) 1183 else // no RST 1184 { 1185 // send ACK to remote TCP 1186 socket_put_r2t_request( socket_r2tq_xp, 1187 TCP_FLAG_ACK, 1188 chdev->channel ); 1189 #if DEBUG_DEV_NIC_ERROR 1190 printk("\n[ERROR] in %s : socket[%x,%d] %s expect seq_num %x != rcvd %x => ACK and discard\n", 1191 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1192 CONFIG_SOCK_ISS_CLIENT + 1, seg_seq_num ); 1193 #endif 1194 break; 1195 } 1196 } 1197 else // SEQ_NUM acceptable 1167 1198 { 1168 1169 #if DEBUG_DEV_NIC_RX 1170 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received RST flag\n", 1171 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) ); 1172 #endif 1173 if( (socket_state == TCP_STATE_ESTAB ) || 1174 (socket_state == TCP_STATE_FIN_WAIT1 ) || 1175 (socket_state == TCP_STATE_FIN_WAIT2 ) || 1176 (socket_state == TCP_STATE_CLOSE_WAIT) ) 1199 // compute number of new bytes & number of duplicated bytes 1200 if( seg_seq_num != socket_rx_nxt ) // duplicate segment 1177 1201 { 1178 // TODO all pending send & received commands 1179 // must receive "reset" responses 1180 1181 // TODO destroy the socket 1202 seg_data_dup = socket_rx_nxt - seg_seq_num; 1203 seg_data_new = (seg_data_len > seg_data_dup) ? 
1204 (seg_data_len - seg_data_dup) : 0; 1205 } 1206 else // expected segment 1207 { 1208 seg_data_dup = 0; 1209 seg_data_new = seg_data_len; 1210 } 1211 1212 #if DEBUG_DEV_NIC_RX 1213 if( DEBUG_DEV_NIC_RX < cycle ) 1214 printk("\n[%s] socket[%x,%d] %s seq_num %x / rx_nxt %x / len %d / new %d / dup %d\n", 1215 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1216 seg_seq_num, socket_rx_nxt, seg_data_len, seg_data_new, seg_data_dup ); 1217 #endif 1218 } 1219 1220 // [2] handle RST flag (depending on socket state) 1221 if( seg_rst_set ) 1222 { 1223 if( socket_state == TCP_STATE_SYN_RCVD ) 1224 { 1225 1226 #if DEBUG_DEV_NIC_RX 1227 if( DEBUG_DEV_NIC_RX < cycle ) 1228 printk("\n[%s] socket[%x,%d] %s RST received from remote TCP => report to user\n", 1229 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1230 #endif 1231 // report RST to local TX client thread 1232 dev_nic_unblock_tx_client( socket_xp , CMD_STS_RST ); 1233 1234 // update socket state 1235 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1236 TCP_STATE_BOUND ); 1237 break; 1238 } 1239 1240 else if( (socket_state == TCP_STATE_ESTAB ) || 1241 (socket_state == TCP_STATE_FIN_WAIT1 ) || 1242 (socket_state == TCP_STATE_FIN_WAIT2 ) || 1243 (socket_state == TCP_STATE_CLOSE_WAIT) ) 1244 { 1245 1246 #if DEBUG_DEV_NIC_RX 1247 if( DEBUG_DEV_NIC_RX < cycle ) 1248 printk("\n[%s] socket[%x,%d] %s / received RST flag\n", 1249 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1250 #endif 1251 // report RST to local TX client thread 1252 if( socket_tx_valid ) dev_nic_unblock_tx_client( socket_xp, 1253 CMD_STS_RST ); 1254 // report RST to local RX client thread 1255 if( socket_rx_valid ) dev_nic_unblock_rx_client( socket_xp, 1256 CMD_STS_RST ); 1257 // update socket state 1258 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1259 TCP_STATE_BOUND ); 1260 break; 1182 1261 } 1183 1262 else // states CLOSING / LAST_ACK / TIME_WAIT 1184 1263 { 1185 // TODO 1264 // update socket state 1265 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1266 TCP_STATE_BOUND ); 1267 break; 1186 1268 } 1269 } 1270 1271 // [3] handle security & precedence TODO ... someday 1272 1273 // [4] check SYN 1274 if( seg_syn_set ) // received SYN => send RST to remote 1275 { 1276 1277 #if DEBUG_DEV_NIC_ERROR 1278 printk("\n[ERROR] in %s socket[%x,%d] %s : received SYN flag => send RST-ACK\n", 1279 __FUNCTION__, pid, fdid , socket_state_str(socket_state) ); 1280 #endif 1281 // send RST & ACK to remote TCP 1282 socket_put_r2t_request( socket_r2tq_xp, 1283 TCP_FLAG_RST | TCP_FLAG_ACK, 1284 chdev->channel ); 1285 1286 // report RST to local TX client thread 1287 if( socket_tx_valid ) dev_nic_unblock_tx_client( socket_xp, 1288 CMD_STS_RST ); 1289 // report RST to local RX client thread 1290 if( socket_rx_valid ) dev_nic_unblock_rx_client( socket_xp, 1291 CMD_STS_RST ); 1292 // update socket state 1293 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1294 TCP_STATE_BOUND ); 1187 1295 break; 1188 1296 } 1189 1297 1190 // [3] handle security & precedence TODO ... 
someday 1191 1192 // [4] check SYN flag 1193 if( seg_syn_set ) // received SYN => ERROR 1298 // [5] handle ACK (depending on socket state) 1299 if( seg_ack_set == false ) // missing ACK => discard segment 1194 1300 { 1195 1301 1196 #if DEBUG_DEV_NIC_RX 1197 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received unexpected SYN\n", 1198 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1199 #endif 1200 // TODO signal error to user 1201 1202 // make an RST request to R2T queue 1203 socket_put_r2t_request( socket_r2tq_xp, 1204 TCP_FLAG_RST, 1205 chdev->channel ); 1206 1207 // update socket state 1208 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), TCP_STATE_BOUND ); 1209 1302 #if DEBUG_DEV_NIC_ERROR 1303 printk("\n[ERROR] in %s : socket[%x,%d] %s / no ACK in segment => discard\n", 1304 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1305 #endif 1210 1306 break; 1211 1307 } 1212 1308 1213 // [5] check ACK 1214 if( seg_ack_set == false ) // missing ACK 1309 // compute acceptable ACK 1310 bool_t ack_ok = is_in_window( seg_ack_num, 1311 socket_tx_una, 1312 socket_tx_nxt ); 1313 1314 if( socket_state == TCP_STATE_SYN_RCVD ) 1215 1315 { 1216 1217 #if DEBUG_DEV_NIC_RX 1218 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : no ACK flag\n", 1219 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) ); 1220 #endif 1221 // discard segment 1222 break; 1223 } 1224 else if( is_in_window( seg_ack_num, 1225 socket_tx_una, 1226 socket_tx_nxt ) == false ) // unacceptable ACK 1227 { 1228 1229 #if DEBUG_DEV_NIC_RX 1230 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : ACK_NUM %x not in [%x,%x]\n", 1231 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state), 1232 seg_ack_num, socket_tx_una, socket_tx_nxt ); 1233 #endif 1234 // discard segment 1235 break; 1236 } 1237 else // acceptable ack 1238 { 1239 // update socket.tx_una 1240 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_una), seg_ack_num ); 1241 1242 // update socket.tx_wnd 1243 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_wnd), seg_window ); 1244 1245 // check last data byte acknowledged for a SEND command 1246 if( (socket_tx_todo == 0) && 1247 (seg_ack_num == socket_tx_nxt) && 1248 (socket_tx_cmd == CMD_TX_SEND) ) 1316 if( ack_ok ) // acceptable ACK 1249 1317 { 1250 // signal success to TX client thread 1318 1319 #if DEBUG_DEV_NIC_RX 1320 if( DEBUG_DEV_NIC_RX < cycle ) 1321 printk("\n[%s] socket[%x,%d] %s : received expected ACK => update socket\n", 1322 __FUNCTION__, pid, fdid , socket_state_str(socket_state) ); 1323 #endif 1324 // set socket.tx_una 1325 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_una), seg_ack_num ); 1326 1327 // update socket.state 1328 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1329 TCP_STATE_ESTAB ); 1330 1331 // report success to local TX client thread 1251 1332 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 1252 1333 } 1334 else // send RST to remote 1335 { 1336 1337 #if DEBUG_DEV_NIC_ERROR 1338 printk("\n[ERROR] in %s : socket[%x,%d] %s / ACK %x not in [%x,%x] => discard\n", 1339 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1340 seg_ack_num, socket_tx_una, socket_tx_nxt ); 1341 #endif 1342 // send RST & ACK to remote TCP 1343 socket_put_r2t_request( socket_r2tq_xp, 1344 TCP_FLAG_RST | TCP_FLAG_ACK, 1345 chdev->channel ); 1346 break; 1347 } 1253 1348 } 1254 1255 // [7] handle URG flag TODO ... 
someday 1256 1257 // [8] Move DATA to rx_buf / ACK request to R2T queue / unblock rx_client 1258 if( seg_data_len ) 1349 1350 else if( (socket_state == TCP_STATE_ESTAB) || 1351 (socket_state == TCP_STATE_FIN_WAIT1) || 1352 (socket_state == TCP_STATE_FIN_WAIT2) || 1353 (socket_state == TCP_STATE_FIN_WAIT2) || 1354 (socket_state == TCP_STATE_CLOSE_WAIT) || 1355 (socket_state == TCP_STATE_CLOSING) ) 1259 1356 { 1260 if( (socket_state == TCP_STATE_ESTAB) || 1261 (socket_state == TCP_STATE_FIN_WAIT1) || 1262 (socket_state == TCP_STATE_FIN_WAIT2) ) 1357 if( ack_ok ) // acceptable ack 1358 { 1359 // compute number of acknowledged bytes 1360 uint32_t ack_bytes = seg_ack_num - socket_tx_una; 1361 1362 if( ack_bytes ) // handle acknowledged bytes 1363 { 1364 #if DEBUG_DEV_NIC_RX 1365 if( DEBUG_DEV_NIC_RX < cycle ) 1366 printk("\n[%s] socket[%x,%d] %d bytes acknowledged => update socket\n", 1367 __FUNCTION__, pid, fdid, ack_bytes ); 1368 #endif 1369 // update socket.tx_una, socket.tx_ack, and socket.tx_wnd fields 1370 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_una), 1371 seg_ack_num ); 1372 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_ack), 1373 socket_tx_ack + ack_bytes ); 1374 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_wnd), 1375 seg_window ); 1376 1377 // unblock the TX client thread if last byte acknowledged 1378 if( (socket_tx_ack + ack_bytes) == socket_tx_len ) 1379 { 1380 // report success to TX client thread 1381 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 1382 #if DEBUG_DEV_NIC_RX 1383 if( DEBUG_DEV_NIC_RX < cycle ) 1384 printk("\n[%s] socket[%x,%d] %s : last ack => unblock TX client thread\n", 1385 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1386 #endif 1387 } 1388 } 1389 1390 if( socket_state == TCP_STATE_FIN_WAIT1 ) 1391 { 1392 // update socket state 1393 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1394 TCP_STATE_FIN_WAIT2 ); 1395 } 1396 if( socket_state == TCP_STATE_FIN_WAIT2 ) 1397 { 1398 // TODO 1399 } 1400 else if( socket_state == TCP_STATE_CLOSING ) 1401 { 1402 // update socket state 1403 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1404 TCP_STATE_TIME_WAIT ); 1405 } 1406 else if( socket_state == TCP_STATE_CLOSING ) 1407 { 1408 // TODO 1409 } 1410 } 1411 else // unacceptable ACK => discard segment 1412 { 1413 1414 #if DEBUG_DEV_NIC_ERROR 1415 printk("\n[ERROR] in %s : socket[%x,%d] %s / ACK %x not in [%x,%x] => discard\n", 1416 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1417 seg_ack_num, socket_tx_una, socket_tx_nxt ); 1418 #endif 1419 break; 1420 } 1421 } 1422 1423 else if( socket_state == TCP_STATE_LAST_ACK ) 1424 { 1425 // TODO 1426 } 1427 1428 else if( socket_state == TCP_STATE_TIME_WAIT ) 1429 { 1430 // TODO 1431 } 1432 1433 // [6] handle URG flag TODO ... 
someday 1434 1435 // [7] handle received data : update socket state, 1436 // move data to rx_buf, register ACK request to R2T queue, 1437 // unblock the RX client thread in case of pending RX_RECV command 1438 if((socket_state == TCP_STATE_ESTAB) || 1439 (socket_state == TCP_STATE_FIN_WAIT1) || 1440 (socket_state == TCP_STATE_FIN_WAIT2) ) 1441 { 1442 // register new bytes if requested 1443 if( seg_data_new ) 1263 1444 { 1264 1445 // get number of bytes already stored in rx_buf 1265 1446 uint32_t status = remote_buf_status( socket_rx_buf_xp ); 1266 1447 1267 // compute empty space in rx_buf 1268 uint32_t space = CONFIG_SOCK_RX_BUF_SIZE - status; 1269 1270 // compute number of bytes to move : min (space , seg_data_len) 1271 uint32_t nbytes = ( space < seg_data_len ) ? space : seg_data_len; 1272 1273 // move payload from k_buf to rx_buf 1448 // compute space in rx_buf and actual number of acceptable bytes 1449 // when (space < seg_data_new) the last new bytes are discarded 1450 uint32_t space = (1 << CONFIG_SOCK_RX_BUF_ORDER) - status; 1451 uint32_t rcv_bytes = (space < seg_data_new) ? space : seg_data_new; 1452 1453 // move new bytes from k_buf to rx_buf 1274 1454 remote_buf_put_from_kernel( socket_rx_buf_xp, 1275 k_buf + seg_hlen ,1276 nbytes );1277 #if DEBUG_DEV_NIC_RX 1278 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : move %d bytes to rx_buf\n", 1279 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,1280 socket_state_str(socket_state), nbytes );1281 #endif1282 // update socket.rx_nxt 1455 k_buf + seg_hlen + seg_data_dup, 1456 rcv_bytes ); 1457 #if DEBUG_DEV_NIC_RX 1458 if( DEBUG_DEV_NIC_RX < cycle ) 1459 printk("\n[%s] socket[%x,%d] %s : move %d bytes to rx_buf\n", 1460 __FUNCTION__, pid, fdid, socket_state_str(socket_state), rcv_bytes ); 1461 #endif 1462 // update socket.rx_nxt and socket_rx_wnd fields 1283 1463 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 1284 socket_rx_nxt + nbytes ); 1285 1286 // update socket.rx_wnd 1464 socket_rx_nxt + rcv_bytes ); 1287 1465 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_wnd ), 1288 socket_rx_wnd - nbytes ); 1289 1290 // make an ACK request to R2T queue 1466 socket_rx_wnd - rcv_bytes ); 1467 1468 // unblock RX client if required 1469 if( (socket_rx_valid == true) && (socket_rx_cmd == CMD_RX_RECV) ) 1470 { 1471 // reset rx_valid 1472 hal_remote_s32( XPTR(socket_cxy,&socket_ptr->rx_valid), false ); 1473 1474 // report success to RX client thread 1475 dev_nic_unblock_rx_client( socket_xp , CMD_STS_SUCCESS ); 1476 #if DEBUG_DEV_NIC_RX 1477 if( DEBUG_DEV_NIC_RX < cycle ) 1478 printk("\n[%s] socket[%x,%d] %s : last data => unblock RX client thread\n", 1479 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1480 #endif 1481 } 1482 } 1483 1484 // make an ACK request to remote 1485 socket_put_r2t_request( socket_r2tq_xp, 1486 TCP_FLAG_ACK, 1487 chdev->channel ); 1488 } // end payload handling 1489 1490 // [8] handle FIN flag depending on socket state 1491 if( (socket_state == TCP_STATE_SYN_RCVD) || 1492 (socket_state == TCP_STATE_ESTAB ) ) 1493 { 1494 if( seg_fin_set ) 1495 { 1496 1497 #if DEBUG_DEV_NIC_RX 1498 if( DEBUG_DEV_NIC_RX < cycle ) 1499 printk("\n[%s] socket[%x,%d] %s : FIN-ACK => goes CLOSE_WAIT\n", 1500 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1501 #endif 1502 // update socket.rx_nxt when FIN received 1503 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 1504 socket_rx_nxt + 1 ); 1505 1506 // update socket state 1507 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1508 
TCP_STATE_CLOSE_WAIT ); 1509 1510 // send ACK to remote TCP 1291 1511 socket_put_r2t_request( socket_r2tq_xp, 1292 1512 TCP_FLAG_ACK, … … 1294 1514 1295 1515 // check pending RX_RECV command 1296 if( (socket_rx_valid == true) && 1297 (socket_rx_cmd == CMD_RX_RECV) ) 1516 if( (socket_rx_valid == true) && (socket_rx_cmd == CMD_RX_RECV) ) 1298 1517 { 1299 1518 // reset rx_valid 1300 1519 hal_remote_s32( XPTR(socket_cxy,&socket_ptr->rx_valid), false ); 1301 1520 1302 // report success to RX client thread 1303 dev_nic_unblock_rx_client( socket_xp , CMD_STS_SUCCESS ); 1304 #if DEBUG_DEV_NIC_RX 1305 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : unblock waiting RX client thread\n", 1306 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 1307 socket_state_str(socket_state) ); 1308 #endif 1309 } 1310 } 1311 } 1312 1313 // [9] handle FIN flag 1314 if( socket_state == TCP_STATE_ESTAB ) 1315 { 1316 if( seg_fin_set ) // received ACK & FIN 1317 { 1318 1319 #if DEBUG_DEV_NIC_RX 1320 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : FIN-ACK => goes CLOSE_WAIT\n", 1321 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 1322 socket_state_str(socket_state) ); 1323 #endif 1324 // update socket.rx_nxt when FIN received 1325 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 1326 socket_rx_nxt + 1 ); 1327 1328 // update socket state 1329 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1330 TCP_STATE_CLOSE_WAIT ); 1331 1332 // make an ACK request to R2T queue 1333 socket_put_r2t_request( socket_r2tq_xp, 1334 TCP_FLAG_ACK, 1335 chdev->channel ); 1336 1337 // check pending RX_RECV command 1338 if( (socket_rx_valid == true) && 1339 (socket_rx_cmd == CMD_RX_RECV) ) 1340 { 1341 // reset rx_valid 1342 hal_remote_s32( XPTR(socket_cxy,&socket_ptr->rx_valid), false ); 1343 1344 // report error to RX client thread 1521 // report FIN to RX client thread 1345 1522 dev_nic_unblock_rx_client( socket_xp , CMD_STS_EOF ); 1346 1523 #if DEBUG_DEV_NIC_RX 1347 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : unblock RX client waiting on RECV\n", 1348 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,1349 socket_state_str(socket_state) );1524 if( DEBUG_DEV_NIC_RX < cycle ) 1525 printk("\n[%s] socket[%x,%d] %s : unblock RX client waiting on RECV\n", 1526 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1350 1527 #endif 1351 1528 } … … 1354 1531 else if( socket_state == TCP_STATE_FIN_WAIT1 ) 1355 1532 { 1356 if( seg_fin_set ) // received ACK & FIN1533 if( seg_fin_set ) 1357 1534 { 1358 1535 1359 1536 #if DEBUG_DEV_NIC_RX 1360 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : FIN-ACK => goes CLOSING\n", 1361 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,1362 socket_state_str(socket_state) );1363 #endif 1364 // update socket.rx_nxt when FIN received1537 if( DEBUG_DEV_NIC_RX < cycle ) 1538 printk("\n[%s] socket[%x,%d] %s : FIN-ACK => goes CLOSING\n", 1539 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1540 #endif 1541 // update socket.rx_nxt 1365 1542 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 1366 1543 socket_rx_nxt + 1 ); … … 1370 1547 TCP_STATE_CLOSING ); 1371 1548 1372 // make an ACK request to R2T queue1549 // send ACK request to remote 1373 1550 socket_put_r2t_request( socket_r2tq_xp, 1374 1551 TCP_FLAG_ACK, … … 1379 1556 1380 1557 #if DEBUG_DEV_NIC_RX 1381 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : only ACK => goes FIN_WAIT2\n", 1382 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,1383 socket_state_str(socket_state) );1558 if( 
DEBUG_DEV_NIC_RX < cycle ) 1559 printk("\n[%s] socket[%x,%d] %s : only ACK => goes FIN_WAIT2\n", 1560 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1384 1561 #endif 1385 1562 // update socket state … … 1394 1571 1395 1572 #if DEBUG_DEV_NIC_RX 1396 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : FIN-ACK => goes CLOSED / unblock client\n", 1397 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,1398 socket_state_str(socket_state) );1573 if( DEBUG_DEV_NIC_RX < cycle ) 1574 printk("\n[%s] socket[%x,%d] %s : FIN-ACK => goes CLOSED / unblock client\n", 1575 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1399 1576 #endif 1400 1577 // update socket.rx_nxt when FIN received … … 1438 1615 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 1439 1616 } 1440 } // end case connected edstates1617 } // end case connected states 1441 1618 } // end switch socket state 1442 1619 … … 1491 1668 { 1492 1669 1493 #if DEBUG_DEV_NIC_RX 1494 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) ); 1495 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) ); 1496 if( cycle > DEBUG_DEV_NIC_RX ) 1497 printk("\n[%s] thread[%x,%x] matching listening socket[%d,%d] / state %s\n", 1498 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) ); 1670 #if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR 1671 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) ); 1672 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) ); 1673 #endif 1674 1675 #if DEBUG_DEV_NIC_RX 1676 if( DEBUG_DEV_NIC_RX < cycle ) 1677 printk("\n[%s] matching listening socket[%d,%d] / state %s\n", 1678 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1499 1679 #endif 1500 1680 break; … … 1509 1689 { 1510 1690 // The actions depend on the received segment flags 1511 // - discard segment for RST or ACK 1512 // - update socket state & remote IP address, 1513 // register connect request in socket CRQ queue, 1514 // and unblock client thread for SYN 1691 // - discard segment for RST or ACK, 1692 // - for SYN, register the connect request in listening socket CRQ queue, 1693 // and unblock the client thread in case of pending RX_ACCEPT command. 
1515 1694 1516 // discard segment if RST flag1517 if( seg_rst_set ) 1695 // [1] check RST 1696 if( seg_rst_set ) // discard segment 1518 1697 { 1519 1698 1520 #if DEBUG_DEV_NIC_RX 1521 if( cycle > DEBUG_DEV_NIC_RX ) 1522 printk("\n[%s] thread[%x,%x] for listening socket[%x,%d] : received RST\n", 1523 __FUNCTION__, this->process->pid, this->trdid, pid, fdid ); 1699 #if DEBUG_DEV_NIC_ERROR 1700 printk("\n[ERROR] in %s : socket[%x,%d] %s / received RST => discard segment\n", 1701 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1524 1702 #endif 1525 1703 return; 1526 1704 } 1527 1705 1528 // discard segment if ACK flag1529 if( seg_ack_set ) 1706 // [2] check ACK 1707 if( seg_ack_set ) // send RST to remote 1530 1708 { 1531 1709 1532 #if DEBUG_DEV_NIC_RX 1533 if( cycle > DEBUG_DEV_NIC_RX ) 1534 printk("\n[%s] thread[%x,%x] for listening socket[%x,%d] : received ACK\n", 1535 __FUNCTION__, this->process->pid, this->trdid, pid, fdid ); 1536 #endif 1710 #if DEBUG_DEV_NIC_ERROR 1711 printk("\n[ERROR] in %s : socket[%x,%d] %s received ACK => send RST & discard \n", 1712 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1713 #endif 1714 // make an RST request to R2T queue 1715 socket_put_r2t_request( socket_r2tq_xp, 1716 TCP_FLAG_RST, 1717 chdev->channel ); 1537 1718 return; 1538 1719 } 1539 1720 1540 // SYN flag == CONNECT request / seq_num cannot be wrong 1721 // [3] handle security & precedence TODO ... someday 1722 1723 // handle SYN == CONNECT request 1541 1724 if( seg_syn_set ) 1542 1725 { 1543 // build extended pointer on listening socket CRQ 1726 // build extended pointers on various listening socket fields 1727 socket_lock_xp = XPTR( socket_cxy , &socket_ptr->lock ); 1544 1728 socket_crqq_xp = XPTR( socket_cxy , &socket_ptr->crqq ); 1729 socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq ); 1730 1731 // take the lock protecting the matching socket 1732 remote_queuelock_acquire( socket_lock_xp ); 1545 1733 1546 1734 // try to register request into CRQ queue … … 1550 1738 seg_seq_num, 1551 1739 seg_window ); 1552 1553 1740 if ( error ) // CRQ full 1554 1741 { 1555 1742 1556 #if DEBUG_DEV_NIC_RX 1557 if( cycle > DEBUG_DEV_NIC_RX ) 1558 printk("\n[%s] thread[%x,%x] listening socket[%x,%d] CRQ full => send RST\n", 1559 __FUNCTION__, this->process->pid, this->trdid, pid, fdid ); 1743 #if DEBUG_DEV_NIC_ERROR 1744 printk("\n[ERROR] in %s : listening socket[%x,%d] %s receive SYN but CRQ full => send RST\n", 1745 __FUNCTION__, pid, fdid ); 1560 1746 #endif 1561 1747 // make an RST request to R2T queue … … 1564 1750 chdev->channel ); 1565 1751 } 1566 else // new connection request registered inCRQ1752 else // register request in listening socket CRQ 1567 1753 { 1568 1754 1569 1755 #if DEBUG_DEV_NIC_RX 1756 if( DEBUG_DEV_NIC_RX < cycle ) 1570 1757 if( cycle > DEBUG_DEV_NIC_RX ) 1571 printk("\n[%s] thread[%x,%x] for listening socket[%x,%d] : register request in CRQ\n", 1572 __FUNCTION__, this->process->pid, this->trdid, pid, fdid ); 1573 #endif 1574 // check pending RX_ACCEPT command 1575 if( (hal_remote_l32(XPTR(socket_cxy,&socket_ptr->rx_valid)) == true) && 1576 (hal_remote_l32(XPTR(socket_cxy,&socket_ptr->rx_cmd)) == CMD_RX_ACCEPT) ) 1758 printk("\n[%s] listening socket[%x,%d] register request in CRQ\n", 1759 __FUNCTION__, pid, fdid ); 1760 #endif 1761 bool_t rx_valid = hal_remote_l32( XPTR(socket_cxy , &socket_ptr->rx_valid)); 1762 uint32_t rx_cmd = hal_remote_l32( XPTR(socket_cxy , &socket_ptr->rx_cmd)); 1763 1764 // check pending ACCEPT command 1765 if( rx_valid && (rx_cmd == 
CMD_RX_ACCEPT) ) 1577 1766 { 1578 1767 // reset rx_valid 1579 1768 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_valid ), false ); 1580 1769 1581 // report success to RX client thread 1770 // report success to RX client thread, that will 1771 // create a new socket and request a SYN-ACK to TX server thread 1582 1772 dev_nic_unblock_rx_client( socket_xp , CMD_STS_SUCCESS ); 1583 1773 1584 1774 #if DEBUG_DEV_NIC_RX 1775 if( DEBUG_DEV_NIC_RX < cycle ) 1585 1776 if( cycle > DEBUG_DEV_NIC_RX ) 1586 printk("\n[%s] thread[%x,%x] forlistening socket[%x,%d] unblock RX client thread\n",1587 __FUNCTION__, this->process->pid, this->trdid, pid,fdid );1777 printk("\n[%s] listening socket[%x,%d] unblock RX client thread\n", 1778 __FUNCTION__, fdid ); 1588 1779 #endif 1589 1780 } 1590 1781 } // end register request in CRQ 1782 1783 // release the lock protecting the matching socket 1784 remote_queuelock_release( socket_lock_xp ); 1785 1591 1786 } // end if SYN 1592 1787 1593 1788 return; 1594 1789 1595 1790 } // end if listening_match 1596 1791 1597 // 6. no socket found => discard segment 1598 1599 #if DEBUG_DEV_NIC_RX 1600 if( cycle > DEBUG_DEV_NIC_RX ) 1601 printk("\n[%s] thread[%x,%x] exit failure : no socket found => discard segment\n", 1602 __FUNCTION__, this->process->pid, this->trdid ); 1792 // 6. no attached socket found and no listening socket found => discard segment 1793 1794 #if DEBUG_DEV_NIC_ERROR 1795 printk("\n[ERROR] in %s : thread[%x,%d] / unexpected TCP segment => discard / cycle %d\n", 1796 __FUNCTION__, this->process->pid, this->trdid, chdev->channel, cycle ); 1603 1797 #endif 1604 1798 … … 1618 1812 1619 1813 thread_t * this = CURRENT_THREAD; 1620 1814 1621 1815 // check thread can yield 1622 1816 thread_assert_can_yield( this , __FUNCTION__ ); … … 1626 1820 "illegal chdev type or direction" ); 1627 1821 1628 #if DEBUG_DEV_NIC_RX 1629 uint32_t cycle = (uint32_t)hal_get_cycles(); 1822 #if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR 1823 uint32_t cycle = (uint32_t)hal_get_cycles(); 1824 #endif 1825 1826 #if DEBUG_DEV_NIC_RX 1630 1827 if( cycle > DEBUG_DEV_NIC_RX ) 1631 1828 printk("\n[%s] thread[%x,%x] starts / cycle %d\n", 1632 1829 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1633 1830 #endif 1831 1832 // avoid warning 1833 ip_length = 0; 1834 error = 0; 1634 1835 1635 1836 // get extended pointers on server tread and chdev … … 1674 1875 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1675 1876 #endif 1676 1877 // check possible error reported by NIC ISR 1878 if( this->nic_cmd.error ) 1879 { 1880 printk("\n[PANIC] in %s : %s DMA engine cannot access RX_QUEUE / cycle %d\n", 1881 __FUNCTION__, chdev->name , (uint32_t)hal_get_cycles() ); 1882 } 1677 1883 } 1678 1884 else // success => handle packet … … 1682 1888 cycle = (uint32_t)hal_get_cycles(); 1683 1889 if( DEBUG_DEV_NIC_RX < cycle ) 1684 dev_nic_packet_display( false, // is_tx1685 this->process->pid,1686 this->trdid,1687 cycle,1688 k_buf );1689 1890 #endif 1690 1891 … … 1697 1898 { 1698 1899 1699 #if DEBUG_DEV_NIC_RX 1700 cycle = (uint32_t)hal_get_cycles(); 1701 if( DEBUG_DEV_NIC_RX < cycle ) 1702 printk("\n[%s] thread[%x,%x] discard ETH packet / cycle %d\n", 1900 #if DEBUG_DEV_NIC_ERROR 1901 printk("\n[WARNING] in %s : thread[%x,%x] discard ETH packet / cycle %d\n", 1703 1902 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1704 1903 #endif … … 1723 1922 { 1724 1923 1725 #if DEBUG_DEV_NIC_RX 1726 cycle = (uint32_t)hal_get_cycles(); 1727 if( DEBUG_DEV_NIC_RX < cycle ) 1728 printk("\n[%s] thread[%x,%x] 
discarded IP packet / cycle %d\n", 1924 #if DEBUG_DEV_NIC_ERROR 1925 printk("\n[WARNING] in %s : thread[%x,%x] discard IP packet / cycle %d\n", 1729 1926 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1730 1927 #endif … … 1762 1959 { 1763 1960 1764 #if DEBUG_DEV_NIC_ RX1961 #if DEBUG_DEV_NIC_ERROR 1765 1962 cycle = (uint32_t)hal_get_cycles(); 1766 1963 if( DEBUG_DEV_NIC_RX < cycle ) 1767 printk("\n[ %s] thread[%x,%x] discarded unsupported transport protocol%d\n",1964 printk("\n[WARNING] in %s : thread[%x,%x] unsupported transport protocol %d / cycle %d\n", 1768 1965 __FUNCTION__, this->process->pid, this->trdid, trsp_protocol, cycle ); 1769 1966 #endif 1770 1967 continue; 1771 1968 } 1772 } 1773 } // end of while loop1774 } // end dev_nic_rx_server()1969 } // end else success 1970 } // end of while loop 1971 } // end dev_nic_rx_server() 1775 1972 1776 1973 … … 1782 1979 1783 1980 /////////////////////////////////////////////////////////////////////////////////////////// 1784 // This static function is called by the dev_nic_tx_ build_packet() function.1981 // This static function is called by the dev_nic_tx_send_packet() function. 1785 1982 // It moves one ETH/IP/UDP packet from the kernel buffer identified by the <buffer> and 1786 1983 // <length> arguments to the NIC_TX_QUEUE identified the <chdev> argument. … … 1816 2013 this->nic_cmd.buffer = k_buf; 1817 2014 this->nic_cmd.length = length; 2015 this->nic_cmd.error = 0; 1818 2016 1819 2017 while( 1 ) … … 1868 2066 // <socket_xp> argument. The <length> argument defines the number of bytes in payload. 1869 2067 // It set the "src_port", "dst_port", "total_length" and "checksum" fields in UDP header. 1870 // The payload must be previouly loaded in the pernel buffer.2068 // The payload must be previouly loaded in the kernel buffer. 1871 2069 /////////////////////////////////////////////////////////////////////////////////////////// 1872 2070 // @ k_buf : [in] pointer on first byte of UDP header in kernel buffer. … … 1904 2102 k_buf[3] = remote_port; 1905 2103 2104 // reset checksum 2105 k_buf[6] = 0; 2106 k_buf[7] = 0; 2107 1906 2108 // set packet length in header 1907 2109 k_buf[4] = total_length >> 8; … … 1909 2111 1910 2112 // compute UDP packet checksum 1911 checksum = dev_nic_udp_checksum( k_buf , total_length ); 1912 2113 checksum = dev_nic_tcp_udp_checksum( k_buf, 2114 total_length, 2115 local_addr, 2116 remote_addr, 2117 false ); // is_not_tcp 1913 2118 // set checksum 1914 2119 k_buf[6] = checksum >> 8; … … 1920 2125 // This static function is called by the dev_nic_tx_server() function. 1921 2126 // It builds a TCP header in the kernel buffer defined by the <k_buf> argument. 1922 // The payload must have been previouly registered in this buffer .2127 // The payload must have been previouly registered in this buffer (for checksum). 1923 2128 // The "local_addr", "local_port", "remote_addr", "remote_port", seq_num", "ack_num", 1924 2129 // and "window" fields are obtained from the <socket_xp> argument. 
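Both the UDP and TCP header builders now delegate to the unified dev_nic_tcp_udp_checksum() helper, selected by an is_tcp flag. The helper's body is not shown in this changeset; the sketch below is the standard ones-complement checksum over the IPv4 pseudo-header plus transport segment that such a helper computes, with argument names mirroring the call sites, as an illustrative implementation only:

    #include <stdbool.h>
    #include <stdint.h>

    static uint16_t tcp_udp_checksum( const uint8_t * seg,      // transport header + payload
                                      uint32_t        length,   // segment length (bytes)
                                      uint32_t        src_addr, // source IPv4 address
                                      uint32_t        dst_addr, // destination IPv4 address
                                      bool            is_tcp )
    {
        uint32_t sum = 0;
        uint32_t i;

        // pseudo-header : addresses, protocol, transport length
        sum += (src_addr >> 16) + (src_addr & 0xFFFF);
        sum += (dst_addr >> 16) + (dst_addr & 0xFFFF);
        sum += is_tcp ? 0x06 : 0x11;                   // PROTOCOL_TCP / PROTOCOL_UDP
        sum += length;

        // segment : big-endian 16-bit words, odd trailing byte zero-padded
        for( i = 0 ; i + 1 < length ; i += 2 ) sum += ((uint32_t)seg[i] << 8) | seg[i+1];
        if( length & 1 ) sum += (uint32_t)seg[length - 1] << 8;

        // fold the carries into 16 bits and return the ones complement
        while( sum >> 16 ) sum = (sum & 0xFFFF) + (sum >> 16);
        return (uint16_t)( ~sum );
    }

Note that the UDP builder zeroes bytes 6 and 7 before calling the helper: the checksum field itself must contain zero while the sum is computed.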
… … 1999 2204 2000 2205 // compute TCP segment checksum 2001 checksum = dev_nic_tcp_checksum( k_buf, 2002 total_length, 2003 src_addr, 2004 dst_addr ); 2206 checksum = dev_nic_tcp_udp_checksum( k_buf, 2207 total_length, 2208 src_addr, 2209 dst_addr, 2210 true ); // is_tcp 2005 2211 // set "checksum" 2006 2212 k_buf[16] = checksum >> 8; … … 2108 2314 } // end dev_nic_tx_build_eth_header() 2109 2315 2110 /////////////////////////////////////////////////////////////////////////////////////////// 2111 // This static function is called by the dev_nic_tx_server() function to handle one TX 2112 // command, or one R2T request, as defined by the <cmd_valid> and <r2t_valid> arguments, 2113 // for the socket identified by the <socket_xp> argument. It builds an ETH/IP/UDP packet 2114 // or ETH/IP/TCP segment, in the buffer defined by the <k_buf> argument, and registers 2115 // it in the NIC_TX queue defined by the <chdev> argument. 2116 // For a TCP header, the "seq_num", ack_num", and "window" fiels are defined by the 2117 // "socket.tx_next", "socket.rx_next" and "socket.rx_wnd" fields respectively. 2118 // It updates the "socket.state", "socket.tx_nxt", "socket.r2tq", and "socket.crqq" 2119 // The supported TX command types are CONNECT / ACCEPT / SEND / CLOSE. 2120 // fields as required by the command type. 2121 // - For an UDP socket, it reset the "socket.tx_valid" field, and unblock the client 2122 // thread when the packet has been sent, or when an error must be reported. 2123 // - For a TCP socket, it reset the "socket.tx_valid" field when the segment has been 2124 // sent, but does not unblocks the client thread, that will be unblocqued by the 2125 // NIC_RX thread when the TX command is fully completed. 2316 2317 /////////////////////////////////////////////////////////////////////////////////////////// 2318 // This static function implement the TCP protocol as specified by the RFC. 2319 // It is called by the dev_nic_tx_server() function to handle one TX command, 2320 // or one R2T request, for the socket identified by the <socket_xp> argument. 2321 // It builds an ETH/IP/UDP packet or ETH/IP/TCP segment, in the 2 Kbytes kernel buffer, 2322 // defined by the <k_buf> argument from informations found in socket descriptor. 2323 // It returns a command status code (defined in the ksocket.h file), and returns in the 2324 // <total_length> argument the actual packet length. 2325 // It updates the "socket.state", "socket.tx_nxt", "socket.r2tq", "socket.crqq", 2326 // "socket.todo" fields as required by the command type, but it does NOT reset 2327 // the "socket.tx_valid" field and does NOT unblock the client thread. 2328 // It does NOt take the socket lock, that is taken by the dev_nic_server(). 2126 2329 /////////////////////////////////////////////////////////////////////////////////////////// 2127 2330 // To build a packet, it makes the following actions: 2128 // 1) it takes the lock protecting the socket state. 2129 // 2) it get the command arguments from socket descriptor. 2130 // 3) it build an UDP packet or a TCP segment, depending on command type and socket state. 2131 // 4) it updates the socket state. 2132 // 5) it releases the lock protecting the socket. 2133 // 6) it build the IP header. 2134 // 7) it build the ETH header. 2135 // 8) it copies the packet in the NIC_TX queue. 2136 /////////////////////////////////////////////////////////////////////////////////////////// 2137 // @ cmd_state : [in] TX command valid in socket descriptor. 
2138 // @ r2t_valid : [in] R2T request valid in command descriptor. 2139 // @ socket_xp : [in] extended pointer on client socket. 2140 // @ k_buf : [in] local pointer on kernel buffer (2 Kbytes). 2141 // @ chdev : [in] local pointer on NIC_RX chdev. 2142 /////////////////////////////////////////////////////////////////////////////////////////// 2143 static void dev_nic_tx_build_packet( bool_t cmd_valid, 2144 bool_t r2t_valid, 2145 xptr_t socket_xp, 2146 uint8_t * k_buf, 2147 chdev_t * chdev ) 2331 // 1) it get the command arguments from socket descriptor. 2332 // 2) it build an UDP packet or a TCP segment, and update socket state. 2333 // 3) it build the IP header. 2334 // 4) it build the ETH header. 2335 /////////////////////////////////////////////////////////////////////////////////////////// 2336 // @ socket_xp : [in] extended pointer on client socket. 2337 // @ k_buf : [in] local pointer on kernel buffer (2 Kbytes). 2338 // @ total_length : [out] total number of bytes written in k_buf. 2339 // @ return command status. 2340 /////////////////////////////////////////////////////////////////////////////////////////// 2341 static socket_cmd_sts_t dev_nic_tx_build_packet( xptr_t socket_xp, 2342 uint8_t * k_buf, 2343 uint32_t * total_length ) 2148 2344 { 2149 2345 socket_t * socket_ptr; 2150 2346 cxy_t socket_cxy; 2151 2347 xptr_t client_xp; // extended pointer on client thread 2348 bool_t cmd_valid; // valid user command 2349 bool_t r2t_valid; // valid R2T queue request 2152 2350 uint32_t cmd_type; // NIC command type 2153 uint8_t * tx_buf; // local pointer on kernelbuffer for payload2351 uint8_t * tx_buf; // local pointer on socket buffer for payload 2154 2352 uint32_t len; // tx_buf length (bytes) 2155 2353 uint32_t todo; // number of bytes not yet sent 2156 2354 uint32_t socket_type; // socket type (UDP/TCP) 2157 2355 uint32_t socket_state; // socket state 2158 xptr_t socket_lock_xp; // extended pointer on socket lock2159 2356 xptr_t socket_r2tq_xp; // extended pointer on R2T queue 2160 2357 uint32_t src_ip_addr; // source IP address … … 2166 2363 uint8_t trsp_protocol; // transport protocol type (UDP/TCP) 2167 2364 uint8_t r2t_flags; // flags defined by one R2T queue request 2168 bool_t do_send; // build & send a packet when true 2169 2365 2170 2366 // get socket cluster and local pointer 2171 2367 socket_cxy = GET_CXY( socket_xp ); 2172 2368 socket_ptr = GET_PTR( socket_xp ); 2173 2369 2370 #if DEBUG_DEV_NIC_TX || DEBUG_DEV_NIC_ERROR 2371 uint32_t cycle = (uint32_t)hal_get_cycles(); 2372 uint32_t socket_fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid )); 2373 uint32_t socket_pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid )); 2374 #endif 2375 2376 // build extended pointer on socket r2t queue 2377 socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq ); 2378 2379 // get cmd_valid & t2t_valid from socket descriptor 2380 cmd_valid = (bool_t)hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_valid )); 2381 r2t_valid = (bool_t)remote_buf_status( XPTR( socket_cxy , &socket_ptr->r2tq )); 2382 2174 2383 #if DEBUG_DEV_NIC_TX 2175 thread_t * this = CURRENT_THREAD;;2176 uint32_t cycle = (uint32_t)hal_get_cycles();2177 uint32_t fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ));2178 uint32_t pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ));2179 2384 if( cycle > DEBUG_DEV_NIC_TX ) 2180 printk("\n[%s] thread[%x,%x] enter for socket[%x,%d] : cmd_valid %d / r2t_valid %d / cycle %d\n", 2181 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 
cmd_valid, r2t_valid, cycle ); 2182 #endif 2183 2184 // build extended pointers on socket lock and r2t queue 2185 socket_lock_xp = XPTR( socket_cxy , &socket_ptr->lock ); 2186 socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq ); 2187 2188 // 1. take lock protecting this socket 2189 remote_queuelock_acquire( socket_lock_xp ); 2190 2191 // get relevant socket infos 2385 printk("\n[%s] enter for socket[%x,%d] : cmd_val %d / r2t_val %d / cycle %d\n", 2386 __FUNCTION__, socket_pid, socket_fdid, cmd_valid, r2t_valid, cycle ); 2387 #endif 2388 2389 // 1. get relevant socket infos 2192 2390 socket_type = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->type )); 2193 2391 socket_state = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->state )); … … 2195 2393 dst_ip_addr = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->remote_addr )); 2196 2394 2197 // compute UDP/TCP packet base in kernel buffer2395 // compute UDP/TCP packet base in local kernel buffer 2198 2396 k_trsp_base = k_buf + ETH_HEAD_LEN + IP_HEAD_LEN; 2199 2397 2200 // set default values 2201 do_send = false; 2398 // default value 2202 2399 trsp_length = 0; 2203 nbytes = 0; 2204 2205 if( cmd_valid ) // handle TX command 2206 { 2207 // 2. get command arguments from socket 2400 2401 if( cmd_valid ) // handle TX command depending on type 2402 { 2403 // get command arguments from socket 2208 2404 cmd_type = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_cmd )); 2209 2405 tx_buf = hal_remote_lpt( XPTR( socket_cxy , &socket_ptr->tx_buf )); … … 2213 2409 2214 2410 #if DEBUG_DEV_NIC_TX 2215 cycle = (uint32_t)hal_get_cycles();2216 2411 if( cycle > DEBUG_DEV_NIC_TX ) 2217 printk("\n[%s] thread[%x,%x] cmd_valid for socket[%x,%d] : %s / %s / cycle %d\n",2218 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,2219 socket_cmd_type_str(cmd_type), socket_state_str(socket_state), cycle);2412 printk("\n[%s] socket[%x,%d] / %s / command %s \n", 2413 __FUNCTION__, socket_pid, socket_fdid, 2414 socket_cmd_type_str(cmd_type),socket_state_str(socket_state) ); 2220 2415 #endif 2221 2416 2222 2417 ////////////////////////////////////////////////////////// 2223 // 3. UDP : build UDP packet and update UDP socket state2418 // 2. UDP : build UDP packet and update UDP socket state 2224 2419 if( socket_type == SOCK_DGRAM ) 2225 2420 { … … 2228 2423 if( socket_state != UDP_STATE_ESTAB ) 2229 2424 { 2230 // reset tx_valid 2231 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ) , false ); 2232 2233 // unblock client thread / report error 2234 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE ); 2425 return CMD_STS_BADSTATE; 2235 2426 } 2236 else 2427 else if( cmd_type == CMD_TX_SEND ) 2237 2428 { 2238 if( cmd_type == CMD_TX_SEND ) 2239 { 2240 // compute payload length 2241 nbytes = ( PAYLOAD_MAX_LEN < todo ) ? PAYLOAD_MAX_LEN : todo; 2242 2243 // move payload from tx_buf to 2 Kbytes kernel buffer 2244 memcpy( k_trsp_base + UDP_HEAD_LEN, 2245 tx_buf + (len - todo), 2246 nbytes ); 2247 2248 // build UDP header 2249 dev_nic_tx_build_udp_header( k_trsp_base, 2250 socket_xp, 2251 nbytes ); 2252 2253 // update "tx_todo" in socket descriptor 2254 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_todo), todo - nbytes ); 2255 2256 // send UDP packet 2257 trsp_length = UDP_HEAD_LEN + nbytes; 2258 do_send = true; 2259 2260 #if( DEBUG_DEV_NIC_TX & 1) 2261 cycle = (uint32_t)hal_get_cycles(); 2429 // compute payload length 2430 nbytes = ( CONFIG_SOCK_PAYLOAD_MAX < todo ) ? 
CONFIG_SOCK_PAYLOAD_MAX : todo; 2431 2432 // move payload from remote socket tx_buf to local kernel buffer 2433 hal_remote_memcpy( XPTR( local_cxy , k_trsp_base + UDP_HEAD_LEN ), 2434 XPTR( socket_cxy , tx_buf + (len - todo) ), 2435 nbytes ); 2436 2437 // build UDP header 2438 dev_nic_tx_build_udp_header( k_trsp_base, 2439 socket_xp, 2440 nbytes ); 2441 2442 // update "tx_todo" in socket descriptor 2443 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_todo), todo - nbytes ); 2444 2445 // set UDP packet length 2446 trsp_length = UDP_HEAD_LEN + nbytes; 2447 2448 #if DEBUG_DEV_NIC_TX 2262 2449 if( cycle > DEBUG_DEV_NIC_TX ) 2263 printk("\n[%s] thread[%x,%x] socket[%x,%d] UDP packet build / length %d / cycle %d\n", 2264 __FUNCTION__, this->process->pid, this->trdid, trsp_length , cycle ); 2265 #endif 2266 if( nbytes == todo ) // last byte sent 2267 { 2268 // reset tx_valid 2269 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ) , false ); 2270 2271 // report success to TX client 2272 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 2273 } 2274 } 2275 else // CONNECT, ACCEPT, or CLOSE commands are illegal for UDP 2276 { 2277 // reset tx_valid 2278 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ) , false ); 2279 2280 // report error 2281 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADCMD ); 2282 } 2450 printk("\n[%s] socket[%x,%d] UDP packet build / %d bytes\n", 2451 __FUNCTION__, socket_pid, socket_fdid, nbytes ); 2452 #endif 2453 } 2454 else // CONNECT, ACCEPT, or CLOSE commands are illegal for UDP 2455 { 2456 2457 #if DEBUG_DEV_NIC_ERROR 2458 printk("\n[ERROR] in %s : bad state %s for socket[%x,%x] / cycle %d\n", 2459 __FUNCTION__, socket_state_str(socket_state), socket_pid, socket_fdid, cycle ); 2460 #endif 2461 return CMD_STS_BADCMD; 2283 2462 } 2284 2463 } // end UDP 2285 2464 2286 2465 /////////////////////////////////////////////////////////// 2287 // 3. TCP : build TCP segment and update TCP socket state2466 // 2. 
TCP : build TCP segment and update TCP socket state 2288 2467 else if( socket_type == SOCK_STREAM ) 2289 2468 { … … 2296 2475 socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq ); 2297 2476 2298 // get one request from R2T queue 2299 remote_buf_get_to_kernel( socket_r2tq_xp , &r2t_flags , 1);2477 // get one request from R2T queue, and update R2T queue 2478 socket_get_r2t_request( socket_r2tq_xp , &r2t_flags ); 2300 2479 } 2301 2480 else … … 2311 2490 { 2312 2491 // initialises socket tx_nxt, and rx_wnd 2313 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->tx_nxt), TCP_ISS_CLIENT ); 2314 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->rx_wnd), TCP_MAX_WINDOW ); 2492 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->tx_nxt), 2493 CONFIG_SOCK_ISS_CLIENT ); 2494 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->rx_wnd), 2495 CONFIG_SOCK_MAX_WINDOW ); 2315 2496 2316 2497 // build TCP SYN segment … … 2319 2500 0, // length 2320 2501 TCP_FLAG_SYN ); 2321 // se nd segment2502 // set TCP packet length 2322 2503 trsp_length = TCP_HEAD_LEN; 2323 do_send = true; 2324 2325 #if DEBUG_DEV_NIC_TX 2326 cycle = (uint32_t)hal_get_cycles(); 2327 if( cycle > DEBUG_DEV_NIC_TX ) 2328 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / CONNECT / " 2329 "TCP SYN build / cycle %d\n", 2330 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 2331 socket_state_str( socket_state ), cycle ); 2332 #endif 2504 2333 2505 // update socket.state 2334 2506 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), … … 2337 2509 // update socket.tx_nxt 2338 2510 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_nxt ), 2339 TCP_ISS_CLIENT + 1 ); 2340 2341 // reset tx_valid but do not unblock client thread 2342 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2511 CONFIG_SOCK_ISS_CLIENT + 1 ); 2512 #if DEBUG_DEV_NIC_TX 2513 if( cycle > DEBUG_DEV_NIC_TX ) 2514 printk("\n[%s] socket[%x,%d] %s / CONNECT / TCP SYN build\n", 2515 __FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state) ); 2516 #endif 2343 2517 } 2344 2518 else // report error for all other socket states 2345 2519 { 2346 // reset tx_valid 2347 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ) , false ); 2348 2349 // report error 2350 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE ); 2520 2521 #if DEBUG_DEV_NIC_ERROR 2522 printk("\n[ERROR] in %s : bad state %s socket[%x,%x] / cycle %d\n", 2523 __FUNCTION__, socket_state_str(socket_state), socket_pid, socket_fdid, cycle ); 2524 #endif 2525 return CMD_STS_BADSTATE; 2351 2526 } 2352 2527 } … … 2358 2533 { 2359 2534 // initialize socket tx_nxt, and rx_wnd 2360 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->tx_nxt), TCP_ISS_SERVER ); 2361 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->rx_wnd), CONFIG_SOCK_RX_BUF_SIZE); 2535 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->tx_nxt), 2536 CONFIG_SOCK_ISS_SERVER ); 2537 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->rx_wnd), 2538 (1 << CONFIG_SOCK_RX_BUF_ORDER) ); 2362 2539 2363 2540 // build TCP ACK-SYN segment … … 2366 2543 0, // length 2367 2544 TCP_FLAG_SYN | TCP_FLAG_ACK ); 2368 // se nd segment2545 // set TCP packet length 2369 2546 trsp_length = TCP_HEAD_LEN; 2370 do_send = true; 2371 2372 #if DEBUG_DEV_NIC_TX 2373 cycle = (uint32_t)hal_get_cycles(); 2374 if( cycle > DEBUG_DEV_NIC_TX ) 2375 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / ACCEPT / send SYN-ACK / cycle %d\n", 2376 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 2377 socket_state_str( socket_state ), cycle ); 2378 #endif 2547 2379 2548 // update socket.state 
2380 2549 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), … … 2383 2552 // update socket.tx_nxt 2384 2553 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_nxt ), 2385 TCP_ISS_SERVER + 1 ); 2386 2387 // reset tx_valid but do not unblock client thread 2388 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2554 CONFIG_SOCK_ISS_SERVER + 1 ); 2555 #if DEBUG_DEV_NIC_TX 2556 if( cycle > DEBUG_DEV_NIC_TX ) 2557 printk("\n[%s] socket[%x,%d] %s / ACCEPT / SYN-ACK build\n", 2558 __FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state) ); 2559 #endif 2389 2560 } 2390 2561 else // report error in all other socket states 2391 2562 { 2392 // reset tx_valid 2393 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2394 2395 // report error to TX client thread 2396 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE ); 2563 2564 #if DEBUG_DEV_NIC_ERROR 2565 printk("\n[ERROR] in %s : bad state %s for socket[%x,%x] / cycle %d\n", 2566 __FUNCTION__, socket_state_str(socket_state), socket_pid, socket_fdid, cycle ); 2567 #endif 2568 return CMD_STS_BADSTATE; 2397 2569 } 2398 2570 } … … 2423 2595 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_nxt), tx_nxt + 1 ); 2424 2596 2425 // se nd segment2597 // set TCP packet length 2426 2598 trsp_length = TCP_HEAD_LEN; 2427 do_send = true;2428 2599 2429 2600 #if DEBUG_DEV_NIC_TX 2430 cycle = (uint32_t)hal_get_cycles();2431 2601 if( cycle > DEBUG_DEV_NIC_TX ) 2432 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / CLOSE / send FIN-ACK / cycle %d\n", 2433 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 2434 socket_state_str( socket_state ), cycle ); 2435 #endif 2436 // reset tx_valid but do not unblock client thread 2437 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2602 printk("\n[%s] socket[%x,%d] %s / CLOSE / FIN-ACK build\n", 2603 __FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state) ); 2604 #endif 2438 2605 } 2439 2606 else // all other states => signal error 2440 2607 { 2441 2608 2442 #if DEBUG_DEV_NIC_TX 2443 cycle = (uint32_t)hal_get_cycles(); 2444 if( cycle > DEBUG_DEV_NIC_TX ) 2445 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / CLOSE / error BADSTATE / cycle %d\n", 2446 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 2447 socket_state_str( socket_state ), cycle ); 2448 #endif 2449 // reset tx_valid 2450 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2451 2452 // report error 2453 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE ); 2609 #if DEBUG_DEV_NIC_ERROR 2610 printk("\n[ERROR] in %s : bad state %s for socket[%x,%x] / cycle %d\n", 2611 __FUNCTION__, socket_state_str(socket_state), socket_pid, socket_fdid, cycle ); 2612 #endif 2613 return CMD_STS_BADSTATE; 2454 2614 } 2455 2615 } 2456 ////////////////////////////////// ///2616 ////////////////////////////////// 2457 2617 else if( cmd_type == CMD_TX_SEND ) 2458 2618 { … … 2464 2624 2465 2625 // compute actual payload length 2466 nbytes = ( PAYLOAD_MAX_LEN < todo ) ? PAYLOAD_MAX_LEN : todo; 2467 2468 // compute TCP segment base in kernel buffer 2469 k_trsp_base = k_buf + ETH_HEAD_LEN + IP_HEAD_LEN; 2470 2471 // move payload to k_buf 2472 memcpy( k_trsp_base + TCP_HEAD_LEN, 2473 tx_buf + (len - todo), 2474 nbytes ); 2626 nbytes = ( CONFIG_SOCK_PAYLOAD_MAX < todo ) ? 
2627 CONFIG_SOCK_PAYLOAD_MAX : todo; 2628 2629 // move payload from remote tx_buf to local kernel buffer 2630 hal_remote_memcpy( XPTR( local_cxy , k_trsp_base + TCP_HEAD_LEN ), 2631 XPTR( socket_cxy , tx_buf + (len - todo) ), 2632 nbytes ); 2475 2633 2476 2634 // build TCP header … … 2486 2644 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_nxt), tx_nxt + nbytes ); 2487 2645 2488 // se nd TCP segment2646 // set TCP packet length 2489 2647 trsp_length = TCP_HEAD_LEN + nbytes; 2490 do_send = true;2491 2492 if( todo == nbytes ) // last byte sent2493 {2494 // reset tx_valid when last byte has been sent2495 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_valid), false );2496 }2497 2648 2498 2649 #if DEBUG_DEV_NIC_TX 2499 cycle = (uint32_t)hal_get_cycles();2500 2650 if( cycle > DEBUG_DEV_NIC_TX ) 2501 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / SEND / " 2502 "TCP DATA build / payload %d / cycle %d\n", 2503 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 2504 socket_state_str( socket_state ), nbytes, cycle ); 2651 printk("\n[%s] socket[%x,%d] %s / SEND / %d bytes\n", 2652 __FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state), nbytes ); 2505 2653 #endif 2506 2654 } 2507 2655 else // all other socket states 2508 2656 { 2509 // reset tx_valid 2510 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2511 2512 // report error to TX client thread 2513 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE ); 2657 2658 #if DEBUG_DEV_NIC_ERROR 2659 printk("\n[ERROR] in %s : bad state %s for socket[%x,%x] / cycle %d\n", 2660 __FUNCTION__, socket_state_str(socket_state), socket_pid, socket_fdid, cycle ); 2661 #endif 2662 return CMD_STS_BADSTATE; 2514 2663 } 2515 2664 } … … 2517 2666 else // undefined TX command type 2518 2667 { 2519 // reset tx_valid 2520 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2521 2522 // report error to TX client thread 2523 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADCMD ); 2668 2669 #if DEBUG_DEV_NIC_ERROR 2670 printk("\n[ERROR] in %s : undefined command type for socket[%x,%x] %s / cycle %d\n", 2671 __FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state), cycle ); 2672 #endif 2673 return CMD_STS_BADCMD; 2524 2674 } 2525 2675 } // end TCP … … 2527 2677 else // no valid TX command => handle R2T request only 2528 2678 { 2679 2680 assert( __FUNCTION__ , (socket_type == SOCK_STREAM) , "don't use R2T queue for UDP" ); 2681 2529 2682 // get one request from R2T queue 2530 remote_buf_get_to_kernel( socket_r2tq_xp , &r2t_flags , 1);2683 socket_get_r2t_request( socket_r2tq_xp , &r2t_flags ); 2531 2684 2532 2685 #if DEBUG_DEV_NIC_TX 2533 2686 cycle = (uint32_t)hal_get_cycles(); 2534 2687 if( cycle > DEBUG_DEV_NIC_TX ) 2535 printk("\n[%s] thread[%x,%x] only r2t_valid for socket[%x,%d] / flags %x / cycle %d\n", 2536 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, r2t_flags, cycle ); 2537 #endif 2538 2688 printk("\n[%s] socket[%x,%d] %s / send only flags %x / no data\n", 2689 __FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state), r2t_flags ); 2690 #endif 2539 2691 // build TCP header 2540 2692 dev_nic_tx_build_tcp_header( k_trsp_base, 2541 2693 socket_xp, 2542 0, // payload length2694 0, // no payload 2543 2695 r2t_flags ); // flags 2544 // se nd TCP segment2696 // set protocol 2545 2697 trsp_protocol = PROTOCOL_TCP; 2698 2699 // set TCP packet length 2546 2700 trsp_length = TCP_HEAD_LEN; 2547 do_send = true;2548 2701 } 2549 2702 2550 // 4. 
release the lock protecting the socket 2551 remote_queuelock_release( socket_lock_xp ); 2552 2553 // return if no packet to send 2554 if( do_send == false ) return; 2555 2556 // 5. build IP header 2703 // 3. build IP header 2557 2704 dev_nic_tx_build_ip_header( k_buf + ETH_HEAD_LEN, 2558 2705 src_ip_addr, … … 2561 2708 trsp_length ); 2562 2709 2563 #if( DEBUG_DEV_NIC_TX & 1) 2564 cycle = (uint32_t)hal_get_cycles(); 2565 if( cycle > DEBUG_DEV_NIC_TX ) 2566 printk("\n[%s] thread[%x,%x] IP header build / length %d / cycle %d\n", 2567 __FUNCTION__, this->process->pid, this->trdid, IP_HEAD_LEN + trsp_length , cycle ); 2568 #endif 2569 2570 // 6. build ETH header 2710 // 4. build ETH header 2571 2711 dev_nic_tx_build_eth_header( k_buf, 2572 2712 (uint8_t)DST_MAC_5, … … 2584 2724 IP_HEAD_LEN + trsp_length ); 2585 2725 2586 #if( DEBUG_DEV_NIC_TX & 1)2587 cycle = (uint32_t)hal_get_cycles();2588 if( cycle > DEBUG_DEV_NIC_TX )2589 printk("\n[%s] thread[%x,%x] ETH header build / cycle %d\n",2590 __FUNCTION__, this->process->pid, this->trdid, cycle );2591 #endif2592 2593 // 7. move packet to NIC_TX queue (blocking function)2594 dev_nic_tx_move_packet( chdev,2595 k_buf,2596 ETH_HEAD_LEN + IP_HEAD_LEN + trsp_length );2597 2598 2726 #if DEBUG_DEV_NIC_TX 2599 2727 cycle = (uint32_t)hal_get_cycles(); 2600 2728 if( cycle > DEBUG_DEV_NIC_TX ) 2601 printk("\n[%s] thread[%x,%x] for socket[%x,%d] moved packet to NIC_TX / cycle %d\n", 2602 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, cycle ); 2603 #endif 2729 printk("\n[%s] exit for socket[%x,%d] / packet build / cycle %d\n", 2730 __FUNCTION__, socket_pid, socket_fdid, cycle ); 2731 #endif 2732 2733 // return success and total packet length 2734 *total_length = ETH_HEAD_LEN + IP_HEAD_LEN + trsp_length; 2735 return CMD_STS_SUCCESS; 2604 2736 2605 2737 } // end dev_nic_tx_build_packet() 2606 2607 2738 2608 2739 ///////////////////////////////////////// 2609 2740 void dev_nic_tx_server( chdev_t * chdev ) 2610 2741 { 2611 uint8_t k_buf[CONFIG_SOCK_PKT_BUF_SIZE]; // buffer for one packet 2612 2613 xptr_t queue_root_xp; // extended pointer on sockets list root 2614 xptr_t queue_lock_xp; // extended pointer on lock protecting this list 2615 xptr_t socket_xp; // extended pointer on on registered socket 2616 socket_t * socket_ptr; 2617 cxy_t socket_cxy; 2618 xptr_t iter_xp; // iterator for loop on registered sockets 2619 xlist_entry_t temp_root; // root of temporary list of sockets 2620 xptr_t temp_root_xp; // extended pointer on temporary list of sockets 2621 uint32_t temp_nr; // number of active registered sockets 2622 bool_t cmd_valid; // TX command valid in socket descriptor 2623 bool_t r2t_valid; // valid R2T request in socket descriptor 2624 2742 uint8_t k_buf[CONFIG_SOCK_PKT_BUF_SIZE]; // buffer for one packet 2743 2744 xptr_t queue_lock_xp; // extended pointer on lock for sockets list 2745 xptr_t root_xp; // extended pointer on sockets list root 2746 xptr_t iter_xp; // iterator for loop on sockets list 2747 xptr_t list_xp; // extended pointer on socket tx_list field 2748 xptr_t socket_xp; // extended pointer on found socket 2749 socket_t * socket_ptr; // local pointer on found socket 2750 cxy_t socket_cxy; // found socket cluster identifier 2751 xptr_t socket_lock_xp; // extented pointer on found socket lock 2752 bool_t cmd_valid; // TX command valid in socket descriptor 2753 bool_t r2t_valid; // valid R2T request in socket descriptor 2754 uint32_t sock_type; // socket type 2755 socket_cmd_sts_t cmd_sts; // value returned by dev_nic_tx_build_packet() 
2756 socket_cmd_type_t tx_cmd; // socket TX command type 2757 uint32_t tx_todo; // socket number of bytes not sent yet 2758 uint32_t total_length; // length of the ETH/IP/TCP packet (bytes) 2759 bool_t found; // one active socket found 2760 2625 2761 thread_t * this = CURRENT_THREAD; 2626 2762 … … 2638 2774 "illegal chdev type or direction" ); 2639 2775 2640 // check thread can yield 2641 thread_assert_can_yield( this , __FUNCTION__ ); 2642 2643 // build extended pointer on temporary list 2644 temp_root_xp = XPTR( local_cxy , &temp_root ); 2645 2646 // build extended pointer on client sockets queue (lock & root) 2776 // build extended pointers on client sockets queue lock 2647 2777 queue_lock_xp = XPTR( local_cxy , &chdev->wait_lock ); 2648 queue_root_xp = XPTR( local_cxy , &chdev->wait_root ); 2778 2779 // build extended pointers on client sockets queue root and first item 2780 root_xp = XPTR( local_cxy , &chdev->wait_root ); 2649 2781 2650 2782 while( 1 ) // TX server infinite loop 2651 2783 { 2652 // initialize temporary list of registered sockets as empty2653 xlist_root_init( temp_root_xp );2654 temp_nr = 0;2655 2656 2784 // take the lock protecting the client sockets queue 2657 2785 remote_busylock_acquire( queue_lock_xp ); 2658 2786 2659 // build temporary list of all registered sockets 2660 if( xlist_is_empty( queue_root_xp ) == false ) 2787 found = false; 2788 2789 // scan registered sockets to find one active socket 2790 // with a round robin priority between the registered sockets 2791 if( xlist_is_empty( root_xp ) == false ) 2661 2792 { 2662 XLIST_FOREACH( queue_root_xp , iter_xp )2793 XLIST_FOREACH( root_xp , iter_xp ) 2663 2794 { 2664 // get client socket cluster and local pointer2795 // get client socket cluster and pointers 2665 2796 socket_xp = XLIST_ELEMENT( iter_xp , socket_t , tx_list ); 2666 2797 socket_ptr = GET_PTR( socket_xp ); 2667 2798 socket_cxy = GET_CXY( socket_xp ); 2668 2799 2669 // register socket in temporary list 2670 xlist_add_last( temp_root_xp , XPTR( socket_cxy , &socket_ptr->tx_temp )); 2671 temp_nr++; 2672 } 2800 // build extended pointer on socket tx_list field 2801 list_xp = XPTR( socket_cxy , &socket_ptr->tx_list ); 2802 2803 // get cmd_valid & r2t_valid from socket descriptor 2804 cmd_valid = (bool_t)hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_valid )); 2805 2806 // get r2t_valid from socket descriptor 2807 r2t_valid = (bool_t)remote_buf_status( XPTR( socket_cxy , &socket_ptr->r2tq )); 2808 2809 if( cmd_valid || r2t_valid ) // active => move socket, and exit loop 2810 { 2811 // move selected socket to last position for round-robin 2812 xlist_unlink( list_xp ); 2813 xlist_add_last( root_xp , list_xp ); 2814 2815 // exit loop 2816 found = true; 2817 break; 2818 } 2819 } // end loop on sockets 2673 2820 } 2674 2821 2675 2822 // release the lock protecting the client sockets queue 2676 2823 remote_busylock_release( queue_lock_xp ); 2677 2824 2678 if( temp_nr > 0 ) 2679 { 2680 // loop on temporary list 2681 XLIST_FOREACH( temp_root_xp , iter_xp ) 2682 { 2683 // get client socket cluster and local pointer 2684 socket_xp = XLIST_ELEMENT( iter_xp , socket_t , tx_temp ); 2685 socket_ptr = GET_PTR( socket_xp ); 2686 socket_cxy = GET_CXY( socket_xp ); 2687 2688 // get cmd_valid & t2t_valid from socket descriptor 2689 cmd_valid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_valid )); 2690 2691 // get r2t_valid from socket descriptor 2692 r2t_valid = (bool_t)remote_buf_status( XPTR( socket_cxy , &socket_ptr->r2tq )); 2693 2694 // test if socket is 
active 2695 if( cmd_valid || r2t_valid ) // active socket 2696 { 2697 2698 #if DEBUG_DEV_NIC_TX 2699 cycle = (uint32_t)hal_get_cycles(); 2700 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid )); 2701 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid )); 2702 if( cycle > DEBUG_DEV_NIC_TX ) 2703 printk("\n[%s] thread[%x,%x] found socket[%x,%d] / cmd_valid %d / r2t_valid %d / cycle %d\n", 2704 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, cmd_valid, r2t_valid, cycle ); 2705 #endif 2706 // build and send one packet/segment for this socket 2707 dev_nic_tx_build_packet( cmd_valid, 2708 r2t_valid, 2709 socket_xp, 2710 k_buf, 2711 chdev ); 2712 #if DEBUG_DEV_NIC_TX 2713 cycle = (uint32_t)hal_get_cycles(); 2714 if( cycle > DEBUG_DEV_NIC_TX ) 2715 dev_nic_packet_display( true, // is_tx 2716 this->process->pid, 2717 this->trdid, 2718 cycle, 2719 k_buf ); 2720 #endif 2721 } 2722 else // inactive socket 2723 { 2724 temp_nr--; 2725 } 2726 } // end loop on temporary list 2727 } 2728 2729 // block & deschedule if no active socket found in current iteration 2730 if( temp_nr == 0 ) 2825 if( found == false ) // block & deschedule if no active socket 2731 2826 { 2732 2827 … … 2737 2832 __FUNCTION__, this->process->pid, this->trdid, cycle ); 2738 2833 #endif 2739 2740 2834 // block and deschedule 2741 2835 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_CLIENT ); … … 2749 2843 #endif 2750 2844 } 2845 else // handle active socket request 2846 { 2847 // avoid warning 2848 total_length = 0; 2849 2850 // build extended pointer on socket lock 2851 socket_lock_xp = XPTR( socket_cxy , &socket_ptr->lock ); 2852 2853 // take socket lock 2854 remote_queuelock_acquire( socket_lock_xp ); 2855 2856 #if DEBUG_DEV_NIC_TX 2857 cycle = (uint32_t)hal_get_cycles(); 2858 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid )); 2859 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid )); 2860 #endif 2861 2862 #if DEBUG_DEV_NIC_TX 2863 if( cycle > DEBUG_DEV_NIC_TX ) 2864 printk("\n[%s] thread[%x,%x] select socket[%x,%d] / cmd_val %d / r2t_val %d / cycle %d\n", 2865 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, cmd_valid, r2t_valid, cycle ); 2866 #endif 2867 // build one UDP packet / TCP segment 2868 cmd_sts = dev_nic_tx_build_packet( socket_xp, 2869 k_buf, 2870 &total_length ); 2871 #if DEBUG_DEV_NIC_TX 2872 cycle = (uint32_t)hal_get_cycles(); 2873 if( cycle > DEBUG_DEV_NIC_TX ) 2874 printk("\n[%s] thread[%x,%x] for socket[%x,%x] build packet / %d bytes / sts %d / cycle %d\n", 2875 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, total_length, cmd_sts, cycle ); 2876 #endif 2877 // release socket lock 2878 remote_queuelock_release( socket_lock_xp ); 2879 2880 if( cmd_sts == CMD_STS_SUCCESS ) // move packet to TX queue 2881 { 2882 // move packet to NIC_TX queue 2883 dev_nic_tx_move_packet( chdev, 2884 k_buf, 2885 total_length ); 2886 #if DEBUG_DEV_NIC_TX 2887 cycle = (uint32_t)hal_get_cycles(); 2888 if( cycle > DEBUG_DEV_NIC_TX ) 2889 dev_nic_packet_display( pid, fdid, cycle, k_buf ); 2890 #endif 2891 // get socket.type, socket.tx_cmd and socket.tx_todo values 2892 tx_cmd = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_cmd )); 2893 tx_todo = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_todo )); 2894 sock_type = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->type )); 2895 2896 // client signaling depends on command type and socket type 2897 if( (tx_cmd == CMD_TX_SEND) && (tx_todo == 0) ) 2898 { 2899 // reset tx_valid for both UDP and TCP 2900 
hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid), false ); 2901 2902 // unblock client thread for UDP only 2903 if(sock_type == SOCK_DGRAM) 2904 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 2905 } 2906 else // type is CONNECT / ACCEPT / CLOSE 2907 { 2908 // reset tx_valid 2909 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid), false ); 2910 } 2911 } 2912 else // signal error to client thread 2913 { 2914 // reset tx_valid 2915 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid), false ); 2916 2917 // unblock tx_client thread 2918 dev_nic_unblock_tx_client( socket_xp , cmd_sts ); 2919 } 2920 } // end active socket handling 2751 2921 } // end infinite while loop 2752 2922 } // end dev_nic_tx_server() 2753 2923 2754 2924 2755 ///////////////////////////////////////////// 2756 void dev_nic_packet_display( bool_t is_tx, 2757 pid_t thread_pid, 2758 trdid_t thread_trdid, 2925 2926 2927 2928 ////////////////////////////////////////////////// 2929 void dev_nic_packet_display( pid_t socket_pid, 2930 uint32_t socket_fdid, 2759 2931 uint32_t cycle, 2760 2932 uint8_t * buf ) … … 2815 2987 remote_busylock_acquire( lock_xp ); 2816 2988 2817 if( is_tx ) 2818 { 2819 nolock_printk("\n*** NIC_TX server thread[%x,%x] send packet / cycle %d\n", 2820 thread_pid, thread_trdid, cycle ); 2821 } 2822 else 2823 { 2824 nolock_printk("\n*** NIC_RX server thread[%x,%x] get packet / cycle %d\n", 2825 thread_pid, thread_trdid, cycle ); 2826 } 2827 2828 nolock_printk("\n***** ETH header *****\n"); 2989 nolock_printk("\n***** packet sent by NIC_TX server for socket[%x,%d] / cycle %d\n", 2990 socket_pid, socket_fdid, cycle ); 2991 2992 nolock_printk(" ETH header\n"); 2829 2993 nolock_printk(" - dst_mac [6] = %l\n" , eth_dst_mac ); 2830 2994 nolock_printk(" - src_mac [6] = %l\n" , eth_src_mac ); 2831 2995 nolock_printk(" - length [2] = %d\n" , (uint32_t)eth_length ); 2832 nolock_printk(" ***** IP header *****\n");2996 nolock_printk(" IP header\n"); 2833 2997 nolock_printk(" - version [1] = %x\n" , (uint32_t)ip_version ); 2834 2998 nolock_printk(" - tos [1] = %x\n" , (uint32_t)ip_tos ); … … 2850 3014 ((uint16_t)buf[37] ) ; 2851 3015 2852 nolock_printk(" ***** UDP header *****\n");3016 nolock_printk(" UDP header\n"); 2853 3017 nolock_printk(" - src_port [2] = %d\n" , (uint32_t)udp_src_port ); 2854 3018 nolock_printk(" - dst_port [2] = %d\n" , (uint32_t)udp_dst_port ); … … 2881 3045 ((uint16_t)buf[53] ) ; 2882 3046 2883 nolock_printk(" ***** TCP header *****\n");3047 nolock_printk(" TCP header\n"); 2884 3048 nolock_printk(" - src_port [2] = %x\n" , (uint32_t)tcp_src_port ); 2885 3049 nolock_printk(" - dst_port [2] = %x\n" , (uint32_t)tcp_dst_port ); -
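The round-robin selection introduced in dev_nic_tx_server() (unlink the served socket and re-append it at the tail of the registered queue) can be modeled with a sentinel-based circular doubly-linked list, which is also how the kernel xlist works. In this sketch, node_t and its active flag are illustrative stand-ins for the socket descriptor and its tx_valid / r2t_valid state:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct node { struct node *prev, *next; bool active; } node_t;

    static void list_unlink( node_t * n )
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    static void list_add_last( node_t * root , node_t * n )
    {
        n->prev = root->prev;
        n->next = root;
        root->prev->next = n;
        root->prev = n;
    }

    // scan from the head, move the first active node to the tail, return it;
    // the served node therefore gets the lowest priority at the next scan
    static node_t * round_robin_pick( node_t * root )
    {
        node_t * n;
        for( n = root->next ; n != root ; n = n->next )
        {
            if( n->active )
            {
                list_unlink( n );
                list_add_last( root , n );
                return n;
            }
        }
        return NULL;   // no active socket : the server blocks and deschedules
    }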
trunk/kernel/devices/dev_nic.h
r674 r683 99 99 * 100 100 * - GET_KEY : get channel index from remote IP address and port 101 * - SET_RUN : activate/desactivate one channel 101 * - SET_RUN : activate/desactivate one channel (both directions) 102 102 * - GET_INSTRU : get one instrumentation counter value 103 103 * - CLEAR_INSTRU : reset all instrumentation counters … … 140 140 #define PROTOCOL_TCP 0x06 141 141 142 #define TCP_ISS_CLIENT 0x10000 // initial sequence number for TCP client143 #define TCP_ISS_SERVER 0x20000 // initial sequence number for TCP server144 #define TCP_MAX_WINDOW 0xFFFFF // initial TCP send window145 146 #define PAYLOAD_MAX_LEN 1500 // max length for an UDP packet / TCP segment147 148 142 #define TCP_FLAG_FIN 0x01 149 143 #define TCP_FLAG_SYN 0x02 … … 152 146 #define TCP_FLAG_ACK 0x10 153 147 #define TCP_FLAG_URG 0x20 154 155 #define TCP_RETRANSMISSION_TIMEOUT 10000000156 148 157 149 /***************************************************************************************** … … 192 184 * in the server thread descriptor, to access the NIC_RX & NIC_TX packet queues. 193 185 * The buffer is always a 2K bytes kernel buffer, containing an Ethernet packet. 194 * - The next 4 synchronous commands are used by the client th , and stored in the186 * - The next 4 synchronous commands are used by the client thread, and stored in the 195 187 * client thread descriptor, to directly access the NIC registers. 196 188 ****************************************************************************************/ … … 212 204 xptr_t dev_xp; /*! extended pointer on NIC chdev descriptor */ 213 205 nic_cmd_t type; /*! command type */ 214 uint8_t * buffer; /*! local pointer on kernel buffer 215 uint32_t length; /*! number of bytes in buffer 206 uint8_t * buffer; /*! local pointer on kernel buffer (when READ / WRITE) */ 207 uint32_t length; /*! number of bytes in buffer (when READ / WRITE ) */ 216 208 uint32_t status; /*! return value (depends on command type) */ 217 209 uint32_t error; /*! return an error from the hardware (0 if no error) */ … … 282 274 * This TX server thread is created by the dev_nic_init() function. 283 275 * It build and send UDP packets or TCP segments for all clients threads registered in 284 * the NIC_TX[channel] chdev. The command types are (CONNECT / SEND / CLOSE), and the 285 * priority between clients is round-robin. It takes into account the request registered 286 * by the RX server thread in the R2T queue associated to the involved socket. 287 * When a command is completed, it unblocks the client thread. For a SEND command, the 288 * last byte must have been sent for an UDP socket, and it must have been acknowledged 289 * for a TCP socket. 290 * When the TX client threads queue is empty, it blocks on THREAD_BLOCKED_CLIENT 291 * condition and deschedules. It is re-activated by a client thread registering a command. 276 * the NIC_TX[channel] chdev. The command types are (CONNECT / ACCEPT / CLOSE / SEND). 277 * It takes into account the request registered by the RX server thread in the R2T queues. 278 * The loop on registered sockets implements a round-robin priority between sockets. 279 * When no registered socket is active, it blocks on the THREAD_BLOCKED_CLIENT condition 280 * and deschedules. It is re-activated by a client thread registering a command. 292 281 * When the NIC_TX packet queue is full, it blocks on the THREAD_BLOCKED_ISR condition 293 282 * and deschedules. It is reactivated by the NIC_TX DMA engine. 
294 283 ****************************************************************************************** 295 284 * Implementation note: 296 * It execute an infinite loop in which it takes the lock protecting the clients list 297 * to build a "kleenex" list of currently registered clients. 298 * For each client registered in this "kleenex" list, it takes the lock protecting the 299 * socket state, build one packet/segment in a local 2K bytes kernel buffer, calls the 300 * transport layer to add the UDP/TCP header, calls the IP layer to add the IP header, 285 * At each iteration in the infinite loop, it takes the lock protecting the registered 286 * client sockets queue to find one active socket (tx_valid or r2t_valid flags set). 287 * For each registered socket, it takes the lock protecting the socket state, and 288 * exit the scan when an active socket has been found, without releasing the socket state. 289 * When the scan is completed, it release the lock protecting the queue, before handling 290 * the found active socket. The socket lock is released only when the requested packet 291 * has been build, and the active socket state has been updated. 292 * To handle a socket request, it calls the transport layer to build the UDP packet or 293 * TCP segment in a local 2K bytes kernel buffer, calls the IP layer to add the IP header, 301 294 * calls the ETH layer to add the ETH header, and moves the packet to the NIC_TX_QUEUE. 302 * Finally, it updates the socket state, and release the socket lock.303 295 ****************************************************************************************** 304 296 * @ chdev : [in] local pointer on one local NIC_TX[channel] chdev descriptor. … … 331 323 332 324 /****************************************************************************************** 333 * This function displays all the fields of an ETH/IP/TCP segment or ETH/IP/UDP packet.334 * *****************************************************************************************335 * @ is_tx : [in] sent packet if true / received packet if false.325 * This debug function can be called by the dev_nic_tx_server() function to display 326 * on TXT0 the header of a TX [ETH/IP/TCP] segment or [ETH/IP/UDP] packet. 327 ****************************************************************************************** 336 328 * @ pid : [in] process identifier. 337 * @ trdid : [in] threadidentifier.329 * @ fdid : [in] socket identifier. 338 330 * @ cycle : [in] date (number of cycles). 339 331 * @ buf : [in] local pointer on kernel buffer containing the packet. 340 332 *****************************************************************************************/ 341 void dev_nic_packet_display( bool_t is_tx, 342 pid_t pid, 343 trdid_t trdid, 333 void dev_nic_packet_display( pid_t pid, 334 uint32_t fdid, 344 335 uint32_t cycle, 345 336 uint8_t * buf ); -
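The scan / build split documented in the implementation note above fixes a lock-ordering discipline: the lock protecting the registered sockets queue is held only while searching for an active socket, and the per-socket lock is held across the whole packet build. A user-space sketch of that discipline, with pthread mutexes standing in for the kernel busylock and queuelock primitives, and socket_model_t as an illustrative type:

    #include <pthread.h>
    #include <stddef.h>

    typedef struct socket_model
    {
        pthread_mutex_t lock;     // models the per-socket lock
        int             active;   // models tx_valid / r2t_valid
    } socket_model_t;

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    static void tx_iteration( socket_model_t * socks , int n )
    {
        socket_model_t * found = NULL;
        int i;

        pthread_mutex_lock( &queue_lock );     // 1. scan under the queue lock
        for( i = 0 ; i < n ; i++ )
        {
            if( socks[i].active ) { found = &socks[i]; break; }
        }
        pthread_mutex_unlock( &queue_lock );   // 2. released before the build

        if( found != NULL )
        {
            pthread_mutex_lock( &found->lock );    // 3. socket lock spans the
            /* build the packet, update the socket state */
            found->active = 0;
            pthread_mutex_unlock( &found->lock );  //    whole packet build
        }
    }

One simplification: between steps 2 and 3 the kernel relies on the socket staying registered in the queue, a guarantee this sketch does not model.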
trunk/kernel/fs/devfs.c
r673 r683 56 56 xptr_t devfs_ctx_alloc( cxy_t cxy ) 57 57 { 58 kmem_req_t req;59 60 req.type = KMEM_KCM;61 req.order = bits_log2( sizeof(devfs_ctx_t) );62 req.flags = AF_KERNEL | AF_ZERO;63 64 58 // allocates devfs context from target cluster 65 return XPTR( cxy , kmem_remote_alloc( cxy , &req ) ); 59 void * ptr = kmem_remote_alloc( cxy, 60 bits_log2(sizeof(devfs_ctx_t)), 61 AF_ZERO ); 62 63 if( ptr == NULL ) return XPTR_NULL; 64 else return XPTR( cxy , ptr ); 66 65 } 67 66 … … 90 89 void devfs_ctx_destroy( xptr_t devfs_ctx_xp ) 91 90 { 92 kmem_req_t req;93 94 91 // get cluster and local pointer on devfs context 95 92 devfs_ctx_t * devfs_ctx_ptr = GET_PTR( devfs_ctx_xp ); 96 93 cxy_t devfs_ctx_cxy = GET_CXY( devfs_ctx_xp ); 97 94 98 req.type = KMEM_KCM;99 req.ptr = devfs_ctx_ptr;100 101 95 // release devfs context descriptor to remote cluster 102 kmem_remote_free( devfs_ctx_cxy , &req ); 96 kmem_remote_free( devfs_ctx_cxy, 97 devfs_ctx_ptr, 98 bits_log2(sizeof(devfs_ctx_t)) ); 103 99 } 104 100 -
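This devfs change illustrates the calling convention adopted throughout the changeset: the old kmem_req_t request structure is gone, and allocation / release take an explicit order argument, the log2 of the block size, with the same order passed back on free. A usage sketch of the pairing, assuming bits_log2() returns an order large enough for the block to hold the object:

    // allocate a zeroed devfs context in cluster cxy, then release it;
    // the alloc and the free must use the same order
    devfs_ctx_t * ctx = kmem_remote_alloc( cxy,
                                           bits_log2( sizeof(devfs_ctx_t) ),
                                           AF_ZERO );
    if( ctx != NULL )
    {
        /* ... initialize and use the context ... */
        kmem_remote_free( cxy , ctx , bits_log2( sizeof(devfs_ctx_t) ) );
    }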
trunk/kernel/fs/fatfs.c
r673 r683 1630 1630 xptr_t fatfs_ctx_alloc( cxy_t cxy ) 1631 1631 { 1632 kmem_req_t req;1633 1634 1632 // allocate memory from remote cluster 1635 req.type = KMEM_KCM; 1636 req.order = bits_log2( sizeof(fatfs_ctx_t) ); 1637 req.flags = AF_KERNEL | AF_ZERO; 1638 1639 return XPTR( cxy , kmem_remote_alloc( cxy , &req ) ); 1633 void * ptr = kmem_remote_alloc( cxy, 1634 bits_log2(sizeof(fatfs_ctx_t)), 1635 AF_ZERO ); 1636 1637 if( ptr == NULL ) return XPTR_NULL; 1638 else return XPTR( cxy , ptr ); 1640 1639 1641 1640 } //end faffs_ctx_alloc() … … 1645 1644 { 1646 1645 error_t error; 1647 kmem_req_t req;1648 1646 cxy_t cxy; // FATFS context cluster identifier 1649 1647 fatfs_ctx_t * fatfs_ctx_ptr; // local pointer on FATFS context … … 1667 1665 // allocate a 512 bytes buffer in remote cluster, used to store 1668 1666 // temporarily the BOOT sector, and permanently the FS_INFO sector 1669 req.type = KMEM_KCM; 1670 req.order = 9; // 512 bytes 1671 req.flags = AF_KERNEL | AF_ZERO; 1672 buffer = kmem_remote_alloc( cxy , &req ); 1673 1667 buffer = kmem_remote_alloc( cxy, 1668 9, 1669 AF_ZERO ); 1674 1670 if( buffer == NULL ) 1675 1671 { … … 1827 1823 void fatfs_ctx_destroy( xptr_t fatfs_ctx_xp ) 1828 1824 { 1829 kmem_req_t req;1830 1825 mapper_t * fat_mapper; 1831 1826 uint8_t * fs_info_buffer; … … 1844 1839 fs_info_buffer = hal_remote_lpt( XPTR( fatfs_ctx_cxy , &fatfs_ctx_ptr->fs_info_buffer ) ); 1845 1840 1846 // release FS_INFO buffer 1847 req.type = KMEM_KCM;1848 req.ptr = fs_info_buffer;1849 kmem_remote_free( fatfs_ctx_cxy , &req );1841 // release FS_INFO buffer (512 bytes) 1842 kmem_remote_free( fatfs_ctx_cxy, 1843 fs_info_buffer, 1844 9 ); 1850 1845 1851 1846 // release FATFS context descriptor 1852 req.type = KMEM_KCM;1853 req.ptr = fatfs_ctx_ptr;1854 kmem_remote_free( fatfs_ctx_cxy , &req);1847 kmem_remote_free( fatfs_ctx_cxy, 1848 fatfs_ctx_ptr, 1849 bits_log2(sizeof(fatfs_ctx_t)) ); 1855 1850 1856 1851 } // end fatfs_ctx_destroy() … … 2857 2852 2858 2853 // compute number of pages 2859 npages = size >> CONFIG_PPM_PAGE_ SHIFT;2854 npages = size >> CONFIG_PPM_PAGE_ORDER; 2860 2855 if( size & CONFIG_PPM_PAGE_MASK ) npages++; 2861 2856 -
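The npages computation at the end of this fatfs.c hunk is a ceiling division by the page size, done with a shift and a mask. A self-contained check of the idiom; the constants here are illustrative, the kernel values come from CONFIG_PPM_PAGE_ORDER and CONFIG_PPM_PAGE_MASK:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_ORDER 12                        // 4 Kbytes pages
    #define PAGE_MASK  ((1u << PAGE_ORDER) - 1)

    // number of pages needed to hold size bytes
    static uint32_t pages_for( uint32_t size )
    {
        uint32_t npages = size >> PAGE_ORDER;
        if( size & PAGE_MASK ) npages++;
        return npages;
    }

    int main( void )
    {
        printf( "%u %u %u\n" , pages_for(0) , pages_for(4096) , pages_for(4097) );
        // prints : 0 1 2
        return 0;
    }

The vfs.c variant further below adds a (size == 0) test to the condition, so that an empty file still gets one mapper page.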
trunk/kernel/fs/vfs.c
r673 r683 48 48 49 49 ////////////////////////////////////////////////////////////////////////////////////////// 50 // Extern variables50 // Extern global variables 51 51 ////////////////////////////////////////////////////////////////////////////////////////// 52 52 … … 54 54 extern chdev_directory_t chdev_dir; // allocated in kernel_init.c 55 55 extern char * lock_type_str[]; // allocated in kernel_init.c 56 extern process_t process_zero; // allocated in kernel_init.c 56 57 57 58 /////////////////////////////////////////////////////////////////////////////////////////// … … 186 187 uint32_t inum; // inode identifier (to be allocated) 187 188 vfs_ctx_t * ctx; // file system context 188 kmem_req_t req; // request to kernel memory allocator189 189 error_t error; 190 190 … … 192 192 uint32_t cycle = (uint32_t)hal_get_cycles(); 193 193 thread_t * this = CURRENT_THREAD; 194 pid_t pid = this->process->pid; 195 trdid_t trdid = this->trdid; 194 196 #endif 195 197 … … 202 204 203 205 #if DEBUG_VFS_ERROR 204 if( DEBUG_VFS_ERROR < cycle ) 205 printk("\n[ERROR] in %s : thread[%x,%x] / illegal FS type\n", 206 __FUNCTION__ , this->process->pid , this->trdid ); 206 printk("\n[ERROR] in %s : thread[%x,%x] / illegal FS type / cycle %d\n", 207 __FUNCTION__ , pid , trdid, cycle ); 207 208 #endif 208 209 return -1; … … 220 221 221 222 #if DEBUG_VFS_ERROR 222 if( DEBUG_VFS_ERROR < cycle ) 223 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate inum\n", 224 __FUNCTION__ , this->process->pid , this->trdid ); 223 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate inum / cycle %d\n", 224 __FUNCTION__ , pid , trdid, cycle ); 225 225 #endif 226 226 return -1; … … 234 234 235 235 #if DEBUG_VFS_ERROR 236 if( DEBUG_VFS_ERROR < cycle ) 237 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate mapper\n", 238 __FUNCTION__ , this->process->pid , this->trdid ); 236 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate mapper / cycle %d\n", 237 __FUNCTION__ , pid , trdid, cycle ); 239 238 #endif 240 239 vfs_ctx_inum_release( XPTR( cxy , ctx ) , inum ); … … 244 243 mapper_ptr = GET_PTR( mapper_xp ); 245 244 246 // allocate one page for VFS inode descriptor 247 // because the embedded "children" xhtab footprint 248 req.type = KMEM_PPM; 249 req.order = 0; 250 req.flags = AF_KERNEL | AF_ZERO; 251 inode_ptr = kmem_remote_alloc( cxy , &req ); 252 245 // allocate memory for inode descriptor 246 inode_ptr = kmem_remote_alloc( cxy, 247 bits_log2(sizeof(vfs_inode_t)), 248 AF_ZERO ); 253 249 if( inode_ptr == NULL ) 254 250 { 255 251 256 252 #if DEBUG_VFS_ERROR 257 if( DEBUG_VFS_ERROR < cycle ) 258 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate inode\n", 259 __FUNCTION__ , this->process->pid , this->trdidi ); 253 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate inode / cycle %d\n", 254 __FUNCTION__ , pid , trdid, cycle ); 260 255 #endif 261 256 vfs_ctx_inum_release( XPTR( cxy , ctx ) , inum ); … … 297 292 if( DEBUG_VFS_INODE_CREATE < cycle ) 298 293 printk("\n[%s] thread[%x,%x] created inode (%x,%x) / ctx %x / fs_type %d / cycle %d\n", 299 __FUNCTION__, this->process->pid, this->trdid, cxy, inode_ptr, ctx, ctx->type, cycle );294 __FUNCTION__, pid, trdid, cxy, inode_ptr, ctx, ctx->type, cycle ); 300 295 #endif 301 296 … … 318 313 319 314 // release memory allocated for inode descriptor 320 kmem_req_t req; 321 req.type = KMEM_PPM; 322 req.ptr = inode_ptr; 323 kmem_remote_free( inode_cxy , &req ); 315 kmem_remote_free( inode_cxy, 316 inode_ptr, 317 bits_log2(sizeof(vfs_inode_t)) ); 324 318 325 319 } // end 
vfs_inode_destroy() … … 447 441 uint32_t size = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->size ) ); 448 442 449 #if DEBUG_VFS_INODE_LOAD_ALL 450 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 443 #if DEBUG_VFS_INODE_LOAD_ALL || DEBUG_VFS_ERROR 451 444 uint32_t cycle = (uint32_t)hal_get_cycles(); 452 445 thread_t * this = CURRENT_THREAD; 446 #endif 447 448 #if DEBUG_VFS_INODE_LOAD_ALL 449 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 453 450 vfs_inode_get_name( inode_xp , name ); 454 451 if( DEBUG_VFS_INODE_LOAD_ALL < cycle ) … … 458 455 459 456 // compute number of pages 460 uint32_t npages = size >> CONFIG_PPM_PAGE_ SHIFT;457 uint32_t npages = size >> CONFIG_PPM_PAGE_ORDER; 461 458 if( (size & CONFIG_PPM_PAGE_MASK) || (size == 0) ) npages++; 462 459 … … 468 465 page_xp = mapper_get_page( XPTR( inode_cxy , mapper ), page_id ); 469 466 470 if( page_xp == XPTR_NULL ) return -1; 467 if( page_xp == XPTR_NULL ) 468 { 469 470 #if DEBUG_VFS_ERROR 471 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate memory for mapper / cycle %d\n", 472 __FUNCTION__, this->process->pid, this->trdid, cycle ); 473 #endif 474 return -1; 475 } 471 476 } 472 477 … … 534 539 xptr_t * dentry_xp ) 535 540 { 536 kmem_req_t req; // request to kernel memory allocator537 541 vfs_ctx_t * ctx = NULL; // context descriptor 538 542 vfs_dentry_t * dentry_ptr; // dentry descriptor (to be allocated) … … 557 561 558 562 #if DEBUG_VFS_ERROR 559 if( DEBUG_VFS_ERROR < cycle ) 560 printk("\n[ERROR] in %s : thread[%x,%x] / undefined fs_type %d\n", 561 __FUNCTION__ , this->process->pid, this->trdid, fs_type ); 563 printk("\n[ERROR] in %s : thread[%x,%x] / undefined fs_type %d / cycle %d\n", 564 __FUNCTION__ , this->process->pid, this->trdid, fs_type, cycle ); 562 565 #endif 563 566 return -1; … … 570 573 571 574 // allocate memory for dentry descriptor 572 req.type = KMEM_KCM; 573 req.order = bits_log2( sizeof(vfs_dentry_t) ); 574 req.flags = AF_KERNEL | AF_ZERO; 575 dentry_ptr = kmem_remote_alloc( cxy , &req ); 576 575 dentry_ptr = kmem_remote_alloc( cxy, 576 bits_log2(sizeof(vfs_dentry_t)), 577 AF_ZERO ); 577 578 if( dentry_ptr == NULL ) 578 579 { 579 580 580 581 #if DEBUG_VFS_ERROR 581 if( DEBUG_VFS_ERROR < cycle ) 582 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate dentry descriptor\n", 583 __FUNCTION__ , this->process->pid, this->trdid ); 582 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate dentry descriptor / cycle %d\n", 583 __FUNCTION__ , this->process->pid, this->trdid, cycle ); 584 584 #endif 585 585 return -1; … … 616 616 617 617 // release memory allocated to dentry 618 kmem_req_t req; 619 req.type = KMEM_KCM; 620 req.ptr = dentry_ptr; 621 kmem_remote_free( dentry_cxy , &req ); 618 kmem_remote_free( dentry_cxy, 619 dentry_ptr, 620 bits_log2(sizeof(vfs_dentry_t)) ); 622 621 623 622 } // end vfs_dentry_destroy() … … 634 633 { 635 634 vfs_file_t * file_ptr; 636 kmem_req_t req;637 635 uint32_t type; 638 636 mapper_t * mapper; … … 644 642 cxy_t inode_cxy = GET_CXY( inode_xp ); 645 643 644 #if DEBUG_VFS_FILE_CREATE || DEBUG_VFS_ERROR 645 thread_t * this = CURRENT_THREAD; 646 uint32_t cycle = (uint32_t)hal_get_cycles(); 647 #endif 648 646 649 #if DEBUG_VFS_FILE_CREATE 647 thread_t * this = CURRENT_THREAD;648 uint32_t cycle = (uint32_t)hal_get_cycles();649 650 if( DEBUG_VFS_FILE_CREATE < cycle ) 650 651 printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) / cycle %d\n", … … 653 654 654 655 // allocate memory for new file descriptor 655 req.type = KMEM_KCM; 656 req.order = bits_log2( sizeof(vfs_file_t) ); 657 req.flags = 
AF_KERNEL | AF_ZERO; 658 file_ptr = kmem_remote_alloc( inode_cxy , &req ); 659 660 if( file_ptr == NULL ) return -1; 656 file_ptr = kmem_remote_alloc( inode_cxy, 657 bits_log2(sizeof(vfs_file_t)), 658 AF_ZERO ); 659 660 if( file_ptr == NULL ) 661 { 662 663 #if DEBUG_VFS_ERROR 664 printk("\n[ERROR] in %s : thread[%x,%x] / cannot allocate memory / cycle %d\n", 665 __FUNCTION__ , this->process->pid, this->trdid, cycle ); 666 #endif 667 return -1; 668 } 661 669 662 670 // get type, ctx, mapper, and buffer from inode descriptor … … 718 726 719 727 // release file descriptor 720 kmem_req_t req; 721 req.type = KMEM_KCM; 722 req.ptr = file_ptr; 723 kmem_remote_free( file_cxy , &req ); 728 kmem_remote_free( file_cxy, 729 file_ptr, 730 bits_log2(sizeof(vfs_file_t)) ); 724 731 725 732 #if DEBUG_VFS_FILE_DESTROY … … 775 782 xptr_t lock_xp; // extended pointer on Inode Tree lock 776 783 784 #if DEBUG_VFS_OPEN || DEBUG_VFS_ERROR 785 uint32_t cycle = (uint32_t)hal_get_cycles(); 786 thread_t * this = CURRENT_THREAD; 787 pid_t pid = this->process->pid; 788 trdid_t trdid = this->trdid; 789 #endif 790 777 791 if( mode != 0 ) 778 792 { 779 printk("\n[ERROR] in %s : the mode parameter is not supported yet\n" ); 793 794 #if DEBUG_VFS_ERROR 795 printk("\n[ERROR] in %s : the mode parameter is not supported yet\n" ); 796 #endif 780 797 return -1; 781 798 } 782 783 thread_t * this = CURRENT_THREAD;784 process_t * process = this->process;785 799 786 800 // compute lookup working mode … … 790 804 if( (flags & O_EXCL ) ) lookup_mode |= VFS_LOOKUP_EXCL; 791 805 792 #if DEBUG_VFS_OPEN || DEBUG_VFS_ERROR793 uint32_t cycle = (uint32_t)hal_get_cycles();794 #endif795 796 806 #if DEBUG_VFS_OPEN 797 807 if( DEBUG_VFS_OPEN < cycle ) 798 808 printk("\n[%s] thread[%x,%x] enter for <%s> / root_inode (%x,%x) / cycle %d\n", 799 __FUNCTION__, p rocess->pid, this->trdid, path, GET_CXY(root_xp), GET_PTR(root_xp), cycle );809 __FUNCTION__, pid, trdid, path, GET_CXY(root_xp), GET_PTR(root_xp), cycle ); 800 810 #endif 801 811 … … 809 819 810 820 // build extended pointer on lock protecting Inode Tree 811 vfs_root_xp = process ->vfs_root_xp;821 vfs_root_xp = process_zero.vfs_root_xp; 812 822 vfs_root_ptr = GET_PTR( vfs_root_xp ); 813 823 vfs_root_cxy = GET_CXY( vfs_root_xp ); … … 831 841 832 842 #if DEBUG_VFS_ERROR 833 if( DEBUG_VFS_ERROR < cycle ) 834 printk("\n[ERROR] in %s : thread[%x,%x] cannot get inode <%s>\n", 835 __FUNCTION__ , process->pid, this->trdid , path ); 843 printk("\n[ERROR] in %s : thread[%x,%x] cannot get inode <%s> / cycle %d\n", 844 __FUNCTION__ , pid, trdid , path , cycle ); 836 845 #endif 837 846 return -1; … … 843 852 844 853 #if (DEBUG_VFS_OPEN & 1) 845 cycle = (uint32_t)hal_get_cycles();846 854 if( DEBUG_VFS_OPEN < cycle ) 847 855 printk("\n[%s] thread[%x,%x] found inode(%x,%x) for <%s>\n", 848 __FUNCTION__, p rocess->pid, this->trdid, inode_cxy, inode_ptr, path );856 __FUNCTION__, pid, trdid, inode_cxy, inode_ptr, path ); 849 857 #endif 850 858 … … 852 860 error = vfs_file_create( inode_xp , file_attr , &file_xp ); 853 861 854 if( error ) return error; 862 if( error ) 863 { 864 865 #if DEBUG_VFS_ERROR 866 printk("\n[ERROR] in %s : thread[%x,%x] cannot create file descriptor for <%s> / cycle %d\n", 867 __FUNCTION__ , pid, trdid , path , cycle ); 868 #endif 869 return error; 870 } 855 871 856 872 #if (DEBUG_VFS_OPEN & 1) 857 cycle = (uint32_t)hal_get_cycles();858 873 if( DEBUG_VFS_OPEN < cycle ) 859 874 printk("\n[%s] thread[%x,%x] created file descriptor (%x,%x) for <%s>\n", 860 __FUNCTION__, p rocess->pid, 
this->trdid, GET_CXY(file_xp), GET_PTR(file_xp), path );875 __FUNCTION__, pid, trdid, GET_CXY(file_xp), GET_PTR(file_xp), path ); 861 876 #endif 862 877 … … 864 879 error = process_fd_register( process_xp , file_xp , &file_id ); 865 880 866 if( error ) return error; 881 if( error ) 882 { 883 884 #if DEBUG_VFS_ERROR 885 printk("\n[ERROR] in %s : thread[%x,%x] cannot register file descriptor for <%s> / cycle %d\n", 886 __FUNCTION__ , pid, trdid , path , cycle ); 887 #endif 888 return error; 889 } 867 890 868 891 // get new file descriptor cluster and local pointer … … 891 914 if( DEBUG_VFS_OPEN < cycle ) 892 915 printk("\n[%s] thread[%x,%x] exit for <%s> / fdid %d / file(%x,%x) / cycle %d\n", 893 __FUNCTION__, p rocess->pid, this->trdid, path, file_id,916 __FUNCTION__, pid, trdid, path, file_id, 894 917 GET_CXY( file_xp ), GET_PTR( file_xp ), cycle ); 895 918 #endif … … 997 1020 998 1021 #if DEBUG_VFS_ERROR 999 if( DEBUG_VFS_ERROR < cycle ) 1000 printk("\n[ERROR] in %s thread[%x,%x] cannot move data", 1001 __FUNCTION__, this->process->pid, this->trdid ); 1022 printk("\n[ERROR] in %s thread[%x,%x] cannot move data / cycle %d", 1023 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1002 1024 #endif 1003 1025 return -1; … … 1008 1030 1009 1031 #if DEBUG_VFS_USER_MOVE 1010 cycle = (uint32_t)hal_get_cycles();1011 1032 if( cycle > DEBUG_VFS_USER_MOVE ) 1012 1033 { … … 1032 1053 cxy_t file_cxy; // remote file descriptor cluster 1033 1054 vfs_file_t * file_ptr; // remote file descriptor local pointer 1034 vfs_file_type_t inode_type; // remote file type1035 1055 uint32_t file_offset; // current offset in file 1036 1056 mapper_t * mapper_ptr; // remote mapper local pointer … … 1041 1061 assert( __FUNCTION__, (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL" ); 1042 1062 1063 #if DEBUG_VFS_KERNEL_MOVE || DEBUG_VFS_ERROR 1064 uint32_t cycle = (uint32_t)hal_get_cycles(); 1065 thread_t * this = CURRENT_THREAD; 1066 #endif 1067 1043 1068 // get cluster and local pointer on remote file descriptor 1044 1069 file_cxy = GET_CXY( file_xp ); 1045 1070 file_ptr = GET_PTR( file_xp ); 1046 1047 // get inode type from remote file descriptor1048 inode_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) );1049 1050 // check inode type1051 assert( __FUNCTION__, (inode_type == FILE_TYPE_REG), "bad file type" );1052 1071 1053 1072 // get mapper pointers and file offset from file descriptor … … 1064 1083 if( error ) 1065 1084 { 1066 printk("\n[ERROR] in %s : cannot move data", __FUNCTION__ ); 1085 1086 #if DEBUG_VFS_ERROR 1087 printk("\n[ERROR] in %s : thread[%x,%x] / cannot move data / cycle %d\n", 1088 __FUNCTION__ , this->process->pid , this->trdid , cycle ); 1089 #endif 1067 1090 return -1; 1091 1068 1092 } 1069 1093 1070 1094 #if DEBUG_VFS_KERNEL_MOVE 1071 1095 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 1072 uint32_t cycle = (uint32_t)hal_get_cycles();1073 thread_t * this = CURRENT_THREAD;1074 1096 cxy_t buffer_cxy = GET_CXY( buffer_xp ); 1075 1097 void * buffer_ptr = GET_PTR( buffer_xp ); … … 1109 1131 assert( __FUNCTION__, (new_offset != NULL ) , "new_offset == NULL" ); 1110 1132 1133 #if DEBUG_VFS_LSEEK || DEBUG_VFS_ERROR 1134 uint32_t cycle = (uint32_t)hal_get_cycles(); 1135 thread_t * this = CURRENT_THREAD; 1136 #endif 1137 1111 1138 // get cluster and local pointer on remote file descriptor 1112 1139 file_cxy = GET_CXY( file_xp ); … … 1138 1165 else 1139 1166 { 1140 printk("\n[ERROR] in %s : illegal whence value\n", __FUNCTION__ ); 1167 1168 #if DEBUG_VFS_ERROR 1169 printk("\n[ERROR] in %s : thread[%x,%x] 
/ undefined whence value / cycle %d", 1170 __FUNCTION__ , this->process->pid , this->trdid , cycle ); 1171 #endif 1141 1172 remote_rwlock_wr_release( lock_xp ); 1142 1173 return -1; … … 1191 1222 cluster_t * cluster = LOCAL_CLUSTER; 1192 1223 1224 #if DEBUG_VFS_CLOSE || DEBUG_VFS_ERROR 1225 uint32_t cycle = (uint32_t)hal_get_cycles(); 1226 #endif 1227 1193 1228 // get file name 1194 1229 vfs_file_get_name( file_xp , name ); 1195 1230 1196 1231 #if DEBUG_VFS_CLOSE 1197 uint32_t cycle = (uint32_t)hal_get_cycles();1198 1232 if( DEBUG_VFS_CLOSE < cycle ) 1199 1233 printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n", … … 1215 1249 if( error ) 1216 1250 { 1217 printk("\n[ERROR] in %s : cannot synchronise dirty pages for <%s>\n", 1218 __FUNCTION__, name ); 1251 1252 #if DEBUG_VFS_ERROR 1253 printk("\n[ERROR] in %s : thread[%x,%x] / cannot synchronise dirty pages for <%s> / cycle %d\n", 1254 __FUNCTION__ , this->process->pid , this->trdid , name , cycle ); 1255 #endif 1219 1256 return -1; 1220 1257 } … … 1222 1259 #if DEBUG_VFS_CLOSE 1223 1260 if( DEBUG_VFS_CLOSE < cycle ) 1224 printk("\n[%s] thread[%x,%x] synchronised mapper of <%s>to device\n",1261 printk("\n[%s] thread[%x,%x] synchronised <%s> mapper to device\n", 1225 1262 __FUNCTION__, process->pid, this->trdid, name ); 1226 1263 #endif … … 1259 1296 if( error ) 1260 1297 { 1261 printk("\n[ERROR] in %s : cannot update size in parent\n", 1262 __FUNCTION__ ); 1298 1299 #if DEBUG_VFS_ERROR 1300 printk("\n[ERROR] in %s : thread[%x,%x] / cannot update size in parent / cycle %d\n", 1301 __FUNCTION__ , this->process->pid , this->trdid , cycle ); 1302 #endif 1263 1303 return -1; 1264 1304 } … … 1277 1317 if( error ) 1278 1318 { 1279 printk("\n[ERROR] in %s : cannot synchronise parent mapper to device\n", 1280 __FUNCTION__ ); 1319 1320 #if DEBUG_VFS_ERROR 1321 printk("\n[ERROR] in %s : thread[%x,%x] / cannot synchronise mapper & device / cycle %d\n", 1322 __FUNCTION__ , this->process->pid , this->trdid , cycle ); 1323 #endif 1281 1324 return -1; 1282 1325 } … … 1367 1410 char last_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1368 1411 1412 #if DEBUG_VFS_MKDIR || DEBUG_VFS_ERROR 1413 uint32_t cycle = (uint32_t)hal_get_cycles(); 1414 #endif 1415 1369 1416 thread_t * this = CURRENT_THREAD; 1370 1417 process_t * process = this->process; … … 1373 1420 char root_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1374 1421 vfs_inode_get_name( root_xp , root_name ); 1375 uint32_t cycle = (uint32_t)hal_get_cycles();1376 1422 if( DEBUG_VFS_MKDIR < cycle ) 1377 1423 printk("\n[%s] thread[%x,%x] enter / root <%s> / path <%s> / cycle %d\n", … … 1396 1442 if( error ) 1397 1443 { 1444 1445 #if DEBUG_VFS_ERROR 1446 printk("\n[ERROR] in %s : thread[%x,%x] cannot get parent inode for <%s> / cycle %d\n", 1447 __FUNCTION__, process->pid, this->trdid, path , cycle ); 1448 #endif 1398 1449 remote_rwlock_wr_release( lock_xp ); 1399 printk("\n[ERROR] in %s : cannot get parent inode for <%s>\n",1400 __FUNCTION__, path );1401 1450 return -1; 1402 1451 } … … 1423 1472 if( error ) 1424 1473 { 1474 1475 #if DEBUG_VFS_ERROR 1476 printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry in cluster %x for <%s> / cycle %d\n", 1477 __FUNCTION__, process->pid, this->trdid, parent_cxy, path , cycle ); 1478 #endif 1425 1479 remote_rwlock_wr_release( lock_xp ); 1426 printk("\n[ERROR] in %s : cannot create new dentry in cluster %x for <%s>\n",1427 __FUNCTION__, parent_cxy, path );1428 1480 return -1; 1429 1481 } … … 1457 1509 if( error ) 1458 1510 { 1511 1512 #if DEBUG_VFS_ERROR 1513 printk("\n[ERROR] in %s 
: thread[%x,%x] cannot create inode in cluster %x for <%s> / cycle %d\n", 1514 __FUNCTION__, process->pid, this->trdid, parent_cxy, path , cycle ); 1515 #endif 1459 1516 remote_rwlock_wr_release( lock_xp ); 1460 printk("\n[ERROR] in %s : cannot create new inode in cluster %x for <%s>\n",1461 __FUNCTION__ , inode_cxy , path );1462 1517 vfs_dentry_destroy( dentry_xp ); 1463 1518 return -1; … … 1504 1559 if( error ) 1505 1560 { 1561 1562 #if DEBUG_VFS_ERROR 1563 printk("\n[ERROR] in %s : thread[%x,%x] cannot create <.> & <..> dentries for <%s> / cycle %d\n", 1564 __FUNCTION__, process->pid, this->trdid, path , cycle ); 1565 #endif 1566 vfs_remove_child_from_parent( dentry_xp ); 1506 1567 remote_rwlock_wr_release( lock_xp ); 1507 printk("\n[ERROR] in %s : cannot create new inode in cluster %x for <%s>\n",1508 __FUNCTION__ , inode_cxy , path );1509 vfs_dentry_destroy( dentry_xp );1510 1568 return -1; 1511 1569 } … … 1520 1578 if( error ) 1521 1579 { 1522 printk("\n[ERROR] in %s : cannot update parent directory for <%s>\n", 1523 __FUNCTION__, path ); 1580 1581 #if DEBUG_VFS_ERROR 1582 printk("\n[ERROR] in %s : thread[%x,%x] cannot update parent directory for <%s> / cycle %d\n", 1583 __FUNCTION__, process->pid, this->trdid, path , cycle ); 1584 #endif 1585 vfs_remove_child_from_parent( dentry_xp ); 1524 1586 return -1; 1525 1587 } … … 1527 1589 #if(DEBUG_VFS_MKDIR & 1) 1528 1590 if( DEBUG_VFS_MKDIR < cycle ) 1529 printk("\n[%s] thread[%x,%x] updated parent dir (mapper and IOC) for <%s>\n",1591 printk("\n[%s] thread[%x,%x] created <%s> dir (Inode-Tree, Mapper and IOC)\n", 1530 1592 __FUNCTION__, process->pid, this->trdid, path ); 1531 1593 #endif … … 1565 1627 char new_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1566 1628 1629 #if DEBUG_VFS_LINK || DEBUG_VFS_ERROR 1630 uint32_t cycle = (uint32_t)hal_get_cycles(); 1631 #endif 1632 1567 1633 thread_t * this = CURRENT_THREAD; 1568 1634 process_t * process = this->process; … … 1573 1639 vfs_inode_get_name( old_root_xp , old_root_name ); 1574 1640 vfs_inode_get_name( new_root_xp , new_root_name ); 1575 uint32_t cycle = (uint32_t)hal_get_cycles();1576 1641 if( DEBUG_VFS_LINK < cycle ) 1577 1642 printk("\n[%s] thread[%x,%x] enter / old_root <%s> / old_path <%s> / " … … 1598 1663 if( error ) 1599 1664 { 1665 1666 #if DEBUG_VFS_ERROR 1667 printk("\n[ERROR] in %s : thread[%x,%x] cannot get target inode for <%s> / cycle %d\n", 1668 __FUNCTION__, process->pid, this->trdid, old_path , cycle ); 1669 #endif 1600 1670 remote_rwlock_wr_release( lock_xp ); 1601 printk("\n[ERROR] in %s : cannot get target inode for <%s>\n",1602 __FUNCTION__, old_path );1603 1671 return -1; 1604 1672 } … … 1619 1687 if( error ) 1620 1688 { 1689 1690 #if DEBUG_VFS_ERROR 1691 printk("\n[ERROR] in %s : thread[%x,%x] cannot get parent inode for <%s> / cycle %d\n", 1692 __FUNCTION__, process->pid, this->trdid, new_path , cycle ); 1693 #endif 1621 1694 remote_rwlock_wr_release( lock_xp ); 1622 printk("\n[ERROR] in %s : cannot get parent inode for <%s>\n",1623 __FUNCTION__, new_path );1624 1695 return -1; 1625 1696 } … … 1655 1726 if( error ) 1656 1727 { 1728 1729 #if DEBUG_VFS_ERROR 1730 printk("\n[ERROR] in %s : thread[%x,%x] cannot create new dentry for <%s> / cycle %d\n", 1731 __FUNCTION__, process->pid, this->trdid, new_path , cycle ); 1732 #endif 1657 1733 remote_rwlock_wr_release( lock_xp ); 1658 printk("\n[ERROR] in %s : cannot create new dentry for <%s>\n",1659 __FUNCTION__, new_path );1660 1734 return -1; 1661 1735 } … … 1696 1770 if( error ) 1697 1771 { 1698 printk("\n[ERROR] in %s : 
cannot update new parent directory for <%s>\n", 1699 __FUNCTION__, new_path ); 1772 1773 #if DEBUG_VFS_ERROR 1774 printk("\n[ERROR] in %s : thread[%x,%x] cannot update parent directory for <%s> / cycle %d\n", 1775 __FUNCTION__, process->pid, this->trdid, new_path , cycle ); 1776 #endif 1700 1777 return -1; 1701 1778 } … … 1710 1787 else 1711 1788 { 1712 // release the lock protecting Inode Tree 1789 1790 #if DEBUG_VFS_ERROR 1791 printk("\n[ERROR] in %s : thread[%x,%x] / unsupported inode type %s / cycle %d\n", 1792 __FUNCTION__, process->pid, this->trdid, vfs_inode_type_str( inode_type ), cycle ); 1793 #endif 1713 1794 remote_rwlock_wr_release( lock_xp ); 1714 1715 printk("\n[ERROR] in %s : unsupported inode type %s\n",1716 __FUNCTION__ , vfs_inode_type_str( inode_type ) );1717 1795 return -1; 1718 1796 } … … 1746 1824 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; // name of parent directory 1747 1825 1826 #if DEBUG_VFS_UNLINK || DEBUG_VFS_ERROR 1827 uint32_t cycle = (uint32_t)hal_get_cycles(); 1828 #endif 1829 1748 1830 thread_t * this = CURRENT_THREAD; 1749 1831 process_t * process = this->process; 1750 1832 1751 1833 #if DEBUG_VFS_UNLINK 1752 uint32_t cycle = (uint32_t)hal_get_cycles();1753 1834 char root_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1754 1835 vfs_inode_get_name( root_xp , root_name ); … … 1775 1856 if( error ) 1776 1857 { 1858 1859 #if DEBUG_VFS_ERROR 1860 printk("\n[ERROR] in %s : thread[%x,%x] cannot get parent inode for <%s> / cycle %d\n", 1861 __FUNCTION__, process->pid, this->trdid, path , cycle ); 1862 #endif 1777 1863 remote_rwlock_wr_release( lock_xp ); 1778 printk("\n[ERROR] in %s : cannot get parent inode for <%s> in <%s>\n",1779 __FUNCTION__, child_name, path );1780 1864 return -1; 1781 1865 } … … 1824 1908 if( error ) 1825 1909 { 1826 printk("\n[ERROR] in %s : cannot create inode <%s> in Inode Tree\n", 1827 __FUNCTION__ , child_name ); 1910 1911 #if DEBUG_VFS_ERROR 1912 printk("\n[ERROR] in %s : thread[%x,%x] cannot create node <%s> in Inode_Tree / cycle %d\n", 1913 __FUNCTION__, process->pid, this->trdid, path, cycle ); 1914 #endif 1915 remote_rwlock_wr_release( lock_xp ); 1828 1916 return -1; 1829 1917 } … … 1839 1927 if ( error ) 1840 1928 { 1841 printk("\n[ERROR] in %s : cannot get entry <%s> in parent <%s> mapper\n", 1842 __FUNCTION__ , child_name, parent_name ); 1929 1930 #if DEBUG_VFS_ERROR 1931 printk("\n[ERROR] in %s : thread[%x,%x] cannot get dentry <%s> in parent <%s> mapper / cycle %d\n", 1932 __FUNCTION__, process->pid, this->trdid, child_name, parent_name, cycle ); 1933 #endif 1934 remote_rwlock_wr_release( lock_xp ); 1843 1935 return -1; 1844 1936 } … … 1861 1953 } 1862 1954 1863 // At this point the Inode Tree contains the target dentry and child inode1955 // At this point the Inode-Tree contains the parent dentry and child inode 1864 1956 // we can safely remove this dentry from both the parent mapper, and the Inode Tree. 
1865 1957 … … 1897 1989 if( inode_children != 0 ) 1898 1990 { 1991 1992 #if DEBUG_VFS_ERROR 1993 printk("\n[ERROR] in %s : thread[%x,%x] cannot remove <%s> inode that has children / cycle %d\n", 1994 __FUNCTION__, process->pid, this->trdid, path, cycle ); 1995 #endif 1899 1996 remote_rwlock_wr_release( lock_xp ); 1900 printk("\n[ERROR] in %s : cannot remove <%s> inode that has children\n",1901 __FUNCTION__, path );1902 1997 return -1; 1903 1998 } … … 1908 2003 if( error ) 1909 2004 { 2005 2006 #if DEBUG_VFS_ERROR 2007 printk("\n[ERROR] in %s : thread[%x,%x] cannot update FAT mapper to remove <%s> / cycle %d\n", 2008 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2009 #endif 1910 2010 remote_rwlock_wr_release( lock_xp ); 1911 printk("\n[ERROR] in %s : cannot update FAT mapper to remove <%s> inode\n",1912 __FUNCTION__ , path );1913 2011 return -1; 1914 2012 } … … 1927 2025 if( error ) 1928 2026 { 2027 2028 #if DEBUG_VFS_ERROR 2029 printk("\n[ERROR] in %s : thread[%x,%x] cannot update parent directory on IOC for <%s> / cycle %d\n", 2030 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2031 #endif 1929 2032 remote_rwlock_wr_release( lock_xp ); 1930 printk("\n[ERROR] in %s : cannot update dentry on device for <%s>\n",1931 __FUNCTION__ , path );1932 2033 return -1; 1933 2034 } … … 1979 2080 else 1980 2081 { 2082 2083 #if DEBUG_VFS_ERROR 2084 printk("\n[ERROR] in %s : thread[%x,%x] unsupported inode type %s for <%s> / cycle %d\n", 2085 __FUNCTION__, process->pid, this->trdid, vfs_inode_type_str( inode_type ), path, cycle ); 2086 #endif 1981 2087 remote_rwlock_wr_release( lock_xp ); 1982 printk("\n[ERROR] in %s : unsupported inode type %s\n",1983 __FUNCTION__ , vfs_inode_type_str( inode_type ) );1984 2088 return -1; 1985 2089 } … … 2004 2108 process_t * process = this->process; 2005 2110 2110 #if DEBUG_VFS_STAT || DEBUG_VFS_ERROR 2111 uint32_t cycle = (uint32_t)hal_get_cycles(); 2112 #endif 2113 2006 2114 // build extended pointer on lock protecting Inode Tree (in VFS root inode) 2007 2115 vfs_root_xp = process->vfs_root_xp; … … 2025 2133 if( error ) 2026 2134 { 2027 printk("\n[ERROR] in %s : cannot found inode <%s>\n", 2028 __FUNCTION__ , path ); 2135 2136 #if DEBUG_VFS_ERROR 2137 printk("\n[ERROR] in %s : thread[%x,%x] cannot find inode <%s> / cycle %d\n", 2138 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2139 #endif 2029 2140 return -1; 2030 2141 } … … 2050 2161 2051 2162 #if DEBUG_VFS_STAT 2052 uint32_t cycle = (uint32_t)hal_get_cycles();2053 2163 if( DEBUG_VFS_STAT < cycle ) 2054 printk("\n[%s] thread[%x,%x] set stat %x for inode %x in cluster %x / cycle %d\n" 2055 " %s / inum %d / size %d\n", 2056 __FUNCTION__, process->pid, this->trdid, st, inode_ptr, inode_cxy, cycle, 2057 vfs_inode_type_str( type ), inum, size ); 2164 printk("\n[%s] thread[%x,%x] set stat for <%s> / %s / inum %d / size %d / cycle %d\n", 2165 __FUNCTION__, process->pid, this->trdid, path, vfs_inode_type_str( type ), inum, size, cycle ); 2058 2166 #endif … … 2084 2192 process_t * process = this->process; 2085 2194 2086 #if DEBUG_VFS_CHDIR 2087 uint32_t cycle = (uint32_t)hal_get_cycles(); 2088 if( DEBUG_VFS_CHDIR < cycle ) 2089 printk("\n[%s] thread[%x,%x] enter for path <%s> / cycle %d\n", 2090 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2194 #if DEBUG_VFS_CHDIR || DEBUG_VFS_ERROR 2195 uint32_t cycle = (uint32_t)hal_get_cycles(); 2091 2196 #endif … … 2112 2217 if( error ) 2113 2219 { 2114
2220 #if DEBUG_VFS_ERROR 2221 printk("\n[ERROR] in %s : thread[%x,%x] cannot find inode <%s> / cycle %d\n", 2222 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2223 #endif 2116 2224 return -1; 2117 2225 } … … 2124 2232 if( inode_type != FILE_TYPE_DIR ) 2125 2233 { 2126 printk("\n[ERROR] in %s : <%s> is not a directory\n", 2127 __FUNCTION__, path ); 2234 2235 #if DEBUG_VFS_ERROR 2236 printk("\n[ERROR] in %s : thread[%x,%x] / <%s> is not a directory / cycle %d\n", 2237 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2238 #endif 2128 2239 return -1; 2129 2240 } … … 2146 2257 2147 2259 #if DEBUG_VFS_CHDIR 2148 cycle = (uint32_t)hal_get_cycles();2149 2260 if( DEBUG_VFS_CHDIR < cycle ) 2150 printk("\n[%s] thread[%x,%x] exit : inode (%x,%x) / &cwd_xp (%x,%x) / cycle %d\n", 2151 __FUNCTION__, process->pid, this->trdid, inode_cxy, inode_ptr, 2152 GET_CXY(cwd_xp_xp), GET_PTR(cwd_xp_xp), cycle ); 2260 printk("\n[%s] thread[%x,%x] set new cwd <%s> / inode_xp (%x,%x) / cycle %d\n", 2261 __FUNCTION__, process->pid, this->trdid, path, inode_cxy, inode_ptr, cycle ); 2153 2262 #endif … … 2163 2272 { 2164 2273 error_t error; 2165 xptr_t inode_xp; // extended pointer on target inode 2166 cxy_t inode_cxy; // inode cluster identifier 2167 vfs_inode_t * inode_ptr; // inode local pointer 2168 2169 // check lookup working mode 2170 assert( __FUNCTION__, (rights == 0), "access rights non implemented yet" ); 2171 2274 xptr_t vfs_root_xp; // extended pointer on VFS root inode 2275 vfs_inode_t * vfs_root_ptr; // local_pointer on VFS root inode 2276 cxy_t vfs_root_cxy; // VFS root inode cluster identifier 2277 xptr_t main_lock_xp; // extended pointer on lock protecting Inode Tree 2278 xptr_t inode_xp; // extended pointer on target inode 2279 cxy_t inode_cxy; // inode cluster identifier 2280 vfs_inode_t * inode_ptr; // inode local pointer 2281 vfs_file_type_t inode_type; // inode type 2282 2283 thread_t * this = CURRENT_THREAD; 2284 process_t * process = this->process; 2285 2286 #if DEBUG_VFS_CHMOD || DEBUG_VFS_ERROR 2287 uint32_t cycle = (uint32_t)hal_get_cycles(); 2288 #endif 2289 2290 // build extended pointer on lock protecting Inode Tree (in VFS root inode) 2291 vfs_root_xp = process->vfs_root_xp; 2292 vfs_root_ptr = GET_PTR( vfs_root_xp ); 2293 vfs_root_cxy = GET_CXY( vfs_root_xp ); 2294 main_lock_xp = XPTR( vfs_root_cxy , &vfs_root_ptr->main_lock ); 2295 2296 // take lock protecting Inode Tree in read mode 2297 remote_rwlock_rd_acquire( main_lock_xp ); 2298 2172 2299 // get extended pointer on target inode 2173 2300 error = vfs_lookup( cwd_xp, … … 2177 2304 NULL ); 2178 2305 2179 if( error ) return error; 2306 // release lock protecting Inode Tree in read mode 2307 remote_rwlock_rd_release( main_lock_xp ); 2308 2309 if( error ) 2310 { 2311 2312 #if DEBUG_VFS_ERROR 2313 printk("\n[ERROR] in %s : thread[%x,%x] cannot find inode <%s> / cycle %d\n", 2314 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2315 #endif 2316 return -1; 2317 } 2180 2318 2181 2319 // get inode cluster and local pointer … … 2184 2322 2185 2323 // get inode type from remote inode 2186 //inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) ); 2324 inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) ); 2187 2325 2188 2326 // TODO finalize implementation 2189 2327 2190 assert( __FUNCTION__, false , "not implemented" ); 2328 assert( __FUNCTION__, false , "not fully implemented" ); 2191 2329 2192 2330 // set inode rights in remote inode 2193 2331 hal_remote_s32( XPTR( inode_cxy ,
&inode_ptr->rights ) , rights ); 2332 2333 #if DEBUG_VFS_CHMOD 2334 if( DEBUG_VFS_CHMOD < cycle ) 2335 printk("\n[%s] thread[%x,%x] set access rights %x for <%s> / inode_xp (%x,%x) / cycle %d\n", 2336 __FUNCTION__, process->pid, this->trdid, rights, path, inode_cxy, inode_ptr, cycle ); 2337 #endif 2194 2338 2195 2339 return 0; … … 2212 2356 thread_t * this = CURRENT_THREAD; 2213 2357 process_t * process = this->process; 2358 2359 #if DEBUG_VFS_MKFIFO || DEBUG_VFS_ERROR 2360 uint32_t cycle = (uint32_t)hal_get_cycles(); 2361 #endif 2214 2362 2215 2363 // build extended pointer on lock protecting Inode Tree … … 2230 2378 if( error ) 2231 2379 { 2232 printk("\n[ERROR] in %s : cannot get parent inode for <%s> path\n", 2233 __FUNCTION__ , path ); 2380 2381 #if DEBUG_VFS_ERROR 2382 printk("\n[ERROR] in %s : thread[%x,%x] cannot find parent inode for <%s> / cycle %d\n", 2383 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2384 #endif 2385 remote_rwlock_wr_release( vfs_lock_xp ); 2234 2386 return -1; 2235 2387 } … … 2259 2411 if( error ) 2260 2412 { 2261 printk("\n[ERROR] in %s : cannot create fifo inode for <%s> path\n", 2262 __FUNCTION__ , path ); 2413 2414 #if DEBUG_VFS_ERROR 2415 printk("\n[ERROR] in %s : thread[%x,%x] cannot create fifo inode for <%s> / cycle %d\n", 2416 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2417 #endif 2418 remote_rwlock_wr_release( vfs_lock_xp ); 2263 2419 return -1; 2264 2420 } … … 2270 2426 if( pipe == NULL ) 2271 2427 { 2272 printk("\n[ERROR] in %s : cannot create pipe for <%s> path\n", 2273 __FUNCTION__ , path ); 2428 2429 #if DEBUG_VFS_ERROR 2430 printk("\n[ERROR] in %s : thread[%x,%x] cannot create pipe for <%s> / cycle %d\n", 2431 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2432 #endif 2433 vfs_remove_child_from_parent( fifo_dentry_xp ); 2434 remote_rwlock_wr_release( vfs_lock_xp ); 2274 2435 return -1; 2275 2436 } … … 2282 2443 // release the lock protecting the Inode-Tree from write mode 2283 2444 remote_rwlock_wr_release( vfs_lock_xp ); 2445 2446 #if DEBUG_VFS_MKFIFO 2447 if( DEBUG_VFS_MKFIFO < cycle ) 2448 printk("\n[%s] thread[%x,%x] created fifo <%s> / inode_xp [%x,%x] / cycle %d\n", 2449 __FUNCTION__, process->pid, this->trdid, path, fifo_cxy, fifo_inode_ptr, cycle ); 2450 #endif 2284 2451 2285 2452 return 0; … … 2746 2913 2747 2914 #if DEBUG_VFS_ERROR 2748 if( DEBUG_VFS_ERROR < cycle ) 2749 printk("\n[ERROR] in %s : thread[%x,%x] cannot create inode <%s> in path <%s>\n", 2750 __FUNCTION__ , process->pid, this->trdid, name, pathname ); 2915 printk("\n[ERROR] in %s : thread[%x,%x] cannot create inode <%s> in path <%s> / cycle %d\n", 2916 __FUNCTION__ , process->pid, this->trdid, name, pathname, cycle ); 2751 2917 #endif 2752 2918 return -1; … … 2777 2943 2778 2944 #if DEBUG_VFS_ERROR 2779 if( DEBUG_VFS_ERROR < cycle ) 2780 printk("\n[ERROR] in %s : thread[%x,%x] cannot add dentry <%s> in parent dir\n", 2781 __FUNCTION__, process->pid, this->trdid, name ); 2945 printk("\n[ERROR] in %s : thread[%x,%x] cannot add dentry <%s> in parent dir / cycle %d\n", 2946 __FUNCTION__, process->pid, this->trdid, name, cycle ); 2782 2947 #endif 2783 2948 vfs_remove_child_from_parent( dentry_xp ); … … 2795 2960 2796 2961 #if DEBUG_VFS_ERROR 2797 if( DEBUG_VFS_ERROR < cycle ) 2798 printk("\n[ERROR] in %s : thread[%x,%x] cannot found node <%s> in parent for <%s>\n", 2799 __FUNCTION__ , process->pid, this->trdid, name, pathname ); 2962 printk("\n[ERROR] in %s : thread[%x,%x] cannot find node <%s> in parent for <%s> / cycle %d\n",
__FUNCTION__ , process->pid, this->trdid, name, pathname, cycle ); 2800 2964 #endif 2801 2965 vfs_remove_child_from_parent( dentry_xp ); … … 2810 2974 2811 2975 #if DEBUG_VFS_ERROR 2812 if( DEBUG_VFS_ERROR < cycle ) 2813 printk("\n[ERROR] in %s : thread[%x,%x] found an existing node <%s> %\n", 2814 __FUNCTION__ , process->pid, this->trdid, pathname ); 2976 printk("\n[ERROR] in %s : thread[%x,%x] found an existing node <%s> / cycle %d\n", 2977 __FUNCTION__ , process->pid, this->trdid, pathname, cycle ); 2815 2978 #endif 2816 2979 return -1; … … 2831 2994 { 2832 2995 #if DEBUG_VFS_ERROR 2833 if( DEBUG_VFS_ERROR < cycle ) 2834 printk("\n[ERROR] in %s : thread[%x,%x] cannot load <%s> from device\n", 2835 __FUNCTION__ , process->pid, this->trdid, name ); 2996 printk("\n[ERROR] in %s : thread[%x,%x] cannot load <%s> from device / cycle %d\n", 2997 __FUNCTION__ , process->pid, this->trdid, name, cycle ); 2836 2998 #endif 2837 2999 vfs_remove_child_from_parent( dentry_xp ); … … 2864 3026 2865 3027 #if DEBUG_VFS_ERROR 2866 if( DEBUG_VFS_ERROR < cycle ) 2867 printk("\n[ERROR] in %s : thread[%x,%x] found an existing node <%s>\n", 2868 __FUNCTION__ , process->pid, this->trdid, pathname ); 3028 printk("\n[ERROR] in %s : thread[%x,%x] found an existing node <%s> / cycle %d\n", 3029 __FUNCTION__ , process->pid, this->trdid, pathname, cycle ); 2869 3030 #endif 2870 3031 return -1; … … 2946 3107 xptr_t children_entry_xp; // extended pointer on dentry "children" field 2947 3108 3109 #if DEBUG_VFS_ADD_SPECIAL || DEBUG_VFS_ERROR 3110 uint32_t cycle = (uint32_t)hal_get_cycles(); 3111 thread_t * this = CURRENT_THREAD; 3112 process_t * process = this->process; 3113 #endif 3114 2948 3115 #if DEBUG_VFS_ADD_SPECIAL 2949 uint32_t cycle = (uint32_t)hal_get_cycles();2950 thread_t * this = CURRENT_THREAD;2951 3116 char child_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2952 3117 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; … … 2955 3120 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 2956 3121 printk("\n[%s] thread[%x,%x] enter for child <%s> in parent <%s> / cycle %d\n", 2957 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name, cycle );3122 __FUNCTION__, process->pid, this->trdid, child_name, parent_name, cycle ); 2958 3123 #endif 2959 3124 … … 2973 3138 if( error ) 2974 3139 { 2975 printk("\n[ERROR] in %s : cannot create dentry <.> in cluster %x\n", 2976 __FUNCTION__ , child_cxy ); 3140 3141 #if DEBUG_VFS_ERROR 3142 printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry <.> in cluster %x / cycle %d\n", 3143 __FUNCTION__ , process->pid, this->trdid, child_cxy, cycle ); 3144 #endif 2977 3145 return -1; 2978 3146 } … … 2982 3150 2983 3151 #if(DEBUG_VFS_ADD_SPECIAL & 1) 2984 cycle = (uint32_t)hal_get_cycles();2985 3152 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 2986 3153 printk("\n[%s] thread[%x,%x] created dentry <.> (%x,%x) / cycle %d\n", 2987 __FUNCTION__, this->process->pid, this->trdid, child_cxy, dentry_ptr, cycle );3154 __FUNCTION__, process->pid, this->trdid, child_cxy, dentry_ptr, cycle ); 2988 3155 #endif 2989 3156 … … 2996 3163 if( error ) 2997 3164 { 2998 printk("\n[ERROR] in %s : cannot register dentry <.> in xhtab\n", 2999 __FUNCTION__ ); 3165 3166 #if DEBUG_VFS_ERROR 3167 printk("\n[ERROR] in %s : thread[%x,%x] cannot register dentry <.> in xhtab / cycle %d\n", 3168 __FUNCTION__ , process->pid, this->trdid, cycle ); 3169 #endif 3000 3170 return -1; 3001 3171 } … … 3009 3179 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 3010 3180 printk("\n[%s] thread[%x,%x] linked dentry <.> to parent and child inodes / cycle %d\n", 
3011 __FUNCTION__, this->process->pid, this->trdid, cycle );3181 __FUNCTION__, process->pid, this->trdid, cycle ); 3012 3182 #endif 3013 3183 … … 3020 3190 if( error ) 3021 3191 { 3022 printk("\n[ERROR] in %s : cannot introduce dentry <..> in mapper %x\n", 3023 __FUNCTION__ ); 3192 3193 #if DEBUG_VFS_ERROR 3194 printk("\n[ERROR] in %s : thread[%x,%x] cannot register dentry <.> in mapper / cycle %d\n", 3195 __FUNCTION__ , process->pid, this->trdid, cycle ); 3196 #endif 3024 3197 return -1; 3025 3198 } … … 3029 3202 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 3030 3203 printk("\n[%s] thread[%x,%x] registered dentry <.> in child mapper / cycle %d\n", 3031 __FUNCTION__, this->process->pid, this->trdid, cycle );3204 __FUNCTION__, process->pid, this->trdid, cycle ); 3032 3205 #endif 3033 3206 … … 3041 3214 if( error ) 3042 3215 { 3043 printk("\n[ERROR] in %s : cannot create dentry <..> in cluster %x\n", 3044 __FUNCTION__ , child_cxy ); 3216 3217 #if DEBUG_VFS_ERROR 3218 printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry <..> in cluster %x / cycle %d\n", 3219 __FUNCTION__ , process->pid, this->trdid, child_cxy, cycle ); 3220 #endif 3045 3221 return -1; 3046 3222 } … … 3053 3229 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 3054 3230 printk("\n[%s] thread[%x,%x] created dentry <..> (%x,%x) / cycle %d\n", 3055 __FUNCTION__, this->process->pid, this->trdid, child_cxy, dentry_ptr, cycle );3231 __FUNCTION__, process->pid, this->trdid, child_cxy, dentry_ptr, cycle ); 3056 3232 #endif 3057 3233 … … 3059 3235 children_xhtab_xp = XPTR( child_cxy , &child_ptr->children ); 3060 3236 children_entry_xp = XPTR( child_cxy , &dentry_ptr->children ); 3237 3061 3238 error = xhtab_insert( children_xhtab_xp , ".." , children_entry_xp ); 3239 3062 3240 if( error ) 3063 3241 { 3064 printk("\n[ERROR] in %s : cannot register dentry <..> in xhtab\n", 3065 __FUNCTION__ ); 3242 3243 #if DEBUG_VFS_ERROR 3244 printk("\n[ERROR] in %s : thread[%x,%x] cannot register dentry <..> in xhtab / cycle %d\n", 3245 __FUNCTION__ , process->pid, this->trdid, cycle ); 3246 #endif 3066 3247 return -1; 3067 3248 } … … 3077 3258 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 3078 3259 printk("\n[%s] thread[%x,%x] linked dentry <..> to parent and child inodes / cycle %d\n", 3079 __FUNCTION__, this->process->pid, this->trdid, cycle );3260 __FUNCTION__, process->pid, this->trdid, cycle ); 3080 3261 #endif 3081 3262 … … 3088 3269 if( error ) 3089 3270 { 3090 printk("\n[ERROR] in %s : cannot introduce dentry <..> in mapper %x\n", 3091 __FUNCTION__ ); 3271 3272 #if DEBUG_VFS_ERROR 3273 printk("\n[ERROR] in %s : thread[%x,%x] cannot register dentry <..> in mapper / cycle %d\n", 3274 __FUNCTION__ , process->pid, this->trdid, cycle ); 3275 #endif 3092 3276 return -1; 3093 3277 } … … 3097 3281 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 3098 3282 printk("\n[%s] thread[%x,%x] registered dentry <..> in child mapper / cycle %d\n", 3099 __FUNCTION__, this->process->pid, this->trdid, cycle );3283 __FUNCTION__, process->pid, this->trdid, cycle ); 3100 3284 #endif 3101 3285 … … 3106 3290 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 3107 3291 printk("\n[%s] thread[%x,%x] exit for child <%s> in parent <%s> / cycle %d\n", 3108 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name, cycle );3292 __FUNCTION__, process->pid, this->trdid, child_name, parent_name, cycle ); 3109 3293 #endif 3110 3294 … … 3139 3323 3140 3324 #if DEBUG_VFS_GET_PATH 3141 uint32_t cycle = (uint32_t)hal_get_cycles(); 3325 uint32_t cycle = (uint32_t)hal_get_cycles(); 3326 #endif 3327 3328 #if 
DEBUG_VFS_GET_PATH 3142 3329 if( DEBUG_VFS_GET_PATH < cycle ) 3143 3330 printk("\n[%s] thread[%x,%x] enter : inode (%x,%x) / cycle %d\n", … … 3296 3483 3297 3484 #if DEBUG_VFS_ERROR 3298 if( DEBUG_VFS_ERROR < cycle ) 3299 printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry <%s> in cluster %x\n", 3300 __FUNCTION__ , this->process->pid, this->trdid , name , parent_cxy ); 3485 printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry <%s> in cluster %x / cycle %d\n", 3486 __FUNCTION__ , this->process->pid, this->trdid , name , parent_cxy, cycle ); 3301 3487 #endif 3302 3488 return -1; … … 3330 3516 3331 3517 #if DEBUG_VFS_ERROR 3332 if( DEBUG_VFS_ERROR < cycle ) 3333 printk("\n[ERROR] in %s : thread[%x,%x] cannot create inode in cluster %x\n", 3334 __FUNCTION__ , this->process->pid , this->trdid , child_cxy ); 3518 printk("\n[ERROR] in %s : thread[%x,%x] cannot create inode in cluster %x / cycle %d\n", 3519 __FUNCTION__ , this->process->pid , this->trdid , child_cxy, cycle ); 3335 3520 #endif 3336 3521 … … 3428 3613 3429 3614 #if DEBUG_VFS_REMOVE_CHILD 3430 if( DEBUG_VFS_REMOVE_CHILD < cycle )3431 3615 printk("\n[%s] thread[%x,%x] enter for dentry[%x,%x] / inode[%x,%x] / cycle %d\n", 3432 3616 __FUNCTION__, this->process->pid, this->trdid, … … 3441 3625 if( error ) 3442 3626 { 3443 printk("\n[WARNING] in %s] thread[%x,%x] cannot remove dentry %s from parent dir\n", 3444 __FUNCTION__, this->process->pid, this->trdid, dentry_name ); 3445 } 3446 3447 #if DEBUG_VFS_REMOVE_CHILD 3448 cycle = (uint32_t)hal_get_cycles(); 3627 printk("\n[WARNING] in %s : thread[%x,%x] cannot remove dentry <%s> from parent\n", 3628 __FUNCTION__ , this->process->pid , this->trdid , dentry_name ); 3629 } 3630 3631 #if(DEBUG_VFS_REMOVE_CHILD & 1) 3449 3632 if( DEBUG_VFS_REMOVE_CHILD < cycle ) 3450 printk("\n[%s] thread[%x,%x] removed dentry from parent inode / cycle %d\n",3451 __FUNCTION__, this->process->pid, this->trdid , cycle);3633 printk("\n[%s] thread[%x,%x] removed dentry from parent inode\n", 3634 __FUNCTION__, this->process->pid, this->trdid ); 3452 3635 #endif 3453 3636 … … 3458 3641 links = hal_remote_atomic_add( XPTR( child_cxy , &child_inode_ptr->links ) , -1 ); 3459 3642 3460 #if DEBUG_VFS_REMOVE_CHILD 3461 cycle = (uint32_t)hal_get_cycles(); 3643 #if(DEBUG_VFS_REMOVE_CHILD & 1) 3462 3644 if( DEBUG_VFS_REMOVE_CHILD < cycle ) 3463 printk("\n[%s] thread[%x,%x] removed dentry from child inode / cycle %d\n",3464 __FUNCTION__, this->process->pid, this->trdid , cycle);3645 printk("\n[%s] thread[%x,%x] removed dentry from child inode\n", 3646 __FUNCTION__, this->process->pid, this->trdid ); 3465 3647 #endif 3466 3648 … … 3723 3905 assert( __FUNCTION__, (array != NULL) , "child pointer is NULL"); 3724 3906 assert( __FUNCTION__, (detailed == false) , "detailed argument not supported\n"); 3725 3726 // check inode type 3727 if( inode->type != FILE_TYPE_DIR ) 3728 { 3729 printk("\n[ERROR] in %s : target inode is not a directory\n", 3730 __FUNCTION__ ); 3731 return -1; 3732 } 3907 assert( __FUNCTION__, (inode->type == FILE_TYPE_DIR), "inode is not a directory\n"); 3733 3908 3734 3909 // get parent inode FS type -
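The vfs.c hunks above all follow the same allocator migration: the kmem_req_t request descriptor is gone, and callers now pass the target cluster, the size order, and the AF_* flags directly to kmem_alloc(), kmem_remote_alloc() and kmem_remote_free(). A minimal sketch of the new idiom, using a hypothetical my_obj_t type for illustration:

    typedef struct my_obj_s { uint32_t state; } my_obj_t;   // illustrative type

    // allocate a zeroed object in remote cluster <cxy>
    my_obj_t * obj = kmem_remote_alloc( cxy,
                                        bits_log2( sizeof(my_obj_t) ),
                                        AF_ZERO );
    if( obj == NULL ) return -1;

    // ... use the object ...

    // release it, passing the same size order as the allocation
    kmem_remote_free( cxy , obj , bits_log2( sizeof(my_obj_t) ) );

Note that the free side must recompute the same bits_log2(sizeof(...)) order as the alloc side, since there is no longer a kmem_req_t remembering it: this is why both vfs_inode_destroy() and vfs_dentry_destroy() above recompute the order at release time.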
trunk/kernel/fs/vfs.h
r673 r683 168 168 *****************************************************************************************/ 169 169 170 /* this enum define the VFS inode types values*/170 /* this enum defines the VFS file types */ 171 171 /* WARNING : this enum must be kept consistent with macros in <shared_stat.h> file */ 172 172 /* and with types in <shared_dirent.h> file. */ … … 174 174 typedef enum 175 175 { 176 FILE_TYPE_REG = 0, /*! regular file */177 FILE_TYPE_DIR = 1, /*! directory */178 FILE_TYPE_FIFO = 2, /*! POSIX named fifo */179 FILE_TYPE_PIPE = 3, /*! POSIX anonymous pipe */180 FILE_TYPE_SOCK = 4, /*! POSIX anonymous socket */181 FILE_TYPE_DEV = 5, /*! character device */182 FILE_TYPE_BLK = 6, /*! block device */183 FILE_TYPE_SYML = 7, /*! symbolic link */176 FILE_TYPE_REG = 0, /*! regular file */ 177 FILE_TYPE_DIR = 1, /*! directory */ 178 FILE_TYPE_FIFO = 2, /*! POSIX named fifo */ 179 FILE_TYPE_PIPE = 3, /*! POSIX anonymous pipe */ 180 FILE_TYPE_SOCK = 4, /*! POSIX anonymous socket */ 181 FILE_TYPE_DEV = 5, /*! character device */ 182 FILE_TYPE_BLK = 6, /*! block device */ 183 FILE_TYPE_SYML = 7, /*! symbolic link */ 184 184 } 185 185 vfs_file_type_t; … … 200 200 struct vfs_ctx_s * ctx; /*! local pointer on FS context. */ 201 201 vfs_file_attr_t attr; /*! file attributes bit vector (see above) */ 202 vfs_file_type_t type; /*! same type as inode */202 vfs_file_type_t type; /*! same type as inode */ 203 203 uint32_t offset; /*! seek position in file */ 204 204 remote_rwlock_t lock; /*! protect offset modifications */ … … 285 285 uint32_t inum; /*! inode identifier (unique in file system) */ 286 286 uint32_t attr; /*! inode attributes (see above) */ 287 vfs_file_type_t type; /*! inode type (see above)*/287 vfs_file_type_t type; /*! inode type (see vfs_file_t) */ 288 288 uint32_t size; /*! number of bytes */ 289 289 uint32_t uid; /*! user owner identifier */ … … 829 829 /****************************************************************************************** 830 830 * This function returns, in the structure pointed by the <st> pointer, various 831 * informations on the inodeidentified by the <root_inode_xp> and <patname> arguments.831 * information on the file identified by the <root_inode_xp> and <pathname> arguments. 832 832 * 833 833 * TODO : only partially implemented yet (only size and inum fields). -
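The vfs.h hunk above stresses that the vfs_file_type_t values must stay consistent with the macros in <shared_stat.h> and <shared_dirent.h>. A sketch of a type-to-string helper covering exactly these eight values (illustrative only; the kernel's actual vfs_inode_type_str() may format its output differently):

    // minimal sketch: printable name for each vfs_file_type_t value
    static const char * file_type_str( vfs_file_type_t type )
    {
        switch( type )
        {
            case FILE_TYPE_REG : return "REG";    // regular file
            case FILE_TYPE_DIR : return "DIR";    // directory
            case FILE_TYPE_FIFO: return "FIFO";   // POSIX named fifo
            case FILE_TYPE_PIPE: return "PIPE";   // POSIX anonymous pipe
            case FILE_TYPE_SOCK: return "SOCK";   // POSIX anonymous socket
            case FILE_TYPE_DEV : return "DEV";    // character device
            case FILE_TYPE_BLK : return "BLK";    // block device
            case FILE_TYPE_SYML: return "SYML";   // symbolic link
            default            : return "UNDEFINED";
        }
    }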
trunk/kernel/kern/alarm.c
r669 r683 31 31 32 32 //////////////////////////////////////////////////////////////////////////////////////////// 33 // This static function registers the alarm identified ny the <new_alarm> argument33 // This static function registers the alarm identified by the <alarm> & <cxy> arguments 34 34 // in the list of alarms rooted in the core identified by the <core> argument. 35 35 // When the existing list of alarms is not empty, it scan the list to insert the new 36 36 // alarm in the right place to respect the absolute dates ordering. 37 37 //////////////////////////////////////////////////////////////////////////////////////////// 38 // @ new_alarm : local pointer on the new alarm. 39 // @ core : local pointer on the target core. 38 // @ cxy : cluster containing both the new alarm and the core. 39 // @ alarm : local pointer on the alarm. 40 // @ core : local pointer on the core. 40 41 //////////////////////////////////////////////////////////////////////////////////////////// 41 static void alarm_register( alarm_t * new_alarm, 42 static void alarm_register( cxy_t cxy, 43 alarm_t * alarm, 42 44 core_t * core ) 43 45 { 44 list_entry_t * current; // pointer on current list_entry in existing list 45 list_entry_t * previous; // pointer on previous list_entry in existing list 46 alarm_t * current_alarm; // pointer on current alarm in existing list 47 cycle_t current_date; // date of current alarm in existing list 48 49 bool_t done = false; 50 51 // get pointers on root of alarms and lock 46 // get alarm date 47 cycle_t new_date = hal_remote_l64( XPTR( cxy , &alarm->date ) ); 48 49 // build local pointer on root of alarms list 52 50 list_entry_t * root = &core->alarms_root; 53 busylock_t * lock = &core->alarms_lock; 54 55 // get pointer on new_alarm list_entry 56 list_entry_t * new_entry = &new_alarm->list; 57 58 // get new_alarm date 59 cycle_t new_date = new_alarm->date; 60 61 // take the lock 62 busylock_acquire( lock ); 51 52 // build local pointer on new alarm list_entry 53 list_entry_t * new = &alarm->list; 63 54 64 55 // insert new alarm to respect dates order 65 if( list_ is_empty( root ) )// list empty56 if( list_remote_is_empty( cxy , &core->alarms_root ) ) // list empty 66 57 { 67 list_ add_first( root , new_entry);58 list_remote_add_first( cxy , root , new ); 68 59 } 69 else // list non empty60 else // list non empty 70 61 { 71 for( current = root->next ; 72 (current != root) && (done == false) ; 73 current = current->next ) 62 list_entry_t * iter; // local pointer on current list_entry in existing list 63 alarm_t * iter_alarm; // local pointer on current alarm in existing list 64 cycle_t iter_date; // date of current alarm in existing list 65 bool_t done = false; 66 67 for( iter = hal_remote_lpt( XPTR( cxy , &root->next ) ) ; 68 (iter != root) && (done == false) ; 69 iter = hal_remote_lpt( XPTR( cxy , &iter->next ) ) ) 74 70 { 75 // get pointer on previous entry in existing list76 previous = current->pred;77 78 // get pointer on current alarm79 current_alarm = LIST_ELEMENT( current, alarm_t , list );71 // get local pointer on pred and next for iter 72 list_entry_t * prev = hal_remote_lpt( XPTR( cxy , &iter->pred ) ); 73 74 // get local pointer on current alarm 75 iter_alarm = LIST_ELEMENT( iter , alarm_t , list ); 80 76 81 77 // get date for current alarm 82 current_date = current_alarm->date; 83 84 if( current_date > new_date ) // insert new alarm just before current 78 iter_date = hal_remote_l64( XPTR( cxy , &iter_alarm->date ) ); 79 80 // insert new alarm just before current when 
required 81 if( iter_date > new_date ) 85 82 { 86 new_entry->next = current;87 new_entry->pred = previous;88 89 current->pred = new_entry;90 previous->next = new_entry;83 hal_remote_spt( XPTR( cxy , &new->next ) , iter ); 84 hal_remote_spt( XPTR( cxy , &new->pred ) , prev ); 85 86 hal_remote_spt( XPTR( cxy , &iter->pred ) , new ); 87 hal_remote_spt( XPTR( cxy , &prev->next ) , new ); 91 88 92 89 done = true; … … 96 93 if( done == false ) // new_date is larger than all registered dates 97 94 { 98 list_ add_last( root , new_entry);95 list_remote_add_last( cxy, root , new ); 99 96 } 100 97 } 101 98 } // end alarm_register() 99 100 101 /////////////////////////////////// 102 void alarm_init( alarm_t * alarm ) 103 { 104 alarm->linked = false; 105 list_entry_init( &alarm->list ); 106 } 107 108 /////////////////////////////////////// 109 void alarm_start( xptr_t thread_xp, 110 cycle_t date, 111 void * func_ptr, 112 xptr_t args_xp ) 113 { 114 // get cluster and local pointer on target thread 115 thread_t * tgt_ptr = GET_PTR( thread_xp ); 116 cxy_t tgt_cxy = GET_CXY( thread_xp ); 117 118 // check alarm state 119 assert( __FUNCTION__ , (hal_remote_l32( XPTR(tgt_cxy,&tgt_ptr->alarm.linked)) == false ), 120 "alarm already started"); 121 122 // get local pointer on core running target thread 123 core_t * core = hal_remote_lpt( XPTR( tgt_cxy , &tgt_ptr->core ) ); 124 125 // build extended pointer on lock protecting alarms list 126 xptr_t lock_xp = XPTR( tgt_cxy , &core->alarms_lock ); 127 128 // initialize alarm descriptor 129 hal_remote_s64( XPTR( tgt_cxy , &tgt_ptr->alarm.date ) , date ); 130 hal_remote_spt( XPTR( tgt_cxy , &tgt_ptr->alarm.func_ptr ) , func_ptr ); 131 hal_remote_s64( XPTR( tgt_cxy , &tgt_ptr->alarm.args_xp ) , args_xp ); 132 hal_remote_s32( XPTR( tgt_cxy , &tgt_ptr->alarm.linked ) , true ); 133 134 // take the lock 135 remote_busylock_acquire( lock_xp ); 136 137 // register alarm in core list 138 alarm_register( tgt_cxy , &tgt_ptr->alarm , core ); 139 140 //release the lock 141 remote_busylock_release( lock_xp ); 142 143 } // end alarm_start() 144 145 146 ///////////////////////////////////// 147 void alarm_stop( xptr_t thread_xp ) 148 { 149 // get cluster and local pointer on target thread 150 thread_t * tgt_ptr = GET_PTR( thread_xp ); 151 cxy_t tgt_cxy = GET_CXY( thread_xp ); 152 153 // get local pointer on core running target thread 154 core_t * core = hal_remote_lpt( XPTR( tgt_cxy , &tgt_ptr->core ) ); 155 156 // build extended pointer on lock protecting alarms list 157 xptr_t lock_xp = XPTR( tgt_cxy , &core->alarms_lock ); 158 159 // take the lock 160 remote_busylock_acquire( lock_xp ); 161 162 // unlink the alarm from the list rooted in core 163 list_remote_unlink( tgt_cxy , &tgt_ptr->alarm.list ); 164 165 // update alarm state 166 hal_remote_s32( XPTR( tgt_cxy , &tgt_ptr->alarm.linked ) , false ); 167 168 //release the lock 169 remote_busylock_release( lock_xp ); 170 171 } // end alarm_stop() 172 173 174 ////////////////////////////////////// 175 void alarm_update( xptr_t thread_xp, 176 cycle_t new_date ) 177 { 178 // get cluster and local pointer on target thread 179 thread_t * tgt_ptr = GET_PTR( thread_xp ); 180 cxy_t tgt_cxy = GET_CXY( thread_xp ); 181 182 // get local pointer on core running target thread 183 core_t * core = hal_remote_lpt( XPTR( tgt_cxy , &tgt_ptr->core ) ); 184 185 // build extended pointer on lock protecting alarms list 186 xptr_t lock_xp = XPTR( tgt_cxy , &core->alarms_lock ); 187 188 // take the lock 189 remote_busylock_acquire( lock_xp ); 190 191 // 
unlink the alarm from the core list 192 list_remote_unlink( tgt_cxy , &tgt_ptr->alarm.list ); 193 194 // update the alarm date and state 195 hal_remote_s64( XPTR( tgt_cxy , &tgt_ptr->alarm.date ) , new_date ); 196 hal_remote_s32( XPTR( tgt_cxy , &tgt_ptr->alarm.linked ) , true ); 197 198 // register alarm in core list 199 alarm_register( tgt_cxy , &tgt_ptr->alarm , core ); 200 102 201 // release the lock 103 busylock_release( lock ); 104 105 } // end alarm_register() 106 107 ////////////////////////////////////// 108 void alarm_start( cycle_t date, 109 void * func_ptr, 110 xptr_t args_xp, 111 thread_t * thread ) 112 { 113 // get pointer on alarm 114 alarm_t * alarm = &thread->alarm; 115 116 // initialize alarm descriptor 117 alarm->date = date; 118 alarm->func_ptr = func_ptr; 119 alarm->args_xp = args_xp; 120 121 // register alarm in core list 122 alarm_register( alarm , thread->core ); 123 124 } // end alarm_start() 125 126 ///////////////////////////////////// 127 void alarm_update( thread_t * thread, 128 cycle_t new_date ) 129 { 130 // get pointer on alarm 131 alarm_t * alarm = &thread->alarm; 132 133 // get pointer on core 134 core_t * core = thread->core; 135 136 // get pointer on lock protecting the alarms list 137 busylock_t * lock = &core->alarms_lock; 138 139 // unlink the alarm from the core list 140 busylock_acquire( lock ); 141 list_unlink( &alarm->list ); 142 busylock_release( lock ); 143 144 // update the alarm date 145 alarm->date = new_date; 146 147 // register alarm in core list 148 alarm_register( alarm , core ); 149 202 remote_busylock_release( lock_xp ); 203 150 204 } // end alarm_update() 151 205 152 //////////////////////////////////// 153 void alarm_stop( thread_t * thread ) 154 { 155 // get pointer on alarm 156 alarm_t * alarm = &thread->alarm; 157 158 // get pointer on core 159 core_t * core = thread->core; 160 161 // get pointer on lock protecting the alarms list 162 busylock_t * lock = &core->alarms_lock; 163 164 // unlink the alarm from the list rooted in core 165 busylock_acquire( lock ); 166 list_unlink( &alarm->list ); 167 busylock_release( lock ); 168 169 } // end alarm_stop() 170 206 -
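The rewritten alarm.c above makes all three access functions remote-safe: each takes an extended pointer on the target thread, locates the core running that thread, and manipulates the sorted alarms list under the core's remote busylock. A hedged usage sketch of the new interface (my_handler and my_args_xp are illustrative names, not kernel symbols):

    extern void my_handler( xptr_t args_xp );   // illustrative handler

    // arm an alarm ringing 10000 cycles from now on thread <thread_xp>
    alarm_start( thread_xp,
                 hal_get_cycles() + 10000,   // absolute date in cycles
                 &my_handler,                // called when the alarm rings
                 my_args_xp );               // extended pointer passed to it

    // push the deadline back without a stop/start pair
    alarm_update( thread_xp , hal_get_cycles() + 20000 );

    // disarm: unlink the alarm from the core list and mark it inactive
    alarm_stop( thread_xp );

Because alarm_register() keeps the list sorted by increasing dates, insertion only walks until the first later date, and the TICK handler only has to examine the list head.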
trunk/kernel/kern/alarm.h
r669 r683 36 36 * This structure defines a generic, timer based, kernel alarm. 37 37 * 38 * - An alarm being attached to a given thread,the alarm descriptor is embedded in the 38 * - An alarm is attached to a given thread, and the alarm descriptor is embedded in the 39 39 * thread descriptor. A client thread can use the alarm_start() function to dynamically 40 40 * activate the alarm. It can use the alarm_stop() function to deactivate this alarm. 41 41 * - This kernel alarm is generic, as the alarm handler (executed when the alarm rings), 42 * and the handler arguments are defined by two pointers <func_ptr> and <args_xp>. 42 * and the handler arguments are defined by two pointers: <func_ptr> and <args_xp>. 43 43 * - When an alarm is created by a client thread, it is registered in the list of alarms 44 44 * rooted in the core running the client thread. When it is stopped, the alarm is simply 45 45 * removed from this list. 46 * - When creating an alarm , the client thread must define an absolute date (in cycles), 47 * the func_ptr localpointer, and the args_xp extended pointer. 46 * - When creating an alarm with the alarm_start() function, the client thread must define 47 * an absolute date (in cycles), the func_ptr pointer, and the args_xp extended pointer. 48 48 * - The list of alarms is ordered by increasing dates. At each TICK received by a core, 49 49 * the date of the first registered alarm is compared to the current date (in the 50 50 * core_clock() function). The alarm handler is executed when current_date >= alarm_date. 51 * - It is the handler responsability to stop a ringing alarm, or update the date. 51 * - It is the handler responsibility to stop and delete a ringing alarm using the 52 * alarm_stop() function, or update the alarm date using the alarm_update() function. 53 * - The three alarm_start(), alarm_stop(), and alarm_update() access functions use 54 * the lock protecting the alarms list to handle concurrent accesses. These functions 55 * use extended pointers to access the alarm list, and can be called by a thread 56 * running in any cluster. 52 57 * 53 * This mechanism is used bi the almos_mkh implementation of the TCP protocoL. 58 * This embedded alarm mechanism is used by: 59 * 1. the socket_accept(), socket_connect(), socket_send(), socket_close() functions, 60 * to implement the TCP retransmission mechanism. 61 * 2. the sys_thread_sleep() function, to implement the "sleep" mechanism. 54 62 ******************************************************************************************/ 55 63 56 64 typedef struct alarm_s 57 65 { 66 bool_t linked; /*! active when true (i.e. linked to the core list) */ 58 67 cycle_t date; /*! absolute date for handler execution */ 59 68 void * func_ptr; /*! local pointer on alarm handler function */ 60 69 xptr_t args_xp; /*! extended pointer on handler arguments */ 61 list_entry_t list; /*! all alarms attached to the same core*/ 70 list_entry_t list; /*! set of active alarms attached to the same core */ 62 71 } 63 72 alarm_t; … … 70 79 71 80 /******************************************************************************************* 81 * This function initializes the alarm state to "inactive". 82 ******************************************************************************************* 83 * @ alarm : local pointer on alarm.
84 ******************************************************************************************/ 85 void alarm_init( alarm_t * alarm ); 86 87 /******************************************************************************************* 72 88 * This function initializes the alarm descriptor embedded in the thread identified by the 73 * <thread> argument from the <date>, <func_ptr>, <args_ptr> arguments, and registers it 74 * in the ordered list rooted in the core running this <thread>. 89 * <thread_xp> argument from the <date>, <func_ptr>, <args_xp> arguments, and registers 90 * this alarm in the ordered list rooted in the core running this thread. 91 * It takes the lock protecting the alarms list against concurrent accesses. 75 92 ******************************************************************************************* 93 * @ thread_xp : extended pointer on the target thread. 76 94 * @ date : absolute date (in cycles). 77 95 * @ func_ptr : local pointer on the handler to execute when the alarm rings. 78 96 * @ args_xp : extended pointer on the handler arguments. 79 * @ thread : local pointer on the client thread. 80 97 ******************************************************************************************/ 81 void alarm_start( cycle_t date, 82 void * func_ptr, 83 xptr_t args_xp, 84 struct thread_s * thread ); 98 void alarm_start( xptr_t thread_xp, 99 cycle_t date, 100 void * func_ptr, 101 xptr_t args_xp ); 85 103 86 104 /******************************************************************************************* … … 88 106 * <thread_xp> argument. The list of alarms rooted in the core running the client thread 89 107 * is modified to respect the absolute dates ordering. 108 * It takes the lock protecting the alarms list against concurrent accesses. 90 109 ******************************************************************************************* 91 * @ thread : local pointer on the client thread. 110 * @ thread_xp : extended pointer on the target thread. 92 111 * @ new_date : absolute new date (in cycles). 93 112 ******************************************************************************************/ 94 void alarm_update( struct thread_s * thread, 95 cycle_t 113 void alarm_update( xptr_t thread_xp, 96 114 cycle_t new_date ); 97 115 98 116 /******************************************************************************************* 99 117 * This function unlinks the alarm identified by the <thread_xp> argument from the list of 100 118 * alarms rooted in the core descriptor. 119 * It takes the lock protecting the alarms list against concurrent accesses. 101 120 ******************************************************************************************* 102 * @ thread : local pointer on the client thread. 121 * @ thread_xp : extended pointer on the target thread. 103 122 ******************************************************************************************/ 104 void alarm_stop( struct thread_s * thread ); 123 void alarm_stop( xptr_t thread_xp ); 105 124 -
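core_clock() itself is not part of this changeset, so the TICK-side scan described in the header comment is only summarized here. A minimal sketch of that scan, under two stated assumptions: the handler is invoked with the registered <args_xp>, and only the list head needs to be examined because the list is sorted by increasing dates:

    // in the TICK handler: ring the first alarm if its date is reached
    if( list_is_empty( &core->alarms_root ) == false )
    {
        alarm_t * first = LIST_ELEMENT( core->alarms_root.next , alarm_t , list );

        if( hal_get_cycles() >= first->date )
        {
            // the handler must call alarm_stop() or alarm_update(),
            // otherwise the same alarm rings again on the next TICK
            void (* handler)( xptr_t ) = (void (*)( xptr_t ))first->func_ptr;
            handler( first->args_xp );
        }
    }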
trunk/kernel/kern/chdev.c
r669 r683 87 87 { 88 88 chdev_t * chdev; 89 kmem_req_t req;90 89 91 90 // allocate memory for chdev 92 req.type = KMEM_KCM; 93 req.order = bits_log2( sizeof(chdev_t) ); 94 req.flags = AF_ZERO | AF_KERNEL; 95 chdev = kmem_alloc( &req ); 91 chdev = kmem_alloc( bits_log2(sizeof(chdev_t)) , AF_ZERO | AF_KERNEL ); 96 92 97 93 if( chdev == NULL ) return NULL; … … 114 110 } // end chdev_create() 115 111 116 /////////////////////////////////// 117 void chdev_print( chdev_t * chdev ) 118 { 119 printk("\n - func = %s" 120 "\n - channel = %d" 121 "\n - base = %l" 112 ///////////////////////////////////// 113 void chdev_display( xptr_t chdev_xp ) 114 { 115 chdev_t * chdev = GET_PTR( chdev_xp ); 116 cxy_t cxy = GET_CXY( chdev_xp ); 117 118 char name[16]; 119 120 hal_remote_memcpy( XPTR( local_cxy, name ), 121 XPTR( cxy , &chdev->name ), 16 ); 122 123 printk("\n - chdev = [%x,%x]" 124 "\n - name = %s" 125 "\n - base = [%x,%x]" 122 126 "\n - cmd = %x" 123 "\n - isr = %x" 124 "\n - chdev = %x\n", 125 chdev_func_str(chdev->func), 126 chdev->channel, 127 chdev->base, 128 chdev->cmd, 129 chdev->isr, 130 chdev ); 131 } 127 "\n - isr = %x\n", 128 cxy, 129 chdev, 130 name, 131 GET_CXY( hal_remote_l64( XPTR( cxy , &chdev->base ))), 132 GET_PTR( hal_remote_l64( XPTR( cxy , &chdev->base ))), 133 hal_remote_lpt( XPTR( cxy , &chdev->cmd )), 134 hal_remote_lpt( XPTR( cxy , &chdev->isr )) ); 135 136 } // end chdev_display() 132 137 133 138 ////////////////////////////////////////////////// … … 450 455 chdev_t * chdev_ptr; 451 456 452 453 "file_xp == XPTR_NULL\n" );457 assert( __FUNCTION__, (file_xp != XPTR_NULL) , 458 "file_xp == XPTR_NULL" ); 454 459 455 460 // get cluster and local pointer on remote file descriptor … … 462 467 inode_ptr = (vfs_inode_t *)hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) ); 463 468 464 465 "inode type %d is not FILE_TYPE_DEV\n", inode_type );469 assert( __FUNCTION__, (inode_type == FILE_TYPE_DEV) , 470 "inode type %d is not FILE_TYPE_DEV", inode_type ); 466 471 467 472 // get chdev local pointer from inode extension -
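The new chdev_display() reads every field through the hal_remote_* accessors, so it can be called on a chdev located in any cluster. A minimal sketch of this remote-access pattern (the helper name is illustrative; the channel field is part of chdev_t):

/* Split the extended pointer, then read one remote field. */
static uint32_t demo_get_chdev_channel( xptr_t chdev_xp )
{
    chdev_t * ptr = GET_PTR( chdev_xp );    // local pointer, valid in the remote cluster
    cxy_t     cxy = GET_CXY( chdev_xp );    // cluster identifier

    return hal_remote_l32( XPTR( cxy , &ptr->channel ) );
}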
trunk/kernel/kern/chdev.h
r669 r683 121 121 * . This busylock is also used to protect direct access to the shared
122 122 * kernel TXT0 terminal, that does not use the waiting queue.
123 * . For most d chdevs, the client waiting queue is an xlist of threads, but it is
123 * . For most chdevs, the client waiting queue is a list of threads, but it is
124 124 * a list of sockets for the NIC chdevs. It is unused for ICU, PIC, and IOB.
125 125 *****************************************************************************************/ … … 190 190
191 191 /****************************************************************************************
192 * This function display relevant values for a chdev descriptor.
193 ****************************************************************************************
194 * @ chdev : pointer on chdev.
195 ***************************************************************************************/
196 void chdev_print( chdev_t * chdev );
197
198 /****************************************************************************************
199 192 * This function returns a printable string for a device functional type.
200 193 **************************************************************************************** … … 223 216
224 217 /****************************************************************************************
225 * This generi dfunction is executed by an user thread requesting an IOC or TXT chdev
218 * This generic function is executed by a user thread requesting an IOC or TXT chdev
226 219 * service. It registers the calling thread in the waiting queue of the remote
227 220 * chdev descriptor identified by the <chdev_xp> argument. … … 282 275
283 276 /****************************************************************************************
277 * This function displays relevant values for a remote chdev descriptor.
278 ****************************************************************************************
279 * @ chdev_xp : extended pointer on chdev.
280 ***************************************************************************************/
281 void chdev_display( xptr_t chdev_xp );
282
283 /****************************************************************************************
284 284 * This function displays the local copy of the external chdevs directory.
285 285 * (global variable replicated in all clusters) -
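The queue-based protocol described above has a fixed client-side shape: deposit the command arguments in the client thread descriptor, register in the chdev waiting queue and deschedule, then read the status written back by the server thread. The sketch below illustrates it for a TXT write; the txt_cmd field names and the TXT_WRITE command type are assumptions, only chdev_register_command() is named by the surrounding API.

/* Hypothetical client side of a TXT write request. */
static error_t demo_txt_write( xptr_t chdev_xp , char * buffer , uint32_t count )
{
    thread_t * this = CURRENT_THREAD;

    // deposit command arguments in the client thread descriptor (assumed field names)
    this->txt_cmd.dev_xp = chdev_xp;
    this->txt_cmd.type   = TXT_WRITE;
    this->txt_cmd.buf_xp = XPTR( local_cxy , buffer );
    this->txt_cmd.count  = count;

    // register in the chdev waiting queue, block, and deschedule;
    // the dedicated server thread runs the driver and unblocks the client
    chdev_register_command( chdev_xp );

    // status written back by the server thread
    return this->txt_cmd.error;
}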
trunk/kernel/kern/cluster.c
r669 r683 2 2 * cluster.c - Cluster-Manager related operations 3 3 * 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012)4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018,2019,2020)6 * Alain Greiner (2016,2017,2018,2019,2020) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 74 74 cluster->y_size = info->y_size; 75 75 cluster->io_cxy = info->io_cxy; 76 cluster->sys_clk = info->sys_clk; 76 77 77 78 // initialize the cluster_info[][] array … … 177 178 printk("\n[%s] PPM initialized in cluster %x / cycle %d\n", 178 179 __FUNCTION__ , local_cxy , cycle ); 179 #endif180 181 // initialises embedded KHM182 khm_init( &cluster->khm );183 184 #if( DEBUG_CLUSTER_INIT & 1 )185 cycle = (uint32_t)hal_get_cycles();186 if( DEBUG_CLUSTER_INIT < cycle )187 printk("\n[%s] KHM initialized in cluster %x at cycle %d\n",188 __FUNCTION__ , local_cxy , hal_get_cycles() );189 180 #endif 190 181 -
trunk/kernel/kern/cluster.h
r657 r683 2 2 * cluster.h - Cluster-Manager definition
3 3 *
4 * authors Ghassan Almaless (2008,2009,2010,2011,2012)
4 * authors Ghassan Almaless (2008,2009,2010,2011,2012)
5 5 * Mohamed Lamine Karaoui (2015)
6 * Alain Greiner (2016,2017,2018,2019,2019,2020)
6 * Alain Greiner (2016,2017,2018,2019,2020)
7 7 *
8 8 * Copyright (c) UPMC Sorbonne Universites … … 39 39 #include <ppm.h>
40 40 #include <kcm.h>
41 #include <khm.h>
42 41 #include <rpc.h>
43 42 #include <core.h> … … 105 104 uint32_t x_size; /*! number of clusters in a row (can be 1) */
106 105 uint32_t y_size; /*! number of clusters in a column (can be 1) */
107 cxy_t io_cxy; /*! io cluster identifier */
106 uint32_t io_cxy; /*! io cluster identifier */
107 uint32_t sys_clk; /*! system_clock frequency (in Hertz) */
108 108 uint32_t dqdt_root_level; /*! index of root node in dqdt_tbl[] */
109 109 uint32_t nb_txt_channels; /*! number of TXT channels */ … … 124 124 list_entry_t dev_root; /*! root of list of devices in cluster */
125 125
126 // memory allocators
127 ppm_t ppm; /*! embedded kernel page manager */
128 khm_t khm; /*! embedded kernel heap manager */
129 kcm_t kcm[6]; /*! embedded kernel cache managers [6:11] */
126 // physical memory allocators: one PPM and several KCMs
127 ppm_t ppm;
128 kcm_t kcm[CONFIG_PPM_PAGE_ORDER - CONFIG_CACHE_LINE_ORDER];
130 129
131 130 // RPC -
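The new kcm[] dimension allocates one KCM per power-of-two size between a cache line and a page. A sketch of the indexing this layout implies (the helper is illustrative, and the convention that index 0 serves CONFIG_CACHE_LINE_ORDER is an assumption):

/* Map an allocation order to the embedded KCM that serves it. */
static kcm_t * demo_order_to_kcm( cluster_t * cluster , uint32_t order )
{
    // requests smaller than a cache line are rounded up to one cache line
    if( order < CONFIG_CACHE_LINE_ORDER ) order = CONFIG_CACHE_LINE_ORDER;

    // requests of one page or more must be served by the PPM, not a KCM
    assert( __FUNCTION__ , (order < CONFIG_PPM_PAGE_ORDER) ,
            "order %d too large for a KCM" , order );

    return &cluster->kcm[ order - CONFIG_CACHE_LINE_ORDER ];
}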
trunk/kernel/kern/core.c
r669 r683 3 3 *
4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012)
5 * Alain Greiner (2016,2017,2018)
5 * Alain Greiner (2016,2017,2018,2019,2020)
6 6 *
7 7 * Copyright (c) UPMC Sorbonne Universites … … 47 47 core->ticks_nr = 0;
48 48 core->usage = 0;
49 core->spurious_irqs = 0;
50 49 core->fpu_owner = NULL;
51 50 core->rand_last = hal_time_stamp() & 0xFFF; … … 55 54
56 55 // initialise the alarms lock
57 busylock_init( &core->alarms_lock , LOCK_CORE_ALARMS );
56 remote_busylock_init( XPTR( local_cxy , &core->alarms_lock ) , LOCK_CORE_ALARMS );
58 57
59 58 // initialise the alarms list … … 61 60 }
62 61
63 ///////////////////////////////////////
64 void core_check_alarms( core_t * core )
62 ////////////////////////////////////////////////////////////////////////////////////
63 // This static function checks the alarms registered in the core, and calls the
64 // relevant alarm handler for all alarms whose date has elapsed.
65 // It does not take the lock protecting the alarm list, because it accesses only
66 // the first alarm in the list, and all modifications in the list are done by
67 // the low-level access functions called by the handler(s).
68 ////////////////////////////////////////////////////////////////////////////////////
69 static void core_check_alarms( core_t * core )
65 70 {
66 71 alarm_handler_t * handler; … … 72 77 if( list_is_empty( root ) ) return;
73 78
74 // get pointer on first alarm when list non empty
75 alarm_t * alarm = LIST_FIRST( root , alarm_t , list );
76
77 // get first alarm date
78 cycle_t alarm_date = alarm->date;
79
80 // get current date
81 cycle_t current_date = hal_get_cycles();
82
83 if( current_date >= alarm_date )
79 while( 1 )
84 80 {
85 // get pointer on registered alarm handler
86 handler = (alarm_handler_t *)alarm->func_ptr;
87
88 // call alarm handler
89 handler( alarm->args_xp );
81 // get pointer on first alarm
82 alarm_t * alarm = LIST_FIRST( root , alarm_t , list );
83
84 // get first alarm date
85 cycle_t alarm_date = alarm->date;
86
87 // get current date
88 cycle_t current_date = hal_get_cycles();
89
90 // call handler if delay elapsed, and retry
91 if( current_date >= alarm_date )
92 {
93 // get pointer on registered alarm handler
94 handler = (alarm_handler_t *)alarm->func_ptr;
95
96 // call alarm handler
97 handler( alarm->args_xp );
98 }
99 else // exit loop when first alarm delay not elapsed
100 {
101 break;
102 }
90 103 }
91 104 } // end core_check_alarms() … … 127 140 uint32_t * tm_us )
128 141 {
129 *tm_s = (core->ticks_nr*CONFIG_SCHED_TICK_MS_PERIOD)/1000;
130 *tm_us = (core->ticks_nr*CONFIG_SCHED_TICK_MS_PERIOD*1000)%1000000;
142 // get number of cycles
143 uint64_t cycles = core->cycles;
144
145 // get number of cycles per second
146 uint32_t cycles_per_second = LOCAL_CLUSTER->sys_clk;
147
148 *tm_s = cycles / cycles_per_second;
149 *tm_us = ((cycles % cycles_per_second) * 1000000) / cycles_per_second;
131 150 }
132 151 … … 139 158 ticks = core->ticks_nr++;
140 159
160 // handle alarms
161 core_check_alarms( core );
162
141 163 // handle scheduler
142 164 if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( "TICK");
143
144 // handle alarms
145 core_check_alarms( core );
146 165 }
147 166 -
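The TICK loop above only terminates correctly because alarm_start() keeps the list sorted by increasing dates. A sketch of such an insertion, under stated assumptions: alarm.c is not part of this excerpt, and the LIST_FOREACH / LIST_ELEMENT / list_add_pred / list_add_last names are assumed list-API spellings, following the LIST_FIRST usage visible above.

/* Insert <alarm> in the date-ordered list rooted in <root>. */
static void demo_insert_sorted( list_entry_t * root , alarm_t * alarm )
{
    list_entry_t * iter;

    LIST_FOREACH( root , iter )
    {
        alarm_t * cur = LIST_ELEMENT( iter , alarm_t , list );

        // link the new alarm before the first alarm with a later date
        if( cur->date > alarm->date )
        {
            list_add_pred( iter , &alarm->list );
            return;
        }
    }

    // latest date of all : append at the tail
    list_add_last( root , &alarm->list );
}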
trunk/kernel/kern/core.h
r669 r683 39 39
40 40 /****************************************************************************************
41 * This structure defines the core descriptor.
42 * Besides the core identifiers (gid,lid), it contains an embedded private scheduler.
41 * This structure defines a core descriptor.
42 * Besides the core identifiers (gid,lid), it contains an embedded private scheduler
43 * and a 64-bit software cycle counter.
44 * It also contains the root of the local list of alarms, dynamically registered by the
45 * threads running on this core. This local list is protected by a remote_busylock,
46 * because it can be accessed by any thread, running in any cluster, using the
47 * access functions defined in the <alarm.c> & <alarm.h> files.
43 48 * It contains an architecture specific extension to store the interrupt vector(s).
44 49 * The core_init() function must allocate memory for this extension, depending on the … … 51 56 gid_t gid; /*! core global identifier (hardware index) */
52 57
58 scheduler_t scheduler; /*! embedded private scheduler */
59
53 60 uint64_t cycles; /*! total number of cycles (from hard reset) */
54 61 uint32_t time_stamp; /*! previous time stamp (read from register) */
55 62
56 63 list_entry_t alarms_root; /*! root of list of attached alarms */
57 busylock_t alarms_lock; /*! lock protecting the list of alarms */
64 remote_busylock_t alarms_lock; /*! lock protecting the list of alarms */
58 65
59 66 uint32_t ticks_nr; /*! number of elapsed ticks */
60 67 uint32_t usage; /*! cumulated busy_percent (idle / total) */
61 uint32_t spurious_irqs; /*! for instrumentation... */
62 68 struct thread_s * fpu_owner; /*! pointer on current FPU owner thread */
63 69 uint32_t rand_last; /*! last computed random value */
64
65 scheduler_t scheduler; /*! embedded private scheduler */
66 70
67 71 void * pic_extend; /*! PIC implementation specific extension */ -
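Because alarms_lock is now a remote_busylock_t, a thread running in any cluster serializes with the owning core through the usual extended-pointer pattern; a minimal sketch (the acquire/release names match the remote_busylock_init() call in core.c above):

/* Take, then release, the alarms lock of a possibly remote core. */
static void demo_lock_remote_alarms( cxy_t core_cxy , core_t * core_ptr )
{
    xptr_t lock_xp = XPTR( core_cxy , &core_ptr->alarms_lock );

    remote_busylock_acquire( lock_xp );

    /* ... link or unlink an alarm_t in core_ptr->alarms_root ... */

    remote_busylock_release( lock_xp );
}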
trunk/kernel/kern/do_syscall.c
r669 r683 2 2 * do_syscall.c - architecture independent entry-point for system calls.
3 3 *
4 * Author Alain Greiner (2016,2017,2018, 2019)
4 * Author Alain Greiner (2016,2017,2018,2019,2020)
5 5 *
6 6 * Copyright (c) UPMC Sorbonne Universites … … 43 43 ///////////////////////////////////////////////////////////////////////////////////////
44 44 // This array of pointers defines the kernel functions implementing the syscalls.
45 // It must be kept consistent with the enum in "syscalls_numbers.h" file.
45 // It must be kept consistent with the enum in the <syscalls_numbers.h> file,
46 // and with the SYS_OBJs defined in the kernel <Makefile>.
46 47 ///////////////////////////////////////////////////////////////////////////////////////
47 48 … … 94 95 sys_wait, // 39
95 96
96 sys_get _config, // 40
97 sys_ get_core_id, // 41
98 sys_ get_cycle, // 42
99 sys_ display, // 43
100 sys_ place_fork, // 44
101 sys_t hread_sleep, // 45
102 sys_ thread_wakeup, // 46
103 sys_ trace, // 47
104 sys_f g, // 48
105 sys_ is_fg, // 49
97 sys_get, // 40
98 sys_display, // 41
99 sys_place_fork, // 42
100 sys_thread_sleep, // 43
101 sys_thread_wakeup, // 44
102 sys_trace, // 45
103 sys_fg, // 46
104 sys_is_fg, // 47
105 sys_fbf, // 48
106 sys_undefined, // 49 //
106 107
107 108 sys_exit, // 50
108 109 sys_sync, // 51
109 110 sys_fsync, // 52
110 sys_get_best_core, // 53
111 sys_get_nb_cores, // 54
112 sys_get_thread_info, // 55
113 sys_fbf, // 56
114 sys_socket, // 57
111 sys_socket, // 53
115 112 };
116 113 … … 164 161 case SYS_WAIT: return "WAIT"; // 39
165 162
166 case SYS_GET_CONFIG: return "GET_CONFIG"; // 40
167 case SYS_GET_CORE_ID: return "GET_CORE_ID"; // 41
168 case SYS_GET_CYCLE: return "GET_CYCLE"; // 42
169 case SYS_DISPLAY: return "DISPLAY"; // 43
170 case SYS_PLACE_FORK: return "PLACE_FORK"; // 44
171 case SYS_THREAD_SLEEP: return "THREAD_SLEEP"; // 45
172 case SYS_THREAD_WAKEUP: return "THREAD_WAKEUP"; // 46
173 case SYS_TRACE: return "TRACE"; // 47
174 case SYS_FG: return "FG"; // 48
175 case SYS_IS_FG: return "IS_FG"; // 49
163 case SYS_GET: return "GET"; // 40
164 case SYS_DISPLAY: return "DISPLAY"; // 41
165 case SYS_PLACE_FORK: return "PLACE_FORK"; // 42
166 case SYS_THREAD_SLEEP: return "THREAD_SLEEP"; // 43
167 case SYS_THREAD_WAKEUP: return "THREAD_WAKEUP"; // 44
168 case SYS_TRACE: return "TRACE"; // 45
169 case SYS_FG: return "FG"; // 46
170 case SYS_IS_FG: return "IS_FG"; // 47
171 case SYS_FBF: return "FBF"; // 48
176 172
177 173 case SYS_EXIT: return "EXIT"; // 50
178 174 case SYS_SYNC: return "SYNC"; // 51
179 175 case SYS_FSYNC: return "FSYNC"; // 52
180 case SYS_GET_BEST_CORE: return "GET_BEST_CORE"; // 53
181 case SYS_GET_NB_CORES: return "GET_NB_CORES"; // 54
182 case SYS_GET_THREAD_INFO: return "GET_THREAD_INFO"; // 55
183 case SYS_FBF: return "FBF"; // 56
184 case SYS_SOCKET: return "SOCKET"; // 57
176 case SYS_SOCKET: return "SOCKET"; // 53
185 177
186 178 default: return "undefined"; -
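The table above is consumed by the dispatch step of do_syscall(). A simplified sketch of that step; the syscall_tbl name, the reg_t argument type and the SYSCALLS_NR bound are assumptions consistent with this file:

/* Bounds-check the service number, then call through the function-pointer table. */
static inline int demo_dispatch( reg_t arg0 , reg_t arg1 ,
                                 reg_t arg2 , reg_t arg3 ,
                                 uint32_t service_num )
{
    // reject indexes outside the table; slot 49 already routes to sys_undefined
    if( service_num >= SYSCALLS_NR ) return -1;

    return syscall_tbl[service_num]( arg0 , arg1 , arg2 , arg3 );
}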
trunk/kernel/kern/kernel_init.c
r669 r683 3 3 * 4 4 * Authors : Mohamed Lamine Karaoui (2015) 5 * Alain Greiner (2016,2017,2018,2019,2020)5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) Sorbonne Universites … … 46 46 #include <memcpy.h> 47 47 #include <ppm.h> 48 #include <kcm.h> 48 49 #include <page.h> 49 50 #include <chdev.h> … … 379 380 if( func == DEV_FUNC_MMC ) 380 381 { 381 382 // check channels383 if( channels != 1 )384 {385 printk("\n[PANIC] in %s : MMC device must be single channel\n",386 __FUNCTION__ );387 hal_core_sleep();388 }389 390 382 // create chdev in local cluster 391 383 chdev_ptr = chdev_create( func, … … 394 386 false, // direction 395 387 base ); 396 397 // check memory398 388 if( chdev_ptr == NULL ) 399 389 { … … 403 393 } 404 394 395 #if (DEBUG_KERNEL_INIT & 0x1) 396 if( hal_time_stamp() > DEBUG_KERNEL_INIT ) 397 printk("\n[%s] created chdev[%x,%x] for MMC\n", 398 __FUNCTION__ , local_cxy , chdev_ptr ); 399 #endif 405 400 // make MMC specific initialisation 406 401 dev_mmc_init( chdev_ptr ); … … 423 418 #if( DEBUG_KERNEL_INIT & 0x1 ) 424 419 if( hal_time_stamp() > DEBUG_KERNEL_INIT ) 425 printk("\n[%s] : created MMC in cluster %x / chdev = %x\n",420 printk("\n[%s] initialised chdev[%x,%x] for MMC\n", 426 421 __FUNCTION__ , local_cxy , chdev_ptr ); 427 422 #endif … … 439 434 false, // direction 440 435 base ); 441 442 // check memory443 436 if( chdev_ptr == NULL ) 444 437 { … … 448 441 } 449 442 443 #if (DEBUG_KERNEL_INIT & 0x1) 444 if( hal_time_stamp() > DEBUG_KERNEL_INIT ) 445 printk("\n[%s] cxy %x : created chdev[%x,%x] for DMA[%d]\n", 446 __FUNCTION__ , local_cxy , chdev_ptr , channel ); 447 #endif 450 448 // make DMA specific initialisation 451 449 dev_dma_init( chdev_ptr ); … … 457 455 #if( DEBUG_KERNEL_INIT & 0x1 ) 458 456 if( hal_time_stamp() > DEBUG_KERNEL_INIT ) 459 printk("\n[%s] : created DMA[%d] in cluster %x / chdev = %x\n",460 __FUNCTION__ , channel , local_cxy , chdev_ptr);457 printk("\n[%s] initialised chdev[%x,%x] for DMA[%d]\n", 458 __FUNCTION__ , local_cxy , chdev_ptr , channel ); 461 459 #endif 462 460 } … … 471 469 // These chdev descriptors are distributed on all clusters, using a modulo on a global 472 470 // index, identically computed in all clusters. 473 // This function is executed in all clusters by the core[0] core, that computes a global index474 // for all external chdevs. Each core[0] core creates only the chdevs that must be placed in475 // the local cluster, because the global index matches the local index.471 // This function is executed in all clusters by the core[0], that computes a global index 472 // for all external chdevs. Each core[0] core creates only the chdevs that must be placed 473 // in the local cluster, because the global index matches the local index. 476 474 // The relevant entries in all copies of the devices directory are initialised. 
477 475 /////////////////////////////////////////////////////////////////////////////////////////// … … 499 497 dev_tbl = info->ext_dev; 500 498 501 // initializes global index (PIC is already placed in cluster 0 499 // initializes global index (PIC is already placed in cluster 0) 502 500 ext_chdev_gid = 1; 503 501 … … 529 527 530 528 // check external device functionnal type 531 if( (func != DEV_FUNC_IOB) && (func != DEV_FUNC_IOC) && (func != DEV_FUNC_TXT) && 532 (func != DEV_FUNC_NIC) && (func != DEV_FUNC_FBF) ) 529 if( (func != DEV_FUNC_IOB) && 530 (func != DEV_FUNC_IOC) && 531 (func != DEV_FUNC_TXT) && 532 (func != DEV_FUNC_NIC) && 533 (func != DEV_FUNC_FBF) ) 533 534 { 534 535 printk("\n[PANIC] in %s : undefined peripheral type\n", … … 537 538 } 538 539 539 // loop son channels540 // loop on channels 540 541 for( channel = 0 ; channel < channels ; channel++ ) 541 542 { … … 547 548 548 549 // all kernel instances compute the target cluster for all chdevs, 549 // computingthe global index ext_chdev_gid[func,channel,direction]550 // and the global index ext_chdev_gid[func,channel,direction] 550 551 cxy_t target_cxy; 551 552 while( 1 ) … … 568 569 if( target_cxy == local_cxy ) 569 570 { 570 571 #if( DEBUG_KERNEL_INIT & 0x3 )572 if( hal_time_stamp() > DEBUG_KERNEL_INIT )573 printk("\n[%s] : found chdev %s / channel = %d / rx = %d / cluster %x\n",574 __FUNCTION__ , chdev_func_str( func ), channel , rx , local_cxy );575 #endif576 571 chdev = chdev_create( func, 577 572 impl, … … 587 582 } 588 583 584 #if (DEBUG_KERNEL_INIT & 0x1) 585 if( hal_time_stamp() > DEBUG_KERNEL_INIT ) 586 printk("\n[%s] created chdev[%x,%x] for %s[%d] / is_rx %d\n", 587 __FUNCTION__ , local_cxy , chdev , chdev_func_str(func) , channel , rx ); 588 #endif 589 589 // make device type specific initialisation 590 590 if ( func == DEV_FUNC_IOB ) dev_iob_init( chdev ); … … 621 621 } 622 622 623 #if( DEBUG_KERNEL_INIT & 0x3)623 #if( DEBUG_KERNEL_INIT & 1 ) 624 624 if( hal_time_stamp() > DEBUG_KERNEL_INIT ) 625 printk("\n[%s] : created chdev %s / channel = %d / rx = %d / cluster %x / chdev = %x\n", 626 __FUNCTION__ , chdev_func_str( func ), channel , rx , local_cxy , chdev ); 627 #endif 625 printk("\n[%s] initialised chdev[%x,%x] for %s\n", 626 __FUNCTION__ , local_cxy, chdev , chdev->name ); 627 #endif 628 628 629 } // end if match 629 630 … … 637 638 638 639 /////////////////////////////////////////////////////////////////////////////////////////// 639 // This function is called by core[0] in cluster 0to allocate memory and initialize the PIC640 // This function is called by core[0][0] to allocate memory and initialize the PIC 640 641 // device, namely the informations attached to the external IOPIC controller, that 641 642 // must be replicated in all clusters (struct iopic_input). … … 1102 1103 // and allocates memory for the corresponding chdev descriptors. 1103 1104 if( core_lid == 0 ) internal_devices_init( info ); 1104 1105 1105 1106 1106 // All core[0]s contribute to initialise external peripheral chdev descriptors. 
… … 1494 1494 " - core descriptor : %d bytes\n"
1495 1495 " - scheduler : %d bytes\n"
1496 " - socket : %d bytes\n"
1496 " - socket descriptor : %d bytes\n"
1497 1497 " - rpc fifo : %d bytes\n"
1498 1498 " - page descriptor : %d bytes\n" … … 1501 1501 " - ppm manager : %d bytes\n"
1502 1502 " - kcm manager : %d bytes\n"
1503 " - khm manager : %d bytes\n"
1504 1503 " - vmm manager : %d bytes\n"
1505 1504 " - vfs inode : %d bytes\n" … … 1529 1528 sizeof( ppm_t ),
1530 1529 sizeof( kcm_t ),
1531 sizeof( khm_t ),
1532 1530 sizeof( vmm_t ),
1533 1531 sizeof( vfs_inode_t ), … … 1546 1544 #endif
1547 1545
1546 // number of cycles per TICK (depends on the actual system clock frequency)
1547 uint32_t cycles_per_tick = cluster->sys_clk / CONFIG_SCHED_TICKS_PER_SECOND;
1548
1548 1549 // each core activates its private TICK IRQ
1549 dev_pic_enable_timer( CONFIG_SCHED_TICK_MS_PERIOD );
1550 dev_pic_enable_timer( cycles_per_tick );
1550 1551
1551 1552 ///////////////////////////////////////////////////////////////////////////////// -
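A worked instance of the new TICK period computation; the 50 MHz clock and the 100 ticks per second are illustrative values, not taken from the changeset:

static uint32_t demo_cycles_per_tick( void )
{
    uint32_t sys_clk       = 50000000;    // example : 50 MHz system clock from boot_info
    uint32_t ticks_per_sec = 100;         // example : CONFIG_SCHED_TICKS_PER_SECOND
    return sys_clk / ticks_per_sec;       // 500000 cycles between two TICK IRQs
}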
trunk/kernel/kern/ksocket.c
r669 r683 1 1 /* 2 * ksocket.c - kernel socket APIimplementation.2 * ksocket.c - kernel socket implementation. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019,2020)4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 117 117 switch( sts ) 118 118 { 119 case CMD_STS_SUCCESS : return " TX_CONNECT";119 case CMD_STS_SUCCESS : return "SUCCESS"; 120 120 case CMD_STS_EOF : return "EOF"; 121 121 case CMD_STS_RST : return "RST"; … … 135 135 // and request the NIC_TX server thread to re-send the unacknowledged segment. 136 136 /////////////////////////////////////////////////////////////////////////////////////////// 137 // @ args_xp : extended pointer on the involved socket.137 // @ sock_xp : extended pointer on the involved socket. 138 138 /////////////////////////////////////////////////////////////////////////////////////////// 139 static void __attribute__((noinline)) socket_alarm_handler( xptr_t args_xp )139 static void __attribute__((noinline)) socket_alarm_handler( xptr_t sock_xp ) 140 140 { 141 141 // get cluster and local pointer on socket descriptor 142 socket_t * sock_ptr = GET_PTR( args_xp ); 143 cxy_t sock_cxy = GET_CXY( args_xp ); 142 socket_t * sock_ptr = GET_PTR( sock_xp ); 143 cxy_t sock_cxy = GET_CXY( sock_xp ); 144 145 #if DEBUG_SOCKET_ALARM 146 uint32_t cycle = (uint32_t)hal_get_cycles(); 147 #endif 148 149 // build extended pointer on lock protecting socket 150 xptr_t socket_lock_xp = XPTR( sock_cxy , &sock_ptr->lock ); 151 152 // take the socket lock 153 remote_queuelock_acquire( socket_lock_xp ); 144 154 145 155 // get relevant infos from socket descriptor … … 151 161 "illegal tx_client field for a retransmission timeout" ); 152 162 153 // get TX client thread cluster and local pointer 154 thread_t * thread_ptr = GET_PTR( thread_xp ); 163 // get TX client thread cluster 155 164 cxy_t thread_cxy = GET_CXY( thread_xp ); 156 165 … … 168 177 169 178 // update the date in alarm 170 alarm_update( thread_ ptr , hal_get_cycles() + TCP_RETRANSMISSION_TIMEOUT );179 alarm_update( thread_xp , hal_get_cycles() + CONFIG_SOCK_RETRY_TIMEOUT ); 171 180 172 181 ////////////////////////////// … … 175 184 176 185 #if DEBUG_SOCKET_ALARM 177 uint32_t cycle = (uint32_t)hal_get_cycles(); 178 printk("\n[%s] rings for TX_CONNECT : request a new SYN segment / cycle %d\n",186 if( DEBUG_SOCKET_ALARM < cycle ) 187 printk("\n[%s] rings for CONNECT : request a new SYN segment / cycle %d\n", 179 188 __FUNCTION__ , cycle ); 180 189 #endif … … 193 202 194 203 #if DEBUG_SOCKET_ALARM 195 uint32_t cycle = (uint32_t)hal_get_cycles(); 196 printk("\n[%s] rings for TX_ACCEPT : request a new SYN-ACK segment / cycle %d\n",204 if( DEBUG_SOCKET_ALARM < cycle ) 205 printk("\n[%s] rings for ACCEPT : request a new SYN-ACK segment / cycle %d\n", 197 206 __FUNCTION__ , cycle ); 198 207 #endif … … 211 220 212 221 #if DEBUG_SOCKET_ALARM 213 uint32_t cycle = (uint32_t)hal_get_cycles(); 214 printk("\n[%s] rings for TX_CLOSE : request a new FIN-ACK segment / cycle %d\n",222 if( DEBUG_SOCKET_ALARM < cycle ) 223 printk("\n[%s] rings for CLOSE : request a new FIN-ACK segment / cycle %d\n", 215 224 __FUNCTION__ , cycle ); 216 225 #endif … … 227 236 if( tx_cmd == CMD_TX_SEND ) 228 237 { 229 // TODO build a new TX_SEND command 230 } 238 // get get relevant infos from socket pointer 239 uint32_t tx_una = hal_remote_l32( XPTR( sock_cxy , &sock_ptr->tx_una )); 240 uint32_t tx_ack = hal_remote_l32( XPTR( sock_cxy , &sock_ptr->tx_ack )); 241 uint32_t tx_len = hal_remote_l32( XPTR( 
sock_cxy , &sock_ptr->tx_len ));
242
243 #if DEBUG_SOCKET_ALARM
244 if( DEBUG_SOCKET_ALARM < cycle )
245 printk("\n[%s] rings for SEND : request %d bytes / cycle %d\n",
246 __FUNCTION__ , tx_len , cycle );
247 #endif
248 // update command fields in socket
249 hal_remote_s32( XPTR( sock_cxy , &sock_ptr->tx_nxt ) , tx_una );
250 hal_remote_s32( XPTR( sock_cxy , &sock_ptr->tx_todo ) , tx_len - tx_ack );
251 hal_remote_s32( XPTR( sock_cxy , &sock_ptr->tx_valid ) , true );
252
253 // unblock the NIC_TX server thread
254 thread_unblock( tx_server_xp , THREAD_BLOCKED_CLIENT );
255 }
256
257 // release the socket lock
258 remote_queuelock_release( socket_lock_xp );
259
231 260 } // end socket_alarm_handler()
232
233 ///////////////////////////////////////////////////////////////////////////////////////////
234 // This static function activates the alarm embedded in the calling thread descriptor,
235 // using the <date> argument.
236 ///////////////////////////////////////////////////////////////////////////////////////////
237 // @ delay : number of cycles (from the current absolute date).
238 ///////////////////////////////////////////////////////////////////////////////////////////
239 static void socket_alarm_start( xptr_t socket_xp,
240 uint32_t delay )
241 {
242 // compute absolute date
243 cycle_t date = hal_get_cycles() + delay;
244
245 // get pointer on calling threadf
246 thread_t * this = CURRENT_THREAD;
247
248 // start the alarm
249 alarm_start( date,
250 &socket_alarm_handler, // func_ptr
251 socket_xp, // args_xp
252 this );
253 }
254
255 ///////////////////////////////////////////////////////////////////////////////////////////
256 // This static function activates the alarm embedded in the calling thread descriptor,
257 // using the <date> argument.
258 ///////////////////////////////////////////////////////////////////////////////////////////
259 // @ date : absolute date for this alarm.
260 ///////////////////////////////////////////////////////////////////////////////////////////
261 static void socket_alarm_stop( void )
262 {
263 // get pointer on calling threadf
264 thread_t * this = CURRENT_THREAD;
265
266 // stop the alarm
267 alarm_stop( this );
268 }
269
261 270
262 ///////////////////////////////////////////////////////////////////////////////////////// … … 470 462 // associated to a socket: file descriptor, socket descriptor, RX buffer, R2T queue,
471 463 // and CRQ queue. It allocates an fdid, and registers it in the process fd_array.
472 // It initialise the the socket desccriptor static fields, other than local_addr,
464 // It initialises the socket descriptor static fields, other than local_addr,
473 465 // local_port, remote_addr, remote_port), and sets the socket state to UNBOUND.
474 466 // It returns the local pointer on socket descriptor and the fdid value in buffers … … 489 481 {
490 482 uint32_t fdid;
491 kmem_req_t req;
492 483 socket_t * socket;
493 484 vfs_file_t * file;
494 485 uint32_t state;
486 void * tx_buf;
495 487 error_t error;
496 489
490
497 491 thread_t * this = CURRENT_THREAD;
498 492 process_t * process = this->process;
499 493
494 #if DEBUG_SOCKET_CREATE || DEBUG_SOCKET_ERROR
495 uint32_t cycle = (uint32_t)hal_get_cycles();
496 #endif
497
500 498 #if DEBUG_SOCKET_CREATE
501 uint32_t cycle = (uint32_t)hal_get_cycles();
502 499 if( DEBUG_SOCKET_CREATE < cycle )
503 500 printk("\n[%s] thread[%x,%x] enter / cycle %d\n", … … 506 502 // 1.
allocate memory for socket descriptor 508 req.type = KMEM_KCM; 509 req.order = bits_log2( sizeof(socket_t) ); 510 req.flags = AF_ZERO; 511 socket = kmem_remote_alloc( cxy , &req ); 504 socket = kmem_remote_alloc( cxy , bits_log2(sizeof(socket_t)) , AF_ZERO ); 512 505 513 506 if( socket == NULL ) 514 507 { 515 printk("\n[ERROR] in %s : cannot allocate socket descriptor / thread[%x,%x]\n", 516 __FUNCTION__, process->pid, this->trdid ); 517 return -1; 518 } 519 520 // 2. allocate memory for rx_buf buffer 508 509 #if DEBUG_SOCKET_ERROR 510 printk("\n[ERROR] in %s : cannot allocate socket descriptor / thread[%x,%x] / cycle %d\n", 511 __FUNCTION__, process->pid, this->trdid, cycle ); 512 #endif 513 return -1; 514 } 515 516 // 2. allocate memory for rx_buf data buffer 521 517 error = remote_buf_init( XPTR( cxy , &socket->rx_buf ), 522 bits_log2( CONFIG_SOCK_RX_BUF_SIZE ));518 CONFIG_SOCK_RX_BUF_ORDER ); 523 519 524 520 if( error ) 525 521 { 526 printk("\n[ERROR] in %s : cannot allocate rx_buf / thread[%x,%x]\n", 527 __FUNCTION__, process->pid, this->trdid ); 528 req.type = KMEM_KCM; 529 req.ptr = socket; 530 kmem_remote_free( cxy , &req ); 531 return -1; 532 } 533 534 // 3. allocate memory for r2tq queue 522 523 #if DEBUG_SOCKET_ERROR 524 printk("\n[ERROR] in %s : no memory for rx_buf / thread[%x,%x] / cycle %d\n", 525 __FUNCTION__, process->pid, this->trdid, cycle ); 526 #endif 527 kmem_remote_free( cxy , socket , bits_log2(sizeof(socket_t)) ); // 1 528 return -1; 529 } 530 531 // 3. allocate memory for tx_buf 532 tx_buf = kmem_remote_alloc( cxy , CONFIG_SOCK_TX_BUF_ORDER , AF_NONE ); 533 534 if( tx_buf == NULL ) 535 { 536 537 #if DEBUG_SOCKET_ERROR 538 printk("\n[ERROR] in %s : no memory for tx_buf / thread[%x,%x] / cycle %d\n", 539 __FUNCTION__, process->pid, this->trdid, cycle ); 540 #endif 541 remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) ); // 2 542 kmem_remote_free( cxy , socket , bits_log2(sizeof(socket_t)) ); // 1 543 return -1; 544 } 545 546 // 4. allocate memory for r2tq queue 535 547 error = remote_buf_init( XPTR( cxy , &socket->r2tq ), 536 548 bits_log2( CONFIG_SOCK_R2T_BUF_SIZE ) ); 537 549 if( error ) 538 550 { 539 printk("\n[ERROR] in %s : cannot allocate R2T queue / thread[%x,%x]\n", 540 __FUNCTION__, process->pid, this->trdid ); 541 remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) ); 542 req.type = KMEM_KCM; 543 req.ptr = socket; 544 kmem_remote_free( cxy , &req ); 545 return -1; 546 } 547 548 // don't allocate memory for crqq queue, as it is done by the socket_listen function 549 550 // 4. allocate memory for file descriptor 551 req.type = KMEM_KCM; 552 req.order = bits_log2( sizeof(vfs_file_t) ); 553 req.flags = AF_ZERO; 554 file = kmem_remote_alloc( cxy , &req ); 551 552 #if DEBUG_SOCKET_ERROR 553 printk("\n[ERROR] in %s : cannot allocate R2T queue / thread[%x,%x] / cycle %d\n", 554 __FUNCTION__, process->pid, this->trdid, cycle ); 555 #endif 556 kmem_remote_free( cxy , tx_buf , CONFIG_SOCK_TX_BUF_ORDER ); // 3 557 remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) ); // 2 558 kmem_remote_free( cxy , socket , bits_log2(sizeof(socket_t)) ); // 1 559 return -1; 560 } 561 562 // don't allocate memory for CRQ queue / done by the socket_listen function 563 564 // 5. 
allocate memory for file descriptor 565 file = kmem_remote_alloc( cxy , bits_log2(sizeof(vfs_file_t)) , AF_ZERO ); 555 566 556 567 if( file == NULL ) 557 568 { 558 printk("\n[ERROR] in %s : cannot allocate file descriptor / thread[%x,%x]\n", 559 __FUNCTION__, process->pid, this->trdid ); 560 remote_buf_release_data( XPTR( cxy , &socket->r2tq ) ); 561 remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) ); 562 req.type = KMEM_KCM; 563 req.ptr = socket; 564 kmem_remote_free( cxy , &req ); 569 570 #if DEBUG_SOCKET_ERROR 571 printk("\n[ERROR] in %s : cannot allocate file descriptor / thread[%x,%x] / cycle %d\n", 572 __FUNCTION__, process->pid, this->trdid, cycle ); 573 #endif 574 remote_buf_release_data( XPTR( cxy , &socket->r2tq ) ); // 4 575 kmem_remote_free( cxy , tx_buf , CONFIG_SOCK_TX_BUF_ORDER ); // 3 576 remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) ); // 2 577 kmem_remote_free( cxy , socket , bits_log2(sizeof(socket_t)) ); // 1 565 578 return -1; 566 579 } 567 580 568 // 5. get an fdid value, and register file descriptor in fd_array[]581 // 6. get an fdid value, and register file descriptor in fd_array[] 569 582 error = process_fd_register( process->ref_xp, 570 583 XPTR( cxy , file ), … … 572 585 if ( error ) 573 586 { 574 printk("\n[ERROR] in %s : cannot register file descriptor / thread[%x,%x]\n", 575 __FUNCTION__, process->pid, this->trdid ); 576 req.type = KMEM_KCM; 577 req.ptr = file; 578 kmem_free( &req ); 579 remote_buf_release_data( XPTR( cxy , &socket->r2tq ) ); 580 remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) ); 581 req.ptr = socket; 582 kmem_free( &req ); 587 588 #if DEBUG_SOCKET_ERROR 589 if( DEBUG_SOCKET_ERROR < cycle ) 590 printk("\n[ERROR] in %s : cannot register file descriptor / thread[%x,%x] / cycle %d\n", 591 __FUNCTION__, process->pid, this->trdid, cycle ); 592 #endif 593 kmem_remote_free( cxy , file , bits_log2(sizeof(vfs_file_t)) ); // 5 594 remote_buf_release_data( XPTR( cxy , &socket->r2tq ) ); // 4 595 kmem_remote_free( cxy , tx_buf , CONFIG_SOCK_TX_BUF_ORDER ); // 3 596 remote_buf_release_data( XPTR( cxy , &socket->rx_buf ) ); // 2 597 kmem_remote_free( cxy , socket , bits_log2(sizeof(socket_t)) ); // 1 583 598 return -1; 584 599 } … … 597 612 hal_remote_s32( XPTR( cxy , &socket->rx_valid ) , false ); 598 613 hal_remote_s32( XPTR( cxy , &socket->nic_channel ) , 0 ); 614 hal_remote_spt( XPTR( cxy , &socket->tx_buf ) , tx_buf ); 599 615 600 616 // initialize file descriptor … … 606 622 607 623 #if DEBUG_SOCKET_CREATE 624 cycle = (uint32_t)hal_get_cycles(); 608 625 if( DEBUG_SOCKET_CREATE < cycle ) 609 626 printk("\n[%s] thread[%x,%x] exit / socket[%x,%d] / xptr[%x,%x] / cycle %d\n", … … 631 648 static void socket_destroy( xptr_t file_xp ) 632 649 { 633 kmem_req_t req;634 635 650 thread_t * this = CURRENT_THREAD; 636 651 process_t * process = this->process; … … 677 692 678 693 // release memory allocated for file descriptor 679 req.type = KMEM_KCM; 680 req.ptr = file_ptr; 681 kmem_remote_free( file_cxy , &req ); 694 kmem_remote_free( file_cxy , file_ptr , bits_log2(sizeof(vfs_file_t)) ); 682 695 683 696 // release memory allocated for buffers attached to socket descriptor … … 687 700 688 701 // release memory allocated for socket descriptor 689 req.type = KMEM_KCM; 690 req.ptr = socket_ptr; 691 kmem_remote_free( file_cxy , &req ); 702 kmem_remote_free( file_cxy , socket_ptr , bits_log2(sizeof(socket_t)) ); 692 703 693 704 #if DEBUG_SOCKET_DESTROY … … 702 713 //////////////////////////////////////////////// 703 714 void 
socket_put_r2t_request( xptr_t queue_xp, 704 uint 32_tflags,715 uint8_t flags, 705 716 uint32_t channel ) 706 717 { … … 715 726 // try to register R2T request 716 727 error_t error = remote_buf_put_from_kernel( queue_xp, 717 (uint8_t *)(&flags),728 &flags, 718 729 1 ); 719 730 if( error ) … … 740 751 } 741 752 } // end socket_put_r2t_request() 753 754 /////////////////////////////////////////////////// 755 error_t socket_get_r2t_request( xptr_t queue_xp, 756 uint8_t * flags ) 757 { 758 // get one request from R2T queue 759 return remote_buf_get_to_kernel( queue_xp, 760 flags, 761 1 ); 762 } // end socket_get_r2T_request() 742 763 743 764 /////////////////////////////////////////////////// … … 843 864 process_t * process = this->process; 844 865 866 #if DEBUG_SOCKET_BIND || DEBUG_SOCKET_ERROR 867 uint32_t cycle = (uint32_t)hal_get_cycles(); 868 #endif 869 845 870 #if DEBUG_SOCKET_BIND 846 uint32_t cycle = (uint32_t)hal_get_cycles();847 871 if( DEBUG_SOCKET_BIND < cycle ) 848 872 printk("\n[%s] thread[%x,%x] enter / socket[%x,%d] / addr %x / port %x / cycle %d\n", … … 858 882 if( file_xp == XPTR_NULL ) 859 883 { 860 printk("\n[ERROR] in %s : undefined fdid %d / thread[%x,%x]\n", 861 __FUNCTION__, fdid, process->pid, this->trdid ); 884 885 #if DEBUG_SOCKET_ERROR 886 printk("\n[ERROR] in %s : undefined fdid %d / thread[%x,%x] / cycle %d\n", 887 __FUNCTION__, fdid, process->pid, this->trdid, cycle ); 888 #endif 862 889 return -1; 863 890 } … … 869 896 if( file_type != FILE_TYPE_SOCK ) 870 897 { 871 printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x]", 872 __FUNCTION__, vfs_inode_type_str( file_type ), process->pid, this->trdid ); 898 899 #if DEBUG_SOCKET_ERROR 900 printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x] / cycle %d", 901 __FUNCTION__, vfs_inode_type_str( file_type ), process->pid, this->trdid, cycle ); 902 #endif 873 903 return -1; 874 904 } … … 918 948 process_t * process = this->process; 919 949 950 #if DEBUG_SOCKET_LISTEN || DEBUG_SOCKET_ERROR 951 uint32_t cycle = (uint32_t)hal_get_cycles(); 952 #endif 953 920 954 #if DEBUG_SOCKET_LISTEN 921 uint32_t cycle = (uint32_t)hal_get_cycles();922 955 if( DEBUG_SOCKET_LISTEN < cycle ) 923 956 printk("\n[%s] thread[%x,%x] enter / socket[%x,%d] / crq_depth %x / cycle %d\n", … … 933 966 if( file_xp == XPTR_NULL ) 934 967 { 935 printk("\n[ERROR] in %s : undefined fdid %d / thread[%x,%x]\n", 936 __FUNCTION__, fdid, process->pid, this->trdid ); 968 969 #if DEBUG_SOCKET_ERROR 970 printk("\n[ERROR] in %s : undefined fdid %d / thread[%x,%x] / cycle %d\n", 971 __FUNCTION__, fdid, process->pid, this->trdid, cycle ); 972 #endif 937 973 return -1; 938 974 } … … 944 980 if( file_type != FILE_TYPE_SOCK ) 945 981 { 946 printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x]\n", 947 __FUNCTION__, vfs_inode_type_str(file_type), process->pid, this->trdid ); 982 983 #if DEBUG_SOCKET_ERROR 984 printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x] / cycle %d\n", 985 __FUNCTION__, vfs_inode_type_str(file_type), process->pid, this->trdid, cycle ); 986 #endif 948 987 return -1; 949 988 } … … 958 997 if( socket_type != SOCK_STREAM ) 959 998 { 960 printk("\n[ERROR] in %s : illegal socket type %s / thread[%x,%x]\n", 961 __FUNCTION__, socket_type_str(socket_type), process->pid, this->trdid ); 999 1000 #if DEBUG_SOCKET_ERROR 1001 printk("\n[ERROR] in %s : illegal socket type %s / thread[%x,%x] / cycle %d\n", 1002 __FUNCTION__, socket_type_str(socket_type), process->pid, this->trdid, cycle ); 1003 #endif 962 1004 return -1; 963 1005 
} … … 966 1008 if( socket_state != TCP_STATE_BOUND ) 967 1009 { 968 printk("\n[ERROR] in %s : illegal socket state %s / thread[%x,%x]\n", 969 __FUNCTION__, socket_state_str(socket_state), process->pid, this->trdid ); 1010 1011 #if DEBUG_SOCKET_ERROR 1012 printk("\n[ERROR] in %s : illegal socket state %s / thread[%x,%x] / cycle %d\n", 1013 __FUNCTION__, socket_state_str(socket_state), process->pid, this->trdid, cycle ); 1014 #endif 970 1015 return -1; 971 1016 } … … 980 1025 if( error ) 981 1026 { 982 printk("\n[ERROR] in %s : cannot allocate CRQ queue / thread[%x,%x]\n", 983 __FUNCTION__, process->pid, this->trdid ); 1027 1028 #if DEBUG_SOCKET_ERROR 1029 printk("\n[ERROR] in %s : cannot allocate CRQ queue / thread[%x,%x] / cycle %d\n", 1030 __FUNCTION__, process->pid, this->trdid, cycle ); 1031 #endif 984 1032 return -1; 985 1033 } … … 1011 1059 vfs_file_t * file_ptr; 1012 1060 cxy_t file_cxy; 1013 vfs_file_type_t file_type; // file descriptor type1061 vfs_file_type_t file_type; // file descriptor type 1014 1062 socket_t * socket_ptr; // local pointer on remote waiting socket 1015 1063 uint32_t socket_type; // listening socket type … … 1045 1093 process_t * process = this->process; 1046 1094 1047 #if DEBUG_SOCKET_ACCEPT 1095 #if DEBUG_SOCKET_ACCEPT || DEBUG_SOCKET_ERROR 1048 1096 uint32_t cycle = (uint32_t)hal_get_cycles(); 1097 #endif 1098 1099 #if DEBUG_SOCKET_ACCEPT 1049 1100 if( DEBUG_SOCKET_ACCEPT < cycle ) 1050 1101 printk("\n[%s] thread[%x,%x] enter for socket[%x,%d] / cycle %d\n", … … 1060 1111 if( file_xp == XPTR_NULL ) 1061 1112 { 1062 printk("\n[ERROR] in %s : undefined fdid %d", 1063 __FUNCTION__, fdid ); 1113 1114 #if DEBUG_SOCKET_ERROR 1115 printk("\n[ERROR] in %s : undefined fdid %d / thead[%x,%x] / cycle %d", 1116 __FUNCTION__, fdid, process->pid, this->trdid, cycle ); 1117 #endif 1064 1118 return -1; 1065 1119 } … … 1071 1125 if( file_type != FILE_TYPE_SOCK ) 1072 1126 { 1073 printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x]\n", 1074 __FUNCTION__, vfs_inode_type_str(file_type), process->pid, this->trdid ); 1127 1128 #if DEBUG_SOCKET_ERROR 1129 printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x] / cycle %d\n", 1130 __FUNCTION__, vfs_inode_type_str(file_type), process->pid, this->trdid, cycle ); 1131 #endif 1075 1132 return -1; 1076 1133 } … … 1097 1154 if( socket_type != SOCK_STREAM ) 1098 1155 { 1099 // release listening socket lock 1156 1157 #if DEBUG_SOCKET_ERROR 1158 printk("\n[ERROR] in %s : illegal socket type %s / thread[%x,%x] / cycle %d\n", 1159 __FUNCTION__, socket_type_str(socket_type), process->pid , this->trdid, cycle ); 1160 #endif 1100 1161 remote_queuelock_release( socket_lock_xp ); 1101 1102 printk("\n[ERROR] in %s : illegal socket type %s / thread[%x,%x]\n",1103 __FUNCTION__, socket_type_str(socket_type), process->pid , this->trdid );1104 1162 return -1; 1105 1163 } … … 1108 1166 if( socket_state != TCP_STATE_LISTEN ) 1109 1167 { 1110 // release listening socket lock 1168 1169 #if DEBUG_SOCKET_ERROR 1170 printk("\n[ERROR] in %s : illegal socket state %s / thread[%x,%x] / cycle %d\n", 1171 __FUNCTION__, socket_state_str(socket_state), process->pid, this->trdid, cycle ); 1172 #endif 1111 1173 remote_queuelock_release( socket_lock_xp ); 1112 1113 printk("\n[ERROR] in %s : illegal socket state %s / thread[%x,%x]\n",1114 __FUNCTION__, socket_state_str(socket_state), process->pid, this->trdid );1115 1174 return -1; 1116 1175 } … … 1119 1178 if( (socket_rx_valid == true) || (socket_rx_client != XPTR_NULL) ) 1120 1179 { 1121 // release 
listening socket lock 1180 1181 #if DEBUG_SOCKET_ERROR 1182 printk("\n[ERROR] in %s : previous RX cmd on socket[%x,%d] / thread[%x,%x] / cycle %d\n", 1183 __FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle ); 1184 #endif 1122 1185 remote_queuelock_release( socket_lock_xp ); 1123 1124 printk("\n[ERROR] in %s : previous RX cmd on socket[%x,%d] / thread[%x,%x]\n",1125 __FUNCTION__, process->pid, fdid, process->pid, this->trdid );1126 1186 return -1; 1127 1187 } … … 1130 1190 if( (socket_tx_valid == true) || (socket_tx_client != XPTR_NULL) ) 1131 1191 { 1132 // release socket lock 1192 1193 #if DEBUG_SOCKET_ERROR 1194 printk("\n[ERROR] in %s : previous TX cmd on socket[%x,%d] / thread[%x,%x] / cycle %d\n", 1195 __FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle ); 1196 #endif 1133 1197 remote_queuelock_release( socket_lock_xp ); 1134 1135 printk("\n[ERROR] in %s : previous TX cmd on socket[%x,%d] / thread[%x,%x]\n", 1136 __FUNCTION__, process->pid, fdid, process->pid, this->trdid ); 1137 return -1; 1138 } 1139 1140 // 2) build extended pointer on listening socket.crq 1198 return -1; 1199 } 1200 1201 // 2) check the listenig socket CRQ 1141 1202 crq_xp = XPTR( file_cxy , &socket_ptr->crqq ); 1142 1203 … … 1144 1205 crq_status = remote_buf_status( crq_xp ); 1145 1206 1146 // block & deschedule when CRQ empty1207 // block & deschedule to wait a client request when CRQ empty 1147 1208 if( crq_status == 0 ) 1148 1209 { 1149 // register command arguments in listening socket1210 // register command arguments for NIC_RX server in listening socket 1150 1211 hal_remote_s32( XPTR( file_cxy , &socket_ptr->rx_cmd ), CMD_RX_ACCEPT ); 1151 1212 hal_remote_s64( XPTR( file_cxy , &socket_ptr->rx_client ), client_xp ); … … 1179 1240 crq_status = remote_buf_status( crq_xp ); 1180 1241 1181 assert( __FUNCTION__, (((crq_status > 0) || (cmd_status!= CMD_STS_SUCCESS)) && (cmd_valid == false)), 1242 assert( __FUNCTION__, 1243 (((crq_status > 0) || (cmd_status!= CMD_STS_SUCCESS)) && (cmd_valid == false)), 1182 1244 "illegal socket state when client thread resumes after RX_ACCEPT" ); 1183 1245 … … 1187 1249 if( cmd_status != CMD_STS_SUCCESS ) 1188 1250 { 1189 // release socket lock 1251 1252 #if DEBUG_SOCKET_ERROR 1253 printk("\n[ERROR] in %s : reported for RX / socket[%x,%d] / thread[%x,%x] / cycle %d\n", 1254 __FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle ); 1255 #endif 1190 1256 remote_queuelock_release( socket_lock_xp ); 1191 1192 printk("\n[ERROR] in %s for RX_ACCEPT command / socket[%x,%d] / thread[%x,%x]\n",1193 __FUNCTION__, process->pid, fdid, process->pid, this->trdid );1194 1257 return -1; 1195 1258 } 1196 1197 // extract first request from the listening socket CRQ 1198 error = socket_get_crq_request( crq_xp, 1259 } // end blocking on CRQ empty 1260 1261 // from this point, we can extract a request from listening socket CRQ 1262 error = socket_get_crq_request( crq_xp, 1199 1263 &new_remote_addr, 1200 1264 &new_remote_port, 1201 1265 &new_remote_iss, 1202 1266 &new_remote_window ); 1203 1204 1267 assert( __FUNCTION__, (error == 0), 1205 1268 "cannot get a connection request from a non-empty CRQ" ); 1206 1269 1207 // reset listening socket rx_client 1208 hal_remote_s32( XPTR( file_cxy , &socket_ptr->rx_client ) , XPTR_NULL ); 1209 1210 // release socket lock 1211 remote_queuelock_release( socket_lock_xp ); 1212 1213 } // end blocking on CRQ status 1214 1215 // from this point, we can create a new socket 1216 // and ask the NIC_TX to send a SYN-ACK segment 1270 
// release listening socket lock 1271 remote_queuelock_release( socket_lock_xp ); 1217 1272 1218 1273 #if DEBUG_SOCKET_ACCEPT 1219 1274 cycle = (uint32_t)hal_get_cycles(); 1220 1275 if( DEBUG_SOCKET_ACCEPT < cycle ) 1221 printk("\n[%s] thread[%x,%x] socket[%x,%d] / got a CRQ request / cycle %d\n", 1222 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, cycle ); 1276 printk("\n[%s] thread[%x,%x] socket[%x,%d] / CRQ request [addr %x / port %x] / cycle %d\n", 1277 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, 1278 new_remote_addr, new_remote_port, cycle ); 1223 1279 #endif 1224 1280 … … 1234 1290 if( error ) 1235 1291 { 1236 printk("\n[ERROR] in %s : cannot allocate new socket / thread[%x,%x]\n", 1237 __FUNCTION__, process->pid, this->trdid ); 1292 1293 #if DEBUG_SOCKET_ERROR 1294 printk("\n[ERROR] in %s : cannot create new socket / thread[%x,%x] / cycle %d\n", 1295 __FUNCTION__, process->pid, this->trdid, cycle ); 1296 #endif 1238 1297 return -1; 1239 1298 } … … 1287 1346 1288 1347 // start retransmission timer 1289 socket_alarm_start( new_socket_xp , TCP_RETRANSMISSION_TIMEOUT ); 1348 alarm_start( client_xp, 1349 hal_get_cycles() + CONFIG_SOCK_RETRY_TIMEOUT, 1350 &socket_alarm_handler, 1351 new_socket_xp ); 1290 1352 1291 1353 #if DEBUG_SOCKET_ACCEPT 1292 1354 cycle = (uint32_t)hal_get_cycles(); 1293 1355 if( DEBUG_SOCKET_ACCEPT < cycle ) 1294 printk("\n[%s] thread[%x,%x] new_socket[%x,%d] blocks on <IO> waiting ESTAB/ cycle %d\n",1356 printk("\n[%s] thread[%x,%x] for socket[%x,%d] request SYN-ACK & blocks on <IO> / cycle %d\n", 1295 1357 __FUNCTION__, process->pid, this->trdid, process->pid, new_fdid, cycle ); 1296 1358 #endif … … 1307 1369 #endif 1308 1370 1309 // stop retransmission timer 1310 socket_alarm_stop();1371 // stop retransmission timer in thread descriptor 1372 alarm_stop( client_xp ); 1311 1373 1312 1374 // get new socket state, tx_valid and tx_sts … … 1315 1377 cmd_status = hal_remote_l32( XPTR( new_socket_cxy , &new_socket_ptr->tx_sts )); 1316 1378 1317 assert( __FUNCTION__, (((new_state == TCP_STATE_ESTAB) || (cmd_status != CMD_STS_SUCCESS))1318 1379 assert( __FUNCTION__, 1380 (((new_state == TCP_STATE_ESTAB) || (cmd_status != CMD_STS_SUCCESS)) && (cmd_valid == false)), 1319 1381 "illegal socket state when client thread resumes after TX_ACCEPT" ); 1320 1382 … … 1324 1386 if( cmd_status != CMD_STS_SUCCESS ) 1325 1387 { 1326 printk("\n[ERROR] in %s for TX_ACCEPT command / socket[%x,%d] / thread[%x,%x]\n", 1327 __FUNCTION__, process->pid, new_fdid, process->pid, this->trdid ); 1388 1389 #if DEBUG_SOCKET_ERROR 1390 printk("\n[ERROR] in %s reported for TX / socket[%x,%d] / thread[%x,%x] / cycle %d\n", 1391 __FUNCTION__, process->pid, new_fdid, process->pid, this->trdid, cycle ); 1392 #endif 1328 1393 return -1; 1329 1394 } … … 1370 1435 trdid_t trdid = this->trdid; 1371 1436 1437 #if DEBUG_SOCKET_CONNECT || DEBUG_SOCKET_ERROR 1438 uint32_t cycle = (uint32_t)hal_get_cycles(); 1439 #endif 1440 1372 1441 // get pointers on file descriptor 1373 1442 xptr_t file_xp = process_fd_get_xptr_from_local( this->process , fdid ); … … 1378 1447 if( file_xp == XPTR_NULL ) 1379 1448 { 1380 printk("\n[ERROR] in %s : undefined fdid %d", 1381 __FUNCTION__, fdid ); 1449 1450 #if DEBUG_SOCKET_ERROR 1451 printk("\n[ERROR] in %s : undefined fdid %d / thread[%x,%x] / cycle %d", 1452 __FUNCTION__, fdid, pid, trdid, cycle ); 1453 #endif 1382 1454 return -1; 1383 1455 } … … 1388 1460 1389 1461 #if DEBUG_SOCKET_CONNECT 1390 uint32_t cycle = (uint32_t)hal_get_cycles();1391 1462 if( 
DEBUG_SOCKET_CONNECT < cycle ) 1392 printk("\n[%s] thread[%x,%x] enter for socket[%x,%d] / addr %x / port % d/ cycle %d\n",1463 printk("\n[%s] thread[%x,%x] enter for socket[%x,%d] / addr %x / port %x / cycle %d\n", 1393 1464 __FUNCTION__, pid, trdid, pid, fdid, remote_addr, remote_port, cycle ); 1394 1465 #endif … … 1397 1468 if( file_type != FILE_TYPE_SOCK ) 1398 1469 { 1399 printk("\n[ERROR] in %s : illegal file type %s", 1400 __FUNCTION__, vfs_inode_type_str( file_type ) ); 1470 1471 #if DEBUG_SOCKET_ERROR 1472 printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x] / cycle %d", 1473 __FUNCTION__, vfs_inode_type_str( file_type ), pid, trdid, cycle ); 1474 #endif 1401 1475 return -1; 1402 1476 } … … 1412 1486 if( socket_state != UDP_STATE_BOUND ) 1413 1487 { 1414 printk("\n[ERROR] in %s : illegal socket state %s for type %s", 1415 __FUNCTION__, socket_state_str(socket_state), socket_type_str(socket_type) ); 1488 1489 #if DEBUG_SOCKET_ERROR 1490 printk("\n[ERROR] in %s : illegal socket state %s for type %s / thread[%x,%x] / cycle %d", 1491 __FUNCTION__, socket_state_str(socket_state), socket_type_str(socket_type), pid, trdid, cycle ); 1492 #endif 1416 1493 return -1; 1417 1494 } … … 1421 1498 if( socket_state != TCP_STATE_BOUND ) 1422 1499 { 1423 printk("\n[ERROR] in %s : illegal socket state %s for type %s", 1424 __FUNCTION__, socket_state_str(socket_state), socket_type_str(socket_type) ); 1500 1501 #if DEBUG_SOCKET_ERROR 1502 printk("\n[ERROR] in %s : illegal socket state %s for type %s / thread[%x,%x] / cycle %d", 1503 __FUNCTION__, socket_state_str(socket_state), socket_type_str(socket_type), pid, trdid, cycle ); 1504 #endif 1425 1505 return -1; 1426 1506 } … … 1428 1508 else 1429 1509 { 1430 printk("\n[ERROR] in %s : illegal socket type %s", 1431 __FUNCTION__, socket_type_str(socket_type) ); 1510 1511 #if DEBUG_SOCKET_ERROR 1512 printk("\n[ERROR] in %s : illegal socket type / thread[%x,%x] / cycle %d", 1513 __FUNCTION__, pid, trdid, cycle ); 1514 #endif 1432 1515 return -1; 1433 1516 } … … 1475 1558 1476 1559 // start retransmission timer 1477 socket_alarm_start( socket_xp , TCP_RETRANSMISSION_TIMEOUT ); 1560 alarm_start( client_xp, 1561 hal_get_cycles() + CONFIG_SOCK_RETRY_TIMEOUT, 1562 &socket_alarm_handler, 1563 socket_xp ); 1478 1564 1479 1565 #if DEBUG_SOCKET_CONNECT … … 1494 1580 #endif 1495 1581 1496 // stop retransmission timer 1497 socket_alarm_stop();1582 // stop retransmission timer in thread descriptor 1583 alarm_stop( client_xp ); 1498 1584 1499 1585 // get socket state, tx_valid and tx_sts … … 1507 1593 1508 1594 // reset socket.tx_client 1509 hal_remote_s 32( XPTR( file_cxy , &socket_ptr->tx_client ) , XPTR_NULL );1595 hal_remote_s64( XPTR( file_cxy , &socket_ptr->tx_client ) , XPTR_NULL ); 1510 1596 1511 1597 if( cmd_status != CMD_STS_SUCCESS ) 1512 1598 { 1513 printk("\n[ERROR] in %s : for command TX_CONNECT / socket[%x,%d] / thread[%x,%x]\n", 1514 __FUNCTION__, pid, fdid, pid, trdid ); 1599 1600 #if DEBUG_SOCKET_ERROR 1601 printk("\n[ERROR] in %s reported by server / socket[%x,%d] / thread[%x,%x] / cycle %d\n", 1602 __FUNCTION__, pid, fdid, pid, trdid, cycle ); 1603 #endif 1515 1604 return -1; 1516 1605 } … … 1548 1637 trdid_t trdid = this->trdid; 1549 1638 1639 #if DEBUG_SOCKET_CLOSE || DEBUG_SOCKET_ERROR 1640 uint32_t cycle = (uint32_t)hal_get_cycles(); 1641 #endif 1642 1550 1643 // get pointers on socket descriptor 1551 1644 cxy_t file_cxy = GET_CXY( file_xp ); … … 1558 1651 1559 1652 #if DEBUG_SOCKET_CLOSE 1560 uint32_t cycle = 
(uint32_t)hal_get_cycles();1561 1653 if (DEBUG_SOCKET_CLOSE < cycle ) 1562 1654 printk("\n[%s] thread[%x,%x] enters for socket[%x,%d] / cycle %d\n", … … 1574 1666 (hal_remote_l64( XPTR( file_cxy , &socket_ptr->tx_client)) != XPTR_NULL) ) 1575 1667 { 1576 // release socket lock 1668 1669 #if DEBUG_SOCKET_ERROR 1670 printk("\n[ERROR] in %s : previous TX cmd on socket[%x,%d] / thread[%x,%x] / cycle %d\n", 1671 __FUNCTION__, pid, fdid, pid, trdid, cycle ); 1672 #endif 1577 1673 remote_queuelock_release( socket_lock_xp ); 1578 1579 printk("\n[ERROR] in %s : previous TX cmd on socket[%x,%d] / thread[%x,%x]\n",1580 __FUNCTION__, pid, fdid, pid, trdid );1581 1674 return -1; 1582 1675 } … … 1645 1738 1646 1739 // start retransmission timer 1647 socket_alarm_start( socket_xp , TCP_RETRANSMISSION_TIMEOUT ); 1740 alarm_start( client_xp, 1741 hal_get_cycles() + CONFIG_SOCK_RETRY_TIMEOUT, 1742 &socket_alarm_handler, 1743 socket_xp ); 1648 1744 1649 1745 #if DEBUG_SOCKET_CLOSE … … 1663 1759 __FUNCTION__, pid, trdid, pid, fdid, cycle ); 1664 1760 #endif 1665 // stop retransmission timer 1666 socket_alarm_stop();1761 // stop retransmission timer in thread descriptor 1762 alarm_stop( client_xp ); 1667 1763 1668 1764 // take socket lock … … 1674 1770 cmd_valid = hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_valid ) ); 1675 1771 1676 assert( __FUNCTION__, (((socket_state == TCP_STATE_CLOSED) || (cmd_status != CMD_STS_SUCCESS)) 1677 && (cmd_valid == false)), 1678 "illegal socket state when client thread resumes after TX_CLOSE\n" 1679 " socket_state = %s / cmd_status = %d / cmd_valid = %d\n", 1772 assert( __FUNCTION__, 1773 (((socket_state == TCP_STATE_CLOSED) || (cmd_status != CMD_STS_SUCCESS)) && (cmd_valid == false)), 1774 " socket_state = %s / cmd_status = %d / cmd_valid = %d", 1680 1775 socket_state_str(socket_state), cmd_status, cmd_valid ); 1681 1776 1682 1777 // reset socket.tx_client 1683 hal_remote_s 32( XPTR( file_cxy , &socket_ptr->tx_client ) , XPTR_NULL );1778 hal_remote_s64( XPTR( file_cxy , &socket_ptr->tx_client ) , XPTR_NULL ); 1684 1779 1685 1780 if( cmd_status != CMD_STS_SUCCESS ) // error reported 1686 1781 { 1687 printk("\n[ERROR] in %s for command TX_CLOSE / socket[%x,%d] / thread[%x,%x]\n", 1688 __FUNCTION__, pid, fdid, pid, this->trdid ); 1782 1783 #if DEBUG_SOCKET_ERROR 1784 printk("\n[ERROR] in %s for command TX_CLOSE / socket[%x,%d] / thread[%x,%x] / cycle %d\n", 1785 __FUNCTION__, pid, fdid, pid, this->trdid, cycle ); 1786 #endif 1689 1787 return -1; 1690 1788 } … … 1708 1806 //////////////////////////////////////////////////////////////////////////////////////// 1709 1807 // This static function is called by the two functions socket_send() & socket_recv(). 1710 // It can beused for both UDP and TCP sockets.1808 // It is used for both UDP and TCP sockets. 1711 1809 //////////////////////////////////////////////////////////////////////////////////////// 1712 1810 // @ is_send : send when true / receive when false. 1713 1811 // @ fdid : socket identifier. 1714 1812 // @ u_buf : pointer on user buffer in user space. 1715 // @ length : number of bytes .1813 // @ length : number of bytes in buffer. 
1716 1814 //////////////////////////////////////////////////////////////////////////////////////// 1717 1815 // Implementation note : The behavior is different for SEND & RECV … … 1749 1847 chdev_t * chdev_ptr; 1750 1848 cxy_t chdev_cxy; 1751 uint32_t buf_status; // number of bytes in rx_buf1752 1849 int32_t moved_bytes; // total number of moved bytes (fot return) 1753 xptr_t server_xp; // extended pointer on NIC_TX / NIC_RX server thread 1754 thread_t * server_ptr; // local pointer on NIC_TX / NIC_RX server thread 1755 kmem_req_t req; // KCM request for TX kernel buffer 1756 uint8_t * tx_buf; // kernel buffer for TX transfer 1757 bool_t cmd_valid; // from socket descriptor 1758 uint32_t cmd_status; // from socket descriptor 1759 uint32_t tx_todo; // from socket descriptor 1850 xptr_t server_xp; // ext pointer on NIC_TX / NIC_RX thread 1851 thread_t * server_ptr; // local pointer on NIC_TX / NIC_RX thread 1852 uint8_t * tx_buf; // pointer on kernel buffer for TX transfer 1853 bool_t cmd_valid; // RX or TX command from socket descriptor 1854 uint32_t cmd_sts; // RX or TX command from socket descriptor 1855 uint32_t tx_todo; // number of bytes still to send 1856 xptr_t rx_buf_xp; // extended pointer on socket rx_buf 1857 uint32_t rx_buf_sts; // current status of socket rx_buf 1760 1858 1761 1859 thread_t * this = CURRENT_THREAD; 1762 1860 process_t * process = this->process; 1861 1862 #if DEBUG_SOCKET_SEND || DEBUG_SOCKET_RECV || DEBUG_SOCKET_ERROR 1863 uint32_t cycle = (uint32_t)hal_get_cycles(); 1864 #endif 1865 1866 #if DEBUG_SOCKET_SEND || DEBUG_SOCKET_RECV 1867 if( is_send ) 1868 printk("\n[%s] thread[%x,%x] socket[%x,%d] enter : SEND / buf %x / length %d / cycle %d\n", 1869 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, u_buf, length, cycle ); 1870 else 1871 printk("\n[%s] thread[%x,%x] socket[%x,%d] enter : RECV / buf %x / length %d / cycle %d\n", 1872 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, u_buf, length, cycle ); 1873 #endif 1763 1874 1764 1875 // build extended pointer on client thread … … 1772 1883 if( file_xp == XPTR_NULL ) 1773 1884 { 1774 printk("\n[ERROR] in %s : undefined fdid %d / thread%x,%x]\n", 1775 __FUNCTION__, fdid , process->pid, this->trdid ); 1885 1886 #if DEBUG_SOCKET_ERROR 1887 printk("\n[ERROR] in %s : undefined fdid %d / thread%x,%x] / cycle %d\n", 1888 __FUNCTION__, fdid , process->pid, this->trdid, cycle ); 1889 #endif 1776 1890 return -1; 1777 1891 } … … 1787 1901 if( file_type != FILE_TYPE_SOCK ) 1788 1902 { 1789 printk("\n[ERROR] in %s : illegal file type %s / socket[%x,%d]\n", 1790 __FUNCTION__, vfs_inode_type_str(file_type), process->pid, fdid ); 1903 1904 #if DEBUG_SOCKET_ERROR 1905 printk("\n[ERROR] in %s : illegal file type thread[%x,%x] / cycle %d\n", 1906 __FUNCTION__, process->pid, this->trdid, cycle ); 1907 #endif 1791 1908 return -1; 1792 1909 } … … 1803 1920 nic_channel = hal_remote_l32( XPTR( file_cxy , &socket_ptr->nic_channel )); 1804 1921 1805 ///////////// 1922 ////////////////////////////////////////////////////// 1806 1923 if( is_send ) // SEND command 1807 1924 { 1808 1925 1809 1926 #if DEBUG_SOCKET_SEND 1810 uint32_tcycle = (uint32_t)hal_get_cycles();1927 cycle = (uint32_t)hal_get_cycles(); 1811 1928 if (DEBUG_SOCKET_SEND < cycle ) 1812 printk("\n[%s] thread[%x,%x] received SEND command for socket[%x,%d]/ length %d / cycle %d\n",1929 printk("\n[%s] thread[%x,%x] / socket[%x,%d] get SEND / length %d / cycle %d\n", 1813 1930 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, length, cycle ); 
1814 1931 #endif 1932 1815 1933 // check no previous TX command 1816 if( (hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_valid )) == true) || 1817 (hal_remote_l64( XPTR( file_cxy , &socket_ptr->tx_client)) != XPTR_NULL) ) 1934 if( hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_valid )) == true ) 1818 1935 { 1819 // release socket lock 1936 1937 #if DEBUG_SOCKET_ERROR 1938 printk("\n[ERROR] in %s : previous TX command / socket[%x,%d] / thread[%x,%x] / cycle %d\n", 1939 __FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle ); 1940 #endif 1820 1941 remote_queuelock_release( socket_lock_xp ); 1821 1822 printk("\n[ERROR] in %s : previous TX command / socket[%x,%d] / thread[%x,%x]\n",1823 __FUNCTION__, process->pid, fdid, process->pid, this->trdid );1824 1942 return -1; 1825 1943 } 1826 1944 1827 // allocate a temporary kernel buffer 1828 req.type = KMEM_KCM; 1829 req.order = bits_log2( length ); 1830 req.flags = AF_NONE; 1831 tx_buf = kmem_alloc( &req ); 1832 1833 if( tx_buf == NULL ) 1834 { 1835 // release socket lock 1836 remote_queuelock_release( socket_lock_xp ); 1837 1838 printk("\n[ERROR] in %s : no memory for tx_buf / socket[%x,%d] / thread[%x,%x]\n", 1839 __FUNCTION__, process->pid, fdid, process->pid, this->trdid ); 1840 return -1; 1841 } 1842 1843 // copy data from user u_buf to kernel tx_buf 1844 hal_copy_from_uspace( XPTR( local_cxy , tx_buf ), 1945 // get tx_buf pointer from socket pointer 1946 tx_buf = (uint8_t*)hal_remote_lpt( XPTR( file_cxy , &socket_ptr->tx_buf )); 1947 1948 // copy data from user u_buf to kernel socket tx_buf 1949 hal_copy_from_uspace( XPTR( file_cxy , tx_buf ), 1845 1950 u_buf, 1846 1951 length ); 1952 #if DEBUG_SOCKET_SEND 1953 if (DEBUG_SOCKET_SEND < cycle ) 1954 printk("\n[%s] thread[%x,%x] / socket[%x,%d] copied %d bytes to tx_buf (%x,%x)\n", 1955 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, length, file_cxy, tx_buf ); 1956 putb("tx_buf : 16 first data bytes" , tx_buf , 16 ); 1957 #endif 1847 1958 1848 1959 // register command in socket descriptor … … 1852 1963 hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_len ) , length ); 1853 1964 hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_todo ) , length ); 1965 hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_ack ) , 0 ); 1854 1966 hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_valid ) , true ); 1855 1967 … … 1869 1981 thread_unblock( server_xp , THREAD_BLOCKED_CLIENT ); 1870 1982 1871 // start retransmission timer 1872 socket_alarm_start( socket_xp , TCP_RETRANSMISSION_TIMEOUT ); 1983 // start retransmission timer for TCP socket 1984 if( socket_type == SOCK_STREAM ) 1985 { 1986 alarm_start( client_xp, 1987 hal_get_cycles() + CONFIG_SOCK_RETRY_TIMEOUT, 1988 &socket_alarm_handler, 1989 socket_xp ); 1990 } 1873 1991 1874 1992 #if DEBUG_SOCKET_SEND 1875 cycle = (uint32_t)hal_get_cycles();1876 1993 if( DEBUG_SOCKET_SEND < cycle ) 1877 printk("\n[%s] thread[%x,%x] socket[%x,%d] register SEND => blocks on <IO> / cycle %d\n",1878 __FUNCTION__, process->pid, this->trdid, process->pid, fdid , cycle);1994 printk("\n[%s] thread[%x,%x] / socket[%x,%d] registers SEND => blocks on <IO>\n", 1995 __FUNCTION__, process->pid, this->trdid, process->pid, fdid ); 1879 1996 #endif 1880 1997 // client thread blocks itself and deschedules … … 1885 2002 cycle = (uint32_t)hal_get_cycles(); 1886 2003 if( DEBUG_SOCKET_SEND < cycle ) 1887 printk("\n[%s] thread[%x,%x] socket[%x,%d] for SEND resumes/ cycle %d\n",2004 printk("\n[%s] thread[%x,%x] / socket[%x,%d] resumes for SEND / cycle %d\n", 1888 2005 
__FUNCTION__, process->pid, this->trdid, process->pid, fdid, cycle ); 1889 2006 #endif 1890 // stop retransmission timer 1891 socket_alarm_stop(); 1892 1893 // take socket lock 2007 // stop retransmission timer for TCP socket 2008 if( socket_type == SOCK_STREAM ) 2009 { 2010 alarm_stop( client_xp ); 2011 } 2012 2013 // take socket lock 1894 2014 remote_queuelock_acquire( socket_lock_xp ); 1895 2015 … … 1897 2017 tx_todo = hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_todo )); 1898 2018 cmd_valid = hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_valid )); 1899 cmd_st atus= hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_sts ));2019 cmd_sts = hal_remote_l32( XPTR( file_cxy , &socket_ptr->tx_sts )); 1900 2020 1901 2021 // reset tx_client in socket descriptor … … 1906 2026 1907 2027 // check SEND command completed when TX client thread resumes 1908 assert( __FUNCTION__, (((tx_todo == 0) || (cmd_status != CMD_STS_SUCCESS)) && (cmd_valid == false)), 1909 "illegal socket state when client thread resumes after TX_SEND\n" 1910 " tx_todo = %d / tx_status = %d / tx_valid = %d\n", 1911 tx_todo, cmd_status, cmd_valid ); 1912 1913 // release the tx_buf 1914 req.ptr = tx_buf; 1915 kmem_free( &req ); 1916 1917 if( cmd_status != CMD_STS_SUCCESS ) 2028 assert( __FUNCTION__, 2029 (((tx_todo == 0) || (cmd_sts != CMD_STS_SUCCESS)) && (cmd_valid == false)), 2030 "client thread resumes from SEND / bad state : tx_todo %d / tx_sts %d / tx_valid %d", 2031 tx_todo, cmd_sts, cmd_valid ); 2032 2033 if( cmd_sts != CMD_STS_SUCCESS ) 1918 2034 { 1919 2035 1920 #if DEBUG_SOCKET_SEND 1921 cycle = (uint32_t)hal_get_cycles(); 1922 if( DEBUG_SOCKET_RECV < cycle ) 1923 printk("\n[%s] error %s for TX_SEND / socket[%x,%d] / thread[%x,%x]\n", 1924 __FUNCTION__, socket_cmd_sts_str(cmd_status), process->pid, fdid, process->pid, this->trdid ); 2036 #if DEBUG_SOCKET_ERROR 2037 printk("\n[ERROR] in %s : reported for SEND / socket[%x,%d] / thread[%x,%x] / cycle %d\n", 2038 __FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle ); 1925 2039 #endif 1926 2040 return -1; … … 1932 2046 cycle = (uint32_t)hal_get_cycles(); 1933 2047 if (DEBUG_SOCKET_SEND < cycle ) 1934 printk("\n[%s] thread[%x,%x] success for SEND / socket[%x,%d] / length%d / cycle %d\n",2048 printk("\n[%s] thread[%x,%x] SEND success / socket[%x,%d] / bytes %d / cycle %d\n", 1935 2049 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, length, cycle ); 1936 2050 #endif … … 1940 2054 } // end SEND command 1941 2055 1942 //// 1943 else // RECV command2056 ///////////////////////////////////////////////////////////// 2057 else // RECV command 1944 2058 { 1945 2059 1946 2060 #if DEBUG_SOCKET_RECV 1947 uint32_t cycle = (uint32_t)hal_get_cycles(); 1948 if (DEBUG_SOCKET_SEND < cycle ) 1949 printk("\n[%s] thread[%x,%x] received RECV command for socket[%x,%d] / length %d / cycle %d\n", 2061 if (DEBUG_SOCKET_RECV < cycle ) 2062 printk("\n[%s] thread[%x,%x] / socket[%x,%d] get RECV / length %d / cycle %d\n", 1950 2063 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, length, cycle ); 1951 2064 #endif 1952 2065 // check no previous RX command 1953 if( (hal_remote_l32( XPTR( file_cxy , &socket_ptr->rx_valid )) == true) || 1954 (hal_remote_l64( XPTR( file_cxy , &socket_ptr->rx_client)) != XPTR_NULL) ) 2066 if( hal_remote_l32( XPTR( file_cxy , &socket_ptr->rx_valid )) == true ) 1955 2067 { 1956 // release socket lock 2068 2069 #if DEBUG_SOCKET_ERROR 2070 printk("\n[ERROR] in %s : previous RX command on socket[%x,%d] / thread[%x,%x] / cycle %d\n", 2071 
__FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle ); 2072 #endif 1957 2073 remote_queuelock_release( socket_lock_xp ); 1958 1959 printk("\n[ERROR] in %s : previous RX command on socket[%x,%d] / thread[%x,%x]\n",1960 __FUNCTION__, process->pid, fdid, process->pid, this->trdid );1961 2074 return -1; 1962 2075 } … … 1969 2082 1970 2083 #if DEBUG_SOCKET_RECV 1971 uint32_tcycle = (uint32_t)hal_get_cycles();2084 cycle = (uint32_t)hal_get_cycles(); 1972 2085 if( DEBUG_SOCKET_RECV < cycle ) 1973 printk("\n[%s] thread[%x,%x] socket[%x,%d] TCP connection closed / cycle %d\n",2086 printk("\n[%s] thread[%x,%x] / socket[%x,%d] TCP connection closed / cycle %d\n", 1974 2087 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, cycle ); 1975 2088 #endif 1976 2089 return 0; 1977 2090 } 1978 // build extended pointer on socket.rx_buf 1979 xptr_t rx_buf_xp = XPTR( file_cxy , &socket_ptr->rx_buf ); 1980 1981 // get rx_buf status 1982 buf_status = remote_buf_status( rx_buf_xp ); 1983 1984 if( buf_status == 0 ) 2091 2092 // build extended pointer on socket rx_buf 2093 rx_buf_xp = XPTR( file_cxy , &socket_ptr->rx_buf ); 2094 2095 // get socket rx_buf status 2096 rx_buf_sts = remote_buf_status( rx_buf_xp ); 2097 2098 // register RECV command and deschedule when rx_buf empty 2099 if( rx_buf_sts == 0 ) 1985 2100 { 1986 2101 // registers RX_RECV command in socket descriptor … … 1993 2108 1994 2109 #if DEBUG_SOCKET_RECV 1995 uint32_t cycle = (uint32_t)hal_get_cycles();1996 2110 if( DEBUG_SOCKET_RECV < cycle ) 1997 printk("\n[%s] thread[%x,%x] socket[%x,%d] rx_buf empty => blocks on <IO> / cycle %d\n",1998 __FUNCTION__, process->pid, this->trdid, process->pid, fdid , cycle);2111 printk("\n[%s] thread[%x,%x] socket[%x,%d] for RECV : rx_buf empty => blocks on <IO>\n", 2112 __FUNCTION__, process->pid, this->trdid, process->pid, fdid ); 1999 2113 #endif 2000 2114 // client thread blocks itself and deschedules … … 2005 2119 cycle = (uint32_t)hal_get_cycles(); 2006 2120 if( DEBUG_SOCKET_RECV < cycle ) 2007 printk("\n[%s] thread[%x,%x] socket[%x,%d] for RECV resumes / cycle %d\n",2121 printk("\n[%s] thread[%x,%x] socket[%x,%d] for RECV : resumes / cycle %d\n", 2008 2122 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, cycle ); 2009 2123 #endif … … 2011 2125 remote_queuelock_acquire( socket_lock_xp ); 2012 2126 2013 // get rx_stsand rx_buf status2127 // get command status, command valid, and rx_buf status 2014 2128 cmd_valid = hal_remote_l32( XPTR( file_cxy , &socket_ptr->rx_valid )); 2015 cmd_st atus= hal_remote_l32( XPTR( file_cxy , &socket_ptr->rx_sts ));2016 buf_status = remote_buf_status( rx_buf_xp );2129 cmd_sts = hal_remote_l32( XPTR( file_cxy , &socket_ptr->rx_sts )); 2130 rx_buf_sts = remote_buf_status( rx_buf_xp ); 2017 2131 2018 assert( __FUNCTION__, (((buf_status != 0) || (cmd_status != CMD_STS_SUCCESS)) && (cmd_valid == false)), 2019 "illegal socket state when client thread resumes after RX_RECV\n" 2020 " buf_status = %d / rx_sts = %d / rx_valid = %d\n", 2021 buf_status , cmd_status , cmd_valid ); 2022 2023 // reset rx_client in socket descriptor 2024 hal_remote_s64( XPTR( file_cxy , &socket_ptr->rx_client ) , XPTR_NULL ); 2025 2026 // reset rx_buf for an UDP socket 2027 if( socket_type == SOCK_DGRAM ) remote_buf_reset( rx_buf_xp ); 2028 2029 // release socket lock 2030 remote_queuelock_release( socket_lock_xp ); 2031 2032 if( cmd_status == CMD_STS_EOF ) // EOF (remote close) reported 2132 assert( __FUNCTION__, (cmd_valid == false), 2133 "client thread resumes from RECV but 
rx_valid is true" ); 2134 2135 if( cmd_sts == CMD_STS_EOF ) // EOF reported by RX server 2033 2136 { 2034 2137 2035 2138 #if DEBUG_SOCKET_RECV 2036 cycle = (uint32_t)hal_get_cycles(); 2037 2139 if( DEBUG_SOCKET_RECV < cycle ) 2038 printk("\n[%s] EOF for RX_RECV /socket[%x,%d] / thread[%x,%x]\n",2140 printk("\n[%s] EOF received for socket[%x,%d] / thread[%x,%x]\n", 2039 2141 __FUNCTION__, process->pid, fdid, process->pid, this->trdid ); 2040 2142 #endif 2143 // release socket lock 2144 remote_queuelock_release( socket_lock_xp ); 2145 2041 2146 return 0; 2042 2147 } 2043 else if( cmd_status != CMD_STS_SUCCESS ) // other error reported2148 else if( cmd_sts != CMD_STS_SUCCESS ) // error reported by RX server 2044 2149 { 2045 2150 2046 #if DEBUG_SOCKET_RECV 2047 cycle = (uint32_t)hal_get_cycles(); 2048 if( DEBUG_SOCKET_RECV < cycle ) 2049 printk("\n[%s] error %s for RX_RECV / socket[%x,%d] / thread[%x,%x]\n", 2050 __FUNCTION__, socket_cmd_sts_str(cmd_status), process->pid, fdid, process->pid, this->trdid ); 2051 #endif 2151 #if DEBUG_SOCKET_ERROR 2152 printk("\n[ERROR] in %s : rx_server for socket[%x,%d] / thread[%x,%x] / cycle %d\n", 2153 __FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle ); 2154 #endif 2155 // release socket lock 2156 remote_queuelock_release( socket_lock_xp ); 2157 2052 2158 return -1; 2053 2159 } 2054 2160 else if( rx_buf_sts == 0 ) // abnormally empty rx_buf 2161 { 2162 2163 #if DEBUG_SOCKET_ERROR 2164 printk("\n[ERROR] in %s : rx_buf empty for socket[%x,%d] / thread[%x,%x] / cycle %d\n", 2165 __FUNCTION__, process->pid, fdid, process->pid, this->trdid, cycle ); 2166 #endif 2167 // release socket lock 2168 remote_queuelock_release( socket_lock_xp ); 2169 2170 return -1; 2171 } 2055 2172 } 2056 2173 2057 2174 // number of bytes extracted from rx_buf cannot be larger than u_buf size 2058 moved_bytes = ( length < buf_status ) ? length : buf_status;2175 moved_bytes = ( length < rx_buf_sts ) ?
length : rx_buf_sts; 2059 2176 2060 2177 // move data from kernel rx_buf to user u_buf … … 2062 2179 u_buf, 2063 2180 moved_bytes ); 2064 #if DEBUG_SOCKET_ SEND2065 cycle = (uint32_t)hal_get_cycles(); 2066 if (DEBUG_SOCKET_ SEND< cycle )2067 printk("\n[%s] thread[%x,%x] success for RECV / socket[%x,%d] / length%d / cycle %d\n",2181 #if DEBUG_SOCKET_RECV 2182 cycle = (uint32_t)hal_get_cycles(); 2183 if (DEBUG_SOCKET_RECV < cycle ) 2184 printk("\n[%s] thread[%x,%x] : RECV success / socket[%x,%d] / bytes %d / cycle %d\n", 2068 2185 __FUNCTION__, process->pid, this->trdid, process->pid, fdid, moved_bytes, cycle ); 2069 2186 #endif 2187 // release socket lock 2188 remote_queuelock_release( socket_lock_xp ); 2189 2070 2190 return moved_bytes; 2071 2191 … … 2095 2215 } // end socket_recv() 2096 2216 2217 //////////////////////////////////// 2218 int socket_sendto( uint32_t fdid, 2219 uint8_t * u_buf, 2220 uint32_t length, 2221 uint32_t remote_ip, 2222 uint16_t remote_port ) 2223 { 2224 printk("\n[ERROR] in %s : this function is not implemented yet\n", 2225 __FUNCTION__, fdid, u_buf, length, remote_ip, remote_port ); 2226 return -1; 2227 2228 } // end socket_sendto() 2229 2230 ////////////////////////////////////// 2231 int socket_recvfrom( uint32_t fdid, 2232 uint8_t * u_buf, 2233 uint32_t length, 2234 uint32_t remote_ip, 2235 uint16_t remote_port ) 2236 { 2237 printk("\n[ERROR] in %s : this function is not implemented yet\n", 2238 __FUNCTION__, fdid, u_buf, length, remote_ip, remote_port ); 2239 return -1; 2240 2241 } // end socket_recvfrom() 2242 2097 2243 //////////////////////////////////////////// 2098 2244 void socket_display( xptr_t socket_xp, 2099 const char * func_str ) 2100 { 2245 const char * func_str, 2246 const char * string ) 2247 { 2248 uint32_t cycle = (uint32_t)hal_get_cycles(); 2249 2101 2250 socket_t * socket = GET_PTR( socket_xp ); 2102 2251 cxy_t cxy = GET_CXY( socket_xp ); … … 2111 2260 uint32_t remote_port = hal_remote_l32( XPTR( cxy , &socket->remote_port )); 2112 2261 uint32_t tx_valid = hal_remote_l32( XPTR( cxy , &socket->tx_valid )); 2262 xptr_t tx_client = hal_remote_l64( XPTR( cxy , &socket->tx_client )); 2113 2263 uint32_t tx_cmd = hal_remote_l32( XPTR( cxy , &socket->tx_cmd )); 2114 2264 uint32_t tx_sts = hal_remote_l32( XPTR( cxy , &socket->tx_sts )); … … 2118 2268 uint32_t tx_nxt = hal_remote_l32( XPTR( cxy , &socket->tx_nxt )); 2119 2269 uint32_t tx_wnd = hal_remote_l32( XPTR( cxy , &socket->tx_wnd )); 2270 uint32_t tx_ack = hal_remote_l32( XPTR( cxy , &socket->tx_ack )); 2120 2271 uint32_t rx_valid = hal_remote_l32( XPTR( cxy , &socket->rx_valid )); 2272 xptr_t rx_client = hal_remote_l64( XPTR( cxy , &socket->rx_client )); 2121 2273 uint32_t rx_cmd = hal_remote_l32( XPTR( cxy , &socket->rx_cmd )); 2122 2274 uint32_t rx_sts = hal_remote_l32( XPTR( cxy , &socket->rx_sts )); 2123 2275 uint32_t rx_nxt = hal_remote_l32( XPTR( cxy , &socket->rx_nxt )); 2124 2276 uint32_t rx_wnd = hal_remote_l32( XPTR( cxy , &socket->rx_wnd )); 2125 uint32_t rx_irs = hal_remote_l32( XPTR( cxy , &socket->rx_irs )); 2126 2127 if( func_str == NULL ) 2128 { 2129 printk("\n****** socket[%x,%d] / xptr[%x,%x]*****\n", 2130 pid, fdid, cxy, socket ); 2277 uint32_t rx_irs = hal_remote_l32( XPTR( cxy , &socket->rx_irs )); 2278 2279 remote_queuelock_t * lock_ptr = &socket->lock; 2280 uint32_t taken = hal_remote_l32( XPTR( cxy , &lock_ptr->taken )); 2281 2282 thread_t * tx_ptr = GET_PTR( tx_client ); 2283 cxy_t tx_cxy = GET_CXY( tx_client ); 2284 trdid_t tx_tid = hal_remote_l32( XPTR( tx_cxy , 
&tx_ptr->trdid )); 2285 2286 thread_t * rx_ptr = GET_PTR( rx_client ); 2287 cxy_t rx_cxy = GET_CXY( rx_client ); 2288 trdid_t rx_tid = hal_remote_l32( XPTR( rx_cxy , &rx_ptr->trdid )); 2289 2290 if( string == NULL ) 2291 { 2292 printk("\n****** socket[%x,%d] / lock %d / in %s / cycle %d *****\n", 2293 pid, fdid, taken, func_str, cycle ); 2131 2294 } 2132 2295 else 2133 2296 { 2134 printk("\n***** socket[%x,%d] / xptr[%x,%x] / from %s *****\n", 2135 pid, fdid, cxy, socket, func_str ); 2136 } 2137 printk(" - state %s / channel %d\n" 2138 " - local_addr %x / local_port %x\n" 2139 " - remote_addr %x / remote_port %x\n" 2140 " - tx_valid %d (%s) / tx_sts %d / tx_len %x / tx_todo %x\n" 2141 " - tx_una %x / tx_nxt %x / tx_wnd %x\n" 2142 " - rx_valid %d (%s) / rx_sts %d\n" 2143 " - rx_nxt %x / rx_wnd %x / rx_irs %x\n", 2144 socket_state_str(state), channel , 2145 local_addr, local_port, 2146 remote_addr, remote_port, 2147 tx_valid, socket_cmd_type_str(tx_cmd), tx_sts, tx_len, tx_todo, 2148 tx_una, tx_nxt, tx_wnd, 2149 rx_valid, socket_cmd_type_str(rx_cmd), rx_sts, 2150 rx_nxt, rx_wnd, rx_irs ); 2297 printk("\n***** socket[%x,%d] / lock %d / in %s %s / cycle %d *****\n", 2298 pid, fdid, taken, func_str, string, cycle ); 2299 } 2300 printk(" - state %s / channel %d / local [%x,%x] / remote[%x,%x]\n" 2301 " - tx : valid %d / client [%x,%x] / cmd %s \n" 2302 " sts %d / len %x / todo %x / ack %x / una %x / nxt %x / wnd %x\n" 2303 " - rx : valid %d / client [%x,%x] / cmd %s\n" 2304 " sts %d / nxt %x / wnd %x / irs %x\n", 2305 socket_state_str(state), channel, 2306 local_addr, local_port, remote_addr, remote_port, 2307 tx_valid, pid, tx_tid, socket_cmd_type_str(tx_cmd), 2308 tx_sts, tx_len, tx_todo, tx_ack, tx_una, tx_nxt, tx_wnd, 2309 rx_valid, pid, rx_tid, socket_cmd_type_str(rx_cmd), 2310 rx_sts, rx_nxt, rx_wnd, rx_irs ); 2151 2311 2152 2312 } // end socket_display() -
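Note on the synchronisation used throughout this file : the tx_valid / rx_valid set/reset handshake between a client thread and a NIC server thread can be hard to follow in the remote-access code above. The fragment below is a minimal user-space model of that protocol, written with POSIX threads. The names (lock, wake_srv, wake_cli) are hypothetical stand-ins for the kernel's socket queuelock and the thread_block() / thread_unblock() primitives; it is an illustrative sketch, not kernel code.

    /* minimal user-space model of the tx_valid set/reset handshake */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock     = PTHREAD_MUTEX_INITIALIZER;  /* socket lock    */
    static pthread_cond_t  wake_srv = PTHREAD_COND_INITIALIZER;   /* BLOCKED_CLIENT */
    static pthread_cond_t  wake_cli = PTHREAD_COND_INITIALIZER;   /* BLOCKED_IO     */
    static bool            tx_valid = false;   /* command posted, not yet consumed  */
    static int             tx_sts   = -1;      /* command status written by server  */

    static void * server( void * arg )         /* models the NIC_TX server thread   */
    {
        (void)arg;
        pthread_mutex_lock( &lock );
        while( tx_valid == false )             /* wait for a posted command         */
            pthread_cond_wait( &wake_srv , &lock );
        tx_valid = false;                      /* server resets the flip-flop       */
        tx_sts   = 0;                          /* CMD_STS_SUCCESS                   */
        pthread_cond_signal( &wake_cli );      /* unblock the client                */
        pthread_mutex_unlock( &lock );
        return NULL;
    }

    int main( void )                           /* models the TX client thread       */
    {
        pthread_t tid;
        pthread_create( &tid , NULL , server , NULL );

        pthread_mutex_lock( &lock );
        if( tx_valid )                         /* "previous TX command" check       */
        {
            pthread_mutex_unlock( &lock );
            return -1;
        }
        tx_valid = true;                       /* client posts the command          */
        pthread_cond_signal( &wake_srv );
        while( tx_valid )                      /* block until the server resets it  */
            pthread_cond_wait( &wake_cli , &lock );
        pthread_mutex_unlock( &lock );

        printf( "command completed / sts %d\n" , tx_sts );
        pthread_join( tid , NULL );
        return 0;
    }

The invariant modeled here is the one checked by the asserts above : when the client resumes, the server has already reset the flip-flop and written a command status.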
trunk/kernel/kern/ksocket.h
r669 r683 1 1 /* 2 * ksocket.h - kernel socket de scriptor and API definition.2 * ksocket.h - kernel socket definition. 3 3 * 4 4 * Authors Alain Greiner (2016,2017,2018,2019,2020) … … 40 40 * existing sockets is split in as many subsets as the number of NIC channels, in order 41 41 * to parallelize the transfers. The distribution key defining the channel index 42 * is computed from the (remote_addr/remote_port) couple :by the NIC hardware for the43 * RX packets; by the software for the TX packets ,using a dedicated NIC driver function.42 * is computed from the (remote_addr/remote_port) couple (by the NIC hardware for the 43 * RX packets; by the software for the TX packets) using a dedicated NIC driver function. 44 44 * All sockets that have the same key share the same channel, and each socket is 45 45 * therefore linked to two chdevs : NIC_TX[key] & NIC_RX[key]. … … 52 52 * to the associated TX server (mainly used to handle the TCP ACKs). 53 53 * - the kernel "crq" buffer allows to store concurrent remote client connect requests 54 * to a local server socket. It is allocated in socket.54 * to a local server socket. 55 55 * 56 56 * The synchronisation mechanism between the client threads and the server threads 57 57 * is different for the TX and RX directions: 58 58 * 59 * 1) TX stream59 * 1) TX direction (sent packets) 60 60 * 61 61 * - The internal API between the TX client thread and the NIC_TX server thread defines 62 62 * four command types, stored in the "tx_cmd" variable of the socket descriptor: 63 * . SOCKET_TX_CONNECT : TCP client request to start the 3 steps connection handshake.64 * . SOCKET_TX_ACCEPT : TCP server request to accept one pending connection request.63 * . SOCKET_TX_CONNECT : request to start the connection handshake (TCP client only). 64 * . SOCKET_TX_ACCEPT : request to accept one connection request (TCP server only). 65 65 * . SOCKET_TX_SEND : local (UDP/TCP) request to send data to a remote (UDP/TCP). 66 66 * . SOCKET_TX_CLOSE : local TCP socket request remote TCP socket to close connection. … … 69 69 * reset the "tx_error" field, and registers itself in the "tx_client" field. 70 70 * Then, it unblocks the TX server thread from the BLOCKED_CLIENT condition, blocks itself 71 * on the BLOCKED_IO condition, and deschedules. For a SEND, the "tx_buf" kernel buffer72 * is dynamicaly allocated by the client thread, that copies the payload from the user73 * buffer to this kernel buffer,that is used as retransmission buffer, when required.71 * on the BLOCKED_IO condition, and deschedules. For a SEND, the client thread copies 72 * the payload contained in the "u_buf" user buffer to the socket "tx_buf" kernel buffer 73 * that is used as retransmission buffer, when required. 74 74 * - A command is valid for the TX server when the socket descriptor "tx_valid" is true. 75 * For a SEND command, the "tx_valid" is reset by the NIC_TX server when the last byte has76 * b een sent, but the TX client thread is unblocked by the NIC_RX server thread only when77 * the last byte has been acknowledged, or to report an error.75 * For a SEND command, the "tx_valid" is reset by the NIC_TX server thread when the last 76 * byte has been sent, but the TX client thread is unblocked by the NIC_RX server thread 77 * only when the last byte has been acknowledged, or to report an error. 
78 78 * For the CONNECT, ACCEPT and CLOSE commands, the "tx_valid" is reset by the NIC_TX server 79 79 * when the first segment of the handshake has been sent, but the TX client thread is … … 88 88 * When "tx_valid" or "r2t_valid" are true, the TX server thread builds and sends an UDP 89 89 * packet or TCP segment. A single SEND command can require a large number of TCP 90 * segments to move a big data buffer . 90 * segments to move a big data buffer, before unblocking the client thread. 91 91 * This TX server thread blocks and deschedules on the BLOCKED_ISR condition when 92 92 * the NIC_RX queue is full . It is unblocked by the hardware NIC_TX_ISR. 93 * - In order to detect and report error for multiple simultaneous TX accesses to the same 94 * socket, the client thread makes a double check before posting a new TX command : 93 * - As multiple simultaneous TX accesses to the same socket are forbidden, the client 94 * thread makes a double check before posting a new TX command : 95 95 * the "tx_valid" field must be false, and the "tx_client" field must be XPTR_NULL. 96 96 * The "tx_valid" field is reset by the TX server thread, and the "tx_client" … … 136 136 * 3) R2T queue 137 137 * 138 * To implement the TCP "3 steps handshake" protocol for connection or to send RST, 139 * the RX server thread can directly request the associated TX server thread to send 140 * control packets in the TX stream, using a dedicate R2T (RX to TX) FIFO stored in 141 * the socket descriptor. Each R2T request occupy one byte in this R2T queue. 138 * The RX server thread can directly request the associated TX server thread to send 139 * control packets in the TX stream, using a dedicated R2T (RX to TX) queue embedded in 140 * the socket descriptor, and implemented as a remote_buf_t FIFO. 141 * It is used for TCP acknowledgement and for the TCP three-steps handshake. 142 * Each R2T request occupies exactly one byte defining the TCP flags to be set. 142 143 * 143 144 * 4) CRQ queue 144 145 * 145 146 * The remote CONNECT requests received by a TCP socket (SYN segments) are stored in a 146 * dedicated CRQ FIFO stored in the local socket descriptor. These requests are consumed 147 * by the local client thread executing an ACCEPT. 148 * Each CRQ request occupy sizeof(connect_request_t) bytes in this CRQ queue. 147 * dedicated CRQ queue, and consumed by the local client thread executing an ACCEPT. 148 * This CRQ queue is embedded in the local socket descriptor, and implemented as a 149 * remote_buf_t FIFO. Each request occupies sizeof(connect_request_t) bytes in the queue. 149 150 * The connect_request_t structure containing the request arguments is defined below. 150 151 * … … 171 172 * This enum defines the set of command status that can be returned by the NIC_RX and 172 173 * NIC_TX server threads to the TX & RX client threads. 173 * The success must be signaled by the null value / the various failure cases are 174 * signaled by a non-null value. 175 174 ****************************************************************************************/ 176 175 typedef enum socket_cmd_sts_e … … 217 216 tcp_socket_state_t; 218 217 219 /**************************************************************************************** * 218 /**************************************************************************************** 220 219 * This structure defines one connection request, registered in the CRQ queue.
221 *************************************************************************************** */220 ***************************************************************************************/ 222 221 typedef struct connect_request_s 223 222 { … … 229 228 connect_request_t; 230 229 231 /**************************************************************************************** *230 /**************************************************************************************** 232 231 * This structure defines the socket descriptor. 233 *************************************************************************************** */232 ***************************************************************************************/ 234 233 typedef struct socket_s 235 234 { … … 253 252 uint8_t * tx_buf; /*! pointer on TX data buffer in kernel space */ 254 253 uint32_t tx_len; /*! number of data bytes for a SEND command */ 255 uint32_t tx_todo; /*! number of bytes not yet sent 256 xlist_entry_t tx_temp; /*! temporary list of sockets (root in TX chdev)*/254 uint32_t tx_todo; /*! number of bytes not yet sent in tx_buf */ 255 uint32_t tx_ack; /*! number of bytes acknowledged in tx_buf */ 257 256 258 257 xlist_entry_t rx_list; /*! all sockets attached to same NIC_RX channel */ … … 271 270 uint32_t tx_wnd; /*! number of acceptable bytes in TX_data stream */ 272 271 uint32_t tx_una; /*! first unack byte in TX_data stream */ 272 273 273 uint32_t rx_nxt; /*! next expected byte in RX_data stream */ 274 274 uint32_t rx_wnd; /*! number of acceptable bytes in RX_data stream */ … … 319 319 320 320 /**************************************************************************************** 321 * This function is called by the dev_nic_rx_handle_tcp() function, executed by the322 * NIC_RX[channel] server thread, to register a R2T request defined by the <flags>321 * This blocking function is called by the dev_nic_rx_handle_tcp() function, executed by 322 * the NIC_RX[channel] server thread, to register a R2T request defined by the <flags> 323 323 * argument in the socket R2T queue, specified by the <queue_xp> argument. 324 324 * This function unblocks the NIC_TX[channel] server thread, identified by the <channel> 325 325 * argumentfrom the THREAD_BLOCKED_CLIENT condition. 326 * 327 * WARNING : It contains a waiting loop and return only when an empty slot has been 328 * found in the R2T queue. 326 329 **************************************************************************************** 327 330 * @ queue_xp : [in] extended pointer on the R2T qeue descriptor. … … 330 333 ***************************************************************************************/ 331 334 void socket_put_r2t_request( xptr_t queue_xp, 332 uint 32_tflags,335 uint8_t flags, 333 336 uint32_t channel ); 337 338 /**************************************************************************************** 339 * This function is called by the nic_tx_server thread to extract an R2T request 340 * (one byte) from a R2T queue, specified by the <queue_xp> argument, to the buffer 341 * defined by the <flags> argument. 342 ***************************************************************************************** 343 * @ queue_xp : [in] extended pointer on the CRQ queue descriptor. 344 * @ flags : [out] buffer for TCP flags to be set. 345 * @ return 0 if success / return -1 if queue empty. 
346 ***************************************************************************************/ 347 error_t socket_get_r2t_request (xptr_t queue_xp, 348 uint8_t * flags ); 334 349 335 350 /**************************************************************************************** … … 339 354 * by the <queue_xp> argument. 340 355 **************************************************************************************** 341 * @ queue_xp : [in] extended pointer on the CRQ qeue descriptor. 356 * @ queue_xp : [in] extended pointer on the CRQ queue descriptor. 342 357 * @ remote_addr : [in] remote socket IP address. 343 358 * @ remote_port : [in] remote socket port. … … 374 389 **************************************************************************************** 375 390 * @ socket_xp : [in] extended pointer on socket descriptor. 376 $ @ string : [in] name of calling function. 391 * @ func_str : [in] name of calling function. 392 * @ string : [in] string defining the calling context (can be NULL) 377 393 ***************************************************************************************/ 378 394 void socket_display( xptr_t socket_xp, 379 const char * func_str ); 395 const char * func_str, 396 const char * string ); 380 397 381 398 … … 464 481 * This blocking function contains two blocking conditions because it requests services 465 482 * to both the NIC_RX server thread, and the NIC_TX server thread. 466 * It can be split in five steps:483 * It is structured in five steps: 467 484 * 1) It makes several checks on the listening socket domain, type, and state. 468 485 * 2) If the socket CRQ queue is empty, the function makes a SOCKET_RX_ACCEPT command … … 529 546 * arguments, to a connected (TCP or UDP) socket, identified by the <fdid> argument. 530 547 * The work is actually done by the NIC_TX server thread, and the synchronisation 548 * between the client and the server threads uses the "tx_valid" set/reset flip-flop: 549 * The client thread registers itself in the socket descriptor, registers in the queue 550 * rooted in the NIC_TX[index] chdev, sets "tx_valid", unblocks the server thread, and 551 * finally blocks on THREAD_BLOCKED_IO, and deschedules. 552 * When the TX server thread completes the command (all data has been sent for an UDP 553 * socket, or acknowledged for a TCP socket), the server thread resets "tx_valid" and 554 * unblocks the client thread. 555 * This function can be called by a thread running in any cluster. 556 * WARNING : This implementation does not support several concurrent SEND commands 557 * on the same socket, as only one TX thread can register in a given socket. 558 **************************************************************************************** 559 * @ fdid : [in] file descriptor index identifying the socket. 560 * @ u_buf : [in] pointer on buffer containing packet in user space. 561 * @ length : [in] packet size in bytes. 562 * @ return number of sent bytes if success / return -1 if failure. 563 ***************************************************************************************/ 564 int socket_send( uint32_t fdid, 565 uint8_t * u_buf, 566 uint32_t length ); 567 568 /**************************************************************************************** 569 * This blocking function implements the recv() syscall. 570 * It is used to receive data that has been stored by the NIC_RX server thread in the 571 * rx_buf of a connected socket, identified by the <fdid> argument.
572 * The synchronisation between the client and the server threads uses the "rx_valid" 573 * set/reset flip-flop: If "rx_valid" is set, the client simply moves the available 574 * data from the "rx_buf" to the user buffer identified by the <u_buf> and <length> 575 * arguments, and reset the "rx_valid" flip_flop. If "rx_valid" is not set, the client 576 * thread register itself in the socket descriptor, registers in the clients queue rooted 577 * in the NIC_RX[index] chdev, and finally blocks on THREAD_BLOCKED_IO, and deschedules. 578 * The client thread is re-activated by the RX server, that set the "rx_valid" flip-flop 579 * as soon as data is available in the "rx_buf". The number of bytes actually transfered 580 * can be less than the user buffer size. 581 * This function can be called by a thread running in any cluster. 582 * WARNING : This implementation does not support several concurent RECV 583 * commands on the same socket, as only one RX thread can register in a given socket. 584 **************************************************************************************** 585 * @ fdid : [in] file descriptor index identifying the local socket. 586 * @ u_buf : [in] pointer on buffer in user space. 587 * @ length : [in] buffer size in bytes. 588 * @ return number of received bytes if success / return -1 if failure. 589 ***************************************************************************************/ 590 int socket_recv( uint32_t fdid, 591 uint8_t * u_buf, 592 uint32_t length ); 593 594 /**************************************************************************************** 595 * This blocking function implements the sendto() syscall. 596 * It is used to send data stored in the user buffer, identified the <u_buf> and <length> 597 * to a remote process identified by the <remote_ip> and <remote_port> arguments, 598 * through a local, unconnected (UDP) socket, identified by the <fdid> argument. 599 * The work is actually done by the NIC_TX server thread, and the synchronisation 531 600 * between the client and the server threads uses the "rx_valid" set/reset flip-flop: 532 601 * The client thread registers itself in the socket descriptor, registers in the queue … … 539 608 * WARNING : This implementation does not support several concurent SEND/SENDTO commands 540 609 * on the same socket, as only one TX thread can register in a given socket. 541 **************************************************************************************** 542 * @ fdid : [in] file descriptor index identifying the socket. 543 * @ u_buf : [in] pointer on buffer containing packet in user space. 544 * @ length : [in] packet size in bytes. 610 * TODO : this function is not implemented yet. 611 **************************************************************************************** 612 * @ fdid : [in] file descriptor index identifying the local socket. 613 * @ u_buf : [in] pointer on buffer containing packet in user space. 614 * @ length : [in] packet size in bytes. 615 * @ remote_ip : [in] remote socket IP address. 616 * @ remote_port : [in] remote socket port address. 545 617 * @ return number of sent bytes if success / return -1 if failure. 546 618 ***************************************************************************************/ 547 int socket_send( uint32_t fdid, 548 uint8_t * u_buf, 549 uint32_t length ); 550 551 /**************************************************************************************** 552 * This blocking function implements the recv() syscall. 
619 int socket_sendto( uint32_t fdid, 620 uint8_t * u_buf, 621 uint32_t length, 622 uint32_t remote_ip, 623 uint16_t remote_port ); 624 625 /**************************************************************************************** 626 * This blocking function implements the recvfrom() syscall. 553 627 * It is used to receive data that has been stored by the NIC_RX server thread in the 554 * rx_buf of a connected (TCP or UDP) socket, identified by the <fdid> argument. 628 * rx_buf of a non connected socket, identified by the <fdid> argument, from a 629 * remote process identified by the <remote_ip> and <remote_port> arguments. 555 630 * The synchronisation between the client and the server threads uses the "rx_valid" 556 631 * set/reset flip-flop: If "rx_valid" is set, the client simply moves the available … … 565 640 * WARNING : This implementation does not support several concurent RECV/RECVFROM 566 641 * commands on the same socket, as only one RX thread can register in a given socket. 567 **************************************************************************************** 568 * @ fdid : [in] file descriptor index identifying the socket. 569 * @ u_buf : [in] pointer on buffer in user space. 570 * @ length : [in] buffer size in bytes. 642 * TODO : this function is not implemented yet. 643 **************************************************************************************** 644 * @ fdid : [in] file descriptor index identifying the local socket. 645 * @ u_buf : [in] pointer on buffer in user space. 646 * @ length : [in] buffer size in bytes. 647 * @ remote_ip : [in] remote socket IP address. 648 * @ remote_port : [in] remote socket port address. 571 649 * @ return number of received bytes if success / return -1 if failure. 572 650 ***************************************************************************************/ 573 int socket_recv( uint32_t fdid, 574 uint8_t * u_buf, 575 uint32_t length ); 651 int socket_recvfrom( uint32_t fdid, 652 uint8_t * u_buf, 653 uint32_t length, 654 uint32_t remote_ip, 655 uint16_t remote_port ); 576 656 577 657 /**************************************************************************************** -
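The R2T and CRQ queues described in this header are plain single-producer / single-consumer FIFOs. The sketch below models the one-byte R2T requests with a simple ring buffer. The names r2t_put(), r2t_get() and R2T_SIZE are hypothetical; the kernel actually goes through the remote_buf_t API, and the kernel put variant spins instead of failing when the queue is full.

    /* single-producer / single-consumer model of the R2T queue :
     * each request is exactly one byte holding the TCP flags to set */
    #include <stdint.h>
    #include <string.h>

    #define R2T_SIZE 16   /* assumed queue depth */

    typedef struct
    {
        uint8_t  buf[R2T_SIZE];
        uint32_t ptw;     /* write pointer (RX server side) */
        uint32_t ptr;     /* read  pointer (TX server side) */
    } r2t_queue_t;

    /* post one request / return -1 when the queue is full */
    static int r2t_put( r2t_queue_t * q , uint8_t flags )
    {
        if( q->ptw - q->ptr == R2T_SIZE ) return -1;
        q->buf[q->ptw % R2T_SIZE] = flags;
        q->ptw++;
        return 0;
    }

    /* extract one request / return -1 when the queue is empty,
     * mirroring the socket_get_r2t_request() contract above */
    static int r2t_get( r2t_queue_t * q , uint8_t * flags )
    {
        if( q->ptw == q->ptr ) return -1;
        *flags = q->buf[q->ptr % R2T_SIZE];
        q->ptr++;
        return 0;
    }

    int main( void )
    {
        r2t_queue_t q;
        uint8_t     flags;

        memset( &q , 0 , sizeof(q) );
        r2t_put( &q , 0x10 );              /* RX server requests an ACK segment   */
        while( r2t_get( &q , &flags ) == 0 )
        {
            /* TX server would build and send a segment with these flags */
        }
        return 0;
    }

The CRQ queue follows the same discipline, with sizeof(connect_request_t) bytes per request instead of one.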
trunk/kernel/kern/pipe.c
r669 r683 2 2 * pipe.c - single writer, single reader pipe implementation 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019,2020)4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 32 32 uint32_t size ) 33 33 { 34 kmem_req_t req;35 34 remote_buf_t * buf; 36 35 pipe_t * pipe; … … 55 54 56 55 // 3. allocate memory for pipe descriptor 57 req.type = KMEM_KCM; 58 req.order = bits_log2( sizeof(pipe_t) ); 59 req.flags = AF_ZERO; 60 pipe = kmem_remote_alloc( cxy , &req ); 56 pipe = kmem_remote_alloc( cxy , bits_log2(sizeof(pipe_t)) , AF_ZERO ); 61 57 62 58 if( pipe == NULL ) … … 76 72 void pipe_destroy( xptr_t pipe_xp ) 77 73 { 78 kmem_req_t req;79 80 74 pipe_t * pipe_ptr = GET_PTR( pipe_xp ); 81 75 cxy_t pipe_cxy = GET_CXY( pipe_xp ); … … 88 82 89 83 // release pipe descriptor 90 req.type = KMEM_KCM; 91 req.ptr = pipe_ptr; 92 kmem_remote_free( pipe_cxy , &req ); 84 kmem_remote_free( pipe_cxy , pipe_ptr , bits_log2(sizeof(pipe_t)) ); 93 85 94 86 } // end pipe_destroy() 95 96 87 97 88 ////////////////////////////////////////// -
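The pipe.c changes above show the kmem API migration applied throughout this changeset : the kmem_req_t request descriptor disappears, and the order / flags arguments are passed directly. A condensed sketch of the new calling convention follows; the prototypes are inferred from the call sites and are assumptions, not the authoritative declarations.

    #include <stdint.h>

    typedef uint32_t cxy_t;                 /* cluster identifier       */
    #define AF_ZERO 0x1                     /* assumed flag value       */

    /* prototypes inferred from the call sites above (assumptions) */
    extern void *   kmem_remote_alloc( cxy_t cxy , uint32_t order , uint32_t flags );
    extern void     kmem_remote_free ( cxy_t cxy , void * ptr , uint32_t order );
    extern uint32_t bits_log2( uint32_t val );

    typedef struct pipe_s { int dummy; } pipe_t;   /* placeholder type  */

    void pipe_alloc_sketch( cxy_t cxy )
    {
        /* r683 style : order and flags passed directly, no kmem_req_t */
        pipe_t * pipe = kmem_remote_alloc( cxy,
                                           bits_log2( sizeof(pipe_t) ),
                                           AF_ZERO );
        if( pipe != NULL )
        {
            /* the order given to free must match the order used by alloc */
            kmem_remote_free( cxy , pipe , bits_log2( sizeof(pipe_t) ) );
        }
    }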
trunk/kernel/kern/printk.c
r669 r683 41 41 ////////////////////////////////////////////////////////////////////////////////////// 42 42 // This static function is called by printk(), nolock_printk(), and snprintk(), 43 // functions to build a string from a printf-like format, and stores it 44 // in the buffer defined by the <string> and <length> arguments. 45 // It does NOT add a terminating NUL character in the <string> buffer. 46 // If success, it returns the number of bytes actually copied in the string buffer. 43 // functions to build a string from a printf-like <format>, and stores it 44 // in the buffer defined by the <string> and <size> arguments. 45 // The <format> itself is supposed to be a NUL terminated string. The <string> 46 // buffer <size> must be large enough to contains also the NUL terminating character. 47 // If success, it returns the number of bytes actually copied in the <string> buffer, 48 // but this length does NOT include the terminating NUL character. 47 49 // It returns -2 in case of illegal format, it returns -1 if the formated string 48 // exceeds the lengthargument.50 // exceeds the <size> argument. 49 51 ////////////////////////////////////////////////////////////////////////////////////// 50 52 // @ string : buffer allocated by caller. 51 // @ length: buffer size in bytes53 // @ size : buffer size in bytes 52 54 // @ format : printf like format. 53 55 // @ args : va_list of arguments. … … 55 57 ////////////////////////////////////////////////////////////////////////////////////// 56 58 static int32_t format_to_string( char * string, 57 uint32_t length,59 uint32_t size, 58 60 const char * format, 59 61 va_list * args ) 60 62 { 61 63 62 #define TO_STRING(x) do { string[ps] = (x); ps++; if(ps== length) return -1; } while(0);64 #define TO_STRING(x) do { string[ps] = (x); ps++; if(ps==size) return -1; } while(0); 63 65 64 66 uint32_t ps = 0; // index in string buffer … … 74 76 goto format_to_string_arguments; 75 77 } 76 else // copy one char to string78 else // copy one char of format to string 77 79 { 78 80 TO_STRING( *format ); … … 81 83 } 82 84 83 TO_STRING( 0 ); 84 return ps;85 TO_STRING( 0 ); // NUL character written in buffer 86 return (ps - 1); // but not counted in length 85 87 86 88 format_to_string_arguments: … … 95 97 switch (*format) 96 98 { 97 case ('c'): // char conversion 98 { 99 int val = va_arg( *args , int ); 100 buf[0] = (char)val; 99 case ('c'): // one printable character 100 { 101 buf[0] = (char)va_arg( *args , uint32_t ); 101 102 pbuf = buf; 102 103 len = 1; 103 104 break; 104 105 } 105 case ('d'): // up to 10 digits decimal signed integer 106 case ('b'): // one ASCII code value (2 hexadecimal digits) 107 { 108 uint8_t val = (uint8_t)va_arg( *args , uint32_t ); 109 buf[1] = HexaTab[val & 0xF]; 110 buf[0] = HexaTab[(val >> 4) & 0xF]; 111 pbuf = buf; 112 len = 2; 113 break; 114 } 115 case ('d'): // one int32_t (up to 10 decimal digits after sign) 106 116 { 107 117 int32_t val = va_arg( *args , int32_t ); … … 120 130 break; 121 131 } 122 case ('u'): // up to 10 digits decimal unsigned integer132 case ('u'): // one uint32_t (up to 10 decimal digits) 123 133 { 124 134 uint32_t val = va_arg( *args , uint32_t ); … … 132 142 break; 133 143 } 134 case ('x'): // up to 8 digits hexad after "0x"135 case ('X'): // exactly 8 digits hexa after "0x" 144 case ('x'): // one uint32_t (up to 8 hexa digits after "0x") 145 136 146 { 137 147 uint32_t val = va_arg( *args , uint32_t ); … … 141 151 { 142 152 buf[7 - i] = HexaTab[val & 0xF]; 143 if( (*format == 'x') && ((val >> 4) == 0) ) 
break;144 153 val = val >> 4; 154 if(val == 0) break; 145 155 } 146 156 len = i + 1; … … 148 158 break; 149 159 } 150 case ('l'): // up to 16 digits hexa after "0x" 151 case ('L'): // exactly 16 digits hexa after "0x" 160 case ('X'): // one uint32_t (exactly 8 hexa digits after "0x") 161 { 162 uint32_t val = va_arg( *args , uint32_t ); 163 TO_STRING( '0' ); 164 TO_STRING( 'x' ); 165 for(i = 0 ; i < 8 ; i++) 166 { 167 buf[7 - i] = (val != 0) ? HexaTab[val & 0xF] : '0'; 168 val = val >> 4; 169 } 170 len = 8; 171 pbuf = &buf[0]; 172 break; 173 } 174 case ('l'): // one uint64_t (up to 16 digits hexa after "0x") 152 175 { 153 176 uint64_t val = (((uint64_t)va_arg( *args, uint32_t)) << 32) | … … 158 181 { 159 182 buf[15 - i] = HexaTab[val & 0xF]; 160 if( (*format == 'l') && ((val >> 4) == 0) ) break;161 183 val = val >> 4; 184 if( val == 0) break; 162 185 } 163 186 len = i + 1; … … 165 188 break; 166 189 } 167 case ('s'): /* string */ 190 case ('L'): // one uint64_t (exactly 16 digits hexa after "0x") 191 { 192 uint64_t val = (((uint64_t)va_arg( *args, uint32_t)) << 32) | 193 ((uint64_t)va_arg( *args, uint32_t)); 194 TO_STRING( '0' ); 195 TO_STRING( 'x' ); 196 for(i = 0 ; i < 16 ; i++) 197 { 198 buf[15 - i] = (val != 0) ? HexaTab[val & 0xF] : '0'; 199 val = val >> 4; 200 } 201 len = 16; 202 pbuf = &buf[0]; 203 break; 204 } 205 case ('s'): /* one characters string */ 168 206 { 169 207 char* str = va_arg( *args , char* ); … … 213 251 // build a string from format 214 252 length = format_to_string( buffer, 215 CONFIG_PRINTK_BUF_SIZE,216 format,217 &args );253 CONFIG_PRINTK_BUF_SIZE, 254 format, 255 &args ); 218 256 va_end( args ); 219 257 … … 258 296 // build a string from format 259 297 length = format_to_string( buffer, 260 CONFIG_PRINTK_BUF_SIZE,261 format,262 &args );298 CONFIG_PRINTK_BUF_SIZE, 299 format, 300 &args ); 263 301 va_end( args ); 264 302 … … 315 353 if( length > 0 ) // display panic message on TXT0, including formated string 316 354 { 317 printk("\n[ASSERT] in %s / core[%x,%d] / thread[%x,%x] / cycle %d\n %s\n",355 printk("\n[ASSERT] in %s / core[%x,%d] / thread[%x,%x] / cycle %d\n <%s>\n", 318 356 func_name, local_cxy, lid, pid, trdid, cycle, buffer ); 319 357 } … … 332 370 { 333 371 va_list args; 334 int32_t string_length;372 int32_t length; 335 373 336 374 // build args va_list … … 338 376 339 377 // build a string from format 340 string_length = format_to_string( buffer , size , format , &args ); 378 length = format_to_string( buffer , size , format , &args ); 379 380 // release args list 341 381 va_end( args ); 342 382 343 if( (string_length < 0) || (string_length == (int32_t)size) ) // failure 344 { 345 return -1; 346 } 347 else // success 348 { 349 // add NUL character 350 buffer[string_length] = 0; 351 352 return string_length; 353 } 383 if( length < 0 ) return -1; 384 else return length; 385 354 386 } // end snprintk() 355 387 -
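With the reworked format_to_string() above, the length returned by snprintk() now excludes the terminating NUL, and the %b / %x / %X formats differ only in padding. A few worked calls; the expected outputs are derived by hand from the code above, not from test runs.

    char buf[16];
    int  len;

    len = snprintk( buf , 16 , "%b" , 0x5 );    /* buf = "05"         / len = 2  */
    len = snprintk( buf , 16 , "%x" , 0x5 );    /* buf = "0x5"        / len = 3  */
    len = snprintk( buf , 16 , "%X" , 0x5 );    /* buf = "0x00000005" / len = 10 */
    len = snprintk( buf , 4  , "%u" , 12345 );  /* buffer too small   / len = -1 */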
trunk/kernel/kern/printk.h
r669 r683 24 24 /////////////////////////////////////////////////////////////////////////////////// 25 25 // The printk.c and printk.h files define the functions used by the kernel 26 // to display messages on the kernel terminal TXT0, using a busy waiting policy. 27 // It calls synchronously the TXT0 driver, without descheduling. 26 // to build, or display on terminal TXT0, formated strings. 27 // In case of display, it calls synchronously the TXT0 driver, without descheduling. 28 28 // 29 // For the formated string, the supported formats are defined below : 29 // The supported formats are defined below : 30 30 // %c : single ascii character (8 bits) 31 // %b : exactly 2 hexadecimal digits (8 bits) 31 32 // %d : up to 10 digits decimal integer (32 bits) 32 33 // %u : up to 10 digits unsigned decimal (32 bits) … … 47 48 48 49 /********************************************************************************** 49 * These debug functions display a formated string defined by the <format,...> 50 * These functions display a formated string defined by the <format,...> 50 51 * argument on the kernel terminal TXT0, with or without taking the TXT0 lock. 51 52 ********************************************************************************** … … 64 65 65 66 /********************************************************************************** 66 * This debug function displays a [ASSERT] message on kernel TXT0 terminal 67 * This function displays an [ASSERT] message on kernel TXT0 terminal 67 68 * if Boolean expression <expr> is false. It prints a detailed message including: 68 69 * - the calling core [cxy,lpid] … … 83 84 * This function builds a formated string in a buffer defined by the <buffer> 84 85 * and <buf_size> arguments, from the format defined by the <format,...> argument. 85 * This function set the NUL terminating character in target <buffer>. 86 * This function sets the NUL terminating character in target <buffer>, 87 * but the returned length does not include this NUL character. 86 88 ********************************************************************************** 87 89 * @ buffer : pointer on target buffer (allocated by caller). … … 142 144 * @ string : buffer name or identifier. 143 145 * @ buffer : local pointer on bytes array. 144 * @ size : number of bytes bytes to display. 146 * @ size : number of bytes to display. 145 147 *********************************************************************************/ 146 148 void putb( char * string, -
trunk/kernel/kern/process.c
r669 r683 2 2 * process.c - process related functions definition. 3 3 * 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012)4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018,2019,2020)6 * Alain Greiner (2016,2017,2018,2019,2020) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 70 70 ////////////////////////////////////////////////////////////////////////////////////////// 71 71 72 /////////////////////////////////73 process_t * process_alloc( void )74 {75 76 assert( __FUNCTION__, (sizeof(process_t) < CONFIG_PPM_PAGE_SIZE),77 "process descriptor exceeds 1 page" );78 79 kmem_req_t req;80 81 req.type = KMEM_PPM;82 req.order = 0;83 req.flags = AF_KERNEL | AF_ZERO;84 return kmem_alloc( &req );85 }86 87 ////////////////////////////////////////88 void process_free( process_t * process )89 {90 kmem_req_t req;91 92 req.type = KMEM_PPM;93 req.ptr = process;94 kmem_free( &req );95 }96 97 72 //////////////////////////////////////////////////// 98 73 error_t process_reference_init( process_t * process, … … 116 91 vmm_t * vmm; 117 92 118 // build extended pointer on this reference process 93 #if DEBUG_PROCESS_REFERENCE_INIT || DEBUG_PROCESS_ERROR 94 thread_t * this = CURRENT_THREAD; 95 uint32_t cycle = (uint32_t)hal_get_cycles(); 96 #endif 97 98 // build extended pointer on reference process 119 99 process_xp = XPTR( local_cxy , process ); 120 100 … … 130 110 131 111 #if DEBUG_PROCESS_REFERENCE_INIT 132 thread_t * this = CURRENT_THREAD;133 uint32_t cycle = (uint32_t)hal_get_cycles();134 112 if( DEBUG_PROCESS_REFERENCE_INIT < cycle ) 135 113 printk("\n[%s] thread[%x,%x] enter to initialize process %x / cycle %d\n", … … 156 134 if( error ) 157 135 { 158 printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ ); 136 137 #if DEBUG_PROCESS_ERROR 138 printk("\n[ERROR] in %s : thread[%x,%x] cannot create empty GPT / cycle %d\n", 139 __FUNCTION__, this->process->pid, this->trdid, cycle ); 140 #endif 159 141 return -1; 160 142 } … … 173 155 if( error ) 174 156 { 157 158 #if DEBUG_PROCESS_ERROR 159 printk("\n[ERROR] in %s : thread[%x,%x] cannot register kernel vsegs in VMM / cycle %d\n", 160 __FUNCTION__, this->process->pid, this->trdid, cycle ); 161 #endif 175 162 printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ ); 176 163 return -1; … … 233 220 if( error ) 234 221 { 235 printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ ); 222 223 #if DEBUG_PROCESS_ERROR 224 printk("\n[ERROR] in %s : thread[%x,%x] cannot open stdin pseudo file / cycle %d\n", 225 __FUNCTION__, this->process->pid, this->trdid, cycle ); 226 #endif 236 227 return -1; 237 228 } … … 256 247 if( error ) 257 248 { 258 printk("\n[ERROR] in %s : cannot open stdout pseudo-file\n", __FUNCTION__ ); 249 250 #if DEBUG_PROCESS_ERROR 251 printk("\n[ERROR] in %s : thread[%x,%x] cannot open stdout pseudo file / cycle %d\n", 252 __FUNCTION__, this->process->pid, this->trdid, cycle ); 253 #endif 259 254 return -1; 260 255 } … … 279 274 if( error ) 280 275 { 281 printk("\n[ERROR] in %s : cannot open stderr pseudo-file\n", __FUNCTION__ ); 276 277 #if DEBUG_PROCESS_ERROR 278 printk("\n[ERROR] in %s : thread[%x,%x] cannot open stderr pseudo file / cycle %d\n", 279 __FUNCTION__, this->process->pid, this->trdid, cycle ); 280 #endif 282 281 return -1; 283 282 } … … 302 301 303 302 // recreate all open files from parent process fd_array to child process fd_array 304 process_fd_replicate( process_xp , parent_xp ); 303 error = 
process_fd_replicate( process_xp , parent_xp ); 304 305 if( error ) 306 { 307 308 #if DEBUG_PROCESS_ERROR 309 printk("\n[ERROR] in %s : thread[%x,%x] cannot replicate fd_array / cycle %d\n", 310 __FUNCTION__, this->process->pid, this->trdid, cycle ); 311 #endif 312 return -1; 313 } 314 305 315 } 306 316 … … 379 389 vmm_t * vmm; 380 390 391 #if DEBUG_PROCESS_COPY_INIT || DEBUG_PROCESS_ERROR 392 thread_t * this = CURRENT_THREAD; 393 uint32_t cycle = (uint32_t)hal_get_cycles(); 394 #endif 395 381 396 // get reference process cluster and local pointer 382 397 cxy_t ref_cxy = GET_CXY( reference_process_xp ); … … 394 409 395 410 #if DEBUG_PROCESS_COPY_INIT 396 thread_t * this = CURRENT_THREAD;397 uint32_t cycle = (uint32_t)hal_get_cycles();398 411 if( DEBUG_PROCESS_COPY_INIT < cycle ) 399 412 printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n", … … 410 423 // create an empty GPT as required by the architecture 411 424 error = hal_gpt_create( &vmm->gpt ); 425 412 426 if( error ) 413 427 { 414 printk("\n[ERROR] in %s : cannot create empty GPT\n", __FUNCTION__ ); 428 429 #if DEBUG_PROCESS_ERROR 430 printk("\n[ERROR] in %s : thread[%x,%x] cannot create empty GPT / cycle %d\n", 431 __FUNCTION__, this->process->pid, this->trdid, cycle ); 432 #endif 415 433 return -1; 416 434 } … … 421 439 // register kernel vsegs in VMM as required by the architecture 422 440 error = hal_vmm_kernel_update( local_process ); 441 423 442 if( error ) 424 443 { 425 printk("\n[ERROR] in %s : cannot register kernel vsegs in VMM\n", __FUNCTION__ ); 444 445 #if DEBUG_PROCESS_ERROR 446 printk("\n[ERROR] in %s : thread[%x,%x] cannot register kernel vsegs in VMM / cycle %d\n", 447 __FUNCTION__, this->process->pid, this->trdid, cycle ); 448 #endif 426 449 return -1; 427 450 } … … 431 454 // initialize locks protecting GPT and VSL 432 455 error = vmm_user_init( local_process ); 456 433 457 if( error ) 434 458 { 435 printk("\n[ERROR] in %s : cannot register user vsegs in VMM\n", __FUNCTION__ ); 459 460 #if DEBUG_PROCESS_ERROR 461 printk("\n[ERROR] in %s : thread[%x,%x] cannot register user vsegs in VMM / cycle %d\n", 462 __FUNCTION__, this->process->pid, this->trdid, cycle ); 463 #endif 436 464 return -1; 437 465 } … … 598 626 599 627 // release memory allocated to process descriptor 600 process_free( process);628 kmem_free( process , bits_log2(sizeof(process_t)) ); 601 629 602 630 #if DEBUG_PROCESS_DESTROY … … 974 1002 { 975 1003 error_t error; 976 process_t * process _ptr;// local pointer on process1004 process_t * process; // local pointer on process 977 1005 xptr_t process_xp; // extended pointer on process 978 1006 1007 #if DEBUG_PROCESS_GET_LOCAL_COPY || DEBUG_PROCESS_ERROR 1008 thread_t * this = CURRENT_THREAD; 1009 uint32_t cycle = (uint32_t)hal_get_cycles(); 1010 #endif 1011 979 1012 cluster_t * cluster = LOCAL_CLUSTER; 980 1013 981 1014 #if DEBUG_PROCESS_GET_LOCAL_COPY 982 thread_t * this = CURRENT_THREAD;983 uint32_t cycle = (uint32_t)hal_get_cycles();984 1015 if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle ) 985 1016 printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", … … 996 1027 { 997 1028 process_xp = XLIST_ELEMENT( iter , process_t , local_list ); 998 process _ptr= GET_PTR( process_xp );999 if( process _ptr->pid == pid )1029 process = GET_PTR( process_xp ); 1030 if( process->pid == pid ) 1000 1031 { 1001 1032 found = true; … … 1017 1048 1018 1049 // allocate memory for local process descriptor 1019 process _ptr = process_alloc();1020 1021 if( process _ptr== NULL ) return NULL;1050 
process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO ); 1051 1052 if( process == NULL ) return NULL; 1022 1053 1023 1054 // initialize local process descriptor copy 1024 error = process_copy_init( process_ptr , ref_xp ); 1025 1026 if( error ) return NULL; 1055 error = process_copy_init( process , ref_xp ); 1056 1057 if( error ) 1058 { 1059 1060 #if DEBUG_PROCESS_ERROR 1061 printk("\n[ERROR] in %s : thread[%x,%x] cannot initialize local process copy / cycle %d\n", 1062 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1063 #endif 1064 return NULL; 1065 } 1027 1066 } 1028 1067 … … 1031 1070 if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle ) 1032 1071 printk("\n[%s] thread[%x,%x] exit in cluster %x / process %x / cycle %d\n", 1033 __FUNCTION__, this->process->pid, this->trdid, local_cxy, process _ptr, cycle );1034 #endif 1035 1036 return process _ptr;1072 __FUNCTION__, this->process->pid, this->trdid, local_cxy, process, cycle ); 1073 #endif 1074 1075 return process; 1037 1076 1038 1077 } // end process_get_local_copy() … … 1111 1150 xptr_t max_xp; // extended pointer on max field in fd_array 1112 1151 1152 #if DEBUG_PROCESS_FD_REGISTER 1153 thread_t * this = CURRENT_THREAD; 1154 uint32_t cycle = (uint32_t)hal_get_cycles(); 1155 #endif 1156 1113 1157 // get target process cluster and local pointer 1114 1158 process_t * process_ptr = GET_PTR( process_xp ); … … 1120 1164 1121 1165 #if DEBUG_PROCESS_FD_REGISTER 1122 thread_t * this = CURRENT_THREAD; 1123 uint32_t cycle = (uint32_t)hal_get_cycles(); 1124 pid_t pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) ); 1166 pid_t tgt_pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) ); 1125 1167 if( DEBUG_PROCESS_FD_REGISTER < cycle ) 1126 1168 printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n", 1127 __FUNCTION__, this->process->pid, this->trdid, pid, cycle );1169 __FUNCTION__, this->process->pid, this->trdid, tgt_pid, cycle ); 1128 1170 #endif 1129 1171 … … 1168 1210 if( DEBUG_PROCESS_FD_REGISTER < cycle ) 1169 1211 printk("\n[%s] thread[%x,%x] exit for process %x / fdid %d / cycle %d\n", 1170 __FUNCTION__, this->process->pid, this->trdid, pid, id, cycle );1212 __FUNCTION__, this->process->pid, this->trdid, tgt_pid, id, cycle ); 1171 1213 #endif 1172 1214 … … 1384 1426 } // end process_fd_get_xptr_from_local() 1385 1427 1386 ///////////////////////////////////////// 1387 voidprocess_fd_replicate( xptr_t dst_xp,1388 xptr_t src_xp )1428 //////////////////////////////////////////// 1429 error_t process_fd_replicate( xptr_t dst_xp, 1430 xptr_t src_xp ) 1389 1431 { 1390 1432 uint32_t fdid; // current file descriptor index … … 1435 1477 if( error ) 1436 1478 { 1437 printk("\n[ERROR] in %s : cannot create new file\n", __FUNCTION__ ); 1438 return; 1479 1480 #if DEBUG_PROCESS_ERROR 1481 thread_t * this = CURRENT_THREAD; 1482 uint32_t cycle = (uint32_t)hal_get_cycles(); 1483 printk("\n[ERROR] in %s : thread[%x,%x] cannot create file descriptor / cycle %d\n", 1484 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1485 #endif 1486 return -1; 1439 1487 } 1440 1488 … … 1446 1494 // release lock on source process fd_array 1447 1495 remote_queuelock_release( src_lock_xp ); 1496 1497 return 0; 1448 1498 1449 1499 } // end process_fd_replicate() … … 1494 1544 uint32_t max = hal_remote_l32( XPTR( owner_cxy , &owner_ptr->fd_array.max )); 1495 1545 1496 printk("\n***** fd_array for pid %x in cluster %x / max %d *****\n", 1546 // get pointers on TXT0 chdev 1547 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 1548 cxy_t txt0_cxy = 
GET_CXY( txt0_xp );
1549 chdev_t * txt0_ptr = GET_PTR( txt0_xp );
1550
1551 // get extended pointer on remote TXT0 lock
1552 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
1553
1554 // get TXT0 lock
1555 remote_busylock_acquire( lock_xp );
1556
1557 nolock_printk("\n***** fd_array for pid %x in cluster %x / max %d *****\n",
1497 1558 pid, process_cxy, max );
1498 1559
… …
1520 1581
1521 1582 // display relevant file descriptor info
1522 printk(" - %d : type %s / ptr %x (%s)\n",
1583 nolock_printk(" - %d : type %s / ptr %x (%s)\n",
1523 1584 fdid, process_fd_type_str(file_type), file_ptr, name );
1524 1585 }
… …
1526 1587 {
1527 1588 // display relevant file descriptor info
1528 printk(" - %d : type %s / ptr %x\n",
1589 nolock_printk(" - %d : type %s / ptr %x\n",
1529 1590 fdid , process_fd_type_str(file_type), file_ptr );
1530 1591 }
… …
1532 1593 else
1533 1594 {
1534 printk(" - %d : empty slot\n",
1535 fdid );
1595 nolock_printk(" - %d : empty slot\n", fdid );
1536 1596 }
1537 1597 }
1598
1599 // release TXT0 lock
1600 remote_busylock_release( lock_xp );
1601
1538 1602 } // end process_fd_display()
1539 1603
… …
1548 1612 {
1549 1613 ltid_t ltid;
1614 ltid_t ltid_min;
1615
1550 1616 bool_t found = false;
1617 lpid_t lpid = LPID_FROM_PID( process->pid );
1551 1618
1552 1619 // check arguments
… …
1554 1621 assert( __FUNCTION__, (thread != NULL) , "thread argument is NULL" );
1555 1622
1556 // get the lock protecting th_tbl for all threads
1557 // but the idle thread executing kernel_init (cannot yield)
1623 // get the lock protecting th_tbl for all threads but the idle thread
1558 1624 if( thread->type != THREAD_IDLE ) rwlock_wr_acquire( &process->th_lock );
1559 1625
1626 // compute ltid_min : 0 for a user thread / 1 for a kernel thread
1627 ltid_min = (lpid == 0) ? 1 : 0;
1628
1560 1629 // scan th_tbl
1561 for( ltid = 0 ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
1630 for( ltid = ltid_min ; ltid < CONFIG_THREADS_MAX_PER_CLUSTER ; ltid++ )
1562 1631 {
1563 1632 if( process->th_tbl[ltid] == NULL )
… …
1581 1650 if( thread->type != THREAD_IDLE ) rwlock_wr_release( &process->th_lock );
1582 1651
1583 return (found) ? 0 : 0xFFFFFFFF;
1652 return (found) ?
0 : -1; 1584 1653 1585 1654 } // end process_register_thread() … … 1647 1716 "parent process must be the reference process" ); 1648 1717 1649 #if DEBUG_PROCESS_MAKE_FORK 1650 uint32_t cycle ;1718 #if DEBUG_PROCESS_MAKE_FORK || DEBUG_PROCESS_ERROR 1719 uint32_t cycle = (uint32_t)hal_get_cycles(); 1651 1720 thread_t * this = CURRENT_THREAD; 1652 1721 trdid_t trdid = this->trdid; … … 1655 1724 1656 1725 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1657 cycle = (uint32_t)hal_get_cycles();1658 1726 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1659 1727 printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n", … … 1662 1730 1663 1731 // allocate a process descriptor 1664 process = process_alloc();1732 process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO ); 1665 1733 1666 1734 if( process == NULL ) 1667 1735 { 1668 printk("\n[ERROR] in %s : cannot get process in cluster %x\n", 1669 __FUNCTION__, local_cxy ); 1736 1737 #if DEBUG_PROCESS_ERROR 1738 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate process descriptor / cxy %x / cycle %d\n", 1739 __FUNCTION__, pid, trdid, local_cxy, cycle ); 1740 #endif 1670 1741 return -1; 1671 1742 } … … 1675 1746 if( error ) 1676 1747 { 1677 printk("\n[ERROR] in %s : cannot get PID in cluster %x\n", 1678 __FUNCTION__, local_cxy ); 1679 process_free( process ); 1748 1749 #if DEBUG_PROCESS_ERROR 1750 printk("\n[ERROR] in %s : thread[%x,%x] cannot get PID / cxy %x / cycle %d\n", 1751 __FUNCTION__, pid, trdid, local_cxy, cycle ); 1752 #endif 1753 kmem_free( process , bits_log2(sizeof(process_t)) ); 1680 1754 return -1; 1681 1755 } 1682 1756 1683 1757 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1684 cycle = (uint32_t)hal_get_cycles();1685 1758 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1686 printk("\n[%s] thread[%x,%x] allocated child_process %x / cycle %d\n",1687 __FUNCTION__, pid, trdid, new_pid , cycle);1759 printk("\n[%s] thread[%x,%x] allocated child_process %x\n", 1760 __FUNCTION__, pid, trdid, new_pid ); 1688 1761 #endif 1689 1762 … … 1694 1767 if( error ) 1695 1768 { 1696 printk("\n[ERROR] in %s : cannot initialize child process in cluster %x\n", 1697 __FUNCTION__, local_cxy ); 1698 process_free( process ); 1769 1770 #if DEBUG_PROCESS_ERROR 1771 printk("\n[ERROR] in %s : thread[%x,%x] cannot initialize child process / cxy %x / cycle %d\n", 1772 __FUNCTION__, pid, trdid, local_cxy, cycle ); 1773 #endif 1774 cluster_pid_release( new_pid ); 1775 kmem_free( process , bits_log2(sizeof(process_t)) ); 1699 1776 return -1; 1700 1777 } 1701 1778 1702 1779 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1703 cycle = (uint32_t)hal_get_cycles();1704 1780 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1705 printk("\n[%s] thread[%x,%x] initialized child_process %x / cycle %d\n",1706 __FUNCTION__, pid, trdid, new_pid , cycle);1781 printk("\n[%s] thread[%x,%x] initialized child_process %x\n", 1782 __FUNCTION__, pid, trdid, new_pid ); 1707 1783 #endif 1708 1784 … … 1712 1788 if( error ) 1713 1789 { 1714 printk("\n[ERROR] in %s : cannot copy VMM in cluster %x\n", 1715 __FUNCTION__, local_cxy ); 1716 process_free( process ); 1790 1791 #if DEBUG_PROCESS_ERROR 1792 printk("\n[ERROR] in %s : thread[%x,%x] cannot copy VMM to child process / cxy %x / cycle %d\n", 1793 __FUNCTION__, pid, trdid, local_cxy, cycle ); 1794 #endif 1717 1795 cluster_pid_release( new_pid ); 1796 kmem_free( process , bits_log2(sizeof(process_t)) ); 1718 1797 return -1; 1719 1798 } 1720 1799 1721 1800 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1722 cycle = (uint32_t)hal_get_cycles();1723 1801 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1724 1802 { 1725 
printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",1726 __FUNCTION__, pid, trdid , cycle);1803 printk("\n[%s] thread[%x,%x] copied VMM from parent to child\n", 1804 __FUNCTION__, pid, trdid ); 1727 1805 hal_vmm_display( XPTR( local_cxy , process ) , true ); 1728 1806 } … … 1736 1814 1737 1815 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1738 cycle = (uint32_t)hal_get_cycles();1739 1816 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1740 printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership / cycle %d\n",1741 __FUNCTION__ , pid, trdid, new_pid , cycle);1817 printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership\n", 1818 __FUNCTION__ , pid, trdid, new_pid ); 1742 1819 #endif 1743 1820 … … 1753 1830 if( error ) 1754 1831 { 1755 printk("\n[ERROR] in %s : cannot create thread in cluster %x\n", 1756 __FUNCTION__, local_cxy ); 1757 process_free( process ); 1832 1833 #if DEBUG_PROCESS_ERROR 1834 printk("\n[ERROR] in %s : thread[%x,%x] cannot create main thread / cxy %x / cycle %d\n", 1835 __FUNCTION__, pid, trdid, local_cxy, cycle ); 1836 #endif 1758 1837 cluster_pid_release( new_pid ); 1838 kmem_free( process , bits_log2(sizeof(process_t)) ); 1759 1839 return -1; 1760 1840 } … … 1765 1845 1766 1846 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1767 cycle = (uint32_t)hal_get_cycles();1768 1847 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1769 printk("\n[%s] thread[%x,%x] created main thread %x / cycle %d\n",1770 __FUNCTION__, pid, trdid, thread , cycle);1848 printk("\n[%s] thread[%x,%x] created main thread %x\n", 1849 __FUNCTION__, pid, trdid, thread ); 1771 1850 #endif 1772 1851 … … 1787 1866 1788 1867 #if( DEBUG_PROCESS_MAKE_FORK & 1 ) 1789 cycle = (uint32_t)hal_get_cycles();1790 1868 if( DEBUG_PROCESS_MAKE_FORK < cycle ) 1791 printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child / cycle %d\n",1792 __FUNCTION__, pid, trdid , cycle);1869 printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child\n", 1870 __FUNCTION__, pid, trdid ); 1793 1871 #endif 1794 1872 … … 1819 1897 } // end process_make_fork() 1820 1898 1821 ////////////////////////////////////////////////i////////////////////////////////////// 1822 // This static function is called by the thread_user_exec() function : 1823 // - to register the main() arguments (args) in the <exec_info> structure. 1824 // - to register the environment variables (envs) in the <exec_info> structure. 1825 // In both cases the input is an array of NULL terminated string pointers in user 1826 // space, and the strings can be dispatched anywhere in the user process space. 1827 // This array of pointers is defined by the <u_pointers> argument. The empty slots 1828 // contain the NULL value, and the N non-empty slots are indexed from 0 to (N-1). 1829 // - The max number of envs, and the max number of args are defined by the 1830 // CONFIG_PROCESS_ARGS_NR and CONFIG_PROCESS_ENVS_MAX_NR parameters. 1831 // - The numbers of pages to store the (args) and (envs) strings are defined by the 1832 // CONFIG_VMM_ENVS_SIZE and CONFIG_VMM_STACK_SIZE parameters. 1833 /////////////////////////////////////////////////////////////////////////////////////// 1834 // Implementation note: 1835 // It allocates a kernel buffer to store a kernel copy of both the array of pointers, 1836 // and the strings. It set the pointers and copies the strings in this kernel buffer. 1837 // Finally, it registers the buffer & the actual number of strings in the process 1838 // exec_info structure (defined in the <process.h> file). 
1839 /////////////////////////////////////////////////////////////////////////////////////// 1840 // @ is_args : [in] true if called for (args) / false if called for (envs). 1841 // @ u_pointers : [in] array of pointers on the strings (in user space). 1842 // @ exec_info : [out] pointer on the exec_info structure. 1843 // @ return 0 if success / non-zero if too many strings or no memory. 1844 /////////////////////////////////////////////////////////////////////////////////////// 1845 error_t process_exec_get_strings( bool_t is_args, 1846 char ** u_pointers, 1847 exec_info_t * exec_info ) 1848 { 1849 uint32_t index; // slot index in pointers array 1850 uint32_t length; // string length (in bytes) 1851 uint32_t pointers_bytes; // number of bytes to store pointers 1852 uint32_t max_index; // max size of pointers array 1853 char ** k_pointers; // base of kernel array of pointers 1854 char * k_buf_ptr; // pointer on first empty slot in strings buffer 1855 uint32_t k_buf_space; // number of bytes available in string buffer 1856 kmem_req_t req; // kernel memory allocator request 1857 char * k_buf; // kernel buffer for both pointers & strings 1858 1859 #if DEBUG_PROCESS_EXEC_GET_STRINGS 1860 thread_t * this = CURRENT_THREAD; 1861 uint32_t cycle = (uint32_t)hal_get_cycles(); 1862 #endif 1863 1864 // Allocate one block of physical memory for both the pointers and the strings 1865 // as defined by the CONFIG_VMM_ARGS_SIZE and CONFIG_VMM_ENVS_SIZE parameters 1866 // - the array of pointers is stored in the first bytes of the kernel buffer 1867 // - the strings themselve are stored in the next bytes of this buffer 1868 // Set the k_pointers, k_buf_ptr, k_buf_space, and max_index 1869 1870 if( is_args ) 1871 { 1872 req.type = KMEM_PPM; 1873 req.order = bits_log2( CONFIG_VMM_ARGS_SIZE ); 1874 req.flags = AF_KERNEL | AF_ZERO; 1875 k_buf = kmem_alloc( &req ); 1876 1877 pointers_bytes = CONFIG_PROCESS_ARGS_MAX_NR * sizeof(char *); 1878 k_pointers = (char **)k_buf; 1879 k_buf_ptr = k_buf + pointers_bytes; 1880 k_buf_space = (CONFIG_VMM_ARGS_SIZE * CONFIG_PPM_PAGE_SIZE) - pointers_bytes; 1881 max_index = CONFIG_PROCESS_ARGS_MAX_NR; 1882 1883 #if DEBUG_PROCESS_EXEC_GET_STRINGS 1884 if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle ) 1885 printk("\n[%s] thread[%x,%x] for args / u_buf %x / k_buf %x\n", 1886 __FUNCTION__, this->process->pid, this->trdid, u_pointers, k_buf ); 1887 #endif 1888 1889 } 1890 else 1891 { 1892 req.type = KMEM_PPM; 1893 req.order = bits_log2( CONFIG_VMM_ENVS_SIZE ); 1894 req.flags = AF_KERNEL | AF_ZERO; 1895 k_buf = kmem_alloc( &req ); 1896 1897 pointers_bytes = CONFIG_PROCESS_ENVS_MAX_NR * sizeof(char *); 1898 k_pointers = (char **)k_buf; 1899 k_buf_ptr = k_buf + pointers_bytes; 1900 k_buf_space = (CONFIG_VMM_ENVS_SIZE * CONFIG_PPM_PAGE_SIZE) - pointers_bytes; 1901 max_index = CONFIG_PROCESS_ENVS_MAX_NR; 1902 1903 #if DEBUG_PROCESS_EXEC_GET_STRINGS 1904 if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle ) 1905 printk("\n[%s] thread[%x,%x] for envs / u_buf %x / k_buf %x\n", 1906 __FUNCTION__, this->process->pid, this->trdid, u_pointers, k_buf ); 1907 #endif 1908 1909 } 1910 1911 // copy the user array of pointers to kernel buffer 1912 hal_copy_from_uspace( XPTR( local_cxy , k_pointers ), 1913 u_pointers, 1914 pointers_bytes ); 1915 1916 // WARNING : the pointers copied in the k_pointers[] array are user pointers, 1917 // after the loop below, the k_pointers[] array contains kernel pointers. 
1918 1919 #if DEBUG_PROCESS_EXEC_GET_STRINGS 1920 if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle ) 1921 printk("\n[%s] thread[%x,%x] copied u_ptr array to k_ptr array\n" 1922 " p0 = %x / p1 = %x / p2 = %x / p3 = %x\n", 1923 __FUNCTION__, this->process->pid, this->trdid, 1924 k_pointers[0], k_pointers[1], k_pointers[2], k_pointers[3] ); 1925 #endif 1926 1927 // scan kernel array of pointers to copy strings to kernel buffer 1928 for( index = 0 ; index < max_index ; index++ ) 1929 { 1930 // exit loop if (k_pointers[] == NUll) 1931 if( k_pointers[index] == NULL ) break; 1932 1933 // compute string length 1934 length = hal_strlen_from_uspace( k_pointers[index] ) + 1; 1935 1936 // return error if overflow in kernel buffer 1937 if( length > k_buf_space ) return -1; 1938 1939 // copy the string to kernel buffer 1940 hal_copy_from_uspace( XPTR( local_cxy , k_buf_ptr ), 1941 k_pointers[index], 1942 length ); 1943 1944 #if DEBUG_PROCESS_EXEC_GET_STRINGS 1945 if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle ) 1946 printk("\n[%s] thread[%x,%x] copied string[%d] <%s> to kernel buffer / length %d\n", 1947 __FUNCTION__, this->process->pid, this->trdid, index, k_buf_ptr, length ); 1948 #endif 1949 1950 // replace the user pointer by a kernel pointer in the k_pointer[] array 1951 k_pointers[index] = k_buf_ptr; 1952 1953 // increment loop variables 1954 k_buf_ptr += length; 1955 k_buf_space -= length; 1956 1957 } // end loop on index 1958 1959 // update into exec_info structure 1960 if( is_args ) 1961 { 1962 exec_info->args_pointers = k_pointers; 1963 exec_info->args_nr = index; 1964 } 1965 else 1966 { 1967 exec_info->envs_pointers = k_pointers; 1968 exec_info->envs_buf_free = k_buf_ptr; 1969 exec_info->envs_nr = index; 1970 } 1971 1972 #if DEBUG_PROCESS_EXEC_GET_STRINGS 1973 if( DEBUG_PROCESS_EXEC_GET_STRINGS < cycle ) 1974 printk("\n[%s] thread[%x,%x] copied %d strings to kernel buffer\n", 1975 __FUNCTION__, this->process->pid, this->trdid, index ); 1976 #endif 1977 1978 return 0; 1979 1980 } // end process_exec_get_strings() 1899 #if DEBUG_PROCESS_MAKE_EXEC 1900 1901 ///////////////////////////////////////////////////////////////////////////////////////// 1902 // This static debug function displays the current state of the exec_info structure 1903 // embedded in the calling process descriptor. 1904 // 1905 // WARNING : It can be used after execution of the sys_exec function, but it cannot 1906 // be used after execution of the process_make_exec() function, because the 1907 // kernel pointers have been replaced by user pointers. 
1908 ///////////////////////////////////////////////////////////////////////////////////////// 1909 static void process_exec_info_display( bool_t args_ok, 1910 bool_t envs_ok ) 1911 { 1912 uint32_t i; 1913 char * str; // local pointer on a string 1914 1915 process_t * process = CURRENT_THREAD->process; 1916 1917 // get relevant info from calling process descriptor 1918 pid_t pid = process->pid; 1919 1920 uint32_t args_nr = process->exec_info.args_nr; 1921 char ** args = process->exec_info.args_pointers; 1922 1923 uint32_t envs_nr = process->exec_info.envs_nr; 1924 char ** envs = process->exec_info.envs_pointers; 1925 1926 char * path = process->exec_info.path; 1927 1928 // get pointers on TXT0 chdev 1929 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 1930 cxy_t txt0_cxy = GET_CXY( txt0_xp ); 1931 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 1932 1933 // get extended pointer on remote TXT0 lock 1934 xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 1935 1936 // get TXT0 lock 1937 remote_busylock_acquire( lock_xp ); 1938 1939 nolock_printk("\n***** exec_info for process %x in cluster %x / %s\n", 1940 pid , local_cxy , path ); 1941 1942 // display arguments if required 1943 if( args_ok ) 1944 { 1945 for( i = 0 ; i < args_nr ; i++ ) 1946 { 1947 str = args[i]; 1948 if( str != NULL) // display pointer and string 1949 nolock_printk(" - &arg[%d] = %x / arg[%d] = <%s>\n", i, str, i, str ); 1950 else // display WARNING 1951 nolock_printk(" - unexpected NULL pointer for &arg[%d]\n", i ); 1952 } 1953 } 1954 1955 // display env variables if required 1956 if( envs_ok ) 1957 { 1958 for( i = 0 ; i < envs_nr ; i++ ) 1959 { 1960 str = envs[i]; 1961 if( str != NULL) // display pointer and string 1962 nolock_printk(" - &env[%d] = %x / env[%d] = <%s>\n", i, str, i, str ); 1963 else // display WARNING 1964 nolock_printk(" - unexpected NULL pointer for &env[%d]\n", i ); 1965 } 1966 } 1967 1968 // release TXT0 lock 1969 remote_busylock_release( lock_xp ); 1970 1971 } // end process_exec_info_display() 1972 1973 #endif // DEBUG_PROCESS_MAKE_EXEC 1981 1974 1982 1975 ///////////////////////////////// … … 2003 1996 uint32_t envs_size; // envs vseg size (bytes) 2004 1997 1998 #if DEBUG_PROCESS_MAKE_EXEC || DEBUG_PROCESS_ERROR 1999 uint32_t cycle = (uint32_t)hal_get_cycles(); 2000 #endif 2001 2005 2002 // get calling thread, process, pid, trdid, and ref_xp 2006 2003 this = CURRENT_THREAD; … … 2014 2011 2015 2012 #if DEBUG_PROCESS_MAKE_EXEC 2016 uint32_t cycle = (uint32_t)hal_get_cycles();2017 2013 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 2018 2014 printk("\n[%s] thread[%x,%x] enters for <%s> / cycle %d\n", … … 2032 2028 if( error ) 2033 2029 { 2034 printk("\n[ERROR] in %s : thread[%x,%x] failed to open file <%s>\n", 2035 __FUNCTION__, pid, trdid, elf_path ); 2030 2031 #if DEBUG_PROCESS_ERROR 2032 printk("\n[ERROR] in %s : thread[%x,%x] failed to open file <%s> / cycle %d\n", 2033 __FUNCTION__, pid, trdid, elf_path, cycle ); 2034 #endif 2036 2035 return -1; 2037 2036 } … … 2064 2063 #endif 2065 2064 2066 // 4. register the "args" vseg in VSL and map it in GPT, if required 2067 // this vseg contains both the array of pointers and the strings 2065 // 4. register the "args" vseg in VSL and map it in GPT, if args_nr != 0. 2066 // As this vseg contains an array of pointers, the kernel pointers 2067 // are replaced by user pointers in new process space. 
2068 2068 args_nr = process->exec_info.args_nr; 2069 2069 … … 2071 2071 { 2072 2072 // get args vseg base and size in user space 2073 args_base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_ SHIFT;2074 args_size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_ SHIFT;2073 args_base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_ORDER; 2074 args_size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_ORDER; 2075 2075 2076 2076 // create and register args vseg in VMM … … 2085 2085 if( vseg == NULL ) 2086 2086 { 2087 printk("\n[ERROR] in %s : thread[%x,%x] cannot get args vseg for <%s>\n", 2088 __FUNCTION__, pid, trdid, elf_path ); 2087 2088 #if DEBUG_PROCESS_ERROR 2089 printk("\n[ERROR] in %s : thread[%x,%x] cannot create args vseg for <%s> / cycle %d\n", 2090 __FUNCTION__, pid, trdid, elf_path, cycle ); 2091 #endif 2089 2092 return -1; 2090 2093 } … … 2098 2101 } 2099 2102 #endif 2100 // map all pages for th is"args" vseg2103 // map all pages for the "args" vseg 2101 2104 uint32_t fake_attr; // required for hal_gpt_lock_pte() 2102 2105 ppn_t fake_ppn; // required for hal_gpt_lock_pte() 2103 2106 2104 xptr_t gpt = XPTR( local_cxy , &process->vmm.gpt ); 2105 uint32_t attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE | GPT_USER | GPT_CACHABLE; 2106 vpn_t vpn = CONFIG_VMM_UTILS_BASE; 2107 ppn_t ppn = ((ppn_t)process->exec_info.args_pointers >> CONFIG_PPM_PAGE_SHIFT); 2107 xptr_t base_xp = XPTR( local_cxy , process->exec_info.args_pointers ); 2108 xptr_t gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); 2109 uint32_t attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE | GPT_USER | GPT_CACHABLE; 2110 vpn_t vpn = CONFIG_VMM_UTILS_BASE; 2111 ppn_t ppn = ppm_base2ppn( base_xp ); 2108 2112 2109 2113 for( n = 0 ; n < CONFIG_VMM_ARGS_SIZE ; n++ ) 2110 2114 { 2111 2115 // lock the PTE 2112 if (hal_gpt_lock_pte( gpt , vpn , &fake_attr , &fake_ppn ) )2116 if (hal_gpt_lock_pte( gpt_xp , vpn + n , &fake_attr , &fake_ppn ) ) 2113 2117 { 2114 printk("\n[ERROR] in %s : thread[%x,%x] cannot map args vpn %x for <%s>\n", 2115 __FUNCTION__, pid, trdid, vpn, elf_path ); 2118 2119 #if DEBUG_PROCESS_ERROR 2120 printk("\n[ERROR] in %s : thread[%x,%x] cannot map vpn[%x] of args vseg for <%s> / cycle %d\n", 2121 __FUNCTION__, pid, trdid, vpn + n , elf_path , cycle ); 2122 #endif 2116 2123 return -1; 2117 2124 } 2118 2125 2119 2126 // map and unlock the PTE 2120 hal_gpt_set_pte( gpt , vpn + n , attr , ppn + n );2121 2127 hal_gpt_set_pte( gpt_xp , vpn + n , attr , ppn + n ); 2128 } 2122 2129 2123 2130 #if( DEBUG_PROCESS_MAKE_EXEC & 1 ) … … 2127 2134 __FUNCTION__, pid, trdid ); 2128 2135 hal_vmm_display( ref_xp , true ); 2136 process_exec_info_display( true , false ); // args & not envs 2129 2137 } 2130 2138 #endif 2131 2139 2132 // set user space pointers in array of pointers 2133 char ** ptr = process->exec_info.args_pointers; 2134 2140 // build pointer on args buffer in kernel space 2141 char ** k_args = process->exec_info.args_pointers; 2142 2143 // build pointer on args buffer in user space 2144 char ** u_args = (char **)args_base; 2145 2146 // set user space pointers in kernel args buffer 2135 2147 for( n = 0 ; n < args_nr ; n++ ) 2136 2148 { 2137 ptr[n] = ptr[n] + args_base - (intptr_t)ptr;2149 k_args[n] = (char *)((intptr_t)k_args[n] + (intptr_t)u_args - (intptr_t)k_args); 2138 2150 } 2139 } 2140 2141 // 5. 
register the "envs" vseg in VSL and map it in GPT, if required 2142 // this vseg contains both the array of pointers and the strings 2151 2152 #if( DEBUG_PROCESS_MAKE_EXEC & 1 ) 2153 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 2154 printk("\n[%s] thread[%x,%x] args user pointers set in exec_info\n", 2155 __FUNCTION__, pid, trdid ); 2156 #endif 2157 2158 } 2159 2160 // 5. register the "envs" vseg in VSL and map it in GPT, if envs_nr != 0. 2161 // As this vseg contains an array of pointers, the kernel pointers 2162 // are replaced by user pointers in new process space. 2163 2143 2164 envs_nr = process->exec_info.envs_nr; 2144 2165 … … 2146 2167 { 2147 2168 // get envs vseg base and size in user space from config 2148 envs_base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT; 2149 envs_size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT; 2150 2151 // TODO (inspired from args) 2169 envs_base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_ORDER; 2170 envs_size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_ORDER; 2171 2172 // TODO (should be similar to the code for args above) 2173 2174 #if( DEBUG_PROCESS_MAKE_EXEC & 1 ) 2175 if( DEBUG_PROCESS_MAKE_EXEC < cycle ) 2176 printk("\n[%s] thread[%x,%x] envs user pointers set in exec_info\n", 2177 __FUNCTION__, pid, trdid ); 2178 #endif 2179 2152 2180 } 2153 2181 … … 2156 2184 // register extended pointer on .elf file in process descriptor 2157 2185 error = elf_load_process( file_xp , process ); 2186 2158 2187 if( error ) 2159 2188 { 2160 printk("\n[ERROR] in %s : thread[%x,%x] failed to access <%s>\n", 2161 __FUNCTION__, pid, trdid, elf_path ); 2189 2190 #if DEBUG_PROCESS_ERROR 2191 printk("\n[ERROR] in %s : thread[%x,%x] failed to access file <%s> / cycle %d\n", 2192 __FUNCTION__, pid, trdid , elf_path , cycle ); 2193 #endif 2162 2194 return -1; 2163 2195 } … … 2183 2215 if( vseg == NULL ) 2184 2216 { 2185 printk("\n[ERROR] in %s : thread[%x,%x] cannot set u_stack vseg for <%s>\n", 2186 __FUNCTION__, pid, trdid, elf_path ); 2217 2218 #if DEBUG_PROCESS_ERROR 2219 printk("\n[ERROR] in %s : thread[%x,%x] failed to set u_stack vseg for <%s> / cycle %d\n", 2220 __FUNCTION__, pid, trdid , elf_path , cycle ); 2221 #endif 2187 2222 return -1; 2188 2223 } … … 2205 2240 if( error ) 2206 2241 { 2207 printk("\n[ERROR] in %s : thread[%x,%x] cannot update thread for <%s>\n", 2208 __FUNCTION__ , pid, trdid, elf_path ); 2242 2243 #if DEBUG_PROCESS_ERROR 2244 printk("\n[ERROR] in %s : thread[%x,%x] failed to set main thread for <%s> / cycle %d\n", 2245 __FUNCTION__, pid, trdid , elf_path , cycle ); 2246 #endif 2209 2247 return -1; 2210 2248 } 2211 2249 2250 // should not be reached, avoid a warning 2212 2251 return 0; 2213 2252 … … 2294 2333 if( DEBUG_PROCESS_ZERO_CREATE < cycle ) 2295 2334 printk("\n[%s] initialized hal specific VMM in cluster%x\n", __FUNCTION__, local_cxy ); 2335 hal_vmm_display( XPTR( local_cxy , process ) , true ); 2296 2336 #endif 2297 2337 … … 2356 2396 2357 2397 // allocates memory for process descriptor from local cluster 2358 process = process_alloc(); 2398 process = kmem_alloc( bits_log2(sizeof(process_t)) , AF_ZERO ); 2359 2399 if( process == NULL ) 2360 2400 { … … 2506 2546 } // end process_init_create() 2507 2547 2508 ///////////////////////////////////////// 2509 void process_display( xptr_t process_xp ) 2510 { 2511 process_t * process_ptr; 2512 cxy_t process_cxy; 2548 /////////////////////////////////////////////////// 2549 uint32_t process_build_string( xptr_t process_xp, 2550 char * buffer, 2551 
uint32_t size )
2552 {
2553 int32_t length; // actual length of the string
2554
2555 process_t * process_ptr; // process descriptor local pointer
2556 cxy_t process_cxy; // process descriptor cluster identifier
2513 2557
2514 2558 xptr_t parent_xp; // extended pointer on parent process
2515 process_t * parent_ptr;
2516 cxy_t parent_cxy;
2559 process_t * parent_ptr; // parent process local pointer
2560 cxy_t parent_cxy; // parent process cluster identifier
2517 2561
2518 2562 xptr_t owner_xp; // extended pointer on owner process
2519 process_t * owner_ptr;
2520 cxy_t owner_cxy;
2521
2522 pid_t pid;
2523 pid_t ppid;
2524 lpid_t lpid;
2525 uint32_t state;
2526 uint32_t th_nr;
2563 process_t * owner_ptr; // owner process local pointer
2564 cxy_t owner_cxy; // owner process cluster identifier
2565
2566 pid_t pid; // process identifier
2567 pid_t ppid; // parent process identifier
2568 lpid_t lpid; // local process identifier
2569 uint32_t state; // termination state
2570 uint32_t th_nr; // number of threads
2527 2571
2528 2572 xptr_t txt_file_xp; // extended pointer on TXT_RX file descriptor
… …
2540 2584 char elf_name[CONFIG_VFS_MAX_NAME_LENGTH];
2541 2585
2586 assert( __FUNCTION__ , (size >= 80 ) , "buffer size too small" );
2587
2542 2588 // get cluster and local pointer on process
2543 2589 process_ptr = GET_PTR( process_xp );
… …
2566 2612 if( lpid ) // user process
2567 2613 {
2568
2569 2614 // get extended pointer on file descriptor associated to TXT_RX
2570 2615 txt_file_xp = hal_remote_l64( XPTR( owner_cxy , &owner_ptr->fd_array.array[0] ) );
2571 2616
2572 2617 assert( __FUNCTION__, (txt_file_xp != XPTR_NULL) ,
2618 "process must be attached to one TXT terminal" );
2574 2619
2575 2620 // get TXT_RX chdev pointers
… …
2582 2627 XPTR( txt_chdev_cxy , txt_chdev_ptr->name ) );
2583 2628
2629 // get TXT_owner process
2584 2630 txt_owner_xp = (xptr_t)hal_remote_l64( XPTR( txt_chdev_cxy,
2585 2631 &txt_chdev_ptr->ext.txt.owner_xp ) );
2586
2587 2632 // get process .elf name
2588 2633 elf_file_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->vfs_bin_xp ) );
… …
2594 2639 else // kernel process_zero
2595 2640 {
2596 // TXT name and .elf name are not registered in kernel process_zero
2641 // TXT name and .elf name are not registered in kernel process
2597 2642 strcpy( txt_name , "txt0_rx" );
2598 2643 txt_owner_xp = process_xp;
… …
2603 2648 if( txt_owner_xp == process_xp )
2604 2649 {
2605 nolock_printk("PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n",
2650 length = snprintk( buffer, size,
2651 "PID %X | %s (FG) | %X | PPID %X | TS %X | %d | %s\n",
2606 2652 pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
2607 2653 }
2608 2654 else
2609 2655 {
2610 nolock_printk("PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n",
2656 length = snprintk( buffer, size,
2657 "PID %X | %s (BG) | %X | PPID %X | TS %X | %d | %s\n",
2611 2658 pid, txt_name, process_ptr, ppid, state, th_nr, elf_name );
2612 2659 }
2660
2661 // check length
2662 if( length < 0 )
2663 {
2664 length = snprintk( buffer , size ,
2665 "buffer too small for process %x in cluster %x", pid , process_cxy );
2666 }
2667
2668 return length;
2669
2670 } // end process_build_string()
2671
2672 /////////////////////////////////////////
2673 void process_display( xptr_t process_xp )
2674 {
2675 char buffer[CONFIG_PROCESS_DISPLAY_BUF_SIZE];
2676
2677 // build the string to be displayed
2678 process_build_string( process_xp,
2679 buffer,
2680 CONFIG_PROCESS_DISPLAY_BUF_SIZE );
2681 // display the string
2682
nolock_puts( buffer ); 2683 2613 2684 } // end process_display() 2614 2685 -
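Note on the args relocation in step 4 of process_make_exec() above: each k_args[n] initially holds a kernel pointer inside the args buffer, and the rewrite loop gives it the address the same string will have once the buffer is mapped at args_base in the new process space. The stand-alone sketch below models only that arithmetic; the buffer content, the two argument strings and the user base address 0x00400000 are made-up illustration values, and the kernel kmem/vseg machinery is not represented.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ARGS_MAX 4                        /* models CONFIG_PROCESS_ARGS_MAX_NR */

    int main( void )
    {
        /* model of the args kernel buffer : pointer array first, strings behind */
        static _Alignas(char *) char k_buf[256];
        char ** k_args = (char **)k_buf;
        char *  k_str  = k_buf + ARGS_MAX * sizeof(char *);

        /* register two strings, leaving kernel (local) pointers in the array */
        strcpy( k_str , "init" );
        k_args[0] = k_str;
        k_str += strlen( k_str ) + 1;
        strcpy( k_str , "-v" );
        k_args[1] = k_str;

        /* hypothetical user base address of the mapped args vseg (models args_base) */
        intptr_t u_base = 0x00400000;

        /* rewrite each pointer : same offset inside the buffer, user base instead */
        for( int n = 0 ; n < 2 ; n++ )
        {
            k_args[n] = (char *)((intptr_t)k_args[n] + u_base - (intptr_t)k_args);
            printf("arg[%d] user address = %p\n", n , (void *)k_args[n] );
        }
        return 0;
    }

Because only the base changes, the offset of each string inside the buffer is preserved and no string is copied: the loop rewrites the pointer array in place.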
trunk/kernel/kern/process.h
r669 r683
98 98 * This structure defines the information required by the process_make_exec() function
99 99 * to create a new reference process descriptor, and the associated main thread.
100 * All fields in this structure are filled by the sys_exec() function, using the
101 * process_exec_get_strings() function.
100 * All fields in this structure are filled by the sys_exec() function.
102 101 *
103 102 * It contains three parts:
… …
106 105 * - the "envs_pointers" & "envs_nr" fields define the env variables (one env == one string).
107 106 *
108 * For both the arguments, and the environment variables, the array of pointers and the
109 * strings themselve are stored in kernel space in the same kernel buffer containing
110 * an integer number of pages, defined by CONFIG_VMM_ARGS_SIZE and CONFIG_VMM_ENVS_SIZE.
111 * This aligned kernel buffer (one or several contiguous physical pages) contains :
107 * For both the arguments and the environment variables, the array of pointers and the
108 * strings themselves are stored in the same kernel buffer. These kernel buffers contain
109 * an integer number of contiguous pages, defined by the CONFIG_VMM_ARGS_SIZE and
110 * CONFIG_VMM_ENVS_SIZE parameters respectively.
111 * Each kernel (args / envs) buffer contains :
112 112 * - in the first bytes, a fixed size kernel array of pointers on the strings.
113 113 * - in the following bytes, the strings themselves.
114 * The size of these arrays of pointers is defined by CONFIG_PROCESS_ARGS_MAX_NR
115 * and CONFIG_PROCESS_ENVS_MAX_NR.
116 *
117 * WARNING: The "args_pointers" & "envs_pointers" kernel buffer are directly mapped to
118 * the "args" and "envs" user vsegs to be accessed by the user process.
119 * Therefore, the arrays of pointers build by the sys_exec() function contain
120 * kernel pointers, but the process_make_exec() function replace these pointers
121 * by user pointers in the new process user space.
114 * The size of these arrays of pointers is defined by the CONFIG_PROCESS_ARGS_MAX_NR and
115 * CONFIG_PROCESS_ENVS_MAX_NR parameters respectively.
116 *
117 * WARNING (1) The "args_pointers[i]" & "envs_pointers[i]" stored in the dynamically
118 * allocated kernel buffers are local pointers. They must be extended by the
119 * local cluster identifier to compute a valid PPN.
120 * WARNING (2) The "args" & "envs" kernel buffers will be mapped to the "args" and "envs"
121 * user vsegs, to be accessed by the new user process.
122 * The process_make_exec() function must therefore replace the kernel pointers
123 * set by sys_exec(), by user pointers in the new process user space.
122 124 ********************************************************************************************/
123 125
… …
232 234 * The process GPT is initialised as required by the target architecture.
233 235 * The "kcode" and "kdata" segments are registered in the process VSL.
236 * This function does not return an error code: in case of failure, it prints a PANIC message
237 * on kernel terminal TXT0, and the core goes to sleep mode.
234 238 *********************************************************************************************
235 239 * @ process : [in] pointer on process descriptor to initialize.
… …
241 245 /*********************************************************************************************
242 246 * This function allocates memory and initializes the "process_init" descriptor and the
243 * associated "thread_init" descriptor. It is called once at the end of the kernel
244 * initialisation procedure.
Its local process identifier is 1, and parent process
245 * is the kernel process in cluster 0.
247 * associated "thread_init" descriptor. It is called once at the end of the kernel_init()
248 * procedure. Its local process identifier is 1, and parent process is the kernel process.
246 249 * The "process_init" is the first user process, and all other user processes will be forked
247 250 * from this process. The code executed by "process_init" is stored in a .elf file, whose
248 251 * pathname is defined by the CONFIG_PROCESS_INIT_PATH configuration variable.
249 * The process_init does not use the [STDIN/STDOUT/STDERR] streams, but it is linked
250 * to kernel TXT0, because these streams must be defined for all user processes.
252 * This function does not return an error code: in case of failure, it prints a PANIC message
253 * on kernel terminal TXT0, and the core goes to sleep mode.
251 254 ********************************************************************************************/
252 255 void process_init_create( void );
… …
415 418
416 419 /*********************************************************************************************
417 * This function is called twice by the sys_exec() function :
418 * - to register the main() arguments (args) in the process <exec_info> structure.
419 * - to register the environment variables (envs) in the <exec_info> structure.
420 * In both cases the input is an array of NULL terminated string pointers in user space,
421 * identified by the <u_pointers> argument. The strings can be dispatched anywhere in
422 * the calling user process space. The max number of envs, and the max number of args are
423 * defined by the CONFIG_PROCESS_ARGS_NR and CONFIG_PROCESS_ENVS_MAX_NR parameters.
424 *********************************************************************************************
425 * Implementation Note:
426 * Both the array of pointers and the strings themselve are stored in kernel space in one
427 * single, dynamically allocated, kernel buffer containing an integer number of pages,
428 * defined by the CONFIG_VMM_ENVS_SIZE and CONFIG_VMM_STACK_SIZE parameters.
429 * This aligned kernel buffer (one or several contiguous physical pages) contains :
430 * - in the first bytes a fixed size kernel array of kernel pointers on the strings.
431 * - in the following bytes the strings themselves.
432 * All the pointers, and the actual number of strings are stored in the process exec_info
433 * structure defined in the <process.h> file.
434 *********************************************************************************************
435 * @ is_args : [in] true if called for (args) / false if called for (envs).
436 * @ u_pointers : [in] array of pointers on the strings (in user space).
437 * @ exec_info : [inout] pointer on the exec_info structure.
438 * @ return 0 if success / non-zero if too many strings or no memory.
439 ********************************************************************************************/
440 error_t process_exec_get_strings( bool_t is_args,
441 char ** u_pointers,
442 exec_info_t * exec_info );
443
444 /*********************************************************************************************
445 420 * This function implements the "execve" system call, and is called by sys_exec() function.
446 421 * It must be called by the main thread of the calling "old" process.
… …
595 570 * @ dst_xp : extended pointer on the destination process descriptor (in owner cluster).
596 571 * @ src_xp : extended pointer on the source process descriptor (in owner cluster).
597 ********************************************************************************************/
598 void process_fd_replicate( xptr_t dst_xp,
599 xptr_t src_xp );
572 * @ return 0 if success / return -1 if failure
573 ********************************************************************************************/
574 error_t process_fd_replicate( xptr_t dst_xp,
575 xptr_t src_xp );
600 576
601 577 /*********************************************************************************************
… …
617 593 ********************************************************************************************/
618 594 void process_fd_display( xptr_t process_xp );
595
596 /*********************************************************************************************
597 * This utility function builds, in the buffer defined by the <buffer> and <size> arguments,
598 * a printable string describing the current state of the process descriptor identified
599 * by the <process_xp> argument, or a WARNING message if the buffer size is too small.
600 *********************************************************************************************
601 * @ process_xp : extended pointer on target process descriptor.
602 * @ buffer : kernel buffer for string.
603 * @ size : buffer size in bytes.
604 * @ return the string length (not including NUL); the returned string can be a WARNING message.
605 ********************************************************************************************/
606 uint32_t process_build_string( xptr_t process_xp,
607 char * buffer,
608 uint32_t size );
619 609
620 610 /******************** Thread Related Operations *****************************************/
-
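The exec_info layout documented in process.h above (a fixed-size pointer array in the first bytes of each args/envs buffer, the strings packed in the following bytes) reduces to simple bookkeeping. The sketch below is a user-space model under assumed values for CONFIG_PPM_PAGE_SIZE, CONFIG_VMM_ARGS_SIZE and CONFIG_PROCESS_ARGS_MAX_NR; the copy from user space done by the real code is replaced by a plain memcpy().

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE   4096        /* models CONFIG_PPM_PAGE_SIZE       */
    #define ARGS_PAGES  1           /* models CONFIG_VMM_ARGS_SIZE       */
    #define ARGS_MAX_NR 4           /* models CONFIG_PROCESS_ARGS_MAX_NR */

    int main( void )
    {
        static _Alignas(char *) char k_buf[ARGS_PAGES * PAGE_SIZE];

        /* first bytes : fixed-size pointer array / following bytes : the strings */
        uint32_t pointers_bytes = ARGS_MAX_NR * sizeof(char *);
        char **  k_pointers     = (char **)k_buf;
        char *   k_buf_ptr      = k_buf + pointers_bytes;
        uint32_t k_buf_space    = (ARGS_PAGES * PAGE_SIZE) - pointers_bytes;

        /* register one string, as the removed process_exec_get_strings() did */
        const char * s      = "/bin/user/init.elf";
        uint32_t     length = (uint32_t)strlen( s ) + 1;

        if( length > k_buf_space ) return -1;      /* buffer overflow check */

        memcpy( k_buf_ptr , s , length );
        k_pointers[0]  = k_buf_ptr;
        k_buf_ptr     += length;
        k_buf_space   -= length;

        printf("stored <%s> / %u bytes left for more strings\n",
               k_pointers[0] , k_buf_space );
        return 0;
    }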
trunk/kernel/kern/scheduler.c
r669 r683
2 2 * scheduler.c - Core scheduler implementation.
3 3 *
4 * Author Alain Greiner (2016,2017,2018)
4 * Author Alain Greiner (2016,2017,2018,2019,2020)
5 5 *
6 6 * Copyright (c) UPMC Sorbonne Universites
… …
63 63 // @ returns pointer on selected thread descriptor
64 64 ////////////////////////////////////////////////////////////////////////////////////////////
65 static thread_t * sched_select( scheduler_t * sched )
65 static thread_t * __attribute__((__noinline__)) sched_select( scheduler_t * sched )
66 66 {
67 67 thread_t * thread;
… …
83 83 while( done == false )
84 84 {
85
86 // check kernel threads list
87 assert( __FUNCTION__, (count < sched->k_threads_nr), "bad kernel threads list" );
88
89 85 // get next entry in kernel list
90 86 current = current->next;
… …
117 113 while( done == false )
118 114 {
119
120 // check user threads list
121 assert( __FUNCTION__, (count < sched->u_threads_nr), "bad user threads list" );
122
123 115 // get next entry in user list
124 116 current = current->next;
-
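sched_select() above resumes its scan at the entry that follows the previously selected thread and stops after one full turn, first over the kernel threads list, then over the user threads list, which gives a per-list round-robin policy. A minimal single-list model of that scan, with a plain next field standing in for the kernel list_entry machinery:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct thread_s
    {
        const char *      name;
        bool              runnable;
        struct thread_s * next;        /* circular list of attached threads */
    }
    thread_t;

    /* round-robin scan : start after <current>, give up after one full turn */
    static thread_t * sched_select_model( thread_t * current , unsigned count )
    {
        thread_t * t = current;
        for( unsigned n = 0 ; n < count ; n++ )
        {
            t = t->next;
            if( t->runnable ) return t;
        }
        return NULL;                   /* no runnable thread in the list */
    }

    int main( void )
    {
        thread_t a = { "A" , false , NULL };
        thread_t b = { "B" , true  , NULL };
        thread_t c = { "C" , true  , NULL };
        a.next = &b; b.next = &c; c.next = &a;     /* build the circular list */

        thread_t * selected = sched_select_model( &a , 3 );
        printf("selected thread %s\n", selected ? selected->name : "none" );  /* B */
        return 0;
    }

Starting at current->next rather than at a fixed head is what makes the policy fair: every runnable thread is reached before the scan wraps back to the previous winner.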
trunk/kernel/kern/scheduler.h
r662 r683 2 2 * scheduler.h - Core scheduler definition. 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019,2020)4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/kern/thread.c
r669 r683
3 3 *
4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012)
5 * Alain Greiner (2016,2017,2018,2019,2020)
5 * Alain Greiner (2016,2017,2018,2019,2020)
6 6 *
7 7 * Copyright (c) UPMC Sorbonne Universites
… …
67 67 }
68 68 }
69
70 /////////////////////////////////////////////////////////////////////////////////////
71 // This static function allocates physical memory for a thread descriptor.
72 // It can be called by the three functions:
73 // - thread_user_create()
74 // - thread_user_fork()
75 // - thread_kernel_create()
76 /////////////////////////////////////////////////////////////////////////////////////
77 // @ return pointer on thread descriptor if success / return NULL if failure.
78 /////////////////////////////////////////////////////////////////////////////////////
79 static thread_t * thread_alloc( void )
80 {
81 kmem_req_t req; // kmem request
82
83 // allocates memory for thread descriptor + kernel stack
84 req.type = KMEM_PPM;
85 req.order = CONFIG_THREAD_DESC_ORDER;
86 req.flags = AF_KERNEL | AF_ZERO;
87
88 return kmem_alloc( &req );
89
90 } // end thread_alloc()
91
92 69
93 70 /////////////////////////////////////////////////////////////////////////////////////
… …
144 121
145 122 #if DEBUG_BUSYLOCK
146 123 xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) );
147 124 #endif
148 125
… …
161 138 list_entry_init( &thread->sched_list );
162 139
163 // initialize the embedded alarm to unlink
140 // initialize the embedded alarm
164 141 list_entry_init( &thread->alarm.list );
165 142
… …
187 164 dqdt_increment_threads();
188 165
166 // initialize timer alarm
167 alarm_init( &thread->alarm );
168
189 169 #if CONFIG_INSTRUMENTATION_PGFAULTS
190 191 192 193 194 195 196 197 198
170 thread->info.false_pgfault_nr = 0;
171 thread->info.false_pgfault_cost = 0;
172 thread->info.false_pgfault_max = 0;
173 thread->info.local_pgfault_nr = 0;
174 thread->info.local_pgfault_cost = 0;
175 thread->info.local_pgfault_max = 0;
176 thread->info.global_pgfault_nr = 0;
177 thread->info.global_pgfault_cost = 0;
178 thread->info.global_pgfault_max = 0;
199 179 #endif
200 180
… …
273 253
274 254 // allocate memory for thread descriptor
275 thread = thread_alloc();
255 thread = kmem_alloc( CONFIG_THREAD_DESC_ORDER , AF_ZERO );
276 256
277 257 if( thread == NULL )
… …
467 447
468 448 // allocate memory for child thread descriptor
469 child_ptr = thread_alloc();
449 child_ptr = kmem_alloc( CONFIG_THREAD_DESC_ORDER , AF_ZERO );
470 450
471 451 if( child_ptr == NULL )
… …
677 657 uint32_t cycle = (uint32_t)hal_get_cycles();
678 658 if( DEBUG_THREAD_USER_EXEC < cycle )
679 printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
680 __FUNCTION__, process->pid, thread->trdid, cycle );
659 printk("\n[%s] thread[%x,%x] enter / argc %d / argv %x / cycle %d\n",
660 __FUNCTION__, process->pid, thread->trdid, argc, argv, cycle );
681 661 #endif
682 662
… …
727 707 #endif
728 708
729 // restore CPU registers ...
andjump to user code709 // restore CPU registers => jump to user code 730 710 hal_do_cpu_restore( thread->cpu_context ); 731 711 … … 759 739 760 740 // allocate memory for new thread descriptor 761 thread = thread_alloc();741 thread = kmem_alloc( CONFIG_THREAD_DESC_ORDER , AF_ZERO ); 762 742 763 743 if( thread == NULL ) … … 839 819 840 820 // check arguments 841 assert( __FUNCTION__, (type == THREAD_IDLE) , "illegal thread type" ); 842 assert( __FUNCTION__, (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" ); 821 assert( __FUNCTION__, (type == THREAD_IDLE), 822 "illegal thread type" ); 823 824 assert( __FUNCTION__, (core_lid < LOCAL_CLUSTER->cores_nr), 825 "illegal core index" ); 843 826 844 827 // set type in thread descriptor … … 848 831 error = process_register_thread( &process_zero , thread , &trdid ); 849 832 850 assert( __FUNCTION__, (error == 0), "cannot register idle_thread in kernel process" ); 833 assert( __FUNCTION__, (error == 0), 834 "cannot register idle_thread in kernel process" ); 851 835 852 836 // set trdid in thread descriptor … … 863 847 NULL ); // no user stack for a kernel thread 864 848 865 assert( __FUNCTION__, (error == 0), "cannot initialize idle_thread" ); 849 assert( __FUNCTION__, (error == 0), 850 "cannot initialize idle_thread" ); 866 851 867 852 // allocate CPU context 868 853 error = hal_cpu_context_alloc( thread ); 869 854 870 assert( __FUNCTION__, (error == 0), "cannot allocate CPU context" ); 855 assert( __FUNCTION__,(error == 0), 856 "cannot allocate CPU context" ); 871 857 872 858 // initialize CPU context … … 963 949 964 950 // release memory for thread descriptor (including kernel stack) 965 kmem_req_t req; 966 req.type = KMEM_PPM; 967 req.ptr = thread; 968 kmem_free( &req ); 951 kmem_free( thread , CONFIG_THREAD_DESC_ORDER ); 969 952 970 953 #if DEBUG_THREAD_DESTROY … … 1091 1074 } // end thread_unblock() 1092 1075 1093 ////////////////////////////////////// 1076 ////////////////////////////////////////////// 1094 1077 void thread_delete_request( xptr_t target_xp, 1095 bool_t is_forced )1078 bool_t is_forced ) 1096 1079 { 1097 1080 reg_t save_sr; // for critical section … … 1475 1458 thread->busylocks - 1, (uint32_t)hal_get_cycles() ); 1476 1459 1477 #if DEBUG_BUSYLOCK 1460 #if DEBUG_BUSYLOCK_TYPE 1478 1461 1479 1462 // scan list of busylocks -
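The kmem_alloc( CONFIG_THREAD_DESC_ORDER , AF_ZERO ) calls introduced above return one zeroed block of 2^14 bytes that holds both the thread descriptor and its kernel stack, replacing the removed thread_alloc() helper. A user-space emulation of that layout, with aligned_alloc() standing in for the kernel allocator:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define THREAD_DESC_ORDER 14                         /* models CONFIG_THREAD_DESC_ORDER */
    #define THREAD_DESC_SIZE  (1u << THREAD_DESC_ORDER)  /* 16 Kbytes                       */

    typedef struct thread_s { uint32_t trdid; /* ... other fields ... */ } thread_t;

    int main( void )
    {
        /* one aligned block : descriptor at the base, kernel stack in the upper part */
        thread_t * thread = aligned_alloc( THREAD_DESC_SIZE , THREAD_DESC_SIZE );
        if( thread == NULL ) return -1;

        memset( thread , 0 , THREAD_DESC_SIZE );         /* models the AF_ZERO flag */

        /* the kernel stack grows down from the top of the same block */
        char * stack_top = (char *)thread + THREAD_DESC_SIZE;

        printf("descriptor at %p / stack top at %p\n", (void *)thread , (void *)stack_top );

        free( thread );
        return 0;
    }

With the block naturally aligned on its own size, the descriptor base can be recovered from any address inside the kernel stack by masking the low CONFIG_THREAD_DESC_ORDER bits.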
trunk/kernel/kern/thread.h
r669 r683
3 3 *
4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012)
5 * Alain Greiner (2016,2017,2018,2019,2020)
5 * Alain Greiner (2016,2017,2018,2019,2020)
6 6 *
7 7 * Copyright (c) UPMC Sorbonne Universites
… …
96 96 #define THREAD_BLOCKED_LOCK 0x1000 /*! ANY : wait queuelock or rwlock */
97 97 #define THREAD_BLOCKED_CLIENT 0x2000 /*! DEV : wait clients queue non empty */
98 #define THREAD_BLOCKED_ALARM 0x4000 /*! ANY : wait a timer based alarm */
98 #define THREAD_BLOCKED_SLEEP 0x4000 /*! ANY : wait a timer based alarm */
99 99
100 100 /***************************************************************************************
-
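The THREAD_BLOCKED_* values above are one-hot bits of a single blocked bit-vector, so several blocking causes may be pending at once and the thread is runnable only when the whole vector is zero. A minimal model of the set and clear operations (the kernel updates this field with remote atomic accesses, not plain assignments, and THREAD_BLOCKED_IO below is only an illustrative second flag):

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_BLOCKED_IO    0x0004    /* hypothetical flag, for illustration only */
    #define THREAD_BLOCKED_SLEEP 0x4000    /* from the table above                     */

    int main( void )
    {
        uint32_t blocked = 0;

        blocked |= THREAD_BLOCKED_SLEEP;           /* block on a timer based alarm   */
        blocked |= THREAD_BLOCKED_IO;              /* and on a pending I/O operation */

        blocked &= ~THREAD_BLOCKED_SLEEP;          /* the alarm fired : clear it     */

        /* the thread can be selected again only when no cause remains */
        printf("runnable = %s\n", (blocked == 0) ? "yes" : "no" );
        return 0;
    }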
trunk/kernel/kernel_config.h
r675 r683
25 25 #define _KERNEL_CONFIG_H_
26 26
27 ////////////////////////////////////////////////////////////////////////////////////////////
27 ////////////////////////////////////////////////////////////////////////////////////////
28 28 // KERNEL DEBUG
29 // Each debug variable control one kernel function, or one small group of functions.
30 // - trace is generated only when cycle > debug_value.
31 // - detailed trace is enabled when (debug_value & Ox1) is non zero.
32 ////////////////////////////////////////////////////////////////////////////////////////////
29 //
30 // 1) All errors detected by the kernel during a system call are reported to the
31 //    user process using the ERRNO mechanism. Moreover, the DEBUG_***_ERROR variables
32 //    force the display on TXT0 of the error messages generated by
33 //    the low-level kernel functions, to help the error cause analysis.
34 //
35 // 2) The other debug variables force the display on TXT0 of a trace for one
36 //    specific kernel function, or one small group of functions.
37 //    All these trace variables (except locks, kmem, kcm) respect the following rules:
38 //    - the trace is generated if the debug variable is non zero.
39 //    - trace is generated only when cycle > debug_value.
40 //    - detailed trace is enabled when (debug_value & 0x1) is non zero.
41 ////////////////////////////////////////////////////////////////////////////////////////
42
43 // error reporting variables
44
45 #define DEBUG_DEV_NIC_ERROR 1
46 #define DEBUG_KCM_ERROR 1
47 #define DEBUG_KMEM_ERROR 1
48 #define DEBUG_MAPPER_ERROR 1
49 #define DEBUG_PPM_ERROR 1
50 #define DEBUG_PROCESS_ERROR 1
51 #define DEBUG_SOCKET_ERROR 1
52 #define DEBUG_SYSCALLS_ERROR 1
53 #define DEBUG_THREAD_ERROR 1
54 #define DEBUG_USER_DIR_ERROR 1
55 #define DEBUG_VFS_ERROR 1
56 #define DEBUG_VMM_ERROR 1
57
58
59 // trace activation variables
33 60
34 61 #define DEBUG_BARRIER_CREATE 0
… …
36 63 #define DEBUG_BARRIER_WAIT 0
37 64
38 #define DEBUG_BUSYLOCK_TYPE 0
39 #define DEBUG_BUSYLOCK_PID 0
40 #define DEBUG_BUSYLOCK_TRDID 0
65 #define DEBUG_BUSYLOCK_TYPE 0 // type 0 undefined => no debug
66 #define DEBUG_BUSYLOCK_PID 0 // owner process PID
67 #define DEBUG_BUSYLOCK_TRDID 0 // owner thread TRDID
41 68
42 69 #define DEBUG_CHDEV_CMD_RX 0
… …
115 142
116 143 #define DEBUG_KCM 0
117 #define DEBUG_KCM_REMOTE 0
144 #define DEBUG_KCM_ORDER 0 // filter for DEBUG_KCM
145 #define DEBUG_KCM_CXY 0 // filter for DEBUG_KCM
146
147 #define DEBUG_KERNEL_INIT 0
118 148
119 149 #define DEBUG_KMEM 0
120 #define DEBUG_KMEM_REMOTE 0
150 #define DEBUG_KMEM_ORDER 0 // filter for DEBUG_KMEM
151 #define DEBUG_KMEM_CXY 0 // filter for DEBUG_KMEM
121 152
122
123 #define DEBUG_KERNEL_INIT 0
124 153
125 154 #define DEBUG_MAPPER_GET_PAGE 0
… …
133 162
134 163 #define DEBUG_PPM_ALLOC_PAGES 0
135 164 #define DEBUG_PPM_FREE_PAGES 0
136 #define DEBUG_PPM_REMOTE_ALLOC_PAGES 0
137 #define DEBUG_PPM_REMOTE_FREE_PAGES 0
138 165
139 166 #define DEBUG_PROCESS_COPY_INIT 0
140 167 #define DEBUG_PROCESS_DESTROY 0
141 #define DEBUG_PROCESS_EXEC_GET_STRINGS 0
142 168 #define DEBUG_PROCESS_FD_REGISTER 0
… …
151 177 #define DEBUG_PROCESS_ZERO_CREATE 0
152 178
153 #define DEBUG_QUEUELOCK_TYPE 0
154 #define DEBUG_QUEUELOCK_PTR 0
155 #define DEBUG_QUEUELOCK_CXY 0
179 #define DEBUG_QUEUELOCK_TYPE 0 // type 0 undefined => no debug
180 #define DEBUG_QUEUELOCK_PTR 0 // lock local pointer
181 #define DEBUG_QUEUELOCK_CXY 0 // lock cluster identifier
156 182
157 183 #define DEBUG_RPC_CLIENT_GENERIC 0
… …
170 196
#define DEBUG_RPC_VMM_SET_COW 0 171 197 172 #define DEBUG_RWLOCK_TYPE 0 173 #define DEBUG_RWLOCK_PTR 0 174 #define DEBUG_RWLOCK_CXY 0 198 #define DEBUG_RWLOCK_TYPE 0 // type 0 undefined => no debug 199 #define DEBUG_RWLOCK_PTR 0 // lock local pointer 200 #define DEBUG_RWLOCK_CXY 0 // lock cluster identifier 175 201 176 202 #define DEBUG_SCHED_HANDLE_SIGNALS 0 … … 189 215 #define DEBUG_SOCKET_DESTROY 0 190 216 #define DEBUG_SOCKET_LISTEN 0 217 #define DEBUG_SOCKET_SEND 0 191 218 #define DEBUG_SOCKET_RECV 0 192 #define DEBUG_SOCKET_SEND 0193 219 #define DEBUG_SOCKET_LINK 0 194 195 #define DEBUG_SYSCALLS_ERROR 0196 220 197 221 #define DEBUG_SYS_BARRIER 0 … … 205 229 #define DEBUG_SYS_FG 0 206 230 #define DEBUG_SYS_FORK 0 207 #define DEBUG_SYS_GET _CONFIG 0231 #define DEBUG_SYS_GET 0 208 232 #define DEBUG_SYS_GETCWD 0 209 233 #define DEBUG_SYS_GETPID 0 210 #define DEBUG_SYS_GET_BEST_CORE 0211 #define DEBUG_SYS_GET_CORE_ID 0212 #define DEBUG_SYS_GET_NB_CORES 0213 #define DEBUG_SYS_GET_THREAD_INFO 0214 234 #define DEBUG_SYS_ISATTY 0 215 235 #define DEBUG_SYS_IS_FG 0 … … 250 270 #define DEBUG_THREAD_USER_EXEC 0 251 271 252 #define DEBUG_USER_DIR 0 253 254 #define DEBUG_VFS_ERROR 0 272 #define DEBUG_USER_DIR_CREATE 0 273 #define DEBUG_USER_DIR_DESTROY 0 255 274 256 275 #define DEBUG_VFS_ADD_CHILD 0 … … 277 296 #define DEBUG_VFS_UNLINK 0 278 297 298 279 299 #define DEBUG_VMM_CREATE_VSEG 0 280 300 #define DEBUG_VMM_DESTROY 0 … … 352 372 //////////////////////////////////////////////////////////////////////////////////////////// 353 373 354 #define CONFIG_VERSION "Version 2. 3 / November 2019"374 #define CONFIG_VERSION "Version 2.4 / November 2020" 355 375 356 376 //////////////////////////////////////////////////////////////////////////////////////////// … … 370 390 #define CONFIG_CLUSTER_SPAN 32 // ln(phys. 
address space per cluster) 371 391 #define CONFIG_CACHE_LINE_SIZE 64 // number of bytes in cache line 392 #define CONFIG_CACHE_LINE_ORDER 6 // ln( cache line size ) 372 393 373 394 #define CONFIG_CACHE_LINE_ALIGNED __attribute__((aligned(CONFIG_CACHE_LINE_SIZE))) … … 389 410 390 411 //////////////////////////////////////////////////////////////////////////////////////////// 412 // DQDT 413 //////////////////////////////////////////////////////////////////////////////////////////// 414 415 #define CONFIG_DQDT_LEVELS_NR 5 416 417 //////////////////////////////////////////////////////////////////////////////////////////// 418 // FBF WINDOWS 419 //////////////////////////////////////////////////////////////////////////////////////////// 420 421 #define CONFIG_FBF_WINDOWS_MAX_NR 64 // max number of windows 422 #define CONFIG_FBF_WINDOWS_MAX_WIDTH 1024 // max number of pixels in FBF line 423 #define CONFIG_FBF_WINDOWS_MAX_HEIGHT 1024 // max number of lines in FBF 424 425 //////////////////////////////////////////////////////////////////////////////////////////// 391 426 // PROCESS MANAGEMENT 392 427 //////////////////////////////////////////////////////////////////////////////////////////// 393 428 429 #define CONFIG_PROCESS_INIT_PATH "/bin/user/init.elf" 394 430 #define CONFIG_MAX_PROCESS_PER_CLUSTER 16 // max number of owned process 395 431 #define CONFIG_PROCESS_ARGS_MAX_NR 4 // max number of args per process … … 399 435 #define CONFIG_PROCESS_HEAP_MIN_SIZE 0x00010000 // user heap min size (bytes) 400 436 #define CONFIG_PROCESS_HEAP_MAX_SIZE 0x30000000 // user heap max size (bytes) 401 #define CONFIG_PROCESS_INIT_PATH "/bin/user/init.elf" 437 #define CONFIG_PROCESS_DISPLAY_BUF_SIZE 128 // display one process on one line 438 439 //////////////////////////////////////////////////////////////////////////////////////////// 440 // PHYSICAL MEMORY MANAGEMENT 441 //////////////////////////////////////////////////////////////////////////////////////////// 442 443 #define CONFIG_PPM_PAGE_SIZE 4096 // physical page size (bytes) 444 #define CONFIG_PPM_PAGE_ORDER 12 // ln(physical page size) 445 #define CONFIG_PPM_PAGE_MASK 0x00000FFF // physical page mask 446 #define CONFIG_PPM_MAX_ORDER 16 // ln(total number of pages per cluster) 447 #define CONFIG_PPM_MAX_RSVD 32 // max reserved zones on the machine 448 449 #define CONFIG_PPM_PAGE_ALIGNED __attribute__((aligned(CONFIG_PPM_PAGE_SIZE))) 450 451 //////////////////////////////////////////////////////////////////////////////////////////// 452 // RANDOM NUMBERS 453 //////////////////////////////////////////////////////////////////////////////////////////// 454 455 #define CONFIG_RDNG_PARAM_A 65519 456 #define CONFIG_RDNG_PARAM_C 64037 457 458 //////////////////////////////////////////////////////////////////////////////////////////// 459 // REMOTE PROCEDURE CALL 460 //////////////////////////////////////////////////////////////////////////////////////////// 461 462 #define CONFIG_RPC_FIFO_SLOTS 16 463 #define CONFIG_RPC_FIFO_MAX_ITERATIONS 1024 464 #define CONFIG_RPC_THREADS_MAX 4 // max number of RPC threads per core 465 466 //////////////////////////////////////////////////////////////////////////////////////////// 467 // SCHEDULING 468 //////////////////////////////////////////////////////////////////////////////////////////// 469 470 #define CONFIG_SCHED_TICKS_PER_SECOND 1 // number of TICKS per seconds 471 #define CONFIG_SCHED_TICKS_PER_QUANTUM 1 // number of ticks between scheduling 472 #define CONFIG_SCHED_MAX_THREADS_NR 32 // max number of threads 
per core 473 #define CONFIG_SCHED_IDLE_MODE_SLEEP 0 // idle thread use sleep mode if non 0 474 475 //////////////////////////////////////////////////////////////////////////////////////////// 476 // TCP/UDP/IP 477 //////////////////////////////////////////////////////////////////////////////////////////// 478 479 #define CONFIG_SOCK_ISS_CLIENT 0x10000 // initial sequence number for TCP client 480 #define CONFIG_SOCK_ISS_SERVER 0x20000 // initial sequence number for TCP server 481 #define CONFIG_SOCK_MAX_WINDOW 0xFFFFF // initial window (bytes) for TCP 482 #define CONFIG_SOCK_RETRY_TIMEOUT 1000000 // number of cycles before retry for TCP 483 #define CONFIG_SOCK_QUEUES_DEPTH 4 // max number of packets in RX/TX queues 484 #define CONFIG_SOCK_RX_BUF_ORDER 20 // ln( number of bytes in socket rx_buf ) 485 #define CONFIG_SOCK_TX_BUF_ORDER 20 // ln( number of bytes in socket tx_buf ) 486 #define CONFIG_SOCK_R2T_BUF_SIZE 8 // max number of requests in R2T queue 487 #define CONFIG_SOCK_CRQ_BUF_SIZE 8 // max number of requests in CRQ queue 488 #define CONFIG_SOCK_PKT_BUF_SIZE 2048 // max length for one ETH/IP/TCP packet 489 #define CONFIG_SOCK_PAYLOAD_MAX 1500 // max user payload length for packet 490 491 //////////////////////////////////////////////////////////////////////////////////////////// 492 // THREADS 493 //////////////////////////////////////////////////////////////////////////////////////////// 494 495 #define CONFIG_THREADS_MAX_PER_CLUSTER 32 // max threads per cluster per process 496 #define CONFIG_THREAD_DESC_SIZE 0x4000 // thread desc size (with kernel stack) 497 #define CONFIG_THREAD_DESC_ORDER 14 // ln( number of bytes ) 402 498 403 499 //////////////////////////////////////////////////////////////////////////////////////////// … … 419 515 #define CONFIG_MAPPER_GRDXT_W2 7 // number of bits for RADIX_TREE_IX2 420 516 #define CONFIG_MAPPER_GRDXT_W3 7 // number of bits for RADIX_TREE_IX3 421 422 ////////////////////////////////////////////////////////////////////////////////////////////423 // FBF WINDOWS424 ////////////////////////////////////////////////////////////////////////////////////////////425 426 #define CONFIG_FBF_WINDOWS_MAX_NR 64 // max number of windows427 #define CONFIG_FBF_WINDOWS_MAX_WIDTH 1024 // max number of pixels in FBF line428 #define CONFIG_FBF_WINDOWS_MAX_HEIGHT 1024 // max number of lines in FBF429 430 ////////////////////////////////////////////////////////////////////////////////////////////431 // DQDT432 ////////////////////////////////////////////////////////////////////////////////////////////433 434 #define CONFIG_DQDT_LEVELS_NR 5435 436 ////////////////////////////////////////////////////////////////////////////////////////////437 // RANDOM NUMBERS438 ////////////////////////////////////////////////////////////////////////////////////////////439 440 #define CONFIG_RDNG_PARAM_A 65519441 #define CONFIG_RDNG_PARAM_C 64037442 443 ////////////////////////////////////////////////////////////////////////////////////////////444 // SCHEDULING445 ////////////////////////////////////////////////////////////////////////////////////////////446 447 #define CONFIG_SCHED_TICK_MS_PERIOD 10000 // number of milliseconds per period448 #define CONFIG_SCHED_TICKS_PER_QUANTUM 1 // number of ticks between scheduling449 #define CONFIG_SCHED_MAX_THREADS_NR 32 // max number of threads per core450 #define CONFIG_SCHED_IDLE_MODE_SLEEP 0 // idle thread use sleep mode if non 0451 452 ////////////////////////////////////////////////////////////////////////////////////////////453 // 
THREADS454 ////////////////////////////////////////////////////////////////////////////////////////////455 456 #define CONFIG_THREADS_MAX_PER_CLUSTER 32 // max threads per cluster per process457 #define CONFIG_THREAD_DESC_SIZE 0x4000 // thread desc size (with kernel stack)458 #define CONFIG_THREAD_DESC_ORDER 2 // ln( number of 4K pages )459 460 ////////////////////////////////////////////////////////////////////////////////////////////461 // REMOTE PROCEDURE CALL462 ////////////////////////////////////////////////////////////////////////////////////////////463 464 #define CONFIG_REMOTE_FIFO_SLOTS 16465 #define CONFIG_REMOTE_FIFO_MAX_ITERATIONS 1024466 #define CONFIG_RPC_THREADS_MAX 4 // max number of RPC threads per core467 517 468 518 //////////////////////////////////////////////////////////////////////////////////////////// … … 479 529 #define CONFIG_VMM_ARGS_SIZE 0x000001 // args vseg size : 4 Kbytes 480 530 #define CONFIG_VMM_ENVS_SIZE 0x000004 // envs vseg size : 16 Kbytes 481 #define CONFIG_VMM_STACK_SIZE 0x001000 // single stack vseg size : 16 Mbytes 482 483 #define CONFIG_VMM_HEAP_MAX_ORDER 18 // max size of MMAP vseg : 1 Gbytes 484 485 //////////////////////////////////////////////////////////////////////////////////////////// 486 // PHYSICAL MEMORY MANAGEMENT 487 //////////////////////////////////////////////////////////////////////////////////////////// 488 489 #define CONFIG_PPM_PAGE_SIZE 4096 // physical page size (bytes) 490 #define CONFIG_PPM_PAGE_SHIFT 12 // physical page shift (bits) 491 #define CONFIG_PPM_PAGE_MASK 0x00000FFF // physical page mask 492 #define CONFIG_PPM_MAX_ORDER 16 // ln(total number of pages per cluster) 493 #define CONFIG_PPM_HEAP_ORDER 10 // ln(number of heap pages per cluster) 494 #define CONFIG_PPM_MAX_RSVD 32 // max reserved zones on the machine 495 496 #define CONFIG_PPM_PAGE_ALIGNED __attribute__((aligned(CONFIG_PPM_PAGE_SIZE))) 531 #define CONFIG_VMM_STACK_SIZE 0x000100 // single stack vseg size : 1 Mbytes 532 533 #define CONFIG_VMM_HEAP_MAX_ORDER 18 // max size of MMAP vseg : 1 Gbytes 497 534 498 535 //////////////////////////////////////////////////////////////////////////////////////////// … … 502 539 #define CONFIG_PRINTK_BUF_SIZE 0x800 // max length of a formated string 503 540 #define CONFIG_PIPE_BUF_SIZE 0x1000 // max number of bytes in a pipe buffer 504 #define CONFIG_SOCK_RX_BUF_SIZE 0x100000 // max number of bytes in RX buffer505 #define CONFIG_SOCK_R2T_BUF_SIZE 0x64 // max number of requests in R2T queue506 #define CONFIG_SOCK_CRQ_BUF_SIZE 0x8 // max number of requests in CRQ queue507 #define CONFIG_SOCK_PKT_BUF_SIZE 0x800 // max length for one ETH/IP/TCP packet508 541 509 542 //////////////////////////////////////////////////////////////////////////////////////////// -
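The configuration rework above generalises the power-of-two "order" convention: CONFIG_PPM_PAGE_SHIFT becomes CONFIG_PPM_PAGE_ORDER, and CONFIG_THREAD_DESC_ORDER now counts bytes (14) instead of 4 Kbytes pages (2). The minimal user-space sketch below only illustrates why this convention pays off (size, mask and index arithmetic all reduce to shifts); the constants are copied from the hunks above, and log2_ceil() is a stand-in whose round-up behaviour is assumed to match the kernel bits_log2().

#include <stdio.h>
#include <stdint.h>

#define PPM_PAGE_ORDER   12                       // ln( page size )
#define PPM_PAGE_SIZE    (1u << PPM_PAGE_ORDER)   // 4096 bytes
#define PPM_PAGE_MASK    (PPM_PAGE_SIZE - 1)      // 0x00000FFF

// smallest order such that (1 << order) covers size ;
// stand-in for the kernel bits_log2()
static uint32_t log2_ceil( uint32_t size )
{
    uint32_t order = 0;
    while( (1u << order) < size ) order++;
    return order;
}

int main( void )
{
    uint32_t vaddr = 0x30001234;

    printf("vpn    = %x\n", vaddr >> PPM_PAGE_ORDER );   // virtual page number
    printf("offset = %x\n", vaddr &  PPM_PAGE_MASK );    // offset in page
    printf("order(3000) = %u\n", log2_ceil( 3000 ) );    // 12 : one full page
    return 0;
}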
trunk/kernel/libk/elf.c
r671 r683 161 161 { 162 162 type = VSEG_TYPE_CODE; 163 process->vmm.code_vpn_base = vbase >> CONFIG_PPM_PAGE_SHIFT;163 process->vmm.code_vpn_base = vbase >> CONFIG_PPM_PAGE_ORDER; 164 164 } 165 165 else // found DATA segment 166 166 { 167 167 type = VSEG_TYPE_DATA; 168 process->vmm.data_vpn_base = vbase >> CONFIG_PPM_PAGE_SHIFT;168 process->vmm.data_vpn_base = vbase >> CONFIG_PPM_PAGE_ORDER; 169 169 } 170 170 … 215 215 { 216 216 uint32_t new_offset; // unused, required by vfs_lseek() 217 kmem_req_t req; // kmem request for program header 218 217 Elf_Ehdr header; // local buffer for .elf header 219 218 void * segs_base; // pointer on buffer for segment descriptors array … 278 277 279 278 // allocate memory for segment descriptors array 280 req.type = KMEM_KCM; 281 req.order = bits_log2(segs_size); 282 req.flags = AF_KERNEL; 283 segs_base = kmem_alloc( &req ); 279 segs_base = kmem_alloc( bits_log2(segs_size) , AF_NONE ); 284 280 285 281 if( segs_base == NULL ) … 295 291 { 296 292 printk("\n[ERROR] in %s : cannot seek for descriptors array\n", __FUNCTION__ ); 297 req.ptr = segs_base; 298 kmem_free( &req ); 293 kmem_free( segs_base , bits_log2(segs_size) ); 299 294 return -1; 300 295 } … 314 309 { 315 310 printk("\n[ERROR] in %s : cannot read segments descriptors\n", __FUNCTION__ ); 316 req.ptr = segs_base; 317 kmem_free( &req ); 311 kmem_free( segs_base , bits_log2(segs_size) ); 318 312 return -1; 319 313 } … 331 325 if( error ) 332 326 { 333 req.ptr = segs_base; 334 kmem_free( &req); 327 printk("\n[ERROR] in %s : cannot register segments descriptors\n", __FUNCTION__ ); 328 kmem_free( segs_base , bits_log2(segs_size) ); 335 329 return -1; 336 330 } … 343 337 344 338 // release allocated memory for program header 345 req.ptr = segs_base; 346 kmem_free(&req); 339 kmem_free( segs_base , bits_log2(segs_size) ); 347 340 348 341 #if DEBUG_ELF_LOAD -
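Every hunk in this file follows the same mechanical rewrite: the kmem_req_t descriptor disappears, kmem_alloc() and kmem_free() take an explicit (order, flags) pair, and the caller becomes responsible for supplying the same order at release time. The user-space mock below only illustrates that calling convention as it appears at the call sites above; the AF_* values here are placeholders, not the kernel encoding.

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define AF_NONE  0x0
#define AF_ZERO  0x1                     // zero-fill the allocated block

static void * mock_kmem_alloc( uint32_t order , uint32_t flags )
{
    void * ptr = malloc( 1UL << order );              // one 2^order bytes block
    if( (ptr != NULL) && (flags & AF_ZERO) ) memset( ptr , 0 , 1UL << order );
    return ptr;
}

static void mock_kmem_free( void * ptr , uint32_t order )
{
    (void)order;       // the real allocator uses it to pick a free list
    free( ptr );
}

int main( void )
{
    // mirrors the elf.c pattern : alloc and free must agree on the order
    uint32_t order = 9;                               // 512-byte buffer
    void * segs = mock_kmem_alloc( order , AF_NONE );
    if( segs == NULL ) return -1;
    mock_kmem_free( segs , order );
    return 0;
}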
trunk/kernel/libk/grdxt.c
r671 r683 40 40 uint32_t ix3_width ) 41 41 { 42 43 assert( __FUNCTION__, (rt != NULL), 44 "pointer on radix tree is NULL\n" ); 45 42 46 void ** root; 43 kmem_req_t req;44 47 45 48 rt->ix1_width = ix1_width; … … 48 51 49 52 // allocates first level array 50 req.type = KMEM_KCM; 51 req.order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 ); 52 req.flags = AF_KERNEL | AF_ZERO; 53 root = kmem_alloc( &req ); 53 uint32_t order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 ); 54 root = kmem_alloc( order , AF_ZERO ); 54 55 55 56 if( root == NULL ) … … 68 69 void grdxt_destroy( grdxt_t * rt ) 69 70 { 70 kmem_req_t req; 71 72 assert( __FUNCTION__, (rt != NULL), 73 "pointer on radix tree is NULL\n" ); 74 75 uint32_t order; 71 76 72 77 uint32_t w1 = rt->ix1_width; … … 81 86 uint32_t ix2; 82 87 uint32_t ix3; 83 84 assert( __FUNCTION__, (rt != NULL) , "pointer on radix tree is NULL\n" );85 88 86 89 for( ix1=0 ; ix1 < (uint32_t)(1 << w1) ; ix1++ ) … … 106 109 107 110 // release level 3 array 108 req.type = KMEM_KCM; 109 req.ptr = ptr3; 110 kmem_free( &req ); 111 order = w3 + ( (sizeof(void*) == 4) ? 2 : 3 ); 112 kmem_free( ptr3 , order ); 111 113 } 112 114 113 115 // release level 2 array 114 req.type = KMEM_KCM; 115 req.ptr = ptr2; 116 kmem_free( &req ); 116 order = w2 + ( (sizeof(void*) == 4) ? 2 : 3 ); 117 kmem_free( ptr2 , order ); 117 118 } 118 119 119 120 // release level 1 array 120 req.type = KMEM_KCM; 121 req.ptr = ptr1; 122 kmem_free( &req ); 121 order = w1 + ( (sizeof(void*) == 4) ? 2 : 3 ); 122 kmem_free( ptr1 , order ); 123 123 124 124 } // end grdxt_destroy() … … 129 129 void * value ) 130 130 { 131 kmem_req_t req;131 uint32_t order; 132 132 133 133 uint32_t w1 = rt->ix1_width; … … 136 136 137 137 // Check key value 138 assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key ); 138 assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ), 139 "illegal key value %x\n", key ); 139 140 140 141 // compute indexes … … 155 156 { 156 157 // allocate memory for level 2 array 157 req.type = KMEM_KCM; 158 req.order = w2 + ( (sizeof(void*) == 4) ? 2 : 3 ); 159 req.flags = AF_KERNEL | AF_ZERO; 160 ptr2 = kmem_alloc( &req ); 158 order = w2 + ( (sizeof(void*) == 4) ? 2 : 3 ); 159 ptr2 = kmem_alloc( order , AF_ZERO ); 161 160 162 161 if( ptr2 == NULL) return -1; … … 173 172 { 174 173 // allocate memory for level 3 array 175 req.type = KMEM_KCM; 176 req.order = w3 + ( (sizeof(void*) == 4) ? 2 : 3 ); 177 req.flags = AF_KERNEL | AF_ZERO; 178 ptr3 = kmem_alloc( &req ); 174 order = w3 + ( (sizeof(void*) == 4) ? 
2 : 3 ); 175 ptr3 = kmem_alloc( order , AF_ZERO ); 179 176 180 177 if( ptr3 == NULL) return -1; … … 202 199 203 200 // Check key value 204 assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key ); 201 assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ), 202 "illegal key value %x\n", key ); 205 203 206 204 // compute indexes … … 244 242 245 243 // Check key value 246 assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key ); 244 assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ), 245 "illegal key value %x\n", key ); 247 246 248 247 void ** ptr1 = rt->root; … … 284 283 285 284 // Check key value 286 assert( __FUNCTION__, ((start_key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", start_key ); 285 assert( __FUNCTION__, ((start_key >> (w1 + w2 + w3)) == 0 ), 286 "illegal key value %x\n", start_key ); 287 287 288 288 // compute max indexes … … 338 338 uint32_t ix3_width ) 339 339 { 340 341 assert( __FUNCTION__, (rt_xp != XPTR_NULL), 342 "extended pointer on radix tree is NULL\n" ); 343 340 344 void ** root; 341 kmem_req_t req;342 345 343 346 // get cluster and local pointer … … 351 354 352 355 // allocates first level array 353 req.type = KMEM_KCM; 354 req.order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 ); 355 req.flags = AF_KERNEL | AF_ZERO; 356 root = kmem_remote_alloc( rt_cxy , &req ); 356 uint32_t order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 ); 357 root = kmem_remote_alloc( rt_cxy , order , AF_ZERO ); 357 358 358 359 if( root == NULL ) … … 372 373 void grdxt_remote_destroy( xptr_t rt_xp ) 373 374 { 374 kmem_req_t req; 375 376 assert( __FUNCTION__, (rt_xp != XPTR_NULL), 377 "extended pointer on radix tree is NULL\n" ); 378 379 uint32_t order; 375 380 376 381 uint32_t w1; … … 422 427 423 428 // release level 3 array 424 req.type = KMEM_KCM; 425 req.ptr = ptr3; 426 kmem_remote_free( rt_cxy , &req ); 429 order = w3 + ((sizeof(void*) == 4) ? 2 : 3 ); 430 kmem_remote_free( rt_cxy , ptr3 , order ); 427 431 } 428 432 429 433 // release level 2 array 430 req.type = KMEM_KCM; 431 req.ptr = ptr2; 432 kmem_remote_free( rt_cxy , &req ); 434 order = w2 + ((sizeof(void*) == 4) ? 2 : 3 ); 435 kmem_remote_free( rt_cxy , ptr2 , order ); 433 436 } 434 437 435 438 // release level 1 array 436 req.type = KMEM_KCM; 437 req.ptr = ptr1; 438 kmem_remote_free( rt_cxy , &req ); 439 order = w1 + ((sizeof(void*) == 4) ? 2 : 3 ); 440 kmem_remote_free( rt_cxy , ptr1 , order ); 439 441 440 442 } // end grdxt_remote_destroy() … … 445 447 void * value ) 446 448 { 447 kmem_req_t req;449 uint32_t order; 448 450 449 451 // get cluster and local pointer on remote rt descriptor … … 507 509 { 508 510 // allocate memory in remote cluster 509 req.type = KMEM_KCM; 510 req.order = w2 + ((sizeof(void*) == 4) ? 2 : 3 ); 511 req.flags = AF_ZERO | AF_KERNEL; 512 ptr2 = kmem_remote_alloc( rt_cxy , &req ); 511 order = w2 + ((sizeof(void*) == 4) ? 2 : 3 ); 512 ptr2 = kmem_remote_alloc( rt_cxy , order , AF_ZERO ); 513 513 514 514 if( ptr2 == NULL ) return -1; … … 538 538 { 539 539 // allocate memory in remote cluster 540 req.type = KMEM_KCM; 541 req.order = w3 + ((sizeof(void*) == 4) ? 2 : 3 ); 542 req.flags = AF_ZERO | AF_KERNEL; 543 ptr3 = kmem_remote_alloc( rt_cxy , &req ); 540 order = w3 + ((sizeof(void*) == 4) ? 2 : 3 ); 541 ptr3 = kmem_remote_alloc( rt_cxy , order , AF_ZERO ); 544 542 545 543 if( ptr3 == NULL ) return -1; -
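The recurring "w + ((sizeof(void*) == 4) ? 2 : 3)" expression above is simply the log2 in bytes of a level array: (1 << w) pointers of 4 or 8 bytes each. The standalone sketch below shows that sizing together with the three-level key split used by grdxt; the widths are illustrative, borrowed from the mapper parameters of the configuration file.

#include <stdio.h>
#include <stdint.h>

#define W1  6        // bits of the level-1 index
#define W2  7        // bits of the level-2 index
#define W3  7        // bits of the level-3 index

int main( void )
{
    uint32_t key = 0x12345;              // must fit in W1+W2+W3 = 20 bits

    uint32_t ix1 = key >> (W2 + W3);                 // level-1 index
    uint32_t ix2 = (key >> W3) & ((1u << W2) - 1);   // level-2 index
    uint32_t ix3 = key & ((1u << W3) - 1);           // level-3 index

    // order of a level-2 array : W2 + log2( sizeof(void*) )
    uint32_t order2 = W2 + ( (sizeof(void*) == 4) ? 2 : 3 );

    printf("ix1 %x / ix2 %x / ix3 %x\n", ix1, ix2, ix3 );
    printf("level-2 array : %u bytes\n", 1u << order2 );
    return 0;
}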
trunk/kernel/libk/remote_barrier.c
r671 r683 2 2 * remote_barrier.c - POSIX barrier implementation. 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019)4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 84 84 { 85 85 generic_barrier_t * gen_barrier_ptr; // local pointer on generic barrier descriptor 86 void * barrier; // local pointer on implementation barrier descriptor 87 kmem_req_t req; // kmem request 86 void * barrier; // local pointer on impl barrier descriptor 88 87 89 88 // get pointer on local process_descriptor … … 96 95 97 96 // allocate memory for generic barrier descriptor 98 req.type = KMEM_KCM; 99 req.order = bits_log2( sizeof(generic_barrier_t) ); 100 req.flags = AF_ZERO | AF_KERNEL; 101 gen_barrier_ptr = kmem_remote_alloc( ref_cxy , &req ); 102 97 gen_barrier_ptr = kmem_remote_alloc( ref_cxy, 98 bits_log2(sizeof(generic_barrier_t)), 99 AF_KERNEL ); 103 100 if( gen_barrier_ptr == NULL ) 104 101 { … … 108 105 109 106 // create implementation specific barrier descriptor 110 if( attr == NULL ) // simple barrier implementation107 if( attr == NULL ) // simple barrier 111 108 { 112 109 // create simple barrier descriptor 113 110 barrier = simple_barrier_create( count ); 114 115 if( barrier == NULL ) return -1; 116 } 117 else // QDT barrier implementation 111 } 112 else // QDT barrier 118 113 { 119 114 uint32_t x_size = attr->x_size; … … 126 121 printk("\n[ERROR] in %s : count(%d) != x_size(%d) * y_size(%d) * nthreads(%d)\n", 127 122 __FUNCTION__, count, x_size, y_size, nthreads ); 123 kmem_remote_free( ref_cxy, 124 gen_barrier_ptr, 125 bits_log2(sizeof(generic_barrier_t)) ); 128 126 return -1; 129 127 } … … 131 129 // create DQT barrier descriptor 132 130 barrier = dqt_barrier_create( x_size , y_size , nthreads ); 133 134 if( barrier == NULL ) return -1; 131 } 132 133 if( barrier == NULL ) 134 { 135 printk("\n[ERROR] in %s : cannot create impl barrier\n", __FUNCTION__ ); 136 kmem_remote_free( ref_cxy, 137 gen_barrier_ptr, 138 bits_log2(sizeof(generic_barrier_t)) ); 139 return -1; 135 140 } 136 141 … … 157 162 void generic_barrier_destroy( xptr_t gen_barrier_xp ) 158 163 { 159 kmem_req_t req; // kmem request160 161 164 // get pointer on local process_descriptor 162 165 process_t * process = CURRENT_THREAD->process; … … 191 194 remote_busylock_release( lock_xp ); 192 195 193 // release memory allocated to barrier descriptor194 req.type = KMEM_KCM;195 req.ptr = gen_barrier_ptr;196 kmem_remote_free( ref_cxy , &req);196 // release memory allocated to generic barrier descriptor 197 kmem_remote_free( gen_barrier_cxy, 198 gen_barrier_ptr, 199 bits_log2(sizeof(generic_barrier_t)) ); 197 200 198 201 } // end generic_barrier_destroy() … … 246 249 simple_barrier_t * simple_barrier_create( uint32_t count ) 247 250 { 248 kmem_req_t req;249 251 simple_barrier_t * barrier; 250 252 … … 258 260 259 261 // allocate memory for simple barrier descriptor 260 req.type = KMEM_KCM; 261 req.order = bits_log2( sizeof(simple_barrier_t) ); 262 req.flags = AF_ZERO | AF_KERNEL; 263 barrier = kmem_remote_alloc( ref_cxy , &req ); 264 262 barrier = kmem_remote_alloc( ref_cxy, 263 bits_log2(sizeof(simple_barrier_t)), 264 AF_ZERO ); 265 265 if( barrier == NULL ) 266 266 { … … 291 291 void simple_barrier_destroy( xptr_t barrier_xp ) 292 292 { 293 kmem_req_t req;294 295 293 // get barrier cluster and local pointer 296 294 cxy_t barrier_cxy = GET_CXY( barrier_xp ); … … 298 296 299 297 // release memory allocated for barrier descriptor 300 req.type = KMEM_KCM;301 req.ptr = barrier_ptr;302 
kmem_remote_free( barrier_cxy , &req);298 kmem_remote_free( barrier_cxy, 299 barrier_ptr, 300 bits_log2(sizeof(simple_barrier_t)) ); 303 301 304 302 #if DEBUG_BARRIER_DESTROY … … 471 469 uint32_t y; // Y coordinate in QDT mesh 472 470 uint32_t l; // level coordinate 473 kmem_req_t req; // kmem request474 471 475 472 // compute number of DQT levels, depending on the mesh size … … 478 475 479 476 // check x_size and y_size arguments 480 assert( __FUNCTION__, (z <= 16) , "DQT mesh size larger than (16*16)\n"); 477 assert( __FUNCTION__, (z <= 16), 478 "DQT mesh size larger than (16*16)\n"); 481 479 482 480 // check size of an array of 5 DQT nodes 483 assert( __FUNCTION__, (sizeof(dqt_node_t) * 5 <= 512 ), "array of DQT nodes larger than 512 bytes\n"); 481 assert( __FUNCTION__, (sizeof(dqt_node_t) * 5 <= 512 ), 482 "array of DQT nodes larger than 512 bytes\n"); 484 483 485 484 // check size of DQT barrier descriptor 486 assert( __FUNCTION__, (sizeof(dqt_barrier_t) <= 0x4000 ), "DQT barrier descriptor larger than 4 pages\n"); 485 assert( __FUNCTION__, (sizeof(dqt_barrier_t) <= 0x4000 ), 486 "DQT barrier descriptor larger than 4 pages\n"); 487 487 488 488 // get pointer on client thread and process descriptors … … 502 502 503 503 // 1. allocate 4 small pages for the DQT barrier descriptor in reference cluster 504 req.type = KMEM_PPM; 505 req.order = 2; // 4 small pages == 16 Kbytes 506 req.flags = AF_ZERO | AF_KERNEL; 507 barrier = kmem_remote_alloc( ref_cxy , &req ); 508 504 barrier = kmem_remote_alloc( ref_cxy, 505 CONFIG_PPM_PAGE_ORDER + 2, // 4 small pages 506 AF_ZERO ); 509 507 if( barrier == NULL ) 510 508 { … … 536 534 { 537 535 cxy_t cxy = HAL_CXY_FROM_XY( x , y ); // target cluster identifier 538 xptr_t local_array_xp; // xptr o fnodes array in cluster cxy536 xptr_t local_array_xp; // xptr on nodes array in cluster cxy 539 537 540 538 // allocate memory in existing clusters only 541 539 if( LOCAL_CLUSTER->cluster_info[x][y] ) 542 540 { 543 req.type = KMEM_KCM; 544 req.order = 9; // 512 bytes 545 req.flags = AF_ZERO | AF_KERNEL; 546 547 void * ptr = kmem_remote_alloc( cxy , &req ); 541 void * ptr = kmem_remote_alloc( cxy , 9 , AF_ZERO ); // 512 bytes 548 542 549 543 if( ptr == NULL ) … … 729 723 void dqt_barrier_destroy( xptr_t barrier_xp ) 730 724 { 731 kmem_req_t req; // kmem request732 725 uint32_t x; 733 726 uint32_t y; 734 735 727 736 728 // get DQT barrier descriptor cluster and local pointer … … 767 759 void * buf = GET_PTR( buf_xp ); 768 760 769 assert( __FUNCTION__, (cxy == GET_CXY(buf_xp)) , "bad extended pointer on dqt_nodes array\n" ); 770 771 req.type = KMEM_KCM; 772 req.ptr = buf; 773 kmem_remote_free( cxy , &req ); 761 kmem_remote_free( cxy , buf , 9 ); // 512 bytes 774 762 775 763 #if DEBUG_BARRIER_DESTROY … … 785 773 786 774 // 2. release memory allocated for barrier descriptor in ref cluster 787 req.type = KMEM_PPM;788 req.ptr = barrier_ptr;789 kmem_remote_free( barrier_cxy , &req );775 kmem_remote_free( barrier_cxy, 776 barrier_ptr, 777 CONFIG_PPM_PAGE_ORDER + 2 ); // 4 small pages 790 778 791 779 #if DEBUG_BARRIER_DESTROY -
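For reference, the simple barrier created above is the classic centralized sense-reversing barrier. The kernel version keeps its state behind a remote_busylock and blocks threads on a wait queue across clusters; the C11 sketch below shows only the underlying algorithm, with spinning threads in a single address space, and is not the kernel code.

#include <stdatomic.h>
#include <stdint.h>

typedef struct
{
    atomic_uint  current;     // number of threads arrived in this round
    atomic_uint  sense;       // flips once per barrier round
    uint32_t     count;       // expected number of threads (set once)
} sketch_barrier_t;           // initialise with { 0 , 0 , count }

static void sketch_barrier_wait( sketch_barrier_t * b )
{
    uint32_t my_sense = 1 - atomic_load( &b->sense );  // sense of this round

    if( atomic_fetch_add( &b->current , 1 ) == b->count - 1 )
    {
        // last arrived thread : reset the counter, then release the others
        atomic_store( &b->current , 0 );
        atomic_store( &b->sense , my_sense );
    }
    else
    {
        while( atomic_load( &b->sense ) != my_sense );  // spin until released
    }
}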
trunk/kernel/libk/remote_buf.c
r671 r683 34 34 remote_buf_t * remote_buf_alloc( cxy_t cxy ) 35 35 { 36 kmem_req_t req; 37 38 req.type = KMEM_KCM; 39 req.order = bits_log2( sizeof(remote_buf_t) ); 40 req.flags = AF_ZERO; 41 return kmem_remote_alloc( cxy , &req ); 36 return kmem_remote_alloc( cxy, 37 bits_log2(sizeof(remote_buf_t)), 38 AF_ZERO ); 42 39 } 43 40 … … 50 47 assert( __FUNCTION__ , (order < 32) , "order cannot be larger than 31" ); 51 48 52 kmem_req_t req;53 49 uint8_t * data; 54 50 … … 57 53 58 54 // allocate the data buffer 59 if( order >= CONFIG_PPM_PAGE_SHIFT ) // use KMEM_PPM 60 { 61 req.type = KMEM_PPM; 62 req.order = order - CONFIG_PPM_PAGE_SHIFT; 63 req.flags = AF_NONE; 64 data = kmem_remote_alloc( buf_cxy , &req ); 65 66 if( data == NULL ) return -1; 67 } 68 else // use KMEM_KCM 69 { 70 req.type = KMEM_KCM; 71 req.order = order; 72 req.flags = AF_NONE; 73 data = kmem_remote_alloc( buf_cxy , &req ); 74 75 if( data == NULL ) return -1; 76 } 55 data = kmem_remote_alloc( buf_cxy , order , AF_NONE ); 56 57 if( data == NULL ) return -1; 77 58 78 59 // initialize buffer descriptor … … 90 71 void remote_buf_release_data( xptr_t buf_xp ) 91 72 { 92 kmem_req_t req;93 73 94 74 assert( __FUNCTION__ , (buf_xp != XPTR_NULL) , "buf_xp cannot be NULL" ); … … 102 82 103 83 // release memory allocated for data buffer if required 104 if( data_ptr != NULL ) 105 { 106 if( order >= CONFIG_PPM_PAGE_SHIFT ) // use KMEM_PPM 107 { 108 req.type = KMEM_PPM; 109 req.ptr = data_ptr; 110 kmem_remote_free( buf_cxy , &req ); 111 } 112 else // use KMEM_KCM 113 { 114 req.type = KMEM_KCM; 115 req.ptr = data_ptr; 116 kmem_remote_free( buf_cxy , &req ); 117 } 118 } 84 if( data_ptr != NULL ) kmem_remote_free( buf_cxy , data_ptr , order ); 85 119 86 } // end remote_buf_release_data() 120 87 … … 125 92 assert( __FUNCTION__ , (buf_xp != XPTR_NULL) , "buf_xp cannot be NULL" ); 126 93 127 kmem_req_t req;128 129 94 remote_buf_t * buf_ptr = GET_PTR( buf_xp ); 130 95 cxy_t buf_cxy = GET_CXY( buf_xp ); … … 134 99 135 100 // release remote_buf descriptor 136 req.type = KMEM_KCM; 137 req.ptr = buf_ptr; 138 kmem_remote_free( buf_cxy , &req ); 101 kmem_remote_free( buf_cxy , buf_ptr , bits_log2(sizeof(remote_buf_t)) ); 139 102 140 103 } // end remote_buf_destroy() … … 404 367 } // end remote_buf_status() 405 368 406 369 /////////////////////////////////////////////// 370 void remote_buf_display( const char * func_str, 371 xptr_t buf_xp, 372 uint32_t nbytes, 373 uint32_t offset ) 374 { 375 if( nbytes > 256 ) 376 { 377 printk("\n[WARNING] in %s : no more than 256 bytes\n", __FUNCTION__ ); 378 nbytes = 256; 379 } 380 381 uint8_t string[128]; // for header 382 uint8_t local_data[256]; // local data buffer 383 384 cxy_t cxy = GET_CXY( buf_xp ); 385 remote_buf_t * ptr = GET_PTR( buf_xp ); 386 387 uint32_t order = hal_remote_l32( XPTR( cxy , &ptr->order )); 388 uint32_t rid = hal_remote_l32( XPTR( cxy , &ptr->rid )); 389 uint32_t wid = hal_remote_l32( XPTR( cxy , &ptr->wid )); 390 uint32_t sts = hal_remote_l32( XPTR( cxy , &ptr->sts )); 391 uint8_t * data = hal_remote_lpt( XPTR( cxy , &ptr->data )); 392 393 // make a local copy of data buffer 394 hal_remote_memcpy( XPTR( local_cxy , local_data ), 395 XPTR( cxy , data + offset ), 396 nbytes ); 397 398 // build header 399 snprintk( (char*)string , 128 , 400 "in %s remote buffer [%x,%x] : size %d / rid %d / wid %d / sts %d ", 401 func_str , cxy , ptr , 1<<order , rid , wid , sts ); 402 403 // display buffer on TXT0 404 putb( (char*)string , local_data , nbytes ); 405 406 } // end remote_buf_display() -
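The new remote_buf_display() above copies at most 256 bytes of the (possibly remote) data buffer into a local array before printing, so the dump never dereferences remote memory from inside the terminal output path. Below is a plain user-space analogue of that clamp / copy / dump pattern; as in the kernel version, the caller is assumed to guarantee that offset + nbytes stays inside the buffer.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void buf_display( const char * func_str,
                         const uint8_t * data,
                         uint32_t size,
                         uint32_t nbytes,
                         uint32_t offset )
{
    uint8_t local[256];

    if( nbytes > 256 )                          // clamp, as the kernel does
    {
        printf("\n[WARNING] in %s : no more than 256 bytes\n", __func__ );
        nbytes = 256;
    }

    memcpy( local , data + offset , nbytes );   // local copy first

    printf("in %s buffer : size %u\n", func_str , size );   // header line

    for( uint32_t i = 0 ; i < nbytes ; i++ )                // hex dump
        printf("%02x%c", (unsigned)local[i] , ((i & 15) == 15) ? '\n' : ' ' );
    printf("\n");
}

int main( void )
{
    uint8_t buf[32] = { 0xde , 0xad , 0xbe , 0xef };
    buf_display( "main" , buf , 32 , 16 , 0 );
    return 0;
}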
trunk/kernel/libk/remote_buf.h
r671 r683 176 176 uint32_t remote_buf_status( xptr_t buf_xp ); 177 177 178 /************************************************************************************ 179 * This debug function displays on the kernel terminal the current state of a remote 180 * buffer identified by the <buf_xp> argument : order / rid / wid / sts. 181 * If the <nbytes> argument is not null, and not larger than 256, it displays up to 182 * 256 bytes of the data buffer, from <offset> to (offset + nbytes -1). 183 ************************************************************************************ 184 * @ func_str : [in] calling function name (displayed in header). 185 * @ buf_xp : [in] extended pointer on remote buffer descriptor. 186 * @ nbytes : [in] number of data bytes to display. 187 * @ offset : [in] index of first displayed byte in data buffer. 188 ***********************************************************************************/ 189 void remote_buf_display( const char * func_str, 190 xptr_t buf_xp, 191 uint32_t nbytes, 192 uint32_t offset ); 193 194 #endif /* _REMOTE_BUFFER_H_ */ -
trunk/kernel/libk/remote_condvar.c
r635 r683 2 2 * remote_condvar.c - remote kernel condition variable implementation. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 86 86 { 87 87 remote_condvar_t * condvar_ptr; 88 kmem_req_t req;89 88 90 89 // get pointer on local process descriptor … … 98 97 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 99 98 100 req.type = KMEM_KCM; 101 req.order = bits_log2( sizeof(remote_condvar_t) ); 102 req.flags = AF_ZERO | AF_KERNEL; 103 condvar_ptr = kmem_alloc( &req ); 99 // allocate memory for condvar descriptor 100 condvar_ptr = kmem_alloc( bits_log2(sizeof(remote_condvar_t)) , AF_ZERO ); 104 101 105 102 if( condvar_ptr == NULL ) … … 130 127 void remote_condvar_destroy( xptr_t condvar_xp ) 131 128 { 132 kmem_req_t req;133 134 129 // get pointer on local process descriptor 135 130 process_t * process = CURRENT_THREAD->process; … … 162 157 163 158 // release memory allocated for condvar descriptor 164 req.type = KMEM_KCM; 165 req.ptr = condvar_ptr; 166 kmem_remote_free( ref_cxy , &req ); 159 kmem_remote_free( ref_cxy , condvar_ptr , bits_log2(sizeof(remote_condvar_t)) ); 167 160 168 161 } // end remote_convar_destroy() -
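Here, and in remote_mutex.c and remote_sem.c below, the same convention appears: the descriptor order is recomputed as bits_log2(sizeof(type)) at both the create and the destroy site, since the allocator no longer records block sizes. The standalone sketch below shows the size rounding this relies on; fake_desc_t is a hypothetical type, and the round-up behaviour of the kernel bits_log2() is an assumption made for illustration.

#include <stdio.h>
#include <stdint.h>

typedef struct          // hypothetical descriptor, 48 bytes
{
    uint32_t a;
    uint32_t b;
    char     name[40];
} fake_desc_t;

// round a size up to the next power-of-two order, as the call sites
// above do with bits_log2( sizeof(type) )
static uint32_t order_of( uint32_t size )
{
    uint32_t order = 0;
    while( (1u << order) < size ) order++;
    return order;
}

int main( void )
{
    uint32_t order = order_of( sizeof(fake_desc_t) );

    // a 48-byte descriptor is carved from a 64-byte (order 6) block,
    // and the same order must be passed back at destroy time
    printf("sizeof %u -> order %u -> block %u bytes\n",
           (uint32_t)sizeof(fake_desc_t), order, 1u << order );
    return 0;
}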
trunk/kernel/libk/remote_condvar.h
r635 r683 2 2 * remote_condvar.h: POSIX condition variable definition. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/libk/remote_fifo.c
r657 r683 42 42 fifo->wr_id = 0; 43 43 fifo->rd_id = 0; 44 for( slot = 0 ; slot < CONFIG_REMOTE_FIFO_SLOTS ; slot++ )44 for( slot = 0 ; slot < CONFIG_RPC_FIFO_SLOTS ; slot++ ) 45 45 { 46 46 fifo->valid[slot] = 0; … 69 69 70 70 // wait until allocated slot is empty in remote FIFO 71 // max retry = CONFIG_REMOTE_FIFO_MAX_ITERATIONS71 // max retry = CONFIG_RPC_FIFO_MAX_ITERATIONS 72 72 // return error if watchdog is reached 73 73 while( 1 ) 74 74 { 75 75 // return error if contention detected by watchdog 76 if( watchdog > CONFIG_REMOTE_FIFO_MAX_ITERATIONS ) return EBUSY;76 if( watchdog > CONFIG_RPC_FIFO_MAX_ITERATIONS ) return EBUSY; 77 77 78 78 // read remote rd_id value … 84 84 85 85 // exit waiting loop as soon as fifo not full 86 if ( nslots < CONFIG_REMOTE_FIFO_SLOTS ) break;86 if ( nslots < CONFIG_RPC_FIFO_SLOTS ) break; 87 87 88 88 // retry later if fifo full: … 97 97 98 98 // compute actual write slot pointer 99 ptw = wr_id % CONFIG_REMOTE_FIFO_SLOTS;99 ptw = wr_id % CONFIG_RPC_FIFO_SLOTS; 100 100 101 101 // copy item to fifo … 123 123 124 124 // compute actual read slot pointer 125 uint32_t ptr = rd_id % CONFIG_REMOTE_FIFO_SLOTS;125 uint32_t ptr = rd_id % CONFIG_RPC_FIFO_SLOTS; 126 126 127 127 // wait slot filled by the writer … 158 158 else nslots = (0xFFFFFFFF - rd_id) + wr_id; 159 159 160 return ( nslots >= CONFIG_REMOTE_FIFO_SLOTS );160 return ( nslots >= CONFIG_RPC_FIFO_SLOTS ); 161 161 162 162 } -
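The full/empty tests above must survive wrap-around of the monotonically increasing 32-bit tickets wr_id and rd_id. One observation, offered as a remark rather than a confirmed bug: plain unsigned subtraction is already exact modulo 2^32, whereas the explicit (0xFFFFFFFF - rd_id) + wr_id branch returns one slot less than the true distance once wr_id has wrapped. The standalone check below makes the difference visible.

#include <stdio.h>
#include <stdint.h>

#define FIFO_SLOTS 16            // CONFIG_RPC_FIFO_SLOTS after the rename

// distance between write and read tickets, as computed above
static uint32_t nslots_branchy( uint32_t wr_id , uint32_t rd_id )
{
    if( wr_id >= rd_id ) return wr_id - rd_id;
    else                 return (0xFFFFFFFF - rd_id) + wr_id;   // wrapped case
}

int main( void )
{
    uint32_t wr = 2;             // wr_id already wrapped past 2^32
    uint32_t rd = 0xFFFFFFFE;    // rd_id not yet

    printf("branchy : %u\n", nslots_branchy( wr , rd ) );   // prints 3
    printf("modular : %u\n", wr - rd );                     // prints 4 : exact mod 2^32

    // full test used by the writer before inserting an item
    printf("full    : %d\n", nslots_branchy( 16 , 0 ) >= FIFO_SLOTS );  // prints 1
    return 0;
}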
trunk/kernel/libk/remote_fifo.h
r563 r683 36 36 * that is used for - RPC based - inter cluster communications. 37 37 * Each FIFO slot can contain one 64 bits integer (or one extended pointer). 38 * The number of slots is defined by the CONFIG_REMOTE_FIFO_SLOTS parameter.38 * The number of slots is defined by the CONFIG_RPC_FIFO_SLOTS parameter. 39 39 * - The write accesses are implemented using a lock-free algorithm, as it uses … 45 45 * and RPC threads cannot have local index LTID = 0. 46 46 * 47 * WARNING : Each FIFO requires 12 + (12 * CONFIG_REMOTE_FIFO_SLOTS) bytes.47 * WARNING : Each FIFO requires 12 + (12 * CONFIG_RPC_FIFO_SLOTS) bytes. 48 48 ***********************************************************************************/ 49 49 … 53 53 volatile uint32_t wr_id; /*! write slot index */ 54 54 volatile uint32_t rd_id; /*! read slot index */ 55 volatile uint32_t valid[CONFIG_REMOTE_FIFO_SLOTS]; /*! empty slot if 0 */56 volatile uint32_t valid[CONFIG_RPC_FIFO_SLOTS]; /*! empty slot if 0 */ 56 uint64_t data[CONFIG_REMOTE_FIFO_SLOTS]; /*! fifo slot content */55 57 uint64_t data[CONFIG_RPC_FIFO_SLOTS]; /*! fifo slot content */ 57 58 } 58 58 remote_fifo_t; … 84 84 * the slot is empty, using a descheduling policy without blocking if required. 85 85 * It implements a watchdog, returning when the item has been successfully 86 * registered, or after CONFIG_REMOTE_FIFO_MAX_ITERATIONS failures.86 * registered, or after CONFIG_RPC_FIFO_MAX_ITERATIONS failures. 87 87 ************************************************************************************ 88 88 * @ fifo : extended pointer to the remote fifo. -
trunk/kernel/libk/remote_mutex.c
r635 r683 2 2 * remote_mutex.c - POSIX mutex implementation. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019)4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … 85 85 { 86 86 remote_mutex_t * mutex_ptr; 87 kmem_req_t req; 88 87 89 88 // get pointer on local process descriptor … 98 97 99 98 // allocate memory for mutex descriptor in reference cluster 100 req.type = KMEM_KCM; 101 req.order = bits_log2( sizeof(remote_mutex_t) ); 102 req.flags = AF_ZERO | AF_KERNEL; 103 mutex_ptr = kmem_remote_alloc( ref_cxy , &req ); 99 mutex_ptr = kmem_remote_alloc( ref_cxy , bits_log2(sizeof(remote_mutex_t)) , AF_ZERO ); 104 100 105 101 if( mutex_ptr == NULL ) … 145 141 void remote_mutex_destroy( xptr_t mutex_xp ) 146 142 { 147 kmem_req_t req; 148 149 143 // get pointer on local process descriptor 150 144 process_t * process = CURRENT_THREAD->process; … 171 165 172 166 // release memory allocated for mutex descriptor 173 req.type = KMEM_KCM; 174 req.ptr = mutex_ptr; 175 kmem_remote_free( mutex_cxy , &req ); 167 kmem_remote_free( mutex_cxy , mutex_ptr , bits_log2(sizeof(remote_mutex_t)) ); 176 168 177 169 } // end remote_mutex_destroy() -
trunk/kernel/libk/remote_sem.c
r671 r683 2 2 * remote_sem.c - POSIX unnamed semaphore implementation. 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019 )4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 86 86 uint32_t value ) 87 87 { 88 kmem_req_t req;89 88 remote_sem_t * sem_ptr; 90 89 … … 100 99 101 100 // allocate memory for new semaphore in reference cluster 102 req.type = KMEM_KCM; 103 req.order = bits_log2( sizeof(remote_sem_t) ); 104 req.flags = AF_ZERO | AF_KERNEL; 105 sem_ptr = kmem_remote_alloc( ref_cxy, &req ); 101 sem_ptr = kmem_remote_alloc( ref_cxy , bits_log2(sizeof(remote_sem_t)) , AF_ZERO ); 106 102 107 103 if( sem_ptr == NULL ) … … 144 140 void remote_sem_destroy( xptr_t sem_xp ) 145 141 { 146 kmem_req_t req;147 148 142 // get pointer on local process descriptor 149 143 process_t * process = CURRENT_THREAD->process; … … 176 170 177 171 // release memory allocated for semaphore descriptor 178 req.type = KMEM_KCM; 179 req.ptr = sem_ptr; 180 kmem_remote_free( sem_cxy , &req ); 172 kmem_remote_free( sem_cxy , sem_ptr , bits_log2(sizeof(remote_sem_t)) ); 181 173 182 174 } // end remote_sem_destroy() -
trunk/kernel/libk/remote_sem.h
r581 r683 2 2 * remote_sem.h - POSIX unnamed semaphore definition. 3 3 * 4 * Author Alain Greiner (2016,2017,2018)4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/libk/user_dir.c
r671 r683 2 2 * user_dir.c - kernel DIR related operations implementation. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 105 105 list_entry_t root; // root of temporary list of allocated pages 106 106 uint32_t page_id; // page index in list of physical pages 107 kmem_req_t req; // kmem request descriptor108 107 ppn_t fake_ppn; // unused, but required by hal_gptlock_pte() 109 108 uint32_t fake_attr; // unused, but required by hal_gptlock_pte() 110 109 error_t error; 110 111 #if DEBUG_USER_DIR_CREATE || DEBUG_USER_DIR_ERROR 112 uint32_t cycle = (uint32_t)hal_get_cycles(); 113 thread_t * this = CURRENT_THREAD; 114 #endif 111 115 112 116 // get cluster, local pointer, and pid of reference process … … 115 119 ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) ); 116 120 117 #if DEBUG_USER_DIR 118 uint32_t cycle = (uint32_t)hal_get_cycles(); 119 thread_t * this = CURRENT_THREAD; 120 if( cycle > DEBUG_USER_DIR ) 121 #if DEBUG_USER_DIR_CREATE 122 if( DEBUG_USER_DIR_CREATE < cycle ) 121 123 printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) and process %x / cycle %d\n", 122 124 __FUNCTION__, this->process->pid, this->trdid, local_cxy, inode, ref_pid, cycle ); … … 133 135 134 136 // allocate memory for a local user_dir descriptor 135 req.type = KMEM_KCM; 136 req.order = bits_log2( sizeof(user_dir_t) ); 137 req.flags = AF_ZERO | AF_KERNEL; 138 dir = kmem_alloc( &req ); 137 dir = kmem_alloc( bits_log2(sizeof(user_dir_t)) , AF_ZERO ); 139 138 140 139 if( dir == NULL ) 141 140 { 142 printk("\n[ERROR] in %s : cannot allocate user_dir_t in cluster %x\n", 143 __FUNCTION__, local_cxy ); 141 142 #if DEBUG_USER_DIR_ERROR 143 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate user_dir_t in cluster %x / cycle %d\n", 144 __FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle ); 145 #endif 144 146 return NULL; 145 147 } 146 148 147 // Build and initialize the dirent array as a list of pages.148 // For each iteration in this while loop:149 // First loop to build and initialize the dirent array 150 // as a temporary list of pages. 
For each iteration : 149 151 // - allocate one physical 4 Kbytes (64 dirent slots) 150 152 // - call the relevant FS specific function to scan the directory mapper, … … 162 164 { 163 165 // allocate one physical page 164 req.type = KMEM_PPM; 165 req.order = 0; 166 req.flags = AF_ZERO; 167 base = kmem_alloc( &req ); 166 base = kmem_alloc( CONFIG_PPM_PAGE_ORDER , AF_ZERO ); 168 167 169 168 if( base == NULL ) 170 169 { 171 printk("\n[ERROR] in %s : cannot allocate page in cluster %x\n", 172 __FUNCTION__, ref_cxy ); 170 171 #if DEBUG_USER_DIR_ERROR 172 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate page in cluster %x / cycle %d\n", 173 __FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle ); 174 #endif 173 175 goto user_dir_create_failure; 174 176 } … … 184 186 if( error ) 185 187 { 186 printk("\n[ERROR] in %s : cannot initialise dirent array in cluster %x\n", 187 __FUNCTION__, ref_cxy ); 188 189 #if DEBUG_USER_DIR_ERROR 190 printk("\n[ERROR] in %s : thread[%x,%x] cannot initialize dirent array in cluster %x / cycle %d\n", 191 __FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle ); 192 #endif 188 193 goto user_dir_create_failure; 189 194 } … … 204 209 } // end while 205 210 206 #if DEBUG_USER_DIR 207 if( cycle > DEBUG_USER_DIR)211 #if DEBUG_USER_DIR_CREATE 212 if( DEBUG_USER_DIR_CREATE < cycle ) 208 213 printk("\n[%s] thread[%x,%x] initialised dirent array / %d entries\n", 209 214 __FUNCTION__, this->process->pid, this->trdid, total_dirents, cycle ); … … 241 246 if( vseg == NULL ) 242 247 { 243 printk("\n[ERROR] in %s : cannot create vseg for user_dir in cluster %x\n", 244 __FUNCTION__, ref_cxy); 248 249 #if DEBUG_USER_DIR_ERROR 250 printk("\n[ERROR] in %s : thread[%x,%x] cannot create vseg in cluster %x / cycle %d\n", 251 __FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle ); 252 #endif 245 253 goto user_dir_create_failure; 246 254 } 247 255 248 #if DEBUG_USER_DIR 249 if( cycle > DEBUG_USER_DIR)256 #if DEBUG_USER_DIR_CREATE 257 if( DEBUG_USER_DIR_CREATE < cycle ) 250 258 printk("\n[%s] thread[%x,%x] allocated vseg ANON / base %x / size %x\n", 251 259 __FUNCTION__, this->process->pid, this->trdid, vseg->min, vseg->max - vseg->min ); … … 269 277 vpn_base = hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_base ) ); 270 278 271 // scan the list ofallocated physical pages to map279 // Second loop on the allocated physical pages to map 272 280 // all physical pages in the reference process GPT 281 // The pages are mapped in the user process GPT, but 282 // are removed from the temporary list 283 273 284 page_id = 0; 285 274 286 while( list_is_empty( &root ) == false ) 275 287 { … … 290 302 if( error ) 291 303 { 292 printk("\n[ERROR] in %s : cannot map vpn %x in GPT\n", 293 __FUNCTION__, vpn ); 294 304 305 #if DEBUG_USER_DIR_ERROR 306 printk("\n[ERROR] in %s : thread[%x,%x] cannot map vpn %x in cluster %x / cycle %d\n", 307 __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, cycle ); 308 #endif 295 309 // delete the vseg 296 310 intptr_t base = (intptr_t)hal_remote_lpt( XPTR( ref_cxy , &vseg->min ) ); … … 298 312 299 313 // release the user_dir descriptor 300 req.type = KMEM_KCM; 301 req.ptr = dir; 302 kmem_free( &req ); 314 kmem_free( dir , bits_log2(sizeof(user_dir_t)) ); 303 315 return NULL; 304 316 } … … 310 322 ppn ); 311 323 312 #if DEBUG_USER_DIR 313 if( cycle > DEBUG_USER_DIR)324 #if DEBUG_USER_DIR_CREATE 325 if( DEBUG_USER_DIR_CREATE < cycle ) 314 326 printk("\n[%s] thread[%x,%x] mapped vpn %x to ppn %x\n", 315 327 __FUNCTION__, 
this->process->pid, this->trdid, vpn + page_id, ppn ); … … 329 341 dir->current = 0; 330 342 dir->entries = total_dirents; 331 dir->ident = (intptr_t)(vpn_base << CONFIG_PPM_PAGE_ SHIFT);343 dir->ident = (intptr_t)(vpn_base << CONFIG_PPM_PAGE_ORDER); 332 344 333 345 // build extended pointers on root and lock of user_dir xlist in ref process … … 347 359 remote_queuelock_release( lock_xp ); 348 360 349 #if DEBUG_USER_DIR 350 cycle = (uint32_t)hal_get_cycles(); 351 if( cycle > DEBUG_USER_DIR ) 361 #if DEBUG_USER_DIR_CREATE 362 if( DEBUG_USER_DIR_CREATE < cycle ) 352 363 printk("\n[%s] thread[%x,%x] created user_dir (%x,%x) / %d entries / cycle %d\n", 353 364 __FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, total_dirents, cycle ); … … 358 369 user_dir_create_failure: 359 370 360 // release local user_dir_t structure 361 req.type = KMEM_KCM; 362 req.ptr = dir; 363 kmem_free( &req ); 364 365 // release local physical pages 371 // release user_dir_t structure 372 kmem_free( dir , bits_log2(sizeof(user_dir_t)) ); 373 374 // release physical pages 366 375 while( list_is_empty( &root ) == false ) 367 376 { 377 // get page descriptor 368 378 page = LIST_FIRST( &root , page_t , list ); 369 379 … … 371 381 base = GET_PTR( ppm_page2base( XPTR( local_cxy , page ) ) ); 372 382 373 req.type = KMEM_PPM; 374 req.ptr = base; 375 kmem_free( &req ); 383 // release the page 384 kmem_free( base , CONFIG_PPM_PAGE_ORDER ); 376 385 } 377 386 … … 402 411 cluster = LOCAL_CLUSTER; 403 412 413 #if DEBUG_USER_DIR_DESTROY 414 uint32_t cycle = (uint32_t)hal_get_cycles(); 415 #endif 416 404 417 // get cluster, local pointer, and PID of reference user process 405 418 ref_cxy = GET_CXY( ref_xp ); … … 407 420 ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) ); 408 421 409 #if DEBUG_USER_DIR 410 uint32_t cycle = (uint32_t)hal_get_cycles(); 411 if( cycle > DEBUG_USER_DIR ) 422 #if DEBUG_USER_DIR_DESTROY 423 if( DEBUG_USER_DIR_DESTROY < cycle ) 412 424 printk("\n[%s] thread[%x,%x] enter for user_dir (%x,%x) and process %x / cycle %d\n", 413 425 __FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, ref_pid, cycle ); … … 475 487 hal_atomic_add( &responses , 1 ); 476 488 477 #if (DEBUG_USER_DIR & 1) 478 uint32_t cycle = (uint32_t)hal_get_cycles(); 479 if( cycle > DEBUG_USER_DIR ) 489 #if (DEBUG_USER_DIR_DESTROY & 1) 490 if( DEBUG_USER_DIR_DESTROY < cycle ) 480 491 printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n", 481 492 __FUNCTION__, this->process->pid, this->trdid, process_cxy ); … … 496 507 497 508 // release local user_dir_t structure 498 kmem_req_t req; 499 req.type = KMEM_KCM; 500 req.ptr = dir; 501 kmem_free( &req ); 502 503 #if DEBUG_USER_DIR 509 kmem_free( dir , bits_log2(sizeof(user_dir_t)) ); 510 511 #if DEBUG_USER_DIR_DESTROY 504 512 cycle = (uint32_t)hal_get_cycles(); 505 if( cycle > DEBUG_USER_DIR)513 if( DEBUG_USER_DIR_DESTROY < cycle ) 506 514 printk("\n[%s] thread[%x,%x] deleted user_dir (%x,%x) / cycle %d\n", 507 515 __FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, cycle ); -
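user_dir_create() above is organised as two passes over a temporary list of pages: the first loop allocates pages and fills them from the directory mapper, the second one unlinks each page and maps it into the reference process GPT, and on any error the user_dir_create_failure path walks whatever remains of the list and releases it. The sketch below reduces that goto-based cleanup shape to plain C, with malloc standing in for the kernel page allocator.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

typedef struct chunk { struct chunk * next; } chunk_t;   // header kept in the page

static void release_all( chunk_t * root )
{
    while( root != NULL )
    {
        chunk_t * c = root;
        root = root->next;
        free( c );
    }
}

int build_pages( unsigned nb_pages )
{
    chunk_t * root = NULL;

    // first pass : allocate pages and link them in a temporary list
    for( unsigned i = 0 ; i < nb_pages ; i++ )
    {
        chunk_t * c = malloc( PAGE_SIZE );
        if( c == NULL ) goto failure;        // mirrors user_dir_create_failure:
        c->next = root;
        root    = c;
    }

    // a second pass would consume the list here (mapping pages in the GPT) ;
    // this sketch simply releases it
    release_all( root );
    return 0;

failure:
    release_all( root );                     // release already allocated pages
    fprintf( stderr , "cannot allocate page\n" );
    return -1;
}

int main( void )
{
    return build_pages( 4 );
}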
trunk/kernel/libk/user_dir.h
r651 r683 2 2 * user_dir.h - DIR related operations definition. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/mm/kcm.c
r672 r683 36 36 #include <kcm.h> 37 37 38 /////////////////////////////////////////////////////////////////////////////////////////// 39 // global variables 40 /////////////////////////////////////////////////////////////////////////////////////////// 41 42 extern chdev_directory_t chdev_dir; // allocated in kernel_init.c 43 38 44 39 45 ///////////////////////////////////////////////////////////////////////////////////// … … 42 48 43 49 ////////////////////////////////////////////////////////////////////////////////////// 44 // This static function must be called by a local thread.50 // This static function is called by the kcm_alloc() function. 45 51 // It returns a pointer on a block allocated from an active kcm_page. 46 52 // It makes a panic if no block is available in the selected page. … … 55 61 { 56 62 // initialise variables 57 uint32_t size = 1 << kcm->order; 58 uint32_t max = kcm->max_blocks; 63 uint32_t order = kcm->order; 59 64 uint32_t count = kcm_page->count; 60 65 uint64_t status = kcm_page->status; 61 66 62 assert( __FUNCTION__, (count < max) , "kcm_page should not be full" ); 67 // check kcm page not full 68 assert( __FUNCTION__, (count < 63) , 69 "kcm_page should not be full / cxy %x / order %d / count %d", local_cxy, order, count ); 63 70 64 71 uint32_t index = 1; … … 67 74 // allocate first free block in kcm_page, update status, 68 75 // and count , compute index of allocated block in kcm_page 69 while( index <= max)76 while( index <= 63 ) 70 77 { 71 78 if( (status & mask) == 0 ) // block found … … 81 88 } 82 89 83 // change the page list if found block is the last84 if( count == max-1)90 // switch page to full if last block 91 if( (count + 1) == 63 ) 85 92 { 86 93 list_unlink( &kcm_page->list); … … 92 99 93 100 // compute return pointer 94 void * ptr = (void *)((intptr_t)kcm_page + (index * size) ); 95 96 #if DEBUG_KCM 97 thread_t * this = CURRENT_THREAD; 98 uint32_t cycle = (uint32_t)hal_get_cycles(); 99 if( DEBUG_KCM < cycle ) 100 printk("\n[%s] thread[%x,%x] allocated block %x in page %x / size %d / count %d / cycle %d\n", 101 __FUNCTION__, this->process->pid, this->trdid, ptr, kcm_page, size, count + 1, cycle ); 102 #endif 101 void * ptr = (void *)((intptr_t)kcm_page + (index << order)); 103 102 104 103 return ptr; … … 107 106 108 107 ///////////////////////////////////////////////////////////////////////////////////// 109 // This private static function must be called by a local thread.108 // This static function is called by the kcm_free() function. 110 109 // It releases a previously allocated block to the relevant kcm_page. 111 110 // It makes a panic if the released block is not allocated in this page. 
… … 121 120 { 122 121 // initialise variables 123 uint32_t max = kcm->max_blocks; 124 uint32_t size = 1 << kcm->order; 122 uint32_t order = kcm->order; 125 123 uint32_t count = kcm_page->count; 126 124 uint64_t status = kcm_page->status; 127 125 128 // compute block index from block pointer 129 uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size;126 // compute block index from block pointer and kcm_page pointer 127 uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) >> order; 130 128 131 129 // compute mask in bit vector … … 136 134 printk("\n[WARNING] in %s : block[%x,%x] not allocated / kcm %x / kcm_page %x\n", 137 135 __FUNCTION__, local_cxy, block_ptr, kcm, kcm_page ); 138 printk(" status %L / mask %L / sts & msk %L\n", status, mask, (status & mask) );139 136 kcm_remote_display( local_cxy , kcm ); 140 137 return; … … 145 142 kcm_page->count = count - 1; 146 143 147 // change the page mode if pagewas full148 if( count == max)144 // switch page to active if it was full 145 if( count == 63 ) 149 146 { 150 147 list_unlink( &kcm_page->list ); … … 155 152 } 156 153 157 #if DEBUG_KCM158 thread_t * this = CURRENT_THREAD;159 uint32_t cycle = (uint32_t)hal_get_cycles();160 if( DEBUG_KCM < cycle )161 printk("\n[%s] thread[%x,%x] block %x / page %x / size %d / count %d / cycle %d\n",162 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1, cycle );163 #endif164 165 154 } // kcm_put_block() 166 155 167 156 ///////////////////////////////////////////////////////////////////////////////////// 168 // This static function must be called by a local thread. 169 // It returns one non-full kcm_page with the following policy : 157 // This static function returns one non-full kcm_page with the following policy : 170 158 // - if the "active_list" is non empty, it returns the first "active" page, 171 159 // without modifying the KCM state. 
… … 188 176 else // allocate a new page from PPM 189 177 { 190 // get one 4 Kbytes page from local PPM 191 page_t * page = ppm_alloc_pages( 0 ); 178 // get KCM order 179 uint32_t order = kcm->order; 180 181 // get one kcm_page from PPM 182 page_t * page = ppm_alloc_pages( order + 6 - CONFIG_PPM_PAGE_ORDER ); 192 183 193 184 if( page == NULL ) 194 185 { 195 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 196 __FUNCTION__ , local_cxy ); 197 186 187 #if DEBUG_KCM_ERROR 188 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 189 __FUNCTION__ , local_cxy ); 190 #endif 198 191 return NULL; 199 192 } … … 202 195 xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) ); 203 196 204 // get local pointer on kcm_page 197 // get local pointer on kcm_page 205 198 kcm_page = GET_PTR( base_xp ); 206 199 … … 225 218 { 226 219 227 assert( __FUNCTION__, ((order > 5) && (order < 12)) , "order must be in [6,11]" ); 228 229 assert( __FUNCTION__, (CONFIG_PPM_PAGE_SHIFT == 12) , "check status bit_vector width" ); 220 // check argument 221 assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER), 222 "order argument %d too large", order ); 223 224 assert( __FUNCTION__, (order >= CONFIG_CACHE_LINE_ORDER), 225 "order argument %d too small", order ); 230 226 231 227 // initialize lock … … 238 234 list_root_init( &kcm->active_root ); 239 235 240 // initialize order and max_blocks 241 kcm->order = order; 242 kcm->max_blocks = ( CONFIG_PPM_PAGE_SIZE >> order ) - 1; 236 // initialize order 237 kcm->order = order; 243 238 244 239 #if DEBUG_KCM 245 thread_t * this = CURRENT_THREAD; 246 uint32_t cycle = (uint32_t)hal_get_cycles(); 247 if( DEBUG_KCM < cycle ) 248 printk("\n[%s] thread[%x,%x] initialised KCM / order %d / max_blocks %d\n", 249 __FUNCTION__, this->process->pid, this->trdid, order, kcm->max_blocks ); 240 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) ) 241 printk("\n[%s] cxy %x / order %d\n", 242 __FUNCTION__, local_cxy, order ); 250 243 #endif 251 244 … … 287 280 void * kcm_alloc( uint32_t order ) 288 281 { 289 kcm_t * kcm _ptr;282 kcm_t * kcm; 290 283 kcm_page_t * kcm_page; 291 void * block_ptr; 292 293 // min block size is 64 bytes 294 if( order < 6 ) order = 6; 295 296 assert( __FUNCTION__, (order < 12) , "order = %d / must be less than 12" , order ); 284 void * block; 285 286 // check argument 287 assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER), 288 "order argument %d too large", order ); 289 290 #if DEBUG_KCM 291 uint32_t cycle = (uint32_t)hal_get_cycles(); 292 #endif 293 294 // smallest block size is a cache line 295 if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER; 297 296 298 297 // get local pointer on relevant KCM allocator 299 kcm _ptr = &LOCAL_CLUSTER->kcm[order - 6];298 kcm = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER]; 300 299 301 300 // build extended pointer on local KCM lock 302 xptr_t lock_xp = XPTR( local_cxy , &kcm _ptr->lock );301 xptr_t lock_xp = XPTR( local_cxy , &kcm->lock ); 303 302 304 303 // get KCM lock … … 306 305 307 306 // get a non-full kcm_page 308 kcm_page = kcm_get_page( kcm_ptr ); 309 310 #if DEBUG_KCM 311 thread_t * this = CURRENT_THREAD; 312 uint32_t cycle = (uint32_t)hal_get_cycles(); 313 if( DEBUG_KCM < cycle ) 314 { 315 printk("\n[%s] thread[%x,%x] enters / order %d / page %x / kcm %x / page_status (%x|%x)\n", 316 __FUNCTION__, this->process->pid, this->trdid, order, kcm_page, kcm_ptr, 317 GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) ); 318 kcm_remote_display( local_cxy , 
kcm_ptr ); 319 } 320 #endif 307 kcm_page = kcm_get_page( kcm ); 321 308 322 309 if( kcm_page == NULL ) … … 326 313 } 327 314 328 // get a block from selected active page 329 block_ptr = kcm_get_block( kcm_ptr , kcm_page ); 315 #if DEBUG_KCM 316 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) 317 printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" 318 " page %x / status [%x,%x] / count %d\n", 319 __FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr, 320 kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count ); 321 #endif 322 323 // allocate a block from selected active page 324 block = kcm_get_block( kcm , kcm_page ); 325 326 #if DEBUG_KCM 327 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) 328 printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" 329 " page %x / status [%x,%x] / count %d\n", 330 __FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr, 331 kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count ); 332 #endif 330 333 331 334 // release lock 332 335 remote_busylock_release( lock_xp ); 333 336 334 #if DEBUG_KCM 335 if( DEBUG_KCM < cycle ) 336 printk("\n[%s] thread[%x,%x] exit / order %d / block %x / kcm %x / page_status (%x|%x)\n", 337 __FUNCTION__, this->process->pid, this->trdid, order, block_ptr, kcm_ptr, 338 GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) ); 339 #endif 340 341 return block_ptr; 337 return block; 342 338 343 339 } // end kcm_alloc() 344 340 345 ///////////////////////////////// 346 void kcm_free( void * block_ptr ) 347 { 348 kcm_t * kcm_ptr; 341 /////////////////////////////// 342 void kcm_free( void * block, 343 uint32_t order ) 344 { 345 kcm_t * kcm; 349 346 kcm_page_t * kcm_page; 350 347 351 348 // check argument 352 assert( __FUNCTION__, (block_ptr != NULL) , "block pointer cannot be NULL" ); 349 assert( __FUNCTION__, (block != NULL), 350 "block pointer cannot be NULL" ); 351 352 #if DEBUG_KCM 353 uint32_t cycle = (uint32_t)hal_get_cycles(); 354 #endif 355 356 // smallest block size is a cache line 357 if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER; 358 359 // get local pointer on relevant KCM allocator 360 kcm = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER]; 353 361 354 362 // get local pointer on KCM page 355 kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK); 356 357 // get local pointer on KCM descriptor 358 kcm_ptr = kcm_page->kcm; 359 360 #if DEBUG_KCM 361 thread_t * this = CURRENT_THREAD; 362 uint32_t cycle = (uint32_t)hal_get_cycles(); 363 if( (DEBUG_KCM < cycle) && (local_cxy == 1) ) 364 { 365 printk("\n[%s] thread[%x,%x] enters / order %d / block %x / page %x / kcm %x / status [%x,%x]\n", 366 __FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, block_ptr, kcm_page, kcm_ptr, 367 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) ); 368 kcm_remote_display( local_cxy , kcm_ptr ); 369 } 370 #endif 363 intptr_t kcm_page_mask = (1 << (order + 6)) - 1; 364 kcm_page = (kcm_page_t *)((intptr_t)block & ~kcm_page_mask); 371 365 372 366 // build extended pointer on local KCM lock 373 xptr_t lock_xp = XPTR( local_cxy , &kcm _ptr->lock );367 xptr_t lock_xp = XPTR( local_cxy , &kcm->lock ); 374 368 375 369 // get lock 376 370 remote_busylock_acquire( lock_xp ); 377 371 378 // release block 379 kcm_put_block( kcm_ptr , kcm_page , block_ptr ); 372 #if 
DEBUG_KCM 373 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) 374 printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" 375 " page %x / status [%x,%x] / count %d\n", 376 __FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr, 377 kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count ); 378 #endif 379 380 // release the block to the relevant page 381 kcm_put_block( kcm , kcm_page , block ); 382 383 #if DEBUG_KCM 384 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) 385 printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" 386 " page %x / status [%x,%x] / count %d\n", 387 __FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr, 388 kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count ); 389 #endif 380 390 381 391 // release lock 382 392 remote_busylock_release( lock_xp ); 383 393 384 #if DEBUG_KCM385 if( (DEBUG_KCM < cycle) && (local_cxy == 1) )386 {387 printk("\n[%s] thread[%x,%x] exit / order %d / page %x / status [%x,%x]\n",388 __FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, kcm_ptr,389 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) );390 kcm_remote_display( local_cxy , kcm_ptr );391 }392 #endif393 394 394 } // end kcm_free() 395 395 … … 400 400 401 401 ///////////////////////////////////////////////////////////////////////////////////// 402 // This static function can be called by any thread running in any cluster. 402 // This static function is called by the kcm_remote_alloc() function. 403 // It can be called by any thread running in any cluster. 403 404 // It returns a local pointer on a block allocated from an active kcm_page. 404 405 // It makes a panic if no block available in the selected kcm_page. 
… … 415 416 { 416 417 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) ); 417 uint32_t max = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) );418 418 uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); 419 419 uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); 420 uint32_t size = 1 << order; 421 422 assert( __FUNCTION__, (count < max) , "kcm_page should not be full" ); 420 421 // check kcm_page not full 422 assert( __FUNCTION__, (count < 63) , 423 "kcm_page should not be full / cxy %x / order %d / count %d", kcm_cxy, order, count ); 423 424 424 425 uint32_t index = 1; … … 427 428 // allocate first free block in kcm_page, update status, 428 429 // and count , compute index of allocated block in kcm_page 429 while( index <= max)430 while( index <= 63 ) 430 431 { 431 432 if( (status & mask) == 0 ) // block found … … 440 441 } 441 442 442 // change the page list if found block is the last443 if( count == max-1)443 // swich the page to full if last block 444 if( (count + 1) == 63 ) 444 445 { 445 446 list_remote_unlink( kcm_cxy , &kcm_page->list ); … … 451 452 452 453 // compute return pointer 453 void * ptr = (void *)((intptr_t)kcm_page + (index * size) ); 454 455 #if DEBUG_KCM_REMOTE 456 thread_t * this = CURRENT_THREAD; 457 uint32_t cycle = (uint32_t)hal_get_cycles(); 458 if( DEBUG_KCM_REMOTE < cycle ) 459 printk("\n[%s] thread[%x,%x] get block %x in page %x / cluster %x / size %x / count %d\n", 460 __FUNCTION__, this->process->pid, this->trdid, 461 ptr, kcm_page, kcm_cxy, size, count + 1 ); 462 #endif 454 void * ptr = (void *)((intptr_t)kcm_page + (index << order)); 463 455 464 456 return ptr; … … 467 459 468 460 ///////////////////////////////////////////////////////////////////////////////////// 469 // This private static function can be called by any thread running in any cluster. 461 // This static function is called by the kcm_remote_free() function. 462 // It can be called by any thread running in any cluster. 470 463 // It releases a previously allocated block to the relevant kcm_page. 471 464 // It changes the kcm_page status as required. 
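The bitmap arithmetic shared by kcm_get_block(), kcm_put_block() and the remote variants in these hunks can be exercised standalone. Each kcm_page is now a (64 << order)-byte slab holding 63 blocks of (1 << order) bytes, block 0 being reserved for the descriptor, with a 64-bit status word tracking allocation. The kernel code additionally moves the page between the active and full lists under a busylock and recovers the kcm_page pointer by masking the low bits of the block address; the sketch below keeps only the bit manipulation, and therefore allocates the slab aligned on its own size.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>

#define ORDER 6                                // 64-byte blocks, 4 Kbytes slab

typedef struct
{
    uint64_t status;       // bit <i> set <=> block <i> allocated
    uint32_t count;        // number of allocated blocks (max 63)
} slab_t;                  // lives in block 0 of the slab itself

static void * slab_get( slab_t * page )
{
    assert( page->count < 63 );                // page must not be full

    for( uint32_t index = 1 ; index <= 63 ; index++ )
    {
        uint64_t mask = 1ULL << index;
        if( (page->status & mask) == 0 )       // first free block found
        {
            page->status |= mask;
            page->count++;
            return (void *)((intptr_t)page + (index << ORDER));
        }
    }
    return NULL;                               // unreachable while count < 63
}

static void slab_put( slab_t * page , void * block )
{
    uint32_t index = (uint32_t)(((intptr_t)block - (intptr_t)page) >> ORDER);
    uint64_t mask  = 1ULL << index;

    assert( (page->status & mask) != 0 );      // block must be allocated
    page->status &= ~mask;
    page->count--;
}

int main( void )
{
    // align the slab on its own size, so that a block address can be mapped
    // back to its page by masking, as kcm_free() does above
    slab_t * page = aligned_alloc( 64 << ORDER , 64 << ORDER );
    if( page == NULL ) return -1;
    page->status = 0;
    page->count  = 0;

    void * b = slab_get( page );
    printf("got block %p / count %u\n", b , page->count );
    slab_put( page , b );
    printf("released   / count %u\n", page->count );
    free( page );
    return 0;
}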
… … 481 474 void * block_ptr ) 482 475 { 483 uint32_t max = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) );484 476 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) ); 485 477 uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); 486 478 uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); 487 uint32_t size = 1 << order;488 479 489 // compute block index from block pointer 490 uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size;480 // compute block index from block pointer and kcm_page pointer 481 uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) >> order; 491 482 492 483 // compute mask in bit vector … … 497 488 printk("\n[WARNING] in %s : block[%x,%x] not allocated / kcm %x / kcm_page %x\n", 498 489 __FUNCTION__, kcm_cxy, block_ptr, kcm_ptr, kcm_page ); 499 printk(" status %L / mask %L / sts & msk %L\n", status, mask, (status & mask) );500 490 kcm_remote_display( kcm_cxy , kcm_ptr ); 501 491 return; … … 506 496 hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count ) , count - 1 ); 507 497 508 // change the page listif page was full509 if( count == max)498 // switch the page to active if page was full 499 if( count == 63 ) 510 500 { 511 501 list_remote_unlink( kcm_cxy , &kcm_page->list ); … … 516 506 } 517 507 518 #if (DEBUG_KCM_REMOTE & 1)519 thread_t * this = CURRENT_THREAD;520 uint32_t cycle = (uint32_t)hal_get_cycles();521 if( DEBUG_KCM_REMOTE < cycle )522 printk("\n[%s] thread[%x,%x] block %x / page %x / cluster %x / size %x / count %d\n",523 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1 )524 #endif525 526 508 } // end kcm_remote_put_block() 527 509 528 510 ///////////////////////////////////////////////////////////////////////////////////// 529 // This privatestatic function can be called by any thread running in any cluster.511 // This static function can be called by any thread running in any cluster. 530 512 // It gets one non-full KCM page from the remote KCM. 
531 513 // It allocates a page from remote PPM to populate the freelist, and initialises
… …
545 527 else // allocate a new page from PPM
546 528 {
547 // get one 4 Kbytes page from remote PPM
548 xptr_t page_xp = ppm_remote_alloc_pages( kcm_cxy , 0 );
549
529 // get KCM order
530 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ));
531
532 // get one kcm_page from PPM
533 xptr_t page_xp = ppm_remote_alloc_pages( kcm_cxy,
534 order + 6 - CONFIG_PPM_PAGE_ORDER );
550 535 if( page_xp == XPTR_NULL )
551 536 {
552 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
553 __FUNCTION__ , kcm_cxy );
554
537
538 #if DEBUG_KCM_ERROR
539 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
540 __FUNCTION__ , kcm_cxy );
541 #endif
555 542 return NULL;
556 543 }
… …
585 572 void * block_ptr;
586 573
587 if( order < 6 ) order = 6;
588
589 assert( __FUNCTION__, (order < 12) , "order = %d / must be less than 12" , order );
590
591 // get local pointer on relevant KCM allocator
574 // check kcm_cxy argument
575 assert( __FUNCTION__, cluster_is_active( kcm_cxy ),
576 "cluster %x not active", kcm_cxy );
577
578 // check order argument
579 assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER) ,
580 "order argument %d too large", order );
581
582 // smallest size is a cache line
583 if( order < CONFIG_CACHE_LINE_ORDER ) order = CONFIG_CACHE_LINE_ORDER;
584
585 // get local pointer on relevant KCM allocator (same in all clusters)
592 586 kcm_ptr = &LOCAL_CLUSTER->kcm[order - 6];
593 587
… …
607 601 }
608 602
603 #if DEBUG_KCM
604 uint32_t cycle = (uint32_t)hal_get_cycles();
605 uint32_t nb_full = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ));
606 uint32_t nb_active = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ));
607 uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ));
608 uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ));
609 #endif
610
611
612 #if DEBUG_KCM
613 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
614 printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
615 " page %x / status [%x,%x] / count %d\n",
616 __FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active,
617 kcm_page, (uint32_t)(status>>32), (uint32_t)(status), count );
618 #endif
619
620 // get a block from selected active page
610 621 block_ptr = kcm_remote_get_block( kcm_cxy , kcm_ptr , kcm_page );
611 622
623 #if DEBUG_KCM
624 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
625 printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
626 " page %x / status [%x,%x] / count %d\n",
627 __FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active,
628 kcm_page, (uint32_t)(status>>32), (uint32_t)(status), count );
629 #endif
630
612 631 // release lock
613 632 remote_busylock_release( lock_xp );
614 633
615 #if DEBUG_KCM_REMOTE
616 thread_t * this = CURRENT_THREAD;
617 uint32_t cycle = (uint32_t)hal_get_cycles();
618 if( DEBUG_KCM_REMOTE < cycle )
619 printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm[%x,%x]\n",
620 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr );
621 #endif
622
623 634 return block_ptr;
624 635
625 636 } // end kcm_remote_alloc()
626 637
627 /////////////////////////////////////
628 void kcm_remote_free( cxy_t kcm_cxy,
629 void * block_ptr )
638 ////////////////////////////////////////
639 void kcm_remote_free( cxy_t kcm_cxy,
640 void * block_ptr,
641 uint32_t order )
630 642 {
631 643 kcm_t * kcm_ptr;
632 644 kcm_page_t * kcm_page;
633 645
634 // check argument
635 assert( __FUNCTION__, (block_ptr != NULL) , "block pointer cannot be NULL" );
636
637 // get local pointer on remote KCM page
638 kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK);
639
640 // get local pointer on remote KCM
641 kcm_ptr = hal_remote_lpt( XPTR( kcm_cxy , &kcm_page->kcm ) );
646 // check kcm_cxy argument
647 assert( __FUNCTION__, cluster_is_active( kcm_cxy ),
648 "cluster %x not active", kcm_cxy );
649
650 // check block_ptr argument
651 assert( __FUNCTION__, (block_ptr != NULL),
652 "block pointer cannot be NULL" );
653
654 // check order argument
655 assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER) ,
656 "order argument %d too large", order );
657
658 // smallest block size is a cache line
659 if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER;
660
661 // get local pointer on relevant KCM allocator (same in all clusters)
662 kcm_ptr = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER];
663
664 // get local pointer on KCM page
665 intptr_t kcm_page_mask = (1 << (order + 6)) - 1;
666 kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~kcm_page_mask);
667
668 #if DEBUG_KCM
669 uint32_t cycle = (uint32_t)hal_get_cycles();
670 uint32_t nb_full = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ));
671 uint32_t nb_active = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ));
672 uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ));
673 uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ));
674 #endif
642 675
643 676 // build extended pointer on remote KCM lock
… …
647 680 remote_busylock_acquire( lock_xp );
648 681
649 // release block
682 #if DEBUG_KCM
683 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
684 printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
685 " page %x / status [%x,%x] / count %d\n",
686 __FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active,
687 kcm_page, (uint32_t)(status>>32), (uint32_t)(status), count );
688 #endif
689
690 // release the block to the relevant page
650 691 kcm_remote_put_block( kcm_cxy , kcm_ptr , kcm_page , block_ptr );
692
693 #if DEBUG_KCM
694 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
695 printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
696 " page %x / status [%x,%x] / count %d\n",
697 __FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active,
698 kcm_page, (uint32_t)(status>>32), (uint32_t)(status), count );
699 #endif
651 700
652 701 // release lock
653 702 remote_busylock_release( lock_xp );
654
655 #if DEBUG_KCM_REMOTE
656 thread_t * this = CURRENT_THREAD;
657 uint32_t cycle = (uint32_t)hal_get_cycles();
658 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
659 if( DEBUG_KCM_REMOTE < cycle )
660 printk("\n[%s] thread[%x,%x] released block %x / order %d / kcm[%x,%x]\n",
661 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr );
662 #endif
663 703
664 704 } // end kcm_remote_free
… …
673 713 uint32_t count;
674 715
716 // get pointers on TXT0 chdev
717 xptr_t txt0_xp = chdev_dir.txt_tx[0];
718 cxy_t txt0_cxy = GET_CXY( txt0_xp );
719 chdev_t * txt0_ptr = GET_PTR( txt0_xp );
720
721 // get extended pointer on remote TXT0 chdev lock
722 xptr_t txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
723
// get TXT0 lock
724 remote_busylock_acquire( txt0_lock_xp );
725
675 726 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order) );
676 727 uint32_t full_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) );
677 728 uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) );
678 729
679 printk("*** KCM : cxy %x / order %d / full_pages_nr %d / active_pages_nr %d\n",
730 nolock_printk("*** KCM : cxy %x / order %d / full_pages_nr %d / active_pages_nr %d\n",
680 731 kcm_cxy, order, full_pages_nr, active_pages_nr );
681 732
… …
688 739 count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
689 740
690 printk("- active page %x / status (%x,%x) / count %d\n",
691 kcm_page, GET_CXY( status ), GET_PTR( status ), count );
741 nolock_printk("- active page %x / status (%x,%x) / count %d\n",
742 kcm_page, (uint32_t)( status >> 32 ), (uint32_t)( status ), count );
692 743 }
693 744 }
… …
701 752 count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
702 753
703 printk("- full page %x / status (%x,%x) / count %d\n",
704 kcm_page, GET_CXY( status ), GET_PTR( status ), count );
754 nolock_printk("- full page %x / status (%x,%x) / count %d\n",
755 kcm_page, (uint32_t)( status >> 32 ), (uint32_t)( status ), count );
705 756 }
706 757 }
758
759 // release TXT0 lock
760 remote_busylock_release( txt0_lock_xp );
761
707 762 } // end kcm_remote_display()
-
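The status field printed by the debug code above is the 64-bit allocation map of a kcm_page, displayed as its two 32-bit halves. The kcm_remote_get_block() / kcm_remote_put_block() helpers are not shown in this changeset, so the sketch below is only a minimal local illustration of how a free slot could be found and marked in such a bitmap, assuming bit i set means block i allocated, with block 0 permanently reserved for the kcm_page descriptor:

    /* Hedged sketch, not the actual kcm_remote_get_block() code. */
    #include <stdint.h>

    static int kcm_bitmap_get_slot( uint64_t * status )    /* assumed bit semantics */
    {
        int i;
        for( i = 1 ; i < 64 ; i++ )                 /* slot 0 holds the descriptor */
        {
            if( ( *status & ((uint64_t)1 << i) ) == 0 )
            {
                *status |= ((uint64_t)1 << i);      /* mark slot allocated */
                return i;                           /* index of granted block */
            }
        }
        return -1;                                  /* all 63 data slots busy */
    }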
trunk/kernel/mm/kcm.h
r672 r683 32 32 #include <kmem.h>
33 33
34
35 #define KCM_PAGE_FULL 0
36 #define KCM_PAGE_EMPTY 1
37 #define KCM_PAGE_ACTIVE 2
38
39 34 /****************************************************************************************
40 * This structure defines a generic Kernel Cache Manager, that is a block allocator,
41 * for fixed size objects. It exists in each cluster a specific KCM allocator for
42 * the following block sizes: 64, 128, 256, 512, 1024, 2048 bytes.
43 * These six KCM allocators are initialized by the cluster_init() function.
35 * This structure defines a generic Kernel Cache Manager, a fixed size block allocator.
36 * It returns an aligned block whose size is a power of 2, not smaller than a cache line,
37 * but smaller than a small PPM page. There is, in each cluster, a specific KCM allocator
38 * for each possible block size. When the cache line contains 64 bytes and the page
39 * contains 4K bytes, the possible block sizes are 64, 128, 256, 512, 1024, 2048 bytes.
40 * These KCM allocators are initialized by the cluster_init() function.
44 41 *
45 * Each KCM cache is implemented as a set o 4 Kbytes pages. A kcm_page is split in slots,
46 * where each slot can contain one block. in each kcm_page, the first slot (that cannot
47 * be smaller than 64 bytes) contains the kcm page descriptor, defined below
42 * Each KCM cache is implemented as a set of "kcm_pages": a "kcm_page" is an aligned
43 * buffer in physical memory (allocated by the PPM allocator) such that:
44 * buffer_size = block_size * 64 <=> buffer_order = block_order + 6.
45 *
46 * A kcm_page always contains 64 kcm_blocks, but the first block (that cannot be smaller
47 * than 64 bytes) is used to store the kcm_page descriptor defining the page allocation
48 * status, and cannot be allocated to store data.
49 *
50 * A KCM cache is extensible, as new kcm_pages are dynamically allocated from the PPM
51 * allocator when required. For a given KCM cache the set of kcm_pages is split in two
52 * lists: the list of "full" pages (containing 63 allocated blocks), and the list of
53 * "active" pages (containing at least one free block). An "empty" page (containing
54 * only free blocks) is considered active, and is not released to PPM.
48 55 *
49 56 * To allow any thread running in any cluster to directly access the KCM of any cluster,
… …
62 69
63 70 uint32_t order; /*! ln( block_size ) */
64 uint32_t max_blocks; /*! max number of blocks per page */
65 71 }
66 72 kcm_t;
… …
84 90 list_entry_t list; /*! [active / busy / free] list member */
85 91 kcm_t * kcm; /*! pointer on kcm allocator */
86 page_t * page; /*! pointer on the physical page descriptor */
92 page_t * page; /*! pointer on physical page descriptor */
87 93 }
88 94 kcm_page_t;
… …
120 126 ****************************************************************************************
121 127 * @ block_ptr : local pointer on the released block.
128 * @ order : log2( block_size in bytes ).
122 129 ***************************************************************************************/
123 void kcm_free( void * block_ptr );
130 void kcm_free( void * block_ptr,
131 uint32_t order );
124 132
125 133
… …
143 151 * @ kcm_cxy : remote KCM cluster identifier.
144 152 * @ block_ptr : local pointer on the released buffer in remote cluster.
153 * @ order : log2( block_size in bytes ).
145 154 ***************************************************************************************/ 146 155 void kcm_remote_free( cxy_t kcm_cxy, 147 void * block_ptr ); 156 void * block_ptr, 157 uint32_t order ); 148 158 149 159 /**************************************************************************************** -
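A worked instance of the geometry described in the comment above, under its stated assumptions (64-byte cache lines, 4 Kbytes PPM pages); the macro names are illustrative, not taken from the source:

    /* KCM cache of order 7 : buffer_order = block_order + 6 */
    #define BLOCK_ORDER    7                             /* 128-byte blocks      */
    #define BUFFER_ORDER   (BLOCK_ORDER + 6)             /* 13 -> 8 Kbytes       */
    #define PPM_ORDER      (BUFFER_ORDER - 12)           /* 1  -> 2 small pages  */
    #define DATA_BLOCKS    63                            /* block 0 = descriptor */

This matches the ppm_remote_alloc_pages( kcm_cxy , order + 6 - CONFIG_PPM_PAGE_ORDER ) call in kcm.c above.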
trunk/kernel/mm/khm.c
r672 r683 40 40 { 41 41 // check config parameters 42 assert( __FUNCTION__, ((CONFIG_PPM_PAGE_ SHIFT+ CONFIG_PPM_HEAP_ORDER) < 32 ) ,42 assert( __FUNCTION__, ((CONFIG_PPM_PAGE_ORDER + CONFIG_PPM_HEAP_ORDER) < 32 ) , 43 43 "CONFIG_PPM_HEAP_ORDER too large" ); 44 44 … … 47 47 48 48 // compute kernel heap size 49 intptr_t heap_size = (1 << CONFIG_PPM_HEAP_ORDER) << CONFIG_PPM_PAGE_ SHIFT;49 intptr_t heap_size = (1 << CONFIG_PPM_HEAP_ORDER) << CONFIG_PPM_PAGE_ORDER; 50 50 51 51 // get kernel heap base from PPM -
trunk/kernel/mm/kmem.c
r672 r683 2 2 * kmem.c - kernel memory allocator implementation. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019,2020)4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 29 29 #include <thread.h> 30 30 #include <memcpy.h> 31 #include <khm.h>32 31 #include <ppm.h> 33 32 #include <kcm.h> … … 35 34 #include <kmem.h> 36 35 37 ///////////////////////////////////// 38 void * kmem_alloc( kmem_req_t * req ) 39 { 40 uint32_t type; // KMEM_PPM / KMEM_KCM / KMEM_KHM 41 uint32_t flags; // AF_NONE / AF_ZERO / AF_KERNEL 42 uint32_t order; // PPM: ln(pages) / KCM: ln(bytes) / KHM: bytes 43 44 type = req->type; 45 order = req->order; 46 flags = req->flags; 47 48 ////////////////////// 49 if( type == KMEM_PPM ) 50 { 51 // allocate the number of requested pages 52 page_t * page_ptr = (void *)ppm_alloc_pages( order ); 53 54 if( page_ptr == NULL ) 55 { 56 printk("\n[ERROR] in %s : PPM failed / order %d / cluster %x\n", 57 __FUNCTION__ , order , local_cxy ); 58 return NULL; 59 } 60 61 xptr_t page_xp = XPTR( local_cxy , page_ptr ); 62 63 // reset page if requested 64 if( flags & AF_ZERO ) page_zero( page_ptr ); 65 66 // get pointer on buffer from the page descriptor 67 void * ptr = GET_PTR( ppm_page2base( page_xp ) ); 68 69 #if DEBUG_KMEM 36 /////////////////////////////////// 37 void * kmem_alloc( uint32_t order, 38 uint32_t flags ) 39 { 40 41 #if DEBUG_KMEM || DEBUG_KMEM_ERROR 70 42 thread_t * this = CURRENT_THREAD; 71 43 uint32_t cycle = (uint32_t)hal_get_cycles(); 72 if( DEBUG_KMEM < cycle ) 73 printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n", 74 __FUNCTION__, this->process->pid, this->trdid, 75 1<<order, ppm_page2ppn(XPTR(local_cxy,ptr)), local_cxy, cycle ); 44 #endif 45 46 if( order >= CONFIG_PPM_PAGE_ORDER ) // use PPM 47 { 48 // allocate memory from PPM 49 page_t * page = (void *)ppm_alloc_pages( order - CONFIG_PPM_PAGE_ORDER ); 50 51 if( page == NULL ) 52 { 53 54 #if DEBUG_KMEM_ERROR 55 if (DEBUG_KMEM_ERROR < cycle) 56 printk("\n[ERROR] in %s : thread[%x,%x] failed for PPM / order %d / cluster %x / cycle %d\n", 57 __FUNCTION__ , this->process->pid , this->trdid , order , local_cxy , cycle ); 58 #endif 59 return NULL; 60 } 61 62 // reset page if requested 63 if( flags & AF_ZERO ) page_zero( page ); 64 65 // get pointer on buffer from the page descriptor 66 xptr_t page_xp = XPTR( local_cxy , page ); 67 void * ptr = GET_PTR( ppm_page2base( page_xp ) ); 68 69 #if DEBUG_KMEM 70 if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) ) 71 printk("\n[%s] thread[%x,%x] from PPM / order %d / ppn %x / cxy %x / cycle %d\n", 72 __FUNCTION__, this->process->pid, this->trdid, 73 order, ppm_page2ppn(XPTR(local_cxy,ptr)), local_cxy, cycle ); 76 74 #endif 77 75 return ptr; 78 76 } 79 /////////////////////////// 80 else if( type == KMEM_KCM ) 77 else // use KCM 81 78 { 82 79 // allocate memory from KCM … … 85 82 if( ptr == NULL ) 86 83 { 87 printk("\n[ERROR] in %s : KCM failed / order %d / cluster %x\n", 88 __FUNCTION__ , order , local_cxy ); 84 85 #if DEBUG_KMEM_ERROR 86 if (DEBUG_KMEM_ERROR < cycle) 87 printk("\n[ERROR] in %s : thread[%x,%x] failed for KCM / order %d / cluster %x / cycle %d\n", 88 __FUNCTION__ , this->process->pid , this->trdid , order , local_cxy , cycle ); 89 #endif 89 90 return NULL; 90 91 } … … 94 95 95 96 #if DEBUG_KMEM 96 thread_t * this = CURRENT_THREAD; 97 uint32_t cycle = (uint32_t)hal_get_cycles(); 98 if( DEBUG_KMEM < cycle ) 99 printk("\n[%s] thread [%x,%x] from 
KCM / %d bytes / base %x / cxy %x / cycle %d\n", 100 __FUNCTION__, this->process->pid, this->trdid, 101 1<<order, ptr, local_cxy, cycle ); 97 if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) ) 98 printk("\n[%s] thread [%x,%x] from KCM / order %d / base %x / cxy %x / cycle %d\n", 99 __FUNCTION__, this->process->pid, this->trdid, 100 order, ptr, local_cxy, cycle ); 102 101 #endif 103 102 return ptr; 104 103 } 105 ///////////////////////////106 else if( type == KMEM_KHM )107 {108 // allocate memory from KHM109 void * ptr = khm_alloc( &LOCAL_CLUSTER->khm , order );110 111 if( ptr == NULL )112 {113 printk("\n[ERROR] in %s : KHM failed / order %d / cluster %x\n",114 __FUNCTION__ , order , local_cxy );115 return NULL;116 }117 118 // reset memory if requested119 if( flags & AF_ZERO ) memset( ptr , 0 , order );120 121 #if DEBUG_KMEM122 thread_t * this = CURRENT_THREAD;123 uint32_t cycle = (uint32_t)hal_get_cycles();124 if( DEBUG_KMEM < cycle )125 printk("\n[%s] thread[%x,%x] from KHM / %d bytes / base %x / cxy %x / cycle %d\n",126 __FUNCTION__, this->process->pid, this->trdid,127 order, ptr, local_cxy, cycle );128 #endif129 return ptr;130 }131 else132 {133 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__);134 return NULL;135 }136 104 } // end kmem_alloc() 137 105 138 ////////////////////////////////// 139 void kmem_free( kmem_req_t * req ) 140 { 141 uint32_t type = req->type; 142 143 ////////////////////// 144 if( type == KMEM_PPM ) 145 { 146 page_t * page = GET_PTR( ppm_base2page( XPTR( local_cxy , req->ptr ) ) ); 106 ////////////////////////////// 107 void kmem_free( void * ptr, 108 uint32_t order ) 109 { 110 if( order >= CONFIG_PPM_PAGE_ORDER ) // use PPM 111 { 112 page_t * page = GET_PTR( ppm_base2page( XPTR( local_cxy , ptr ) ) ); 147 113 148 114 ppm_free_pages( page ); 149 115 } 150 /////////////////////////// 151 else if( type == KMEM_KCM ) 116 else // use KCM 152 117 { 153 kcm_free( req->ptr ); 154 } 155 /////////////////////////// 156 else if( type == KMEM_KHM ) 157 { 158 khm_free( req->ptr ); 159 } 160 else 161 { 162 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__); 163 } 118 kcm_free( ptr , order ); 119 } 164 120 } // end kmem_free() 165 121 166 /////////////////////////////////////////// 167 void * kmem_remote_alloc( cxy_t cxy, 168 kmem_req_t * req ) 169 { 170 uint32_t type; // KMEM_PPM / KMEM_KCM / KMEM_KHM 171 uint32_t flags; // AF_ZERO / AF_KERNEL / AF_NONE 172 uint32_t order; // PPM: ln(pages) / KCM: ln(bytes) / KHM: bytes 173 174 type = req->type; 175 order = req->order;176 flags = req->flags;177 178 ////////////////////// 179 if( type == KMEM_PPM )180 { 181 // allocate the number of requested pages from remote cluster182 xptr_t page_xp = ppm_remote_alloc_pages( cxy , order );122 123 124 //////////////////////////////////////// 125 void * kmem_remote_alloc( cxy_t cxy, 126 uint32_t order, 127 uint32_t flags ) 128 { 129 130 #if DEBUG_KMEM || DEBUG_KMEM_ERROR 131 thread_t * this = CURRENT_THREAD; 132 uint32_t cycle = (uint32_t)hal_get_cycles(); 133 #endif 134 135 if( order >= CONFIG_PPM_PAGE_ORDER ) // use PPM 136 { 137 // allocate memory from PPM 138 xptr_t page_xp = ppm_remote_alloc_pages( cxy , order - CONFIG_PPM_PAGE_ORDER ); 183 139 184 140 if( page_xp == XPTR_NULL ) 185 141 { 186 printk("\n[ERROR] in %s : failed for PPM / order %d in cluster %x\n", 187 __FUNCTION__ , order , cxy ); 142 143 #if DEBUG_KMEM_ERROR 144 if( DEBUG_KMEM_ERROR < cycle ) 145 printk("\n[ERROR] in %s : thread[%x,%x] failed for PPM / 
order %d / cluster %x / cycle %d\n",
146 __FUNCTION__ , this->process->pid , this->trdid , order , cxy , cycle );
147 #endif
188 148 return NULL;
189 149 }
… …
192 152 xptr_t base_xp = ppm_page2base( page_xp );
193 153
194 // reset page if requested
195 if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE );
196
197
198 #if DEBUG_KMEM_REMOTE
199 thread_t * this = CURRENT_THREAD;
200 uint32_t cycle = (uint32_t)hal_get_cycles();
201 if( DEBUG_KMEM_REMOTE < cycle )
202 printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n",
203 __FUNCTION__, this->process->pid, this->trdid,
204 1<<order, ppm_page2ppn( page_xp ), cxy, cycle );
154 // reset memory if requested
155 if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , 1<<order );
156
157 #if DEBUG_KMEM
158 if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) )
159 printk("\n[%s] thread[%x,%x] from PPM / order %d / ppn %x / cxy %x / cycle %d\n",
160 __FUNCTION__, this->process->pid, this->trdid,
161 order, ppm_page2ppn( page_xp ), cxy, cycle );
205 162 #endif
206 163 return GET_PTR( base_xp );
207 164 }
208 ///////////////////////////
209 else if( type == KMEM_KCM )
165 else // use KCM
210 166 {
211 167 // allocate memory from KCM
… …
214 170 if( ptr == NULL )
215 171 {
216 printk("\n[ERROR] in %s : failed for KCM / order %d in cluster %x\n",
217 __FUNCTION__ , order , cxy );
172
173 #if DEBUG_KMEM_ERROR
174 if( DEBUG_KMEM_ERROR < cycle )
175 printk("\n[ERROR] in %s : thread[%x,%x] failed for KCM / order %d / cluster %x / cycle %d\n",
176 __FUNCTION__ , this->process->pid , this->trdid , order , cxy , cycle );
177 #endif
218 178 return NULL;
219 179 }
… …
222 182 if( flags & AF_ZERO ) hal_remote_memset( XPTR( cxy , ptr ) , 0 , 1<<order );
223 183
224 #if DEBUG_KMEM_REMOTE
225 thread_t * this = CURRENT_THREAD;
226 uint32_t cycle = (uint32_t)hal_get_cycles();
227 if( DEBUG_KMEM_REMOTE < cycle )
228 printk("\n[%s] thread [%x,%x] from KCM / %d bytes / base %x / cxy %x / cycle %d\n",
229 __FUNCTION__, this->process->pid, this->trdid,
230 1<<order, ptr, cxy, cycle );
184 #if DEBUG_KMEM
185 if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) )
186 printk("\n[%s] thread [%x,%x] from KCM / order %d / base %x / cxy %x / cycle %d\n",
187 __FUNCTION__, this->process->pid, this->trdid,
188 order, ptr, cxy, cycle );
231 189 #endif
232 190 return ptr;
233 191 }
234 ///////////////////////////
235 else if( type == KMEM_KHM )
236 {
237 printk("\n[ERROR] in %s : remote access not supported for KHM\n", __FUNCTION__ );
238 return NULL;
239 }
240 else
241 {
242 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__);
243 return NULL;
244 }
245 192 } // end kmem_remote_alloc()
246 193
247 ////////////////////////////////////////
248 void kmem_remote_free( cxy_t cxy,
249 kmem_req_t * req )
250 {
251 uint32_t type = req->type;
252
253 //////////////////////
254 if( type == KMEM_PPM )
255 {
256 page_t * page = GET_PTR( ppm_base2page( XPTR( cxy , req->ptr ) ) );
194 /////////////////////////////////////
195 void kmem_remote_free( cxy_t cxy,
196 void * ptr,
197 uint32_t order )
198 {
199 if( order >= CONFIG_PPM_PAGE_ORDER ) // use PPM
200 {
201 page_t * page = GET_PTR( ppm_base2page( XPTR( cxy , ptr ) ) );
257 202
258 203 ppm_free_pages( page );
259 204 }
260 ///////////////////////////
261 else if( type
== KMEM_KHM ) 267 { 268 printk("\n[ERROR] in %s : remote access not supported for KHM\n", __FUNCTION__ ); 269 } 270 else 271 { 272 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__); 273 } 207 kcm_remote_free( cxy , ptr , order ); 208 } 274 209 } // end kmem_remote_free() 275 210 -
trunk/kernel/mm/kmem.h
r656 r683 1 1 /* 2 * kmem.h - kernel unified memory allocator interface2 * kmem.h - unified kernel memory allocator definition 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019)4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 29 29 30 30 /************************************************************************************* 31 * This enum defines the three Kernel Memory Allocaror types32 ************************************************************************************/33 34 enum35 {36 KMEM_PPM = 0, /*! PPM allocator */37 KMEM_KCM = 1, /*! KCM allocator */38 KMEM_KHM = 2, /*! KHM allocator */39 };40 41 /*************************************************************************************42 31 * This defines the generic Allocation Flags that can be associated to 43 32 * a Kernel Memory Request. … … 45 34 46 35 #define AF_NONE 0x0000 // no attributes 47 #define AF_KERNEL 0x0001 // for kernel use 48 #define AF_ZERO 0x0002 // must be reset to 0 49 50 /************************************************************************************* 51 * This structure defines a Kernel Memory Request. 52 ************************************************************************************/ 53 54 typedef struct kmem_req_s 55 { 56 uint32_t type; /*! KMEM_PPM / KMEM_KCM / KMEM_KHM */ 57 uint32_t order; /*! PPM: ln2(pages) / KCM: ln2(bytes) / KHM: bytes */ 58 uint32_t flags; /*! request attributes */ 59 void * ptr; /*! local pointer on allocated buffer (only used by free) */ 60 } 61 kmem_req_t; 36 #define AF_KERNEL 0x0001 // for kernel use ??? 37 #define AF_ZERO 0x0002 // data buffer must be reset to 0 62 38 63 39 /************************************************************************************* 64 40 * These two functions allocate physical memory in a local or remote cluster 65 * as specified by the kmem_req_t request descriptor, and return a local pointer 66 * on the allocated buffer. It uses three specialised physical memory allocators: 67 * - PPM (Physical Pages Manager) allocates N contiguous small physical pages. 68 * N is a power of 2, and req.order = ln(N). Implement the buddy algorithm. 69 * - KCM (Kernel Cache Manager) allocates aligned blocks of M bytes from a cache. 70 * M is a power of 2, and req.order = ln( M ). One cache per block size. 71 * - KHM (Kernel Heap Manager) allocates physical memory buffers of M bytes, 72 * M can have any value, and req.order = M. 73 * 74 * WARNING: the physical memory allocated with a given allocator type must be 75 * released using the same allocator type. 41 * as specified by the <cxy>, <order> and <flags> arguments, and return a local 42 * pointer on the allocated buffer. The buffer size (in bytes) is a power of 2, 43 * equal to (1 << order) bytes. It can be initialized to zero if requested. 44 * Depending on the <order> value, it uses two specialised allocators: 45 * - When order is larger or equal to CONFIG_PPM_PAGE_ORDER, the PPM (Physical Pages 46 * Manager) allocates 2**(order - PPM_PAGE_ORDER) contiguous small physical pages. 47 * This allocator implements the buddy algorithm. 48 * - When order is smaller than CONFIG_PPM_PAGE_ORDER, the KCM (Kernel Cache Manager) 49 * allocates an aligned block of 2**order bytes from specialised KCM[ORDER] caches 50 * (one KCM cache per block size). 76 51 ************************************************************************************* 77 * @ cxy : target cluster identifier for a remote access. 78 * @ req : local pointer on allocation request. 
52 * @ cxy : [in] target cluster identifier for a remote access.
53 * @ order : [in] ln( block size in bytes ).
54 * @ flags : [in] allocation flags defined above.
79 55 * @ return local pointer on allocated buffer if success / return NULL if no memory.
80 56 ************************************************************************************/
81 void * kmem_alloc( kmem_req_t * req );
57 void * kmem_alloc( uint32_t order,
58 uint32_t flags );
82 59
83 void * kmem_remote_alloc( cxy_t cxy,
84 kmem_req_t * req );
60 void * kmem_remote_alloc( cxy_t cxy,
61 uint32_t order,
62 uint32_t flags );
85 63
86 64 /*************************************************************************************
87 * These two functions release previously allocated physical memory, as specified
88 * by the <type> and <ptr> fields of the kmem_req_t request descriptor.
65 * These two functions release a previously allocated physical memory block,
66 * as specified by the <cxy>, <order> and <ptr> arguments.
67 * - When order is larger or equal to CONFIG_PPM_PAGE_ORDER, the PPM (Physical Pages
68 * Manager) releases 2**(order - PPM_PAGE_ORDER) contiguous small physical pages.
69 * This allocator implements the buddy algorithm.
70 * - When order is smaller than CONFIG_PPM_PAGE_ORDER, the KCM (Kernel Cache Manager)
71 * releases the block of 2**order bytes to the specialised KCM[order] cache.
72 *************************************************************************************
73 * @ cxy : [in] target cluster identifier for a remote access.
74 * @ ptr : [in] local pointer to released block.
75 * @ order : [in] ln( block size in bytes ).
92 76 ************************************************************************************/
93 void kmem_free ( kmem_req_t * req );
77 void kmem_free( void * ptr,
78 uint32_t order );
94 79
95 void kmem_remote_free( cxy_t cxy,
96 kmem_req_t * req );
80 void kmem_remote_free( cxy_t cxy,
81 void * ptr,
82 uint32_t order );
97 83
98 84
-
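A short usage sketch of this unified API, assuming CONFIG_PPM_PAGE_ORDER == 12: any order below 12 is served by a KCM cache, any order of 12 or more by the PPM buddy allocator, and the same order must be passed back to the free function since it selects the allocator. The cluster identifier cxy is assumed to be valid:

    void * small  = kmem_alloc( 9 , AF_ZERO );                /* 512 bytes -> KCM        */
    void * big    = kmem_alloc( 13 , AF_ZERO );               /* 8 Kbytes  -> PPM        */
    void * remote = kmem_remote_alloc( cxy , 6 , AF_NONE );   /* 64 bytes in cluster cxy */

    kmem_free( small , 9 );                                   /* same order as alloc     */
    kmem_free( big , 13 );
    kmem_remote_free( cxy , remote , 6 );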
trunk/kernel/mm/mapper.c
r672 r683 3 3 *
4 4 * Authors Mohamed Lamine Karaoui (2015)
5 * Alain Greiner (2016,2017,2018,2019,2020)
5 * Alain Greiner (2016,2017,2018,2019,2020)
6 6 *
7 7 * Copyright (c) UPMC Sorbonne Universites
… …
51 51 {
52 52 mapper_t * mapper_ptr;
53 kmem_req_t req;
54 53 error_t error;
55 54
56 55 // allocate memory for mapper descriptor
57 req.type = KMEM_KCM;
58 req.order = bits_log2( sizeof(mapper_t) );
59 req.flags = AF_KERNEL | AF_ZERO;
60 mapper_ptr = kmem_remote_alloc( cxy , &req );
56 mapper_ptr = kmem_remote_alloc( cxy , bits_log2(sizeof(mapper_t)) , AF_ZERO );
61 57
62 58 if( mapper_ptr == NULL )
63 59 {
64 printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
60
61 #if DEBUG_MAPPER_ERROR
62 printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
63 #endif
65 64 return XPTR_NULL;
66 65 }
… …
77 76 if( error )
78 77 {
79 printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
80 req.type = KMEM_KCM;
81 req.ptr = mapper_ptr;
82 kmem_remote_free( cxy , &req );
78
79 #if DEBUG_MAPPER_ERROR
80 printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
81 #endif
82 kmem_remote_free( cxy , mapper_ptr , bits_log2(sizeof(mapper_t)) );
83 83 return XPTR_NULL;
84 84 }
… …
104 104 uint32_t found_index = 0;
105 105 uint32_t start_index = 0;
106 kmem_req_t req;
107
108 107 cxy_t mapper_cxy = GET_CXY( mapper_xp );
… …
137 136
138 137 // release memory for mapper descriptor
139 req.type = KMEM_KCM;
140 req.ptr = mapper_ptr;
141 kmem_remote_free( mapper_cxy , &req );
138 kmem_remote_free( mapper_cxy , mapper_ptr , bits_log2(sizeof(mapper_t)) );
142 139
143 140 } // end mapper_destroy()
… …
153 150 uint32_t inode_type = 0;
154 151
155 thread_t * this = CURRENT_THREAD;
152 #if DEBUG_MAPPER_HANDLE_MISS || DEBUG_MAPPER_ERROR
153 thread_t * this = CURRENT_THREAD;
154 uint32_t cycle = (uint32_t)hal_get_cycles();
155 #endif
156 156
157 157 // get target mapper cluster and local pointer
… …
170 170
171 171 #if DEBUG_MAPPER_HANDLE_MISS
172 uint32_t cycle = (uint32_t)hal_get_cycles();
173 172 char name[CONFIG_VFS_MAX_NAME_LENGTH];
174 173 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
… …
185 184 #endif
186 185
187 #if( DEBUG_MAPPER_HANDLE_MISS & 2 )
186 #if( DEBUG_MAPPER_HANDLE_MISS & 1 )
188 187 if( DEBUG_MAPPER_HANDLE_MISS < cycle )
189 188 {
… …
193 192 #endif
194 193
195 // allocate one 4 Kbytes page from the remote mapper cluster
196 xptr_t page_xp = ppm_remote_alloc_pages( mapper_cxy , 0 );
194 // allocate one 4 Kbytes page in the remote mapper cluster
195 void * base_ptr = kmem_remote_alloc( mapper_cxy , 12 , AF_NONE );
196
197 if( base_ptr == NULL )
198 {
199
200 #if DEBUG_MAPPER_ERROR
201 printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x / cycle %d\n",
202 __FUNCTION__ , this->process->pid, this->trdid , mapper_cxy , cycle );
203 #endif
204 return -1;
205 }
206
207 // get pointers on allocated page descriptor
208 xptr_t page_xp = ppm_base2page( XPTR( mapper_cxy , base_ptr ) );
197 209 page_t * page_ptr = GET_PTR( page_xp );
198
199 if( page_xp == XPTR_NULL )
200 {
201 printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
202 __FUNCTION__ , this->process->pid, this->trdid , mapper_cxy );
203 return -1;
204 }
205
210
206 211 // initialize the page descriptor
… …
217 222 page_id,
218 223 page_ptr );
219
220 224 if( error )
221 225 {
222 printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
223 __FUNCTION__ , this->process->pid, this->trdid );
224 ppm_remote_free_pages( mapper_cxy , page_ptr );
226
227 #if DEBUG_MAPPER_ERROR
228 printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper / cycle %d\n",
229 __FUNCTION__ , this->process->pid, this->trdid , cycle );
230 #endif
231 ppm_remote_free_pages( mapper_cxy , page_ptr );
225 232 return -1;
226 233 }
… …
236 243 if( error )
237 244 {
238 printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
239 __FUNCTION__ , this->process->pid, this->trdid );
240 mapper_remote_release_page( mapper_xp , page_ptr );
245
246 #if DEBUG_MAPPER_ERROR
247 printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device / cycle %d\n",
248 __FUNCTION__ , this->process->pid, this->trdid , cycle );
249 #endif
250 mapper_remote_release_page( mapper_xp , page_ptr );
241 251 return -1;
242 252 }
… …
260 270 #endif
261 271
262 #if( DEBUG_MAPPER_HANDLE_MISS & 2 )
272 #if( DEBUG_MAPPER_HANDLE_MISS & 1 )
263 273 if( DEBUG_MAPPER_HANDLE_MISS < cycle )
264 274 {
… …
299 309 #endif
300 310
301 #if( DEBUG_MAPPER_GET_PAGE & 2 )
311 #if( DEBUG_MAPPER_GET_PAGE & 1 )
302 312 if( DEBUG_MAPPER_GET_PAGE < cycle )
303 313 ppm_remote_display( local_cxy );
… …
336 346 if( error )
337 347 {
338 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
339 __FUNCTION__ , this->process->pid, this->trdid );
340 remote_rwlock_wr_release( lock_xp );
348
349 #if DEBUG_MAPPER_ERROR
350 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
351 __FUNCTION__ , this->process->pid, this->trdid );
352 #endif
353 remote_rwlock_wr_release( lock_xp );
341 354 return XPTR_NULL;
342 355 }
… …
364 377 #endif
365 378
366 #if( DEBUG_MAPPER_GET_PAGE & 2 )
379 #if( DEBUG_MAPPER_GET_PAGE & 1)
367 380 if( DEBUG_MAPPER_GET_PAGE < cycle )
368 381 ppm_remote_display( local_cxy );
… …
432 445 if( error )
433 446 {
434 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
435 __FUNCTION__ , this->process->pid, this->trdid );
436 remote_rwlock_wr_release( lock_xp );
447
448 #if DEBUG_MAPPER_ERROR
449 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
450 __FUNCTION__ , this->process->pid, this->trdid );
451 #endif
452 remote_rwlock_wr_release( lock_xp );
437 453 return XPTR_NULL;
438 454 }
… …
460 476 #endif
461 477
462 #if( DEBUG_MAPPER_GET_FAT_PAGE & 2 )
478 #if( DEBUG_MAPPER_GET_FAT_PAGE & 1)
463 479 if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
464 480 ppm_remote_display( local_cxy );
… …
532 548
533 549 // compute indexes of pages for first and last byte in mapper
534 uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
535 uint32_t last = max_byte >> CONFIG_PPM_PAGE_SHIFT;
550 uint32_t first = min_byte >> CONFIG_PPM_PAGE_ORDER;
551 uint32_t last = max_byte >> CONFIG_PPM_PAGE_ORDER;
536 552
537 553 #if (DEBUG_MAPPER_MOVE_USER & 1)
… …
668 684
669 685 // compute indexes for first and last pages in mapper
670 uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
671 uint32_t last = max_byte >> CONFIG_PPM_PAGE_SHIFT;
686 uint32_t first = min_byte >> CONFIG_PPM_PAGE_ORDER;
687 uint32_t last = max_byte >> CONFIG_PPM_PAGE_ORDER;
672 688
673 689 // compute source and destination clusters
… …
853 869 if( error )
854 870 {
855 printk("\n[ERROR] in %s : cannot synchonize dirty page %d\n",
856 __FUNCTION__, page_ptr->index );
871
872 #if DEBUG_MAPPER_SYNC
873 printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n",
874 __FUNCTION__, page_ptr->index );
875 #endif
857 876 return -1;
858 877 }
-
trunk/kernel/mm/mapper.h
r657 r683 39 39 /******************************************************************************************* 40 40 * This mapper_t object implements the kernel cache for a given VFS file or directory. 41 * There is one mapper per file/dir. It is implemented as a three levels radix tree, 42 * entirely stored in the same cluster as the inode representing the file/dir. 41 * There is one mapper per file/dir. 42 * - It is implemented as a three levels radix tree, entirely stored in the same cluster 43 * as the inode representing the file/directory. 43 44 * - The fast retrieval key is the page index in the file. 44 45 * The ix1_width, ix2_width, ix3_width sub-indexes are configuration parameters. 45 46 * - The leaves are pointers on physical page descriptors, dynamically allocated 46 * in the local cluster.47 * in the same cluster as the radix tree. 47 48 * - The mapper is protected by a "remote_rwlock", to support several simultaneous 48 49 * "readers", and only one "writer". … … 60 61 * buffer, that can be physically located in any cluster. 61 62 * - In the present implementation the cache size for a given file increases on demand, 62 * and the allocated memory is only released when the mapper/inode is destroyed. 63 * and the allocated memory is only released when the inode is destroyed. 64 * 65 * WARNING : This mapper implementation makes the assumption that the PPM page size 66 * is 4 Kbytes. This code should be modified to support a generic page size, 67 * defined by the CONFIG_PPM_PAGE_SIZE parameter. 63 68 ******************************************************************************************/ 64 69 -
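Because the fast retrieval key is the page index, locating a byte in a mapper reduces to the arithmetic below, consistent with the mapper_move_user() code in mapper.c above and with the 4 Kbytes page assumption stated in the warning (file_offset is an assumed input):

    uint32_t page_id = file_offset >> CONFIG_PPM_PAGE_ORDER;   /* radix-tree key   */
    uint32_t offset  = file_offset &  CONFIG_PPM_PAGE_MASK;    /* byte within page */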
trunk/kernel/mm/page.h
r656 r683 3 3 * 4 4 * Authors Ghassan Almalles (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018,2019 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/mm/ppm.c
r672 r683 60 60 61 61 void * base_ptr = ppm->vaddr_base + 62 ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ SHIFT);62 ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ORDER); 63 63 64 64 return XPTR( page_cxy , base_ptr ); … … 75 75 76 76 page_t * page_ptr = ppm->pages_tbl + 77 ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_ SHIFT);77 ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_ORDER); 78 78 79 79 return XPTR( base_cxy , page_ptr ); … … 91 91 page_t * page_ptr = GET_PTR( page_xp ); 92 92 93 paddr_t paddr = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ SHIFT);94 95 return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ SHIFT);93 paddr_t paddr = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ORDER ); 94 95 return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ORDER); 96 96 97 97 } // end hal_page2ppn() … … 102 102 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 103 103 104 paddr_t paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ SHIFT;104 paddr_t paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ORDER; 105 105 106 106 cxy_t cxy = CXY_FROM_PADDR( paddr ); 107 107 lpa_t lpa = LPA_FROM_PADDR( paddr ); 108 108 109 return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_ SHIFT] );109 return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_ORDER] ); 110 110 111 111 } // end hal_ppn2page … … 118 118 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 119 119 120 paddr_t paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ SHIFT;120 paddr_t paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ORDER; 121 121 122 122 cxy_t cxy = CXY_FROM_PADDR( paddr ); … … 137 137 paddr_t paddr = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) ); 138 138 139 return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ SHIFT);139 return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ORDER); 140 140 141 141 } // end ppm_base2ppn() … … 159 159 160 160 assert( __FUNCTION__, !page_is_flag( page , PG_FREE ) , 161 "page already released : ppn = %x \n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );161 "page already released : ppn = %x" , ppm_page2ppn( XPTR( local_cxy , page ) ) ); 162 162 163 163 assert( __FUNCTION__, !page_is_flag( page , PG_RESERVED ) , 164 "reserved page : ppn = %x \n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );164 "reserved page : ppn = %x" , ppm_page2ppn( XPTR( local_cxy , page ) ) ); 165 165 166 166 // set FREE flag in released page descriptor … … 214 214 page_t * found_block; 215 215 216 thread_t * this = CURRENT_THREAD;217 218 216 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 219 217 220 #if DEBUG_PPM_ALLOC_PAGES 221 uint32_t cycle = (uint32_t)hal_get_cycles(); 218 #if DEBUG_PPM_ALLOC_PAGES || DEBUG_PPM_ERROR 219 thread_t * this = CURRENT_THREAD; 220 uint32_t cycle = (uint32_t)hal_get_cycles(); 222 221 #endif 223 222 … … 232 231 233 232 // check order 234 assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order ); 233 assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , 234 "illegal order argument = %d" , order ); 235 235 236 236 //build extended pointer on lock protecting remote PPM … … 273 273 if( current_block == NULL ) // return failure if no free block found 274 274 { 275 // release lock protecting free lists 275 276 #if DEBUG_PPM_ERROR 277 printk("\n[ERROR] in %s thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n", 278 __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle ); 279 #endif 280 // release lock protecting free lists 276 281 remote_busylock_release( lock_xp ); 277 278 printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",279 __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy 
);280 281 282 return NULL; 282 283 } … … 385 386 page_t * found_block; 386 387 387 thread_t * this = CURRENT_THREAD;388 389 388 // check order 390 assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order ); 389 assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , 390 "illegal order argument = %d" , order ); 391 391 392 392 // get local pointer on PPM (same in all clusters) 393 393 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 394 394 395 #if DEBUG_PPM_REMOTE_ALLOC_PAGES 395 #if DEBUG_PPM_ALLOC_PAGES || DEBUG_PPM_ERROR 396 thread_t * this = CURRENT_THREAD; 396 397 uint32_t cycle = (uint32_t)hal_get_cycles(); 397 398 #endif 398 399 399 #if DEBUG_PPM_ REMOTE_ALLOC_PAGES400 if( DEBUG_PPM_ REMOTE_ALLOC_PAGES < cycle )400 #if DEBUG_PPM_ALLOC_PAGES 401 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 401 402 { 402 403 printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n", 403 404 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle ); 404 if( DEBUG_PPM_ REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );405 if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( cxy ); 405 406 } 406 407 #endif … … 445 446 if( current_block == NULL ) // return failure 446 447 { 448 449 #if DEBUG_PPM_ERROR 450 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n", 451 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle ); 452 #endif 447 453 // release lock protecting free lists 448 454 remote_busylock_release( lock_xp ); 449 450 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",451 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy );452 453 455 return XPTR_NULL; 454 456 } … … 489 491 hal_fence(); 490 492 491 #if DEBUG_PPM_ REMOTE_ALLOC_PAGES492 if( DEBUG_PPM_ REMOTE_ALLOC_PAGES < cycle )493 #if DEBUG_PPM_ALLOC_PAGES 494 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 493 495 { 494 496 printk("\n[%s] thread[%x,%x] allocated %d page(s) in cluster %x / ppn %x / cycle %d\n", 495 497 __FUNCTION__, this->process->pid, this->trdid, 496 498 1<<order, cxy, ppm_page2ppn(XPTR( cxy , found_block )), cycle ); 497 if( DEBUG_PPM_ REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );499 if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( cxy ); 498 500 } 499 501 #endif … … 521 523 uint32_t order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) ); 522 524 523 #if DEBUG_PPM_ REMOTE_FREE_PAGES525 #if DEBUG_PPM_FREE_PAGES 524 526 thread_t * this = CURRENT_THREAD; 525 527 uint32_t cycle = (uint32_t)hal_get_cycles(); … … 527 529 #endif 528 530 529 #if DEBUG_PPM_ REMOTE_FREE_PAGES530 if( DEBUG_PPM_ REMOTE_FREE_PAGES < cycle )531 #if DEBUG_PPM_FREE_PAGES 532 if( DEBUG_PPM_FREE_PAGES < cycle ) 531 533 { 532 534 printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n", 533 535 __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle ); 534 if( DEBUG_PPM_ REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );536 if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( page_cxy ); 535 537 } 536 538 #endif … … 549 551 550 552 assert( __FUNCTION__, !page_remote_is_flag( page_xp , PG_FREE ) , 551 "page already released : ppn = %x \n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );553 "page already released : ppn = %x" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) ); 552 554 553 555 assert( __FUNCTION__, !page_remote_is_flag( page_xp , PG_RESERVED ) , 554 "reserved page : ppn = %x \n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );556 "reserved page : ppn = %x" 
, ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) ); 555 557 556 558 // set the FREE flag in released page descriptor … … 607 609 hal_fence(); 608 610 609 #if DEBUG_PPM_ REMOTE_FREE_PAGES610 if( DEBUG_PPM_ REMOTE_FREE_PAGES < cycle )611 #if DEBUG_PPM_FREE_PAGES 612 if( DEBUG_PPM_FREE_PAGES < cycle ) 611 613 { 612 614 printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n", 613 615 __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle ); 614 if( DEBUG_PPM_ REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );616 if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( page_cxy ); 615 617 } 616 618 #endif -
trunk/kernel/mm/ppm.h
r656 r683 3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018,2019 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 57 57 * the "buddy" algorithm. 58 58 * The local threads can access these free_lists by calling the ppm_alloc_pages() and 59 * ppm_free_page() functions, butthe remote threads can access the same free lists,59 * ppm_free_page() functions, and the remote threads can access the same free lists, 60 60 * by calling the ppm_remote_alloc_pages() and ppm_remote_free_pages functions. 61 61 * Therefore, these free lists are protected by a remote_busy_lock. … … 98 98 * physical pages. It takes the lock protecting the free_lists before register the 99 99 * released page in the relevant free_list. 100 * In normal use, you do not need to call itdirectly, as the recommended way to free100 * In normal use, it should not be called directly, as the recommended way to free 101 101 * physical pages is to call the generic allocator defined in kmem.h. 102 102 ***************************************************************************************** -
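For reference, the buddy algorithm mentioned above pairs each free block of order k with a unique buddy of the same order; the classic index computation is sketched below (an illustration of the technique, not code extracted from ppm.c; idx is the index of the block's first page in the pages_tbl[] array):

    uint32_t buddy_idx  = idx ^ (1 << order);      /* the block's unique buddy        */
    uint32_t merged_idx = idx & ~(1 << order);     /* base of the order+1 merged block */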
trunk/kernel/mm/vmm.c
r672 r683 1 1 /*
2 * vmm.c - virtual memory manager related operations definition.
2 * vmm.c - virtual memory manager related operations implementation.
3 3 *
4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012)
… …
89 89
90 90 // check ltid argument
91 assert( __FUNCTION__, (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
91 assert( __FUNCTION__,
92 (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
92 93 "slot index %d too large for an user stack vseg", ltid );
93 94
… …
107 108 if( vseg == NULL )
108 109 {
109 // release lock protecting free lists
110
111 #if DEBUG_VMM_ERROR
112 printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
113 __FUNCTION__ , local_cxy );
114 #endif
110 115 busylock_release( &mgr->lock );
111
112 printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
113 __FUNCTION__ , local_cxy );
114
115 116 return NULL;
116 117 }
… …
346 347 if( current_vseg == NULL ) // return failure
347 348 {
348 // release lock protecting free lists
349
350 #if DEBUG_VMM_ERROR
351 printk("\n[ERROR] %s cannot allocate %d page(s) in cluster %x\n",
352 __FUNCTION__, npages , local_cxy );
353 #endif
349 354 busylock_release( &mgr->lock );
350
351 printk("\n[ERROR] %s cannot allocate ) %d page(s) in cluster %x\n",
352 __FUNCTION__, npages , local_cxy );
353
354 355 return NULL;
355 356 }
… …
368 369 if( new_vseg == NULL )
369 370 {
370 // release lock protecting free lists
371
372 #if DEBUG_VMM_ERROR
373 printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
374 __FUNCTION__ , local_cxy );
375 #endif
371 376 busylock_release( &mgr->lock );
372
373 printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
374 __FUNCTION__ , local_cxy );
375
376 377 return NULL;
377 378 }
… …
517 518 XPTR( local_cxy , &vseg->xlist ) );
518 519
519 } // end vmm_attach_vseg_ from_vsl()
520 } // end vmm_attach_vseg_to_vsl()
520 521
521 522 ////////////////////////////////////////////////////////////////////////////////////////////
… …
537 538 xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
538 539
539 } // end vmm_detach_ from_vsl()
540 } // end vmm_detach_vseg_from_vsl()
540 541
541 542 ////////////////////////////////////////////
… …
1290 1291 if( child_vseg == NULL ) // release all allocated vsegs
1291 1292 {
1293
1294 #if DEBUG_VMM_ERROR
1295 printk("\n[ERROR] in %s : cannot create vseg for child in cluster %x\n",
1296 __FUNCTION__, local_cxy );
1297 #endif
1292 1298 vmm_destroy( child_process );
1293 printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ );
1294 1299 return -1;
1295 1300 }
… …
1338 1343 if( error )
1339 1344 {
1345
1346 #if DEBUG_VMM_ERROR
1347 printk("\n[ERROR] in %s : cannot copy GPT\n",
1348 __FUNCTION__ );
1349 #endif
1340 1350 vmm_destroy( child_process );
1341 printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ );
1342 1351 return -1;
1343 1352 }
… …
1357 1366 remote_queuelock_release( parent_lock_xp );
1358 1367
1359 /* deprecated [AG] : this is already done by the vmm_user_init() funcfion
1360
1361 // initialize the child VMM STACK allocator
1362 vmm_stack_init( child_vmm );
1363
1364 // initialize the child VMM MMAP allocator
1365 vmm_mmap_init( child_vmm );
1366
1367 // initialize instrumentation counters
1368 child_vmm->false_pgfault_nr = 0;
1369 child_vmm->local_pgfault_nr = 0;
1370 child_vmm->global_pgfault_nr = 0;
1371 child_vmm->false_pgfault_cost = 0;
1372 child_vmm->local_pgfault_cost = 0;
1373 child_vmm->global_pgfault_cost = 0;
1374
*/1375 1368 // copy base addresses from parent VMM to child VMM 1376 1369 child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base)); … … 1564 1557 if( vseg == NULL ) 1565 1558 { 1566 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1567 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1559 1560 #if DEBUG_VMM_ERROR 1561 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1562 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1563 #endif 1568 1564 return NULL; 1569 1565 } … … 1572 1568 vseg->type = type; 1573 1569 vseg->vmm = vmm; 1574 vseg->min = vseg->vpn_base << CONFIG_PPM_PAGE_ SHIFT;1575 vseg->max = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ SHIFT);1570 vseg->min = vseg->vpn_base << CONFIG_PPM_PAGE_ORDER; 1571 vseg->max = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ORDER); 1576 1572 vseg->cxy = cxy; 1577 1573 … … 1582 1578 { 1583 1579 // compute page index (in mapper) for first and last byte 1584 vpn_t vpn_min = file_offset >> CONFIG_PPM_PAGE_ SHIFT;1585 vpn_t vpn_max = (file_offset + size - 1) >> CONFIG_PPM_PAGE_ SHIFT;1580 vpn_t vpn_min = file_offset >> CONFIG_PPM_PAGE_ORDER; 1581 vpn_t vpn_max = (file_offset + size - 1) >> CONFIG_PPM_PAGE_ORDER; 1586 1582 1587 1583 // compute offset in first page and number of pages … … 1594 1590 if( vseg == NULL ) 1595 1591 { 1596 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1597 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1592 1593 #if DEBUG_VMM_ERROR 1594 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1595 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1596 #endif 1598 1597 return NULL; 1599 1598 } … … 1602 1601 vseg->type = type; 1603 1602 vseg->vmm = vmm; 1604 vseg->min = (vseg->vpn_base << CONFIG_PPM_PAGE_ SHIFT) + offset;1603 vseg->min = (vseg->vpn_base << CONFIG_PPM_PAGE_ORDER) + offset; 1605 1604 vseg->max = vseg->min + size; 1606 1605 vseg->file_offset = file_offset; … … 1615 1614 { 1616 1615 // compute number of required pages in virtual space 1617 vpn_t npages = size >> CONFIG_PPM_PAGE_ SHIFT;1616 vpn_t npages = size >> CONFIG_PPM_PAGE_ORDER; 1618 1617 if( size & CONFIG_PPM_PAGE_MASK) npages++; 1619 1618 … … 1623 1622 if( vseg == NULL ) 1624 1623 { 1625 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1626 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1624 1625 #if DEBUG_VMM_ERROR 1626 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1627 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1628 #endif 1627 1629 return NULL; 1628 1630 } … … 1631 1633 vseg->type = type; 1632 1634 vseg->vmm = vmm; 1633 vseg->min = vseg->vpn_base << CONFIG_PPM_PAGE_ SHIFT;1634 vseg->max = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ SHIFT);1635 vseg->min = vseg->vpn_base << CONFIG_PPM_PAGE_ORDER; 1636 vseg->max = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ORDER); 1635 1637 vseg->cxy = cxy; 1636 1638 … … 1640 1642 else // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg 1641 1643 { 1642 uint32_t vpn_min = base >> CONFIG_PPM_PAGE_ SHIFT;1643 uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_ SHIFT;1644 uint32_t vpn_min = base >> CONFIG_PPM_PAGE_ORDER; 1645 uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_ORDER; 1644 1646 1645 1647 // allocate vseg descriptor … … 1648 1650 if( vseg == NULL ) 1649 1651 { 1650 printk("\n[ERROR] 
%s cannot create %s vseg for process %x in cluster %x\n", 1651 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1652 1653 #if DEBUG_VMM_ERROR 1654 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1655 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1656 #endif 1652 1657 return NULL; 1653 1658 } 1659 1654 1660 // initialize vseg 1655 1661 vseg->type = type; … … 1657 1663 vseg->min = base; 1658 1664 vseg->max = base + size; 1659 vseg->vpn_base = base >> CONFIG_PPM_PAGE_ SHIFT;1665 vseg->vpn_base = base >> CONFIG_PPM_PAGE_ORDER; 1660 1666 vseg->vpn_size = vpn_max - vpn_min + 1; 1661 1667 vseg->file_offset = file_offset; … … 1672 1678 if( existing_vseg != NULL ) 1673 1679 { 1674 printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n" 1675 " overlap existing vseg %s [vpn_base %x / vpn_size %x]\n", 1676 __FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size, 1677 vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size ); 1680 1681 #if DEBUG_VMM_ERROR 1682 printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n" 1683 " overlap existing vseg %s [vpn_base %x / vpn_size %x]\n", 1684 __FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size, 1685 vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size ); 1686 #endif 1678 1687 vseg_free( vseg ); 1679 1688 return NULL; … … 1801 1810 if( do_kmem_release ) 1802 1811 { 1803 kmem_req_t req; 1804 req.type = KMEM_PPM; 1805 req.ptr = GET_PTR( ppm_ppn2base( ppn ) ); 1806 1807 kmem_remote_free( page_cxy , &req ); 1812 // get physical page order 1813 uint32_t order = CONFIG_PPM_PAGE_ORDER + 1814 hal_remote_l32( XPTR( page_cxy , &page_ptr->order )); 1815 1816 // get physical page base 1817 void * base = GET_PTR( ppm_ppn2base( ppn ) ); 1818 1819 // release physical page 1820 kmem_remote_free( page_cxy , base , order ); 1808 1821 1809 1822 #if DEBUG_VMM_PPN_RELEASE … … 1855 1868 #endif 1856 1869 1857 // loop on PTEs in GPT to unmap all mapped PTE1858 1870 // the loop on PTEs in GPT to unmap all mapped PTEs 1871 for( vpn = vpn_min ; vpn < vpn_max ; vpn++ ) 1859 1872 { 1860 1873 // get ppn and attr … … 1942 1955 intptr_t min = new_base; 1943 1956 intptr_t max = new_base + new_size; 1944 vpn_t new_vpn_min = min >> CONFIG_PPM_PAGE_ SHIFT;1945 vpn_t new_vpn_max = (max - 1) >> CONFIG_PPM_PAGE_ SHIFT;1957 vpn_t new_vpn_min = min >> CONFIG_PPM_PAGE_ORDER; 1958 vpn_t new_vpn_max = (max - 1) >> CONFIG_PPM_PAGE_ORDER; 1946 1959 1947 1960 // build extended pointer on GPT … … 2082 2095 if( ref_cxy == local_cxy ) // local is ref => return error 2083 2096 { 2084 printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n", 2085 __FUNCTION__, vaddr, process->pid ); 2086 2087 // release local VSL lock 2097 2098 #if DEBUG_VMM_ERROR 2099 printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n", 2100 __FUNCTION__, vaddr, process->pid ); 2101 #endif 2088 2102 remote_queuelock_release( loc_lock_xp ); 2089 2090 2103 return -1; 2091 2104 } … … 2103 2116 if( ref_vseg == NULL ) // vseg not found => return error 2104 2117 { 2105 // release both VSL locks 2118 2119 #if DEBUG_VMM_ERROR 2120 printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n", 2121 __FUNCTION__, vaddr, process->pid ); 2122 #endif 2106 2123 remote_queuelock_release( loc_lock_xp ); 2107 2124 remote_queuelock_release( ref_lock_xp ); 2108 2109 printk("\n[ERROR] in %s 
: vaddr %x in process %x out of segment\n",2110 __FUNCTION__, vaddr, process->pid );2111 2112 2125 return -1; 2113 2126 } … … 2119 2132 if( loc_vseg == NULL ) // no memory => return error 2120 2133 { 2121 printk("\n[ERROR] in %s : vaddr %x in process %x / no memory\n", 2122 __FUNCTION__, vaddr, process->pid ); 2123 2124 // release both VSL locks 2134 2135 #if DEBUG_VMM_ERROR 2136 printk("\n[ERROR] in %s : vaddr %x in process %x / no memory\n", 2137 __FUNCTION__, vaddr, process->pid ); 2138 #endif 2125 2139 remote_queuelock_release( ref_lock_xp ); 2126 2140 remote_queuelock_release( loc_lock_xp ); 2127 2128 2141 return -1; 2129 2142 } … … 2158 2171 ////////////////////////////////////////////////////////////////////////////////////// 2159 2172 // This static function compute the target cluster to allocate a physical page 2160 // for a given <vpn> in a given <vseg>, allocates the page and returns an extended 2161 // pointer on the allocated page descriptor. 2173 // for a given <vpn> in a given <vseg>, allocates the physical page from a local 2174 // or remote cluster (depending on the vseg type), and returns an extended pointer 2175 // on the allocated page descriptor. 2162 2176 // The vseg cannot have the FILE type. 2163 2177 ////////////////////////////////////////////////////////////////////////////////////// 2164 2178 // @ vseg : local pointer on vseg. 2165 2179 // @ vpn : unmapped vpn. 2166 // @ return an extended pointer on the allocated page descriptor.2180 // @ return xptr on page descriptor if success / return XPTR_NULL if failure 2167 2181 ////////////////////////////////////////////////////////////////////////////////////// 2168 2182 static xptr_t vmm_page_allocate( vseg_t * vseg, … … 2207 2221 } 2208 2222 2209 // allocate one small physical page from target cluster2210 kmem_req_t req;2211 req.type = KMEM_PPM;2212 req.order = 0;2213 req.flags = AF_ZERO;2214 2215 2223 // get local pointer on page base 2216 void * ptr = kmem_remote_alloc( page_cxy , &req ); 2217 2224 void * ptr = kmem_remote_alloc( page_cxy , CONFIG_PPM_PAGE_ORDER , AF_ZERO ); 2225 2226 if( ptr == NULL ) 2227 { 2228 2229 #if DEBUG_VMM_ERROR 2230 printk("\n[ERROR] in %s : cannot allocate memory from cluster %x\n", 2231 __FUNCTION__, page_cxy ); 2232 #endif 2233 return XPTR_NULL; 2234 } 2218 2235 // get extended pointer on page descriptor 2219 2236 page_xp = ppm_base2page( XPTR( page_cxy , ptr ) ); … … 2291 2308 2292 2309 // compute missing page offset in vseg 2293 uint32_t offset = page_id << CONFIG_PPM_PAGE_ SHIFT;2310 uint32_t offset = page_id << CONFIG_PPM_PAGE_ORDER; 2294 2311 2295 2312 // compute missing page offset in .elf file … … 2427 2444 // get local vseg (access to reference VSL can be required) 2428 2445 error = vmm_get_vseg( process, 2429 (intptr_t)vpn<<CONFIG_PPM_PAGE_ SHIFT,2446 (intptr_t)vpn<<CONFIG_PPM_PAGE_ORDER, 2430 2447 &vseg ); 2431 2448 if( error ) … … 2752 2769 // get local vseg 2753 2770 error = vmm_get_vseg( process, 2754 (intptr_t)vpn<<CONFIG_PPM_PAGE_ SHIFT,2771 (intptr_t)vpn<<CONFIG_PPM_PAGE_ORDER, 2755 2772 &vseg ); 2756 2773 if( error ) -
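A worked instance of the FILE-vseg arithmetic used by vmm_create_vseg() above, assuming 4 Kbytes pages (CONFIG_PPM_PAGE_ORDER == 12); the numeric values are illustrative only:

    /* file_offset = 0x1800 , size = 0x2300 */
    vpn_t    vpn_min = 0x1800 >> 12;                  /* page 1                 */
    vpn_t    vpn_max = (0x1800 + 0x2300 - 1) >> 12;   /* 0x3aff >> 12 -> page 3 */
    uint32_t offset  = 0x1800 & 0xfff;                /* 0x800 in first page    */
    vpn_t    npages  = vpn_max - vpn_min + 1;         /* 3 pages mapped         */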
trunk/kernel/mm/vseg.c
r672  r683
62    62    vseg_t * vseg_alloc( void )
63    63    {
64          kmem_req_t req;
65
66          req.type = KMEM_KCM;
67          req.order = bits_log2( sizeof(vseg_t) );
68          req.flags = AF_KERNEL | AF_ZERO;
69
70          return kmem_alloc( &req );
      64    return (vseg_t*)kmem_alloc( bits_log2( sizeof(vseg_t)) , AF_ZERO );
71    65    }
72    66
…     …
74    68    void vseg_free( vseg_t * vseg )
75    69    {
76          kmem_req_t req;
77
78          req.type = KMEM_KCM;
79          req.ptr = vseg;
80          kmem_free( &req );
      70    kmem_free( vseg , bits_log2( sizeof(vseg_t)) );
81    71    }
82    72
-
trunk/kernel/mm/vseg.h
r657  r683
82    82    vpn_t vpn_base; /*! first page of vseg */
83    83    vpn_t vpn_size; /*! number of pages occupied */
      84    xptr_t mapper_xp; /*! xptr on remote mapper (for types CODE/DATA/FILE) */
84    85    uint32_t flags; /*! vseg attributes */
85          xptr_t mapper_xp; /*! xptr on remote mapper (for types CODE/DATA/FILE) */
86    86    intptr_t file_offset; /*! vseg offset in file (for types CODE/DATA/FILE) */
87    87    intptr_t file_size; /*! max segment size in mapper (for type CODE/DATA) */
-
trunk/kernel/syscalls/shared_include/shared_almos.h
r670  r683
2     2      * shared_almos.h - Shared mnemonics used by the almos-mkh specific syscalls.
3     3      *
4            * Author Alain Greiner (2016,2017,2018)
      4      * Author Alain Greiner (2016,2017,2018,2019,2020)
5     5      *
6     6      * Copyright (c) UPMC Sorbonne Universites
…     …
62    62
63    63    /*******************************************************************************************
      64     * This enum defines the operation mnemonics for the non standard get_xxx() syscalls.
      65     ******************************************************************************************/
      66
      67    typedef enum
      68    {
      69    GET_PROCESSES = 0,
      70    GET_CONFIG = 1,
      71    GET_CORE_ID = 2,
      72    GET_NB_CORES = 3,
      73    GET_BEST_CORE = 4,
      74    GET_CYCLE = 5,
      75    GET_THREAD_INFO = 6,
      76    }
      77    get_operation_type_t;
      78
      79    /*******************************************************************************************
64    80     * This structure defines the - user accessible - information stored in a thread.
65    81     ******************************************************************************************/
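These mnemonics replace the five former get_xxx() syscalls by a single multiplexed entry point. A plausible dispatch skeleton is sketched below; the actual implementation lives in the new <sys_get.c> file listed in the Makefile, which is not part of this excerpt, so the case bodies are assumptions:

    int sys_get( reg_t arg0 ,   // operation mnemonic
                 reg_t arg1 ,   // meaning depends on arg0
                 reg_t arg2 ,
                 reg_t arg3 )
    {
        switch( (get_operation_type_t)arg0 )
        {
            case GET_CONFIG:      /* copy hardware config to user buffer */  break;
            case GET_CORE_ID:     /* return (cxy,lid) of the calling core */ break;
            case GET_CYCLE:       /* copy 64 bits cycle count to user */     break;
            case GET_THREAD_INFO: /* copy thread_info_t to user buffer */    break;
            /* ... one case per get_operation_type_t mnemonic ... */
            default: return -1;   // illegal operation type
        }
        return 0;
    }
-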
trunk/kernel/syscalls/shared_include/shared_dirent.h
r611  r683
1     1     /*
2            * shared_dirent.h - Shared structure used by the opendir() / readdir() / closedir() syscalls.
      2      * shared_dirent.h - structures used by the opendir() / readdir() / closedir() syscalls.
3     3      *
4            * Author Alain Greiner (2016,2017,2018)
      4      * Author Alain Greiner (2016,2017,2018,2019,2020)
5     5      *
6     6      * Copyright (c) UPMC Sorbonne Universites
-
trunk/kernel/syscalls/shared_include/shared_socket.h
r670  r683
69    69    SOCK_SEND = 5,
70    70    SOCK_RECV = 6,
      71    SOCK_SENDTO = 7,
      72    SOCK_RECVFROM = 8,
71    73    }
72    74    socket_operation_type_t;
-
trunk/kernel/syscalls/shared_include/syscalls_numbers.h
r657  r683
2     2      * syscalls_numbers.c - Contains enum of the syscalls.
3     3      *
4            * Author Alain Greiner (2016,2017,2018,2019)
      4      * Author Alain Greiner (2016,2017,2018,2019,2020)
5     5      *
6     6      * Copyright (c) UPMC Sorbonne Universites
…     …
27    27    /******************************************************************************************
28    28     * This enum defines the mnemonics for the syscall indexes.
29           * It must be kept consistent with the array defined in do_syscalls.c
      29     * It must be kept consistent with the array defined in the <do_syscalls.c> file
      30     * and with the SYS_OBJs defined in the kernel <Makefile>
30    31     *****************************************************************************************/
31    32    typedef enum
…     …
75    76    SYS_WAIT = 39,
76    77
77          SYS_GET_CONFIG = 40,
78          SYS_GET_CORE_ID = 41,
79          SYS_GET_CYCLE = 42,
80          SYS_DISPLAY = 43,
81          SYS_PLACE_FORK = 44,
82          SYS_THREAD_SLEEP = 45,
83          SYS_THREAD_WAKEUP = 46,
84          SYS_TRACE = 47,
85          SYS_FG = 48,
86          SYS_IS_FG = 49,
      78    SYS_GET = 40,
      79    SYS_DISPLAY = 41,
      80    SYS_PLACE_FORK = 42,
      81    SYS_THREAD_SLEEP = 43,
      82    SYS_THREAD_WAKEUP = 44,
      83    SYS_TRACE = 45,
      84    SYS_FG = 46,
      85    SYS_IS_FG = 47,
      86    SYS_FBF = 48,
      87    SYS_UNDEFINED_49 = 49,
87    88
88    89    SYS_EXIT = 50,
89    90    SYS_SYNC = 51,
90    91    SYS_FSYNC = 52,
91          SYS_GET_BEST_CORE = 53,
92          SYS_GET_NB_CORES = 54,
93          SYS_GET_THREAD_INFO = 55,
94          SYS_FBF = 56,
95          SYS_SOCKET = 57,
      92    SYS_SOCKET = 53,
96    93
97          SYSCALLS_NR = 58,
      94    SYSCALLS_NR = 54,
98    95
99    96    } syscalls_t;
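The comment above ties this enum to the dispatch array in <do_syscalls.c> and to the SYS_OBJS lists in the kernel <Makefile>. A designated initializer is one way to keep the enum and the array aligned; the table and function-pointer type below are hypothetical, since <do_syscalls.c> itself is not shown in this changeset:

    typedef int (*sys_func_t)( reg_t , reg_t , reg_t , reg_t );

    static const sys_func_t syscall_tbl[SYSCALLS_NR] =
    {
        [SYS_GET]    = (sys_func_t)sys_get,     // index 40 after this changeset
        [SYS_FBF]    = (sys_func_t)sys_fbf,     // index 48 after this changeset
        [SYS_SOCKET] = (sys_func_t)sys_socket,  // index 53 after this changeset
        // ... one entry per syscalls_t mnemonic ...
    };
-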
trunk/kernel/syscalls/sys_alarm.c
r506 r683 3 3 * 4 4 * Author Alain Greiner (2016,2017) 5 *5 * 6 6 * Copyright (c) UPMC Sorbonne Universites 7 7 * -
trunk/kernel/syscalls/sys_barrier.c
r670 r683 2 2 * sys_barrier.c - Access a POSIX barrier. 3 3 * 4 * authors Alain Greiner (2016,2017,2018,2019 )4 * authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 33 33 #include <remote_barrier.h> 34 34 35 ///////////////////////////////////////////////////////////////////////////////// 36 // This function returns a printable string for the barrier related command type. 37 ///////////////////////////////////////////////////////////////////////////////// 38 39 #if DEBUG_SYS_SOCKET || DEBUG_SYSCALLS_ERROR 40 static char* barrier_cmd_str( uint32_t type ) 41 { 42 if ( type == BARRIER_INIT ) return "INIT"; 43 else if( type == BARRIER_WAIT ) return "WAIT"; 44 else if( type == BARRIER_DESTROY ) return "DESTROY"; 45 else return "undefined"; 46 } 47 #endif 48 35 49 ////////////////////////////////// 36 50 int sys_barrier( intptr_t vaddr, … … 53 67 if( DEBUG_SYS_BARRIER < tm_start ) 54 68 printk("\n[%s] thread[%x,%x] enters for %s / count %d / cycle %d\n", 55 __FUNCTION__, process->pid, this->trdid, sys_barrier_op_str(operation), count,69 __FUNCTION__, process->pid, this->trdid, barrier_cmd_str(operation), count, 56 70 (uint32_t)tm_start ); 57 71 #endif 58 72 59 73 // check vaddr in user vspace 60 error = vmm_get_vseg( process , vaddr , &vseg ); 61 if( error ) 74 if( vmm_get_vseg( process , vaddr , &vseg ) ) 62 75 { 63 76 … … 65 78 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 66 79 printk("\n[ERROR] in %s for %s : unmapped barrier %x / thread[%x,%x]\n", 67 __FUNCTION__, sys_barrier_op_str(operation), vaddr, process->pid, this->trdid );68 #endif 69 this->errno = error;80 __FUNCTION__, barrier_cmd_str(operation), vaddr, process->pid, this->trdid ); 81 #endif 82 this->errno = EINVAL; 70 83 return -1; 71 84 } … … 79 92 if( attr != 0 ) // QDT barrier required 80 93 { 81 error = vmm_get_vseg( process , attr , &vseg ); 82 if( error ) 94 if( vmm_get_vseg( process , attr , &vseg ) ) 83 95 { 84 96 -
trunk/kernel/syscalls/sys_display.c
r670  r683
529   529
530   530   // display socket descriptor on TXT0
531         socket_display( XPTR( file_cxy , socket ), NULL );
      531   socket_display( XPTR( file_cxy , socket ), __FUNCTION__ , NULL );
532   532
533   533   break;
-
trunk/kernel/syscalls/sys_exec.c
r670 r683 38 38 #include <syscalls.h> 39 39 40 ////////////////////////////////////////////////i//////////////////////////////////////// 41 // This static function is called twice by the sys_exec() function : 42 // - to register the main() arguments (args) in the process <exec_info> structure. 43 // - to register the environment variables (envs) in the <exec_info> structure. 44 // In both cases the input is an array of NULL terminated string pointers in user space, 45 // identified by the <u_pointers> argument. The strings can be dispatched anywhere in 46 // the calling user process space. The max number of envs, and the max number of args 47 // are defined by the CONFIG_PROCESS_ARGS_NR and CONFIG_PROCESS_ENVS_MAX_NR parameters. 48 ////////////////////////////////////////////////i//////////////////////////////////////// 49 // Implementation Note: 50 // Both the array of pointers and the strings themselve are stored in kernel space in one 51 // single, dynamically allocated, kernel buffer containing an integer number of pages, 52 // defined by the CONFIG_VMM_ENVS_SIZE and CONFIG_VMM_STACK_SIZE parameters. 53 // These two kernel buffers contains : 54 // - in the first bytes a fixed size kernel array of kernel pointers on the strings. 55 // - in the following bytes the strings themselves. 56 // The exec_info_t structure is defined in the <process.h> file. 57 ////////////////////////////////////////////////i//////////////////////////////////////// 58 // @ is_args : [in] true if called for (args) / false if called for (envs). 59 // @ u_pointers : [in] array of pointers on the strings (in user space). 60 // @ exec_info : [inout] pointer on the exec_info structure. 61 // @ return 0 if success / non-zero if too many strings or no memory. 62 ////////////////////////////////////////////////i//////////////////////////////////////// 63 static error_t exec_get_strings( bool_t is_args, 64 char ** u_pointers, 65 exec_info_t * exec_info ) 66 { 67 uint32_t index; // slot index in pointers array 68 uint32_t length; // string length (in bytes) 69 uint32_t pointers_bytes; // number of bytes to store pointers 70 uint32_t max_index; // max size of pointers array 71 char ** k_pointers; // base of kernel array of pointers 72 char * k_buf_ptr; // pointer on first empty slot in strings buffer 73 uint32_t k_buf_space; // number of bytes available in string buffer 74 char * k_buf; // kernel buffer for both pointers & strings 75 76 #if DEBUG_SYS_EXEC 77 thread_t * this = CURRENT_THREAD; 78 uint32_t cycle = (uint32_t)hal_get_cycles(); 79 #endif 80 81 // Allocate one block of physical memory for both the pointers and the strings 82 83 if( is_args ) 84 { 85 k_buf = kmem_alloc( bits_log2(CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_ORDER), AF_ZERO ); 86 87 pointers_bytes = (CONFIG_PROCESS_ARGS_MAX_NR + 1) * sizeof(char *); 88 k_pointers = (char **)k_buf; 89 k_buf_ptr = k_buf + pointers_bytes; 90 k_buf_space = (CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_ORDER) - pointers_bytes; 91 max_index = CONFIG_PROCESS_ARGS_MAX_NR + 1; 92 93 #if DEBUG_SYS_EXEC 94 if( DEBUG_SYS_EXEC < cycle ) 95 printk("\n[%s] thread[%x,%x] for args / u_buf %x / k_buf %x\n", 96 __FUNCTION__, this->process->pid, this->trdid, u_pointers, k_buf ); 97 #endif 98 99 } 100 else // envs 101 { 102 k_buf = kmem_alloc( bits_log2(CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_ORDER), AF_ZERO ); 103 104 pointers_bytes = (CONFIG_PROCESS_ENVS_MAX_NR + 1) * sizeof(char *); 105 k_pointers = (char **)k_buf; 106 k_buf_ptr = k_buf + pointers_bytes; 107 k_buf_space = 
(CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_ORDER) - pointers_bytes; 108 max_index = CONFIG_PROCESS_ENVS_MAX_NR + 1; 109 110 #if DEBUG_SYS_EXEC 111 if( DEBUG_SYS_EXEC < cycle ) 112 printk("\n[%s] thread[%x,%x] for envs / u_buf %x / k_buf %x\n", 113 __FUNCTION__, this->process->pid, this->trdid, u_pointers, k_buf ); 114 #endif 115 116 } 117 118 // copy the user array of pointers to kernel buffer 119 hal_copy_from_uspace( XPTR( local_cxy , k_pointers ), 120 u_pointers, 121 pointers_bytes ); 122 123 // WARNING : the pointers copied in the k_pointers[] array are user pointers, 124 // after the loop below, the k_pointers[] array contains kernel pointers. 125 126 #if DEBUG_SYS_EXEC 127 if( DEBUG_SYS_EXEC < cycle ) 128 printk("\n[%s] thread[%x,%x] moved u_ptr array of pointers to k_ptr array\n", 129 __FUNCTION__, this->process->pid, this->trdid ); 130 #endif 131 132 // scan kernel array of pointers to copy strings to kernel buffer 133 for( index = 0 ; index < max_index ; index++ ) 134 { 135 // exit loop if (k_pointers[index] == NUll) 136 if( k_pointers[index] == NULL ) break; 137 138 // compute string length (without the NUL character) 139 length = hal_strlen_from_uspace( k_pointers[index] ); 140 141 // return error if overflow in kernel buffer 142 if( length > k_buf_space ) return -1; 143 144 // copy the string itself to kernel buffer 145 hal_copy_from_uspace( XPTR( local_cxy , k_buf_ptr ), 146 k_pointers[index], 147 length + 1 ); 148 149 #if DEBUG_SYS_EXEC 150 if( DEBUG_SYS_EXEC < cycle ) 151 printk("\n[%s] thread[%x,%x] copied string[%d] <%s> to kernel buffer / length %d\n", 152 __FUNCTION__, this->process->pid, this->trdid, index, k_buf_ptr, length ); 153 #endif 154 155 // replace the user pointer by a kernel pointer in the k_pointer[] array 156 k_pointers[index] = k_buf_ptr; 157 158 // increment loop variables 159 k_buf_ptr += (length + 1); 160 k_buf_space -= (length + 1); 161 162 #if DEBUG_SYS_EXEC 163 if( DEBUG_SYS_EXEC < cycle ) 164 { 165 if( k_pointers[0] != NULL ) 166 printk("\n[%s] thread[%x,%x] : &arg0 = %x / arg0 = <%s>\n", 167 __FUNCTION__, this->process->pid, this->trdid, k_pointers[0], k_pointers[0] ); 168 else 169 printk("\n[%s] thread[%x,%x] : unexpected NULL value for &arg0\n", 170 __FUNCTION__, this->process->pid, this->trdid ); 171 } 172 #endif 173 174 } // end loop on index 175 176 // update into exec_info structure 177 if( is_args ) 178 { 179 exec_info->args_pointers = k_pointers; 180 exec_info->args_nr = index; 181 } 182 else 183 { 184 exec_info->envs_pointers = k_pointers; 185 exec_info->envs_buf_free = k_buf_ptr; 186 exec_info->envs_nr = index; 187 } 188 189 #if DEBUG_SYS_EXEC 190 if( DEBUG_SYS_EXEC < cycle ) 191 printk("\n[%s] thread[%x,%x] copied %d strings to kernel buffer\n", 192 __FUNCTION__, this->process->pid, this->trdid, index ); 193 #endif 194 195 return 0; 196 197 } // end exec_get_strings() 198 199 40 200 /////////////////////////////// 41 int sys_exec( char * pathname, 42 char ** user_args, // pointer onprocess arguments in user space43 char ** user_envs ) // pointer onenv variables in user space201 int sys_exec( char * pathname, // .elf file pathname in user space 202 char ** user_args, // pointer on array of process arguments in user space 203 char ** user_envs ) // pointer on array of env variables in user space 44 204 { 45 205 error_t error; … … 96 256 97 257 #if DEBUG_SYSCALLS_ERROR 258 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 98 259 printk("\n[ERROR] in %s for thread[%x,%] : user_args pointer %x unmapped\n", 99 260 __FUNCTION__, pid, trdid, 
user_args ); … … 115 276 return -1; 116 277 } 117 118 #if DEBUG_SYS_EXEC119 if( DEBUG_SYS_EXEC < (uint32_t)tm_start )120 printk("\n[%s] thread[%x,%x] enter / path <%s> / args %x / envs %x / cycle %d\n",121 __FUNCTION__, pid, trdid, &process->exec_info.path[0], user_args, user_envs, cycle );122 #endif123 278 124 279 // 1. copy "pathname" in kernel exec_info structure … … 127 282 CONFIG_VFS_MAX_PATH_LENGTH ); 128 283 284 #if DEBUG_SYS_EXEC 285 if( DEBUG_SYS_EXEC < (uint32_t)tm_start ) 286 printk("\n[%s] thread[%x,%x] enter / path <%s> / args %x / envs %x / cycle %d\n", 287 __FUNCTION__, pid, trdid, &process->exec_info.path[0], 288 user_args, user_envs, (uint32_t)tm_start ); 289 #endif 290 129 291 // 2. copy "arguments" pointers & strings in process exec_info if required 130 292 if( user_args != NULL ) 131 293 { 132 if( process_exec_get_strings( true , user_args , &process->exec_info ) )294 if( exec_get_strings( true , user_args , &process->exec_info ) ) 133 295 { 134 296 135 297 #if DEBUG_SYSCALLS_ERROR 136 298 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 137 printk("\n[ERROR] in %s : thread[%x,%] get arguments for <%s>\n",299 printk("\n[ERROR] in %s : thread[%x,%] cannot get arguments for <%s>\n", 138 300 __FUNCTION__, pid, trdid, pathname ); 139 301 #endif … … 144 306 #if DEBUG_SYS_EXEC 145 307 if( DEBUG_SYS_EXEC < (uint32_t)tm_start ) 146 printk("\n[%s] thread[%x,%x] got arguments/ arg[0] = <%s>\n",308 printk("\n[%s] thread[%x,%x] set arguments in exec_info / arg[0] = <%s>\n", 147 309 __FUNCTION__, pid, trdid, process->exec_info.args_pointers[0] ); 148 310 #endif … … 153 315 if( user_envs != NULL ) 154 316 { 155 if( process_exec_get_strings( false , user_envs , &process->exec_info ) )317 if( exec_get_strings( false , user_envs , &process->exec_info ) ) 156 318 { 157 319 158 320 #if DEBUG_SYSCALLS_ERROR 159 321 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 160 printk("\n[ERROR] in %s : thread[%x,%] get env variables for <%s>\n",322 printk("\n[ERROR] in %s : thread[%x,%] cannot get env variables for <%s>\n", 161 323 __FUNCTION__, pid, trdid, pathname ); 162 324 #endif … … 167 329 #if DEBUG_SYS_EXEC 168 330 if( DEBUG_SYS_EXEC < (uint32_t)tm_start ) 169 printk("\n[%s] thread[%x,%x] got envs/ env[0] = <%s>\n",331 printk("\n[%s] thread[%x,%x] set envs in exec_info / env[0] = <%s>\n", 170 332 __FUNCTION__, pid, trdid, process->exec_info.envs_pointers[0] ); 171 333 #endif -
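The exec_get_strings() function added above packs each array into a single kernel buffer: the fixed-size pointer table first, the NUL-terminated strings behind it. The resulting layout, redrawn from the function's own comments (buffer sizes are CONFIG_VMM_ARGS_SIZE or CONFIG_VMM_ENVS_SIZE pages):

    //  k_buf                                      k_buf + pointers_bytes
    //  |                                          |
    //  v                                          v
    //  +------------------------------------------+----------------------------+
    //  | k_pointers[0] ... k_pointers[max]  NULL  | "arg0\0" "arg1\0" ... free |
    //  +------------------------------------------+----------------------------+
    //
    // after the copy loop, each k_pointers[i] is a kernel pointer into the
    // strings area, and a NULL entry terminates the array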
trunk/kernel/syscalls/sys_kill.c
r664 r683 2 2 * sys_kill.c - Kernel function implementing the "kill" system call. 3 3 * 4 * Author Alain Greiner (2016,2017,2018)4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 67 67 process_t * process = this->process; 68 68 69 #if (DEBUG_SYS_KILL || CONFIG_INSTRUMENTATION_SYSCALLS)69 #if DEBUG_SYS_KILL || DEBUG_SYSCALLS_ERROR || CONFIG_INSTRUMENTATION_SYSCALLS 70 70 uint64_t tm_start = hal_get_cycles(); 71 71 #endif 72 72 73 73 #if DEBUG_SYS_KILL 74 tm_start = hal_get_cycles();75 74 if( DEBUG_SYS_KILL < tm_start ) 76 75 printk("\n[%s] thread[%x,%x] enter : %s to process %x / cycle %d\n", … … 95 94 96 95 #if DEBUG_SYSCALLS_ERROR 97 printk("\n[ERROR] in %s : process %x not found\n", __FUNCTION__, pid ); 96 if( DEBUG_SYSCALLS_ERROR < tm_start ) 97 printk("\n[ERROR] in %s : thread[%x,%x] / process %x not found\n", 98 __FUNCTION__, process->pid, this->trdid, pid ); 98 99 #endif 99 100 this->errno = EINVAL; … … 175 176 176 177 #if DEBUG_SYSCALLS_ERROR 177 printk("\n[ERROR] in %s : process %x cannot kill itself\n", __FUNCTION__, pid ); 178 if( DEBUG_SYSCALLS_ERROR < tm_start ) 179 printk("\n[ERROR] in %s : thread[%x,%x] / process %x cannot kill itself\n", 180 __FUNCTION__, process->pid, this->trdid, pid ); 178 181 #endif 179 182 this->errno = EINVAL; … … 186 189 187 190 #if DEBUG_SYSCALLS_ERROR 188 printk("\n[ERROR] in %s : process_init cannot be killed\n", __FUNCTION__ ); 191 if( DEBUG_SYSCALLS_ERROR < tm_start ) 192 printk("\n[ERROR] in %s : thread[%x,%x] / process_init cannot be killed\n", 193 __FUNCTION__, process->pid, this->trdid); 189 194 #endif 190 195 this->errno = EINVAL; … … 219 224 220 225 #if DEBUG_SYSCALLS_ERROR 221 printk("\n[ERROR] in %s : illegal signal %d / process %x\n", __FUNCTION__, sig_id, pid ); 226 if( DEBUG_SYSCALLS_ERROR < tm_start ) 227 printk("\n[ERROR] in %s : thread[%x,%x] / illegal signal %d\n", 228 __FUNCTION__, process->pid, this->trdid, sig_id ); 222 229 #endif 223 230 this->errno = EINVAL; … … 234 241 #if DEBUG_SYS_KILL 235 242 if( DEBUG_SYS_KILL < tm_end ) 236 printk("\n[%s] thread[%x,%x] exit / process %x / %s / c ost = %d / cycle %d\n",243 printk("\n[%s] thread[%x,%x] exit / process %x / %s / cycle %d\n", 237 244 __FUNCTION__ , this->process->pid, this->trdid, pid, 238 sig_type_str(sig_id), (uint32_t) (tm_end - tm_start), (uint32_t)tm_end );245 sig_type_str(sig_id), (uint32_t)tm_end ); 239 246 #endif 240 247 -
trunk/kernel/syscalls/sys_opendir.c
r670 r683 65 65 66 66 #if DEBUG_SYSCALLS_ERROR 67 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 68 printk("\n[ERROR] in %s : thread[%x,%x] / DIR buffer %x unmapped\n", 69 __FUNCTION__ , process->pid , this->trdid, dirp ); 67 printk("\n[ERROR] in %s : thread[%x,%x] / DIR buffer %x unmapped / cycle %d\n", 68 __FUNCTION__ , process->pid , this->trdid, dirp, (uint32_t)tm_start ); 70 69 #endif 71 70 this->errno = EINVAL; … … 80 79 81 80 #if DEBUG_SYSCALLS_ERROR 82 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 83 printk("\n[ERROR] in %s : thread[%x,%x] / pathname %x unmapped\n", 84 __FUNCTION__ , process->pid , this->trdid, pathname ); 81 printk("\n[ERROR] in %s : thread[%x,%x] / pathname %x unmapped / cycle %d\n", 82 __FUNCTION__ , process->pid , this->trdid, pathname, (uint32_t)tm_start ); 85 83 #endif 86 84 this->errno = EINVAL; … … 92 90 93 91 #if DEBUG_SYSCALLS_ERROR 94 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 95 printk("\n[ERROR] in %s / thread[%x,%x] : pathname too long\n", 96 __FUNCTION__ , process->pid , this->trdid ); 92 printk("\n[ERROR] in %s / thread[%x,%x] : pathname too long / cycle %d\n", 93 __FUNCTION__ , process->pid , this->trdid, (uint32_t)tm_start ); 97 94 #endif 98 95 this->errno = ENFILE; … … 138 135 139 136 #if DEBUG_SYSCALLS_ERROR 140 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 141 printk("\n[ERROR] in %s : thread[%x,%x] / cannot found directory <%s>\n", 142 __FUNCTION__ , process->pid , this->trdid , kbuf ); 137 printk("\n[ERROR] in %s : thread[%x,%x] / cannot found directory <%s> / cycle %d\n", 138 __FUNCTION__ , process->pid , this->trdid , kbuf , (uint32_t)tm_start ); 143 139 #endif 144 140 this->errno = ENFILE; … … 155 151 156 152 #if DEBUG_SYSCALLS_ERROR 157 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 158 printk("\n[ERROR] in %s : thread[%x,%x] / <%s> is not a directory\n", 159 __FUNCTION__ , process->pid , this->trdid , kbuf ); 153 printk("\n[ERROR] in %s : thread[%x,%x] / <%s> is not a directory / cycle %d\n", 154 __FUNCTION__ , process->pid , this->trdid , kbuf , (uint32_t)tm_start ); 160 155 #endif 161 156 this->errno = ENFILE; … … 163 158 } 164 159 165 // create a new user_dir_t structure in target directory inode cluster160 // create an user_dir_t structure in cluster containing directory inode 166 161 // map it in the reference user process VMM (in a new ANON vseg) 167 162 // an get the local pointer on the created user_dir_t structure … … 183 178 184 179 #if DEBUG_SYSCALLS_ERROR 185 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 186 printk("\n[ERROR] in %s : thread[%x,%x] / cannot create user_dir for <%s>\n", 187 __FUNCTION__ , process->pid , this->trdid , kbuf ); 180 printk("\n[ERROR] in %s : thread[%x,%x] / cannot create user_dir for <%s> / cycle %d\n", 181 __FUNCTION__ , process->pid , this->trdid , kbuf , (uint32_t)tm_start ); 188 182 #endif 189 183 this->errno = ENFILE; -
trunk/kernel/syscalls/sys_pipe.c
r670 r683 36 36 { 37 37 vseg_t * vseg; 38 kmem_req_t req;39 38 pipe_t * pipe; 40 39 vfs_file_t * file_0; … … 88 87 // 2. allocate memory for fd[0] file descriptor in local cluster 89 88 // we don't use the vfs_file_create function because there is no inode. 90 req.type = KMEM_KCM; 91 req.order = bits_log2( sizeof(vfs_file_t) ); 92 req.flags = AF_ZERO; 93 file_0 = kmem_alloc( &req ); 89 file_0 = kmem_alloc( bits_log2(sizeof(vfs_file_t)) , AF_ZERO ); 94 90 95 91 if( file_0 == NULL ) … … 120 116 121 117 // 4. allocate memory for fd[1] file descriptor in local cluster 122 req.type = KMEM_KCM; 123 req.order = bits_log2( sizeof(vfs_file_t) ); 124 req.flags = AF_ZERO; 125 file_1 = kmem_alloc( &req ); 118 // we don't use the vfs_file_create function because there is no inode. 119 file_1 = kmem_alloc( bits_log2(sizeof(vfs_file_t)) , AF_ZERO ); 126 120 127 121 if( file_1 == NULL ) … … 178 172 error_5: // release memory allocated for fd[1] file descriptor 179 173 180 req.ptr = file_1; 181 kmem_free( &req ); 174 kmem_free( file_1 , bits_log2(sizeof(vfs_file_t)) ); 182 175 183 176 error_4: // release fdid_0 from fd_array[] … … 187 180 error_3: // release memory allocated for fd[0] file descriptor 188 181 189 req.ptr = file_0; 190 kmem_free( &req ); 182 kmem_free( file_0 , bits_log2(sizeof(vfs_file_t)) ); 191 183 192 184 error_2: // release memory allocated for the pipe -
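The error paths above use backward-unwinding labels: a failure at step N jumps to error_N, which releases what step N-1 acquired and falls through to the labels below it. In compressed form (the comments stand for the calls shown in the diff; the control flow itself is only a sketch):

    pipe   = /* 1. create the pipe */;          if( pipe   == NULL ) goto error_1;
    file_0 = /* 2. alloc fd[0] descriptor */;   if( file_0 == NULL ) goto error_2;
    /* 3. register fdid_0 in fd_array[] */;     if( error )          goto error_3;
    file_1 = /* 4. alloc fd[1] descriptor */;   if( file_1 == NULL ) goto error_4;
    /* 5. register fdid_1 in fd_array[] */;     if( error )          goto error_5;
    return 0;

    error_5: kmem_free( file_1 , bits_log2(sizeof(vfs_file_t)) );  // undo step 4
    error_4: /* release fdid_0 from fd_array[] */                  // undo step 3
    error_3: kmem_free( file_0 , bits_log2(sizeof(vfs_file_t)) );  // undo step 2
    error_2: /* release memory allocated for the pipe */           // undo step 1
    error_1: return -1;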
trunk/kernel/syscalls/sys_place_fork.c
r670  r683
55    55
56    56    #if DEBUG_SYSCALLS_ERROR
57          if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) ;
      57    if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
58    58    printk("\n[ERROR] in %s : thread[%x,%x] / illegal cxy argument %x\n",
59    59    __FUNCTION__ , process->pid , this->trdid , cxy );
-
trunk/kernel/syscalls/sys_socket.c
r670 r683 45 45 46 46 #if DEBUG_SYS_SOCKET 47 static char* socket_ cmd_type_str( uint32_t type )47 static char* socket_user_cmd_str( uint32_t type ) 48 48 { 49 49 if ( type == SOCK_CREATE ) return "CREATE"; … … 54 54 else if( type == SOCK_SEND ) return "SEND"; 55 55 else if( type == SOCK_RECV ) return "RECV"; 56 else if( type == SOCK_SENDTO ) return "SENDTO"; 57 else if( type == SOCK_RECVFROM ) return "RECVFROM"; 56 58 else return "undefined"; 57 59 } … … 79 81 80 82 #if DEBUG_SYS_SOCKET 83 char kbuf[64]; 81 84 if( DEBUG_SYS_SOCKET < (uint32_t)tm_start ) 82 printk("\n[%s] thread[%x,%x] enter /%s / a1 %x / a2 %x / a3 %x / cycle %d\n",83 __FUNCTION__, process->pid, this->trdid, socket_ cmd_type_str(cmd),85 printk("\n[%s] thread[%x,%x] enter for %s / a1 %x / a2 %x / a3 %x / cycle %d\n", 86 __FUNCTION__, process->pid, this->trdid, socket_user_cmd_str(cmd), 84 87 arg1, arg2, arg3, (uint32_t)tm_start ); 85 88 #endif … … 97 100 98 101 #if DEBUG_SYSCALLS_ERROR 99 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 100 printk("\n[ERROR] in %s : thread[%x,%x] / CREATE / domain %d =! AF_INET\n", 101 __FUNCTION__ , process->pid , this->trdid , domain ); 102 printk("\n[ERROR] in %s : thread[%x,%x] / CREATE / domain %d =! AF_INET / cycle %d\n", 103 __FUNCTION__ , process->pid , this->trdid , domain , (uint32_t)tm_start ); 102 104 #endif 103 105 this->errno = EINVAL; … … 110 112 111 113 #if DEBUG_SYSCALLS_ERROR 112 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 113 printk("\n[ERROR] in %s : thread[%x,%x] / CREATE / illegal socket type\n", 114 __FUNCTION__ , process->pid , this->trdid ); 114 printk("\n[ERROR] in %s : thread[%x,%x] / CREATE / illegal socket type / cycle %d\n", 115 __FUNCTION__ , process->pid , this->trdid , (uint32_t)tm_start); 115 116 #endif 116 117 this->errno = EINVAL; … … 126 127 127 128 #if DEBUG_SYSCALLS_ERROR 128 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 129 printk("\n[ERROR] in %s : thread[%x,%x] / CREATE / cannot create socket\n", 130 __FUNCTION__ , process->pid , this->trdid ); 129 printk("\n[ERROR] in %s : thread[%x,%x] / CREATE / cannot create socket / cycle %d\n", 130 __FUNCTION__ , process->pid , this->trdid , (uint32_t)tm_start); 131 131 #endif 132 132 this->errno = EINVAL; … … 148 148 149 149 #if DEBUG_SYSCALLS_ERROR 150 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 151 printk("\n[ERROR] in %s : thread[%x,%x] / BIND / socket address %x unmapped\n", 152 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 ); 150 printk("\n[ERROR] in %s : thread[%x,%x] / BIND / socket address %x unmapped / cycle %d\n", 151 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start ); 153 152 #endif 154 153 this->errno = EINVAL; … … 171 170 172 171 #if DEBUG_SYSCALLS_ERROR 173 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 174 printk("\n[ERROR] in %s : thread[%x,%x] / BIND / cannot access socket[%x,%d]\n", 175 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid ); 172 printk("\n[ERROR] in %s : thread[%x,%x] / BIND / cannot access socket[%x,%d] / cycle %d\n", 173 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid , (uint32_t)tm_start ); 176 174 #endif 177 175 this->errno = EINVAL; … … 195 193 196 194 #if DEBUG_SYSCALLS_ERROR 197 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 198 printk("\n[ERROR] in %s : thread[%x,%x] / LISTEN / cannot access socket[%x,%d]\n", 199 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid ); 195 printk("\n[ERROR] in %s : thread[%x,%x] / LISTEN / cannot access socket[%x,%d] / cycle %d\n", 196 
__FUNCTION__ , process->pid , this->trdid , process->pid, fdid , (uint32_t)tm_start ); 200 197 #endif 201 198 this->errno = EINVAL; … … 217 214 218 215 #if DEBUG_SYSCALLS_ERROR 219 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 220 printk("\n[ERROR] in %s : thread[%x,%x] / CONNECT / server address %x unmapped\n", 221 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 ); 216 printk("\n[ERROR] in %s : thread[%x,%x] / CONNECT / server address %x unmapped / cycle %d\n", 217 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start ); 222 218 #endif 223 219 this->errno = EINVAL; … … 239 235 240 236 #if DEBUG_SYSCALLS_ERROR 241 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 242 printk("\n[ERROR] in %s : thread[%x,%x] / LISTEN / cannot access socket[%x,%d]\n", 243 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid ); 237 printk("\n[ERROR] in %s : thread[%x,%x] / LISTEN / cannot access socket[%x,%d] / cycle %d\n", 238 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid , (uint32_t)tm_start ); 244 239 #endif 245 240 this->errno = EINVAL; … … 261 256 262 257 #if DEBUG_SYSCALLS_ERROR 263 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 264 printk("\n[ERROR] in %s : thread[%x,%x] / CONNECT / server address %x unmapped\n", 265 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 ); 258 printk("\n[ERROR] in %s : thread[%x,%x] / CONNECT / server address %x unmapped / cycle %d\n", 259 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start ); 266 260 #endif 267 261 this->errno = EINVAL; … … 275 269 &k_sockaddr.sin_port ); 276 270 277 if( ret ) 278 { 279 280 #if DEBUG_SYSCALLS_ERROR 281 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 282 printk("\n[ERROR] in %s : thread[%x,%x] / ACCEPT / cannot access socket[%x,%d]\n", 283 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid ); 271 if( ret < 0 ) 272 { 273 274 #if DEBUG_SYSCALLS_ERROR 275 printk("\n[ERROR] in %s : thread[%x,%x] / ACCEPT / cannot access socket[%x,%d] / cycle %d\n", 276 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid , (uint32_t)tm_start ); 284 277 #endif 285 278 this->errno = EINVAL; … … 305 298 306 299 #if DEBUG_SYSCALLS_ERROR 307 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 308 printk("\n[ERROR] in %s : thread[%x,%x] / SEND / buffer %x unmapped\n", 309 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 ); 310 #endif 311 this->errno = EINVAL; 312 ret = -1; 313 break; 314 } 315 316 // check length 317 if( length == 0 ) 318 { 319 320 #if DEBUG_SYSCALLS_ERROR 321 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 322 printk("\n[ERROR] in %s : thread[%x,%x] / SEND / buffer length is 0\n", 323 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 ); 324 #endif 325 this->errno = EINVAL; 326 ret = -1; 327 break; 328 } 329 330 // cal relevant relevant socket function 331 ret = socket_send( fdid , u_buf , length ); 332 333 if( ret < 0 ) 334 { 335 336 #if DEBUG_SYSCALLS_ERROR 337 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 338 printk("\n[ERROR] in %s : thread[%x,%x] / SEND / cannot access socket[%x,%d] \n", 339 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid ); 340 #endif 341 this->errno = EINVAL; 342 } 300 printk("\n[ERROR] in %s : thread[%x,%x] / SEND / u_buf %x unmapped / cycle %d\n", 301 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start ); 302 #endif 303 this->errno = EINVAL; 304 ret = -1; 305 break; 306 } 307 308 // check length argument 309 if( (length == 0) || (length > 
(1<<CONFIG_SOCK_TX_BUF_ORDER)) ) 310 { 311 312 #if DEBUG_SYSCALLS_ERROR 313 printk("\n[ERROR] in %s : thread[%x,%x] / SEND / bad buffer length %d / cycle %d\n", 314 __FUNCTION__ , process->pid , this->trdid , length , (uint32_t)tm_start ); 315 #endif 316 this->errno = EINVAL; 317 ret = -1; 318 break; 319 } 320 321 // cal relevant socket function 322 ret = socket_send( fdid, 323 u_buf, 324 length ); 325 if( ret < 0 ) 326 { 327 328 #if DEBUG_SYSCALLS_ERROR 329 printk("\n[ERROR] in %s : thread[%x,%x] / SEND / cannot access socket[%x,%d] / cycle %d\n", 330 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid , (uint32_t)tm_start ); 331 #endif 332 this->errno = EINVAL; 333 } 334 335 #if DEBUG_SYS_SOCKET 336 if( DEBUG_SYS_SOCKET < (uint32_t)tm_start ) 337 { 338 hal_copy_from_uspace( XPTR( local_cxy , &kbuf ) , u_buf , ret ); 339 printk("\n[%s] thread[%x,%x] send %d bytes <%s>\n", 340 __FUNCTION__, process->pid, this->trdid , ret, kbuf ); 341 } 342 #endif 343 343 break; 344 344 } … … 355 355 356 356 #if DEBUG_SYSCALLS_ERROR 357 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 358 printk("\n[ERROR] in %s : thread[%x,%x] / RECV / buffer %x unmapped\n", 359 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 ); 360 #endif 361 this->errno = EINVAL; 362 ret = -1; 363 break; 364 } 365 366 // check length 367 if( length == 0 ) 368 { 369 370 #if DEBUG_SYSCALLS_ERROR 371 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 372 printk("\n[ERROR] in %s : thread[%x,%x] / RECV / buffer length is 0\n", 373 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 ); 357 printk("\n[ERROR] in %s : thread[%x,%x] / RECV / u_buf %x unmapped / cycle %d\n", 358 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start ); 359 #endif 360 this->errno = EINVAL; 361 ret = -1; 362 break; 363 } 364 365 // check length argument 366 if( (length == 0) || (length > (1<<CONFIG_SOCK_RX_BUF_ORDER)) ) 367 { 368 369 #if DEBUG_SYSCALLS_ERROR 370 printk("\n[ERROR] in %s : thread[%x,%x] / RECV / bad buffer length %d / cycle %d\n", 371 __FUNCTION__ , process->pid , this->trdid , length , (uint32_t)tm_start ); 374 372 #endif 375 373 this->errno = EINVAL; … … 379 377 380 378 // cal relevant kernel socket function 381 ret = socket_recv( fdid , u_buf , length ); 382 383 if( ret < 0 ) 384 { 385 386 #if DEBUG_SYSCALLS_ERROR 387 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 388 printk("\n[ERROR] in %s : thread[%x,%x] / RECV / cannot access socket[%x,%d] \n", 389 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid ); 390 #endif 391 this->errno = EINVAL; 392 } 379 ret = socket_recv( fdid, 380 u_buf, 381 length ); 382 if( ret < 0 ) 383 { 384 385 #if DEBUG_SYSCALLS_ERROR 386 printk("\n[ERROR] in %s : thread[%x,%x] / RECV / cannot access socket[%x,%d] / cycle %d\n", 387 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid , (uint32_t)tm_start ); 388 #endif 389 this->errno = EINVAL; 390 } 391 392 #if DEBUG_SYS_SOCKET 393 if( DEBUG_SYS_SOCKET < (uint32_t)tm_start ) 394 { 395 hal_copy_from_uspace( XPTR( local_cxy , &kbuf ) , u_buf , ret ); 396 printk("\n[%s] thread[%x,%x] received %d bytes <%s>\n", 397 __FUNCTION__, process->pid, this->trdid , ret, kbuf ); 398 } 399 #endif 400 break; 401 } 402 ///////////////// 403 case SOCK_SENDTO: 404 { 405 sockaddr_in_t k_remote_addr; 406 407 uint32_t fdid = (uint32_t)arg1 & 0x0000FFFF; 408 uint32_t length = (uint32_t)arg1 >> 16; 409 uint8_t * u_buf = (uint8_t *)(intptr_t)arg2; 410 sockaddr_t * u_remote_addr = (sockaddr_t *)(intptr_t)arg3; 411 412 // check 
u_buf mapped in user space 413 if( vmm_get_vseg( process , (intptr_t)arg2 , &vseg ) ) 414 { 415 416 #if DEBUG_SYSCALLS_ERROR 417 printk("\n[ERROR] in %s : thread[%x,%x] / SENDTO / u_buf %x unmapped / cycle %d\n", 418 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start ); 419 #endif 420 this->errno = EINVAL; 421 ret = -1; 422 break; 423 } 424 425 // check u_remote_addr mapped in user space 426 if( vmm_get_vseg( process , (intptr_t)arg3 , &vseg ) ) 427 { 428 429 #if DEBUG_SYSCALLS_ERROR 430 printk("\n[ERROR] in %s : thread[%x,%x] / SENDTO / u_remote_addr %x unmapped / cycle %d\n", 431 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg3 , (uint32_t)tm_start ); 432 #endif 433 this->errno = EINVAL; 434 ret = -1; 435 break; 436 } 437 438 // check length argument 439 if( (length == 0) || (length > (1<<CONFIG_SOCK_TX_BUF_ORDER)) ) 440 { 441 442 #if DEBUG_SYSCALLS_ERROR 443 printk("\n[ERROR] in %s : thread[%x,%x] / SENDTO / bad length %d / cycle %d\n", 444 __FUNCTION__ , process->pid , this->trdid , length , (uint32_t)tm_start ); 445 #endif 446 this->errno = EINVAL; 447 ret = -1; 448 break; 449 } 450 451 // make a kernel copy of the sockaddr_t structure 452 hal_copy_from_uspace( XPTR( local_cxy , &k_remote_addr ), 453 u_remote_addr, sizeof(sockaddr_t) ); 454 455 // cal relevant socket function 456 ret = socket_sendto( fdid, 457 u_buf, 458 length, 459 k_remote_addr.sin_addr, 460 k_remote_addr.sin_port ); 461 if( ret < 0 ) 462 { 463 464 #if DEBUG_SYSCALLS_ERROR 465 printk("\n[ERROR] in %s : thread[%x,%x] / SENDTO / cannot access socket[%x,%d] / cycle %d\n", 466 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid , (uint32_t)tm_start ); 467 #endif 468 this->errno = EINVAL; 469 } 470 471 break; 472 } 473 /////////////////// 474 case SOCK_RECVFROM: 475 { 476 sockaddr_in_t k_remote_addr; 477 478 uint32_t fdid = (uint32_t)arg1 & 0x0000FFFF; 479 uint32_t length = (uint32_t)arg1 >> 16; 480 uint8_t * u_buf = (uint8_t *)(intptr_t)arg2; 481 sockaddr_t * u_remote_addr = (sockaddr_t *)(intptr_t)arg3; 482 483 // check buffer is mapped in user space 484 if( vmm_get_vseg( process , (intptr_t)arg2 , &vseg ) ) 485 { 486 487 #if DEBUG_SYSCALLS_ERROR 488 printk("\n[ERROR] in %s : thread[%x,%x] / RECVFROM / u_buf %x unmapped / cycle %d\n", 489 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg2 , (uint32_t)tm_start ); 490 #endif 491 this->errno = EINVAL; 492 ret = -1; 493 break; 494 } 495 496 // check u_remote_addr mapped in user space 497 if( vmm_get_vseg( process , (intptr_t)arg3 , &vseg ) ) 498 { 499 500 #if DEBUG_SYSCALLS_ERROR 501 printk("\n[ERROR] in %s : thread[%x,%x] / RECVFROM / u_remote_addr %x unmapped / cycle %d\n", 502 __FUNCTION__ , process->pid , this->trdid , (intptr_t)arg3 , (uint32_t)tm_start ); 503 #endif 504 this->errno = EINVAL; 505 ret = -1; 506 break; 507 } 508 509 // check length argument 510 if( (length == 0) || (length > (1<<CONFIG_SOCK_RX_BUF_ORDER)) ) 511 { 512 513 #if DEBUG_SYSCALLS_ERROR 514 printk("\n[ERROR] in %s : thread[%x,%x] / RECVFROM / bad length %d / cycle %d\n", 515 __FUNCTION__ , process->pid , this->trdid , length , (uint32_t)tm_start ); 516 #endif 517 this->errno = EINVAL; 518 ret = -1; 519 break; 520 } 521 522 // make a kernel copy of the sockaddr_t structure 523 hal_copy_from_uspace( XPTR( local_cxy , &k_remote_addr ), 524 u_remote_addr, sizeof(sockaddr_t) ); 525 526 // cal relevant socket function 527 ret = socket_recvfrom( fdid, 528 u_buf, 529 length, 530 k_remote_addr.sin_addr, 531 k_remote_addr.sin_port ); 532 if( ret < 
0 ) 533 { 534 535 #if DEBUG_SYSCALLS_ERROR 536 printk("\n[ERROR] in %s : thread[%x,%x] / RECVFROM / cannot access socket[%x,%d] / cycle %d\n", 537 __FUNCTION__ , process->pid , this->trdid , process->pid, fdid , (uint32_t)tm_start ); 538 #endif 539 this->errno = EINVAL; 540 } 541 393 542 break; 394 543 } … … 398 547 399 548 #if DEBUG_SYSCALLS_ERROR 400 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 401 printk("\n[ERROR] in %s : thread[%x,%x] / undefined socket operation %d\n", 402 __FUNCTION__ , process->pid , this->trdid , cmd ); 549 printk("\n[ERROR] in %s : thread[%x,%x] / undefined socket operation %d / cycle %d\n", 550 __FUNCTION__ , process->pid , this->trdid , cmd , (uint32_t)tm_start ); 403 551 #endif 404 552 this->errno = EINVAL; … … 413 561 414 562 #if DEBUG_SYS_SOCKET 415 if( DEBUG_SYS_SOCKET < tm_end ) 416 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 417 __FUNCTION__, process->pid, this->trdid, (uint32_t)tm_end ); 563 printk("\n[%s] thread[%x,%x] exit for %s / cycle %d\n", 564 __FUNCTION__, process->pid, this->trdid, socket_user_cmd_str(cmd), (uint32_t)tm_end ); 418 565 #endif 419 566 -
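All four data-movement commands above decode arg1 identically: the fdid in the low 16 bits and the byte count in the high 16 bits, which is also why length is checked against (1 << CONFIG_SOCK_TX_BUF_ORDER) or (1 << CONFIG_SOCK_RX_BUF_ORDER). A user-side wrapper must therefore pack its arguments as sketched below; the wrapper and trap-invocation names are hypothetical, only the packing itself comes from this diff:

    // hypothetical user-level wrapper matching the kernel decoding above
    int socket_send_wrapper( uint32_t fdid , uint8_t * buf , uint32_t length )
    {
        reg_t arg1 = (reg_t)( (length << 16) | (fdid & 0xFFFF) );
        return do_syscall( SYS_SOCKET , SOCK_SEND , arg1 , (reg_t)(intptr_t)buf );
    }

Note that this encoding silently limits both the fdid and a single transfer to 16-bit values.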
trunk/kernel/syscalls/sys_thread_exit.c
r670 r683 2 2 * sys_thread_exit.c - terminates the execution of calling thread 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 44 44 pid_t pid = process->pid; 45 45 46 #if DEBUG_SYS_THREAD_EXIT || DEBUG_SYSCALLS_ERROR 47 uint64_t tm_start = hal_get_cycles(); 48 #endif 49 46 50 // check exit_value pointer in user space if required 47 51 if( exit_status != NULL ) … … 53 57 54 58 #if DEBUG_SYSCALLS_ERROR 55 printk("\n[ERROR] in %s : exit_status buffer %x unmapped / thread[%x,%x]\n", 56 __FUNCTION__, (intptr_t)exit_status, process->pid, this->trdid ); 59 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 60 printk("\n[WARNING] in %s : exit_status buffer %x unmapped / thread[%x,%x]\n", 61 __FUNCTION__, (intptr_t)exit_status, pid, trdid ); 57 62 #endif 58 63 this->errno = EINVAL; … … 67 72 68 73 #if DEBUG_SYSCALLS_ERROR 69 printk("\n[ERROR] in %s : busylocks count = %d / thread[%x,%x]\n", 70 __FUNCTION__ , count, process->pid, this->trdid ); 74 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 75 printk("\n[WARNING] in %s : busylocks count = %d / thread[%x,%x]\n", 76 __FUNCTION__ , count, pid, trdid ); 71 77 #endif 72 78 this->errno = EINVAL; … … 84 90 85 91 #if DEBUG_SYS_THREAD_EXIT 86 uint64_t tm_start = hal_get_cycles(); 87 if( DEBUG_SYS_THREAD_EXIT < tm_start ) 92 if( DEBUG_SYS_THREAD_EXIT < (uint32_t)tm_start ) 88 93 printk("\n[%s] thread[%x,%x] is main => delete process / cycle %d\n", 89 94 __FUNCTION__ , pid , trdid , (uint32_t)tm_start ); … … 96 101 97 102 #if DEBUG_SYS_THREAD_EXIT 98 uint64_t tm_start = hal_get_cycles(); 99 if( DEBUG_SYS_THREAD_EXIT < tm_start ) 103 if( DEBUG_SYS_THREAD_EXIT < (uint32_t)tm_start ) 100 104 printk("\n[%s] thread[%x,%x] is not main => delete thread / cycle %d\n", 101 105 __FUNCTION__ , pid , trdid , (uint32_t)tm_start ); -
trunk/kernel/syscalls/sys_thread_sleep.c
r566  r683
1     1     /*
2            * sys_thread_sleep.c - put the calling thread in sleep state
      2      * sys_thread_sleep.c - block the calling thread on SLEEP, with or without alarm
3     3      *
4            * Author Alain Greiner (2016,2017)
      4      * Author Alain Greiner (2016,2017,2018,2019,2020)
5     5      *
6     6      * Copyright (c) UPMC Sorbonne Universites
…     …
28    28    #include <syscalls.h>
29    29
30          //////////////////////
31          int sys_thread_sleep( void )
      30    ///////////////////////////////////////////////////////////////////////////////////////
      31    // This static function implements the alarm handler used to wake-up a thread
      32    // when the alarm rings after a sleep( seconds ) syscall.
      33    ///////////////////////////////////////////////////////////////////////////////////////
      34    // @ thread_xp : extended pointer on blocked thread.
      35    ///////////////////////////////////////////////////////////////////////////////////////
      36    static void __attribute__((noinline)) sleep_alarm_handler( xptr_t thread_xp )
32    37    {
      38    // stop the alarm
      39    alarm_stop( thread_xp );
33    40
34          thread_t * this = CURRENT_THREAD;
      41    // unblock the thread
      42    thread_unblock( thread_xp , THREAD_BLOCKED_SLEEP );
      43
      44    }  // end sleep_alarm_handler()
      45
      46    ////////////////////////////////////////
      47    int sys_thread_sleep( uint32_t seconds )
      48    {
      49    cycle_t ncycles;    // number of cycles to sleep
      50
      51    thread_t * this = CURRENT_THREAD;
      52    xptr_t thread_xp = XPTR( local_cxy , this );
      53
      54    cycle_t tm_start = hal_get_cycles();
35    55
36    56    #if DEBUG_SYS_THREAD_SLEEP
37          uint64_t tm_start;
38          uint64_t tm_end;
39          tm_start = hal_get_cycles();
40          if( DEBUG_SYS_THREAD_SLEEP < tm_start )
41          printk("\n[DBG] %s : thread %x in process %x blocked / cycle %d\n",
42          __FUNCTION__ , this->trdid, this->process->pid , (uint32_t)tm_start );
      57    if( DEBUG_SYS_THREAD_SLEEP < (uint32_t)tm_start )
      58    printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
      59    __FUNCTION__, this->process->pid, this->trdid, (uint32_t)tm_start );
43    60    #endif
44    61
45          thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_GLOBAL );
46          sched_yield("blocked on sleep");
      62    if( seconds == 0 )    // sleep without alarm
      63    {
      64
      65    #if DEBUG_SYS_THREAD_SLEEP
      66    if( DEBUG_SYS_THREAD_SLEEP < tm_start )
      67    printk("\n[%s] thread[%x,%x] blocks on <SLEEP> without alarm / cycle %d\n",
      68    __FUNCTION__ , this->process->pid, this->trdid, (uint32_t)tm_start );
      69    #endif
      70    // thread blocks and deschedules
      71    thread_block( thread_xp , THREAD_BLOCKED_SLEEP );
      72    sched_yield("sleep without alarm");
      73    }
      74    else                  // sleep with alarm
      75    {
      76    // translate seconds to ncycles
      77    ncycles = seconds * LOCAL_CLUSTER->sys_clk;
      78
      79    // register & start the calling thread alarm
      80    alarm_start( thread_xp,
      81                 tm_start + ncycles,
      82                 &sleep_alarm_handler,
      83                 thread_xp );
      84
      85    #if DEBUG_SYS_THREAD_SLEEP
      86    if( DEBUG_SYS_THREAD_SLEEP < tm_start )
      87    printk("\n[DBG] %s : thread[%x,%x] blocks on <SLEEP> for %d seconds / cycle %d\n",
      88    __FUNCTION__ , this->process->pid, this->trdid, seconds, (uint32_t)tm_start );
      89    #endif
      90    // thread blocks & deschedules
      91    thread_block( thread_xp , THREAD_BLOCKED_SLEEP );
      92    sched_yield("sleep with alarm");
      93    }
47    94
48    95    #if DEBUG_SYS_THREAD_SLEEP
49          tm_end = hal_get_cycles();
50    96    if( DEBUG_SYS_THREAD_SLEEP < tm_end )
51          printk("\n[DBG] %s : thread %x in process %x resume / cycle %d\n",
52          __FUNCTION__ , this->trdid, this->process->pid, (uint32_t)tm_end );
      97    printk("\n[%s] thread[%x,%x] resume / cycle %d\n",
      98    __FUNCTION__ , this->process->pid, this->trdid, (uint32_t)tm_end );
53    99    #endif
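The rewritten syscall therefore has two behaviours selected by its argument, with the second one driven by the per-thread alarm added in this changeset:

    sys_thread_sleep( 0 );   // unbounded sleep : blocks on THREAD_BLOCKED_SLEEP
                             // until another thread calls sys_thread_wakeup()

    sys_thread_sleep( 2 );   // bounded sleep : alarm_start() programs a wake-up
                             // at tm_start + 2 * LOCAL_CLUSTER->sys_clk cycles
-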
trunk/kernel/syscalls/sys_thread_wakeup.c
r637  r683
1     1     /*
2            * sys_thread_wakeup.c - wakeup indicated thread
      2      * sys_thread_wakeup.c - unblock indicated thread from the SLEEP condition
3     3      *
4            * Author Alain Greiner (2016,2017,2018,2019)
      4      * Author Alain Greiner (2016,2017,2018,2019,2020)
5     5      *
6     6      * Copyright (c) UPMC Sorbonne Universites
…     …
27    27    #include <process.h>
28    28    #include <errno.h>
29
30    29    #include <syscalls.h>
31    30
…     …
36    35    process_t * process = this->process;
37    36
38          #if (DEBUG_SYS_THREAD_WAKEUP || CONFIG_INSTRUMENTATION_SYSCALLS)
39          uint64_t tm_start = hal_get_cycles();
      37    #if DEBUG_SYS_THREAD_WAKEUP || DEBUG_SYSCALLS_ERROR || CONFIG_INSTRUMENTATION_SYSCALLS
      38    cycle_t tm_start = hal_get_cycles();
40    39    #endif
41    40
42    41    #if DEBUG_SYS_THREAD_WAKEUP
43          if( DEBUG_SYS_THREAD_WAKEUP < tm_start )
44          printk("\n[%s] thread %x in process enter to activate thread %x / cycle %d\n",
45          __FUNCTION__, this->trdid, process->pid, trdid, (uint32_t)tm_start );
      42    if( DEBUG_SYS_THREAD_WAKEUP < (uint32_t)tm_start )
      43    printk("\n[%s] thread[%x,%x] enter to activate thread %x / cycle %d\n",
      44    __FUNCTION__, process->pid, this->trdid, trdid, (uint32_t)tm_start );
46    45    #endif
…     …
56    55
57    56    #if DEBUG_SYSCALLS_ERROR
58          printk("\n[ERROR] in %s : thread %x in process %x / illegal trdid argument %x\n",
59          __FUNCTION__, this->trdid, process->pid, trdid );
      57    if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
      58    printk("\n[ERROR] in %s : thread[%x,%x] / illegal trdid argument %x\n",
      59    __FUNCTION__, process->pid, this->trdid, trdid );
60    60    #endif
61    61    this->errno = EINVAL;
…     …
70    70
71    71    #if DEBUG_SYSCALLS_ERROR
72          printk("\n[ERROR] in %s : thread %x in process %x cannot find thread %x/n",
73          __FUNCTION__ , this->trdid, process->pid, trdid );
      72    if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start )
      73    printk("\n[ERROR] in %s : thread[%x,%x] / cannot find thread[%x,%x]\n",
      74    __FUNCTION__, process->pid, this->trdid, process->pid, trdid );
74    75    #endif
75    76    CURRENT_THREAD->errno = EINVAL;
…     …
77    78    }
78    79
      80    // get target thread cluster and local pointer
      81    thread_t * tgt_ptr = GET_PTR( thread_xp );
      82    cxy_t tgt_cxy = GET_CXY( thread_xp );
      83
      84    // get state of the target thread alarm
      85    bool_t linked = hal_remote_l32( XPTR( tgt_cxy , &tgt_ptr->alarm.linked ) );
      86
      87    // delete the alarm if active
      88    if( linked ) alarm_stop( thread_xp );
      89
79    90    // unblock target thread
80          thread_unblock( thread_xp , THREAD_BLOCKED_GLOBAL );
      91    thread_unblock( thread_xp , THREAD_BLOCKED_SLEEP );
81    92
82    93    #if (DEBUG_SYS_THREAD_WAKEUP || CONFIG_INSTRUMENTATION_SYSCALLS)
…     …
87    98    #if DEBUG_SYS_THREAD_WAKEUP
88    99    if( DEBUG_SYS_THREAD_WAKEUP < tm_end )
89          printk("\n[%s] thread %x in process %x exit / thread %x activated / cycle %d\n",
90          __FUNCTION__ , this->trdid, process->pid, trdid, (uint32_t)tm_end );
      100   printk("\n[%s] thread[%x,%x] exit / thread[%x,%x] activated / cycle %d\n",
      101   __FUNCTION__ , process->pid, this->trdid, process->pid, trdid, (uint32_t)tm_end );
91    102   #endif
-
trunk/kernel/syscalls/sys_timeofday.c
r637 r683 2 2 * sys_timeofday.c - Get current time 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019)4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 32 32 #include <core.h> 33 33 #include <shared_syscalls.h> 34 35 34 #include <syscalls.h> 36 35 … … 50 49 process_t * process = this->process; 51 50 52 #if (DEBUG_SYS_TIMEOFDAY || CONFIG_INSTRUMENTATION_SYSCALLS)51 #if DEBUG_SYS_TIMEOFDAY || DEBUG_SYSCALLS_ERROR || CONFIG_INSTRUMENTATION_SYSCALLS 53 52 uint64_t tm_start = hal_get_cycles(); 54 53 #endif 55 54 56 55 #if DEBUG_SYS_TIMEOFDAY 57 if( DEBUG_SYS_TIMEOFDAY < tm_start )56 if( DEBUG_SYS_TIMEOFDAY < (uint32_t)tm_start ) 58 57 printk("\n[%s] thread[%x,%x] enter / cycle %d\n", 59 58 __FUNCTION__, process->pid, this->trdid, (uint32_t)tm_start ); … … 65 64 66 65 #if DEBUG_SYSCALLS_ERROR 67 printk("\n[ERROR] in %s for thread %x in process %x : tz argument must be NULL\n", 66 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 67 printk("\n[ERROR] in %s : thread[%x,%x] / tz argument must be NULL\n", 68 68 __FUNCTION__ , this->trdid , process->pid ); 69 69 #endif … … 79 79 80 80 #if DEBUG_SYSCALLS_ERROR 81 printk("\n[ERROR] in %s : user buffer tz unmapped / thread %x / process %x\n", 82 __FUNCTION__ , (intptr_t)tz , this->trdid , process->pid ); 81 if( DEBUG_SYSCALLS_ERROR < (uint32_t)tm_start ) 82 printk("\n[ERROR] in %s : thread[%x,%x] / user buffer tv unmapped\n", 83 __FUNCTION__ , this->trdid , process->pid , (intptr_t)tz ); 83 84 #endif 84 85 this->errno = EINVAL; -
trunk/kernel/syscalls/syscalls.h
r670 r683 38 38 39 39 /****************************************************************************************** 40 * This function forces the calling thread to sleep, for a fixed number of cycles.41 ****************************************************************************************** 42 * cycles : number of cycles.43 *****************************************************************************************/ 44 int sys_alarm( uint32_t cycles );40 * This function forces the calling thread to sleep, for a fixed number of seconds. 41 ****************************************************************************************** 42 * @ seconds : number of seconds. 43 *****************************************************************************************/ 44 int sys_alarm( uint32_t seconds ); 45 45 46 46 /****************************************************************************************** … … 229 229 230 230 /****************************************************************************************** 231 * This generic function implements all the non standard syscalls of type "get_xxx()", 232 * defined in the <almosmkh.h> and <almosmkh.c> files. 233 * The operation types mnemonics are defined in the <shared_almos.h> file. 234 * This function ckecks the syscall arguments, and call the relevant kernel function. 235 ****************************************************************************************** 236 * @ arg0 : operation type (mnemonics defined in shared_get.h) 237 * @ arg1 : depends on operation type 238 * @ arg2 : depends on operation type 239 * @ arg3 : depends on operation type 240 * @ return 0 if success / return -1 if illegal argument. 241 *****************************************************************************************/ 242 int sys_get( reg_t arg0, 243 reg_t arg1, 244 reg_t arg2, 245 reg_t arg3 ); 246 247 /****************************************************************************************** 231 248 * This function implements the non-standard "get_best_core" syscall. 232 249 * It selects, in a macro-cluster specified by the <base_cxy> and <level> arguments, … … 470 487 * as a remote_buffer_t, creates two (read and write) file descriptors, and links these 471 488 * two file descriptors to the pipe. 472 * TODO : the dynamic memory allocation in case of buffer full is not implemented. 489 * TODO : the dynamic memory allocation in case of buffer full is not implemented. 490 * FIXME : wich syscall release the kernel memory allocated by this syscall ? 473 491 ****************************************************************************************** 474 492 * @ fd : pointeur on a 2 slots array of fdid : fd[0] read / fd[1] write. … … 566 584 567 585 /****************************************************************************************** 568 * This generic function implements thesocket related syscalls.586 * This generic function implements all socket related syscalls. 569 587 * The operation types mnemonics are defined in the <shared_socket> file. 570 588 * The supported operations are defined in the <socket.h> & <socket.c> files. … … 686 704 687 705 /****************************************************************************************** 688 * This function block the calling thread on the THREAD_BLOCKED_GLOBAL condition, 689 * and deschedule. 690 ****************************************************************************************** 691 * @ return 0 if success / returns -1 if failure. 
692 *****************************************************************************************/ 693 int sys_thread_sleep( void ); 706 * This function blocks the calling thread on the THREAD_BLOCKED_SLEEP condition, 707 * and deschedules. When the <seconds> argument is non-zero, this argument defines 708 * the sleeping time. When it is zero, the sleeping time is unbounded, and the thread 709 * must be unblocked by the sys_thread_wakeup() function 710 ****************************************************************************************** 711 * @ seconds : number of seconds of sleep / No alarm is activated when 0. 712 * @ return 0 if success / returns -1 if failure. 713 *****************************************************************************************/ 714 int sys_thread_sleep( uint32_t seconds ); 694 715 695 716 /****************************************************************************************** 696 717 * This function unblock the thread identified by its <trdid> from the 697 * THREAD_BLOCKED_ GLOBAL condition.718 * THREAD_BLOCKED_SLEEP condition, and cancel the registered alarm if required. 698 719 ****************************************************************************************** 699 720 * @ trdid : target thread identifier. … … 705 726 * This function calls the scheduler for the core running the calling thread. 706 727 ****************************************************************************************** 707 * @ x_size : [out] number of clusters in a row.708 * @ y_size : [out] number of clusters in a column.709 * @ ncores : [out] number of cores per cluster.710 728 * @ return always 0. 711 729 *****************************************************************************************/
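As documented above, sys_get() multiplexes all the former get_xxx() entries through arg0. User code could then fetch the cycle counter as follows; the argument convention for GET_CYCLE is an assumption, only the prototype and the mnemonic come from this changeset:

    uint64_t cycle;

    // arg0 selects the operation ; arg1 is assumed to carry the user buffer
    if( sys_get( GET_CYCLE , (reg_t)(intptr_t)&cycle , 0 , 0 ) == 0 )
        printf("current cycle = %u\n" , (unsigned)cycle );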