Changeset 683
- Timestamp: Jan 13, 2021, 12:36:17 AM (4 years ago)
- Location: trunk/kernel
- Files: 1 deleted, 75 edited
trunk/kernel/Makefile
r675 → r683

  	build/mm/page.o \
  	build/mm/kcm.o \
- 	build/mm/khm.o \
  	build/mm/mapper.o \
  	build/mm/kmem.o
  …
  	build/syscalls/sys_wait.o

- SYS_OBJS_4 = build/syscalls/sys_get_config.o \
- 	build/syscalls/sys_get_core_id.o \
- 	build/syscalls/sys_get_cycle.o \
+ SYS_OBJS_4 = build/syscalls/sys_get.o \
  	build/syscalls/sys_display.o \
  	build/syscalls/sys_place_fork.o \
  …
  	build/syscalls/sys_trace.o \
  	build/syscalls/sys_fg.o \
- 	build/syscalls/sys_is_fg.o
+ 	build/syscalls/sys_is_fg.o \
+ 	build/syscalls/sys_fbf.o

  SYS_OBJS_5 = build/syscalls/sys_exit.o \
  	build/syscalls/sys_sync.o \
  	build/syscalls/sys_fsync.o \
- 	build/syscalls/sys_get_best_core.o \
- 	build/syscalls/sys_get_nb_cores.o \
- 	build/syscalls/sys_get_thread_info.o \
- 	build/syscalls/sys_fbf.o \
  	build/syscalls/sys_socket.o
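The object list above folds the former sys_get_config, sys_get_core_id, sys_get_cycle, sys_get_best_core, sys_get_nb_cores and sys_get_thread_info objects into a single sys_get.o (sys_fbf.o also moves from SYS_OBJS_5 to SYS_OBJS_4). The kernel entry point itself is not part of this excerpt; the sketch below only illustrates the single-dispatcher shape such a consolidation usually takes, and every name in it (sys_get(), the GET_* operation codes, the argument layout) is hypothetical.

// Hypothetical sketch only: one sys_get() entry point multiplexing the former
// per-field syscalls through an operation code. This is NOT the actual
// ALMOS-MKH implementation, which is not shown in this changeset.
typedef enum
{
    GET_CONFIG,        // was sys_get_config()
    GET_CORE_ID,       // was sys_get_core_id()
    GET_CYCLE,         // was sys_get_cycle()
    GET_BEST_CORE,     // was sys_get_best_core()
    GET_NB_CORES,      // was sys_get_nb_cores()
    GET_THREAD_INFO,   // was sys_get_thread_info()
}
get_operation_t;

int sys_get( get_operation_t operation,
             intptr_t        arg0,       // operation-dependent argument
             intptr_t        arg1 )      // operation-dependent argument
{
    (void)arg0;
    (void)arg1;

    switch( operation )
    {
        case GET_CONFIG:      /* copy platform description to user buffer */ return 0;
        case GET_CORE_ID:     /* return calling core cluster / local index */ return 0;
        case GET_CYCLE:       /* return current cycle counter              */ return 0;
        case GET_BEST_CORE:   /* return least loaded core                  */ return 0;
        case GET_NB_CORES:    /* return number of cores in a cluster       */ return 0;
        case GET_THREAD_INFO: /* copy thread descriptor info to user       */ return 0;
        default:              return -1;
    }
}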
trunk/kernel/devices/dev_fbf.c
r674 → r683

  	                            intptr_t  * user_buffer )
  {
- 	kmem_req_t      req;
  	fbf_window_t  * window;    // window descriptor (created in local cluster)
  	vseg_t        * vseg;      // vseg descriptor (created in reference cluster)
  …
  	// allocate memory for the window descriptor in local cluster
- 	req.type  = KMEM_KCM;
- 	req.order = bits_log2( sizeof(fbf_window_t) );
- 	req.flags = AF_ZERO | AF_KERNEL;
- 	window = kmem_alloc( &req );
+ 	window = kmem_alloc( bits_log2(sizeof(fbf_window_t)) , AF_ZERO );

  	if( window == NULL )
  …
  	    printk("\n[ERROR] in %s / thread[%x,%x] cannot create vseg in reference cluster\n",
  	    __FUNCTION__, process->pid, this->trdid );
- 	    req.ptr = (void *)window;
- 	    kmem_free( &req );
+ 	    kmem_free( window , bits_log2(sizeof(fbf_window_t)) );
  	    return -1;
  	}
  …
  	    printk("\n[ERROR] in %s / thread[%x,%x] cannot allocate buffer for window\n",
  	    __FUNCTION__, process->pid, this->trdid );
- 	    req.ptr = (void *)window;
- 	    kmem_free( &req );
+ 	    kmem_free( window , bits_log2(sizeof(fbf_window_t)) );
  	    vmm_remove_vseg( process , vseg );
  	    return -1;
  …
  error_t dev_fbf_delete_window( uint32_t wid )
  {
- 	kmem_req_t   req;
-
  	thread_t   * this    = CURRENT_THREAD;
  	process_t  * process = this->process;
  …
  	// 8. release memory allocated for window descriptor
- 	req.type = KMEM_KCM;
- 	req.ptr  = window_ptr;
- 	kmem_remote_free( window_cxy , &req );
+ 	kmem_remote_free( window_cxy , window_ptr , bits_log2(sizeof(fbf_window_t)) );

  	// 9. release the associated vseg
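The dev_fbf.c changes above are representative of an interface change applied across this changeset: the kmem_req_t request descriptor disappears, and kmem_alloc(), kmem_free() and kmem_remote_free() now take the object size directly, expressed as a log2 order, plus the AF_* allocation flags. A condensed sketch of the new calling pattern, using the fbf_window_t descriptor from the code above; the two helper names are illustrative only.

// Minimal sketch of the new kmem interface: order = log2 of the object size,
// flags = AF_* mask. The helper names are not part of the kernel.
static fbf_window_t * window_create( void )
{
    // replaces the old kmem_req_t { KMEM_KCM , order , AF_ZERO | AF_KERNEL } request
    fbf_window_t * window = kmem_alloc( bits_log2( sizeof(fbf_window_t) ) , AF_ZERO );

    return window;                           // NULL on failure
}

static void window_destroy( cxy_t          window_cxy,
                            fbf_window_t * window_ptr )
{
    if( window_cxy == local_cxy )            // release in the local cluster
    {
        kmem_free( window_ptr , bits_log2( sizeof(fbf_window_t) ) );
    }
    else                                     // release in a remote cluster
    {
        kmem_remote_free( window_cxy , window_ptr , bits_log2( sizeof(fbf_window_t) ) );
    }
}

The order argument has to match between allocation and release, which is why every release site recomputes bits_log2(sizeof(fbf_window_t)) instead of caching it in a request descriptor.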
trunk/kernel/devices/dev_nic.c
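In dev_nic.c the two separate helpers dev_nic_udp_checksum() and dev_nic_tcp_checksum() are merged into a single dev_nic_tcp_udp_checksum() that also folds the IP pseudo-header (source and destination addresses, protocol code, segment/packet length) into the one's-complement sum, 16 bits at a time. The standalone sketch below mirrors that computation with plain C types; the PROTOCOL_TCP and PROTOCOL_UDP values are the usual IP protocol numbers and are assumed here, and the caller must have zeroed the checksum field in the header first, as the RX handlers in this file do.

#include <stdint.h>
#include <stdbool.h>

#define PROTOCOL_TCP   6     // assumed standard IP protocol numbers
#define PROTOCOL_UDP  17

// One's-complement checksum over a TCP segment or UDP packet plus the IP
// pseudo-header. buffer[] must have one spare byte when length is odd.
static uint16_t tcp_udp_checksum( uint8_t  * buffer,
                                  uint32_t   length,
                                  uint32_t   src_ip_addr,
                                  uint32_t   dst_ip_addr,
                                  bool       is_tcp )
{
    uint16_t * buf = (uint16_t *)buffer;
    uint32_t   max = length >> 1;            // number of 16-bit words
    uint32_t   cs  = 0;                      // 32-bit accumulator

    // pad with a null byte when the length is odd
    if( length & 1 )
    {
        buffer[length] = 0;
        max++;
    }

    // sum the segment / packet itself
    for( uint32_t i = 0 ; i < max ; i++ ) cs += buf[i];

    // add the pseudo-header: addresses, length, protocol code
    cs += (src_ip_addr & 0xFFFF);
    cs += (src_ip_addr >> 16);
    cs += (dst_ip_addr & 0xFFFF);
    cs += (dst_ip_addr >> 16);
    cs += length;
    cs += (is_tcp ? PROTOCOL_TCP : PROTOCOL_UDP);

    // fold carries back into the low 16 bits
    while( cs >> 16 ) cs = (cs & 0xFFFF) + (cs >> 16);

    // one's complement
    return (uint16_t)(~cs);
}

The kernel version folds the carry with two explicit additions; the loop above is equivalent, since at most two folds are ever needed for a 32-bit accumulator.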
r674 r683 1 2 1 /* 3 2 * dev_nic.c - NIC (Network Controler) generic device API implementation. … … 46 45 void dev_nic_init( chdev_t * chdev ) 47 46 { 47 48 assert( __FUNCTION__ , (chdev->func == DEV_FUNC_NIC) , 49 "bad func value"); 50 48 51 thread_t * new_thread; 49 52 error_t error; … … 74 77 75 78 // build pointer on server function 76 void * func = is_rx ? &dev_nic_rx_server : &dev_nic_tx_server;79 void * server_func = is_rx ? &dev_nic_rx_server : &dev_nic_tx_server; 77 80 78 81 // create server thread 79 82 error = thread_kernel_create( &new_thread, 80 83 THREAD_DEV, 81 func,84 server_func, 82 85 chdev, 83 86 lid ); … … 120 123 thread_t * this = CURRENT_THREAD; 121 124 122 xptr_t dev_xp = chdev_dir.nic_tx[0]; 125 // get cluster and local pointer fo the nic_tx[0] chdev 126 xptr_t dev_xp = chdev_dir.nic_tx[0]; 123 127 chdev_t * dev_ptr = GET_PTR( dev_xp ); 124 125 if( dev_xp == XPTR_NULL ) return -1; 128 cxy_t dev_cxy = GET_CXY( dev_xp ); 129 130 if( dev_xp == XPTR_NULL ) 131 { 132 133 #if DEBUG_DEV_NIC_ERROR 134 printk("\n[ERROR] in %s : nic_tx[0] chdev undefined in chdev_dir of cluster %x\n", 135 __FUNCTION__, local_cxy ); 136 #endif 137 return -1; 138 } 126 139 127 140 // set command arguments in client thread descriptor … … 131 144 this->nic_cmd.type = NIC_CMD_GET_KEY; 132 145 146 // get cmd function pointer from nic_tx[0] chdev descriptor 147 dev_cmd_t * cmd = hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd )); 148 133 149 // call driver 134 dev_ptr->cmd( XPTR( local_cxy , this ) );135 136 // get "status"150 cmd( XPTR( local_cxy , this ) ); 151 152 // return command status 137 153 return this->nic_cmd.status; 138 } 154 155 } // end dev_nic_get_key() 139 156 140 157 ////////////////////////////////////////// … … 146 163 if( channel >= LOCAL_CLUSTER->nb_nic_channels ) return -1; 147 164 148 xptr_t dev_xp = chdev_dir.nic_tx[0]; 165 // get cluster and local pointer fo the nic_tx[channel] chdev 166 xptr_t dev_xp = chdev_dir.nic_tx[channel]; 149 167 chdev_t * dev_ptr = GET_PTR( dev_xp ); 150 151 if( dev_xp == XPTR_NULL ) return -1; 168 cxy_t dev_cxy = GET_CXY( dev_xp ); 169 170 if( dev_xp == XPTR_NULL ) 171 { 172 173 #if DEBUG_DEV_NIC_ERROR 174 printk("\n[ERROR] in %s : nic_tx[%d] chdev undefined in chdev_dir of cluster %x\n", 175 __FUNCTION__, channel, local_cxy ); 176 #endif 177 return -1; 178 } 152 179 153 180 // set command arguments in client thread descriptor … … 157 184 this->nic_cmd.status = run; 158 185 186 // get cmd function pointer from nic_tx[channel] chdev descriptor 187 dev_cmd_t * cmd = hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd )); 188 159 189 // call driver 160 dev_ptr->cmd( XPTR( local_cxy , this ) );190 cmd( XPTR( local_cxy , this ) ); 161 191 162 192 // return "error" 163 193 return this->nic_cmd.error; 164 } 194 195 } // end dev_nic_set_run() 165 196 166 197 ////////////////////////////////// … … 169 200 thread_t * this = CURRENT_THREAD; 170 201 202 // get cluster and local pointer fo the nic_tx[0] chdev 171 203 xptr_t dev_xp = chdev_dir.nic_tx[0]; 172 204 chdev_t * dev_ptr = GET_PTR( dev_xp ); 205 cxy_t dev_cxy = GET_CXY( dev_xp ); 173 206 174 if( dev_xp == XPTR_NULL ) return -1; 207 if( dev_xp == XPTR_NULL ) 208 { 209 210 #if DEBUG_DEV_NIC_ERROR 211 printk("\n[ERROR] in %s : nic_tx[0] chdev undefined in chdev_dir of cluster %x\n", 212 __FUNCTION__, local_cxy ); 213 #endif 214 return -1; 215 } 175 216 176 217 // set command arguments in client thread descriptor … … 178 219 this->nic_cmd.type = NIC_CMD_GET_INSTRU; 179 220 221 // get cmd function pointer from 
nic_tx[0] chdev descriptor 222 dev_cmd_t * cmd = hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd )); 223 180 224 // call driver 181 dev_ptr->cmd( XPTR( local_cxy , this ) );225 cmd( XPTR( local_cxy , this ) ); 182 226 183 227 // return "error" 184 228 return this->nic_cmd.error; 185 } 229 230 } // end dev_nic_get_instru() 231 186 232 187 233 //////////////////////////////////// … … 190 236 thread_t * this = CURRENT_THREAD; 191 237 238 // get cluster and local pointer fo the nic_tx[0] chdev 192 239 xptr_t dev_xp = chdev_dir.nic_tx[0]; 193 240 chdev_t * dev_ptr = GET_PTR( dev_xp ); 241 cxy_t dev_cxy = GET_CXY( dev_xp ); 194 242 195 if( dev_xp == XPTR_NULL ) return -1; 243 if( dev_xp == XPTR_NULL ) 244 { 245 246 #if DEBUG_DEV_NIC_ERROR 247 printk("\n[ERROR] in %s : nic_tx[0] chdev undefined in chdev_dir of cluster %x\n", 248 __FUNCTION__, local_cxy ); 249 #endif 250 return -1; 251 } 196 252 197 253 // set command arguments in client thread descriptor … … 199 255 this->nic_cmd.type = NIC_CMD_GET_INSTRU; 200 256 257 // get cmd function pointer from nic_tx[0] chdev descriptor 258 dev_cmd_t * cmd = hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd )); 259 201 260 // call driver 202 dev_ptr->cmd( XPTR( local_cxy , this ) );261 cmd( XPTR( local_cxy , this ) ); 203 262 204 263 // return "error" 205 264 return this->nic_cmd.error; 206 } 265 266 } // end dev_nic_clear_instru() 207 267 208 268 … … 261 321 262 322 //////////////////////////////////////////////////////////////////////////////////////// 263 // This static function computes the checksum for an UDP packet defined by 264 // the <buffer> and <size> arguments. 323 // This static function computes the checksum for a TCP segment or an UDP packet, 324 // defined by the <buffer> and <length> arguments. 325 // It includes the "pseudo header "defined by the <src_ip_addr>, <dst_ip_addr>, and 326 // <tcp_length> arguments, and by the UDP/TCP protocol code. 265 327 //////////////////////////////////////////////////////////////////////////////////////// 266 // @ buffer : [in] pointer on UDP packet base. 267 // @ size : [in] number of bytes in this packet (including header). 328 // @ buffer : [in] pointer on buffer containing the TCP segment or UDP packet. 329 // @ length : [in] number of bytes in this packet/segment (including header). 330 // @ src_ip_addr : [in] source IP address (pseudo header). 331 // @ dst_ip_addr : [in] destination IP address (pseudo header). 332 // @ is_tcp : [in] TCP if true / UDP if false (pseudo header). 
268 333 // @ return the checksum value on 16 bits 269 334 //////////////////////////////////////////////////////////////////////////////////////// 270 static uint16_t dev_nic_udp_checksum( uint8_t * buffer, 271 uint32_t size ) 272 { 273 uint32_t i; 274 uint32_t carry; 275 uint32_t cs; // 32 bits accumulator 276 uint16_t * buf; 277 uint32_t max; // number of uint16_t in packet 278 279 // compute max & buf 280 buf = (uint16_t *)buffer; 281 max = size >> 1; 282 283 // extend buffer[] if required 284 if( size & 1 ) 285 { 286 max++; 287 buffer[size] = 0; 288 } 289 290 // compute checksum for UDP packet 291 for( i = 0 , cs = 0 ; i < size ; i++ ) cs += buf[i]; 292 293 // handle carry 294 carry = (cs >> 16); 295 if( carry ) 296 { 297 cs += carry; 298 carry = (cs >> 16); 299 if( carry ) cs += carry; 300 } 301 302 // one's complement 303 return ~cs; 304 } 305 306 //////////////////////////////////////////////////////////////////////////////////////// 307 // This static function computes the checksum for a TCP segment defined by the <buffer> 308 // and <size> arguments. It includes the pseudo header defined by the <src_ip_addr>, 309 // <dst_ip_addr>, <size> arguments, and by the TCP_PROTOCOL code. 310 //////////////////////////////////////////////////////////////////////////////////////// 311 // @ buffer : [in] pointer on TCP segment base. 312 // @ tcp_length : [in] number of bytes in this TCP segment (including header). 313 // @ src_ip_addr : [in] source IP address (pseudo header) 314 // @ dst_ip_addr : [in] destination IP address (pseudo header) 315 // @ return the checksum value on 16 bits 316 //////////////////////////////////////////////////////////////////////////////////////// 317 static uint16_t dev_nic_tcp_checksum( uint8_t * buffer, 318 uint32_t tcp_length, 319 uint32_t src_ip_addr, 320 uint32_t dst_ip_addr ) 335 static uint16_t dev_nic_tcp_udp_checksum( uint8_t * buffer, 336 uint32_t length, 337 uint32_t src_ip_addr, 338 uint32_t dst_ip_addr, 339 bool_t is_tcp ) 321 340 { 322 341 uint32_t i; … … 324 343 uint32_t cs; // 32 bits accumulator 325 344 uint16_t * buf; 326 uint32_t max; // number of uint16_t in segment 345 uint32_t max; // number of uint16_t in segment/paket 327 346 328 347 // compute max & buf 329 348 buf = (uint16_t *)buffer; 330 max = tcp_length >> 1;349 max = length >> 1; 331 350 332 351 // extend buffer[] if required 333 if( tcp_length & 1 )352 if( length & 1 ) 334 353 { 335 354 max++; 336 buffer[ tcp_length] = 0;355 buffer[length] = 0; 337 356 } 338 357 339 358 // compute checksum for TCP segment 340 for( i = 0 , cs = 0 ; i < tcp_length; i++ ) cs += buf[i];359 for( i = 0 , cs = 0 ; i < max ; i++ ) cs += buf[i]; 341 360 342 361 // complete checksum for pseudo-header 343 cs += src_ip_addr; 344 cs += dst_ip_addr; 345 cs += PROTOCOL_TCP; 346 cs += tcp_length; 362 cs += (src_ip_addr & 0xFFFF); 363 cs += (src_ip_addr >> 16 ); 364 cs += (dst_ip_addr & 0xFFFF); 365 cs += (dst_ip_addr >> 16 ); 366 cs += length; 367 cs += (is_tcp ? PROTOCOL_TCP : PROTOCOL_UDP); 347 368 348 369 // handle carry … … 360 381 361 382 /////////////////////////////////////////////////////////////////////////////////////////// 362 // This static function is called by the NIC_TX orNIC_RX server threads to unblock383 // This static function is called by the NIC_TX and NIC_RX server threads to unblock 363 384 // the TX client thread after completion (success or error) of a TX command registered 364 // in a socket identified by the <socket_xp> argument. 
The <status> argument defines365 // the command success/failure status: a null value signals a success, a non-null value366 // signals a failure. For all commands, it copies the status value in the tx_sts field,367 // and print an errormessage on TXT0 in case of failure.385 // in a socket identified by the <socket_xp> argument. 386 // The <status> argument defines the command success/failure status. 387 // For all commands, it copies the status value in the tx_sts field, and print an error 388 // message on TXT0 in case of failure. 368 389 /////////////////////////////////////////////////////////////////////////////////////////// 369 390 // @ socket_xp : [in] extended pointer on socket … … 377 398 cxy_t socket_cxy = GET_CXY( socket_xp ); 378 399 379 if( status != CMD_STS_SUCCESS)400 if( (status != CMD_STS_SUCCESS) && (status != CMD_STS_EOF) ) 380 401 { 381 402 uint32_t sock_state = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->state )); … … 400 421 } // end dev_nic_unblock_tx_client() 401 422 402 /////////////////////////////////////////////////////////////////////////////////////////// 403 // This static function is called by the NIC_TX or NIC_RX server threads to unblock 423 424 /////////////////////////////////////////////////////////////////////////////////////////// 425 // Functions called by the NIC_RX server thread 426 /////////////////////////////////////////////////////////////////////////////////////////// 427 428 /////////////////////////////////////////////////////////////////////////////////////////// 429 // This static function is called by the NIC_RX server threads to unblock 404 430 // the RX client thread after completion (success or error) of an RX command registered 405 // in a socket identified by the <socket_xp> argument. The <status> argument defines406 // the command success/failure status: a null value signals a success, a non-null value407 // signals a failure. For all commands, it copies the status value in the rx_sts field,408 // and print an errormessage on TXT0 in case of failure.431 // in a socket identified by the <socket_xp> argument. 432 // The <status> argument defines the command success/failure status. 433 // For all commands, it copies the status value in the rx_sts field, and print an error 434 // message on TXT0 in case of failure. 
409 435 /////////////////////////////////////////////////////////////////////////////////////////// 410 436 // @ socket_xp : [in] extended pointer on socket … … 418 444 cxy_t socket_cxy = GET_CXY( socket_xp ); 419 445 420 if( status != CMD_STS_SUCCESS)446 if( (status != CMD_STS_SUCCESS) && (status != CMD_STS_EOF) ) 421 447 { 422 448 uint32_t sock_state = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->state )); … … 440 466 441 467 } // end dev_nic_unblock_rx_client() 442 443 ///////////////////////////////////////////////////////////////////////////////////////////444 // Functions called by the NIC_RX server thread445 ///////////////////////////////////////////////////////////////////////////////////////////446 468 447 469 /////////////////////////////////////////////////////////////////////////////////////////// … … 553 575 554 576 return 0; 555 } 577 578 } // end dev_nic_rx_check_ip() 556 579 557 580 /////////////////////////////////////////////////////////////////////////////////////////// … … 595 618 xptr_t socket_rbuf_xp; // extended pointer on socket rx_buf 596 619 xptr_t socket_lock_xp; // extended pointer on socket lock 597 xptr_t socket_client_xp; // extended pointer on socket rx_client field 598 xptr_t client_xp; // extended pointer on client thread descriptor 620 xptr_t socket_rx_client; // socket rx_client thread 621 bool_t socket_rx_valid; // socket rx_command valid 622 uint32_t socket_rx_cmd; // socket rx_command type 599 623 uint32_t payload; // number of bytes in payload 600 624 uint32_t status; // number of bytes in rx_buf … … 602 626 uint32_t moved_bytes; // number of bytes actually moved to rx_buf 603 627 628 #if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR 629 thread_t * this = CURRENT_THREAD; 630 uint32_t cycle = (uint32_t)hal_get_cycles(); 631 #endif 632 633 #if DEBUG_DEV_NIC_RX 634 uint32_t fdid; 635 uint32_t pid; 636 if( DEBUG_DEV_NIC_RX < cycle ) 637 printk("\n[%s] thread[%x,%x] enter / channel %d / plen %d / cycle %d\n", 638 __FUNCTION__, this->process->pid, this->trdid, chdev->channel, k_length, cycle ); 639 if( (DEBUG_DEV_NIC_RX < cycle) && (DEBUG_DEV_NIC_RX & 1)) 640 putb("64 first bytes in k_buf" , k_buf , 64 ); 641 #endif 642 604 643 // build extended pointers on list of sockets attached to NIC_RX chdev 605 644 root_xp = XPTR( local_cxy , &chdev->wait_root ); 606 645 lock_xp = XPTR( local_cxy , &chdev->wait_lock ); 607 646 608 // compute UDP packet checksum 609 checksum = dev_nic_udp_checksum( k_buf , k_length ); 610 611 // get checksum from received packet header 647 // extract checksum from received UDP packet header 612 648 pkt_checksum = ((uint16_t)k_buf[6] << 8) | (uint16_t)k_buf[7]; 613 649 650 // reset checksum field 651 k_buf[6] = 0; 652 k_buf[7] = 0; 653 654 // compute checksum from received UDP packet 655 checksum = dev_nic_tcp_udp_checksum( k_buf, 656 k_length, 657 pkt_src_addr, 658 pkt_dst_addr, 659 false ); // is_not_tcp 614 660 // discard corrupted packet 615 if( pkt_checksum != checksum ) return; 661 if( pkt_checksum != checksum ) 662 { 663 664 #if DEBUG_DEV_NIC_ERROR 665 printk("\n[WARNING] in %s : thread[%x,%x] discard corrupted packet on channel %d / cycle %d\n" 666 " expected checksum %x / received checksum %x\n", 667 __FUNCTION__, this->process->pid, this->trdid, chdev->channel, cycle, 668 (uint32_t)checksum, (uint32_t)pkt_checksum ); 669 #endif 670 return; 671 } 616 672 617 673 // get src_port and dst_port from UDP header … … 619 675 uint32_t pkt_dst_port = ((uint32_t)k_buf[2] << 8) | (uint32_t)k_buf[3]; 620 676 621 // discard unexpected 
packet622 if( xlist_is_empty( root_xp ) ) return;623 624 677 // take the lock protecting the sockets list 625 678 remote_busylock_acquire( lock_xp ); … … 658 711 else match_socket = local_match; 659 712 660 // exit loop when socket found 661 if( match_socket ) break; 713 // exit loop if matching 714 if( match_socket ) 715 { 716 717 #if DEBUG_DEV_NIC_RX 718 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) ); 719 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) ); 720 if( DEBUG_DEV_NIC_RX < cycle ) 721 printk("\n[%s] thread[%x,%x] found matching UDP socket[%d,%d] / state %s\n", 722 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) ); 723 #endif 724 break; 725 } 662 726 } 663 727 … … 666 730 667 731 // discard unexpected packet 668 if( match_socket == false ) return; 669 670 // build extended pointers on various socket fields 732 if( match_socket == false ) 733 { 734 735 #if DEBUG_DEV_NIC_ERROR 736 printk("\n[WARNING] in %s : thread[%x,%s] discard unexpected packet on channel %d / cycle %d\n", 737 __FUNCTION__, this->process->pid, this->trdid, chdev->channel, cycle ); 738 #endif 739 return; 740 } 741 742 // build extended pointers on socket.rx_buf and socket.lock 671 743 socket_rbuf_xp = XPTR( socket_cxy , &socket_ptr->rx_buf ); 672 744 socket_lock_xp = XPTR( socket_cxy , &socket_ptr->lock ); 673 socket_client_xp = XPTR( socket_cxy , &socket_ptr->rx_client );674 745 675 746 // take the lock protecting the socket … … 678 749 // get status & space from rx_buf 679 750 status = remote_buf_status( socket_rbuf_xp ); 680 space = CONFIG_SOCK_RX_BUF_SIZE - status; 681 682 // get client thread 683 client_xp = hal_remote_l64( socket_client_xp ); 751 space = (1 << CONFIG_SOCK_RX_BUF_ORDER) - status; 752 753 // get socket rx_client, rx_valid and rx_cmd values 754 socket_rx_client = hal_remote_l64( XPTR( socket_cxy , &socket_ptr->rx_client ) ); 755 socket_rx_valid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->rx_valid ) ); 756 socket_rx_cmd = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->rx_cmd ) ); 684 757 685 758 // get number of bytes in payload … … 691 764 // move payload from kernel buffer to socket rx_buf 692 765 remote_buf_put_from_kernel( socket_rbuf_xp, 693 k_buf + UDP_HEAD_LEN, 694 moved_bytes ); 695 // unblock client thread 696 if( client_xp != XPTR_NULL ) 697 { 698 thread_unblock( client_xp , THREAD_BLOCKED_IO ); 766 k_buf + UDP_HEAD_LEN, 767 moved_bytes ); 768 #if DEBUG_DEV_NIC_RX 769 if( DEBUG_DEV_NIC_RX < cycle ) 770 printk("\n[%s] thread[%x,%x] for socket[%d,%d] move %d bytes to rx_buf / buf_sts %d\n", 771 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 772 moved_bytes, remote_buf_status(socket_rbuf_xp), moved_bytes ); 773 #endif 774 775 // signal client thread if pending RECV command 776 if( (socket_rx_valid == true) && (socket_rx_cmd == CMD_RX_RECV) ) 777 { 778 // reset rx_valid 779 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->rx_valid), false ); 780 781 // report success to RX client thread 782 dev_nic_unblock_rx_client( socket_xp , CMD_STS_SUCCESS ); 783 784 #if DEBUG_DEV_NIC_RX 785 if( DEBUG_DEV_NIC_RX < cycle ) 786 printk("\n[%s] thread[%x,%x] for UDP socket[%x,%d] / unblock client thread\n", 787 __FUNCTION__, this->process->pid, this->trdid, pid, fdid ); 788 #endif 789 790 } 791 else 792 { 793 794 #if DEBUG_DEV_NIC_RX 795 if( DEBUG_DEV_NIC_RX < cycle ) 796 printk("\n[%s] thread[%x,%x] for socket[%x,%d] / no client thread\n" 797 " rx_valid %d / rx_cmd %s\n", 798 __FUNCTION__, this->process->pid, 
this->trdid, pid, fdid, 799 socket_rx_valid , socket_cmd_type_str(socket_rx_cmd) ); 800 #endif 801 699 802 } 700 803 … … 707 810 // This static function is called by the dev_nic_rx_server() function to handle one RX 708 811 // TCP segment contained in a kernel buffer defined by the <k_buf> & <k_length> arguments. 709 // The <seg_remote_addr> and <seg_local_addr> arguments are obtained from the received 710 // IP packet header. It the received segment doesn't match any connected socket attached 711 // to the selected chdev[k], or any listening socket waiting connection, or if the segment 712 // is corrupted, this segment is discarded. 713 // If required by the TCP flags, it registers an R2T request in the socket R2T queue 714 // to implement the TCP handcheck for close and connect. 812 // The <seg_remote_addr> and <seg_local_addr> arguments have been extracted from the IP 813 // IP header. The local and remote ports are obtained from the TCP header. 814 // It the received segment doesn't match any connected socket attached to the selected 815 // <chdev>, or any listening socket waiting connection, or if the segment is corrupted, 816 // the segment is discarded. This function implement the TCP error recovery protocol, 817 // as specified by the RFC. Depending on both the socket state, and the segment header: 818 // - it register data in the RX buffer, 819 // - it update the socket state and TCB, 820 // - it register acknolegce requests in the R2T queue, 821 // - it register connection requests in the CRQ queue, 715 822 /////////////////////////////////////////////////////////////////////////////////////////// 716 823 // Implementation note: … … 724 831 // the SYN, FIN, ACK and RST flags. It updates the socket state when required, moves 725 832 // data to the rx_buf when possible, and return. It takes the lock protecting the socket, 726 // because a nconnected socket is accessed by both the NIC_TX and NIC_RX server threads.833 // because a connected socket is accessed by both the NIC_TX and NIC_RX server threads. 727 834 // 4) If no matching connected socket has been found, it scans the list of listening 728 835 // sockets to find a matching listening socket. 
… … 760 867 bool_t socket_tx_valid; // TX command valid 761 868 uint32_t socket_tx_cmd; // TX command type 762 uint32_t socket_tx_todo; // number of TX bytes not sent yet763 869 uint32_t socket_tx_nxt; // next byte to send in TX stream 764 870 uint32_t socket_tx_una; // first unacknowledged byte in TX stream 871 uint32_t socket_tx_len; // number of bytes in tx_buf 872 uint32_t socket_tx_ack; // number of acknowledged bytes in tx_buf 765 873 bool_t socket_rx_valid; // RX command valid 766 874 uint32_t socket_rx_cmd; // TX command type … … 804 912 uint32_t seg_data_len = k_length - seg_hlen; // number of bytes in payload 805 913 806 #if DEBUG_DEV_NIC_RX 807 thread_t * this = CURRENT_THREAD; 808 uint32_t cycle; 914 uint32_t seg_data_dup; // number of duplicated bytes in payload 915 uint32_t seg_data_new; // number of new bytes in payload 916 917 #if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR 809 918 uint32_t fdid; 810 919 pid_t pid; 811 #endif 812 813 #if DEBUG_DEV_NIC_RX 814 cycle = (uint32_t)hal_get_cycles(); 815 if( cycle > DEBUG_DEV_NIC_RX ) 920 thread_t * this = CURRENT_THREAD; 921 uint32_t cycle = (uint32_t)hal_get_cycles(); 922 #endif 923 924 #if DEBUG_DEV_NIC_RX 925 if( DEBUG_DEV_NIC_RX < cycle ) 816 926 printk("\n[%s] thread[%x,%x] enters / tcp_length %d / tcp_flags %x / cycle %d\n", 817 927 __FUNCTION__, this->process->pid, this->trdid, k_length, seg_flags , cycle ); 818 928 #endif 819 929 820 // compute and check TCP checksum930 // reset checksum field 821 931 k_buf[16] = 0; 822 932 k_buf[17] = 0; 823 checksum = dev_nic_tcp_checksum( k_buf, 824 k_length, 825 seg_remote_addr, 826 seg_local_addr ); 827 933 934 // compute TCP checksum 935 checksum = dev_nic_tcp_udp_checksum( k_buf, 936 k_length, 937 seg_remote_addr, 938 seg_local_addr, 939 true ); // is_tcp 828 940 // discard segment if corrupted 829 941 if( seg_checksum != checksum ) 830 942 { 831 943 832 #if DEBUG_DEV_NIC_RX 833 if( cycle > DEBUG_DEV_NIC_RX ) 834 printk("\n[%s] thread[%x,%x] tcp checksum failure : received %x / computed %x\n", 835 __FUNCTION__, this->process->pid, this->trdid, seg_checksum, checksum ); 944 #if DEBUG_DEV_NIC_ERROR 945 printk("\n[WARNING] in %s : thread[%x,%x] / checksum failure on channel %d / cycle %d\n", 946 __FUNCTION__, this->process->pid, this->trdid, chdev->channel, cycle ); 836 947 #endif 837 948 return; 838 949 } 839 950 840 // scan list of attached sockets to find a matching TCP socket841 attached_match = false;842 843 951 // build extended pointer on xlist of sockets attached to NIC_RX chdev 844 952 root_xp = XPTR( local_cxy , &chdev->wait_root ); 845 953 lock_xp = XPTR( local_cxy , &chdev->wait_lock ); 846 954 955 attached_match = false; 956 847 957 // take the lock protecting the list of attached sockets 848 958 remote_busylock_acquire( lock_xp ); 849 959 960 // scan list of attached sockets to find a matching TCP socket 850 961 XLIST_FOREACH( root_xp , iter_xp ) 851 962 { … … 878 989 { 879 990 880 #if DEBUG_DEV_NIC_RX 881 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) ); 882 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) ); 883 if( cycle > DEBUG_DEV_NIC_RX ) 884 printk("\n[%s] thread[%x,%x] matching attached socket[%d,%d] / state %s\n", 885 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) ); 991 #if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR 992 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) ); 993 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) ); 994 #endif 995 996 #if DEBUG_DEV_NIC_RX 997 if( 
DEBUG_DEV_NIC_RX < cycle ) 998 printk("\n[%s] matching attached TCP socket[%d,%d] / state %s\n", 999 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 886 1000 #endif 887 1001 break; … … 912 1026 socket_tx_valid = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_valid )); 913 1027 socket_tx_cmd = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_cmd )); 914 socket_tx_todo = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_todo ));915 1028 socket_tx_nxt = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_nxt )); 916 1029 socket_tx_una = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_una )); 1030 socket_tx_ack = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_ack )); 1031 socket_tx_len = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_len )); 917 1032 918 1033 socket_rx_valid = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->rx_valid )); … … 926 1041 { 927 1042 //////////////////////// 928 case TCP_STATE_SYN_SENT: // TCP client waiting for SYN-ACK in connect handshake1043 case TCP_STATE_SYN_SENT: // TCP client waiting for SYN-ACK 929 1044 { 930 // [1] check ACK flag1045 // [1] & [2] check ACK and RST 931 1046 if( seg_ack_set ) 932 1047 { 933 if( seg_ack_num != TCP_ISS_CLIENT + 1 ) // bad ACK => report error 1048 bool_t ack_ok = (seg_ack_num == (CONFIG_SOCK_ISS_CLIENT + 1) ); 1049 1050 if( seg_rst_set && ack_ok ) 934 1051 { 935 1052 936 1053 #if DEBUG_DEV_NIC_RX 937 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : expect ack_num %x / get %x\n", 938 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 939 socket_state_str(socket_state), TCP_ISS_CLIENT + 1, seg_ack_num ); 940 #endif 941 // make an RST request to R2T queue 1054 if( DEBUG_DEV_NIC_RX < cycle ) 1055 printk("\n[%s] socket[%x,%d] %s RST received from remote TCP => close\n", 1056 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1057 #endif 1058 // report RST to local TCP client thread 1059 dev_nic_unblock_tx_client( socket_xp , CMD_STS_RST ); 1060 1061 // update socket state 1062 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1063 TCP_STATE_BOUND ); 1064 break; 1065 } 1066 1067 if( seg_rst_set && (ack_ok == false) ) 1068 { 1069 1070 #if DEBUG_DEV_NIC_ERROR 1071 printk("\n[ERROR] in %s : socket[%x,%d] %s RST but expect ack_num %x != rcvd %x => discard\n", 1072 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1073 CONFIG_SOCK_ISS_CLIENT + 1, seg_ack_num ); 1074 #endif 1075 break; 1076 } 1077 1078 if( (seg_rst_set == false) && (ack_ok == false) ) 1079 { 1080 1081 #if DEBUG_DEV_NIC_ERROR 1082 printk("\n[ERROR] in %s : socket[%x,%d] %s expected ack_num %x != rcvd %x => send RST\n", 1083 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1084 CONFIG_SOCK_ISS_CLIENT + 1, seg_ack_num ); 1085 #endif 1086 // send RST to remote TCP 942 1087 socket_put_r2t_request( socket_r2tq_xp, 943 1088 TCP_FLAG_RST, 944 1089 chdev->channel ); 945 946 // report error to local TX client thread947 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADACK );948 949 1090 break; 950 1091 } 951 1092 } 952 1093 953 // [2] check RST flag // receive RST => report error954 if( seg_rst_set )955 {956 957 #if DEBUG_DEV_NIC_RX958 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received RST flag\n",959 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) );960 #endif961 // update socket state962 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ),963 TCP_STATE_BOUND );964 965 // signal error to local TX client thread966 dev_nic_unblock_tx_client( socket_xp , CMD_STS_RST );967 968 
break;969 }970 971 1094 // [3] handle security & precedence TODO ... someday 972 1095 … … 976 1099 977 1100 #if DEBUG_DEV_NIC_RX 978 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received expected SYN-ACK\n", 979 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1101 if( DEBUG_DEV_NIC_RX < cycle ) 1102 printk("\n[%s] socket[%x,%d] %s : received expected SYN-ACK\n", 1103 __FUNCTION__, pid, fdid , socket_state_str(socket_state) ); 980 1104 #endif 981 1105 // set socket.tx_una … … 999 1123 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 1000 1124 } 1001 else // received SYN without ACK => client becomesserver1125 else // SYN without ACK => TCP client becomes a TCP server 1002 1126 { 1003 1127 1004 1128 #if DEBUG_DEV_NIC_RX 1005 printk("\n[%s] thread[%x,%x] for socket[%x,%d] %s : received SYN-ACK => become server\n", 1006 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1129 if( DEBUG_DEV_NIC_RX < cycle ) 1130 printk("\n[%s] socket[%x,%d] %s : received SYN without ACK => send a SYN_ACK\n", 1131 __FUNCTION__, pid, fdid , socket_state_str(socket_state) ); 1007 1132 #endif 1008 1133 // update socket.state 1009 hal_remote_s32( XPTR(socket_cxy,&socket_ptr->state), TCP_STATE_SYN_RCVD ); 1134 hal_remote_s32( XPTR(socket_cxy,&socket_ptr->state), 1135 TCP_STATE_SYN_RCVD ); 1010 1136 1011 1137 // set socket.tx_nxt 1012 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_nxt), TCP_ISS_SERVER ); 1138 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_nxt), 1139 CONFIG_SOCK_ISS_SERVER ); 1013 1140 1014 1141 // set socket.rx_nxt to seg_seq_num + 1 1015 1142 hal_remote_s32( XPTR(socket_cxy,&socket_ptr->rx_nxt), seg_seq_num + 1 ); 1016 1143 1017 // make a SYN.ACK request to R2T queue1144 // send SYN.ACK to remote TCP 1018 1145 socket_put_r2t_request( socket_r2tq_xp, 1019 1146 TCP_FLAG_SYN | TCP_FLAG_ACK, … … 1021 1148 } 1022 1149 break; 1023 } 1024 //////////////////////// 1025 case TCP_STATE_SYN_RCVD: // TCP server waiting last ACK in connect handshake 1026 { 1027 // [1] check sequence number 1028 if( seg_seq_num != socket_rx_nxt ) // unexpected SEQ_NUM => discard 1029 { 1030 1031 #if DEBUG_DEV_NIC_RX 1032 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : expect seq_num %x / get %x\n", 1033 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 1034 socket_state_str(socket_state), socket_rx_nxt, seg_seq_num ); 1035 #endif 1036 // discard segment without reporting 1037 break; 1038 } 1039 1040 // [2] handle RST flag // received RST => report error 1041 if( seg_rst_set ) 1042 { 1043 1044 #if DEBUG_DEV_NIC_RX 1045 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received RST flag\n", 1046 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1047 #endif 1048 // update socket state 1049 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), TCP_STATE_BOUND ); 1050 1051 // report error to local TX client thread 1052 dev_nic_unblock_tx_client( socket_xp , CMD_STS_RST ); 1053 1054 break; 1055 } 1056 1057 // [3] handle security & precedence TODO ... 
someday 1058 1059 // [4] handle SYN flag 1060 if( seg_syn_set ) // received SYN => discard 1061 { 1062 1063 #if DEBUG_DEV_NIC_RX 1064 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received SYN flag\n", 1065 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1066 #endif 1067 // discard segment without reporting 1068 break; 1069 } 1070 1071 // [5] handle ACK flag 1072 if( seg_ack_set == false ) // missing ACK => discard 1073 { 1074 1075 #if DEBUG_DEV_NIC_RX 1076 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : no ACK in TCP segment\n", 1077 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1078 #endif 1079 // discard segment without reporting 1080 break; 1081 } 1082 else if( seg_ack_num != (TCP_ISS_SERVER + 1) ) // unacceptable ACK 1083 { 1084 1085 #if DEBUG_DEV_NIC_RX 1086 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : expect ack_num %x / get %x\n", 1087 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 1088 socket_state_str(socket_state), TCP_ISS_SERVER + 1, seg_ack_num ); 1089 #endif 1090 1091 // register an RST request to R2TQ for remote TCP client 1092 socket_put_r2t_request( socket_r2tq_xp, 1093 TCP_FLAG_RST, 1094 chdev->channel ); 1095 1096 // report error to local TX client thread 1097 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADACK ); 1098 } 1099 else // acceptable ACK 1100 { 1101 1102 #if DEBUG_DEV_NIC_RX 1103 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received expected ACK\n", 1104 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1105 #endif 1106 // set socket.tx_una 1107 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_una), seg_ack_num ); 1108 1109 // update socket.state 1110 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1111 TCP_STATE_ESTAB ); 1112 1113 // report success to local TX client thread 1114 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 1115 } 1116 break; 1117 } 1118 ///////////////////// 1150 } // end state SYN_SENT 1151 1152 //////////////////////// all "connected" states 1153 case TCP_STATE_SYN_RCVD: 1119 1154 case TCP_STATE_ESTAB: 1120 1155 case TCP_STATE_FIN_WAIT1: … … 1125 1160 case TCP_STATE_TIME_WAIT: 1126 1161 { 1127 // [1] check sequence number : out_of_order segments not accepted 1128 if( seg_seq_num != socket_rx_nxt ) 1162 // [1] check SEQ_NUM 1163 // - we accept duplicate segments (i.e. seq_num < rx_next) 1164 // - we don't accept out of order segment (i.e. 
seq_num_num > rx_next) 1165 // => seq_num must be in window [rx_nxt - rx_win , rx_nxt] 1166 1167 bool_t seq_ok = is_in_window( seg_seq_num, 1168 (socket_rx_nxt - socket_rx_wnd), 1169 socket_rx_nxt ); 1170 1171 if( seq_ok == false ) // SEQ_NUM not acceptable 1129 1172 { 1130 1131 #if DEBUG_DEV_NIC_RX 1132 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : illegal SEQ_NUM %x / expected %x\n", 1133 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 1134 socket_state_str(socket_state), seg_seq_num, socket_rx_nxt ); 1135 #endif 1136 // discard segment 1137 break; 1138 } 1139 1140 // check all bytes in window when the payload exist 1141 // TODO : we could accept bytes that are in window, 1142 // but this implementation reject all bytes in segment 1143 if( seg_data_len > 0 ) 1144 { 1145 // compute min & max acceptable sequence numbers 1146 uint32_t seq_min = socket_rx_nxt; 1147 uint32_t seq_max = socket_rx_nxt + socket_rx_wnd - 1; 1148 1149 // compute sequence number for last byte in segment 1150 uint32_t seg_seq_last = seg_seq_num + seg_data_len - 1; 1151 1152 if( is_in_window( seg_seq_last, seq_min, seq_max ) == false ) 1173 if( seg_rst_set ) 1153 1174 { 1154 1175 1155 #if DEBUG_DEV_NIC_RX 1156 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : last SEQ_NUM %x not in [%x,%x]\n", 1157 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 1158 socket_state_str(socket_state), seg_seq_last, seq_min, seq_max ); 1159 #endif 1160 // discard segment 1176 #if DEBUG_DEV_NIC_ERROR 1177 printk("\n[ERROR] in %s : socket[%x,%d] %s expect seq_num %x != rcvd %x and RST => discard\n", 1178 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1179 CONFIG_SOCK_ISS_CLIENT + 1, seg_seq_num ); 1180 #endif 1161 1181 break; 1162 1182 } 1163 } 1164 1165 // [2] handle RST flag 1166 if( seg_rst_set ) 1183 else // no RST 1184 { 1185 // send ACK to remote TCP 1186 socket_put_r2t_request( socket_r2tq_xp, 1187 TCP_FLAG_ACK, 1188 chdev->channel ); 1189 #if DEBUG_DEV_NIC_ERROR 1190 printk("\n[ERROR] in %s : socket[%x,%d] %s expect seq_num %x != rcvd %x => ACK and discard\n", 1191 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1192 CONFIG_SOCK_ISS_CLIENT + 1, seg_seq_num ); 1193 #endif 1194 break; 1195 } 1196 } 1197 else // SEQ_NUM acceptable 1167 1198 { 1168 1169 #if DEBUG_DEV_NIC_RX 1170 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received RST flag\n", 1171 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) ); 1172 #endif 1173 if( (socket_state == TCP_STATE_ESTAB ) || 1174 (socket_state == TCP_STATE_FIN_WAIT1 ) || 1175 (socket_state == TCP_STATE_FIN_WAIT2 ) || 1176 (socket_state == TCP_STATE_CLOSE_WAIT) ) 1199 // compute number of new bytes & number of duplicated bytes 1200 if( seg_seq_num != socket_rx_nxt ) // duplicate segment 1177 1201 { 1178 // TODO all pending send & received commands 1179 // must receive "reset" responses 1180 1181 // TODO destroy the socket 1202 seg_data_dup = socket_rx_nxt - seg_seq_num; 1203 seg_data_new = (seg_data_len > seg_data_dup) ? 
1204 (seg_data_len - seg_data_dup) : 0; 1205 } 1206 else // expected segment 1207 { 1208 seg_data_dup = 0; 1209 seg_data_new = seg_data_len; 1210 } 1211 1212 #if DEBUG_DEV_NIC_RX 1213 if( DEBUG_DEV_NIC_RX < cycle ) 1214 printk("\n[%s] socket[%x,%d] %s seq_num %x / rx_nxt %x / len %d / new %d / dup %d\n", 1215 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1216 seg_seq_num, socket_rx_nxt, seg_data_len, seg_data_new, seg_data_dup ); 1217 #endif 1218 } 1219 1220 // [2] handle RST flag (depending on socket state) 1221 if( seg_rst_set ) 1222 { 1223 if( socket_state == TCP_STATE_SYN_RCVD ) 1224 { 1225 1226 #if DEBUG_DEV_NIC_RX 1227 if( DEBUG_DEV_NIC_RX < cycle ) 1228 printk("\n[%s] socket[%x,%d] %s RST received from remote TCP => report to user\n", 1229 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1230 #endif 1231 // report RST to local TX client thread 1232 dev_nic_unblock_tx_client( socket_xp , CMD_STS_RST ); 1233 1234 // update socket state 1235 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1236 TCP_STATE_BOUND ); 1237 break; 1238 } 1239 1240 else if( (socket_state == TCP_STATE_ESTAB ) || 1241 (socket_state == TCP_STATE_FIN_WAIT1 ) || 1242 (socket_state == TCP_STATE_FIN_WAIT2 ) || 1243 (socket_state == TCP_STATE_CLOSE_WAIT) ) 1244 { 1245 1246 #if DEBUG_DEV_NIC_RX 1247 if( DEBUG_DEV_NIC_RX < cycle ) 1248 printk("\n[%s] socket[%x,%d] %s / received RST flag\n", 1249 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1250 #endif 1251 // report RST to local TX client thread 1252 if( socket_tx_valid ) dev_nic_unblock_tx_client( socket_xp, 1253 CMD_STS_RST ); 1254 // report RST to local RX client thread 1255 if( socket_rx_valid ) dev_nic_unblock_rx_client( socket_xp, 1256 CMD_STS_RST ); 1257 // update socket state 1258 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1259 TCP_STATE_BOUND ); 1260 break; 1182 1261 } 1183 1262 else // states CLOSING / LAST_ACK / TIME_WAIT 1184 1263 { 1185 // TODO 1264 // update socket state 1265 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1266 TCP_STATE_BOUND ); 1267 break; 1186 1268 } 1269 } 1270 1271 // [3] handle security & precedence TODO ... someday 1272 1273 // [4] check SYN 1274 if( seg_syn_set ) // received SYN => send RST to remote 1275 { 1276 1277 #if DEBUG_DEV_NIC_ERROR 1278 printk("\n[ERROR] in %s socket[%x,%d] %s : received SYN flag => send RST-ACK\n", 1279 __FUNCTION__, pid, fdid , socket_state_str(socket_state) ); 1280 #endif 1281 // send RST & ACK to remote TCP 1282 socket_put_r2t_request( socket_r2tq_xp, 1283 TCP_FLAG_RST | TCP_FLAG_ACK, 1284 chdev->channel ); 1285 1286 // report RST to local TX client thread 1287 if( socket_tx_valid ) dev_nic_unblock_tx_client( socket_xp, 1288 CMD_STS_RST ); 1289 // report RST to local RX client thread 1290 if( socket_rx_valid ) dev_nic_unblock_rx_client( socket_xp, 1291 CMD_STS_RST ); 1292 // update socket state 1293 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1294 TCP_STATE_BOUND ); 1187 1295 break; 1188 1296 } 1189 1297 1190 // [3] handle security & precedence TODO ... 
someday 1191 1192 // [4] check SYN flag 1193 if( seg_syn_set ) // received SYN => ERROR 1298 // [5] handle ACK (depending on socket state) 1299 if( seg_ack_set == false ) // missing ACK => discard segment 1194 1300 { 1195 1301 1196 #if DEBUG_DEV_NIC_RX 1197 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : received unexpected SYN\n", 1198 __FUNCTION__, this->process->pid, this->trdid, pid, fdid , socket_state_str(socket_state) ); 1199 #endif 1200 // TODO signal error to user 1201 1202 // make an RST request to R2T queue 1203 socket_put_r2t_request( socket_r2tq_xp, 1204 TCP_FLAG_RST, 1205 chdev->channel ); 1206 1207 // update socket state 1208 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), TCP_STATE_BOUND ); 1209 1302 #if DEBUG_DEV_NIC_ERROR 1303 printk("\n[ERROR] in %s : socket[%x,%d] %s / no ACK in segment => discard\n", 1304 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1305 #endif 1210 1306 break; 1211 1307 } 1212 1308 1213 // [5] check ACK 1214 if( seg_ack_set == false ) // missing ACK 1309 // compute acceptable ACK 1310 bool_t ack_ok = is_in_window( seg_ack_num, 1311 socket_tx_una, 1312 socket_tx_nxt ); 1313 1314 if( socket_state == TCP_STATE_SYN_RCVD ) 1215 1315 { 1216 1217 #if DEBUG_DEV_NIC_RX 1218 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : no ACK flag\n", 1219 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) ); 1220 #endif 1221 // discard segment 1222 break; 1223 } 1224 else if( is_in_window( seg_ack_num, 1225 socket_tx_una, 1226 socket_tx_nxt ) == false ) // unacceptable ACK 1227 { 1228 1229 #if DEBUG_DEV_NIC_RX 1230 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : ACK_NUM %x not in [%x,%x]\n", 1231 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state), 1232 seg_ack_num, socket_tx_una, socket_tx_nxt ); 1233 #endif 1234 // discard segment 1235 break; 1236 } 1237 else // acceptable ack 1238 { 1239 // update socket.tx_una 1240 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_una), seg_ack_num ); 1241 1242 // update socket.tx_wnd 1243 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_wnd), seg_window ); 1244 1245 // check last data byte acknowledged for a SEND command 1246 if( (socket_tx_todo == 0) && 1247 (seg_ack_num == socket_tx_nxt) && 1248 (socket_tx_cmd == CMD_TX_SEND) ) 1316 if( ack_ok ) // acceptable ACK 1249 1317 { 1250 // signal success to TX client thread 1318 1319 #if DEBUG_DEV_NIC_RX 1320 if( DEBUG_DEV_NIC_RX < cycle ) 1321 printk("\n[%s] socket[%x,%d] %s : received expected ACK => update socket\n", 1322 __FUNCTION__, pid, fdid , socket_state_str(socket_state) ); 1323 #endif 1324 // set socket.tx_una 1325 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_una), seg_ack_num ); 1326 1327 // update socket.state 1328 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1329 TCP_STATE_ESTAB ); 1330 1331 // report success to local TX client thread 1251 1332 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 1252 1333 } 1334 else // send RST to remote 1335 { 1336 1337 #if DEBUG_DEV_NIC_ERROR 1338 printk("\n[ERROR] in %s : socket[%x,%d] %s / ACK %x not in [%x,%x] => discard\n", 1339 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1340 seg_ack_num, socket_tx_una, socket_tx_nxt ); 1341 #endif 1342 // send RST & ACK to remote TCP 1343 socket_put_r2t_request( socket_r2tq_xp, 1344 TCP_FLAG_RST | TCP_FLAG_ACK, 1345 chdev->channel ); 1346 break; 1347 } 1253 1348 } 1254 1255 // [7] handle URG flag TODO ... 
someday 1256 1257 // [8] Move DATA to rx_buf / ACK request to R2T queue / unblock rx_client 1258 if( seg_data_len ) 1349 1350 else if( (socket_state == TCP_STATE_ESTAB) || 1351 (socket_state == TCP_STATE_FIN_WAIT1) || 1352 (socket_state == TCP_STATE_FIN_WAIT2) || 1353 (socket_state == TCP_STATE_FIN_WAIT2) || 1354 (socket_state == TCP_STATE_CLOSE_WAIT) || 1355 (socket_state == TCP_STATE_CLOSING) ) 1259 1356 { 1260 if( (socket_state == TCP_STATE_ESTAB) || 1261 (socket_state == TCP_STATE_FIN_WAIT1) || 1262 (socket_state == TCP_STATE_FIN_WAIT2) ) 1357 if( ack_ok ) // acceptable ack 1358 { 1359 // compute number of acknowledged bytes 1360 uint32_t ack_bytes = seg_ack_num - socket_tx_una; 1361 1362 if( ack_bytes ) // handle acknowledged bytes 1363 { 1364 #if DEBUG_DEV_NIC_RX 1365 if( DEBUG_DEV_NIC_RX < cycle ) 1366 printk("\n[%s] socket[%x,%d] %d bytes acknowledged => update socket\n", 1367 __FUNCTION__, pid, fdid, ack_bytes ); 1368 #endif 1369 // update socket.tx_una, socket.tx_ack, and socket.tx_wnd fields 1370 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_una), 1371 seg_ack_num ); 1372 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_ack), 1373 socket_tx_ack + ack_bytes ); 1374 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_wnd), 1375 seg_window ); 1376 1377 // unblock the TX client thread if last byte acknowledged 1378 if( (socket_tx_ack + ack_bytes) == socket_tx_len ) 1379 { 1380 // report success to TX client thread 1381 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 1382 #if DEBUG_DEV_NIC_RX 1383 if( DEBUG_DEV_NIC_RX < cycle ) 1384 printk("\n[%s] socket[%x,%d] %s : last ack => unblock TX client thread\n", 1385 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1386 #endif 1387 } 1388 } 1389 1390 if( socket_state == TCP_STATE_FIN_WAIT1 ) 1391 { 1392 // update socket state 1393 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1394 TCP_STATE_FIN_WAIT2 ); 1395 } 1396 if( socket_state == TCP_STATE_FIN_WAIT2 ) 1397 { 1398 // TODO 1399 } 1400 else if( socket_state == TCP_STATE_CLOSING ) 1401 { 1402 // update socket state 1403 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->state), 1404 TCP_STATE_TIME_WAIT ); 1405 } 1406 else if( socket_state == TCP_STATE_CLOSING ) 1407 { 1408 // TODO 1409 } 1410 } 1411 else // unacceptable ACK => discard segment 1412 { 1413 1414 #if DEBUG_DEV_NIC_ERROR 1415 printk("\n[ERROR] in %s : socket[%x,%d] %s / ACK %x not in [%x,%x] => discard\n", 1416 __FUNCTION__, pid, fdid, socket_state_str(socket_state), 1417 seg_ack_num, socket_tx_una, socket_tx_nxt ); 1418 #endif 1419 break; 1420 } 1421 } 1422 1423 else if( socket_state == TCP_STATE_LAST_ACK ) 1424 { 1425 // TODO 1426 } 1427 1428 else if( socket_state == TCP_STATE_TIME_WAIT ) 1429 { 1430 // TODO 1431 } 1432 1433 // [6] handle URG flag TODO ... 
someday 1434 1435 // [7] handle received data : update socket state, 1436 // move data to rx_buf, register ACK request to R2T queue, 1437 // unblock the RX client thread in case of pending RX_RECV command 1438 if((socket_state == TCP_STATE_ESTAB) || 1439 (socket_state == TCP_STATE_FIN_WAIT1) || 1440 (socket_state == TCP_STATE_FIN_WAIT2) ) 1441 { 1442 // register new bytes if requested 1443 if( seg_data_new ) 1263 1444 { 1264 1445 // get number of bytes already stored in rx_buf 1265 1446 uint32_t status = remote_buf_status( socket_rx_buf_xp ); 1266 1447 1267 // compute empty space in rx_buf 1268 uint32_t space = CONFIG_SOCK_RX_BUF_SIZE - status; 1269 1270 // compute number of bytes to move : min (space , seg_data_len) 1271 uint32_t nbytes = ( space < seg_data_len ) ? space : seg_data_len; 1272 1273 // move payload from k_buf to rx_buf 1448 // compute space in rx_buf and actual number of acceptable bytes 1449 // when (space < seg_data_new) the last new bytes are discarded 1450 uint32_t space = (1 << CONFIG_SOCK_RX_BUF_ORDER) - status; 1451 uint32_t rcv_bytes = (space < seg_data_new) ? space : seg_data_new; 1452 1453 // move new bytes from k_buf to rx_buf 1274 1454 remote_buf_put_from_kernel( socket_rx_buf_xp, 1275 k_buf + seg_hlen ,1276 nbytes );1277 #if DEBUG_DEV_NIC_RX 1278 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : move %d bytes to rx_buf\n", 1279 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,1280 socket_state_str(socket_state), nbytes );1281 #endif1282 // update socket.rx_nxt 1455 k_buf + seg_hlen + seg_data_dup, 1456 rcv_bytes ); 1457 #if DEBUG_DEV_NIC_RX 1458 if( DEBUG_DEV_NIC_RX < cycle ) 1459 printk("\n[%s] socket[%x,%d] %s : move %d bytes to rx_buf\n", 1460 __FUNCTION__, pid, fdid, socket_state_str(socket_state), rcv_bytes ); 1461 #endif 1462 // update socket.rx_nxt and socket_rx_wnd fields 1283 1463 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 1284 socket_rx_nxt + nbytes ); 1285 1286 // update socket.rx_wnd 1464 socket_rx_nxt + rcv_bytes ); 1287 1465 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_wnd ), 1288 socket_rx_wnd - nbytes ); 1289 1290 // make an ACK request to R2T queue 1466 socket_rx_wnd - rcv_bytes ); 1467 1468 // unblock RX client if required 1469 if( (socket_rx_valid == true) && (socket_rx_cmd == CMD_RX_RECV) ) 1470 { 1471 // reset rx_valid 1472 hal_remote_s32( XPTR(socket_cxy,&socket_ptr->rx_valid), false ); 1473 1474 // report success to RX client thread 1475 dev_nic_unblock_rx_client( socket_xp , CMD_STS_SUCCESS ); 1476 #if DEBUG_DEV_NIC_RX 1477 if( DEBUG_DEV_NIC_RX < cycle ) 1478 printk("\n[%s] socket[%x,%d] %s : last data => unblock RX client thread\n", 1479 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1480 #endif 1481 } 1482 } 1483 1484 // make an ACK request to remote 1485 socket_put_r2t_request( socket_r2tq_xp, 1486 TCP_FLAG_ACK, 1487 chdev->channel ); 1488 } // end payload handling 1489 1490 // [8] handle FIN flag depending on socket state 1491 if( (socket_state == TCP_STATE_SYN_RCVD) || 1492 (socket_state == TCP_STATE_ESTAB ) ) 1493 { 1494 if( seg_fin_set ) 1495 { 1496 1497 #if DEBUG_DEV_NIC_RX 1498 if( DEBUG_DEV_NIC_RX < cycle ) 1499 printk("\n[%s] socket[%x,%d] %s : FIN-ACK => goes CLOSE_WAIT\n", 1500 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1501 #endif 1502 // update socket.rx_nxt when FIN received 1503 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 1504 socket_rx_nxt + 1 ); 1505 1506 // update socket state 1507 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1508 
TCP_STATE_CLOSE_WAIT ); 1509 1510 // send ACK to remote TCP 1291 1511 socket_put_r2t_request( socket_r2tq_xp, 1292 1512 TCP_FLAG_ACK, … … 1294 1514 1295 1515 // check pending RX_RECV command 1296 if( (socket_rx_valid == true) && 1297 (socket_rx_cmd == CMD_RX_RECV) ) 1516 if( (socket_rx_valid == true) && (socket_rx_cmd == CMD_RX_RECV) ) 1298 1517 { 1299 1518 // reset rx_valid 1300 1519 hal_remote_s32( XPTR(socket_cxy,&socket_ptr->rx_valid), false ); 1301 1520 1302 // report success to RX client thread 1303 dev_nic_unblock_rx_client( socket_xp , CMD_STS_SUCCESS ); 1304 #if DEBUG_DEV_NIC_RX 1305 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : unblock waiting RX client thread\n", 1306 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 1307 socket_state_str(socket_state) ); 1308 #endif 1309 } 1310 } 1311 } 1312 1313 // [9] handle FIN flag 1314 if( socket_state == TCP_STATE_ESTAB ) 1315 { 1316 if( seg_fin_set ) // received ACK & FIN 1317 { 1318 1319 #if DEBUG_DEV_NIC_RX 1320 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : FIN-ACK => goes CLOSE_WAIT\n", 1321 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 1322 socket_state_str(socket_state) ); 1323 #endif 1324 // update socket.rx_nxt when FIN received 1325 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 1326 socket_rx_nxt + 1 ); 1327 1328 // update socket state 1329 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1330 TCP_STATE_CLOSE_WAIT ); 1331 1332 // make an ACK request to R2T queue 1333 socket_put_r2t_request( socket_r2tq_xp, 1334 TCP_FLAG_ACK, 1335 chdev->channel ); 1336 1337 // check pending RX_RECV command 1338 if( (socket_rx_valid == true) && 1339 (socket_rx_cmd == CMD_RX_RECV) ) 1340 { 1341 // reset rx_valid 1342 hal_remote_s32( XPTR(socket_cxy,&socket_ptr->rx_valid), false ); 1343 1344 // report error to RX client thread 1521 // report FIN to RX client thread 1345 1522 dev_nic_unblock_rx_client( socket_xp , CMD_STS_EOF ); 1346 1523 #if DEBUG_DEV_NIC_RX 1347 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : unblock RX client waiting on RECV\n", 1348 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,1349 socket_state_str(socket_state) );1524 if( DEBUG_DEV_NIC_RX < cycle ) 1525 printk("\n[%s] socket[%x,%d] %s : unblock RX client waiting on RECV\n", 1526 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1350 1527 #endif 1351 1528 } … … 1354 1531 else if( socket_state == TCP_STATE_FIN_WAIT1 ) 1355 1532 { 1356 if( seg_fin_set ) // received ACK & FIN1533 if( seg_fin_set ) 1357 1534 { 1358 1535 1359 1536 #if DEBUG_DEV_NIC_RX 1360 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : FIN-ACK => goes CLOSING\n", 1361 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,1362 socket_state_str(socket_state) );1363 #endif 1364 // update socket.rx_nxt when FIN received1537 if( DEBUG_DEV_NIC_RX < cycle ) 1538 printk("\n[%s] socket[%x,%d] %s : FIN-ACK => goes CLOSING\n", 1539 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1540 #endif 1541 // update socket.rx_nxt 1365 1542 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 1366 1543 socket_rx_nxt + 1 ); … … 1370 1547 TCP_STATE_CLOSING ); 1371 1548 1372 // make an ACK request to R2T queue1549 // send ACK request to remote 1373 1550 socket_put_r2t_request( socket_r2tq_xp, 1374 1551 TCP_FLAG_ACK, … … 1379 1556 1380 1557 #if DEBUG_DEV_NIC_RX 1381 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : only ACK => goes FIN_WAIT2\n", 1382 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,1383 socket_state_str(socket_state) );1558 if( 
DEBUG_DEV_NIC_RX < cycle ) 1559 printk("\n[%s] socket[%x,%d] %s : only ACK => goes FIN_WAIT2\n", 1560 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1384 1561 #endif 1385 1562 // update socket state … … 1394 1571 1395 1572 #if DEBUG_DEV_NIC_RX 1396 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s : FIN-ACK => goes CLOSED / unblock client\n", 1397 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,1398 socket_state_str(socket_state) );1573 if( DEBUG_DEV_NIC_RX < cycle ) 1574 printk("\n[%s] socket[%x,%d] %s : FIN-ACK => goes CLOSED / unblock client\n", 1575 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1399 1576 #endif 1400 1577 // update socket.rx_nxt when FIN received … … 1438 1615 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 1439 1616 } 1440 } // end case connected edstates1617 } // end case connected states 1441 1618 } // end switch socket state 1442 1619 … … 1491 1668 { 1492 1669 1493 #if DEBUG_DEV_NIC_RX 1494 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) ); 1495 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) ); 1496 if( cycle > DEBUG_DEV_NIC_RX ) 1497 printk("\n[%s] thread[%x,%x] matching listening socket[%d,%d] / state %s\n", 1498 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, socket_state_str(socket_state) ); 1670 #if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR 1671 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ) ); 1672 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ) ); 1673 #endif 1674 1675 #if DEBUG_DEV_NIC_RX 1676 if( DEBUG_DEV_NIC_RX < cycle ) 1677 printk("\n[%s] matching listening socket[%d,%d] / state %s\n", 1678 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1499 1679 #endif 1500 1680 break; … … 1509 1689 { 1510 1690 // The actions depend on the received segment flags 1511 // - discard segment for RST or ACK 1512 // - update socket state & remote IP address, 1513 // register connect request in socket CRQ queue, 1514 // and unblock client thread for SYN 1691 // - discard segment for RST or ACK, 1692 // - for SYN, register the connect request in listening socket CRQ queue, 1693 // and unblock the client thread in case of pending RX_ACCEPT command. 
1515 1694 1516 // discard segment if RST flag1517 if( seg_rst_set ) 1695 // [1] check RST 1696 if( seg_rst_set ) // discard segment 1518 1697 { 1519 1698 1520 #if DEBUG_DEV_NIC_RX 1521 if( cycle > DEBUG_DEV_NIC_RX ) 1522 printk("\n[%s] thread[%x,%x] for listening socket[%x,%d] : received RST\n", 1523 __FUNCTION__, this->process->pid, this->trdid, pid, fdid ); 1699 #if DEBUG_DEV_NIC_ERROR 1700 printk("\n[ERROR] in %s : socket[%x,%d] %s / received RST => discard segment\n", 1701 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1524 1702 #endif 1525 1703 return; 1526 1704 } 1527 1705 1528 // discard segment if ACK flag1529 if( seg_ack_set ) 1706 // [2] check ACK 1707 if( seg_ack_set ) // send RST to remote 1530 1708 { 1531 1709 1532 #if DEBUG_DEV_NIC_RX 1533 if( cycle > DEBUG_DEV_NIC_RX ) 1534 printk("\n[%s] thread[%x,%x] for listening socket[%x,%d] : received ACK\n", 1535 __FUNCTION__, this->process->pid, this->trdid, pid, fdid ); 1536 #endif 1710 #if DEBUG_DEV_NIC_ERROR 1711 printk("\n[ERROR] in %s : socket[%x,%d] %s received ACK => send RST & discard \n", 1712 __FUNCTION__, pid, fdid, socket_state_str(socket_state) ); 1713 #endif 1714 // make an RST request to R2T queue 1715 socket_put_r2t_request( socket_r2tq_xp, 1716 TCP_FLAG_RST, 1717 chdev->channel ); 1537 1718 return; 1538 1719 } 1539 1720 1540 // SYN flag == CONNECT request / seq_num cannot be wrong 1721 // [3] handle security & precedence TODO ... someday 1722 1723 // handle SYN == CONNECT request 1541 1724 if( seg_syn_set ) 1542 1725 { 1543 // build extended pointer on listening socket CRQ 1726 // build extended pointers on various listening socket fields 1727 socket_lock_xp = XPTR( socket_cxy , &socket_ptr->lock ); 1544 1728 socket_crqq_xp = XPTR( socket_cxy , &socket_ptr->crqq ); 1729 socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq ); 1730 1731 // take the lock protecting the matching socket 1732 remote_queuelock_acquire( socket_lock_xp ); 1545 1733 1546 1734 // try to register request into CRQ queue … … 1550 1738 seg_seq_num, 1551 1739 seg_window ); 1552 1553 1740 if ( error ) // CRQ full 1554 1741 { 1555 1742 1556 #if DEBUG_DEV_NIC_RX 1557 if( cycle > DEBUG_DEV_NIC_RX ) 1558 printk("\n[%s] thread[%x,%x] listening socket[%x,%d] CRQ full => send RST\n", 1559 __FUNCTION__, this->process->pid, this->trdid, pid, fdid ); 1743 #if DEBUG_DEV_NIC_ERROR 1744 printk("\n[ERROR] in %s : listening socket[%x,%d] %s receive SYN but CRQ full => send RST\n", 1745 __FUNCTION__, pid, fdid ); 1560 1746 #endif 1561 1747 // make an RST request to R2T queue … … 1564 1750 chdev->channel ); 1565 1751 } 1566 else // new connection request registered inCRQ1752 else // register request in listening socket CRQ 1567 1753 { 1568 1754 1569 1755 #if DEBUG_DEV_NIC_RX 1756 if( DEBUG_DEV_NIC_RX < cycle ) 1570 1757 if( cycle > DEBUG_DEV_NIC_RX ) 1571 printk("\n[%s] thread[%x,%x] for listening socket[%x,%d] : register request in CRQ\n", 1572 __FUNCTION__, this->process->pid, this->trdid, pid, fdid ); 1573 #endif 1574 // check pending RX_ACCEPT command 1575 if( (hal_remote_l32(XPTR(socket_cxy,&socket_ptr->rx_valid)) == true) && 1576 (hal_remote_l32(XPTR(socket_cxy,&socket_ptr->rx_cmd)) == CMD_RX_ACCEPT) ) 1758 printk("\n[%s] listening socket[%x,%d] register request in CRQ\n", 1759 __FUNCTION__, pid, fdid ); 1760 #endif 1761 bool_t rx_valid = hal_remote_l32( XPTR(socket_cxy , &socket_ptr->rx_valid)); 1762 uint32_t rx_cmd = hal_remote_l32( XPTR(socket_cxy , &socket_ptr->rx_cmd)); 1763 1764 // check pending ACCEPT command 1765 if( rx_valid && (rx_cmd == 
CMD_RX_ACCEPT) ) 1577 1766 { 1578 1767 // reset rx_valid 1579 1768 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_valid ), false ); 1580 1769 1581 // report success to RX client thread 1770 // report success to RX client thread, that will 1771 // create a new socket and request a SYN-ACK to TX server thread 1582 1772 dev_nic_unblock_rx_client( socket_xp , CMD_STS_SUCCESS ); 1583 1773 1584 1774 #if DEBUG_DEV_NIC_RX 1775 if( DEBUG_DEV_NIC_RX < cycle ) 1585 1776 if( cycle > DEBUG_DEV_NIC_RX ) 1586 printk("\n[%s] thread[%x,%x] forlistening socket[%x,%d] unblock RX client thread\n",1587 __FUNCTION__, this->process->pid, this->trdid, pid,fdid );1777 printk("\n[%s] listening socket[%x,%d] unblock RX client thread\n", 1778 __FUNCTION__, fdid ); 1588 1779 #endif 1589 1780 } 1590 1781 } // end register request in CRQ 1782 1783 // release the lock protecting the matching socket 1784 remote_queuelock_release( socket_lock_xp ); 1785 1591 1786 } // end if SYN 1592 1787 1593 1788 return; 1594 1789 1595 1790 } // end if listening_match 1596 1791 1597 // 6. no socket found => discard segment 1598 1599 #if DEBUG_DEV_NIC_RX 1600 if( cycle > DEBUG_DEV_NIC_RX ) 1601 printk("\n[%s] thread[%x,%x] exit failure : no socket found => discard segment\n", 1602 __FUNCTION__, this->process->pid, this->trdid ); 1792 // 6. no attached socket found and no listening socket found => discard segment 1793 1794 #if DEBUG_DEV_NIC_ERROR 1795 printk("\n[ERROR] in %s : thread[%x,%d] / unexpected TCP segment => discard / cycle %d\n", 1796 __FUNCTION__, this->process->pid, this->trdid, chdev->channel, cycle ); 1603 1797 #endif 1604 1798 … … 1618 1812 1619 1813 thread_t * this = CURRENT_THREAD; 1620 1814 1621 1815 // check thread can yield 1622 1816 thread_assert_can_yield( this , __FUNCTION__ ); … … 1626 1820 "illegal chdev type or direction" ); 1627 1821 1628 #if DEBUG_DEV_NIC_RX 1629 uint32_t cycle = (uint32_t)hal_get_cycles(); 1822 #if DEBUG_DEV_NIC_RX || DEBUG_DEV_NIC_ERROR 1823 uint32_t cycle = (uint32_t)hal_get_cycles(); 1824 #endif 1825 1826 #if DEBUG_DEV_NIC_RX 1630 1827 if( cycle > DEBUG_DEV_NIC_RX ) 1631 1828 printk("\n[%s] thread[%x,%x] starts / cycle %d\n", 1632 1829 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1633 1830 #endif 1831 1832 // avoid warning 1833 ip_length = 0; 1834 error = 0; 1634 1835 1635 1836 // get extended pointers on server tread and chdev … … 1674 1875 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1675 1876 #endif 1676 1877 // check possible error reported by NIC ISR 1878 if( this->nic_cmd.error ) 1879 { 1880 printk("\n[PANIC] in %s : %s DMA engine cannot access RX_QUEUE / cycle %d\n", 1881 __FUNCTION__, chdev->name , (uint32_t)hal_get_cycles() ); 1882 } 1677 1883 } 1678 1884 else // success => handle packet … … 1682 1888 cycle = (uint32_t)hal_get_cycles(); 1683 1889 if( DEBUG_DEV_NIC_RX < cycle ) 1684 dev_nic_packet_display( false, // is_tx1685 this->process->pid,1686 this->trdid,1687 cycle,1688 k_buf );1689 1890 #endif 1690 1891 … … 1697 1898 { 1698 1899 1699 #if DEBUG_DEV_NIC_RX 1700 cycle = (uint32_t)hal_get_cycles(); 1701 if( DEBUG_DEV_NIC_RX < cycle ) 1702 printk("\n[%s] thread[%x,%x] discard ETH packet / cycle %d\n", 1900 #if DEBUG_DEV_NIC_ERROR 1901 printk("\n[WARNING] in %s : thread[%x,%x] discard ETH packet / cycle %d\n", 1703 1902 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1704 1903 #endif … … 1723 1922 { 1724 1923 1725 #if DEBUG_DEV_NIC_RX 1726 cycle = (uint32_t)hal_get_cycles(); 1727 if( DEBUG_DEV_NIC_RX < cycle ) 1728 printk("\n[%s] thread[%x,%x] 
discarded IP packet / cycle %d\n", 1924 #if DEBUG_DEV_NIC_ERROR 1925 printk("\n[WARNING] in %s : thread[%x,%x] discard IP packet / cycle %d\n", 1729 1926 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1730 1927 #endif … … 1762 1959 { 1763 1960 1764 #if DEBUG_DEV_NIC_ RX1961 #if DEBUG_DEV_NIC_ERROR 1765 1962 cycle = (uint32_t)hal_get_cycles(); 1766 1963 if( DEBUG_DEV_NIC_RX < cycle ) 1767 printk("\n[ %s] thread[%x,%x] discarded unsupported transport protocol%d\n",1964 printk("\n[WARNING] in %s : thread[%x,%x] unsupported transport protocol %d / cycle %d\n", 1768 1965 __FUNCTION__, this->process->pid, this->trdid, trsp_protocol, cycle ); 1769 1966 #endif 1770 1967 continue; 1771 1968 } 1772 } 1773 } // end of while loop1774 } // end dev_nic_rx_server()1969 } // end else success 1970 } // end of while loop 1971 } // end dev_nic_rx_server() 1775 1972 1776 1973 … … 1782 1979 1783 1980 /////////////////////////////////////////////////////////////////////////////////////////// 1784 // This static function is called by the dev_nic_tx_ build_packet() function.1981 // This static function is called by the dev_nic_tx_send_packet() function. 1785 1982 // It moves one ETH/IP/UDP packet from the kernel buffer identified by the <buffer> and 1786 1983 // <length> arguments to the NIC_TX_QUEUE identified the <chdev> argument. … … 1816 2013 this->nic_cmd.buffer = k_buf; 1817 2014 this->nic_cmd.length = length; 2015 this->nic_cmd.error = 0; 1818 2016 1819 2017 while( 1 ) … … 1868 2066 // <socket_xp> argument. The <length> argument defines the number of bytes in payload. 1869 2067 // It set the "src_port", "dst_port", "total_length" and "checksum" fields in UDP header. 1870 // The payload must be previouly loaded in the pernel buffer.2068 // The payload must be previouly loaded in the kernel buffer. 1871 2069 /////////////////////////////////////////////////////////////////////////////////////////// 1872 2070 // @ k_buf : [in] pointer on first byte of UDP header in kernel buffer. … … 1904 2102 k_buf[3] = remote_port; 1905 2103 2104 // reset checksum 2105 k_buf[6] = 0; 2106 k_buf[7] = 0; 2107 1906 2108 // set packet length in header 1907 2109 k_buf[4] = total_length >> 8; … … 1909 2111 1910 2112 // compute UDP packet checksum 1911 checksum = dev_nic_udp_checksum( k_buf , total_length ); 1912 2113 checksum = dev_nic_tcp_udp_checksum( k_buf, 2114 total_length, 2115 local_addr, 2116 remote_addr, 2117 false ); // is_not_tcp 1913 2118 // set checksum 1914 2119 k_buf[6] = checksum >> 8; … … 1920 2125 // This static function is called by the dev_nic_tx_server() function. 1921 2126 // It builds a TCP header in the kernel buffer defined by the <k_buf> argument. 1922 // The payload must have been previouly registered in this buffer .2127 // The payload must have been previouly registered in this buffer (for checksum). 1923 2128 // The "local_addr", "local_port", "remote_addr", "remote_port", seq_num", "ack_num", 1924 2129 // and "window" fields are obtained from the <socket_xp> argument. 
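Both the UDP and TCP header builders above delegate the checksum to the unified dev_nic_tcp_udp_checksum( buf , length , src_addr , dst_addr , is_tcp ) helper, after zeroing the checksum field in the header. The sketch below shows the standard way such a pseudo-header checksum is computed (RFC 768 / RFC 793 / RFC 1071). It is a generic illustration written against the big-endian byte layout used by the builders above, not the kernel's actual implementation; the 0x11 UDP protocol number is the usual IANA value, not a constant taken from this changeset.

// Illustrative IPv4 pseudo-header checksum over a UDP packet or TCP segment.
// <buf> points to the first byte of the transport header, <length> is the
// transport header + payload size in bytes, addresses are 32-bit IPv4 values.
// The caller must have zeroed the checksum field in <buf> beforehand.
static uint16_t trsp_checksum_sketch( uint8_t  * buf,
                                      uint32_t   length,
                                      uint32_t   src_addr,
                                      uint32_t   dst_addr,
                                      bool_t     is_tcp )
{
    uint32_t sum = 0;
    uint32_t i;

    // pseudo-header : source address, destination address, protocol, length
    sum += (src_addr >> 16) + (src_addr & 0xFFFF);
    sum += (dst_addr >> 16) + (dst_addr & 0xFFFF);
    sum += is_tcp ? 0x06 : 0x11;            // PROTOCOL_TCP / UDP protocol number
    sum += length;

    // transport header + payload, taken as big-endian 16-bit words
    for( i = 0 ; (i + 1) < length ; i += 2 )
    {
        sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
    }
    if( length & 1 ) sum += ((uint32_t)buf[length - 1] << 8);   // odd trailing byte

    // fold the carries into 16 bits and take the one's complement
    while( sum >> 16 ) sum = (sum & 0xFFFF) + (sum >> 16);

    // note : for UDP a computed value of 0 is transmitted as 0xFFFF
    return (uint16_t)(~sum & 0xFFFF);
}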
… … 1999 2204 2000 2205 // compute TCP segment checksum 2001 checksum = dev_nic_tcp_checksum( k_buf, 2002 total_length, 2003 src_addr, 2004 dst_addr ); 2206 checksum = dev_nic_tcp_udp_checksum( k_buf, 2207 total_length, 2208 src_addr, 2209 dst_addr, 2210 true ); // is_tcp 2005 2211 // set "checksum" 2006 2212 k_buf[16] = checksum >> 8; … … 2108 2314 } // end dev_nic_tx_build_eth_header() 2109 2315 2110 /////////////////////////////////////////////////////////////////////////////////////////// 2111 // This static function is called by the dev_nic_tx_server() function to handle one TX 2112 // command, or one R2T request, as defined by the <cmd_valid> and <r2t_valid> arguments, 2113 // for the socket identified by the <socket_xp> argument. It builds an ETH/IP/UDP packet 2114 // or ETH/IP/TCP segment, in the buffer defined by the <k_buf> argument, and registers 2115 // it in the NIC_TX queue defined by the <chdev> argument. 2116 // For a TCP header, the "seq_num", ack_num", and "window" fiels are defined by the 2117 // "socket.tx_next", "socket.rx_next" and "socket.rx_wnd" fields respectively. 2118 // It updates the "socket.state", "socket.tx_nxt", "socket.r2tq", and "socket.crqq" 2119 // The supported TX command types are CONNECT / ACCEPT / SEND / CLOSE. 2120 // fields as required by the command type. 2121 // - For an UDP socket, it reset the "socket.tx_valid" field, and unblock the client 2122 // thread when the packet has been sent, or when an error must be reported. 2123 // - For a TCP socket, it reset the "socket.tx_valid" field when the segment has been 2124 // sent, but does not unblocks the client thread, that will be unblocqued by the 2125 // NIC_RX thread when the TX command is fully completed. 2316 2317 /////////////////////////////////////////////////////////////////////////////////////////// 2318 // This static function implement the TCP protocol as specified by the RFC. 2319 // It is called by the dev_nic_tx_server() function to handle one TX command, 2320 // or one R2T request, for the socket identified by the <socket_xp> argument. 2321 // It builds an ETH/IP/UDP packet or ETH/IP/TCP segment, in the 2 Kbytes kernel buffer, 2322 // defined by the <k_buf> argument from informations found in socket descriptor. 2323 // It returns a command status code (defined in the ksocket.h file), and returns in the 2324 // <total_length> argument the actual packet length. 2325 // It updates the "socket.state", "socket.tx_nxt", "socket.r2tq", "socket.crqq", 2326 // "socket.todo" fields as required by the command type, but it does NOT reset 2327 // the "socket.tx_valid" field and does NOT unblock the client thread. 2328 // It does NOt take the socket lock, that is taken by the dev_nic_server(). 2126 2329 /////////////////////////////////////////////////////////////////////////////////////////// 2127 2330 // To build a packet, it makes the following actions: 2128 // 1) it takes the lock protecting the socket state. 2129 // 2) it get the command arguments from socket descriptor. 2130 // 3) it build an UDP packet or a TCP segment, depending on command type and socket state. 2131 // 4) it updates the socket state. 2132 // 5) it releases the lock protecting the socket. 2133 // 6) it build the IP header. 2134 // 7) it build the ETH header. 2135 // 8) it copies the packet in the NIC_TX queue. 2136 /////////////////////////////////////////////////////////////////////////////////////////// 2137 // @ cmd_state : [in] TX command valid in socket descriptor. 
2138 // @ r2t_valid : [in] R2T request valid in command descriptor. 2139 // @ socket_xp : [in] extended pointer on client socket. 2140 // @ k_buf : [in] local pointer on kernel buffer (2 Kbytes). 2141 // @ chdev : [in] local pointer on NIC_RX chdev. 2142 /////////////////////////////////////////////////////////////////////////////////////////// 2143 static void dev_nic_tx_build_packet( bool_t cmd_valid, 2144 bool_t r2t_valid, 2145 xptr_t socket_xp, 2146 uint8_t * k_buf, 2147 chdev_t * chdev ) 2331 // 1) it get the command arguments from socket descriptor. 2332 // 2) it build an UDP packet or a TCP segment, and update socket state. 2333 // 3) it build the IP header. 2334 // 4) it build the ETH header. 2335 /////////////////////////////////////////////////////////////////////////////////////////// 2336 // @ socket_xp : [in] extended pointer on client socket. 2337 // @ k_buf : [in] local pointer on kernel buffer (2 Kbytes). 2338 // @ total_length : [out] total number of bytes written in k_buf. 2339 // @ return command status. 2340 /////////////////////////////////////////////////////////////////////////////////////////// 2341 static socket_cmd_sts_t dev_nic_tx_build_packet( xptr_t socket_xp, 2342 uint8_t * k_buf, 2343 uint32_t * total_length ) 2148 2344 { 2149 2345 socket_t * socket_ptr; 2150 2346 cxy_t socket_cxy; 2151 2347 xptr_t client_xp; // extended pointer on client thread 2348 bool_t cmd_valid; // valid user command 2349 bool_t r2t_valid; // valid R2T queue request 2152 2350 uint32_t cmd_type; // NIC command type 2153 uint8_t * tx_buf; // local pointer on kernelbuffer for payload2351 uint8_t * tx_buf; // local pointer on socket buffer for payload 2154 2352 uint32_t len; // tx_buf length (bytes) 2155 2353 uint32_t todo; // number of bytes not yet sent 2156 2354 uint32_t socket_type; // socket type (UDP/TCP) 2157 2355 uint32_t socket_state; // socket state 2158 xptr_t socket_lock_xp; // extended pointer on socket lock2159 2356 xptr_t socket_r2tq_xp; // extended pointer on R2T queue 2160 2357 uint32_t src_ip_addr; // source IP address … … 2166 2363 uint8_t trsp_protocol; // transport protocol type (UDP/TCP) 2167 2364 uint8_t r2t_flags; // flags defined by one R2T queue request 2168 bool_t do_send; // build & send a packet when true 2169 2365 2170 2366 // get socket cluster and local pointer 2171 2367 socket_cxy = GET_CXY( socket_xp ); 2172 2368 socket_ptr = GET_PTR( socket_xp ); 2173 2369 2370 #if DEBUG_DEV_NIC_TX || DEBUG_DEV_NIC_ERROR 2371 uint32_t cycle = (uint32_t)hal_get_cycles(); 2372 uint32_t socket_fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid )); 2373 uint32_t socket_pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid )); 2374 #endif 2375 2376 // build extended pointer on socket r2t queue 2377 socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq ); 2378 2379 // get cmd_valid & t2t_valid from socket descriptor 2380 cmd_valid = (bool_t)hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_valid )); 2381 r2t_valid = (bool_t)remote_buf_status( XPTR( socket_cxy , &socket_ptr->r2tq )); 2382 2174 2383 #if DEBUG_DEV_NIC_TX 2175 thread_t * this = CURRENT_THREAD;;2176 uint32_t cycle = (uint32_t)hal_get_cycles();2177 uint32_t fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid ));2178 uint32_t pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid ));2179 2384 if( cycle > DEBUG_DEV_NIC_TX ) 2180 printk("\n[%s] thread[%x,%x] enter for socket[%x,%d] : cmd_valid %d / r2t_valid %d / cycle %d\n", 2181 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 
cmd_valid, r2t_valid, cycle ); 2182 #endif 2183 2184 // build extended pointers on socket lock and r2t queue 2185 socket_lock_xp = XPTR( socket_cxy , &socket_ptr->lock ); 2186 socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq ); 2187 2188 // 1. take lock protecting this socket 2189 remote_queuelock_acquire( socket_lock_xp ); 2190 2191 // get relevant socket infos 2385 printk("\n[%s] enter for socket[%x,%d] : cmd_val %d / r2t_val %d / cycle %d\n", 2386 __FUNCTION__, socket_pid, socket_fdid, cmd_valid, r2t_valid, cycle ); 2387 #endif 2388 2389 // 1. get relevant socket infos 2192 2390 socket_type = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->type )); 2193 2391 socket_state = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->state )); … … 2195 2393 dst_ip_addr = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->remote_addr )); 2196 2394 2197 // compute UDP/TCP packet base in kernel buffer2395 // compute UDP/TCP packet base in local kernel buffer 2198 2396 k_trsp_base = k_buf + ETH_HEAD_LEN + IP_HEAD_LEN; 2199 2397 2200 // set default values 2201 do_send = false; 2398 // default value 2202 2399 trsp_length = 0; 2203 nbytes = 0; 2204 2205 if( cmd_valid ) // handle TX command 2206 { 2207 // 2. get command arguments from socket 2400 2401 if( cmd_valid ) // handle TX command depending on type 2402 { 2403 // get command arguments from socket 2208 2404 cmd_type = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_cmd )); 2209 2405 tx_buf = hal_remote_lpt( XPTR( socket_cxy , &socket_ptr->tx_buf )); … … 2213 2409 2214 2410 #if DEBUG_DEV_NIC_TX 2215 cycle = (uint32_t)hal_get_cycles();2216 2411 if( cycle > DEBUG_DEV_NIC_TX ) 2217 printk("\n[%s] thread[%x,%x] cmd_valid for socket[%x,%d] : %s / %s / cycle %d\n",2218 __FUNCTION__, this->process->pid, this->trdid, pid, fdid,2219 socket_cmd_type_str(cmd_type), socket_state_str(socket_state), cycle);2412 printk("\n[%s] socket[%x,%d] / %s / command %s \n", 2413 __FUNCTION__, socket_pid, socket_fdid, 2414 socket_cmd_type_str(cmd_type),socket_state_str(socket_state) ); 2220 2415 #endif 2221 2416 2222 2417 ////////////////////////////////////////////////////////// 2223 // 3. UDP : build UDP packet and update UDP socket state2418 // 2. UDP : build UDP packet and update UDP socket state 2224 2419 if( socket_type == SOCK_DGRAM ) 2225 2420 { … … 2228 2423 if( socket_state != UDP_STATE_ESTAB ) 2229 2424 { 2230 // reset tx_valid 2231 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ) , false ); 2232 2233 // unblock client thread / report error 2234 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE ); 2425 return CMD_STS_BADSTATE; 2235 2426 } 2236 else 2427 else if( cmd_type == CMD_TX_SEND ) 2237 2428 { 2238 if( cmd_type == CMD_TX_SEND ) 2239 { 2240 // compute payload length 2241 nbytes = ( PAYLOAD_MAX_LEN < todo ) ? PAYLOAD_MAX_LEN : todo; 2242 2243 // move payload from tx_buf to 2 Kbytes kernel buffer 2244 memcpy( k_trsp_base + UDP_HEAD_LEN, 2245 tx_buf + (len - todo), 2246 nbytes ); 2247 2248 // build UDP header 2249 dev_nic_tx_build_udp_header( k_trsp_base, 2250 socket_xp, 2251 nbytes ); 2252 2253 // update "tx_todo" in socket descriptor 2254 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_todo), todo - nbytes ); 2255 2256 // send UDP packet 2257 trsp_length = UDP_HEAD_LEN + nbytes; 2258 do_send = true; 2259 2260 #if( DEBUG_DEV_NIC_TX & 1) 2261 cycle = (uint32_t)hal_get_cycles(); 2429 // compute payload length 2430 nbytes = ( CONFIG_SOCK_PAYLOAD_MAX < todo ) ? 
CONFIG_SOCK_PAYLOAD_MAX : todo; 2431 2432 // move payload from remote socket tx_buf to local kernel buffer 2433 hal_remote_memcpy( XPTR( local_cxy , k_trsp_base + UDP_HEAD_LEN ), 2434 XPTR( socket_cxy , tx_buf + (len - todo) ), 2435 nbytes ); 2436 2437 // build UDP header 2438 dev_nic_tx_build_udp_header( k_trsp_base, 2439 socket_xp, 2440 nbytes ); 2441 2442 // update "tx_todo" in socket descriptor 2443 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_todo), todo - nbytes ); 2444 2445 // set UDP packet length 2446 trsp_length = UDP_HEAD_LEN + nbytes; 2447 2448 #if DEBUG_DEV_NIC_TX 2262 2449 if( cycle > DEBUG_DEV_NIC_TX ) 2263 printk("\n[%s] thread[%x,%x] socket[%x,%d] UDP packet build / length %d / cycle %d\n", 2264 __FUNCTION__, this->process->pid, this->trdid, trsp_length , cycle ); 2265 #endif 2266 if( nbytes == todo ) // last byte sent 2267 { 2268 // reset tx_valid 2269 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ) , false ); 2270 2271 // report success to TX client 2272 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 2273 } 2274 } 2275 else // CONNECT, ACCEPT, or CLOSE commands are illegal for UDP 2276 { 2277 // reset tx_valid 2278 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ) , false ); 2279 2280 // report error 2281 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADCMD ); 2282 } 2450 printk("\n[%s] socket[%x,%d] UDP packet build / %d bytes\n", 2451 __FUNCTION__, socket_pid, socket_fdid, nbytes ); 2452 #endif 2453 } 2454 else // CONNECT, ACCEPT, or CLOSE commands are illegal for UDP 2455 { 2456 2457 #if DEBUG_DEV_NIC_ERROR 2458 printk("\n[ERROR] in %s : bad state %s for socket[%x,%x] / cycle %d\n", 2459 __FUNCTION__, socket_state_str(socket_state), socket_pid, socket_fdid, cycle ); 2460 #endif 2461 return CMD_STS_BADCMD; 2283 2462 } 2284 2463 } // end UDP 2285 2464 2286 2465 /////////////////////////////////////////////////////////// 2287 // 3. TCP : build TCP segment and update TCP socket state2466 // 2. 
TCP : build TCP segment and update TCP socket state 2288 2467 else if( socket_type == SOCK_STREAM ) 2289 2468 { … … 2296 2475 socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq ); 2297 2476 2298 // get one request from R2T queue 2299 remote_buf_get_to_kernel( socket_r2tq_xp , &r2t_flags , 1);2477 // get one request from R2T queue, and update R2T queue 2478 socket_get_r2t_request( socket_r2tq_xp , &r2t_flags ); 2300 2479 } 2301 2480 else … … 2311 2490 { 2312 2491 // initialises socket tx_nxt, and rx_wnd 2313 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->tx_nxt), TCP_ISS_CLIENT ); 2314 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->rx_wnd), TCP_MAX_WINDOW ); 2492 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->tx_nxt), 2493 CONFIG_SOCK_ISS_CLIENT ); 2494 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->rx_wnd), 2495 CONFIG_SOCK_MAX_WINDOW ); 2315 2496 2316 2497 // build TCP SYN segment … … 2319 2500 0, // length 2320 2501 TCP_FLAG_SYN ); 2321 // se nd segment2502 // set TCP packet length 2322 2503 trsp_length = TCP_HEAD_LEN; 2323 do_send = true; 2324 2325 #if DEBUG_DEV_NIC_TX 2326 cycle = (uint32_t)hal_get_cycles(); 2327 if( cycle > DEBUG_DEV_NIC_TX ) 2328 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / CONNECT / " 2329 "TCP SYN build / cycle %d\n", 2330 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 2331 socket_state_str( socket_state ), cycle ); 2332 #endif 2504 2333 2505 // update socket.state 2334 2506 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), … … 2337 2509 // update socket.tx_nxt 2338 2510 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_nxt ), 2339 TCP_ISS_CLIENT + 1 ); 2340 2341 // reset tx_valid but do not unblock client thread 2342 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2511 CONFIG_SOCK_ISS_CLIENT + 1 ); 2512 #if DEBUG_DEV_NIC_TX 2513 if( cycle > DEBUG_DEV_NIC_TX ) 2514 printk("\n[%s] socket[%x,%d] %s / CONNECT / TCP SYN build\n", 2515 __FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state) ); 2516 #endif 2343 2517 } 2344 2518 else // report error for all other socket states 2345 2519 { 2346 // reset tx_valid 2347 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ) , false ); 2348 2349 // report error 2350 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE ); 2520 2521 #if DEBUG_DEV_NIC_ERROR 2522 printk("\n[ERROR] in %s : bad state %s socket[%x,%x] / cycle %d\n", 2523 __FUNCTION__, socket_state_str(socket_state), socket_pid, socket_fdid, cycle ); 2524 #endif 2525 return CMD_STS_BADSTATE; 2351 2526 } 2352 2527 } … … 2358 2533 { 2359 2534 // initialize socket tx_nxt, and rx_wnd 2360 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->tx_nxt), TCP_ISS_SERVER ); 2361 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->rx_wnd), CONFIG_SOCK_RX_BUF_SIZE); 2535 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->tx_nxt), 2536 CONFIG_SOCK_ISS_SERVER ); 2537 hal_remote_s32(XPTR(socket_cxy , &socket_ptr->rx_wnd), 2538 (1 << CONFIG_SOCK_RX_BUF_ORDER) ); 2362 2539 2363 2540 // build TCP ACK-SYN segment … … 2366 2543 0, // length 2367 2544 TCP_FLAG_SYN | TCP_FLAG_ACK ); 2368 // se nd segment2545 // set TCP packet length 2369 2546 trsp_length = TCP_HEAD_LEN; 2370 do_send = true; 2371 2372 #if DEBUG_DEV_NIC_TX 2373 cycle = (uint32_t)hal_get_cycles(); 2374 if( cycle > DEBUG_DEV_NIC_TX ) 2375 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / ACCEPT / send SYN-ACK / cycle %d\n", 2376 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 2377 socket_state_str( socket_state ), cycle ); 2378 #endif 2547 2379 2548 // update socket.state 
2380 2549 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), … … 2383 2552 // update socket.tx_nxt 2384 2553 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_nxt ), 2385 TCP_ISS_SERVER + 1 ); 2386 2387 // reset tx_valid but do not unblock client thread 2388 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2554 CONFIG_SOCK_ISS_SERVER + 1 ); 2555 #if DEBUG_DEV_NIC_TX 2556 if( cycle > DEBUG_DEV_NIC_TX ) 2557 printk("\n[%s] socket[%x,%d] %s / ACCEPT / SYN-ACK build\n", 2558 __FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state) ); 2559 #endif 2389 2560 } 2390 2561 else // report error in all other socket states 2391 2562 { 2392 // reset tx_valid 2393 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2394 2395 // report error to TX client thread 2396 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE ); 2563 2564 #if DEBUG_DEV_NIC_ERROR 2565 printk("\n[ERROR] in %s : bad state %s for socket[%x,%x] / cycle %d\n", 2566 __FUNCTION__, socket_state_str(socket_state), socket_pid, socket_fdid, cycle ); 2567 #endif 2568 return CMD_STS_BADSTATE; 2397 2569 } 2398 2570 } … … 2423 2595 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_nxt), tx_nxt + 1 ); 2424 2596 2425 // se nd segment2597 // set TCP packet length 2426 2598 trsp_length = TCP_HEAD_LEN; 2427 do_send = true;2428 2599 2429 2600 #if DEBUG_DEV_NIC_TX 2430 cycle = (uint32_t)hal_get_cycles();2431 2601 if( cycle > DEBUG_DEV_NIC_TX ) 2432 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / CLOSE / send FIN-ACK / cycle %d\n", 2433 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 2434 socket_state_str( socket_state ), cycle ); 2435 #endif 2436 // reset tx_valid but do not unblock client thread 2437 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2602 printk("\n[%s] socket[%x,%d] %s / CLOSE / FIN-ACK build\n", 2603 __FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state) ); 2604 #endif 2438 2605 } 2439 2606 else // all other states => signal error 2440 2607 { 2441 2608 2442 #if DEBUG_DEV_NIC_TX 2443 cycle = (uint32_t)hal_get_cycles(); 2444 if( cycle > DEBUG_DEV_NIC_TX ) 2445 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / CLOSE / error BADSTATE / cycle %d\n", 2446 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 2447 socket_state_str( socket_state ), cycle ); 2448 #endif 2449 // reset tx_valid 2450 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2451 2452 // report error 2453 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE ); 2609 #if DEBUG_DEV_NIC_ERROR 2610 printk("\n[ERROR] in %s : bad state %s for socket[%x,%x] / cycle %d\n", 2611 __FUNCTION__, socket_state_str(socket_state), socket_pid, socket_fdid, cycle ); 2612 #endif 2613 return CMD_STS_BADSTATE; 2454 2614 } 2455 2615 } 2456 ////////////////////////////////// ///2616 ////////////////////////////////// 2457 2617 else if( cmd_type == CMD_TX_SEND ) 2458 2618 { … … 2464 2624 2465 2625 // compute actual payload length 2466 nbytes = ( PAYLOAD_MAX_LEN < todo ) ? PAYLOAD_MAX_LEN : todo; 2467 2468 // compute TCP segment base in kernel buffer 2469 k_trsp_base = k_buf + ETH_HEAD_LEN + IP_HEAD_LEN; 2470 2471 // move payload to k_buf 2472 memcpy( k_trsp_base + TCP_HEAD_LEN, 2473 tx_buf + (len - todo), 2474 nbytes ); 2626 nbytes = ( CONFIG_SOCK_PAYLOAD_MAX < todo ) ? 
2627 CONFIG_SOCK_PAYLOAD_MAX : todo; 2628 2629 // move payload from remote tx_buf to local kernel buffer 2630 hal_remote_memcpy( XPTR( local_cxy , k_trsp_base + TCP_HEAD_LEN ), 2631 XPTR( socket_cxy , tx_buf + (len - todo) ), 2632 nbytes ); 2475 2633 2476 2634 // build TCP header … … 2486 2644 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_nxt), tx_nxt + nbytes ); 2487 2645 2488 // se nd TCP segment2646 // set TCP packet length 2489 2647 trsp_length = TCP_HEAD_LEN + nbytes; 2490 do_send = true;2491 2492 if( todo == nbytes ) // last byte sent2493 {2494 // reset tx_valid when last byte has been sent2495 hal_remote_s32( XPTR(socket_cxy , &socket_ptr->tx_valid), false );2496 }2497 2648 2498 2649 #if DEBUG_DEV_NIC_TX 2499 cycle = (uint32_t)hal_get_cycles();2500 2650 if( cycle > DEBUG_DEV_NIC_TX ) 2501 printk("\n[%s] thread[%x,%x] socket[%x,%d] %s / SEND / " 2502 "TCP DATA build / payload %d / cycle %d\n", 2503 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, 2504 socket_state_str( socket_state ), nbytes, cycle ); 2651 printk("\n[%s] socket[%x,%d] %s / SEND / %d bytes\n", 2652 __FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state), nbytes ); 2505 2653 #endif 2506 2654 } 2507 2655 else // all other socket states 2508 2656 { 2509 // reset tx_valid 2510 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2511 2512 // report error to TX client thread 2513 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADSTATE ); 2657 2658 #if DEBUG_DEV_NIC_ERROR 2659 printk("\n[ERROR] in %s : bad state %s for socket[%x,%x] / cycle %d\n", 2660 __FUNCTION__, socket_state_str(socket_state), socket_pid, socket_fdid, cycle ); 2661 #endif 2662 return CMD_STS_BADSTATE; 2514 2663 } 2515 2664 } … … 2517 2666 else // undefined TX command type 2518 2667 { 2519 // reset tx_valid 2520 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid ), false ); 2521 2522 // report error to TX client thread 2523 dev_nic_unblock_tx_client( socket_xp , CMD_STS_BADCMD ); 2668 2669 #if DEBUG_DEV_NIC_ERROR 2670 printk("\n[ERROR] in %s : undefined command type for socket[%x,%x] %s / cycle %d\n", 2671 __FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state), cycle ); 2672 #endif 2673 return CMD_STS_BADCMD; 2524 2674 } 2525 2675 } // end TCP … … 2527 2677 else // no valid TX command => handle R2T request only 2528 2678 { 2679 2680 assert( __FUNCTION__ , (socket_type == SOCK_STREAM) , "don't use R2T queue for UDP" ); 2681 2529 2682 // get one request from R2T queue 2530 remote_buf_get_to_kernel( socket_r2tq_xp , &r2t_flags , 1);2683 socket_get_r2t_request( socket_r2tq_xp , &r2t_flags ); 2531 2684 2532 2685 #if DEBUG_DEV_NIC_TX 2533 2686 cycle = (uint32_t)hal_get_cycles(); 2534 2687 if( cycle > DEBUG_DEV_NIC_TX ) 2535 printk("\n[%s] thread[%x,%x] only r2t_valid for socket[%x,%d] / flags %x / cycle %d\n", 2536 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, r2t_flags, cycle ); 2537 #endif 2538 2688 printk("\n[%s] socket[%x,%d] %s / send only flags %x / no data\n", 2689 __FUNCTION__, socket_pid, socket_fdid, socket_state_str(socket_state), r2t_flags ); 2690 #endif 2539 2691 // build TCP header 2540 2692 dev_nic_tx_build_tcp_header( k_trsp_base, 2541 2693 socket_xp, 2542 0, // payload length2694 0, // no payload 2543 2695 r2t_flags ); // flags 2544 // se nd TCP segment2696 // set protocol 2545 2697 trsp_protocol = PROTOCOL_TCP; 2698 2699 // set TCP packet length 2546 2700 trsp_length = TCP_HEAD_LEN; 2547 do_send = true;2548 2701 } 2549 2702 2550 // 4. 
release the lock protecting the socket 2551 remote_queuelock_release( socket_lock_xp ); 2552 2553 // return if no packet to send 2554 if( do_send == false ) return; 2555 2556 // 5. build IP header 2703 // 3. build IP header 2557 2704 dev_nic_tx_build_ip_header( k_buf + ETH_HEAD_LEN, 2558 2705 src_ip_addr, … … 2561 2708 trsp_length ); 2562 2709 2563 #if( DEBUG_DEV_NIC_TX & 1) 2564 cycle = (uint32_t)hal_get_cycles(); 2565 if( cycle > DEBUG_DEV_NIC_TX ) 2566 printk("\n[%s] thread[%x,%x] IP header build / length %d / cycle %d\n", 2567 __FUNCTION__, this->process->pid, this->trdid, IP_HEAD_LEN + trsp_length , cycle ); 2568 #endif 2569 2570 // 6. build ETH header 2710 // 4. build ETH header 2571 2711 dev_nic_tx_build_eth_header( k_buf, 2572 2712 (uint8_t)DST_MAC_5, … … 2584 2724 IP_HEAD_LEN + trsp_length ); 2585 2725 2586 #if( DEBUG_DEV_NIC_TX & 1)2587 cycle = (uint32_t)hal_get_cycles();2588 if( cycle > DEBUG_DEV_NIC_TX )2589 printk("\n[%s] thread[%x,%x] ETH header build / cycle %d\n",2590 __FUNCTION__, this->process->pid, this->trdid, cycle );2591 #endif2592 2593 // 7. move packet to NIC_TX queue (blocking function)2594 dev_nic_tx_move_packet( chdev,2595 k_buf,2596 ETH_HEAD_LEN + IP_HEAD_LEN + trsp_length );2597 2598 2726 #if DEBUG_DEV_NIC_TX 2599 2727 cycle = (uint32_t)hal_get_cycles(); 2600 2728 if( cycle > DEBUG_DEV_NIC_TX ) 2601 printk("\n[%s] thread[%x,%x] for socket[%x,%d] moved packet to NIC_TX / cycle %d\n", 2602 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, cycle ); 2603 #endif 2729 printk("\n[%s] exit for socket[%x,%d] / packet build / cycle %d\n", 2730 __FUNCTION__, socket_pid, socket_fdid, cycle ); 2731 #endif 2732 2733 // return success and total packet length 2734 *total_length = ETH_HEAD_LEN + IP_HEAD_LEN + trsp_length; 2735 return CMD_STS_SUCCESS; 2604 2736 2605 2737 } // end dev_nic_tx_build_packet() 2606 2607 2738 2608 2739 ///////////////////////////////////////// 2609 2740 void dev_nic_tx_server( chdev_t * chdev ) 2610 2741 { 2611 uint8_t k_buf[CONFIG_SOCK_PKT_BUF_SIZE]; // buffer for one packet 2612 2613 xptr_t queue_root_xp; // extended pointer on sockets list root 2614 xptr_t queue_lock_xp; // extended pointer on lock protecting this list 2615 xptr_t socket_xp; // extended pointer on on registered socket 2616 socket_t * socket_ptr; 2617 cxy_t socket_cxy; 2618 xptr_t iter_xp; // iterator for loop on registered sockets 2619 xlist_entry_t temp_root; // root of temporary list of sockets 2620 xptr_t temp_root_xp; // extended pointer on temporary list of sockets 2621 uint32_t temp_nr; // number of active registered sockets 2622 bool_t cmd_valid; // TX command valid in socket descriptor 2623 bool_t r2t_valid; // valid R2T request in socket descriptor 2624 2742 uint8_t k_buf[CONFIG_SOCK_PKT_BUF_SIZE]; // buffer for one packet 2743 2744 xptr_t queue_lock_xp; // extended pointer on lock for sockets list 2745 xptr_t root_xp; // extended pointer on sockets list root 2746 xptr_t iter_xp; // iterator for loop on sockets list 2747 xptr_t list_xp; // extended pointer on socket tx_list field 2748 xptr_t socket_xp; // extended pointer on found socket 2749 socket_t * socket_ptr; // local pointer on found socket 2750 cxy_t socket_cxy; // found socket cluster identifier 2751 xptr_t socket_lock_xp; // extented pointer on found socket lock 2752 bool_t cmd_valid; // TX command valid in socket descriptor 2753 bool_t r2t_valid; // valid R2T request in socket descriptor 2754 uint32_t sock_type; // socket type 2755 socket_cmd_sts_t cmd_sts; // value returned by dev_nic_tx_build_packet() 
2756 socket_cmd_type_t tx_cmd; // socket TX command type 2757 uint32_t tx_todo; // socket number of bytes not sent yet 2758 uint32_t total_length; // length of the ETH/IP/TCP packet (bytes) 2759 bool_t found; // one active socket found 2760 2625 2761 thread_t * this = CURRENT_THREAD; 2626 2762 … … 2638 2774 "illegal chdev type or direction" ); 2639 2775 2640 // check thread can yield 2641 thread_assert_can_yield( this , __FUNCTION__ ); 2642 2643 // build extended pointer on temporary list 2644 temp_root_xp = XPTR( local_cxy , &temp_root ); 2645 2646 // build extended pointer on client sockets queue (lock & root) 2776 // build extended pointers on client sockets queue lock 2647 2777 queue_lock_xp = XPTR( local_cxy , &chdev->wait_lock ); 2648 queue_root_xp = XPTR( local_cxy , &chdev->wait_root ); 2778 2779 // build extended pointers on client sockets queue root and first item 2780 root_xp = XPTR( local_cxy , &chdev->wait_root ); 2649 2781 2650 2782 while( 1 ) // TX server infinite loop 2651 2783 { 2652 // initialize temporary list of registered sockets as empty2653 xlist_root_init( temp_root_xp );2654 temp_nr = 0;2655 2656 2784 // take the lock protecting the client sockets queue 2657 2785 remote_busylock_acquire( queue_lock_xp ); 2658 2786 2659 // build temporary list of all registered sockets 2660 if( xlist_is_empty( queue_root_xp ) == false ) 2787 found = false; 2788 2789 // scan registered sockets to find one active socket 2790 // with a round robin priority between the registered sockets 2791 if( xlist_is_empty( root_xp ) == false ) 2661 2792 { 2662 XLIST_FOREACH( queue_root_xp , iter_xp )2793 XLIST_FOREACH( root_xp , iter_xp ) 2663 2794 { 2664 // get client socket cluster and local pointer2795 // get client socket cluster and pointers 2665 2796 socket_xp = XLIST_ELEMENT( iter_xp , socket_t , tx_list ); 2666 2797 socket_ptr = GET_PTR( socket_xp ); 2667 2798 socket_cxy = GET_CXY( socket_xp ); 2668 2799 2669 // register socket in temporary list 2670 xlist_add_last( temp_root_xp , XPTR( socket_cxy , &socket_ptr->tx_temp )); 2671 temp_nr++; 2672 } 2800 // build extended pointer on socket tx_list field 2801 list_xp = XPTR( socket_cxy , &socket_ptr->tx_list ); 2802 2803 // get cmd_valid & r2t_valid from socket descriptor 2804 cmd_valid = (bool_t)hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_valid )); 2805 2806 // get r2t_valid from socket descriptor 2807 r2t_valid = (bool_t)remote_buf_status( XPTR( socket_cxy , &socket_ptr->r2tq )); 2808 2809 if( cmd_valid || r2t_valid ) // active => move socket, and exit loop 2810 { 2811 // move selected socket to last position for round-robin 2812 xlist_unlink( list_xp ); 2813 xlist_add_last( root_xp , list_xp ); 2814 2815 // exit loop 2816 found = true; 2817 break; 2818 } 2819 } // end loop on sockets 2673 2820 } 2674 2821 2675 2822 // release the lock protecting the client sockets queue 2676 2823 remote_busylock_release( queue_lock_xp ); 2677 2824 2678 if( temp_nr > 0 ) 2679 { 2680 // loop on temporary list 2681 XLIST_FOREACH( temp_root_xp , iter_xp ) 2682 { 2683 // get client socket cluster and local pointer 2684 socket_xp = XLIST_ELEMENT( iter_xp , socket_t , tx_temp ); 2685 socket_ptr = GET_PTR( socket_xp ); 2686 socket_cxy = GET_CXY( socket_xp ); 2687 2688 // get cmd_valid & t2t_valid from socket descriptor 2689 cmd_valid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_valid )); 2690 2691 // get r2t_valid from socket descriptor 2692 r2t_valid = (bool_t)remote_buf_status( XPTR( socket_cxy , &socket_ptr->r2tq )); 2693 2694 // test if socket is 
active 2695 if( cmd_valid || r2t_valid ) // active socket 2696 { 2697 2698 #if DEBUG_DEV_NIC_TX 2699 cycle = (uint32_t)hal_get_cycles(); 2700 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid )); 2701 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid )); 2702 if( cycle > DEBUG_DEV_NIC_TX ) 2703 printk("\n[%s] thread[%x,%x] found socket[%x,%d] / cmd_valid %d / r2t_valid %d / cycle %d\n", 2704 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, cmd_valid, r2t_valid, cycle ); 2705 #endif 2706 // build and send one packet/segment for this socket 2707 dev_nic_tx_build_packet( cmd_valid, 2708 r2t_valid, 2709 socket_xp, 2710 k_buf, 2711 chdev ); 2712 #if DEBUG_DEV_NIC_TX 2713 cycle = (uint32_t)hal_get_cycles(); 2714 if( cycle > DEBUG_DEV_NIC_TX ) 2715 dev_nic_packet_display( true, // is_tx 2716 this->process->pid, 2717 this->trdid, 2718 cycle, 2719 k_buf ); 2720 #endif 2721 } 2722 else // inactive socket 2723 { 2724 temp_nr--; 2725 } 2726 } // end loop on temporary list 2727 } 2728 2729 // block & deschedule if no active socket found in current iteration 2730 if( temp_nr == 0 ) 2825 if( found == false ) // block & deschedule if no active socket 2731 2826 { 2732 2827 … … 2737 2832 __FUNCTION__, this->process->pid, this->trdid, cycle ); 2738 2833 #endif 2739 2740 2834 // block and deschedule 2741 2835 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_CLIENT ); … … 2749 2843 #endif 2750 2844 } 2845 else // handle active socket request 2846 { 2847 // avoid warning 2848 total_length = 0; 2849 2850 // build extended pointer on socket lock 2851 socket_lock_xp = XPTR( socket_cxy , &socket_ptr->lock ); 2852 2853 // take socket lock 2854 remote_queuelock_acquire( socket_lock_xp ); 2855 2856 #if DEBUG_DEV_NIC_TX 2857 cycle = (uint32_t)hal_get_cycles(); 2858 pid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->pid )); 2859 fdid = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->fdid )); 2860 #endif 2861 2862 #if DEBUG_DEV_NIC_TX 2863 if( cycle > DEBUG_DEV_NIC_TX ) 2864 printk("\n[%s] thread[%x,%x] select socket[%x,%d] / cmd_val %d / r2t_val %d / cycle %d\n", 2865 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, cmd_valid, r2t_valid, cycle ); 2866 #endif 2867 // build one UDP packet / TCP segment 2868 cmd_sts = dev_nic_tx_build_packet( socket_xp, 2869 k_buf, 2870 &total_length ); 2871 #if DEBUG_DEV_NIC_TX 2872 cycle = (uint32_t)hal_get_cycles(); 2873 if( cycle > DEBUG_DEV_NIC_TX ) 2874 printk("\n[%s] thread[%x,%x] for socket[%x,%x] build packet / %d bytes / sts %d / cycle %d\n", 2875 __FUNCTION__, this->process->pid, this->trdid, pid, fdid, total_length, cmd_sts, cycle ); 2876 #endif 2877 // release socket lock 2878 remote_queuelock_release( socket_lock_xp ); 2879 2880 if( cmd_sts == CMD_STS_SUCCESS ) // move packet to TX queue 2881 { 2882 // move packet to NIC_TX queue 2883 dev_nic_tx_move_packet( chdev, 2884 k_buf, 2885 total_length ); 2886 #if DEBUG_DEV_NIC_TX 2887 cycle = (uint32_t)hal_get_cycles(); 2888 if( cycle > DEBUG_DEV_NIC_TX ) 2889 dev_nic_packet_display( pid, fdid, cycle, k_buf ); 2890 #endif 2891 // get socket.type, socket.tx_cmd and socket.tx_todo values 2892 tx_cmd = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_cmd )); 2893 tx_todo = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->tx_todo )); 2894 sock_type = hal_remote_l32( XPTR( socket_cxy , &socket_ptr->type )); 2895 2896 // client signaling depends on command type and socket type 2897 if( (tx_cmd == CMD_TX_SEND) && (tx_todo == 0) ) 2898 { 2899 // reset tx_valid for both UDP and TCP 2900 
hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid), false ); 2901 2902 // unblock client thread for UDP only 2903 if(sock_type == SOCK_DGRAM) 2904 dev_nic_unblock_tx_client( socket_xp , CMD_STS_SUCCESS ); 2905 } 2906 else // type is CONNECT / ACCEPT / CLOSE 2907 { 2908 // reset tx_valid 2909 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid), false ); 2910 } 2911 } 2912 else // signal error to client thread 2913 { 2914 // reset tx_valid 2915 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_valid), false ); 2916 2917 // unblock tx_client thread 2918 dev_nic_unblock_tx_client( socket_xp , cmd_sts ); 2919 } 2920 } // end active socket handling 2751 2921 } // end infinite while loop 2752 2922 } // end dev_nic_tx_server() 2753 2923 2754 2924 2755 ///////////////////////////////////////////// 2756 void dev_nic_packet_display( bool_t is_tx, 2757 pid_t thread_pid, 2758 trdid_t thread_trdid, 2925 2926 2927 2928 ////////////////////////////////////////////////// 2929 void dev_nic_packet_display( pid_t socket_pid, 2930 uint32_t socket_fdid, 2759 2931 uint32_t cycle, 2760 2932 uint8_t * buf ) … … 2815 2987 remote_busylock_acquire( lock_xp ); 2816 2988 2817 if( is_tx ) 2818 { 2819 nolock_printk("\n*** NIC_TX server thread[%x,%x] send packet / cycle %d\n", 2820 thread_pid, thread_trdid, cycle ); 2821 } 2822 else 2823 { 2824 nolock_printk("\n*** NIC_RX server thread[%x,%x] get packet / cycle %d\n", 2825 thread_pid, thread_trdid, cycle ); 2826 } 2827 2828 nolock_printk("\n***** ETH header *****\n"); 2989 nolock_printk("\n***** packet sent by NIC_TX server for socket[%x,%d] / cycle %d\n", 2990 socket_pid, socket_fdid, cycle ); 2991 2992 nolock_printk(" ETH header\n"); 2829 2993 nolock_printk(" - dst_mac [6] = %l\n" , eth_dst_mac ); 2830 2994 nolock_printk(" - src_mac [6] = %l\n" , eth_src_mac ); 2831 2995 nolock_printk(" - length [2] = %d\n" , (uint32_t)eth_length ); 2832 nolock_printk(" ***** IP header *****\n");2996 nolock_printk(" IP header\n"); 2833 2997 nolock_printk(" - version [1] = %x\n" , (uint32_t)ip_version ); 2834 2998 nolock_printk(" - tos [1] = %x\n" , (uint32_t)ip_tos ); … … 2850 3014 ((uint16_t)buf[37] ) ; 2851 3015 2852 nolock_printk(" ***** UDP header *****\n");3016 nolock_printk(" UDP header\n"); 2853 3017 nolock_printk(" - src_port [2] = %d\n" , (uint32_t)udp_src_port ); 2854 3018 nolock_printk(" - dst_port [2] = %d\n" , (uint32_t)udp_dst_port ); … … 2881 3045 ((uint16_t)buf[53] ) ; 2882 3046 2883 nolock_printk(" ***** TCP header *****\n");3047 nolock_printk(" TCP header\n"); 2884 3048 nolock_printk(" - src_port [2] = %x\n" , (uint32_t)tcp_src_port ); 2885 3049 nolock_printk(" - dst_port [2] = %x\n" , (uint32_t)tcp_dst_port ); -
trunk/kernel/devices/dev_nic.h
r674 r683 99 99 * 100 100 * - GET_KEY : get channel index from remote IP address and port 101 * - SET_RUN : activate/desactivate one channel 101 * - SET_RUN : activate/desactivate one channel (both directions) 102 102 * - GET_INSTRU : get one instrumentation counter value 103 103 * - CLEAR_INSTRU : reset all instrumentation counters … … 140 140 #define PROTOCOL_TCP 0x06 141 141 142 #define TCP_ISS_CLIENT 0x10000 // initial sequence number for TCP client143 #define TCP_ISS_SERVER 0x20000 // initial sequence number for TCP server144 #define TCP_MAX_WINDOW 0xFFFFF // initial TCP send window145 146 #define PAYLOAD_MAX_LEN 1500 // max length for an UDP packet / TCP segment147 148 142 #define TCP_FLAG_FIN 0x01 149 143 #define TCP_FLAG_SYN 0x02 … … 152 146 #define TCP_FLAG_ACK 0x10 153 147 #define TCP_FLAG_URG 0x20 154 155 #define TCP_RETRANSMISSION_TIMEOUT 10000000156 148 157 149 /***************************************************************************************** … … 192 184 * in the server thread descriptor, to access the NIC_RX & NIC_TX packet queues. 193 185 * The buffer is always a 2K bytes kernel buffer, containing an Ethernet packet. 194 * - The next 4 synchronous commands are used by the client th , and stored in the186 * - The next 4 synchronous commands are used by the client thread, and stored in the 195 187 * client thread descriptor, to directly access the NIC registers. 196 188 ****************************************************************************************/ … … 212 204 xptr_t dev_xp; /*! extended pointer on NIC chdev descriptor */ 213 205 nic_cmd_t type; /*! command type */ 214 uint8_t * buffer; /*! local pointer on kernel buffer 215 uint32_t length; /*! number of bytes in buffer 206 uint8_t * buffer; /*! local pointer on kernel buffer (when READ / WRITE) */ 207 uint32_t length; /*! number of bytes in buffer (when READ / WRITE ) */ 216 208 uint32_t status; /*! return value (depends on command type) */ 217 209 uint32_t error; /*! return an error from the hardware (0 if no error) */ … … 282 274 * This TX server thread is created by the dev_nic_init() function. 283 275 * It build and send UDP packets or TCP segments for all clients threads registered in 284 * the NIC_TX[channel] chdev. The command types are (CONNECT / SEND / CLOSE), and the 285 * priority between clients is round-robin. It takes into account the request registered 286 * by the RX server thread in the R2T queue associated to the involved socket. 287 * When a command is completed, it unblocks the client thread. For a SEND command, the 288 * last byte must have been sent for an UDP socket, and it must have been acknowledged 289 * for a TCP socket. 290 * When the TX client threads queue is empty, it blocks on THREAD_BLOCKED_CLIENT 291 * condition and deschedules. It is re-activated by a client thread registering a command. 276 * the NIC_TX[channel] chdev. The command types are (CONNECT / ACCEPT / CLOSE / SEND). 277 * It takes into account the request registered by the RX server thread in the R2T queues. 278 * The loop on registered sockets implements a round-robin priority between sockets. 279 * When no registered socket is active, it blocks on the THREAD_BLOCKED_CLIENT condition 280 * and deschedules. It is re-activated by a client thread registering a command. 292 281 * When the NIC_TX packet queue is full, it blocks on the THREAD_BLOCKED_ISR condition 293 282 * and deschedules. It is reactivated by the NIC_TX DMA engine. 
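The R2T ("RX to TX") queue mentioned above is the only channel by which the RX server asks for control segments (pure ACK, RST, FIN) to be emitted: the RX server never writes to the NIC_TX queue itself. The sketch below illustrates the two ends of this hand-off; it reuses socket_put_r2t_request(), remote_buf_status(), socket_get_r2t_request() and dev_nic_tx_build_tcp_header() with the argument lists visible in this changeset, but the two wrapper functions and their names are illustrative only, and the socket lock handling performed by the real server threads is omitted.

// RX side : after a state transition, post an ACK request for the TX server.
static void rx_post_ack_sketch( xptr_t socket_xp , uint32_t channel )
{
    socket_t * ptr = GET_PTR( socket_xp );
    cxy_t      cxy = GET_CXY( socket_xp );

    socket_put_r2t_request( XPTR( cxy , &ptr->r2tq ) , TCP_FLAG_ACK , channel );
}

// TX side : a non-empty R2T queue makes the socket "active" for the round-robin
// scan; the TX server consumes one request and builds a data-less TCP segment
// carrying the requested flags.
static void tx_serve_r2t_sketch( xptr_t socket_xp , uint8_t * k_trsp_base )
{
    socket_t * ptr = GET_PTR( socket_xp );
    cxy_t      cxy = GET_CXY( socket_xp );
    uint8_t    r2t_flags;

    if( remote_buf_status( XPTR( cxy , &ptr->r2tq ) ) == 0 ) return;   // nothing pending

    socket_get_r2t_request( XPTR( cxy , &ptr->r2tq ) , &r2t_flags );

    dev_nic_tx_build_tcp_header( k_trsp_base,   // base of TCP header in kernel buffer
                                 socket_xp,     // socket providing seq / ack / window
                                 0,             // no payload
                                 r2t_flags );   // flags requested by the RX server
}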
294 283 ****************************************************************************************** 295 284 * Implementation note: 296 * It execute an infinite loop in which it takes the lock protecting the clients list 297 * to build a "kleenex" list of currently registered clients. 298 * For each client registered in this "kleenex" list, it takes the lock protecting the 299 * socket state, build one packet/segment in a local 2K bytes kernel buffer, calls the 300 * transport layer to add the UDP/TCP header, calls the IP layer to add the IP header, 285 * At each iteration in the infinite loop, it takes the lock protecting the registered 286 * client sockets queue to find one active socket (tx_valid or r2t_valid flags set). 287 * For each registered socket, it takes the lock protecting the socket state, and 288 * exit the scan when an active socket has been found, without releasing the socket state. 289 * When the scan is completed, it release the lock protecting the queue, before handling 290 * the found active socket. The socket lock is released only when the requested packet 291 * has been build, and the active socket state has been updated. 292 * To handle a socket request, it calls the transport layer to build the UDP packet or 293 * TCP segment in a local 2K bytes kernel buffer, calls the IP layer to add the IP header, 301 294 * calls the ETH layer to add the ETH header, and moves the packet to the NIC_TX_QUEUE. 302 * Finally, it updates the socket state, and release the socket lock.303 295 ****************************************************************************************** 304 296 * @ chdev : [in] local pointer on one local NIC_TX[channel] chdev descriptor. … … 331 323 332 324 /****************************************************************************************** 333 * This function displays all the fields of an ETH/IP/TCP segment or ETH/IP/UDP packet.334 * *****************************************************************************************335 * @ is_tx : [in] sent packet if true / received packet if false.325 * This debug function can be called by the dev_nic_tx_server() function to display 326 * on TXT0 the header of a TX [ETH/IP/TCP] segment or [ETH/IP/UDP] packet. 327 ****************************************************************************************** 336 328 * @ pid : [in] process identifier. 337 * @ trdid : [in] threadidentifier.329 * @ fdid : [in] socket identifier. 338 330 * @ cycle : [in] date (number of cycles). 339 331 * @ buf : [in] local pointer on kernel buffer containing the packet. 340 332 *****************************************************************************************/ 341 void dev_nic_packet_display( bool_t is_tx, 342 pid_t pid, 343 trdid_t trdid, 333 void dev_nic_packet_display( pid_t pid, 334 uint32_t fdid, 344 335 uint32_t cycle, 345 336 uint8_t * buf ); -
trunk/kernel/fs/devfs.c
r673 r683 56 56 xptr_t devfs_ctx_alloc( cxy_t cxy ) 57 57 { 58 kmem_req_t req;59 60 req.type = KMEM_KCM;61 req.order = bits_log2( sizeof(devfs_ctx_t) );62 req.flags = AF_KERNEL | AF_ZERO;63 64 58 // allocates devfs context from target cluster 65 return XPTR( cxy , kmem_remote_alloc( cxy , &req ) ); 59 void * ptr = kmem_remote_alloc( cxy, 60 bits_log2(sizeof(devfs_ctx_t)), 61 AF_ZERO ); 62 63 if( ptr == NULL ) return XPTR_NULL; 64 else return XPTR( cxy , ptr ); 66 65 } 67 66 … … 90 89 void devfs_ctx_destroy( xptr_t devfs_ctx_xp ) 91 90 { 92 kmem_req_t req;93 94 91 // get cluster and local pointer on devfs context 95 92 devfs_ctx_t * devfs_ctx_ptr = GET_PTR( devfs_ctx_xp ); 96 93 cxy_t devfs_ctx_cxy = GET_CXY( devfs_ctx_xp ); 97 94 98 req.type = KMEM_KCM;99 req.ptr = devfs_ctx_ptr;100 101 95 // release devfs context descriptor to remote cluster 102 kmem_remote_free( devfs_ctx_cxy , &req ); 96 kmem_remote_free( devfs_ctx_cxy, 97 devfs_ctx_ptr, 98 bits_log2(sizeof(devfs_ctx_t)) ); 103 99 } 104 100 -
trunk/kernel/fs/fatfs.c
r673 r683 1630 1630 xptr_t fatfs_ctx_alloc( cxy_t cxy ) 1631 1631 { 1632 kmem_req_t req;1633 1634 1632 // allocate memory from remote cluster 1635 req.type = KMEM_KCM; 1636 req.order = bits_log2( sizeof(fatfs_ctx_t) ); 1637 req.flags = AF_KERNEL | AF_ZERO; 1638 1639 return XPTR( cxy , kmem_remote_alloc( cxy , &req ) ); 1633 void * ptr = kmem_remote_alloc( cxy, 1634 bits_log2(sizeof(fatfs_ctx_t)), 1635 AF_ZERO ); 1636 1637 if( ptr == NULL ) return XPTR_NULL; 1638 else return XPTR( cxy , ptr ); 1640 1639 1641 1640 } //end faffs_ctx_alloc() … … 1645 1644 { 1646 1645 error_t error; 1647 kmem_req_t req;1648 1646 cxy_t cxy; // FATFS context cluster identifier 1649 1647 fatfs_ctx_t * fatfs_ctx_ptr; // local pointer on FATFS context … … 1667 1665 // allocate a 512 bytes buffer in remote cluster, used to store 1668 1666 // temporarily the BOOT sector, and permanently the FS_INFO sector 1669 req.type = KMEM_KCM; 1670 req.order = 9; // 512 bytes 1671 req.flags = AF_KERNEL | AF_ZERO; 1672 buffer = kmem_remote_alloc( cxy , &req ); 1673 1667 buffer = kmem_remote_alloc( cxy, 1668 9, 1669 AF_ZERO ); 1674 1670 if( buffer == NULL ) 1675 1671 { … … 1827 1823 void fatfs_ctx_destroy( xptr_t fatfs_ctx_xp ) 1828 1824 { 1829 kmem_req_t req;1830 1825 mapper_t * fat_mapper; 1831 1826 uint8_t * fs_info_buffer; … … 1844 1839 fs_info_buffer = hal_remote_lpt( XPTR( fatfs_ctx_cxy , &fatfs_ctx_ptr->fs_info_buffer ) ); 1845 1840 1846 // release FS_INFO buffer 1847 req.type = KMEM_KCM;1848 req.ptr = fs_info_buffer;1849 kmem_remote_free( fatfs_ctx_cxy , &req );1841 // release FS_INFO buffer (512 bytes) 1842 kmem_remote_free( fatfs_ctx_cxy, 1843 fs_info_buffer, 1844 9 ); 1850 1845 1851 1846 // release FATFS context descriptor 1852 req.type = KMEM_KCM;1853 req.ptr = fatfs_ctx_ptr;1854 kmem_remote_free( fatfs_ctx_cxy , &req);1847 kmem_remote_free( fatfs_ctx_cxy, 1848 fatfs_ctx_ptr, 1849 bits_log2(sizeof(fatfs_ctx_t)) ); 1855 1850 1856 1851 } // end fatfs_ctx_destroy() … … 2857 2852 2858 2853 // compute number of pages 2859 npages = size >> CONFIG_PPM_PAGE_ SHIFT;2854 npages = size >> CONFIG_PPM_PAGE_ORDER; 2860 2855 if( size & CONFIG_PPM_PAGE_MASK ) npages++; 2861 2856 -
trunk/kernel/fs/vfs.c
r673 r683 48 48 49 49 ////////////////////////////////////////////////////////////////////////////////////////// 50 // Extern variables50 // Extern global variables 51 51 ////////////////////////////////////////////////////////////////////////////////////////// 52 52 … … 54 54 extern chdev_directory_t chdev_dir; // allocated in kernel_init.c 55 55 extern char * lock_type_str[]; // allocated in kernel_init.c 56 extern process_t process_zero; // allocated in kernel_init.c 56 57 57 58 /////////////////////////////////////////////////////////////////////////////////////////// … … 186 187 uint32_t inum; // inode identifier (to be allocated) 187 188 vfs_ctx_t * ctx; // file system context 188 kmem_req_t req; // request to kernel memory allocator189 189 error_t error; 190 190 … … 192 192 uint32_t cycle = (uint32_t)hal_get_cycles(); 193 193 thread_t * this = CURRENT_THREAD; 194 pid_t pid = this->process->pid; 195 trdid_t trdid = this->trdid; 194 196 #endif 195 197 … … 202 204 203 205 #if DEBUG_VFS_ERROR 204 if( DEBUG_VFS_ERROR < cycle ) 205 printk("\n[ERROR] in %s : thread[%x,%x] / illegal FS type\n", 206 __FUNCTION__ , this->process->pid , this->trdid ); 206 printk("\n[ERROR] in %s : thread[%x,%x] / illegal FS type / cycle %d\n", 207 __FUNCTION__ , pid , trdid, cycle ); 207 208 #endif 208 209 return -1; … … 220 221 221 222 #if DEBUG_VFS_ERROR 222 if( DEBUG_VFS_ERROR < cycle ) 223 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate inum\n", 224 __FUNCTION__ , this->process->pid , this->trdid ); 223 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate inum / cycle %d\n", 224 __FUNCTION__ , pid , trdid, cycle ); 225 225 #endif 226 226 return -1; … … 234 234 235 235 #if DEBUG_VFS_ERROR 236 if( DEBUG_VFS_ERROR < cycle ) 237 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate mapper\n", 238 __FUNCTION__ , this->process->pid , this->trdid ); 236 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate mapper / cycle %d\n", 237 __FUNCTION__ , pid , trdid, cycle ); 239 238 #endif 240 239 vfs_ctx_inum_release( XPTR( cxy , ctx ) , inum ); … … 244 243 mapper_ptr = GET_PTR( mapper_xp ); 245 244 246 // allocate one page for VFS inode descriptor 247 // because the embedded "children" xhtab footprint 248 req.type = KMEM_PPM; 249 req.order = 0; 250 req.flags = AF_KERNEL | AF_ZERO; 251 inode_ptr = kmem_remote_alloc( cxy , &req ); 252 245 // allocate memory for inode descriptor 246 inode_ptr = kmem_remote_alloc( cxy, 247 bits_log2(sizeof(vfs_inode_t)), 248 AF_ZERO ); 253 249 if( inode_ptr == NULL ) 254 250 { 255 251 256 252 #if DEBUG_VFS_ERROR 257 if( DEBUG_VFS_ERROR < cycle ) 258 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate inode\n", 259 __FUNCTION__ , this->process->pid , this->trdidi ); 253 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate inode / cycle %d\n", 254 __FUNCTION__ , pid , trdid, cycle ); 260 255 #endif 261 256 vfs_ctx_inum_release( XPTR( cxy , ctx ) , inum ); … … 297 292 if( DEBUG_VFS_INODE_CREATE < cycle ) 298 293 printk("\n[%s] thread[%x,%x] created inode (%x,%x) / ctx %x / fs_type %d / cycle %d\n", 299 __FUNCTION__, this->process->pid, this->trdid, cxy, inode_ptr, ctx, ctx->type, cycle );294 __FUNCTION__, pid, trdid, cxy, inode_ptr, ctx, ctx->type, cycle ); 300 295 #endif 301 296 … … 318 313 319 314 // release memory allocated for inode descriptor 320 kmem_req_t req; 321 req.type = KMEM_PPM; 322 req.ptr = inode_ptr; 323 kmem_remote_free( inode_cxy , &req ); 315 kmem_remote_free( inode_cxy, 316 inode_ptr, 317 bits_log2(sizeof(vfs_inode_t)) ); 324 318 325 319 } // end 
vfs_inode_destroy() … … 447 441 uint32_t size = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->size ) ); 448 442 449 #if DEBUG_VFS_INODE_LOAD_ALL 450 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 443 #if DEBUG_VFS_INODE_LOAD_ALL || DEBUG_VFS_ERROR 451 444 uint32_t cycle = (uint32_t)hal_get_cycles(); 452 445 thread_t * this = CURRENT_THREAD; 446 #endif 447 448 #if DEBUG_VFS_INODE_LOAD_ALL 449 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 453 450 vfs_inode_get_name( inode_xp , name ); 454 451 if( DEBUG_VFS_INODE_LOAD_ALL < cycle ) … … 458 455 459 456 // compute number of pages 460 uint32_t npages = size >> CONFIG_PPM_PAGE_ SHIFT;457 uint32_t npages = size >> CONFIG_PPM_PAGE_ORDER; 461 458 if( (size & CONFIG_PPM_PAGE_MASK) || (size == 0) ) npages++; 462 459 … … 468 465 page_xp = mapper_get_page( XPTR( inode_cxy , mapper ), page_id ); 469 466 470 if( page_xp == XPTR_NULL ) return -1; 467 if( page_xp == XPTR_NULL ) 468 { 469 470 #if DEBUG_VFS_ERROR 471 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate memory for mapper / cycle %d\n", 472 __FUNCTION__, this->process->pid, this->trdid, cycle ); 473 #endif 474 return -1; 475 } 471 476 } 472 477 … … 534 539 xptr_t * dentry_xp ) 535 540 { 536 kmem_req_t req; // request to kernel memory allocator537 541 vfs_ctx_t * ctx = NULL; // context descriptor 538 542 vfs_dentry_t * dentry_ptr; // dentry descriptor (to be allocated) … … 557 561 558 562 #if DEBUG_VFS_ERROR 559 if( DEBUG_VFS_ERROR < cycle ) 560 printk("\n[ERROR] in %s : thread[%x,%x] / undefined fs_type %d\n", 561 __FUNCTION__ , this->process->pid, this->trdid, fs_type ); 563 printk("\n[ERROR] in %s : thread[%x,%x] / undefined fs_type %d / cycle %d\n", 564 __FUNCTION__ , this->process->pid, this->trdid, fs_type, cycle ); 562 565 #endif 563 566 return -1; … … 570 573 571 574 // allocate memory for dentry descriptor 572 req.type = KMEM_KCM; 573 req.order = bits_log2( sizeof(vfs_dentry_t) ); 574 req.flags = AF_KERNEL | AF_ZERO; 575 dentry_ptr = kmem_remote_alloc( cxy , &req ); 576 575 dentry_ptr = kmem_remote_alloc( cxy, 576 bits_log2(sizeof(vfs_dentry_t)), 577 AF_ZERO ); 577 578 if( dentry_ptr == NULL ) 578 579 { 579 580 580 581 #if DEBUG_VFS_ERROR 581 if( DEBUG_VFS_ERROR < cycle ) 582 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate dentry descriptor\n", 583 __FUNCTION__ , this->process->pid, this->trdid ); 582 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate dentry descriptor / cycle %d\n", 583 __FUNCTION__ , this->process->pid, this->trdid, cycle ); 584 584 #endif 585 585 return -1; … … 616 616 617 617 // release memory allocated to dentry 618 kmem_req_t req; 619 req.type = KMEM_KCM; 620 req.ptr = dentry_ptr; 621 kmem_remote_free( dentry_cxy , &req ); 618 kmem_remote_free( dentry_cxy, 619 dentry_ptr, 620 bits_log2(sizeof(vfs_dentry_t)) ); 622 621 623 622 } // end vfs_dentry_destroy() … … 634 633 { 635 634 vfs_file_t * file_ptr; 636 kmem_req_t req;637 635 uint32_t type; 638 636 mapper_t * mapper; … … 644 642 cxy_t inode_cxy = GET_CXY( inode_xp ); 645 643 644 #if DEBUG_VFS_FILE_CREATE || DEBUG_VFS_ERROR 645 thread_t * this = CURRENT_THREAD; 646 uint32_t cycle = (uint32_t)hal_get_cycles(); 647 #endif 648 646 649 #if DEBUG_VFS_FILE_CREATE 647 thread_t * this = CURRENT_THREAD;648 uint32_t cycle = (uint32_t)hal_get_cycles();649 650 if( DEBUG_VFS_FILE_CREATE < cycle ) 650 651 printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) / cycle %d\n", … … 653 654 654 655 // allocate memory for new file descriptor 655 req.type = KMEM_KCM; 656 req.order = bits_log2( sizeof(vfs_file_t) ); 657 req.flags = 
AF_KERNEL | AF_ZERO; 658 file_ptr = kmem_remote_alloc( inode_cxy , &req ); 659 660 if( file_ptr == NULL ) return -1; 656 file_ptr = kmem_remote_alloc( inode_cxy, 657 bits_log2(sizeof(vfs_file_t)), 658 AF_ZERO ); 659 660 if( file_ptr == NULL ) 661 { 662 663 #if DEBUG_VFS_ERROR 664 printk("\n[ERROR] in %s : thread[%x,%x] / cannot allocate memory / cycle %d\n", 665 __FUNCTION__ , this->process->pid, this->trdid, cycle ); 666 #endif 667 return -1; 668 } 661 669 662 670 // get type, ctx, mapper, and buffer from inode descriptor … … 718 726 719 727 // release file descriptor 720 kmem_req_t req; 721 req.type = KMEM_KCM; 722 req.ptr = file_ptr; 723 kmem_remote_free( file_cxy , &req ); 728 kmem_remote_free( file_cxy, 729 file_ptr, 730 bits_log2(sizeof(vfs_file_t)) ); 724 731 725 732 #if DEBUG_VFS_FILE_DESTROY … … 775 782 xptr_t lock_xp; // extended pointer on Inode Tree lock 776 783 784 #if DEBUG_VFS_OPEN || DEBUG_VFS_ERROR 785 uint32_t cycle = (uint32_t)hal_get_cycles(); 786 thread_t * this = CURRENT_THREAD; 787 pid_t pid = this->process->pid; 788 trdid_t trdid = this->trdid; 789 #endif 790 777 791 if( mode != 0 ) 778 792 { 779 printk("\n[ERROR] in %s : the mode parameter is not supported yet\n" ); 793 794 #if DEBUG_VFS_ERROR 795 printk("\n[ERROR] in %s : the mode parameter is not supported yet\n" ); 796 #endif 780 797 return -1; 781 798 } 782 783 thread_t * this = CURRENT_THREAD;784 process_t * process = this->process;785 799 786 800 // compute lookup working mode … … 790 804 if( (flags & O_EXCL ) ) lookup_mode |= VFS_LOOKUP_EXCL; 791 805 792 #if DEBUG_VFS_OPEN || DEBUG_VFS_ERROR793 uint32_t cycle = (uint32_t)hal_get_cycles();794 #endif795 796 806 #if DEBUG_VFS_OPEN 797 807 if( DEBUG_VFS_OPEN < cycle ) 798 808 printk("\n[%s] thread[%x,%x] enter for <%s> / root_inode (%x,%x) / cycle %d\n", 799 __FUNCTION__, p rocess->pid, this->trdid, path, GET_CXY(root_xp), GET_PTR(root_xp), cycle );809 __FUNCTION__, pid, trdid, path, GET_CXY(root_xp), GET_PTR(root_xp), cycle ); 800 810 #endif 801 811 … … 809 819 810 820 // build extended pointer on lock protecting Inode Tree 811 vfs_root_xp = process ->vfs_root_xp;821 vfs_root_xp = process_zero.vfs_root_xp; 812 822 vfs_root_ptr = GET_PTR( vfs_root_xp ); 813 823 vfs_root_cxy = GET_CXY( vfs_root_xp ); … … 831 841 832 842 #if DEBUG_VFS_ERROR 833 if( DEBUG_VFS_ERROR < cycle ) 834 printk("\n[ERROR] in %s : thread[%x,%x] cannot get inode <%s>\n", 835 __FUNCTION__ , process->pid, this->trdid , path ); 843 printk("\n[ERROR] in %s : thread[%x,%x] cannot get inode <%s> / cycle %d\n", 844 __FUNCTION__ , pid, trdid , path , cycle ); 836 845 #endif 837 846 return -1; … … 843 852 844 853 #if (DEBUG_VFS_OPEN & 1) 845 cycle = (uint32_t)hal_get_cycles();846 854 if( DEBUG_VFS_OPEN < cycle ) 847 855 printk("\n[%s] thread[%x,%x] found inode(%x,%x) for <%s>\n", 848 __FUNCTION__, p rocess->pid, this->trdid, inode_cxy, inode_ptr, path );856 __FUNCTION__, pid, trdid, inode_cxy, inode_ptr, path ); 849 857 #endif 850 858 … … 852 860 error = vfs_file_create( inode_xp , file_attr , &file_xp ); 853 861 854 if( error ) return error; 862 if( error ) 863 { 864 865 #if DEBUG_VFS_ERROR 866 printk("\n[ERROR] in %s : thread[%x,%x] cannot create file descriptor for <%s> / cycle %d\n", 867 __FUNCTION__ , pid, trdid , path , cycle ); 868 #endif 869 return error; 870 } 855 871 856 872 #if (DEBUG_VFS_OPEN & 1) 857 cycle = (uint32_t)hal_get_cycles();858 873 if( DEBUG_VFS_OPEN < cycle ) 859 874 printk("\n[%s] thread[%x,%x] created file descriptor (%x,%x) for <%s>\n", 860 __FUNCTION__, p rocess->pid, 
this->trdid, GET_CXY(file_xp), GET_PTR(file_xp), path );875 __FUNCTION__, pid, trdid, GET_CXY(file_xp), GET_PTR(file_xp), path ); 861 876 #endif 862 877 … … 864 879 error = process_fd_register( process_xp , file_xp , &file_id ); 865 880 866 if( error ) return error; 881 if( error ) 882 { 883 884 #if DEBUG_VFS_ERROR 885 printk("\n[ERROR] in %s : thread[%x,%x] cannot register file descriptor for <%s> / cycle %d\n", 886 __FUNCTION__ , pid, trdid , path , cycle ); 887 #endif 888 return error; 889 } 867 890 868 891 // get new file descriptor cluster and local pointer … … 891 914 if( DEBUG_VFS_OPEN < cycle ) 892 915 printk("\n[%s] thread[%x,%x] exit for <%s> / fdid %d / file(%x,%x) / cycle %d\n", 893 __FUNCTION__, p rocess->pid, this->trdid, path, file_id,916 __FUNCTION__, pid, trdid, path, file_id, 894 917 GET_CXY( file_xp ), GET_PTR( file_xp ), cycle ); 895 918 #endif … … 997 1020 998 1021 #if DEBUG_VFS_ERROR 999 if( DEBUG_VFS_ERROR < cycle ) 1000 printk("\n[ERROR] in %s thread[%x,%x] cannot move data", 1001 __FUNCTION__, this->process->pid, this->trdid ); 1022 printk("\n[ERROR] in %s thread[%x,%x] cannot move data / cycle %d", 1023 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1002 1024 #endif 1003 1025 return -1; … … 1008 1030 1009 1031 #if DEBUG_VFS_USER_MOVE 1010 cycle = (uint32_t)hal_get_cycles();1011 1032 if( cycle > DEBUG_VFS_USER_MOVE ) 1012 1033 { … … 1032 1053 cxy_t file_cxy; // remote file descriptor cluster 1033 1054 vfs_file_t * file_ptr; // remote file descriptor local pointer 1034 vfs_file_type_t inode_type; // remote file type1035 1055 uint32_t file_offset; // current offset in file 1036 1056 mapper_t * mapper_ptr; // remote mapper local pointer … … 1041 1061 assert( __FUNCTION__, (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL" ); 1042 1062 1063 #if DEBUG_VFS_KERNEL_MOVE || DEBUG_VFS_ERROR 1064 uint32_t cycle = (uint32_t)hal_get_cycles(); 1065 thread_t * this = CURRENT_THREAD; 1066 #endif 1067 1043 1068 // get cluster and local pointer on remote file descriptor 1044 1069 file_cxy = GET_CXY( file_xp ); 1045 1070 file_ptr = GET_PTR( file_xp ); 1046 1047 // get inode type from remote file descriptor1048 inode_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) );1049 1050 // check inode type1051 assert( __FUNCTION__, (inode_type == FILE_TYPE_REG), "bad file type" );1052 1071 1053 1072 // get mapper pointers and file offset from file descriptor … … 1064 1083 if( error ) 1065 1084 { 1066 printk("\n[ERROR] in %s : cannot move data", __FUNCTION__ ); 1085 1086 #if DEBUG_VFS_ERROR 1087 printk("\n[ERROR] in %s : thread[%x,%x] / cannot move data / cycle %d\n", 1088 __FUNCTION__ , this->process->pid , this->trdid , cycle ); 1089 #endif 1067 1090 return -1; 1091 1068 1092 } 1069 1093 1070 1094 #if DEBUG_VFS_KERNEL_MOVE 1071 1095 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 1072 uint32_t cycle = (uint32_t)hal_get_cycles();1073 thread_t * this = CURRENT_THREAD;1074 1096 cxy_t buffer_cxy = GET_CXY( buffer_xp ); 1075 1097 void * buffer_ptr = GET_PTR( buffer_xp ); … … 1109 1131 assert( __FUNCTION__, (new_offset != NULL ) , "new_offset == NULL" ); 1110 1132 1133 #if DEBUG_VFS_LSEEK || DEBUG_VFS_ERROR 1134 uint32_t cycle = (uint32_t)hal_get_cycles(); 1135 thread_t * this = CURRENT_THREAD; 1136 #endif 1137 1111 1138 // get cluster and local pointer on remote file descriptor 1112 1139 file_cxy = GET_CXY( file_xp ); … … 1138 1165 else 1139 1166 { 1140 printk("\n[ERROR] in %s : illegal whence value\n", __FUNCTION__ ); 1167 1168 #if DEBUG_VFS_ERROR 1169 printk("\n[ERROR] in %s : thread[%x,%x] 
/ undefined whence value / cycle %d", 1170 __FUNCTION__ , this->process->pid , this->trdid , cycle ); 1171 #endif 1141 1172 remote_rwlock_wr_release( lock_xp ); 1142 1173 return -1; … … 1191 1222 cluster_t * cluster = LOCAL_CLUSTER; 1192 1223 1224 #if DEBUG_VFS_CLOSE || DEBUG_VFS_ERROR 1225 uint32_t cycle = (uint32_t)hal_get_cycles(); 1226 #endif 1227 1193 1228 // get file name 1194 1229 vfs_file_get_name( file_xp , name ); 1195 1230 1196 1231 #if DEBUG_VFS_CLOSE 1197 uint32_t cycle = (uint32_t)hal_get_cycles();1198 1232 if( DEBUG_VFS_CLOSE < cycle ) 1199 1233 printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n", … … 1215 1249 if( error ) 1216 1250 { 1217 printk("\n[ERROR] in %s : cannot synchronise dirty pages for <%s>\n", 1218 __FUNCTION__, name ); 1251 1252 #if DEBUG_VFS_ERROR 1253 printk("\n[ERROR] in %s : thread[%x,%x] / cannot synchronise dirty pages for <%s> / cycle %d\n", 1254 __FUNCTION__ , this->process->pid , this->trdid , name , cycle ); 1255 #endif 1219 1256 return -1; 1220 1257 } … … 1222 1259 #if DEBUG_VFS_CLOSE 1223 1260 if( DEBUG_VFS_CLOSE < cycle ) 1224 printk("\n[%s] thread[%x,%x] synchronised mapper of <%s>to device\n",1261 printk("\n[%s] thread[%x,%x] synchronised <%s> mapper to device\n", 1225 1262 __FUNCTION__, process->pid, this->trdid, name ); 1226 1263 #endif … … 1259 1296 if( error ) 1260 1297 { 1261 printk("\n[ERROR] in %s : cannot update size in parent\n", 1262 __FUNCTION__ ); 1298 1299 #if DEBUG_VFS_ERROR 1300 printk("\n[ERROR] in %s : thread[%x,%x] / cannot update size in parent / cycle %d\n", 1301 __FUNCTION__ , this->process->pid , this->trdid , cycle ); 1302 #endif 1263 1303 return -1; 1264 1304 } … … 1277 1317 if( error ) 1278 1318 { 1279 printk("\n[ERROR] in %s : cannot synchronise parent mapper to device\n", 1280 __FUNCTION__ ); 1319 1320 #if DEBUG_VFS_ERROR 1321 printk("\n[ERROR] in %s : thread[%x,%x] / cannot synchronise mapper & device / cycle %d\n", 1322 __FUNCTION__ , this->process->pid , this->trdid , cycle ); 1323 #endif 1281 1324 return -1; 1282 1325 } … … 1367 1410 char last_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1368 1411 1412 #if DEBUG_VFS_MKDIR || DEBUG_VFS_ERROR 1413 uint32_t cycle = (uint32_t)hal_get_cycles(); 1414 #endif 1415 1369 1416 thread_t * this = CURRENT_THREAD; 1370 1417 process_t * process = this->process; … … 1373 1420 char root_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1374 1421 vfs_inode_get_name( root_xp , root_name ); 1375 uint32_t cycle = (uint32_t)hal_get_cycles();1376 1422 if( DEBUG_VFS_MKDIR < cycle ) 1377 1423 printk("\n[%s] thread[%x,%x] enter / root <%s> / path <%s> / cycle %d\n", … … 1396 1442 if( error ) 1397 1443 { 1444 1445 #if DEBUG_VFS_ERROR 1446 printk("\n[ERROR] in %s : thread[%x,%x] cannot get parent inode for <%s> / cycle %d\n", 1447 __FUNCTION__, process->pid, this->trdid, path , cycle ); 1448 #endif 1398 1449 remote_rwlock_wr_release( lock_xp ); 1399 printk("\n[ERROR] in %s : cannot get parent inode for <%s>\n",1400 __FUNCTION__, path );1401 1450 return -1; 1402 1451 } … … 1423 1472 if( error ) 1424 1473 { 1474 1475 #if DEBUG_VFS_ERROR 1476 printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry in cluster %x for <%s> / cycle %d\n", 1477 __FUNCTION__, process->pid, this->trdid, parent_cxy, path , cycle ); 1478 #endif 1425 1479 remote_rwlock_wr_release( lock_xp ); 1426 printk("\n[ERROR] in %s : cannot create new dentry in cluster %x for <%s>\n",1427 __FUNCTION__, parent_cxy, path );1428 1480 return -1; 1429 1481 } … … 1457 1509 if( error ) 1458 1510 { 1511 1512 #if DEBUG_VFS_ERROR 1513 printk("\n[ERROR] in %s 
: thread[%x,%x] cannot create inode in cluster %x for <%s> / cycle %d\n", 1514 __FUNCTION__, process->pid, this->trdid, parent_cxy, path , cycle ); 1515 #endif 1459 1516 remote_rwlock_wr_release( lock_xp ); 1460 printk("\n[ERROR] in %s : cannot create new inode in cluster %x for <%s>\n",1461 __FUNCTION__ , inode_cxy , path );1462 1517 vfs_dentry_destroy( dentry_xp ); 1463 1518 return -1; … … 1504 1559 if( error ) 1505 1560 { 1561 1562 #if DEBUG_VFS_ERROR 1563 printk("\n[ERROR] in %s : thread[%x,%x] cannot create <.> & <..> dentries for <%s> / cycle %d\n", 1564 __FUNCTION__, process->pid, this->trdid, path , cycle ); 1565 #endif 1566 vfs_remove_child_from_parent( dentry_xp ); 1506 1567 remote_rwlock_wr_release( lock_xp ); 1507 printk("\n[ERROR] in %s : cannot create new inode in cluster %x for <%s>\n",1508 __FUNCTION__ , inode_cxy , path );1509 vfs_dentry_destroy( dentry_xp );1510 1568 return -1; 1511 1569 } … … 1520 1578 if( error ) 1521 1579 { 1522 printk("\n[ERROR] in %s : cannot update parent directory for <%s>\n", 1523 __FUNCTION__, path ); 1580 1581 #if DEBUG_VFS_ERROR 1582 printk("\n[ERROR] in %s : thread[%x,%x] cannot update parent directory for <%s> / cycle %d\n", 1583 __FUNCTION__, process->pid, this->trdid, path , cycle ); 1584 #endif 1585 vfs_remove_child_from_parent( dentry_xp ); 1524 1586 return -1; 1525 1587 } … … 1527 1589 #if(DEBUG_VFS_MKDIR & 1) 1528 1590 if( DEBUG_VFS_MKDIR < cycle ) 1529 printk("\n[%s] thread[%x,%x] updated parent dir (mapper and IOC) for <%s>\n",1591 printk("\n[%s] thread[%x,%x] created <%s> dir (Inode-Tree, Mapper and IOC)\n", 1530 1592 __FUNCTION__, process->pid, this->trdid, path ); 1531 1593 #endif … … 1565 1627 char new_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1566 1628 1629 #if DEBUG_VFS_LINK || DEBUG_VFS_ERROR 1630 uint32_t cycle = (uint32_t)hal_get_cycles(); 1631 #endif 1632 1567 1633 thread_t * this = CURRENT_THREAD; 1568 1634 process_t * process = this->process; … … 1573 1639 vfs_inode_get_name( old_root_xp , old_root_name ); 1574 1640 vfs_inode_get_name( new_root_xp , new_root_name ); 1575 uint32_t cycle = (uint32_t)hal_get_cycles();1576 1641 if( DEBUG_VFS_LINK < cycle ) 1577 1642 printk("\n[%s] thread[%x,%x] enter / old_root <%s> / old_path <%s> / " … … 1598 1663 if( error ) 1599 1664 { 1665 1666 #if DEBUG_VFS_ERROR 1667 printk("\n[ERROR] in %s : thread[%x,%x] cannot get target inode for <%s> / cycle %d\n", 1668 __FUNCTION__, process->pid, this->trdid, old_path , cycle ); 1669 #endif 1600 1670 remote_rwlock_wr_release( lock_xp ); 1601 printk("\n[ERROR] in %s : cannot get target inode for <%s>\n",1602 __FUNCTION__, old_path );1603 1671 return -1; 1604 1672 } … … 1619 1687 if( error ) 1620 1688 { 1689 1690 #if DEBUG_VFS_ERROR 1691 printk("\n[ERROR] in %s : thread[%x,%x] cannot get parent inode for <%s> / cycle %d\n", 1692 __FUNCTION__, process->pid, this->trdid, new_path , cycle ); 1693 #endif 1621 1694 remote_rwlock_wr_release( lock_xp ); 1622 printk("\n[ERROR] in %s : cannot get parent inode for <%s>\n",1623 __FUNCTION__, new_path );1624 1695 return -1; 1625 1696 } … … 1655 1726 if( error ) 1656 1727 { 1728 1729 #if DEBUG_VFS_ERROR 1730 printk("\n[ERROR] in %s : thread[%x,%x] cannot create new dentry for <%s> / cycle %d\n", 1731 __FUNCTION__, process->pid, this->trdid, new_path , cycle ); 1732 #endif 1657 1733 remote_rwlock_wr_release( lock_xp ); 1658 printk("\n[ERROR] in %s : cannot create new dentry for <%s>\n",1659 __FUNCTION__, new_path );1660 1734 return -1; 1661 1735 } … … 1696 1770 if( error ) 1697 1771 { 1698 printk("\n[ERROR] in %s : 
cannot update new parent directory for <%s>\n", 1699 __FUNCTION__, new_path ); 1772 1773 #if DEBUG_VFS_ERROR 1774 printk("\n[ERROR] in %s : thread[%x,%x] cannot update parent directory for <%s> / cycle %d\n", 1775 __FUNCTION__, process->pid, this->trdid, new_path , cycle ); 1776 #endif 1700 1777 return -1; 1701 1778 } … … 1710 1787 else 1711 1788 { 1712 // release the lock protecting Inode Tree 1789 1790 #if DEBUG_VFS_ERROR 1791 printk("\n[ERROR] in %s : thread[%x,%x] / unsupported inode type %s / cycle %d\n", 1792 __FUNCTION__, process->pid, this->trdid, vfs_inode_type_str( inode_type ), cycle ); 1793 #endif 1713 1794 remote_rwlock_wr_release( lock_xp ); 1714 1715 printk("\n[ERROR] in %s : unsupported inode type %s\n",1716 __FUNCTION__ , vfs_inode_type_str( inode_type ) );1717 1795 return -1; 1718 1796 } … … 1746 1824 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; // name of parent directory 1747 1825 1826 #if DEBUG_VFS_UNLINK || DEBUG_VFS_ERROR 1827 uint32_t cycle = (uint32_t)hal_get_cycles(); 1828 #endif 1829 1748 1830 thread_t * this = CURRENT_THREAD; 1749 1831 process_t * process = this->process; 1750 1832 1751 1833 #if DEBUG_VFS_UNLINK 1752 uint32_t cycle = (uint32_t)hal_get_cycles();1753 1834 char root_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1754 1835 vfs_inode_get_name( root_xp , root_name ); … … 1775 1856 if( error ) 1776 1857 { 1858 1859 #if DEBUG_VFS_ERROR 1860 printk("\n[ERROR] in %s : thread[%x,%x] cannot get parent inode for <%s> / cycle %d\n", 1861 __FUNCTION__, process->pid, this->trdid, path , cycle ); 1862 #endif 1777 1863 remote_rwlock_wr_release( lock_xp ); 1778 printk("\n[ERROR] in %s : cannot get parent inode for <%s> in <%s>\n",1779 __FUNCTION__, child_name, path );1780 1864 return -1; 1781 1865 } … … 1824 1908 if( error ) 1825 1909 { 1826 printk("\n[ERROR] in %s : cannot create inode <%s> in Inode Tree\n", 1827 __FUNCTION__ , child_name ); 1910 1911 #if DEBUG_VFS_ERROR 1912 printk("\n[ERROR] in %s : thread[%x,%x] cannot create node <%s> in Inode_Tree / cycle %d\n", 1913 __FUNCTION__, process->pid, this->trdid, path, cycle ); 1914 #endif 1915 remote_rwlock_wr_release( lock_xp ); 1828 1916 return -1; 1829 1917 } … … 1839 1927 if ( error ) 1840 1928 { 1841 printk("\n[ERROR] in %s : cannot get entry <%s> in parent <%s> mapper\n", 1842 __FUNCTION__ , child_name, parent_name ); 1929 1930 #if DEBUG_VFS_ERROR 1931 printk("\n[ERROR] in %s : thread[%x,%x] cannot get dentry <%s> in parent <%s> mapper / cycle %d\n", 1932 __FUNCTION__, process->pid, this->trdid, child_name, parent_name, cycle ); 1933 #endif 1934 remote_rwlock_wr_release( lock_xp ); 1843 1935 return -1; 1844 1936 } … … 1861 1953 } 1862 1954 1863 // At this point the Inode Tree contains the target dentry and child inode1955 // At this point the Inode-Tree contains the parent dentry and child inode 1864 1956 // we can safely remove this dentry from both the parent mapper, and the Inode Tree. 
1865 1957 … … 1897 1989 if( inode_children != 0 ) 1898 1990 { 1991 1992 #if DEBUG_VFS_ERROR 1993 printk("\n[ERROR] in %s : thread[%x,%x] cannot remove <%s> inode that has children / cycle %d\n", 1994 __FUNCTION__, process->pid, this->trdid, path, cycle ); 1995 #endif 1899 1996 remote_rwlock_wr_release( lock_xp ); 1900 printk("\n[ERROR] in %s : cannot remove <%s> inode that has children\n",1901 __FUNCTION__, path );1902 1997 return -1; 1903 1998 } … … 1908 2003 if( error ) 1909 2004 { 2005 2006 #if DEBUG_VFS_ERROR 2007 printk("\n[ERROR] in %s : thread[%x,%x] cannot update FAT mapper to remove <s> / cycle %d\n", 2008 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2009 #endif 1910 2010 remote_rwlock_wr_release( lock_xp ); 1911 printk("\n[ERROR] in %s : cannot update FAT mapper to remove <%s> inode\n",1912 __FUNCTION__ , path );1913 2011 return -1; 1914 2012 } … … 1927 2025 if( error ) 1928 2026 { 2027 2028 #if DEBUG_VFS_ERROR 2029 printk("\n[ERROR] in %s : thread[%x,%x] cannot update parent directory on IOC for <s> / cycle %d\n", 2030 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2031 #endif 1929 2032 remote_rwlock_wr_release( lock_xp ); 1930 printk("\n[ERROR] in %s : cannot update dentry on device for <%s>\n",1931 __FUNCTION__ , path );1932 2033 return -1; 1933 2034 } … … 1979 2080 else 1980 2081 { 2082 2083 #if DEBUG_VFS_ERROR 2084 printk("\n[ERROR] in %s : thread[%x,%x] unsupported inode type %d for <s> / cycle %d\n", 2085 __FUNCTION__, process->pid, this->trdid, vfs_inode_type_str( inode_type ), path, cycle ); 2086 #endif 1981 2087 remote_rwlock_wr_release( lock_xp ); 1982 printk("\n[ERROR] in %s : unsupported inode type %s\n",1983 __FUNCTION__ , vfs_inode_type_str( inode_type ) );1984 2088 return -1; 1985 2089 } … … 2004 2108 process_t * process = this->process; 2005 2109 2110 #if DEBUG_VFS_STAT || DEBUG_VFS_ERROR 2111 uint32_t cycle = (uint32_t)hal_get_cycles(); 2112 #endif 2113 2006 2114 // build extended pointer on lock protecting Inode Tree (in VFS root inode) 2007 2115 vfs_root_xp = process->vfs_root_xp; … … 2025 2133 if( error ) 2026 2134 { 2027 printk("\n[ERROR] in %s : cannot found inode <%s>\n", 2028 __FUNCTION__ , path ); 2135 2136 #if DEBUG_VFS_ERROR 2137 printk("\n[ERROR] in %s : thread[%x,%x] cannot found inode <%s> / cycle %d\n", 2138 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2139 #endif 2029 2140 return -1; 2030 2141 } … … 2050 2161 2051 2162 #if DEBUG_VFS_STAT 2052 uint32_t cycle = (uint32_t)hal_get_cycles();2053 2163 if( DEBUG_VFS_STAT < cycle ) 2054 printk("\n[%s] thread[%x,%x] set stat %x for inode %x in cluster %x / cycle %d\n" 2055 " %s / inum %d / size %d\n", 2056 __FUNCTION__, process->pid, this->trdid, st, inode_ptr, inode_cxy, cycle, 2057 vfs_inode_type_str( type ), inum, size ); 2164 printk("\n[%s] thread[%x,%x] set stat for <%s> / %s / inum %d / size %d / cycle %d\n", 2165 __FUNCTION__, process->pid, this->trdid, path, vfs_inode_type_str( type ), inum, size, cycle ); 2058 2166 #endif 2059 2167 … … 2084 2192 process_t * process = this->process; 2085 2193 2086 #if DEBUG_VFS_CHDIR 2087 uint32_t cycle = (uint32_t)hal_get_cycles(); 2088 if( DEBUG_VFS_CHDIR < cycle ) 2089 printk("\n[%s] thread[%x,%x] enter for path <%s> / cycle %d\n", 2090 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2194 #if DEBUG_VFS_CHDIR || DEBUG_VFS_ERROR 2195 uint32_t cycle = (uint32_t)hal_get_cycles(); 2091 2196 #endif 2092 2197 … … 2112 2217 if( error ) 2113 2218 { 2114 printk("\n[ERROR] in %s : <%s> not found\n", 2115 __FUNCTION__, path ); 2219 
2220 #if DEBUG_VFS_ERROR 2221 printk("\n[ERROR] in %s : thread[%x,%x] cannot found inode <%s> / cycle %d\n", 2222 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2223 #endif 2116 2224 return -1; 2117 2225 } … … 2124 2232 if( inode_type != FILE_TYPE_DIR ) 2125 2233 { 2126 printk("\n[ERROR] in %s : <%s> is not a directory\n", 2127 __FUNCTION__, path ); 2234 2235 #if DEBUG_VFS_ERROR 2236 printk("\n[ERROR] in %s : thread[%x,%x] / <%s> is not a directory / cycle %d\n", 2237 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2238 #endif 2128 2239 return -1; 2129 2240 } … … 2146 2257 2147 2258 #if DEBUG_VFS_CHDIR 2148 cycle = (uint32_t)hal_get_cycles();2149 2259 if( DEBUG_VFS_CHDIR < cycle ) 2150 printk("\n[%s] thread[%x,%x] exit : inode (%x,%x) / &cwd_xp (%x,%x) / cycle %d\n", 2151 __FUNCTION__, process->pid, this->trdid, inode_cxy, inode_ptr, 2152 GET_CXY(cwd_xp_xp), GET_PTR(cwd_xp_xp), cycle ); 2260 printk("\n[%s] thread[%x,%x] set new cwd <%s> / inode_xp (%x,%x) / cycle %d\n", 2261 __FUNCTION__, process->pid, this->trdid, path, inode_cxy, inode_ptr, cycle ); 2153 2262 #endif 2154 2263 … … 2163 2272 { 2164 2273 error_t error; 2165 xptr_t inode_xp; // extended pointer on target inode 2166 cxy_t inode_cxy; // inode cluster identifier 2167 vfs_inode_t * inode_ptr; // inode local pointer 2168 2169 // check lookup working mode 2170 assert( __FUNCTION__, (rights == 0), "access rights non implemented yet" ); 2171 2274 xptr_t vfs_root_xp; // extended pointer on VFS root inode 2275 vfs_inode_t * vfs_root_ptr; // local_pointer on VFS root inode 2276 cxy_t vfs_root_cxy; // VFS root inode cluster identifier 2277 xptr_t main_lock_xp; // extended pointer on lock protecting Inode Tree 2278 xptr_t inode_xp; // extended pointer on target inode 2279 cxy_t inode_cxy; // inode cluster identifier 2280 vfs_inode_t * inode_ptr; // inode local pointer 2281 vfs_file_type_t inode_type; // inode type 2282 2283 thread_t * this = CURRENT_THREAD; 2284 process_t * process = this->process; 2285 2286 #if DEBUG_VFS_CHMOD || DEBUG_VFS_ERROR 2287 uint32_t cycle = (uint32_t)hal_get_cycles(); 2288 #endif 2289 2290 // build extended pointer on lock protecting Inode Tree (in VFS root inode) 2291 vfs_root_xp = process->vfs_root_xp; 2292 vfs_root_ptr = GET_PTR( vfs_root_xp ); 2293 vfs_root_cxy = GET_CXY( vfs_root_xp ); 2294 main_lock_xp = XPTR( vfs_root_cxy , &vfs_root_ptr->main_lock ); 2295 2296 // take lock protecting Inode Tree in read mode 2297 remote_rwlock_rd_acquire( main_lock_xp ); 2298 2172 2299 // get extended pointer on target inode 2173 2300 error = vfs_lookup( cwd_xp, … … 2177 2304 NULL ); 2178 2305 2179 if( error ) return error; 2306 // release lock protecting Inode Tree in read mode 2307 remote_rwlock_rd_release( main_lock_xp ); 2308 2309 if( error ) 2310 { 2311 2312 #if DEBUG_VFS_ERROR 2313 printk("\n[ERROR] in %s : thread[%x,%x] cannot found inode <%s> / cycle %d\n", 2314 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2315 #endif 2316 return -1; 2317 } 2180 2318 2181 2319 // get inode cluster and local pointer … … 2184 2322 2185 2323 // get inode type from remote inode 2186 //inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) );2324 inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) ); 2187 2325 2188 2326 // TODO finalize implementation 2189 2327 2190 assert( __FUNCTION__, false , "not implemented" );2328 assert( __FUNCTION__, false , "not fully implemented" ); 2191 2329 2192 2330 // set inode rights in remote inode 2193 2331 hal_remote_s32( XPTR( inode_cxy , 
&inode_ptr->rights ) , rights ); 2332 2333 #if DEBUG_VFS_CHMOD 2334 if( DEBUG_VFS_CHMOD < cycle ) 2335 printk("\n[%s] thread[%x,%x] set access rights %x for <%s> / inode_xp (%x,%x) / cycle %d\n", 2336 __FUNCTION__, process->pid, this->trdid, rights, path, inode_cxy, inode_ptr, cycle ); 2337 #endif 2194 2338 2195 2339 return 0; … … 2212 2356 thread_t * this = CURRENT_THREAD; 2213 2357 process_t * process = this->process; 2358 2359 #if DEBUG_VFS_MKFIFO || DEBUG_VFS_ERROR 2360 uint32_t cycle = (uint32_t)hal_get_cycles(); 2361 #endif 2214 2362 2215 2363 // build extended pointer on lock protecting Inode Tree … … 2230 2378 if( error ) 2231 2379 { 2232 printk("\n[ERROR] in %s : cannot get parent inode for <%s> path\n", 2233 __FUNCTION__ , path ); 2380 2381 #if DEBUG_VFS_ERROR 2382 printk("\n[ERROR] in %s : thread[%x,%x] cannot found parent inode for <%s> / cycle %d\n", 2383 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2384 #endif 2385 remote_rwlock_wr_release( vfs_lock_xp ); 2234 2386 return -1; 2235 2387 } … … 2259 2411 if( error ) 2260 2412 { 2261 printk("\n[ERROR] in %s : cannot create fifo inode for <%s> path\n", 2262 __FUNCTION__ , path ); 2413 2414 #if DEBUG_VFS_ERROR 2415 printk("\n[ERROR] in %s : thread[%x,%x] cannot create fifo inode for <%s> / cycle %d\n", 2416 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2417 #endif 2418 remote_rwlock_wr_release( vfs_lock_xp ); 2263 2419 return -1; 2264 2420 } … … 2270 2426 if( pipe == NULL ) 2271 2427 { 2272 printk("\n[ERROR] in %s : cannot create pipe for <%s> path\n", 2273 __FUNCTION__ , path ); 2428 2429 #if DEBUG_VFS_ERROR 2430 printk("\n[ERROR] in %s : thread[%x,%x] cannot create pipe for <%s> / cycle %d\n", 2431 __FUNCTION__, process->pid, this->trdid, path, cycle ); 2432 #endif 2433 vfs_remove_child_from_parent( fifo_dentry_xp ); 2434 remote_rwlock_wr_release( vfs_lock_xp ); 2274 2435 return -1; 2275 2436 } … … 2282 2443 // release the lock protecting the Inode-Tree from write mode 2283 2444 remote_rwlock_wr_release( vfs_lock_xp ); 2445 2446 #if DEBUG_VFS_MKDIR 2447 if( DEBUG_VFS_MKDIR < cycle ) 2448 printk("\n[%s] thread[%x,%x] creared fifo <%s> / inode_xp [%x,%x] / cycle %d\n", 2449 __FUNCTION__, process->pid, this->trdid, path, fifo_cxy, fifo_inode_ptr, cycle ); 2450 #endif 2284 2451 2285 2452 return 0; … … 2746 2913 2747 2914 #if DEBUG_VFS_ERROR 2748 if( DEBUG_VFS_ERROR < cycle ) 2749 printk("\n[ERROR] in %s : thread[%x,%x] cannot create inode <%s> in path <%s>\n", 2750 __FUNCTION__ , process->pid, this->trdid, name, pathname ); 2915 printk("\n[ERROR] in %s : thread[%x,%x] cannot create inode <%s> in path <%s> / cycle %d\n", 2916 __FUNCTION__ , process->pid, this->trdid, name, pathname, cycle ); 2751 2917 #endif 2752 2918 return -1; … … 2777 2943 2778 2944 #if DEBUG_VFS_ERROR 2779 if( DEBUG_VFS_ERROR < cycle ) 2780 printk("\n[ERROR] in %s : thread[%x,%x] cannot add dentry <%s> in parent dir\n", 2781 __FUNCTION__, process->pid, this->trdid, name ); 2945 printk("\n[ERROR] in %s : thread[%x,%x] cannot add dentry <%s> in parent dir / cycle %d\n", 2946 __FUNCTION__, process->pid, this->trdid, name, cycle ); 2782 2947 #endif 2783 2948 vfs_remove_child_from_parent( dentry_xp ); … … 2795 2960 2796 2961 #if DEBUG_VFS_ERROR 2797 if( DEBUG_VFS_ERROR < cycle ) 2798 printk("\n[ERROR] in %s : thread[%x,%x] cannot found node <%s> in parent for <%s>\n", 2799 __FUNCTION__ , process->pid, this->trdid, name, pathname ); 2962 printk("\n[ERROR] in %s : thread[%x,%x] cannot found node <%s> in parent for <%s> / cycle %d\n", 2963 
__FUNCTION__ , process->pid, this->trdid, name, pathname, cycle ); 2800 2964 #endif 2801 2965 vfs_remove_child_from_parent( dentry_xp ); … … 2810 2974 2811 2975 #if DEBUG_VFS_ERROR 2812 if( DEBUG_VFS_ERROR < cycle ) 2813 printk("\n[ERROR] in %s : thread[%x,%x] found an existing node <%s> %\n", 2814 __FUNCTION__ , process->pid, this->trdid, pathname ); 2976 printk("\n[ERROR] in %s : thread[%x,%x] found an existing node <%s> / cycle %d\n", 2977 __FUNCTION__ , process->pid, this->trdid, pathname, cycle ); 2815 2978 #endif 2816 2979 return -1; … … 2831 2994 { 2832 2995 #if DEBUG_VFS_ERROR 2833 if( DEBUG_VFS_ERROR < cycle ) 2834 printk("\n[ERROR] in %s : thread[%x,%x] cannot load <%s> from device\n", 2835 __FUNCTION__ , process->pid, this->trdid, name ); 2996 printk("\n[ERROR] in %s : thread[%x,%x] cannot load <%s> from device / cycle %d\n", 2997 __FUNCTION__ , process->pid, this->trdid, name, cycle ); 2836 2998 #endif 2837 2999 vfs_remove_child_from_parent( dentry_xp ); … … 2864 3026 2865 3027 #if DEBUG_VFS_ERROR 2866 if( DEBUG_VFS_ERROR < cycle ) 2867 printk("\n[ERROR] in %s : thread[%x,%x] found an existing node <%s>\n", 2868 __FUNCTION__ , process->pid, this->trdid, pathname ); 3028 printk("\n[ERROR] in %s : thread[%x,%x] found an existing node <%s> / cycle %d\n", 3029 __FUNCTION__ , process->pid, this->trdid, pathname, cycle ); 2869 3030 #endif 2870 3031 return -1; … … 2946 3107 xptr_t children_entry_xp; // extended pointer on dentry "children" field 2947 3108 3109 #if DEBUG_VFS_ADD_SPECIAL || DEBUG_VFS_ERROR 3110 uint32_t cycle = (uint32_t)hal_get_cycles(); 3111 thread_t * this = CURRENT_THREAD; 3112 process_t * process = this->process; 3113 #endif 3114 2948 3115 #if DEBUG_VFS_ADD_SPECIAL 2949 uint32_t cycle = (uint32_t)hal_get_cycles();2950 thread_t * this = CURRENT_THREAD;2951 3116 char child_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2952 3117 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; … … 2955 3120 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 2956 3121 printk("\n[%s] thread[%x,%x] enter for child <%s> in parent <%s> / cycle %d\n", 2957 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name, cycle );3122 __FUNCTION__, process->pid, this->trdid, child_name, parent_name, cycle ); 2958 3123 #endif 2959 3124 … … 2973 3138 if( error ) 2974 3139 { 2975 printk("\n[ERROR] in %s : cannot create dentry <.> in cluster %x\n", 2976 __FUNCTION__ , child_cxy ); 3140 3141 #if DEBUG_VFS_ERROR 3142 printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry <.> in cluster %x / cycle %d\n", 3143 __FUNCTION__ , process->pid, this->trdid, child_cxy, cycle ); 3144 #endif 2977 3145 return -1; 2978 3146 } … … 2982 3150 2983 3151 #if(DEBUG_VFS_ADD_SPECIAL & 1) 2984 cycle = (uint32_t)hal_get_cycles();2985 3152 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 2986 3153 printk("\n[%s] thread[%x,%x] created dentry <.> (%x,%x) / cycle %d\n", 2987 __FUNCTION__, this->process->pid, this->trdid, child_cxy, dentry_ptr, cycle );3154 __FUNCTION__, process->pid, this->trdid, child_cxy, dentry_ptr, cycle ); 2988 3155 #endif 2989 3156 … … 2996 3163 if( error ) 2997 3164 { 2998 printk("\n[ERROR] in %s : cannot register dentry <.> in xhtab\n", 2999 __FUNCTION__ ); 3165 3166 #if DEBUG_VFS_ERROR 3167 printk("\n[ERROR] in %s : thread[%x,%x] cannot register dentry <.> in xhtab / cycle %d\n", 3168 __FUNCTION__ , process->pid, this->trdid, cycle ); 3169 #endif 3000 3170 return -1; 3001 3171 } … … 3009 3179 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 3010 3180 printk("\n[%s] thread[%x,%x] linked dentry <.> to parent and child inodes / cycle %d\n", 
3011 __FUNCTION__, this->process->pid, this->trdid, cycle );3181 __FUNCTION__, process->pid, this->trdid, cycle ); 3012 3182 #endif 3013 3183 … … 3020 3190 if( error ) 3021 3191 { 3022 printk("\n[ERROR] in %s : cannot introduce dentry <..> in mapper %x\n", 3023 __FUNCTION__ ); 3192 3193 #if DEBUG_VFS_ERROR 3194 printk("\n[ERROR] in %s : thread[%x,%x] cannot register dentry <.> in mapper / cycle %d\n", 3195 __FUNCTION__ , process->pid, this->trdid, cycle ); 3196 #endif 3024 3197 return -1; 3025 3198 } … … 3029 3202 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 3030 3203 printk("\n[%s] thread[%x,%x] registered dentry <.> in child mapper / cycle %d\n", 3031 __FUNCTION__, this->process->pid, this->trdid, cycle );3204 __FUNCTION__, process->pid, this->trdid, cycle ); 3032 3205 #endif 3033 3206 … … 3041 3214 if( error ) 3042 3215 { 3043 printk("\n[ERROR] in %s : cannot create dentry <..> in cluster %x\n", 3044 __FUNCTION__ , child_cxy ); 3216 3217 #if DEBUG_VFS_ERROR 3218 printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry <..> in cluster %x / cycle %d\n", 3219 __FUNCTION__ , process->pid, this->trdid, child_cxy, cycle ); 3220 #endif 3045 3221 return -1; 3046 3222 } … … 3053 3229 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 3054 3230 printk("\n[%s] thread[%x,%x] created dentry <..> (%x,%x) / cycle %d\n", 3055 __FUNCTION__, this->process->pid, this->trdid, child_cxy, dentry_ptr, cycle );3231 __FUNCTION__, process->pid, this->trdid, child_cxy, dentry_ptr, cycle ); 3056 3232 #endif 3057 3233 … … 3059 3235 children_xhtab_xp = XPTR( child_cxy , &child_ptr->children ); 3060 3236 children_entry_xp = XPTR( child_cxy , &dentry_ptr->children ); 3237 3061 3238 error = xhtab_insert( children_xhtab_xp , ".." , children_entry_xp ); 3239 3062 3240 if( error ) 3063 3241 { 3064 printk("\n[ERROR] in %s : cannot register dentry <..> in xhtab\n", 3065 __FUNCTION__ ); 3242 3243 #if DEBUG_VFS_ERROR 3244 printk("\n[ERROR] in %s : thread[%x,%x] cannot register dentry <..> in xhtab / cycle %d\n", 3245 __FUNCTION__ , process->pid, this->trdid, cycle ); 3246 #endif 3066 3247 return -1; 3067 3248 } … … 3077 3258 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 3078 3259 printk("\n[%s] thread[%x,%x] linked dentry <..> to parent and child inodes / cycle %d\n", 3079 __FUNCTION__, this->process->pid, this->trdid, cycle );3260 __FUNCTION__, process->pid, this->trdid, cycle ); 3080 3261 #endif 3081 3262 … … 3088 3269 if( error ) 3089 3270 { 3090 printk("\n[ERROR] in %s : cannot introduce dentry <..> in mapper %x\n", 3091 __FUNCTION__ ); 3271 3272 #if DEBUG_VFS_ERROR 3273 printk("\n[ERROR] in %s : thread[%x,%x] cannot register dentry <..> in mapper / cycle %d\n", 3274 __FUNCTION__ , process->pid, this->trdid, cycle ); 3275 #endif 3092 3276 return -1; 3093 3277 } … … 3097 3281 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 3098 3282 printk("\n[%s] thread[%x,%x] registered dentry <..> in child mapper / cycle %d\n", 3099 __FUNCTION__, this->process->pid, this->trdid, cycle );3283 __FUNCTION__, process->pid, this->trdid, cycle ); 3100 3284 #endif 3101 3285 … … 3106 3290 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 3107 3291 printk("\n[%s] thread[%x,%x] exit for child <%s> in parent <%s> / cycle %d\n", 3108 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name, cycle );3292 __FUNCTION__, process->pid, this->trdid, child_name, parent_name, cycle ); 3109 3293 #endif 3110 3294 … … 3139 3323 3140 3324 #if DEBUG_VFS_GET_PATH 3141 uint32_t cycle = (uint32_t)hal_get_cycles(); 3325 uint32_t cycle = (uint32_t)hal_get_cycles(); 3326 #endif 3327 3328 #if 
DEBUG_VFS_GET_PATH 3142 3329 if( DEBUG_VFS_GET_PATH < cycle ) 3143 3330 printk("\n[%s] thread[%x,%x] enter : inode (%x,%x) / cycle %d\n", … … 3296 3483 3297 3484 #if DEBUG_VFS_ERROR 3298 if( DEBUG_VFS_ERROR < cycle ) 3299 printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry <%s> in cluster %x\n", 3300 __FUNCTION__ , this->process->pid, this->trdid , name , parent_cxy ); 3485 printk("\n[ERROR] in %s : thread[%x,%x] cannot create dentry <%s> in cluster %x / cycle %d\n", 3486 __FUNCTION__ , this->process->pid, this->trdid , name , parent_cxy, cycle ); 3301 3487 #endif 3302 3488 return -1; … … 3330 3516 3331 3517 #if DEBUG_VFS_ERROR 3332 if( DEBUG_VFS_ERROR < cycle ) 3333 printk("\n[ERROR] in %s : thread[%x,%x] cannot create inode in cluster %x\n", 3334 __FUNCTION__ , this->process->pid , this->trdid , child_cxy ); 3518 printk("\n[ERROR] in %s : thread[%x,%x] cannot create inode in cluster %x / cycle %d\n", 3519 __FUNCTION__ , this->process->pid , this->trdid , child_cxy, cycle ); 3335 3520 #endif 3336 3521 … … 3428 3613 3429 3614 #if DEBUG_VFS_REMOVE_CHILD 3430 if( DEBUG_VFS_REMOVE_CHILD < cycle )3431 3615 printk("\n[%s] thread[%x,%x] enter for dentry[%x,%x] / inode[%x,%x] / cycle %d\n", 3432 3616 __FUNCTION__, this->process->pid, this->trdid, … … 3441 3625 if( error ) 3442 3626 { 3443 printk("\n[WARNING] in %s] thread[%x,%x] cannot remove dentry %s from parent dir\n", 3444 __FUNCTION__, this->process->pid, this->trdid, dentry_name ); 3445 } 3446 3447 #if DEBUG_VFS_REMOVE_CHILD 3448 cycle = (uint32_t)hal_get_cycles(); 3627 printk("\n[WARNING] in %s : thread[%x,%x] cannot remove dentry <%s> from parent\n", 3628 __FUNCTION__ , this->process->pid , this->trdid , dentry_name ); 3629 } 3630 3631 #if(DEBUG_VFS_REMOVE_CHILD & 1) 3449 3632 if( DEBUG_VFS_REMOVE_CHILD < cycle ) 3450 printk("\n[%s] thread[%x,%x] removed dentry from parent inode / cycle %d\n",3451 __FUNCTION__, this->process->pid, this->trdid , cycle);3633 printk("\n[%s] thread[%x,%x] removed dentry from parent inode\n", 3634 __FUNCTION__, this->process->pid, this->trdid ); 3452 3635 #endif 3453 3636 … … 3458 3641 links = hal_remote_atomic_add( XPTR( child_cxy , &child_inode_ptr->links ) , -1 ); 3459 3642 3460 #if DEBUG_VFS_REMOVE_CHILD 3461 cycle = (uint32_t)hal_get_cycles(); 3643 #if(DEBUG_VFS_REMOVE_CHILD & 1) 3462 3644 if( DEBUG_VFS_REMOVE_CHILD < cycle ) 3463 printk("\n[%s] thread[%x,%x] removed dentry from child inode / cycle %d\n",3464 __FUNCTION__, this->process->pid, this->trdid , cycle);3645 printk("\n[%s] thread[%x,%x] removed dentry from child inode\n", 3646 __FUNCTION__, this->process->pid, this->trdid ); 3465 3647 #endif 3466 3648 … … 3723 3905 assert( __FUNCTION__, (array != NULL) , "child pointer is NULL"); 3724 3906 assert( __FUNCTION__, (detailed == false) , "detailed argument not supported\n"); 3725 3726 // check inode type 3727 if( inode->type != FILE_TYPE_DIR ) 3728 { 3729 printk("\n[ERROR] in %s : target inode is not a directory\n", 3730 __FUNCTION__ ); 3731 return -1; 3732 } 3907 assert( __FUNCTION__, (inode->type == FILE_TYPE_DIR), "inode is not a directory\n"); 3733 3908 3734 3909 // get parent inode FS type -
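
Note on the recurring change in vfs.c above: the old kmem_req_t request structure is replaced everywhere by direct calls to kmem_remote_alloc() / kmem_remote_free(), with the object size encoded as a power-of-two order. A minimal hedged sketch of the new pattern, with the prototypes assumed from the call sites shown in the hunks above (order = bits_log2 of the object size, AF_ZERO to obtain a zeroed buffer):

    // hedged sketch : allocate, use, and release a zeroed vfs_dentry_t in cluster <cxy>
    vfs_dentry_t * dentry_ptr = kmem_remote_alloc( cxy,
                                                   bits_log2( sizeof(vfs_dentry_t) ),
                                                   AF_ZERO );
    if( dentry_ptr == NULL ) return -1;

    // ... initialise and use the descriptor ...

    // the release call must use the same order argument as the allocation
    kmem_remote_free( cxy , dentry_ptr , bits_log2( sizeof(vfs_dentry_t) ) );
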
trunk/kernel/fs/vfs.h
r673 r683 168 168 *****************************************************************************************/ 169 169 170 /* this enum define the VFS inode types values*/170 /* this enum define the VFS file types */ 171 171 /* WARNING : this enum must be kept consistent with macros in <shared_stat.h> file */ 172 172 /* and with types in <shared_dirent.h> file. */ … … 174 174 typedef enum 175 175 { 176 FILE_TYPE_REG = 0, /*! regular file */177 FILE_TYPE_DIR = 1, /*! directory */178 FILE_TYPE_FIFO = 2, /*! POSIX named fifo */179 FILE_TYPE_PIPE = 3, /*! POSIX anonymous pipe */180 FILE_TYPE_SOCK = 4, /*! POSIX anonymous socket */181 FILE_TYPE_DEV = 5, /*! character device */182 FILE_TYPE_BLK = 6, /*! block device */183 FILE_TYPE_SYML = 7, /*! symbolic link */176 FILE_TYPE_REG = 0, /*! regular file */ 177 FILE_TYPE_DIR = 1, /*! directory */ 178 FILE_TYPE_FIFO = 2, /*! POSIX named fifo */ 179 FILE_TYPE_PIPE = 3, /*! POSIX anonymous pipe */ 180 FILE_TYPE_SOCK = 4, /*! POSIX anonymous socket */ 181 FILE_TYPE_DEV = 5, /*! character device */ 182 FILE_TYPE_BLK = 6, /*! block device */ 183 FILE_TYPE_SYML = 7, /*! symbolic link */ 184 184 } 185 185 vfs_file_type_t; … … 200 200 struct vfs_ctx_s * ctx; /*! local pointer on FS context. */ 201 201 vfs_file_attr_t attr; /*! file attributes bit vector (see above) */ 202 vfs_file_type_t type; /*! same type as inode */202 vfs_file_type_t type; /*! same type as inode */ 203 203 uint32_t offset; /*! seek position in file */ 204 204 remote_rwlock_t lock; /*! protect offset modifications */ … … 285 285 uint32_t inum; /*! inode identifier (unique in file system) */ 286 286 uint32_t attr; /*! inode attributes (see above) */ 287 vfs_file_type_t type; /*! inode type (see above)*/287 vfs_file_type_t type; /*! inode type (see vfs_file_t) */ 288 288 uint32_t size; /*! number of bytes */ 289 289 uint32_t uid; /*! user owner identifier */ … … 829 829 /****************************************************************************************** 830 830 * This function returns, in the structure pointed by the <st> pointer, various 831 * informations on the inodeidentified by the <root_inode_xp> and <patname> arguments.831 * informations on the file identified by the <root_inode_xp> and <patname> arguments. 832 832 * 833 833 * TODO : only partially implemented yet (only size and inum fields). -
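
As a short illustration of the vfs_file_type_t enum above, here is a hedged helper showing how the kernel code in vfs.c tests the type of a possibly remote inode. The helper name is hypothetical; the remote access follows the hal_remote_l32() pattern used in the vfs.c hunks of this changeset:

    // hypothetical helper : returns true when the target inode is a directory
    static inline bool_t vfs_inode_is_dir( xptr_t inode_xp )
    {
        cxy_t         inode_cxy = GET_CXY( inode_xp );
        vfs_inode_t * inode_ptr = GET_PTR( inode_xp );

        // read the <type> field from the (possibly remote) inode descriptor
        vfs_file_type_t type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) );

        return (type == FILE_TYPE_DIR);
    }
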
trunk/kernel/kern/alarm.c
r669 r683 31 31 32 32 //////////////////////////////////////////////////////////////////////////////////////////// 33 // This static function registers the alarm identified ny the <new_alarm> argument33 // This static function registers the alarm identified by the <alarm> & <cxy> arguments 34 34 // in the list of alarms rooted in the core identified by the <core> argument. 35 35 // When the existing list of alarms is not empty, it scan the list to insert the new 36 36 // alarm in the right place to respect the absolute dates ordering. 37 37 //////////////////////////////////////////////////////////////////////////////////////////// 38 // @ new_alarm : local pointer on the new alarm. 39 // @ core : local pointer on the target core. 38 // @ cxy : cluster containing both the new alarm and the core. 39 // @ alarm : local pointer on the alarm. 40 // @ core : local pointer on the core. 40 41 //////////////////////////////////////////////////////////////////////////////////////////// 41 static void alarm_register( alarm_t * new_alarm, 42 static void alarm_register( cxy_t cxy, 43 alarm_t * alarm, 42 44 core_t * core ) 43 45 { 44 list_entry_t * current; // pointer on current list_entry in existing list 45 list_entry_t * previous; // pointer on previous list_entry in existing list 46 alarm_t * current_alarm; // pointer on current alarm in existing list 47 cycle_t current_date; // date of current alarm in existing list 48 49 bool_t done = false; 50 51 // get pointers on root of alarms and lock 46 // get alarm date 47 cycle_t new_date = hal_remote_l64( XPTR( cxy , &alarm->date ) ); 48 49 // build local pointer on root of alarms list 52 50 list_entry_t * root = &core->alarms_root; 53 busylock_t * lock = &core->alarms_lock; 54 55 // get pointer on new_alarm list_entry 56 list_entry_t * new_entry = &new_alarm->list; 57 58 // get new_alarm date 59 cycle_t new_date = new_alarm->date; 60 61 // take the lock 62 busylock_acquire( lock ); 51 52 // build local pointer on new alarm list_entry 53 list_entry_t * new = &alarm->list; 63 54 64 55 // insert new alarm to respect dates order 65 if( list_ is_empty( root ) )// list empty56 if( list_remote_is_empty( cxy , &core->alarms_root ) ) // list empty 66 57 { 67 list_ add_first( root , new_entry);58 list_remote_add_first( cxy , root , new ); 68 59 } 69 else // list non empty60 else // list non empty 70 61 { 71 for( current = root->next ; 72 (current != root) && (done == false) ; 73 current = current->next ) 62 list_entry_t * iter; // local pointer on current list_entry in existing list 63 alarm_t * iter_alarm; // local pointer on current alarm in existing list 64 cycle_t iter_date; // date of current alarm in existing list 65 bool_t done = false; 66 67 for( iter = hal_remote_lpt( XPTR( cxy , &root->next ) ) ; 68 (iter != root) && (done == false) ; 69 iter = hal_remote_lpt( XPTR( cxy , &iter->next ) ) ) 74 70 { 75 // get pointer on previous entry in existing list76 previous = current->pred;77 78 // get pointer on current alarm79 current_alarm = LIST_ELEMENT( current, alarm_t , list );71 // get local pointer on pred and next for iter 72 list_entry_t * prev = hal_remote_lpt( XPTR( cxy , &iter->pred ) ); 73 74 // get local pointer on current alarm 75 iter_alarm = LIST_ELEMENT( iter , alarm_t , list ); 80 76 81 77 // get date for current alarm 82 current_date = current_alarm->date; 83 84 if( current_date > new_date ) // insert new alarm just before current 78 iter_date = hal_remote_l64( XPTR( cxy , &iter_alarm->date ) ); 79 80 // insert new alarm just before current when 
required 81 if( iter_date > new_date ) 85 82 { 86 new_entry->next = current;87 new_entry->pred = previous;88 89 current->pred = new_entry;90 previous->next = new_entry;83 hal_remote_spt( XPTR( cxy , &new->next ) , iter ); 84 hal_remote_spt( XPTR( cxy , &new->pred ) , prev ); 85 86 hal_remote_spt( XPTR( cxy , &iter->pred ) , new ); 87 hal_remote_spt( XPTR( cxy , &prev->next ) , new ); 91 88 92 89 done = true; … … 96 93 if( done == false ) // new_date is larger than all registered dates 97 94 { 98 list_ add_last( root , new_entry);95 list_remote_add_last( cxy, root , new ); 99 96 } 100 97 } 101 98 } // end alarm_register() 99 100 101 /////////////////////////////////// 102 void alarm_init( alarm_t * alarm ) 103 { 104 alarm->linked = false; 105 list_entry_init( &alarm->list ); 106 } 107 108 /////////////////////////////////////// 109 void alarm_start( xptr_t thread_xp, 110 cycle_t date, 111 void * func_ptr, 112 xptr_t args_xp ) 113 { 114 // get cluster and local pointer on target thread 115 thread_t * tgt_ptr = GET_PTR( thread_xp ); 116 cxy_t tgt_cxy = GET_CXY( thread_xp ); 117 118 // check alarm state 119 assert( __FUNCTION__ , (hal_remote_l32( XPTR(tgt_cxy,&tgt_ptr->alarm.linked)) == false ), 120 "alarm already started"); 121 122 // get local pointer on core running target thread 123 core_t * core = hal_remote_lpt( XPTR( tgt_cxy , &tgt_ptr->core ) ); 124 125 // build extended pointer on lock protecting alarms list 126 xptr_t lock_xp = XPTR( tgt_cxy , &core->alarms_lock ); 127 128 // initialize alarm descriptor 129 hal_remote_s64( XPTR( tgt_cxy , &tgt_ptr->alarm.date ) , date ); 130 hal_remote_spt( XPTR( tgt_cxy , &tgt_ptr->alarm.func_ptr ) , func_ptr ); 131 hal_remote_s64( XPTR( tgt_cxy , &tgt_ptr->alarm.args_xp ) , args_xp ); 132 hal_remote_s32( XPTR( tgt_cxy , &tgt_ptr->alarm.linked ) , true ); 133 134 // take the lock 135 remote_busylock_acquire( lock_xp ); 136 137 // register alarm in core list 138 alarm_register( tgt_cxy , &tgt_ptr->alarm , core ); 139 140 //release the lock 141 remote_busylock_release( lock_xp ); 142 143 } // end alarm_start() 144 145 146 ///////////////////////////////////// 147 void alarm_stop( xptr_t thread_xp ) 148 { 149 // get cluster and local pointer on target thread 150 thread_t * tgt_ptr = GET_PTR( thread_xp ); 151 cxy_t tgt_cxy = GET_CXY( thread_xp ); 152 153 // get local pointer on core running target thread 154 core_t * core = hal_remote_lpt( XPTR( tgt_cxy , &tgt_ptr->core ) ); 155 156 // build extended pointer on lock protecting alarms list 157 xptr_t lock_xp = XPTR( tgt_cxy , &core->alarms_lock ); 158 159 // take the lock 160 remote_busylock_acquire( lock_xp ); 161 162 // unlink the alarm from the list rooted in core 163 list_remote_unlink( tgt_cxy , &tgt_ptr->alarm.list ); 164 165 // update alarm state 166 hal_remote_s32( XPTR( tgt_cxy , &tgt_ptr->alarm.linked ) , false ); 167 168 //release the lock 169 remote_busylock_release( lock_xp ); 170 171 } // end alarm_stop() 172 173 174 ////////////////////////////////////// 175 void alarm_update( xptr_t thread_xp, 176 cycle_t new_date ) 177 { 178 // get cluster and local pointer on target thread 179 thread_t * tgt_ptr = GET_PTR( thread_xp ); 180 cxy_t tgt_cxy = GET_CXY( thread_xp ); 181 182 // get local pointer on core running target thread 183 core_t * core = hal_remote_lpt( XPTR( tgt_cxy , &tgt_ptr->core ) ); 184 185 // build extended pointer on lock protecting alarms list 186 xptr_t lock_xp = XPTR( tgt_cxy , &core->alarms_lock ); 187 188 // take the lock 189 remote_busylock_acquire( lock_xp ); 190 191 // 
unlink the alarm from the core list 192 list_remote_unlink( tgt_cxy , &tgt_ptr->alarm.list ); 193 194 // update the alarm date and state 195 hal_remote_s64( XPTR( tgt_cxy , &tgt_ptr->alarm.date ) , new_date ); 196 hal_remote_s32( XPTR( tgt_cxy , &tgt_ptr->alarm.linked ) , true ); 197 198 // register alarm in core list 199 alarm_register( tgt_cxy , &tgt_ptr->alarm , core ); 200 102 201 // release the lock 103 busylock_release( lock ); 104 105 } // end alarm_register() 106 107 ////////////////////////////////////// 108 void alarm_start( cycle_t date, 109 void * func_ptr, 110 xptr_t args_xp, 111 thread_t * thread ) 112 { 113 // get pointer on alarm 114 alarm_t * alarm = &thread->alarm; 115 116 // initialize alarm descriptor 117 alarm->date = date; 118 alarm->func_ptr = func_ptr; 119 alarm->args_xp = args_xp; 120 121 // register alarm in core list 122 alarm_register( alarm , thread->core ); 123 124 } // end alarm_start() 125 126 ///////////////////////////////////// 127 void alarm_update( thread_t * thread, 128 cycle_t new_date ) 129 { 130 // get pointer on alarm 131 alarm_t * alarm = &thread->alarm; 132 133 // get pointer on core 134 core_t * core = thread->core; 135 136 // get pointer on lock protecting the alarms list 137 busylock_t * lock = &core->alarms_lock; 138 139 // unlink the alarm from the core list 140 busylock_acquire( lock ); 141 list_unlink( &alarm->list ); 142 busylock_release( lock ); 143 144 // update the alarm date 145 alarm->date = new_date; 146 147 // register alarm in core list 148 alarm_register( alarm , core ); 149 202 remote_busylock_release( lock_xp ); 203 150 204 } // end alarm_update() 151 205 152 //////////////////////////////////// 153 void alarm_stop( thread_t * thread ) 154 { 155 // get pointer on alarm 156 alarm_t * alarm = &thread->alarm; 157 158 // get pointer on core 159 core_t * core = thread->core; 160 161 // get pointer on lock protecting the alarms list 162 busylock_t * lock = &core->alarms_lock; 163 164 // unlink the alarm from the list rooted in core 165 busylock_acquire( lock ); 166 list_unlink( &alarm->list ); 167 busylock_release( lock ); 168 169 } // end alarm_stop() 170 206 -
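
A hedged usage sketch of the new xptr-based alarm interface: a client arms the alarm attached to a (possibly remote) thread, and the handler re-arms or stops it when it rings. The handler name, the PERIOD constant and the choice of passing the thread pointer as handler argument are illustrative assumptions; only alarm_start(), alarm_update() and alarm_stop() come from this file:

    // hypothetical handler, executed when the alarm attached to <thread_xp> rings
    static void example_alarm_handler( xptr_t args_xp )
    {
        xptr_t thread_xp = args_xp;                 // argument layout chosen by the client

        // either re-arm the alarm one PERIOD later ...
        alarm_update( thread_xp , hal_get_cycles() + PERIOD );

        // ... or stop it when the job is done :
        // alarm_stop( thread_xp );
    }

    // client side : arm the alarm for the first time
    alarm_start( thread_xp,                         // target thread
                 hal_get_cycles() + PERIOD,         // absolute date
                 &example_alarm_handler,            // handler function
                 thread_xp );                       // handler argument
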
trunk/kernel/kern/alarm.h
r669 r683 36 36 * This structure defines a generic, timer based, kernel alarm. 37 37 * 38 * - An alarm being attached to a given thread,the alarm descriptor is embedded in the38 * - An alarm is attached to a given thread, and the alarm descriptor is embedded in the 39 39 * thread descriptor. A client thread can use the alarm_start() function to dynamically 40 40 * activate the alarm. It can use the alarm_stop() function to desactivate this alarm. 41 41 * - This kernel alarm is generic, as the alarm handler (executed when the alarm rings), 42 * and the handler arguments are defined by two pointers <func_ptr> and <args_xp>.42 * and the handler arguments are defined by two pointers: <func_ptr> and <args_xp>. 43 43 * - When an alarm is created by a client thread, it is registered in the list of alarms 44 44 * rooted in the core running the client thread. When it is stopped, the alarm is simply 45 45 * removed from this list. 46 * - When creating an alarm , the client thread must define an absolute date (in cycles),47 * the func_ptr localpointer, and the args_xp extended pointer.46 * - When creating an alarm with the alarm_start() function, the client thread must define 47 * an absolute date (in cycles), the func_ptr pointer, and the args_xp extended pointer. 48 48 * - The list of alarms is ordered by increasing dates. At each TICK received by a core, 49 49 * the date of the first registered alarm is compared to the current date (in the 50 50 * core_clock() function). The alarm handler is executed when current_date >= alarm_date. 51 * - It is the handler responsability to stop a ringing alarm, or update the date. 51 * - It is the handler responsability to stop and delete a ringing alarm using the 52 * alarm_stop() function, or update the alarm date using the alarm_update() function. 53 * - The three alarm_start(), alarm_stop(), and alarm_update() access functions use 54 * the lock protecting the alarms list to handle concurrent accesses. These functions 55 * use extended pointers to access the alarm list, and can be called by a thread 56 * running in any cluster. 52 57 * 53 * This mechanism is used bi the almos_mkh implementation of the TCP protocoL. 58 * This embedded alarm mechanism is used by: 59 * 1. the socket_accept(), socket_connect(), socket_send(), socket_close() functions, 60 * to implement the TCP retransmission machanism. 61 * 2. the sys_thread_sleep() function, to implement the "sleep" mechanism. 54 62 ******************************************************************************************/ 55 63 56 64 typedef struct alarm_s 57 65 { 66 bool_t linked; /*! active when true (i.e linked to the core list) */ 58 67 cycle_t date; /*! absolute date for handler execution */ 59 68 void * func_ptr; /*! local pointer on alarm handler function */ 60 69 xptr_t args_xp; /*! local pointer on handler arguments */ 61 list_entry_t list; /*! all alarms attached to the same core*/70 list_entry_t list; /*! set of active alarms attached to the same core */ 62 71 } 63 72 alarm_t; … … 70 79 71 80 /******************************************************************************************* 81 * This function initialises the alarm state to "inactive". 82 ******************************************************************************************* 83 * @ alarm : local pointer on alarm. 
84 ******************************************************************************************/ 85 void alarm_init( alarm_t * alarm ); 86 87 /******************************************************************************************* 72 88 * This function initializes the alarm descriptor embedded in the thread identified by the 73 * <thread> argument from the <date>, <func_ptr>, <args_ptr> arguments, and registers it 74 * in the ordered list rooted in the core running this <thread>. 89 * <thread_xp> argument from the <date>, <func_ptr>, <args_ptr> arguments, and registers 90 * this alarm in the ordered list rooted in the core running this thread. 91 * It takes the lock protecting the alarms list against concurrent accesses. 75 92 ******************************************************************************************* 93 * @ thread_xp : extended pointer on the target thread. 76 94 * @ date : absolute date (in cycles). 77 95 * @ func_ptr : local pointer on the handler to execute when the alarm rings. 78 96 * @ args_xp : extended pointer on the handler arguments. 79 * @ thread : local pointer on the client thread.80 97 ******************************************************************************************/ 81 void alarm_start( cycle_t date,82 void * func_ptr,83 xptr_t args_xp,84 struct thread_s * thread);98 void alarm_start( xptr_t thread_xp, 99 cycle_t date, 100 void * func_ptr, 101 xptr_t args_xp ); 85 102 86 103 /******************************************************************************************* … … 88 105 * <thread> argument. The list of alarms rooted in the core running the client thread 89 106 * is modified to respect the absolute dates ordering. 107 * It takes the lock protecting the alarms list against concurrent accesses. 90 108 ******************************************************************************************* 91 * @ thread : local pointer on the client thread.109 * @ thread_xp : extended pointer on the target thread. 92 110 * @ new_date : absolute new date (in cycles). 93 111 ******************************************************************************************/ 94 void alarm_update( struct thread_s * thread,95 cycle_t 112 void alarm_update( xptr_t thread_xp, 113 cycle_t new_date ); 96 114 97 115 /******************************************************************************************* 98 116 * This function unlink an alarm identified by the <thread> argument from the list of 99 117 * alarms rooted in the core descriptor. 118 * It takes the lock protecting the alarms list against concurrent accesses. 100 119 ******************************************************************************************* 101 * @ thread : local pointer on the client thread.120 * @ thread_xp : extended pointer on the target thread. 102 121 ******************************************************************************************/ 103 void alarm_stop( struct thread_s * thread);122 void alarm_stop( xptr_t thread_xp ); 104 123 105 124 -
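
The TICK-driven behaviour described above is executed in core_clock(), which is not part of this changeset; the following hedged sketch only summarises that description. The function name and the handler cast are illustrative, and the lock protecting the list is omitted for clarity:

    // hedged sketch of the scan performed at each TICK on the local core
    static void example_scan_alarms( core_t * core )
    {
        cycle_t now = hal_get_cycles();

        if( list_is_empty( &core->alarms_root ) == false )
        {
            // the list is ordered by increasing dates : only the first alarm can be ripe
            alarm_t * alarm = LIST_ELEMENT( core->alarms_root.next , alarm_t , list );

            if( alarm->date <= now )
            {
                // execute the registered handler (cast from the generic void * pointer) ;
                // the handler must call alarm_stop() or alarm_update() on the owner thread
                void (* handler)( xptr_t ) = (void (*)( xptr_t )) alarm->func_ptr;
                handler( alarm->args_xp );
            }
        }
    }
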
trunk/kernel/kern/chdev.c
r669 r683 87 87 { 88 88 chdev_t * chdev; 89 kmem_req_t req;90 89 91 90 // allocate memory for chdev 92 req.type = KMEM_KCM; 93 req.order = bits_log2( sizeof(chdev_t) ); 94 req.flags = AF_ZERO | AF_KERNEL; 95 chdev = kmem_alloc( &req ); 91 chdev = kmem_alloc( bits_log2(sizeof(chdev_t)) , AF_ZERO | AF_KERNEL ); 96 92 97 93 if( chdev == NULL ) return NULL; … … 114 110 } // end chdev_create() 115 111 116 /////////////////////////////////// 117 void chdev_print( chdev_t * chdev ) 118 { 119 printk("\n - func = %s" 120 "\n - channel = %d" 121 "\n - base = %l" 112 ///////////////////////////////////// 113 void chdev_display( xptr_t chdev_xp ) 114 { 115 chdev_t * chdev = GET_PTR( chdev_xp ); 116 cxy_t cxy = GET_CXY( chdev_xp ); 117 118 char name[16]; 119 120 hal_remote_memcpy( XPTR( local_cxy, name ), 121 XPTR( cxy , &chdev->name ), 16 ); 122 123 printk("\n - chdev = [%x,%x]" 124 "\n - name = %s" 125 "\n - base = [%x,%x]" 122 126 "\n - cmd = %x" 123 "\n - isr = %x" 124 "\n - chdev = %x\n", 125 chdev_func_str(chdev->func), 126 chdev->channel, 127 chdev->base, 128 chdev->cmd, 129 chdev->isr, 130 chdev ); 131 } 127 "\n - isr = %x\n", 128 cxy, 129 chdev, 130 name, 131 GET_CXY( hal_remote_l64( XPTR( cxy , &chdev->base ))), 132 GET_PTR( hal_remote_l64( XPTR( cxy , &chdev->base ))), 133 hal_remote_lpt( XPTR( cxy , &chdev->cmd )), 134 hal_remote_lpt( XPTR( cxy , &chdev->isr )) ); 135 136 } // end chdev_display() 132 137 133 138 ////////////////////////////////////////////////// … … 450 455 chdev_t * chdev_ptr; 451 456 452 453 "file_xp == XPTR_NULL\n" );457 assert( __FUNCTION__, (file_xp != XPTR_NULL) , 458 "file_xp == XPTR_NULL" ); 454 459 455 460 // get cluster and local pointer on remote file descriptor … … 462 467 inode_ptr = (vfs_inode_t *)hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) ); 463 468 464 465 "inode type %d is not FILE_TYPE_DEV\n", inode_type );469 assert( __FUNCTION__, (inode_type == FILE_TYPE_DEV) , 470 "inode type %d is not FILE_TYPE_DEV", inode_type ); 466 471 467 472 // get chdev local pointer from inode extension -
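The chdev_create() diff above replaces the kmem_req_t based allocation by direct kmem_alloc() / kmem_free() calls, the same pattern used in dev_fbf.c. The sketch below summarizes the r683 calling convention as it can be inferred from these call sites; my_struct_t is a placeholder type, and the exact prototypes are assumptions based only on this changeset.

// Hedged sketch of the r683 allocation idiom (prototypes inferred from the call
// sites in this changeset; my_struct_t is a placeholder type).

static my_struct_t * example_alloc( void )
{
    uint32_t order = bits_log2( sizeof(my_struct_t) );   // allocation order

    // allocate zeroed kernel memory: no kmem_req_t descriptor anymore
    my_struct_t * ptr = kmem_alloc( order , AF_ZERO | AF_KERNEL );

    return ptr;                                          // NULL on failure
}

static void example_release( my_struct_t * ptr )
{
    // release with the same order as the allocation
    kmem_free( ptr , bits_log2( sizeof(my_struct_t) ) );

    // remote variant, for memory allocated in another cluster <cxy>:
    // kmem_remote_free( cxy , ptr , bits_log2( sizeof(my_struct_t) ) );
}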
trunk/kernel/kern/chdev.h
r669 r683 121 121 * . This busylock is also used to protect direct access to the shared 122 122 * kernel TXT0 terminal, that does not use the waiting queue. 123 * . For most d chdevs, the client waiting queue is an xlist of threads, but it is 123 * . For most chdevs, the client waiting queue is a list of threads, but it is 124 124 * a list of sockets for the NIC chdevs. It is unused for ICU, PIC, and IOB. 125 125 *****************************************************************************************/ … … 190 190 191 191 /**************************************************************************************** 192 * This function display relevant values for a chdev descriptor. 193 **************************************************************************************** 194 * @ chdev : pointer on chdev. 195 ***************************************************************************************/ 196 void chdev_print( chdev_t * chdev ); 197 198 /**************************************************************************************** 199 192 * This function returns a printable string for a device functional type. 200 193 **************************************************************************************** … … 223 216 224 217 /**************************************************************************************** 225 * This generi dfunction is executed by an user thread requesting an IOC or TXT chdev 218 * This generic function is executed by a user thread requesting an IOC or TXT chdev 226 219 * service. It registers the calling thread in the waiting queue of the remote 227 220 * chdev descriptor identified by the <chdev_xp> argument. … … 282 275 283 276 /**************************************************************************************** 277 * This function displays relevant values for a remote chdev descriptor. 278 **************************************************************************************** 279 * @ chdev_xp : extended pointer on chdev. 280 ***************************************************************************************/ 281 void chdev_display( xptr_t chdev_xp ); 282 283 /**************************************************************************************** 284 284 * This function displays the local copy of the external chdevs directory. 285 285 * (global variable replicated in all clusters) -
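Since chdev_display() now takes an extended pointer and reads the remote descriptor with hal_remote accessors, it can be called from any cluster through the chdev directory. A minimal sketch follows; the txt_tx[0] entry is used as an example and its name follows the pattern seen for nic_tx[] in this changeset, so it is an assumption rather than something this diff defines.

// Hedged sketch (not in the changeset): displaying a remote chdev from any cluster.
// The txt_tx[0] directory entry is an assumed example.

xptr_t chdev_xp = chdev_dir.txt_tx[0];      // extended pointer on a remote chdev

if( chdev_xp != XPTR_NULL )
{
    chdev_display( chdev_xp );              // uses hal_remote accessors internally
}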
trunk/kernel/kern/cluster.c
r669 r683 2 2 * cluster.c - Cluster-Manager related operations 3 3 * 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012)4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018,2019,2020)6 * Alain Greiner (2016,2017,2018,2019,2020) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 74 74 cluster->y_size = info->y_size; 75 75 cluster->io_cxy = info->io_cxy; 76 cluster->sys_clk = info->sys_clk; 76 77 77 78 // initialize the cluster_info[][] array … … 177 178 printk("\n[%s] PPM initialized in cluster %x / cycle %d\n", 178 179 __FUNCTION__ , local_cxy , cycle ); 179 #endif180 181 // initialises embedded KHM182 khm_init( &cluster->khm );183 184 #if( DEBUG_CLUSTER_INIT & 1 )185 cycle = (uint32_t)hal_get_cycles();186 if( DEBUG_CLUSTER_INIT < cycle )187 printk("\n[%s] KHM initialized in cluster %x at cycle %d\n",188 __FUNCTION__ , local_cxy , hal_get_cycles() );189 180 #endif 190 181 -
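The new cluster->sys_clk field copied in cluster_init() stores the system clock frequency in Hertz, which allows converting the cycle counter into wall-clock time. One possible use, not part of the changeset, is sketched below; the conversion to whole seconds is only an example.

// Hedged sketch: converting cycles to seconds with the new sys_clk field.
// Assumes sys_clk is non-zero once cluster_init() has copied it from boot_info.

cluster_t * cluster = LOCAL_CLUSTER;
cycle_t     cycles  = hal_get_cycles();                    // cycles since boot
uint32_t    seconds = (uint32_t)( cycles / cluster->sys_clk );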
trunk/kernel/kern/cluster.h
r657 r683 2 2 * cluster.h - Cluster-Manager definition 3 3 * 4 * authors Ghassan Almaless (2008,2009,2010,2011,2012)4 * authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018,2019,2019,2020)6 * Alain Greiner (2016,2017,2018,2019,2020) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 39 39 #include <ppm.h> 40 40 #include <kcm.h> 41 #include <khm.h>42 41 #include <rpc.h> 43 42 #include <core.h> … … 105 104 uint32_t x_size; /*! number of clusters in a row (can be 1) */ 106 105 uint32_t y_size; /*! number of clusters in a column (can be 1) */ 107 cxy_t io_cxy; /*! io cluster identifier */ 106 uint32_t io_cxy; /*! io cluster identifier */ 107 uint32_t sys_clk; /*! system_clock frequency (in Hertz) */ 108 108 uint32_t dqdt_root_level; /*! index of root node in dqdt_tbl[] */ 109 109 uint32_t nb_txt_channels; /*! number of TXT channels */ … … 124 124 list_entry_t dev_root; /*! root of list of devices in cluster */ 125 125 126 // memory allocators 127 ppm_t ppm; /*! embedded kernel page manager */ 128 khm_t khm; /*! embedded kernel heap manager */ 129 kcm_t kcm[6]; /*! embedded kernel cache managers [6:11] */ 126 // physical memory allocators: one PPM and several KCMs 127 ppm_t ppm; 128 kcm_t kcm[CONFIG_PPM_PAGE_ORDER - CONFIG_CACHE_LINE_ORDER]; 130 129 131 130 // RPC -
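The kcm[] array is now dimensioned by the allocation orders it serves: one KCM per order between a cache line (CONFIG_CACHE_LINE_ORDER) and a page (CONFIG_PPM_PAGE_ORDER). The helper below sketches the order-to-index mapping this sizing implies; the function name and the rounding policy for small orders are assumptions, not code from the changeset.

// Hedged sketch: selecting the KCM serving a given allocation order.
// cluster_get_kcm() is a hypothetical helper illustrating the array sizing above.

static inline kcm_t * cluster_get_kcm( cluster_t * cluster,
                                       uint32_t    order )
{
    // requests smaller than a cache line are served by the smallest KCM
    if( order < CONFIG_CACHE_LINE_ORDER ) order = CONFIG_CACHE_LINE_ORDER;

    // requests of one page or more are served by the PPM, not by a KCM
    assert( __FUNCTION__ , (order < CONFIG_PPM_PAGE_ORDER) , "order too large for KCM" );

    return &cluster->kcm[ order - CONFIG_CACHE_LINE_ORDER ];
}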
trunk/kernel/kern/core.c
r669 r683 3 3 * 4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018) 5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 47 47 core->ticks_nr = 0; 48 48 core->usage