Changeset 611 for trunk/kernel/kern
- Timestamp:
- Jan 9, 2019, 3:02:51 PM
- Location:
- trunk/kernel/kern
- Files:
- 7 edited
Legend:
- Unmodified: shown without a prefix
- Added: lines prefixed with "+"
- Removed: lines prefixed with "-"
trunk/kernel/kern/cluster.h
(r583 → r611)

The header comment of the function that returns a local process descriptor from a PID no longer claims that an RPC is used to create a missing local copy:

     /******************************************************************************************
      * This function returns a pointer on the local process descriptor from the PID.
    - * It uses the RPC
    - * to create a local process descriptor copy if it does not exist yet.
      ******************************************************************************************
      * @ pid : searched process identifier.
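Since the updated comment only promises a lookup of an existing local copy, callers are responsible for the missing-copy case. A minimal caller sketch (the error handling shown here is illustrative, not part of the changeset):

     // look up the local copy of the process descriptor
     process_t * process = cluster_get_local_process_from_pid( pid );

     // the function does not create a missing copy via RPC,
     // so a NULL result must be handled by the caller itself
     if( process == NULL )
     {
         printk("\n[ERROR] in %s : no local copy for process %x\n", __FUNCTION__, pid );
         return -1;
     }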
trunk/kernel/kern/kernel_init.c
(r610 → r611)

A new "PROCESS_DIR" entry is inserted at index 29 of the lock-name table, and all following entries are renumbered:

         "PROCESS_FDARRAY",     // 27
         "FATFS_FREE",          // 28
    +    "PROCESS_DIR",         // 29
    -    "PROCESS_THTBL",       // 29
    -    "MAPPER_STATE",        // 30
    -    "VFS_SIZE",            // 31
    -    "VFS_FILE",            // 32
    -    "VMM_VSL",             // 33
    -    "VMM_GPT",             // 34
    -    "VFS_MAIN",            // 35
    +    "PROCESS_THTBL",       // 30
    +    "MAPPER_STATE",        // 31
    +    "VFS_SIZE",            // 32
    +    "VFS_FILE",            // 33
    +    "VMM_VSL",             // 34
    +    "VMM_GPT",             // 35
    +    "VFS_MAIN",            // 36
         };
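The string table is indexed by the lock-type constants, which is why inserting "PROCESS_DIR" forces the renumbering of every later entry. A hedged illustration of the invariant (the use of a C11 static assertion, and the constant's numeric value, are assumptions, not part of the changeset):

     // hypothetical compile-time check: the new LOCK_PROCESS_DIR constant
     // (used in process.c below) must match the index of its name string
     _Static_assert( LOCK_PROCESS_DIR == 29,
                     "lock-name table out of sync with lock-type constants" );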
trunk/kernel/kern/process.c
(r610 → r611)

The process descriptor initialisation code now also resets the new open directories list root and its lock:

         // reset semaphore / mutex / barrier / condvar list roots and lock
         xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
         xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
         ...
         xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
         remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC );

    +    // reset open directories root and lock
    +    xlist_root_init( XPTR( local_cxy , &process->dir_root ) );
    +    remote_queuelock_init( XPTR( local_cxy , &process->dir_lock ), LOCK_PROCESS_DIR );

         // register new process in the local cluster manager pref_tbl[]

In the code that multicasts RPC_PROCESS_SIGACTION requests to all process copies, the lock protecting the copies list is now taken after the shared RPC descriptor has been fully initialised, and the (pid, type) arguments are swapped to match the new server-side unmarshaling order:

         thread_block( client_xp , THREAD_BLOCKED_RPC );

    -    // take the lock protecting process copies
    -    remote_queuelock_acquire( lock_xp );
    -
         // initialize shared RPC descriptor
         rpc.responses = 0;
         ...
         rpc.thread    = client;
         rpc.lid       = client->core->lid;
    -    rpc.args[0]   = type;
    -    rpc.args[1]   = pid;
    +    rpc.args[0]   = pid;
    +    rpc.args[1]   = type;
    +
    +    // take the lock protecting process copies
    +    remote_queuelock_acquire( lock_xp );

         // scan list of process copies
         XLIST_FOREACH( root_xp , iter_xp )
         {
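The reordering matters because a server thread may read the shared descriptor as soon as the first request is posted, so the descriptor must be complete before the scan starts; the copies-list lock, conversely, only needs to cover the scan itself. A condensed sketch of the resulting send sequence (variable names follow the hunk above; the loop body is elided, as in the diff):

     // block the client thread before posting any request
     thread_block( client_xp , THREAD_BLOCKED_RPC );

     // fully initialise the shared RPC descriptor first:
     // servers can read it as soon as the first request is posted
     rpc.responses = 0;        // one increment per posted request (assumed)
     rpc.args[0]   = pid;      // order matches the server-side unmarshaling
     rpc.args[1]   = type;

     // the copies-list lock now only protects the scan itself
     remote_queuelock_acquire( lock_xp );
     XLIST_FOREACH( root_xp , iter_xp )
     {
         // post one non blocking RPC_PROCESS_SIGACTION request per copy
     }
     remote_queuelock_release( lock_xp );

     // deschedule: the server delivering the last response unblocks this thread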
trunk/kernel/kern/process.h
(r610 → r611)

The sigaction enum loses its redundant tag, keeping only the typedef name:

    -    typedef enum process_sigactions
    +    typedef enum
         {
             BLOCK_ALL_THREADS   = 0x11,
             UNBLOCK_ALL_THREADS = 0x22,
             DELETE_ALL_THREADS  = 0x33,
         }
         process_sigactions_t;

Two new fields, the open directories list root and its lock, are added to the process descriptor after the user synchronisation lists (plus minor whitespace adjustments around th_tbl[]):

         xlist_entry_t      sem_root;      /*! root of the user defined semaphore list    */
         xlist_entry_t      mutex_root;    /*! root of the user defined mutex list        */
         xlist_entry_t      barrier_root;  /*! root of the user defined barrier list      */
         xlist_entry_t      condvar_root;  /*! root of the user defined condvar list      */
         remote_queuelock_t sync_lock;     /*! lock protecting user defined synchro lists */

    +    xlist_entry_t      dir_root;      /*! root of the user defined DIR list          */
    +    remote_queuelock_t dir_lock;      /*! lock protecting user defined DIR list      */

         uint32_t           term_state;    /*! termination status (flags & exit status)   */
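The new fields follow the same pattern as the user synchro lists above: a remote_queuelock protecting an xlist of per-process user objects. A hedged sketch of how an open user directory would be registered (the dir_t type, its "list" field, and the xlist_add_first() call are assumptions, not shown in this changeset):

     // register a newly opened user directory (dir_t is hypothetical)
     // in the owner process list, under the new dir_lock
     remote_queuelock_acquire( XPTR( local_cxy , &process->dir_lock ) );
     xlist_add_first( XPTR( local_cxy , &process->dir_root ),
                      XPTR( local_cxy , &dir->list ) );
     remote_queuelock_release( XPTR( local_cxy , &process->dir_lock ) );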
trunk/kernel/kern/rpc.c
(r610 → r611)

Slot 26 of the two RPC dispatch tables, previously unused, now routes to the new vseg-deletion service:

    -    &rpc_undefined,                  // 26 unused slot
    +    &rpc_vmm_delete_vseg_server,     // 26

    -    "undefined",                     // 26
    +    "VMM_DELETE_VSEG",               // 26

In the generic RPC server thread function, the responses counter in the shared RPC descriptor is now decremented for every RPC (not only for blocking ones), and the client thread is unblocked by the server thread that delivers the last expected response. All debug messages in this function now print the full thread identity as thread[pid,trdid]:

         bool_t          blocking;    // blocking RPC when true
         remote_fifo_t * rpc_fifo;    // local pointer on RPC fifo
    +    uint32_t        count;       // current number of expected responses
         ...
    -    // decrement response counter in RPC descriptor if blocking RPC
    -    if( blocking )
    -    {
    -        // decrement responses counter in RPC descriptor
    -        hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 );
    +    // decrement expected responses counter in RPC descriptor
    +    count = hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 );
    +
    +    // unblock client thread if this was the last expected response
    +    if( count == 1 )
    +    {
             // get client thread pointer and client core lid from RPC descriptor
             client_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
             ...
             // send IPI to client core
             dev_pic_send_ipi( desc_cxy , client_core_lid );
    -    } // end if blocking RPC
    +    }

The marshaling functions for RPC_PROCESS_SIGACTION ([9], non blocking, formerly described as "multicast / non blocking") are simplified accordingly. The client now only checks the shared descriptor and posts the request:

         // check RPC "index" and "blocking" arguments
         assert( (rpc->blocking == false) , "must be non-blocking\n");
         assert( (rpc->index == RPC_PROCESS_SIGACTION ) , "bad RPC index\n" );

         // register RPC request in remote RPC fifo and return
         rpc_send( cxy , rpc );

The server extracts the process identifier from args[0] and the action from args[1] (the argument order is swapped with respect to r610), calls the relevant kernel function, and no longer touches the responses counter, which is handled by the generic server thread function described above:

         // get client cluster identifier and pointer on RPC descriptor
         cxy_t        client_cxy = GET_CXY( xp );
         rpc_desc_t * desc       = GET_PTR( xp );

         // get arguments from RPC descriptor
         pid_t    pid    = (pid_t)   hal_remote_l64( XPTR(client_cxy , &desc->args[0]) );
         uint32_t action = (uint32_t)hal_remote_l64( XPTR(client_cxy , &desc->args[1]) );

         // get client thread pointers
         thread_t * client_ptr = hal_remote_lpt( XPTR( client_cxy , &desc->thread ) );
         xptr_t     client_xp  = XPTR( client_cxy , client_ptr );

         // get local process descriptor
         process_t * process = cluster_get_local_process_from_pid( pid );

         // call relevant kernel function
         ...
         else if ( action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process );

New marshaling functions implement slot [26], RPC_VMM_DELETE_VSEG (parallel / non blocking). The client mirrors the sigaction client (same blocking/index checks, then rpc_send()); the server extracts the arguments and calls vmm_delete_vseg():

         // get arguments from RPC descriptor
         pid_t    pid   = (pid_t)   hal_remote_l64( XPTR(client_cxy , &desc->args[0]) );
         intptr_t vaddr = (intptr_t)hal_remote_l64( XPTR(client_cxy , &desc->args[1]) );

         // call relevant kernel function
         vmm_delete_vseg( pid , vaddr );

Finally, the assert message in all blocking RPC clients becomes "server cluster is not remote" (previously "target cluster is not remote"), and enter/exit debug instrumentation (thread[pid,trdid] and core identity) is added to the RPC_VMM_CREATE_VSEG and RPC_MAPPER_HANDLE_MISS marshaling functions.
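The resulting responses protocol, condensed from the hunks above: the client initialises the counter to the number of posted requests, and every server decrements it atomically after servicing its request; hal_remote_atomic_add() returns the previous value, so the server that reads 1 is the last one and wakes the client:

     // each server thread, after executing the requested service:
     count = hal_remote_atomic_add( XPTR( desc_cxy , &desc_ptr->responses ), -1 );
     if( count == 1 )   // previous value was 1: this is the last expected response
     {
         // unblock the client thread and force a scheduling point on its core
         thread_unblock( client_xp , THREAD_BLOCKED_RPC );
         dev_pic_send_ipi( desc_cxy , client_core_lid );
     }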
trunk/kernel/kern/rpc.h
(r610 → r611)

The comment on the RPC index enum is updated ("It must be consistent with the rpc_server[] arrays defined in the rpc.c file."), and the enum itself gains the new non blocking entry:

         RPC_THREAD_KERNEL_CREATE  = 7,
         RPC_UNDEFINED_8           = 8,
    -    RPC_PROCESS_SIGACTION     = 9,
    +    RPC_PROCESS_SIGACTION     = 9,     // non blocking
         ...
    -    RPC_UNDEFINED_26          = 26,
    +    RPC_VMM_DELETE_VSEG       = 26,    // non blocking
         RPC_VMM_CREATE_VSEG       = 27,
         RPC_VMM_SET_COW           = 28,

The [9] interface comment is rewritten to document the non blocking protocol (the prototypes themselves are unchanged):

     /***********************************************************************************
      * [9] The non blocking RPC_PROCESS_SIGACTION allows any client thread running in
      * any cluster to send parallel RPC requests to one or several servers (local or
      * remote), to execute a given sigaction, defined by the <action_type> argument[1],
      * for a given process identified by the <pid> argument[0].
      *
      * WARNING : It is implemented as a NON BLOCKING RPC, that can be sent in parallel
      * to several servers. The RPC descriptor, containing the <action_type> and <pid>
      * arguments, as well as the RPC <index>, <blocking>, and <responses> fields, must
      * be allocated and initialised by the calling function itself.
      * Each RPC server thread atomically decrements the <responses> field in this
      * shared RPC descriptor. The last server thread unblocks the client thread,
      * which blocked only after sending all parallel RPC requests to all servers.
      ***********************************************************************************
      * @ cxy   : server cluster identifier.
      * @ rpc   : pointer on shared RPC descriptor initialized by the client thread.
      **********************************************************************************/

A missing separator line is added before the argument list of the [25] RPC_MAPPER_HANDLE_MISS comment, and the former "[26] undefined slot" section is replaced by the interface of the new service:

     /***********************************************************************************
      * [26] The non blocking RPC_VMM_DELETE_VSEG allows any client thread running in
      * any cluster to send parallel RPC requests to one or several clusters (local or
      * remote), to delete from a given VMM, identified by the <pid> argument[0],
      * a given vseg, identified by the <vaddr> argument[1].
      *
      * WARNING : It is implemented as a NON BLOCKING RPC, that can be sent in parallel
      * to several servers. The RPC descriptor, containing the <pid> and <vaddr>
      * arguments, as well as the RPC <index>, <blocking>, and <responses> fields, must
      * be allocated and initialised by the calling function itself.
      * Each RPC server thread atomically decrements the <responses> field in this
      * shared RPC descriptor. The last server thread unblocks the client thread,
      * which blocked only after sending all parallel RPC requests to all servers.
      ***********************************************************************************
      * @ cxy   : server cluster identifier.
      * @ rpc   : pointer on shared RPC descriptor initialized by the client thread.
      **********************************************************************************/
     void rpc_vmm_delete_vseg_client( cxy_t               cxy,
                                      struct rpc_desc_s * rpc );

     void rpc_vmm_delete_vseg_server( xptr_t xp );
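For illustration, a hedged sketch of a client using the new non blocking interface to delete one vseg in every cluster holding a copy of the process (how the set of target clusters and the nb_copies count are obtained is an assumption):

     rpc_desc_t rpc;                 // shared descriptor on the client stack

     // initialise the descriptor as required by the WARNING above
     rpc.index     = RPC_VMM_DELETE_VSEG;
     rpc.blocking  = false;
     rpc.responses = nb_copies;      // number of remote target clusters (assumed known)
     rpc.thread    = CURRENT_THREAD;
     rpc.lid       = CURRENT_THREAD->core->lid;
     rpc.args[0]   = (uint64_t)pid;
     rpc.args[1]   = (uint64_t)vaddr;

     // block, then post one request per remote target cluster;
     // the local cluster must call vmm_delete_vseg() directly, because
     // the client function asserts that the server cluster is remote
     thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_RPC );
     // for each remote cluster cxy holding a copy:
     //     rpc_vmm_delete_vseg_client( cxy , &rpc );
     // the server delivering the last response unblocks this thread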
trunk/kernel/kern/thread.c
(r593 → r611)

All error paths in the user-thread creation code that released a stack vseg now use the new vmm_delete_vseg() interface, which identifies the vseg by process identifier and base address instead of a local pointer (four occurrences):

         printk("\n[ERROR] in %s : cannot create new thread\n", __FUNCTION__ );
    -    vmm_remove_vseg( vseg );
    +    vmm_delete_vseg( process->pid , vseg->min );
         return ENOMEM;

In the user fork code, the stack vseg registration and its error path use the renamed attach/detach functions, and the vseg is no longer freed explicitly on error:

         // register STACK vseg in local child VSL
    -    vmm_vseg_attach( &child_process->vmm , vseg );
    +    vmm_attach_vseg_to_vsl( &child_process->vmm , vseg );
         ...
         if( error )
         {
    -        vmm_vseg_detach( &child_process->vmm , vseg );
    -        vseg_free( vseg );
    +        vmm_detach_vseg_from_vsl( &child_process->vmm , vseg );
             thread_release( child_ptr );
             printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ );
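The interface change is what makes the new RPC possible: vmm_remove_vseg() took a local vseg pointer, which is meaningless in a remote cluster, while vmm_delete_vseg() identifies the vseg by owner PID and base virtual address, values that travel through the RPC arguments unchanged. A minimal before/after sketch:

     // before r611: the caller had to hold a local pointer on the vseg
     //     vmm_remove_vseg( vseg );

     // after r611: the (pid, vaddr) pair identifies the vseg, so the same
     // call works from local code and from the RPC_VMM_DELETE_VSEG server
     vmm_delete_vseg( process->pid , vseg->min );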