Changeset 632 for trunk/kernel
- Timestamp: May 28, 2019, 2:56:04 PM (6 years ago)
- Location: trunk/kernel
- Files: 14 edited
trunk/kernel/kern/dqdt.c
r583 → r632

 ///////////////////////////////////////////////////////////////////////////
 // It traverses the quad tree from clusters to root.
 ///////////////////////////////////////////////////////////////////////////
-// @ node      : extended pointer on current node
+// @ node_xp   : extended pointer on current node
 // @ increment : number of pages variation
 ///////////////////////////////////////////////////////////////////////////
-static void dqdt_propagate_pages( xptr_t  node,
+static void dqdt_propagate_pages( xptr_t  node_xp,
                                   int32_t increment )
 {
     // get current node cluster identifier and local pointer
-    cxy_t         cxy = GET_CXY( node );
-    dqdt_node_t * ptr = GET_PTR( node );
+    cxy_t         node_cxy = GET_CXY( node_xp );
+    dqdt_node_t * node_ptr = GET_PTR( node_xp );

     // update current node pages number
-    hal_remote_atomic_add( XPTR( cxy , &ptr->pages ) , increment );
+    hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->pages ) , increment );

     // get extended pointer on parent node
-    xptr_t parent = (xptr_t)hal_remote_l64( XPTR( cxy , &ptr->parent ) );
+    xptr_t parent_xp = (xptr_t)hal_remote_l64( XPTR( node_cxy , &node_ptr->parent ) );

     // propagate if required
-    if ( parent != XPTR_NULL ) dqdt_propagate_pages( parent , increment );
+    if ( parent_xp != XPTR_NULL ) dqdt_propagate_pages( parent_xp , increment );
 }

-///////////////////////////////////////////
-void dqdt_increment_pages( uint32_t order )
-{
-    cluster_t   * cluster = LOCAL_CLUSTER;
-    dqdt_node_t * node    = &cluster->dqdt_tbl[0];
-
-    // update DQDT node level 0
-    hal_atomic_add( &node->pages , (1 << order) );
-
-    // propagate to DQDT upper levels
-    if( node->parent != XPTR_NULL ) dqdt_propagate_pages( node->parent , (1 << order) );
+////////////////////////////////////////
+void dqdt_increment_pages( cxy_t    cxy,
+                           uint32_t order )
+{
+    // get local pointer on node[0] (same in all clusters)
+    dqdt_node_t * node_ptr = &LOCAL_CLUSTER->dqdt_tbl[0];
+
+    // update DQDT node[0] in remote cluster cxy
+    hal_remote_atomic_add( XPTR( cxy , &node_ptr->pages ) , (1 << order) );
+
+    // get extended pointer on parent node in remote cluster cxy
+    xptr_t parent_xp = hal_remote_l64( XPTR( cxy , &node_ptr->parent ) );
+
+    // propagate to DQDT upper levels
+    if( parent_xp != XPTR_NULL ) dqdt_propagate_pages( parent_xp , (1 << order) );

 #if DEBUG_DQDT_UPDATE_PAGES
 …
 if( cycle > DEBUG_DQDT_UPDATE_PAGES )
 printk("\n[DBG] %s : thread %x in process %x / %x pages in cluster %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, node->pages, local_cxy, cycle );
+__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
+hal_remote_l32( XPTR( cxy , &node_ptr->pages ) ), cxy, cycle );
 #endif

 }

-///////////////////////////////////////////
-void dqdt_decrement_pages( uint32_t order )
-{
-    cluster_t   * cluster = LOCAL_CLUSTER;
-    dqdt_node_t * node    = &cluster->dqdt_tbl[0];
-
-    // update DQDT node level 0
-    hal_atomic_add( &node->pages , -(1 << order) );
-
-    // propagate to DQDT upper levels
-    if( node->parent != XPTR_NULL ) dqdt_propagate_pages( node->parent , -(1 << order) );
+////////////////////////////////////////
+void dqdt_decrement_pages( cxy_t    cxy,
+                           uint32_t order )
+{
+    // get local pointer on node[0] (same in all clusters)
+    dqdt_node_t * node_ptr = &LOCAL_CLUSTER->dqdt_tbl[0];
+
+    // update DQDT node[0] in remote cluster cxy
+    hal_remote_atomic_add( XPTR( cxy , &node_ptr->pages ) , -(1 << order) );
+
+    // get extended pointer on parent node in remote cluster cxy
+    xptr_t parent_xp = hal_remote_l64( XPTR( cxy , &node_ptr->parent ) );
+
+    // propagate to DQDT upper levels
+    if( parent_xp != XPTR_NULL ) dqdt_propagate_pages( parent_xp , -(1 << order) );

 #if DEBUG_DQDT_UPDATE_PAGES
 …
 if( cycle > DEBUG_DQDT_UPDATE_PAGES )
 printk("\n[DBG] %s : thread %x in process %x / %x pages in cluster %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, node->pages, local_cxy, cycle );
+__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
+hal_remote_l32( XPTR( cxy , &node_ptr->pages ) ), cxy, cycle );
 #endif

 }
 …
 if( cycle > DEBUG_DQDT_UPDATE_THREADS )
 printk("\n[DBG] %s : thread %x in process %x / %d threads in cluster %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, node->threads, local_cxy, cycle );
+__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
+node->threads, local_cxy, cycle );
 #endif
 …
 if( cycle > DEBUG_DQDT_UPDATE_THREADS )
 printk("\n[DBG] %s : thread %x in process %x / %d threads in cluster %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, node->threads, local_cxy, cycle );
+__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
+node->threads, local_cxy, cycle );
 #endif

trunk/kernel/kern/dqdt.h
r583 → r632

 /****************************************************************************************
- * This local function updates the total number of pages in level 0 DQDT node,
- * and immediately propagates the variation to the DQDT upper levels.
+ * These two functions can be called by any thread running in any cluster.
+ * They increment/decrement the total number of 4 Kbytes pages allocated in a cluster
+ * identified by the <cxy> argument, as specified by the <order> argument. The level 0
+ * DQDT node is updated, and this change is immediately propagated to upper levels.
  * They are called by PPM on each physical memory page allocation or release.
  ****************************************************************************************
- * @ order   : ln2( number of small pages )
+ * @ cxy     : target cluster identifier.
+ * @ order   : ln2( number of 4 Kbytes pages )
  ***************************************************************************************/
-void dqdt_increment_pages( uint32_t order );
-void dqdt_decrement_pages( uint32_t order );
+void dqdt_increment_pages( cxy_t    cxy,
+                           uint32_t order );
+
+void dqdt_decrement_pages( cxy_t    cxy,
+                           uint32_t order );
 …

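Note: as a hedged illustration of the new calling convention (the two wrapper names below are hypothetical, not part of the changeset), a PPM-side caller now passes the target cluster explicitly:

    // Sketch: report a variation of (1 << order) 4 Kbytes pages,
    // allocated or released in cluster <cxy>, to the DQDT.
    // Any thread in any cluster may issue these calls.
    void report_pages_allocated( cxy_t cxy , uint32_t order )
    {
        dqdt_increment_pages( cxy , order );
    }

    void report_pages_released( cxy_t cxy , uint32_t order )
    {
        dqdt_decrement_pages( cxy , order );
    }
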
trunk/kernel/kern/kernel_init.c
r630 → r632

     "CLUSTER_KCM",    // 1
-    "PPM_FREE",       // 2
-    "SCHED_STATE",    // 3
-    "VMM_STACK",      // 4
-    "VMM_MMAP",       // 5
-    "VFS_CTX",        // 6
-    "KCM_STATE",      // 7
-    "KHM_STATE",      // 8
-    "HTAB_STATE",     // 9
+    "SCHED_STATE",    // 2
+    "VMM_STACK",      // 3
+    "VMM_MMAP",       // 4
+    "VFS_CTX",        // 5
+    "KCM_STATE",      // 6
+    "KHM_STATE",      // 7
+    "HTAB_STATE",     // 8
+
+    "PPM_FREE",       // 9
     "THREAD_JOIN",    // 10
     "XHTAB_STATE",    // 11

trunk/kernel/kern/rpc.c
r629 → r632

 rpc_server_t * rpc_server[RPC_MAX_INDEX] =
 {
-    &rpc_pmem_get_pages_server,        // 0
-    &rpc_pmem_release_pages_server,    // 1
-    &rpc_ppm_display_server,           // 2
+    &rpc_undefined,                    // 0
+    &rpc_undefined,                    // 1
+    &rpc_undefined,                    // 2
     &rpc_process_make_fork_server,     // 3
     &rpc_user_dir_create_server,       // 4
 …
 char * rpc_str[RPC_MAX_INDEX] =
 {
-    "PMEM_GET_PAGES",       // 0
-    "PMEM_RELEASE_PAGES",   // 1
-    "PPM_DISPLAY",          // 2
+    "undefined_0",          // 0
+    "undefined_1",          // 1
+    "undefined_2",          // 2
     "PROCESS_MAKE_FORK",    // 3
     "USER_DIR_CREATE",      // 4
 …
 /////////////////////////////////////////////////////////////////////////////////////////
-// [0] Marshaling functions attached to RPC_PMEM_GET_PAGES (blocking)
+// [0] RPC_PMEM_GET_PAGES deprecated [AG] May 2019
 /////////////////////////////////////////////////////////////////////////////////////////
+
+/*
 ///////////////////////////////////////////////
 void rpc_pmem_get_pages_client( cxy_t      cxy,
 …
 #endif
 }
+*/

 /////////////////////////////////////////////////////////////////////////////////////////
-// [1] Marshaling functions attached to RPC_PMEM_RELEASE_PAGES
+// [1] RPC_PMEM_RELEASE_PAGES deprecated [AG] May 2019
 /////////////////////////////////////////////////////////////////////////////////////////
+
+/*
 //////////////////////////////////////////////////
 void rpc_pmem_release_pages_client( cxy_t     cxy,
 …
 #endif
 }
+*/

 /////////////////////////////////////////////////////////////////////////////////////////
-// [2] Marshaling functions attached to RPC_PPM_DISPLAY
+// [2] RPC_PPM_DISPLAY deprecated [AG] May 2019
 /////////////////////////////////////////////////////////////////////////////////////////
+
+/*
 /////////////////////////////////////////
 void rpc_ppm_display_client( cxy_t  cxy )
 …
 #endif
 }
+*/

 /////////////////////////////////////////////////////////////////////////////////////////

trunk/kernel/kern/rpc.h
r628 → r632

 typedef enum
 {
-    RPC_PMEM_GET_PAGES         = 0,
-    RPC_PMEM_RELEASE_PAGES     = 1,
-    RPC_PPM_DISPLAY            = 2,
+    RPC_UNDEFINED_0            = 0,    // RPC_PMEM_GET_PAGES deprecated [AG]
+    RPC_UNDEFINED_1            = 1,    // RPC_PMEM_RELEASE_PAGES deprecated [AG]
+    RPC_UNDEFINED_2            = 2,    // RPC_PPM_DISPLAY deprecated [AG]
     RPC_PROCESS_MAKE_FORK      = 3,
     RPC_USER_DIR_CREATE        = 4,
 …
 /***********************************************************************************
  * [0] The RPC_PMEM_GET_PAGES allocates one or several pages in a remote cluster,
  * and returns the local pointer on the page descriptor.
+ * deprecated [AG] May 2019
  ***********************************************************************************
  * @ cxy     : server cluster identifier
 …
  * @ page    : [out] local pointer on page descriptor / NULL if failure
  **********************************************************************************/
+
+/*
 void rpc_pmem_get_pages_client( cxy_t      cxy,
                                 uint32_t   order,
 …

 void rpc_pmem_get_pages_server( xptr_t xp );
+*/

 /***********************************************************************************
  * [1] The RPC_PMEM_RELEASE_PAGES releases one or several pages to a remote cluster.
+ * deprecated [AG] May 2019
  ***********************************************************************************
  * @ cxy     : server cluster identifier
  * @ page    : [in] local pointer on page descriptor to release.
  **********************************************************************************/
+
+/*
 void rpc_pmem_release_pages_client( cxy_t           cxy,
                                     struct page_s * page );

 void rpc_pmem_release_pages_server( xptr_t xp );
+*/

 /***********************************************************************************
  * [2] The RPC_PPM_DISPLAY allows any client thread to require any remote cluster
- * identified by the <cxy> argument to display the physical memory allocator state.
- **********************************************************************************/
+ * identified by the <cxy> argument to display the physical memory allocator state.
+ * deprecated [AG] May 2019
+ **********************************************************************************/
+
+/*
 void rpc_ppm_display_client( cxy_t cxy );

 void rpc_ppm_display_server( xptr_t xp );
+*/

 /***********************************************************************************

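Note: callers of the three deprecated RPCs are expected to use the direct remote PPM access functions instead; the remote_barrier.c hunk below shows the actual migration. A minimal hedged sketch of the pattern (the error handling is illustrative, not from the source):

    // Before (deprecated): allocate 4 pages in cluster <cxy> through
    // an RPC served by a dedicated thread in the remote cluster.
    //     page_t * page;
    //     rpc_pmem_get_pages_client( cxy , 2 , &page );

    // After: the client thread updates the remote PPM directly.
    xptr_t page_xp = ppm_remote_alloc_pages( cxy , 2 );
    if( page_xp == XPTR_NULL ) return NULL;    // allocation failure
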
trunk/kernel/kernel_config.h
r629 → r632

 #define DEBUG_HAL_CONTEXT                 0
 #define DEBUG_HAL_EXCEPTIONS              0
-#define DEBUG_HAL_GPT_SET_PTE             0
 #define DEBUG_HAL_GPT_COPY                0
 #define DEBUG_HAL_GPT_CREATE              0
 #define DEBUG_HAL_GPT_DESTROY             0
+#define DEBUG_HAL_GPT_LOCK_PTE            0
+#define DEBUG_HAL_GPT_SET_PTE             0
 #define DEBUG_HAL_IOC_RX                  0
 #define DEBUG_HAL_IOC_TX                  0
 …
 #define DEBUG_QUEUELOCK_CXY               0

-#define DEBUG_RPC_CLIENT_GENERIC          0
-#define DEBUG_RPC_SERVER_GENERIC          0
+#define DEBUG_RPC_CLIENT_GENERIC          21000000
+#define DEBUG_RPC_SERVER_GENERIC          21000000

 #define DEBUG_RPC_KCM_ALLOC               0
 …
 #define DEBUG_RPC_MAPPER_HANDLE_MISS      0
 #define DEBUG_RPC_MAPPER_MOVE_USER        0
-#define DEBUG_RPC_PMEM_GET_PAGES          0
+#define DEBUG_RPC_PMEM_GET_PAGES          21000000
 #define DEBUG_RPC_PMEM_RELEASE_PAGES      0
 #define DEBUG_RPC_PROCESS_MAKE_FORK       0
 …
 #define DEBUG_VMM_DESTROY                 0
 #define DEBUG_VMM_FORK_COPY               0
-#define DEBUG_VMM_GET_ONE_PPN             0
+#define DEBUG_VMM_GET_ONE_PPN             2
 #define DEBUG_VMM_GET_PTE                 0
-#define DEBUG_VMM_HANDLE_PAGE_FAULT       0
+#define DEBUG_VMM_HANDLE_PAGE_FAULT       2
 #define DEBUG_VMM_HANDLE_COW              0
 #define DEBUG_VMM_MMAP_ALLOC              0
-#define DEBUG_VMM_PAGE_ALLOCATE           0
+#define DEBUG_VMM_PAGE_ALLOCATE           2
 #define DEBUG_VMM_REMOVE_VSEG             0
 #define DEBUG_VMM_RESIZE_VSEG             0
 …
 #define LOCK_CLUSTER_KCM      1   // local  (B)  protect dynamic KCM creation in cluster
-#define LOCK_PPM_FREE         2   // local  (B)  protect PPM allocator free_pages lists
-#define LOCK_SCHED_STATE      3   // local  (B)  protect scheduler state for given core
-#define LOCK_VMM_STACK        4   // local  (B)  protect VMM stack vseg allocator
-#define LOCK_VMM_MMAP         5   // local  (B)  protect VMM mmap vseg allocator
-#define LOCK_VFS_CTX          6   // local  (B)  protect vfs context state
-#define LOCK_KCM_STATE        7   // local  (B)  protect KCM allocator state
-#define LOCK_KHM_STATE        8   // local  (B)  protect KHM allocator state
-#define LOCK_HTAB_STATE       9   // local  (B)  protect a local htab state
+#define LOCK_SCHED_STATE      2   // local  (B)  protect scheduler state for given core
+#define LOCK_VMM_STACK        3   // local  (B)  protect VMM stack vseg allocator
+#define LOCK_VMM_MMAP         4   // local  (B)  protect VMM mmap vseg allocator
+#define LOCK_VFS_CTX          5   // local  (B)  protect vfs context state
+#define LOCK_KCM_STATE        6   // local  (B)  protect KCM allocator state
+#define LOCK_KHM_STATE        7   // local  (B)  protect KHM allocator state
+#define LOCK_HTAB_STATE       8   // local  (B)  protect a local htab state
+
+#define LOCK_PPM_FREE         9   // remote (B)  protect PPM allocator free_pages lists
 #define LOCK_THREAD_JOIN      10  // remote (B)  protect join/exit between two threads
 #define LOCK_XHTAB_STATE      11  // remote (B)  protect a distributed xhtab state

trunk/kernel/libk/list.h
r612 → r632

 /*
- * list.h - Double circular chained lists, inspired from linux
+ * list.h - Double circular linked list
  *
  * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
- *          Alain Greiner    (2016,2017,2018)
+ *          Alain Greiner    (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
 …
 #include <kernel_config.h>
 #include <hal_kernel_types.h>
+#include <hal_remote.h>
 #include <printk.h>
 …
+////////////////////////////////////////////////////////////////////////////
+//        Double circular linked list functions & macros
+//
+// It defines a generic local list, as all elements are in the same cluster.
+//
+// There are two sets of access functions, because these local lists can be
+// accessed by local threads, using local pointers, but they can also be
+// accessed by remote threads running in any cluster, with specific access
+// functions using extended pointers.
+////////////////////////////////////////////////////////////////////////////
+
+/***************************************************************************
+ * This structure defines a Double Circular Linked List entry.
+ * Note : The list root is an extra list_entry_t, that is NOT part
+ *        of the set of linked elements.
+ **************************************************************************/
+
+typedef struct list_entry_s
+{
+    struct list_entry_s * next;
+    struct list_entry_s * pred;
+}
+list_entry_t;

 /***************************************************************************
 …
 #endif

-////////////////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////////////////
-//        Double circular linked list functions & macros
-////////////////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////////////////
-
-/***************************************************************************
- * This structure defines a Double Circular Linked List entry.
- * Note : The list root is an extra list_entry_t, that is NOT part
- *        of the set of linked elements.
- **************************************************************************/
-
-typedef struct list_entry_s
-{
-    struct list_entry_s * next;
-    struct list_entry_s * pred;
-}
-list_entry_t;
-
 /***************************************************************************
  * This macro returns a pointer on a structure containing a list_entry_t.
 …
  (container_type *)( (char*)__member_ptr - OFFSETOF( container_type , member_name ));})

+////////////////////////////////////////////////////////////////////////////
+// These functions and macros must be called by a thread running
+// in the local cluster to access the local list.
+////////////////////////////////////////////////////////////////////////////
+
 /***************************************************************************
  * This macro returns a pointer on the first element of a list.
 …
 /***************************************************************************
  * This macro traverses a rooted double linked list in forward order.
- * WARNING : Don't use 2 LIST_FOREACH in the same function, because the
- * variable __ptr will be defined twice, which results in a compilation error.
+ * WARNING : Don't use this macro when you want to remove one or several
+ * item(s) from the traversed list.
  ***************************************************************************
  * @ root : pointer on the root list_entry
 …
 /***************************************************************************
- * This function inserts a new entry in first place of a double linked list.
+ * This function must be called by a thread running in local cluster.
+ * It inserts a new entry in first place of a double linked list.
  ***************************************************************************
  * @ root  : pointer on the list root
 …
 static inline void list_add_first( list_entry_t * root,
                                    list_entry_t * entry )
 {
-    list_entry_t * pred_entry;
-    list_entry_t * next_entry;
-
-    pred_entry = root;
-    next_entry = root->next;
-
-    entry->next = next_entry;
-    entry->pred = pred_entry;
-
-    pred_entry->next = entry;
-    next_entry->pred = entry;
+    list_entry_t * next = root->next;
+
+    entry->next = next;
+    entry->pred = root;
+
+    root->next  = entry;
+    next->pred  = entry;
 }

 /***************************************************************************
- * This function inserts a new entry in last place of a double linked list.
+ * This function must be called by a thread running in local cluster.
+ * It inserts a new entry in last place of a double linked list.
  ***************************************************************************
  * @ root  : pointer on the list root
 …
 static inline void list_add_last( list_entry_t * root,
                                   list_entry_t * entry )
 {
-    list_entry_t * pred_entry;
-    list_entry_t * next_entry;
-
-    pred_entry = root->pred;
-    next_entry = root;
-
-    entry->next = next_entry;
-    entry->pred = pred_entry;
-
-    pred_entry->next = entry;
-    next_entry->pred = entry;
+    list_entry_t * pred = root->pred;
+
+    entry->next = root;
+    entry->pred = pred;
+
+    root->pred  = entry;
+    pred->next  = entry;
 }

 /***************************************************************************
- * This function returns true if the list is empty.
+ * This function must be called by a thread running in local cluster.
+ * It returns true if the list is empty.
  ***************************************************************************
  * @ root : pointer on the list root
 …

 /***************************************************************************
- * This function removes an entry from a rooted double linked list.
+ * This function must be called by a thread running in local cluster.
+ * It removes an entry from the list.
  ***************************************************************************
  * @ entry : pointer on the entry to be removed.
 …
 static inline void list_unlink( list_entry_t * entry )
 {
-    list_entry_t * pred_entry;
-    list_entry_t * next_entry;
-
-    pred_entry = entry->pred;
-    next_entry = entry->next;
-
-    pred_entry->next = entry->next;
-    next_entry->pred = entry->pred;
+    list_entry_t * pred;
+    list_entry_t * next;
+
+    pred = entry->pred;
+    next = entry->next;
+
+    pred->next = next;
+    next->pred = pred;
 }
 …

+////////////////////////////////////////////////////////////////////////////
+// These functions and macros can be used by any thread running
+// in any cluster to access a remote local list.
+////////////////////////////////////////////////////////////////////////////
+
+/***************************************************************************
+ * This macro can be used by a thread running in any cluster to access
+ * a remote local list. It returns a local pointer on the first element
+ * of the remote list in the remote cluster.
+ ***************************************************************************
+ * @ cxy    : remote list cluster identifier
+ * @ root   : local pointer on the list root
+ * @ type   : type of the linked element
+ * @ member : name of the list_entry_t field
+ **************************************************************************/
+
+#define LIST_REMOTE_FIRST( cxy , root , type , member )                       \
+    ({ list_entry_t * __first = hal_remote_lpt( XPTR( cxy , &root->next ) );  \
+       LIST_ELEMENT( __first , type , member ); })
+
+/***************************************************************************
+ * This macro can be used by a thread running in any cluster to access
+ * a remote local list. It traverses the list in forward order.
+ * WARNING : Don't use this macro when you want to remove one or several
+ * item(s) from the traversed list.
+ ***************************************************************************
+ * @ cxy  : remote list cluster identifier
+ * @ root : pointer on the root list_entry
+ * @ iter : pointer on the current list_entry
+ **************************************************************************/
+
+#define LIST_REMOTE_FOREACH( cxy , root , iter )                  \
+    for( (iter) = hal_remote_lpt( XPTR( cxy , &(root)->next ) ) ; \
+         (iter) != (root) ;                                       \
+         (iter) = hal_remote_lpt( XPTR( cxy , &(iter)->next ) ) )
+
+/***************************************************************************
+ * This function can be called by a thread running in any cluster to access
+ * a remote local list. It returns true if the list is empty.
+ ***************************************************************************
+ * @ cxy  : remote list cluster identifier
+ * @ root : local pointer on the remote list root
+ **************************************************************************/
+static inline bool_t list_remote_is_empty( cxy_t          cxy,
+                                           list_entry_t * root )
+{
+    list_entry_t * next = hal_remote_lpt( XPTR( cxy , &root->next ) );
+    return( root == next );
+}
+
+/***************************************************************************
+ * This function can be called by a thread running in any cluster to access
+ * a remote local list. It inserts a new entry in first place of the list.
+ ***************************************************************************
+ * @ cxy   : remote list cluster identifier
+ * @ root  : local pointer on the remote list root
+ * @ entry : local pointer on the remote entry to be inserted
+ **************************************************************************/
+static inline void list_remote_add_first( cxy_t          cxy,
+                                          list_entry_t * root,
+                                          list_entry_t * entry )
+{
+    // get local pointer on current first entry
+    list_entry_t * first = hal_remote_lpt( XPTR( cxy , &root->next ) );
+
+    // link new entry between root and current first entry
+    hal_remote_spt( XPTR( cxy , &entry->next ) , first );
+    hal_remote_spt( XPTR( cxy , &entry->pred ) , root );
+
+    hal_remote_spt( XPTR( cxy , &root->next  ) , entry );
+    hal_remote_spt( XPTR( cxy , &first->pred ) , entry );
+}
+
+/***************************************************************************
+ * This function can be called by a thread running in any cluster to access
+ * a remote local list. It inserts a new entry in last place of the list.
+ ***************************************************************************
+ * @ cxy   : remote list cluster identifier
+ * @ root  : local pointer on the remote list root
+ * @ entry : local pointer on the remote entry to be inserted
+ **************************************************************************/
+static inline void list_remote_add_last( cxy_t          cxy,
+                                         list_entry_t * root,
+                                         list_entry_t * entry )
+{
+    // get local pointer on current last entry
+    list_entry_t * last = hal_remote_lpt( XPTR( cxy , &root->pred ) );
+
+    // link new entry between current last entry and root
+    hal_remote_spt( XPTR( cxy , &entry->next ) , root );
+    hal_remote_spt( XPTR( cxy , &entry->pred ) , last );
+
+    hal_remote_spt( XPTR( cxy , &root->pred ) , entry );
+    hal_remote_spt( XPTR( cxy , &last->next ) , entry );
+}
+
+/***************************************************************************
+ * This function can be called by a thread running in any cluster to access
+ * a remote local list. It removes an entry from the list.
+ ***************************************************************************
+ * @ cxy   : remote list cluster identifier
+ * @ entry : pointer on the entry to be removed.
+ **************************************************************************/
+static inline void list_remote_unlink( cxy_t          cxy,
+                                       list_entry_t * entry )
+{
+    list_entry_t * pred;
+    list_entry_t * next;
+
+    pred = hal_remote_lpt( XPTR( cxy , &entry->pred ) );
+    next = hal_remote_lpt( XPTR( cxy , &entry->next ) );
+
+    hal_remote_spt( XPTR( cxy , &pred->next ) , next );
+    hal_remote_spt( XPTR( cxy , &next->pred ) , pred );
+}

 #endif  /* _LIST_H_ */

trunk/kernel/libk/remote_barrier.c
r629 → r632

 uint32_t nthreads )
 {
-    page_t     * dqt_page;
     xptr_t       dqt_page_xp;
     page_t     * rpc_page;
 …
     uint32_t     z;              // actual DQT size == max(x_size,y_size)
     uint32_t     levels;         // actual number of DQT levels
-    kmem_req_t   req;            // kmem request
     xptr_t       rpc_xp;         // extended pointer on RPC descriptors array
     rpc_desc_t * rpc;            // pointer on RPC descriptors array
 …
     uint32_t     y;              // Y coordinate in QDT mesh
     uint32_t     l;              // level coordinate
+    kmem_req_t   req;            // kmem request

     // compute size and number of DQT levels
 …
     cxy_t ref_cxy = GET_CXY( ref_xp );

-    // 1. allocate memory for DQT barrier descriptor in reference cluster
-    if( ref_cxy == local_cxy )
-    {
-        req.type    = KMEM_PAGE;
-        req.size    = 2;          // 4 pages == 16 Kbytes
-        req.flags   = AF_ZERO;
-        dqt_page    = kmem_alloc( &req );
-        dqt_page_xp = XPTR( local_cxy , dqt_page );
-    }
-    else
-    {
-        rpc_pmem_get_pages_client( ref_cxy,
-                                   2,
-                                   &dqt_page );
-        dqt_page_xp = XPTR( ref_cxy , dqt_page );
-    }
-
-    if( dqt_page == NULL ) return NULL;
+    // 1. allocate four 4 Kbytes pages for the DQT barrier descriptor
+    //    in reference cluster
+    dqt_page_xp = ppm_remote_alloc_pages( ref_cxy , 2 );
+
+    if( dqt_page_xp == XPTR_NULL ) return NULL;

     // get pointers on DQT barrier descriptor
 …
     // 4. release memory allocated for barrier descriptor
-    xptr_t   page_xp = ppm_base2page( barrier_xp );
-    page_t * page    = GET_PTR( page_xp );
-
-    if( barrier_cxy == local_cxy )
-    {
-        req.type = KMEM_PAGE;
-        req.ptr  = page;
-        kmem_free( &req );
-    }
-    else
-    {
-        rpc_pmem_release_pages_client( barrier_cxy,
-                                       page );
-    }
+    xptr_t   page_xp  = ppm_base2page( barrier_xp );
+    cxy_t    page_cxy = GET_CXY( page_xp );
+    page_t * page_ptr = GET_PTR( page_xp );
+
+    ppm_remote_free_pages( page_cxy , page_ptr );

 #if DEBUG_BARRIER_DESTROY

trunk/kernel/mm/page.c
r567 → r632

 }

-////////////////////////////////
-void page_print( page_t * page )
-{
-    printk("*** Page %d : base = %x / flags = %x / order = %d / count = %d\n",
-           page->index,
-           GET_PTR( ppm_page2base( XPTR( local_cxy , page ) ) ),
-           page->flags,
-           page->order,
-           page->refcount );
-}
+////////////////////////////////////////////////////
+inline void page_remote_set_flag( xptr_t   page_xp,
+                                  uint32_t value )
+{
+    cxy_t    page_cxy = GET_CXY( page_xp );
+    page_t * page_ptr = GET_PTR( page_xp );
+
+    hal_remote_atomic_or( XPTR( page_cxy , &page_ptr->flags ) , value );
+}
+
+//////////////////////////////////////////////////////
+inline void page_remote_clear_flag( xptr_t   page_xp,
+                                    uint32_t value )
+{
+    cxy_t    page_cxy = GET_CXY( page_xp );
+    page_t * page_ptr = GET_PTR( page_xp );
+
+    hal_remote_atomic_and( XPTR( page_cxy , &page_ptr->flags ) , ~value );
+}
+
+/////////////////////////////////////////////////////
+inline bool_t page_remote_is_flag( xptr_t   page_xp,
+                                   uint32_t value )
+{
+    cxy_t    page_cxy = GET_CXY( page_xp );
+    page_t * page_ptr = GET_PTR( page_xp );
+
+    uint32_t flags = hal_remote_l32( XPTR( page_cxy , &page_ptr->flags ) );
+
+    return (flags & value) ? 1 : 0;
+}
+
+/////////////////////////////////////////////////////
+inline void page_remote_refcount_up( xptr_t page_xp )
+{
+    cxy_t    page_cxy = GET_CXY( page_xp );
+    page_t * page_ptr = GET_PTR( page_xp );
+
+    hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->refcount ) , 1 );
+}
+
+///////////////////////////////////////////////////////
+inline void page_remote_refcount_down( xptr_t page_xp )
+{
+    cxy_t    page_cxy = GET_CXY( page_xp );
+    page_t * page_ptr = GET_PTR( page_xp );
+
+    hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->refcount ) , -1 );
+}
+
+///////////////////////////////////////////
+void page_remote_display( xptr_t page_xp )
+{
+    page_t page;   // local copy of page descriptor
+
+    hal_remote_memcpy( XPTR( local_cxy , &page ) , page_xp , sizeof( page_t ) );
+
+    printk("*** Page %d in cluster %x : ppn %x / flags %x / order %d / refcount %d\n",
+           page.index,
+           GET_CXY( page_xp ),
+           ppm_page2ppn( page_xp ),
+           page.flags,
+           page.order,
+           page.refcount );
+}

trunk/kernel/mm/page.h
r625 → r632

  *
  * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
- *          Alain Greiner    (2016)
+ *          Alain Greiner    (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
 …
 /*************************************************************************************
- * This function initializes one page descriptor.
+ * This function must be called by a thread running in the local cluster.
+ * It initializes the page descriptor.
  *************************************************************************************
  * @ page    : pointer to page descriptor
 …
 /*************************************************************************************
- * This function atomically sets one or several flags in page descriptor flags.
+ * This function must be called by a thread running in the local cluster.
+ * It atomically sets one or several flags in page descriptor flags.
  *************************************************************************************
  * @ page    : pointer to page descriptor.
 …
 /*************************************************************************************
- * This function atomically resets one or several flags in page descriptor flags.
+ * This function must be called by a thread running in the local cluster.
+ * It atomically resets one or several flags in page descriptor flags.
  *************************************************************************************
  * @ page    : pointer to page descriptor.
 …
 /*************************************************************************************
- * This function tests the value of one or several flags in page descriptor flags.
+ * This function must be called by a thread running in the local cluster.
+ * It tests the value of one or several flags in page descriptor flags.
  *************************************************************************************
  * @ page    : pointer to page descriptor.
 …
 /*************************************************************************************
- * This function resets to 0 all bytes in a given page.
+ * This function must be called by a thread running in the local cluster.
+ * It resets to 0 all bytes in a given page.
  *************************************************************************************
  * @ page    : pointer on page descriptor.
 …
 /*************************************************************************************
- * This blocking function atomically increments the page refcount.
+ * This function must be called by a thread running in the local cluster.
+ * It atomically increments the page refcount.
  *************************************************************************************
  * @ page    : pointer on page descriptor.
 …
 /*************************************************************************************
- * This blocking function atomically decrements the page refcount.
+ * This function must be called by a thread running in the local cluster.
+ * It atomically decrements the page refcount.
  *************************************************************************************
  * @ page    : pointer on page descriptor.
 …
 inline void page_refcount_down( page_t * page );

-/*************************************************************************************
- * This function displays the values contained in a page descriptor.
- *************************************************************************************
- * @ page    : pointer on page descriptor.
- ************************************************************************************/
-void page_print( page_t * page );
-
+/*************************************************************************************
+ * This function can be called by any thread running in any cluster.
+ * It atomically sets one or several flags in a remote page descriptor
+ * identified by the <page_xp> argument.
+ *************************************************************************************
+ * @ page_xp : extended pointer to page descriptor.
+ * @ value   : all non zero bits in value will be set.
+ ************************************************************************************/
+inline void page_remote_set_flag( xptr_t   page_xp,
+                                  uint32_t value );
+
+/*************************************************************************************
+ * This function can be called by any thread running in any cluster.
+ * It atomically resets one or several flags in a remote page descriptor
+ * identified by the <page_xp> argument.
+ *************************************************************************************
+ * @ page_xp : extended pointer to page descriptor.
+ * @ value   : all non zero bits in value will be cleared.
+ ************************************************************************************/
+inline void page_remote_clear_flag( xptr_t   page_xp,
+                                    uint32_t value );
+
+/*************************************************************************************
+ * This function can be called by any thread running in any cluster.
+ * It tests the value of one or several flags in a remote page descriptor
+ * identified by the <page_xp> argument.
+ *************************************************************************************
+ * @ page_xp : extended pointer to page descriptor.
+ * @ value   : all non zero bits will be tested.
+ * @ returns true if at least one non zero bit in value is set / false otherwise.
+ ************************************************************************************/
+inline bool_t page_remote_is_flag( xptr_t   page_xp,
+                                   uint32_t value );
+
+/*************************************************************************************
+ * This function can be called by any thread running in any cluster.
+ * It atomically increments the refcount for the remote page identified by
+ * the <page_xp> argument.
+ *************************************************************************************
+ * @ page_xp : extended pointer on page descriptor.
+ ************************************************************************************/
+inline void page_remote_refcount_up( xptr_t page_xp );
+
+/*************************************************************************************
+ * This function can be called by any thread running in any cluster.
+ * It atomically decrements the refcount for the remote page identified by
+ * the <page_xp> argument.
+ *************************************************************************************
+ * @ page_xp : extended pointer on page descriptor.
+ ************************************************************************************/
+inline void page_remote_refcount_down( xptr_t page_xp );
+
+/*************************************************************************************
+ * This debug function can be called by any thread running in any cluster.
+ * It displays the values contained in a page descriptor.
+ *************************************************************************************
+ * @ page_xp : extended pointer on page descriptor.
+ ************************************************************************************/
+void page_remote_display( xptr_t page_xp );

 #endif  /* _PAGE_H_ */

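Note: a hedged sketch of how the new remote accessors compose (variable names are illustrative; PG_DIRTY is an existing page flag used by the PPM dirty-list code below). The test-then-set pair is not atomic as a whole; a real caller would serialize such a sequence with a lock, as the dirty-list functions in ppm.c do:

    // From any cluster, flag a remote page dirty and take a reference on it.
    xptr_t page_xp = XPTR( page_cxy , page_ptr );

    if( page_remote_is_flag( page_xp , PG_DIRTY ) == false )
    {
        page_remote_set_flag( page_xp , PG_DIRTY );
        page_remote_refcount_up( page_xp );
    }
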
trunk/kernel/mm/ppm.c
r625 r632 3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018 )5 * Alain Greiner (2016,2017,2018,2019) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 45 45 //////////////////////////////////////////////////////////////////////////////////////// 46 46 47 ////////////////////////////////////////////////48 inline bool_t ppm_page_is_valid( page_t * page )49 {50 ppm_t * ppm = &LOCAL_CLUSTER->ppm;51 uint32_t pgnr = (uint32_t)( page - ppm->pages_tbl );52 return (pgnr <= ppm->pages_nr);53 }54 47 55 48 ///////////////////////////////////////////// … … 151 144 void ppm_free_pages_nolock( page_t * page ) 152 145 { 153 page_t * buddy; // searched buddy page descriptor154 uint32_t buddy_index; // buddy page index155 page_t * current; // current (merged) page descriptor156 uint32_t current_index; // current (merged) page index157 uint32_t current_order; // current (merged) pageorder146 page_t * buddy; // searched buddy block page descriptor 147 uint32_t buddy_index; // buddy bloc index in page_tbl[] 148 page_t * current; // current (merged) block page descriptor 149 uint32_t current_index; // current (merged) block index in page_tbl[] 150 uint32_t current_order; // current (merged) block order 158 151 159 152 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 160 153 page_t * pages_tbl = ppm->pages_tbl; 161 154 162 163 164 165 166 155 assert( !page_is_flag( page , PG_FREE ) , 156 "page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) ); 157 158 assert( !page_is_flag( page , PG_RESERVED ) , 159 "reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) ); 167 160 168 161 // update released page descriptor flags … … 172 165 // - merge with current page descriptor if found 173 166 // - exit to release the current page descriptor if not found 174 current = page ,167 current = page; 175 168 current_index = (uint32_t)(page - ppm->pages_tbl); 176 169 for( current_order = page->order ; … … 181 174 buddy = pages_tbl + buddy_index; 182 175 183 if( !page_is_flag( buddy , PG_FREE ) || (buddy->order != current_order) ) break; 184 185 // remove buddy from free list 176 // exit this loop if buddy block not found 177 if( !page_is_flag( buddy , PG_FREE ) || 178 (buddy->order != current_order) ) break; 179 180 // remove buddy block from free_list 186 181 list_unlink( &buddy->list ); 187 182 ppm->free_pages_nr[current_order] --; 188 183 189 // merge buddy with current 184 // reset order field in buddy block page descriptor 190 185 buddy->order = 0; 186 187 // compute merged block index in page_tbl[] 191 188 current_index &= buddy_index; 192 189 } 193 190 194 // update merged page descriptor order191 // update pointer and order field for merged block page descriptor 195 192 current = pages_tbl + current_index; 196 193 current->order = current_order; 197 194 198 // insert currentin free list195 // insert merged block in free list 199 196 list_add_first( &ppm->free_pages_root[current_order] , ¤t->list ); 200 197 ppm->free_pages_nr[current_order] ++; … … 205 202 page_t * ppm_alloc_pages( uint32_t order ) 206 203 { 204 page_t * current_block; 207 205 uint32_t current_order; 208 page_t * remaining_block;209 206 uint32_t current_size; 207 page_t * found_block; 210 208 211 209 #if DEBUG_PPM_ALLOC_PAGES … … 213 211 uint32_t cycle = (uint32_t)hal_get_cycles(); 214 212 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 215 printk("\n[%s] thread[%x,%x] enter for %d page(s) / cycle %d\n",216 __FUNCTION__, this->process->pid, this->trdid, 1<<order, c ycle );213 printk("\n[%s] 
thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n", 214 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle ); 217 215 #endif 218 216 219 217 #if(DEBUG_PPM_ALLOC_PAGES & 0x1) 220 218 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 221 ppm_ print("enter ppm_alloc_pages");219 ppm_remote_display( local_cxy ); 222 220 #endif 223 221 … … 227 225 assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order ); 228 226 229 page_t * block = NULL; 227 //build extended pointer on lock protecting remote PPM 228 xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock ); 230 229 231 230 // take lock protecting free lists 232 busylock_acquire( &ppm->free_lock ); 231 remote_busylock_acquire( lock_xp ); 232 233 current_block = NULL; 233 234 234 235 // find a free block equal or larger to requested size … … 237 238 if( !list_is_empty( &ppm->free_pages_root[current_order] ) ) 238 239 { 239 block = LIST_FIRST( &ppm->free_pages_root[current_order] , page_t , list ); 240 list_unlink( &block->list ); 241 break; 240 // get first free block in this free_list 241 current_block = LIST_FIRST( &ppm->free_pages_root[current_order] , page_t , list ); 242 243 // remove this block from this free_list 244 list_unlink( ¤t_block->list ); 245 246 // register pointer on found block 247 found_block = current_block; 248 249 // update this free-list number of blocks 250 ppm->free_pages_nr[current_order] --; 251 252 // compute found block size 253 current_size = (1 << current_order); 254 255 break; 242 256 } 243 257 } 244 258 245 if( block == NULL ) // return failure259 if( current_block == NULL ) // return failure if no free block found 246 260 { 247 261 // release lock protecting free lists 248 busylock_release( &ppm->free_lock);262 remote_busylock_release( lock_xp ); 249 263 250 264 #if DEBUG_PPM_ALLOC_PAGES 251 265 cycle = (uint32_t)hal_get_cycles(); 252 266 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 253 printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) / cycle %d\n",254 __FUNCTION__, this->process->pid, this->trdid, 1<<order, c ycle );267 printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n", 268 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle ); 255 269 #endif 256 270 … … 258 272 } 259 273 260 // update free-lists after removing a block 261 ppm->free_pages_nr[current_order] --; 262 current_size = (1 << current_order); 263 264 // split the removed block in smaller sub-blocks if required 274 275 // split the found block in smaller sub-blocks if required 265 276 // and update the free-lists accordingly 266 277 while( current_order > order ) 267 278 { 268 279 current_order --; 280 281 // update pointer, size, and order fiels for new free block 269 282 current_size >>= 1; 270 271 remaining_block = block + current_size; 272 remaining_block->order = current_order; 273 274 list_add_first( &ppm->free_pages_root[current_order] , &remaining_block->list ); 283 current_block = found_block + current_size; 284 current_block->order = current_order; 285 286 // insert new free block in relevant free_list 287 list_add_first( &ppm->free_pages_root[current_order] , ¤t_block->list ); 288 289 // update number of blocks in free list 275 290 ppm->free_pages_nr[current_order] ++; 276 291 } 277 292 278 // update page descriptor279 page_clear_flag( block , PG_FREE );280 page_refcount_up( block );281 block->order = order;293 // update found block page descriptor 294 page_clear_flag( found_block , PG_FREE ); 295 page_refcount_up( found_block ); 296 found_block->order = order; 
282 297 283 298 // release lock protecting free lists 284 busylock_release( &ppm->free_lock);299 remote_busylock_release( lock_xp ); 285 300 286 301 // update DQDT 287 dqdt_increment_pages( order );302 dqdt_increment_pages( local_cxy , order ); 288 303 289 304 #if DEBUG_PPM_ALLOC_PAGES 290 305 cycle = (uint32_t)hal_get_cycles(); 291 306 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 292 printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x / cycle %d\n",307 printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn = %x / cycle %d\n", 293 308 __FUNCTION__, this->process->pid, this->trdid, 294 1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );309 1<<order, ppm_page2ppn(XPTR( local_cxy , found_block )), cxy, cycle ); 295 310 #endif 296 311 297 312 #if(DEBUG_PPM_ALLOC_PAGES & 0x1) 298 313 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 299 ppm_ print("exit ppm_alloc_pages");300 #endif 301 302 return block;314 ppm_remote_display( local_cxy ); 315 #endif 316 317 return found_block; 303 318 304 319 } // end ppm_alloc_pages() … … 311 326 312 327 #if DEBUG_PPM_FREE_PAGES 313 uint32_t cycle = (uint32_t)hal_get_cycles(); 328 thread_t * this = CURRENT_THREAD; 329 uint32_t cycle = (uint32_t)hal_get_cycles(); 314 330 if( DEBUG_PPM_FREE_PAGES < cycle ) 315 printk("\n[%s] thread[%x,%x] enter for %d page(s) / ppn %x / cycle %d\n",331 printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n", 316 332 __FUNCTION__, this->process->pid, this->trdid, 317 1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );333 1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle ); 318 334 #endif 319 335 320 336 #if(DEBUG_PPM_FREE_PAGES & 0x1) 321 337 if( DEBUG_PPM_FREE_PAGES < cycle ) 322 ppm_print("enter ppm_free_pages"); 323 #endif 338 ppm_remote_display( local_cxy ); 339 #endif 340 341 //build extended pointer on lock protecting free_lists 342 xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock ); 324 343 325 344 // get lock protecting free_pages[] array 326 busylock_acquire( &ppm->free_lock);345 remote_busylock_acquire( lock_xp ); 327 346 328 347 ppm_free_pages_nolock( page ); 329 348 330 // release lock protecting free_ pages[] array331 busylock_release( &ppm->free_lock);349 // release lock protecting free_lists 350 remote_busylock_release( lock_xp ); 332 351 333 352 // update DQDT 334 dqdt_decrement_pages( page->order );353 dqdt_decrement_pages( local_cxy , page->order ); 335 354 336 355 #if DEBUG_PPM_FREE_PAGES 337 356 cycle = (uint32_t)hal_get_cycles(); 338 357 if( DEBUG_PPM_FREE_PAGES < cycle ) 339 printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn %x / cycle %d\n",358 printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n", 340 359 __FUNCTION__, this->process->pid, this->trdid, 341 1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );360 1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle ); 342 361 #endif 343 362 344 363 #if(DEBUG_PPM_FREE_PAGES & 0x1) 345 364 if( DEBUG_PPM_FREE_PAGES < cycle ) 346 ppm_ print("exit ppm_free_pages");365 ppm_remote_display( local_cxy ); 347 366 #endif 348 367 349 368 } // end ppm_free_pages() 350 369 351 //////////////////////// 352 void ppm_display( void ) 370 ///////////////////////////////////////////// 371 xptr_t ppm_remote_alloc_pages( cxy_t cxy, 372 uint32_t order ) 373 { 374 uint32_t current_order; 375 uint32_t current_size; 376 page_t * current_block; 377 page_t * found_block; 378 379 #if DEBUG_PPM_ALLOC_PAGES 380 thread_t * this = CURRENT_THREAD; 381 
uint32_t cycle = (uint32_t)hal_get_cycles(); 382 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 383 printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n", 384 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle ); 385 #endif 386 387 #if(DEBUG_PPM_ALLOC_PAGES & 0x1) 388 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 389 ppm_remote_display( cxy ); 390 #endif 391 392 // check order 393 assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order ); 394 395 // get local pointer on PPM (same in all clusters) 396 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 397 398 //build extended pointer on lock protecting remote PPM 399 xptr_t lock_xp = XPTR( cxy , &ppm->free_lock ); 400 401 // take lock protecting free lists in remote cluster 402 remote_busylock_acquire( lock_xp ); 403 404 current_block = NULL; 405 406 // find in remote cluster a free block equal or larger to requested size 407 for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ ) 408 { 409 // get local pointer on the root of relevant free_list in remote cluster 410 list_entry_t * root = &ppm->free_pages_root[current_order]; 411 412 if( !list_remote_is_empty( cxy , root ) ) 413 { 414 // get local pointer on first free page descriptor in remote cluster 415 current_block = LIST_REMOTE_FIRST( cxy, root , page_t , list ); 416 417 // remove first free page from the free-list in remote cluster 418 list_remote_unlink( cxy , ¤t_block->list ); 419 420 // register found block 421 found_block = current_block; 422 423 // decrement relevant free-list number of items in remote cluster 424 hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 ); 425 426 // compute found block size 427 current_size = (1 << current_order); 428 429 break; 430 } 431 } 432 433 if( current_block == NULL ) // return failure 434 { 435 // release lock protecting free lists 436 remote_busylock_release( lock_xp ); 437 438 #if DEBUG_PPM_ALLOC_PAGES 439 cycle = (uint32_t)hal_get_cycles(); 440 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 441 printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n", 442 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle ); 443 #endif 444 445 return XPTR_NULL; 446 } 447 448 // split the found block in smaller sub-blocks if required 449 // and update the free-lists accordingly in remote cluster 450 while( current_order > order ) 451 { 452 // update order, size, and local pointer for new free block 453 current_order --; 454 current_size >>= 1; 455 current_block = found_block + current_size; 456 457 // update new free block order field in remote cluster 458 hal_remote_s32( XPTR( cxy , ¤t_block->order ) , current_order ); 459 460 // get local pointer on the root of the relevant free_list in remote cluster 461 list_entry_t * root = &ppm->free_pages_root[current_order]; 462 463 // insert new free block in this free_list 464 list_remote_add_first( cxy , root, ¤t_block->list ); 465 466 // update free-list number of items in remote cluster 467 hal_remote_atomic_add( XPTR(cxy , &ppm->free_pages_nr[current_order]), 1 ); 468 } 469 470 // update refcount, flags and order fields in found block remote page descriptor 471 page_remote_clear_flag( XPTR( cxy , found_block ), PG_FREE ); 472 page_remote_refcount_up( XPTR( cxy , found_block ) ); 473 hal_remote_s32( XPTR( cxy , &found_block->order ) , order ); 474 475 // release lock protecting free lists in remote cluster 476 remote_busylock_release( lock_xp ); 477 478 // update DQDT page counter in 
remote cluster 479 dqdt_increment_pages( cxy , order ); 480 481 #if DEBUG_PPM_ALLOC_PAGES 482 cycle = (uint32_t)hal_get_cycles(); 483 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 484 printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x in cluster %x / cycle %d\n", 485 __FUNCTION__, this->process->pid, this->trdid, 486 1<<order, ppm_page2ppn(XPTR( local_cxy , found_block )), cxy, cycle ); 487 #endif 488 489 #if(DEBUG_PPM_ALLOC_PAGES & 0x1) 490 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 491 ppm_remote_display( cxy ); 492 #endif 493 494 return XPTR( cxy , found_block ); 495 496 } // end ppm_remote_alloc_pages() 497 498 ////////////////////////////////////////// 499 void ppm_remote_free_pages( cxy_t cxy, 500 page_t * page ) 501 { 502 xptr_t page_xp; // extended pointer on released page descriptor 503 uint32_t order; // released block order 504 page_t * buddy_ptr; // searched buddy block page descriptor 505 uint32_t buddy_order; // searched buddy block order 506 uint32_t buddy_index; // buddy block index in page_tbl[] 507 page_t * current_ptr; // current (merged) block page descriptor 508 uint32_t current_index; // current (merged) block index in page_tbl[] 509 uint32_t current_order; // current (merged) block order 510 511 #if DEBUG_PPM_FREE_PAGES 512 thread_t * this = CURRENT_THREAD; 513 uint32_t cycle = (uint32_t)hal_get_cycles(); 514 if( DEBUG_PPM_FREE_PAGES < cycle ) 515 printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n", 516 __FUNCTION__, this->process->pid, this->trdid, 517 1<<page->order, cxy, ppm_page2ppn(XPTR(cxy , page)), cycle ); 518 #endif 519 520 #if(DEBUG_PPM_FREE_PAGES & 0x1) 521 if( DEBUG_PPM_FREE_PAGES < cycle ) 522 ppm_remote_display( cxy ); 523 #endif 524 525 // build extended pointer on released page descriptor 526 page_xp = XPTR( cxy , page ); 527 528 // get released page order 529 order = hal_remote_l32( XPTR( cxy , &page->order ) ); 530 531 // get local pointer on PPM (same in all clusters) 532 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 533 534 // build extended pointer on lock protecting remote PPM 535 xptr_t lock_xp = XPTR( cxy , &ppm->free_lock ); 536 537 // get local pointer on remote PPM page_tbl[] array 538 page_t * pages_tbl = hal_remote_lpt( XPTR( cxy , &ppm->pages_tbl ) ); 539 540 // get lock protecting free_pages in remote cluster 541 remote_busylock_acquire( lock_xp ); 542 543 assert( !page_remote_is_flag( page_xp , PG_FREE ) , 544 "page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) ); 545 546 assert( !page_remote_is_flag( page_xp , PG_RESERVED ) , 547 "reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) ); 548 549 // update released page descriptor flags 550 page_remote_set_flag( page_xp , PG_FREE ); 551 552 // search the buddy page descriptor 553 // - merge with current page descriptor if found 554 // - exit to release the current page descriptor if not found 555 current_ptr = page; 556 current_index = (uint32_t)(page - ppm->pages_tbl); 557 for( current_order = order ; 558 current_order < CONFIG_PPM_MAX_ORDER ; 559 current_order++ ) 560 { 561 buddy_index = current_index ^ (1 << current_order); 562 buddy_ptr = pages_tbl + buddy_index; 563 564 // get buddy block order 565 buddy_order = hal_remote_l32( XPTR( cxy , &buddy_ptr->order ) ); 566 567 // exit loop if buddy block not found 568 if( !page_remote_is_flag( XPTR( cxy , buddy_ptr ) , PG_FREE ) || 569 (buddy_order != current_order) ) break; 570 571 // remove buddy from free list in remote cluster 572 list_remote_unlink( cxy , &buddy_ptr->list ); 573 
hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , -1 ); 574 575 // reset order field in buddy block page descriptor 576 hal_remote_s32( XPTR( cxy , &buddy_ptr->order ) , 0 ); 577 578 // compute merged block index in page_tbl[] array 579 current_index &= buddy_index; 580 } 581 582 // update merged page descriptor order field 583 current_ptr = pages_tbl + current_index; 584 hal_remote_s32( XPTR( cxy , &current_ptr->order ) , current_order ); 585 586 // insert merged block into relevant free list in remote cluster 587 list_remote_add_first( cxy , &ppm->free_pages_root[current_order] , &current_ptr->list ); 588 hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , 1 ); 589 590 // release lock protecting free_pages[] array 591 remote_busylock_release( lock_xp ); 592 593 // update DQDT 594 dqdt_decrement_pages( cxy , page->order ); 595 596 #if DEBUG_PPM_FREE_PAGES 597 cycle = (uint32_t)hal_get_cycles(); 598 if( DEBUG_PPM_FREE_PAGES < cycle ) 599 printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n", 600 __FUNCTION__, this->process->pid, this->trdid, 601 1<<page->order, cxy, ppm_page2ppn(XPTR(cxy , page)), cycle ); 602 #endif 603 604 #if(DEBUG_PPM_FREE_PAGES & 0x1) 605 if( DEBUG_PPM_FREE_PAGES < cycle ) 606 ppm_remote_display( cxy ); 607 #endif 608 609 } // end ppm_remote_free_pages() 610 611 //////////////////////////////////// 612 void ppm_remote_display( cxy_t cxy ) 353 613 { 354 614 uint32_t order; … … 358 618 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 359 619 360 // get lock protecting free lists 361 busylock_acquire( &ppm->free_lock ); 620 // build extended pointer on lock protecting remote PPM 621 xptr_t lock_xp = XPTR( cxy , &ppm->free_lock ); 622 623 // get lock protecting free lists in remote cluster 624 remote_busylock_acquire( lock_xp ); 362 625 363 626 printk("\n***** PPM in cluster %x / %d pages\n", local_cxy , ppm->pages_nr ); … … 365 628 for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ ) 366 629 { 367 printk("- order = %d / free_pages = %d\t: ", 368 order , ppm->free_pages_nr[order] ); 369 370 LIST_FOREACH( &ppm->free_pages_root[order] , iter ) 630 // get number of free pages for free_list[order] in remote cluster 631 uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) ); 632 printk("- order = %d / free_pages = %d\t: ", order , n ); 633 634 LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter ) 371 635 { 372 636 page = LIST_ELEMENT( iter , page_t , list ); … … 377 641 } 378 642 379 // release lock protecting free lists 380 busylock_release( &ppm->free_lock ); 643 // release lock protecting free lists in remote cluster 644 remote_busylock_release( lock_xp ); 381 645 } 382 646 383 //////////////////////////////// /////// 384 error_t ppm_assert_order( ppm_t * ppm ) 647 //////////////////////////////// 648 error_t ppm_assert_order( void ) 385 649 { 386 650 uint32_t order; 387 651 list_entry_t * iter; 388 652 page_t * page; 653 654 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 389 655 390 656 for( order=0 ; order < CONFIG_PPM_MAX_ORDER ; order++ ) … … 438 704 hal_remote_s32( page_flags_xp , flags | PG_DIRTY ); 439 705 440 // The PPM dirty list is a LOCAL list !!! 441 // We must update 4 pointers to insert a new page in this list. 442 // We can use the standard LIST API when the page is local, 443 // but we cannot use the standard API if the page is remote...
444 445 if( page_cxy == local_cxy ) // locally update the PPM dirty list 446 { 447 list_add_first( &ppm->dirty_root , &page_ptr->list ); 448 } 449 else // remotely update the PPM dirty list 450 { 451 // get local and remote pointers on "root" list entry 452 list_entry_t * root = &ppm->dirty_root; 453 xptr_t root_xp = XPTR( page_cxy , root ); 454 455 // get local and remote pointers on "page" list entry 456 list_entry_t * list = &page_ptr->list; 457 xptr_t list_xp = XPTR( page_cxy , list ); 458 459 // get local and remote pointers on first dirty page 460 list_entry_t * dirt = hal_remote_lpt( XPTR( page_cxy, &root->next ) ); 461 xptr_t dirt_xp = XPTR( page_cxy , dirt ); 462 463 // set root.next, list.next, list pred, curr.pred in remote cluster 464 hal_remote_spt( root_xp , list ); 465 hal_remote_spt( list_xp , dirt ); 466 hal_remote_spt( list_xp + sizeof(intptr_t) , root ); 467 hal_remote_spt( dirt_xp + sizeof(intptr_t) , list ); 468 } 706 // insert the page in the remote dirty list 707 list_remote_add_first( page_cxy , &ppm->dirty_root , &page_ptr->list ); 469 708 470 709 done = true; … … 512 751 hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) ); 513 752 514 // The PPM dirty list is a LOCAL list !!! 515 // We must update 4 pointers to remove a page from this list. 516 // we can use the standard LIST API when the page is local, 517 // but we cannot use the standard API if the page is remote... 518 519 if( page_cxy == local_cxy ) // locally update the PPM dirty list 520 { 521 list_unlink( &page_ptr->list ); 522 } 523 else // remotely update the PPM dirty list 524 { 525 // get local and remote pointers on "page" list entry 526 list_entry_t * list = &page_ptr->list; 527 xptr_t list_xp = XPTR( page_cxy , list ); 528 529 // get local and remote pointers on "next" page list entry 530 list_entry_t * next = hal_remote_lpt( list_xp ); 531 xptr_t next_xp = XPTR( page_cxy , next ); 532 533 // get local and remote pointers on "pred" page list entry 534 list_entry_t * pred = hal_remote_lpt( list_xp + sizeof(intptr_t) ); 535 xptr_t pred_xp = XPTR( page_cxy , pred ); 536 537 // set root.next, list.next, list pred, curr.pred in remote cluster 538 hal_remote_spt( pred_xp , next ); 539 hal_remote_spt( list_xp , NULL ); 540 hal_remote_spt( list_xp + sizeof(intptr_t) , NULL ); 541 hal_remote_spt( next_xp + sizeof(intptr_t) , pred ); 542 } 753 // remove the page from remote dirty list 754 list_remote_unlink( page_cxy , &page_ptr->list ); 543 755 544 756 done = true; -
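The remote allocator interface introduced above can be exercised from any cluster. A minimal usage sketch, using only the functions visible in this changeset (the target_cxy cluster identifier is a placeholder chosen for illustration, not part of the changeset):

    // allocate one 4 Kbytes page (order 0) in cluster target_cxy
    xptr_t page_xp = ppm_remote_alloc_pages( target_cxy , 0 );

    if( page_xp == XPTR_NULL )
    {
        printk("\n[ERROR] cannot allocate page in cluster %x\n", target_cxy );
    }
    else
    {
        // get the physical page number from the extended pointer
        ppn_t ppn = ppm_page2ppn( page_xp );

        // ... use the page ...

        // release : the API takes the cluster identifier and the
        // local pointer on the page descriptor in that cluster
        ppm_remote_free_pages( GET_CXY( page_xp ) , GET_PTR( page_xp ) );
    }

Note that ppm_remote_free_pages() takes a pointer that is only valid inside the remote cluster, which is why the extended pointer returned by the allocator is decomposed with GET_CXY() / GET_PTR() before the release.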
trunk/kernel/mm/ppm.h
r625 r632 51 51 * 52 52 * The main service provided by the PPM is the dynamic allocation of physical pages 53 * from the "kernel_heap" section. This low-level allocator implements the buddy 53 * from the "kernel_heap" section. This low-level allocator implements the "buddy" 54 54 * algorithm: an allocated block is an integer number n of small pages, where n 55 * is a power of 2, and ln(n) is called order. 56 * This allocator being shared by the local threads, the free_page lists rooted 57 * in the PPM descriptor are protected by a local busylock, because it is used 58 * by the idle_thread during kernel_init(). 59 * 60 * Another service is to register the dirty pages in a specific dirty_list, that is 55 * is a power of 2, and ln(n) is called order. The free_pages_root[] array contains 56 * the roots of the local lists of free pages for different sizes, as required by 57 * the "buddy" algorithm. 58 * The local threads can access these free_lists by calling the ppm_alloc_pages() and 59 * ppm_free_pages() functions, but the remote threads can access the same free lists, 60 * by calling the ppm_remote_alloc_pages() and ppm_remote_free_pages() functions. 61 * Therefore, these free lists are protected by a remote_busylock. 62 * 63 * Another service is to register the dirty pages in a specific local dirty_list, 61 64 * also rooted in the PPM, in order to be able to synchronize all dirty pages on disk. 62 65 * This dirty list is protected by a specific remote_queuelock, because it can be 63 * modified by a remote thread , but it contains only local pages. 66 * modified by a remote thread. 64 67 ****************************************************************************************/ 65 68 66 69 typedef struct ppm_s 67 70 { 68 busylock_t free_lock; /*! lock protecting free_pages[] lists */ 71 remote_busylock_t free_lock; /*! lock protecting free_pages[] lists */ 69 72 list_entry_t free_pages_root[CONFIG_PPM_MAX_ORDER]; /*! roots of free lists */ 70 73 uint32_t free_pages_nr[CONFIG_PPM_MAX_ORDER]; /*! free pages number */ … … 80 83 81 84 /***************************************************************************************** 82 * This is the low-level physical pages allocation function. 83 * It allocates N contiguous physical pages. N is a power of 2. 84 * In normal use, it should not be called directly, as the recommended way to get 85 * physical pages is to call the generic allocator defined in kmem.h. 86 ***************************************************************************************** 87 * @ order : ln2( number of 4 Kbytes pages) 88 * @ returns a pointer on the page descriptor if success / NULL otherwise 85 * This local allocator must be called by a thread running in local cluster. 86 * It allocates n contiguous physical 4 Kbytes pages from the local cluster, where 87 * n is a power of 2 defined by the <order> argument. 88 * In normal use, it should not be called directly, as the recommended way to allocate 89 * physical pages is to call the generic allocator defined in kmem.h. 90 ***************************************************************************************** 91 * @ order : ln2( number of 4 Kbytes pages) 92 * @ returns a local pointer on the page descriptor if success / NULL if error.
89 93 ****************************************************************************************/ 90 94 page_t * ppm_alloc_pages( uint32_t order ); 91 95 92 96 /***************************************************************************************** 93 * This is the low-level physical pages release function. It takes the lock protecting 94 * the free_list before registering the released page in the relevant free_list. 97 * This function must be called by a thread running in local cluster to release 98 * physical pages. It takes the lock protecting the free_lists before registering the 99 * released page in the relevant free_list. 95 100 * In normal use, you do not need to call it directly, as the recommended way to free 96 101 * physical pages is to call the generic allocator defined in kmem.h. 97 102 ***************************************************************************************** 98 * @ page : pointer to the page descriptor to be released 103 * @ page : local pointer on the page descriptor to be released 99 104 ****************************************************************************************/ 100 105 void ppm_free_pages( page_t * page ); 101 106 … … 105 110 * there is no concurrent access issue. 106 111 ***************************************************************************************** 107 * @ page : pointer to the page descriptor to be released 112 * @ page : local pointer on the page descriptor to be released 108 113 ****************************************************************************************/ 109 114 void ppm_free_pages_nolock( page_t * page ); 110 115 111 116 /***************************************************************************************** 112 * This function checks if a page descriptor pointer is valid. 113 ***************************************************************************************** 114 * @ page : pointer on a page descriptor 115 * @ returns true if valid / false otherwise. 116 ****************************************************************************************/ 117 inline bool_t ppm_page_is_valid( page_t * page ); 117 * This remote allocator can be called by any thread running in any cluster. 118 * It allocates n contiguous physical 4 Kbytes pages from the cluster identified 119 * by the <cxy> argument, where n is a power of 2 defined by the <order> argument. 120 * In normal use, it should not be called directly, as the recommended way to allocate 121 * physical pages is to call the generic allocator defined in kmem.h. 122 ***************************************************************************************** 123 * @ cxy : remote cluster identifier. 124 * @ order : ln2( number of 4 Kbytes pages) 125 * @ returns an extended pointer on the page descriptor if success / XPTR_NULL if error. 126 ****************************************************************************************/ 127 xptr_t ppm_remote_alloc_pages( cxy_t cxy, 128 uint32_t order ); 129 130 /***************************************************************************************** 131 * This function can be called by any thread running in any cluster to release physical 132 * pages to a remote cluster. It takes the lock protecting the free_list before registering 133 * the released page in the relevant free_list. 134 * In normal use, you do not need to call it directly, as the recommended way to free 135 * physical pages is to call the generic allocator defined in kmem.h.
136 ***************************************************************************************** 137 * @ cxy : remote cluster identifier. 138 * @ page : local pointer on the page descriptor to be released in remote cluster. 139 ****************************************************************************************/ 140 void ppm_remote_free_pages( cxy_t cxy, 141 page_t * page ); 142 143 /***************************************************************************************** 144 * This debug function can be called by any thread running in any cluster to display 145 * the current PPM state of a remote cluster. 146 ***************************************************************************************** 147 * @ cxy : remote cluster identifier. 148 ****************************************************************************************/ 149 void ppm_remote_display( cxy_t cxy ); 118 150 119 151 … … 172 204 173 205 /***************************************************************************************** 174 * This function prints the PPM allocator status in the calling thread cluster. 175 ***************************************************************************************** 176 string : character string printed in header 177 ****************************************************************************************/ 178 void ppm_display( void ); 179 180 /***************************************************************************************** 181 * This function checks PPM allocator consistency. 182 ***************************************************************************************** 183 * @ ppm : pointer on PPM allocator. 206 * This function can be called by any thread running in any cluster. 207 * It displays the PPM allocator status in the cluster identified by the <cxy> argument. 208 ***************************************************************************************** 209 * @ cxy : remote cluster 210 ****************************************************************************************/ 211 void ppm_remote_display( cxy_t cxy ); 212 213 /***************************************************************************************** 214 * This function must be called by a thread running in local cluster. 215 * It checks the consistency of the local PPM allocator. 216 ***************************************************************************************** 184 217 * @ return 0 if PPM is OK / return -1 if PPM not consistent. 185 218 ****************************************************************************************/ 186 error_t ppm_assert_order( ppm_t * ppm ); 219 error_t ppm_assert_order( void ); 187 220 188 221 -
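As the header above documents, all PPM allocators use the "order" convention: a block of n contiguous small pages must have n a power of 2, with order = ln2(n) strictly below CONFIG_PPM_MAX_ORDER. A short sketch of how a caller could derive the order for an 8-page request, using only the local functions declared above (the npages value is an arbitrary example):

    uint32_t order  = 0;
    uint32_t npages = 8;                        // requested size, must be a power of 2

    while( (1 << order) < npages ) order++;     // order = ln2( npages ) = 3

    if( order < CONFIG_PPM_MAX_ORDER )
    {
        page_t * page = ppm_alloc_pages( order );    // 8 contiguous 4 Kbytes pages
        if( page != NULL ) ppm_free_pages( page );   // release the whole block
    }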
trunk/kernel/mm/vmm.c
r630 r632 1226 1226 ppn_t ppn; // current PTE ppn value 1227 1227 uint32_t attr; // current PTE attributes 1228 kmem_req_t req; // request to release memory 1229 1228 xptr_t page_xp; // extended pointer on page descriptor 1230 1229 cxy_t page_cxy; // page descriptor cluster … … 1335 1334 1336 1335 // release physical page to relevant kmem when required 1337 if( ppn_release ) 1338 { 1339 if( page_cxy == local_cxy ) 1340 { 1341 req.type = KMEM_PAGE; 1342 req.ptr = page_ptr; 1343 kmem_free( &req ); 1344 } 1345 else 1346 { 1347 rpc_pmem_release_pages_client( page_cxy , page_ptr ); 1348 } 1349 } 1336 if( ppn_release ) ppm_remote_free_pages( page_cxy , page_ptr ); 1350 1337 1351 1338 #if( DEBUG_VMM_REMOVE_VSEG & 1 ) … … 1681 1668 ////////////////////////////////////////////////////////////////////////////////////// 1682 1669 // This static function computes the target cluster to allocate a physical page 1683 // for a given <vpn> in a given <vseg>, allocates the page (with an RPC if required) 1684 // and returns an extended pointer on the allocated page descriptor. 1685 // It can be called by a thread running in any cluster. 1670 // for a given <vpn> in a given <vseg>, allocates the page and returns an extended 1671 // pointer on the allocated page descriptor. 1686 1672 // The vseg cannot have the FILE type. 1687 1673 ////////////////////////////////////////////////////////////////////////////////////// … … 1690 1676 { 1691 1677 1692 #if DEBUG_VMM_ALLOCATE_PAGE 1678 #if DEBUG_VMM_PAGE_ALLOCATE 1693 1679 uint32_t cycle = (uint32_t)hal_get_cycles(); 1694 1680 thread_t * this = CURRENT_THREAD; 1695 if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) 1681 if( DEBUG_VMM_PAGE_ALLOCATE < cycle ) 1696 1682 printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n", 1697 1683 __FUNCTION__ , this->process->pid, this->trdid, vpn, cycle ); 1698 1684 #endif 1699 1685 1700 page_t * page_ptr; 1686 xptr_t page_xp; 1701 1687 cxy_t page_cxy; 1702 kmem_req_t req; 1703 1688 uint32_t index; … … 1727 1712 } 1728 1713 1729 // allocate a physical page from target cluster 1730 if( page_cxy == local_cxy ) // target cluster is the local cluster 1731 { 1732 req.type = KMEM_PAGE; 1733 req.size = 0; 1734 req.flags = AF_NONE; 1735 page_ptr = (page_t *)kmem_alloc( &req ); 1736 } 1737 else // target cluster is not the local cluster 1738 { 1739 rpc_pmem_get_pages_client( page_cxy , 0 , &page_ptr ); 1740 } 1741 1742 #if DEBUG_VMM_ALLOCATE_PAGE 1714 // allocate a 4 Kbytes physical page from target cluster 1715 page_xp = ppm_remote_alloc_pages( page_cxy , 0 ); 1716 1717 #if DEBUG_VMM_PAGE_ALLOCATE 1743 1718 cycle = (uint32_t)hal_get_cycles(); 1744 if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) 1745 printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", 1746 __FUNCTION__ , this->process->pid, this->trdid, vpn, 1747 ppm_page2ppn( XPTR( page_cxy , page_ptr ) , cycle ); 1748 #endif 1749 1750 if( page_ptr == NULL ) return XPTR_NULL; 1751 else return XPTR( page_cxy , page_ptr ); 1719 if( DEBUG_VMM_PAGE_ALLOCATE < cycle ) 1720 printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cluster %x / cycle %d\n", 1721 __FUNCTION__ , this->process->pid, this->trdid, vpn, ppm_page2ppn(page_xp), page_cxy, cycle ); 1722 #endif 1723 1724 return page_xp; 1725 1726 } // end vmm_page_allocate() … … 1769 1742 uint32_t cycle = (uint32_t)hal_get_cycles(); 1770 1743 thread_t * this = CURRENT_THREAD; 1771 if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1744 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1745 if( vpn == 0x40B )
1772 1746 printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id %d / cycle %d\n", 1773 1747 __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle ); … … 1815 1789 1816 1790 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1817 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1791 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1792 if( vpn == 0x40B ) 1818 1793 printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n", 1819 1794 __FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset ); … … 1829 1804 1830 1805 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1831 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1806 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1807 if( vpn == 0x40B ) 1832 1808 printk("\n[%s] thread[%x,%x] for vpn %x / fully in BSS\n", 1833 1809 __FUNCTION__, this->process->pid, this->trdid, vpn ); … … 1846 1822 1847 1823 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1848 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1824 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1825 if( vpn == 0x40B ) 1849 1826 printk("\n[%s] thread[%x,%x] for vpn %x / fully in mapper\n", 1850 1827 __FUNCTION__, this->process->pid, this->trdid, vpn ); … … 1863 1840 1864 1841 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1865 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1842 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1843 if( vpn == 0x40B ) 1866 1844 printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n" 1867 1845 " %d bytes from mapper / %d bytes from BSS\n", … … 1897 1875 #if DEBUG_VMM_GET_ONE_PPN 1898 1876 cycle = (uint32_t)hal_get_cycles(); 1899 if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1877 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1878 if( vpn == 0x40B ) 1900 1879 printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", 1901 1880 __FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle ); … … 1928 1907 1929 1908 #if DEBUG_VMM_HANDLE_PAGE_FAULT 1930 if( DEBUG_VMM_HANDLE_PAGE_FAULT < start_cycle ) 1909 if( vpn == 0x40b ) 1931 1910 printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n", 1932 1911 __FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle ); … … 1950 1929 1951 1930 #if DEBUG_VMM_HANDLE_PAGE_FAULT 1952 if( DEBUG_VMM_HANDLE_PAGE_FAULT < start_cycle ) 1953 printk("\n[%s] thread[%x,%x] found vseg %s\n", 1954 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) ); 1931 uint32_t cycle = (uint32_t)hal_get_cycles(); 1932 if( vpn == 0x40b ) 1933 printk("\n[%s] thread[%x,%x] found vseg %s / cycle %d\n", 1934 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type), cycle ); 1955 1935 #endif 1956 1936 … … 1958 1938 local_gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); 1959 1939 1960 // lock target PTE in local GPT and get current PPN and attributes 1940 // lock PTE in local GPT and get current PPN and attributes 1961 1941 error = hal_gpt_lock_pte( local_gpt_xp, 1962 1942 vpn, … … 1971 1951 } 1972 1952 1973 // handle page fault only if PTE still unmapped after lock 1953 #if DEBUG_VMM_HANDLE_PAGE_FAULT 1954 cycle = (uint32_t)hal_get_cycles(); 1955 if( vpn == 0x40b ) 1956 printk("\n[%s] thread[%x,%x] locked vpn %x in cluster %x / cycle %d\n", 1957 __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, cycle ); 1958 #endif 1959 1960 // handle page fault only if local PTE still unmapped after lock 1974 1961 if( (attr & GPT_MAPPED) == 0 ) 1975 1962 { … … 1984 1971 (ref_cxy == local_cxy ) ) 1985 1972 { 1986 // allocate and initialise a physical page depending on the vseg type 1973 1974 #if DEBUG_VMM_HANDLE_PAGE_FAULT
1975 if( vpn == 0x40b ) 1976 printk("\n[%s] thread[%x,%x] : access local gpt : local_cxy %x / ref_cxy %x / type %s\n", 1977 __FUNCTION__, this->process->pid, this->trdid, local_cxy, ref_cxy, vseg_type_str(vseg->type) ); 1978 #endif 1979 // allocate and initialise a physical page 1987 1980 error = vmm_get_one_ppn( vseg , vpn , &ppn ); … … 1999 1992 2000 1993 // define attr from vseg flags 2001 attr = GPT_MAPPED | GPT_SMALL ; 1994 attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE; 2002 1995 if( vseg->flags & VSEG_USER ) attr |= GPT_USER; 2003 1996 if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE; … … 2006 1999 2007 2000 // set PTE to local GPT 2001 // it unlocks this PTE 2008 2002 hal_gpt_set_pte( local_gpt_xp, 2009 2003 vpn, … … 2016 2010 2017 2011 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2018 if( DEBUG_VMM_HANDLE_PAGE_FAULT < end_cycle ) 2019 printk("\n[%s] local page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", 2020 __FUNCTION__, vpn, ppn, attr, end_cycle ); 2012 if( vpn == 0x40b ) 2013 printk("\n[%s] thread[%x,%x] handled local pgfault / ppn %x / attr %x / cycle %d\n", 2014 __FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle ); 2021 2015 #endif … … 2033 2027 else 2034 2028 { 2029 2030 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2031 if( vpn == 0x40b ) 2032 printk("\n[%s] thread[%x,%x] access ref gpt : local_cxy %x / ref_cxy %x / type %s\n", 2033 __FUNCTION__, this->process->pid, this->trdid, local_cxy, ref_cxy, vseg_type_str(vseg->type) ); 2034 #endif 2035 2035 // build extended pointer on reference GPT 2036 2036 ref_gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt ); 2037 2037 2038 // get current PPN and attributes from reference GPT 2039 // without locking the PTE (in case of false page fault) 2040 hal_gpt_get_pte( ref_gpt_xp, 2041 vpn, 2042 &ref_attr, 2043 &ref_ppn ); 2044 2045 if( ref_attr & GPT_MAPPED ) // false page fault => update local GPT 2038 // lock PTE in reference GPT and get current PPN and attributes 2039 error = hal_gpt_lock_pte( ref_gpt_xp, 2040 vpn, 2041 &ref_attr, 2042 &ref_ppn ); 2043 if( error ) 2044 { 2045 printk("\n[PANIC] in %s : cannot lock PTE in ref GPT / vpn %x / process %x\n", 2046 __FUNCTION__ , vpn , process->pid ); 2047 2048 // unlock PTE in local GPT 2049 hal_gpt_unlock_pte( local_gpt_xp , vpn ); 2050 2051 return EXCP_KERNEL_PANIC; 2052 } 2053 2054 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2055 if( vpn == 0x40b ) 2056 printk("\n[%s] thread[%x,%x] get pte from ref gpt / attr %x / ppn %x\n", 2057 __FUNCTION__, this->process->pid, this->trdid, ref_attr, ref_ppn ); 2058 #endif 2059 2060 if( ref_attr & GPT_MAPPED ) // false page fault 2046 2061 { 2047 2062 // update local GPT from reference GPT values 2063 // this unlocks the PTE in local GPT 2048 2064 hal_gpt_set_pte( local_gpt_xp, 2049 2065 vpn, … … 2051 2067 ref_ppn ); 2052 2069 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2070 if( vpn == 0x40b ) 2071 printk("\n[%s] thread[%x,%x] updated local gpt for a false pgfault\n", 2072 __FUNCTION__, this->process->pid, this->trdid ); 2073 #endif 2074 2075 // unlock the PTE in reference GPT 2076 hal_gpt_unlock_pte( ref_gpt_xp, vpn ); 2077 2078 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2079 if( vpn == 0x40b ) 2080 printk("\n[%s] thread[%x,%x] unlock the ref gpt after a false pgfault\n", 2081 __FUNCTION__, this->process->pid, this->trdid ); 2082 2083 #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT) 2055 2086 uint32_t end_cycle = (uint32_t)hal_get_cycles(); … … 2056 2087 2057 2088 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2058 if( DEBUG_VMM_HANDLE_PAGE_FAULT
< end_cycle ) 2059 printk("\n[%s] false page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", 2060 __FUNCTION__, vpn, ref_ppn, ref_attr, end_cycle ); 2089 if( vpn == 0x40b ) 2090 printk("\n[%s] thread[%x,%x] handled false pgfault / ppn %x / attr %x / cycle %d\n", 2091 __FUNCTION__, this->process->pid, this->trdid, ref_ppn, ref_attr, end_cycle ); 2092 #endif … … 2067 2098 return EXCP_NON_FATAL; 2068 2099 } 2069 else // true page fault => update both GPTs 2100 else // true page fault 2070 2101 { 2071 2102 // allocate and initialise a physical page depending on the vseg type … … 2077 2108 __FUNCTION__ , process->pid , vpn ); 2078 2109 2079 // unlock PTE in local GPT 2110 // unlock PTE in local GPT and in reference GPT 2080 2111 hal_gpt_unlock_pte( local_gpt_xp , vpn ); 2112 hal_gpt_unlock_pte( ref_gpt_xp , vpn ); 2081 2113 2082 2114 return EXCP_KERNEL_PANIC; 2083 2115 } 2084 2116 2085 // lock PTE in reference GPT 2086 error = hal_gpt_lock_pte( ref_gpt_xp, 2087 vpn, 2088 &ref_attr, 2089 &ref_ppn ); 2090 if( error ) 2091 { 2092 printk("\n[PANIC] in %s : cannot lock PTE in ref GPT / vpn %x / process %x\n", 2093 __FUNCTION__ , vpn , process->pid ); 2094 2095 // unlock PTE in local GPT 2096 hal_gpt_unlock_pte( local_gpt_xp , vpn ); 2097 2098 return EXCP_KERNEL_PANIC; 2099 } 2100 2101 2117 // define attr from vseg flags 2102 attr = GPT_MAPPED | GPT_SMALL ; 2118 attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE; 2103 2119 if( vseg->flags & VSEG_USER ) attr |= GPT_USER; 2104 2120 if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE; … … 2106 2122 if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE; 2107 2123 2124 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2125 if( vpn == 0x40b ) 2126 printk("\n[%s] thread[%x,%x] build a new PTE for a true pgfault\n", 2127 __FUNCTION__, this->process->pid, this->trdid ); 2128 #endif 2108 2129 // set PTE in reference GPT 2130 // this unlocks the PTE 2109 2131 hal_gpt_set_pte( ref_gpt_xp, 2110 2132 vpn, … … 2112 2134 ppn ); 2113 2136 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2137 if( vpn == 0x40b ) 2138 printk("\n[%s] thread[%x,%x] set new PTE in ref gpt for a true page fault\n", 2139 __FUNCTION__, this->process->pid, this->trdid ); 2140 #endif 2141 2114 2142 // set PTE in local GPT 2143 // this unlocks the PTE 2115 2144 hal_gpt_set_pte( local_gpt_xp, 2116 2145 vpn, … … 2123 2152 2124 2153 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2125 if( DEBUG_VMM_HANDLE_PAGE_FAULT < end_cycle ) 2126 printk("\n[%s] global page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", 2127 __FUNCTION__, vpn, ppn, attr, end_cycle ); 2154 if( vpn == 0x40b ) 2155 printk("\n[%s] thread[%x,%x] handled global pgfault / ppn %x / attr %x / cycle %d\n", 2156 __FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle ); 2128 2157 #endif … … 2138 2167 else // page has been locally mapped by another concurrent thread 2139 2168 { 2140 // unlock PTE in local GPT 2169 // unlock the PTE in local GPT 2141 2170 hal_gpt_unlock_pte( local_gpt_xp , vpn ); 2142 2171 2172 #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT) 2173 uint32_t end_cycle = (uint32_t)hal_get_cycles(); 2174 #endif 2175 2176 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2177 if( vpn == 0x40b ) 2178 printk("\n[%s] handled by another thread / vpn %x / ppn %x / attr %x / cycle %d\n", 2179 __FUNCTION__, vpn, ppn, attr, end_cycle ); 2180 #endif 2181 2182 #if CONFIG_INSTRUMENTATION_PGFAULTS 2183 this->info.false_pgfault_nr++; 2184 this->info.false_pgfault_cost += (end_cycle - start_cycle); 2185 #endif 2143 2186 return EXCP_NON_FATAL; 2144 2187 } … … 2214
2257 2215 2258 // lock target PTE in relevant GPT (local or reference) 2259 // and get current PTE value 2216 2260 error = hal_gpt_lock_pte( gpt_xp, 2217 2261 vpn, -
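The vmm_handle_page_fault() changes above replace the old unlocked hal_gpt_get_pte() read of the reference GPT by a locked access, so that both the local and the reference PTE are held during a true page fault. Stripped of error handling and debug instrumentation, the locking discipline is the following condensed sketch (local_is_reference stands for the (ref_cxy == local_cxy) test used in the actual code, and attr is assumed already built from the vseg flags):

    // lock the faulting PTE in the local GPT and get its current value
    hal_gpt_lock_pte( local_gpt_xp , vpn , &attr , &ppn );

    if( (attr & GPT_MAPPED) == 0 )        // still unmapped after taking the lock
    {
        if( local_is_reference )          // local cluster is the reference
        {
            vmm_get_one_ppn( vseg , vpn , &ppn );               // allocate the page
            hal_gpt_set_pte( local_gpt_xp , vpn , attr , ppn ); // map and unlock
        }
        else                              // reference GPT is remote
        {
            hal_gpt_lock_pte( ref_gpt_xp , vpn , &ref_attr , &ref_ppn );

            if( ref_attr & GPT_MAPPED )   // false page fault
            {
                hal_gpt_set_pte( local_gpt_xp , vpn , ref_attr , ref_ppn );
                hal_gpt_unlock_pte( ref_gpt_xp , vpn );
            }
            else                          // true page fault
            {
                vmm_get_one_ppn( vseg , vpn , &ppn );
                hal_gpt_set_pte( ref_gpt_xp , vpn , attr , ppn );   // unlocks ref PTE
                hal_gpt_set_pte( local_gpt_xp , vpn , attr , ppn ); // unlocks local PTE
            }
        }
    }
    else                                  // mapped by a concurrent thread
    {
        hal_gpt_unlock_pte( local_gpt_xp , vpn );
    }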
trunk/kernel/mm/vmm.h
r629 r632 202 202 /********************************************************************************************* 203 203 * This function modifies one GPT entry identified by the <process> and <vpn> arguments 204 * in all clusters containing a process copy. 204 * in all clusters containing a process copy. It is used to maintain coherence in GPT 205 * copies, using the list of copies stored in the owner process, and remote_write accesses. 205 206 * It must be called by a thread running in the process owner cluster. 206 * It is used to update to maintain coherence in GPT copies, using the list of copies 207 * stored in the owner process, and uses remote_write accesses. 207 * Use the RPC_VMM_GLOBAL_UPDATE_PTE if required. 208 208 * It cannot fail, as only mapped PTE2 in GPT copies are updated. 209 209 *********************************************************************************************
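Because hal_gpt_set_pte() takes an extended pointer on the target GPT, the same call can update the local copy and every remote copy; the following condensed sketch illustrates the coherence update described above. The gpt_copies_xp[] array and the ncopies count are placeholders standing for the list of GPT copies that the owner process actually maintains, and the real function also skips unmapped PTE2 entries in the copies, which is why it cannot fail:

    // one xptr-based remote update per cluster containing a copy of the process
    uint32_t i;
    for( i = 0 ; i < ncopies ; i++ )
    {
        hal_gpt_set_pte( gpt_copies_xp[i],   // extended pointer on one GPT copy
                         vpn,                // targeted virtual page number
                         attr,               // new PTE attributes
                         ppn );              // new physical page number
    }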