Changeset 407 for trunk/kernel/kern/rpc.h
- Timestamp:
- Nov 7, 2017, 3:08:12 PM (7 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/kernel/kern/rpc.h
r401 r407 30 30 #include <bits.h> 31 31 #include <spinlock.h> 32 #include <vseg.h> 32 33 #include <remote_fifo.h> 33 34 … … 82 83 RPC_MAPPER_MOVE_BUFFER = 24, 83 84 RPC_MAPPER_GET_PAGE = 25, 85 RPC_VMM_CREATE_VSEG = 26, 86 RPC_SCHED_DISPLAY = 27, 84 87 85 88 RPC_MAX_INDEX = 30, … … 100 103 typedef struct rpc_desc_s 101 104 { 102 rpc_index_t index; // index of requested RPC service 103 volatile uint32_t response; // response valid when 0 104 uint64_t args[10]; // input/output arguments buffer 105 rpc_index_t index; /*! index of requested RPC service */ 106 volatile uint32_t response; /*! response valid when 0 */ 107 struct thread_s * thread; /*! local pointer on client thread */ 108 uint32_t lid; /*! index of core running the calling thread */ 109 uint64_t args[10]; /*! input/output arguments buffer */ 105 110 } 106 111 rpc_desc_t; 107 108 /***********************************************************************************109 * This structure defines the RPC fifo, containing a remote_fifo, the owner RPC110 * thread TRDID (used as a light lock), and the intrumentation counter.111 *112 * Implementation note: the TRDID is a good owner identifier, because all113 * RPC threads in a given cluster belong to the same process_zero kernel process,114 * and RPC threads cannot have local index LTID = 0.115 **********************************************************************************/116 117 typedef struct rpc_fifo_s118 {119 trdid_t owner; // owner thread / 0 if no owner120 uint64_t count; // total number of received RPCs (instrumentation)121 remote_fifo_t fifo; // embedded remote fifo122 }123 rpc_fifo_t;124 125 112 126 113 /**********************************************************************************/ … … 149 136 150 137 /*********************************************************************************** 151 * This function initialises the local RPC fifo and the lock protecting readers.152 * The number of slots is defined by the CONFIG_REMOTE_FIFO_SLOTS parameter.153 
* Each slot contains an extended pointer on the RPC descriptor.154 ***********************************************************************************155 * @ rf : pointer on the local RPC fifo.156 **********************************************************************************/157 void rpc_fifo_init( rpc_fifo_t * rf );158 159 /***********************************************************************************160 138 * This function is the entry point for RPC handling on the server side. 161 * It is executed by a core receiving an IPI .162 * It checks the RPC fifo, try to take the light-lock and activates (or creates)163 * an RPC thread in case of success.164 * **********************************************************************************165 * @ returns true if success / false otherwise.166 **********************************************************************************/ 167 bool_trpc_check();139 * It is executed by a core receiving an IPI, and each time the core enters, 140 * or exits the kernel. 141 * It does nothing and returns if the RPC_FIFO is empty. 142 * The calling thread checks if there exists at least one non-blocked RPC thread, 143 * creates a new RPC thread if required, and deschedules to allow the RPC thread to execute. 144 **********************************************************************************/ 145 void rpc_check(); 146 147 /*********************************************************************************** 148 * This function contains the loop to execute all pending RPCs on the server side. 149 * It should be called with irq disabled and after light lock acquisition. 150 * It is called by the rpc_thread_func() function with irq disabled, and after 151 * RPC_FIFO ownership acquisition. 
172 151 *********************************************************************************** 173 152 * @ rpc_fifo : pointer on the local RPC fifo 174 153 **********************************************************************************/ 175 void rpc_execute_all( rpc_fifo_t * rpc_fifo ); 176 177 /********************************************************************************** 178 * This function is called by any thread running on any core in any cluster, 179 * that detected a non-empty RPC_FIFO and got the RPC_FIFO ownership. 180 * It activates one RPC thread, and immediately switches to the RPC thread. 181 * It gets the first free RPC thread from the core free-list, or creates a new one 182 * when the core free-list is empty. 183 *********************************************************************************** 184 * @ rpc_fifo : pointer on the non-empty RPC fifo. 185 * @ return 0 if success / return ENOMEM if error. 186 **********************************************************************************/ 187 error_t rpc_activate_thread( rpc_fifo_t * rpc_fifo ); 188 189 /*********************************************************************************** 190 * This function contains the infinite loop executed by each RPC thread. 154 void rpc_execute_all( remote_fifo_t * rpc_fifo ); 155 156 /*********************************************************************************** 157 * This function contains the infinite loop executed by a RPC thread. 191 158 **********************************************************************************/ 192 159 void rpc_thread_func(); … … 266 233 *********************************************************************************** 267 234 * @ cxy : server cluster identifier. 268 * @ attr : [in] pointer on pthread_attr_t in client cluster.269 * @ thread_xp : [out] pointer onbuffer for thread extended pointer.235 * @ attr : [in] local pointer on pthread_attr_t in client cluster. 
236 * @ thread_xp : [out] buffer for thread extended pointer. 270 237 * @ error : [out] error status (0 if success). 271 238 **********************************************************************************/ … … 274 241 void * start_func, 275 242 void * start_arg, 276 struct pthread_attr_s* attr,243 pthread_attr_t * attr, 277 244 xptr_t * thread_xp, 278 245 error_t * error ); … … 499 466 500 467 /*********************************************************************************** 501 * [21] The RPC_VMM_GET_PTE returns in the "ppn" and "attr"arguments the PTE value502 * for a given VPN in a given process.468 * [21] The RPC_VMM_GET_PTE returns in the <ppn> and <attr> arguments the PTE value 469 * for a given <vpn> in a given <process> (page_fault or copy_on_write event). 503 470 * The server cluster is supposed to be the reference cluster, and the vseg 504 471 * containing the VPN must be registered in the reference VMM. 505 * It returns an error if physical memory cannot be allocated for the PTE2,472 * It returns an error if physical memory cannot be allocated for the missing PTE2, 506 473 * or for the missing page itself. 507 474 *********************************************************************************** … … 509 476 * @ process : [in] pointer on process descriptor in server cluster. 510 477 * @ vaddr : [in] virtual address to be searched. 478 * @ cow : [in] "copy_on_write" event if true / "page_fault" event if false. 511 479 * @ attr : [out] address of buffer for attributes. 512 480 * @ ppn : [out] address of buffer for PPN. 
… … 516 484 struct process_s * process, 517 485 vpn_t vpn, 486 bool_t cow, 518 487 uint32_t * attr, 519 488 ppn_t * ppn, … … 601 570 void rpc_mapper_get_page_server( xptr_t xp ); 602 571 572 /*********************************************************************************** 573 * [26] The RPC_VMM_CREATE_VSEG allows a client thread to request the remote 574 * reference cluster of a given process to allocate and register in the reference 575 * process VMM a new vseg descriptor. 576 * On the server side, this RPC uses the vmm_create_vseg() function, and returns 577 * to the client the local pointer on the created vseg descriptor. 578 *********************************************************************************** 579 * @ cxy : server cluster identifier. 580 * @ process : [in] local pointer on process descriptor in server. 581 * @ type : [in] vseg type. 582 * @ base : [in] base address (unused for dynamically allocated vsegs). 583 * @ size : [in] number of bytes. 584 * @ file_offset : [in] offset in file (for CODE, DATA, FILE types). 585 * @ file_size : [in] can be smaller than size for DATA type. 586 * @ mapper_xp : [in] extended pointer on mapper (for CODE, DATA, FILE types). 587 * @ vseg_cxy : [in] target cluster for mapping (if not data type). 588 * @ vseg : [out] local pointer on vseg descriptor / NULL if failure. 589 **********************************************************************************/ 590 void rpc_vmm_create_vseg_client( cxy_t cxy, 591 struct process_s * process, 592 vseg_type_t type, 593 intptr_t base, 594 uint32_t size, 595 uint32_t file_offset, 596 uint32_t file_size, 597 xptr_t mapper_xp, 598 cxy_t vseg_cxy, 599 struct vseg_s ** vseg ); 600 601 void rpc_vmm_create_vseg_server( xptr_t xp ); 602 603 /*********************************************************************************** 604 * [27] The RPC_SCHED_DISPLAY allows a client thread to request the display 605 * of a remote scheduler, identified by the <lid> argument. 
606 *********************************************************************************** 607 * @ cxy : server cluster identifier. 608 * @ lid : [in] local index of target core in client cluster. 609 **********************************************************************************/ 610 void rpc_sched_display_client( cxy_t cxy, 611 lid_t lid ); 612 613 void rpc_sched_display_server( xptr_t xp ); 614 603 615 #endif
Note: See TracChangeset
for help on using the changeset viewer.