Changeset 657 for trunk/kernel
- Timestamp:
- Mar 18, 2020, 11:16:59 PM (5 years ago)
- Location:
- trunk/kernel
- Files:
-
- 4 added
- 1 deleted
- 60 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/kernel/Makefile
r647 r657 83 83 build/kern/process.o \ 84 84 build/kern/chdev.o \ 85 build/kern/socket.o \ 85 86 build/kern/cluster.o \ 86 87 build/kern/scheduler.o \ … … 126 127 build/libk/remote_condvar.o \ 127 128 build/libk/remote_barrier.o \ 129 build/libk/remote_buf.o \ 128 130 build/libk/memcpy.o \ 129 131 build/libk/htab.o \ … … 192 194 build/syscalls/sys_get_nb_cores.o \ 193 195 build/syscalls/sys_get_thread_info.o \ 194 build/syscalls/sys_fbf.o 196 build/syscalls/sys_fbf.o \ 197 build/syscalls/sys_socket.o 195 198 196 199 VFS_OBJS = build/fs/vfs.o \ -
trunk/kernel/devices/dev_dma.c
r647 r657 2 2 * dev_dma.c - DMA (Interrupt Controler Unit) generic device API implementation. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 84 84 85 85 //////////////////////////////////////////////// 86 error_t dev_dma_ remote_memcpy( xptr_t dst_xp,87 88 86 error_t dev_dma_async_memcpy( xptr_t dst_xp, 87 xptr_t src_xp, 88 uint32_t size ) 89 89 { 90 90 thread_t * this = CURRENT_THREAD; 91 91 92 #if CONGIG_DEBUG_DEV_DMA92 #if DEBUG_DEV_DMA 93 93 uint32_t cycle = (uint32_t)hal_get_cycles(); 94 94 if( CONGIG_DEBUG_DEV_DMA < cycle ) … … 107 107 108 108 // register command in calling thread descriptor 109 this->dma_cmd.sync = false; 109 110 this->dma_cmd.dev_xp = dev_xp; 110 111 this->dma_cmd.dst_xp = dst_xp; … … 112 113 this->dma_cmd.size = size; 113 114 114 // register client thread in waiting queue, activate server thread115 // block client thread on THREAD_BLOCKED_IOand deschedule.116 // it is re-activated by the ISR signaling IO operation completion.115 // register the client thread in waiting queue, activate the server thread, 116 // block the client thread on THREAD_BLOCKED_IO, and deschedule. 117 // it is re-activated by the server thread when the transfer is completed. 
117 118 chdev_register_command( dev_xp ); 118 119 119 #if CONGIG_DEBUG_DEV_DMA120 #if DEBUG_DEV_DMA 120 121 cycle = (uint32_t)hal_get_cycles(); 121 122 if( CONGIG_DEBUG_DEV_DMA < cycle ) … … 127 128 return this->dma_cmd.error; 128 129 129 } // dev_dma_ remote_memcpy()130 } // dev_dma_async_memcpy() 130 131 132 ////////////////////////////////////////////// 133 error_t dev_dma_sync_memcpy( xptr_t dst_xp, 134 xptr_t src_xp, 135 uint32_t size ) 136 { 137 thread_t * this = CURRENT_THREAD; 138 139 #if DEBUG_DEV_DMA 140 uint32_t cycle = (uint32_t)hal_get_cycles(); 141 if( CONGIG_DEBUG_DEV_DMA < cycle ) 142 printk("\n[DBG] %s : thread %x enters / dst %l / src %l / size = %x\n", 143 __FUNCTION__ , this, dst_xp, src_xp, size ); 144 #endif 145 146 // select DMA channel corresponding to core lid 147 uint32_t channel = this->core->lid; 148 149 // get extended pointer on selected DMA chdev descriptor 150 xptr_t dev_xp = chdev_dir.dma[channel]; 151 152 // check DMA chdev definition 153 assert( (dev_xp != XPTR_NULL) , "undefined DMA chdev descriptor" ); 154 155 // register command in calling thread descriptor 156 this->dma_cmd.sync = true; 157 this->dma_cmd.dev_xp = dev_xp; 158 this->dma_cmd.dst_xp = dst_xp; 159 this->dma_cmd.src_xp = src_xp; 160 this->dma_cmd.size = size; 161 162 // get driver command function 163 cxy_t dev_cxy = GET_CXY( dev_xp ); 164 chdev_t * dev_ptr = GET_PTR( dev_xp ); 165 dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( dev_cxy , &dev_ptr->cmd ) ); 166 167 // call directly the blocking driver function 168 cmd( XPTR( local_cxy , this ) ); 169 170 #if DEBUG_DEV_DMA 171 cycle = (uint32_t)hal_get_cycles(); 172 if( CONGIG_DEBUG_DEV_DMA < cycle ) 173 printk("\n[DBG] %s : thread %x exit / dst %l / src %l / size = %x\n", 174 __FUNCTION__ , this, dst_xp, src_xp, size ); 175 #endif 176 177 // return I/O operation status from calling thread descriptor 178 return this->dma_cmd.error; 179 180 } // dev_dma_sync_memcpy() 181 -
trunk/kernel/devices/dev_dma.h
r647 r657 2 2 * dev_dma.h - DMA (Direct Memory Access) generic device API definition. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 36 36 * to/from remote clusters. The burst size is defined by the cache line size. 37 37 * Each DMA channel is described by a specific chdev descriptor, handling its private 38 * waiting threads queue. It implement one single command : move data from a (remote) 39 * source buffer to a (remote) destination buffer. 38 * waiting threads queue. It implements two blocking commands : 39 * - move synchronously data from a remote source buffer to a remote destination buffer, 40 * using a polling policy to wait completion (No DMA_IRQ use). 41 * - move synchronously data from a remote source buffer to a remote destination buffer, 42 * using a descheduling policy to wait completion (reactivated bythe IDMA_IRQ). 40 43 ****************************************************************************************/ 41 44 … … 50 53 typedef struct dma_command_s 51 54 { 52 xptr_t dev_xp; /*! extended pointer on the DMA chdev descriptor */ 55 bool_t sync; /*! polling policy if true / descheduling policy if false */ 56 xptr_t dev_xp; /*! extended pointer on the DMA chdev descriptor */ 53 57 xptr_t src_xp; /*! extended pointer on source buffer. */ 54 58 xptr_t dst_xp; /*! extended pointer on destination buffer. */ … … 83 87 /***************************************************************************************** 84 88 * This blocking function register a DMA request in the device queue. 85 * It uses a descheduling policy to wait completion, and return an error status86 * when the transfer is completed.89 * It uses a descheduling policy to wait completion, 90 * It return an error status when the transfer is completed. 
87 91 ***************************************************************************************** 88 * @ dst 89 * @ src : extended pointer on Rsource buffer.92 * @ dst_xp : extended pointer on destination buffer. 93 * @ src_xp : extended pointer on source buffer. 90 94 * @ size : number of bytes to move. 91 95 ****************************************************************************************/ 92 error_t dev_dma_remote_memcpy( xptr_t dst_xp, 93 xptr_t src_xp, 94 uint32_t size ); 96 error_t dev_dma_async_memcpy( xptr_t dst_xp, 97 xptr_t src_xp, 98 uint32_t size ); 99 100 /***************************************************************************************** 101 * This blocking function register a DMA request in the device queue. 102 * It uses a polling policy to wait completion. 103 * It return an error status when the transfer is completed. 104 ***************************************************************************************** 105 * @ dst_xp : extended pointer on destination buffer. 106 * @ src_xp : extended pointer on source buffer. 107 * @ size : number of bytes to move. 108 ****************************************************************************************/ 109 error_t dev_dma_sync_memcpy( xptr_t dst_xp, 110 xptr_t src_xp, 111 uint32_t size ); 95 112 96 113 -
trunk/kernel/devices/dev_fbf.c
r647 r657 2 2 * dev_fbf.c - FBF (Frame Buffer) generic device API implementation. 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019 )4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 26 26 #include <hal_gpt.h> 27 27 #include <hal_drivers.h> 28 #include <hal_irqmask.h> 29 #include <hal_macros.h> 30 #include <hal_uspace.h> 31 #include <hal_vmm.h> 28 32 #include <thread.h> 29 33 #include <printk.h> 30 34 #include <string.h> 35 #include <memcpy.h> 31 36 #include <chdev.h> 32 37 #include <dev_fbf.h> … … 41 46 char * dev_fbf_cmd_str( uint32_t cmd_type ) 42 47 { 43 if ( cmd_type == FBF_READ ) return "READ"; 44 else if( cmd_type == FBF_WRITE ) return "WRITE"; 45 else if( cmd_type == FBF_GET_CONFIG ) return "GET_CONFIG"; 46 else return "undefined"; 48 if ( cmd_type == FBF_GET_CONFIG ) return "GET_CONFIG"; 49 else if( cmd_type == FBF_CREATE_WINDOW ) return "CREATE_WINDOW"; 50 else if( cmd_type == FBF_DELETE_WINDOW ) return "DELETE_WINDOW"; 51 else if( cmd_type == FBF_MOVE_WINDOW ) return "MOVE_WINDOW"; 52 else if( cmd_type == FBF_REFRESH_WINDOW ) return "REFRESH_WINDOW"; 53 else if( cmd_type == FBF_DIRECT_WRITE ) return "DIRECT_WRITE"; 54 else if( cmd_type == FBF_DIRECT_READ ) return "DIRECT_READ"; 55 else return "undefined"; 47 56 } 48 57 … … 50 59 void dev_fbf_init( chdev_t * fbf ) 51 60 { 61 uint32_t wid; 62 52 63 // set chdev name 53 64 strcpy( fbf->name, "fbf" ); 54 65 55 // call driver init function 66 // initialize lock protecting the windows 67 remote_rwlock_init( XPTR( local_cxy , &fbf->ext.fbf.windows_lock ), 68 LOCK_FBF_WINDOWS ); 69 70 // initialize root of windows xlist 71 xlist_root_init( XPTR( local_cxy , &fbf->ext.fbf.windows_root ) ); 72 73 // initialize windows_tbl[] array 74 for( wid = 0 ; wid < CONFIG_FBF_WINDOWS_MAX_NR ; wid++ ) 75 { 76 fbf->ext.fbf.windows_tbl[wid] = XPTR_NULL; 77 } 78 79 // initialize wid allocator bitmap 80 bitmap_init( fbf->ext.fbf.windows_bitmap , CONFIG_FBF_WINDOWS_MAX_NR 
); 81 82 // call driver init function to initialize the harware FBF 83 // and initialize the width, height, and subsampling FBF chdev fields 56 84 hal_drivers_fbf_init( fbf ); 57 85 … … 66 94 xptr_t dev_xp = chdev_dir.fbf[0]; 67 95 68 96 assert( (dev_xp != XPTR_NULL) , "undefined FBF chdev descriptor" ); 69 97 70 98 // get FBF chdev cluster and local pointer … … 79 107 } // end dev_fbf_get_config() 80 108 81 ///////////////////////////////////////////////////// 82 error_t dev_fbf_move_data( uint32_t cmd_type, 83 void * user_buffer, 84 uint32_t length, 85 uint32_t offset ) 86 { 87 // get pointer on calling thread 88 thread_t * this = CURRENT_THREAD; 109 /////////////////////////////////////////////// 110 uint32_t dev_fbf_create_window( uint32_t nlines, 111 uint32_t npixels, 112 uint32_t l_min, 113 uint32_t p_min, 114 intptr_t * user_buffer ) 115 { 116 kmem_req_t req; 117 fbf_window_t * window; // window descriptor (created in local cluster) 118 vseg_t * vseg; // vseg descriptor (created in reference cluster) 119 intptr_t vseg_base; // vseg base address in user space 120 121 // get local pointers on calling thread and process 122 thread_t * this = CURRENT_THREAD; 123 process_t * process = this->process; 89 124 90 125 #if DEBUG_DEV_FBF 91 126 uint32_t cycle = (uint32_t)hal_get_cycles(); 92 127 if( DEBUG_DEV_FBF < cycle ) 93 printk("\n[%s] thread[%x,%x] : %s / buffer %x / length %d / offset %x / cycle %d\n", 94 __FUNCTION__ , this->process->pid, this->trdid, 95 dev_fbf_cmd_str(cmd_type), user_buffer, length, offset, cycle ); 128 printk("\n[%s] thread[%x,%x] enter : nlines %d / npixels %d / l_min %d / p_min %d / cycle %d\n", 129 __FUNCTION__ , process->pid, this->trdid, nlines, npixels, l_min, p_min, cycle ); 130 #endif 131 132 // get cluster and pointers on FBF chdev 133 xptr_t fbf_xp = chdev_dir.fbf[0]; 134 cxy_t fbf_cxy = GET_CXY( fbf_xp ); 135 chdev_t * fbf_ptr = GET_PTR( fbf_xp ); 136 137 // check fbf_xp definition 138 assert( (fbf_xp != XPTR_NULL) , "undefined FBF 
chdev descriptor" ); 139 140 // get FBF width and height 141 uint32_t fbf_width = hal_remote_l32( XPTR( fbf_cxy , &fbf_ptr->ext.fbf.width ) ); 142 uint32_t fbf_height = hal_remote_l32( XPTR( fbf_cxy , &fbf_ptr->ext.fbf.height ) ); 143 144 // check new window size and coordinates 145 if( (((l_min + nlines) > fbf_height) || ((p_min + npixels) > fbf_width)) ) 146 { 147 printk("\n[ERROR] in %s / thread[%x,%x]" 148 "illegal new coordinates (%d,%d) for window (%d,%d) in fbf (%d,%d)\n", 149 process->pid, this->trdid, p_min, l_min, npixels, nlines, fbf_width, fbf_height ); 150 return -1; 151 } 152 153 // build extended pointers on windows lock, root, and wid allocator 154 xptr_t windows_lock_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_lock ); 155 xptr_t windows_root_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_root ); 156 xptr_t windows_bitmap_xp = XPTR( fbf_cxy , fbf_ptr->ext.fbf.windows_bitmap ); 157 158 // allocate memory for the window descriptor in local cluster 159 req.type = KMEM_KCM; 160 req.order = bits_log2( sizeof(fbf_window_t) ); 161 req.flags = AF_ZERO | AF_KERNEL; 162 window = kmem_alloc( &req ); 163 164 if( window == NULL ) 165 { 166 printk("\n[ERROR] in %s / thread[%x,%x] cannot allocate window descriptor\n", 167 __FUNCTION__, process->pid, this->trdid ); 168 return -1; 169 } 170 171 #if (DEBUG_DEV_FBF & 1) 172 cycle = (uint32_t)hal_get_cycles(); 173 if( DEBUG_DEV_FBF < cycle ) 174 printk("\n[%s] thread[%x,%x] created window descriptor %x / cycle %d\n", 175 __FUNCTION__ , process->pid, this->trdid, window, cycle ); 176 #endif 177 178 // getpointers on reference process 179 xptr_t ref_xp = process->ref_xp; 180 process_t * ref_ptr = GET_PTR( ref_xp ); 181 cxy_t ref_cxy = GET_CXY( ref_xp ); 182 183 // allocate a new vseg, and introduce it in the reference process VSL 184 if( ref_cxy == local_cxy ) 185 { 186 vseg = vmm_create_vseg( process, // owner process 187 VSEG_TYPE_ANON, // localised, public 188 0, // base, unused for ANON 189 nlines * npixels, // 
size 190 0, // file_offset, unused for ANON 191 0, // file_size, unused for ANON 192 XPTR_NULL, // mapper_xp, unused for ANON 193 local_cxy ); // mapping cluster 194 } 195 else 196 { 197 rpc_vmm_create_vseg_client( ref_cxy, 198 ref_ptr, 199 VSEG_TYPE_ANON, 200 0, // base, unused for ANON 201 nlines * npixels, // size 202 0, // file_offset, unused for ANON 203 0, // file size, unused for ANON 204 XPTR_NULL, // mapper_xp, unused for ANON 205 local_cxy, 206 &vseg ); 207 } 208 209 if( vseg == NULL ) 210 { 211 printk("\n[ERROR] in %s / thread[%x,%x] cannot create vseg in reference cluster\n", 212 __FUNCTION__, process->pid, this->trdid ); 213 req.ptr = (void *)window; 214 kmem_free( &req ); 215 return -1; 216 } 217 218 // get vseg base 219 vseg_base = (intptr_t)hal_remote_lpt( XPTR( ref_cxy , &vseg->min ) ); 220 221 #if (DEBUG_DEV_FBF & 1) 222 cycle = (uint32_t)hal_get_cycles(); 223 if( DEBUG_DEV_FBF < cycle ) 224 printk("\n[%s] thread[%x,%x] allocated vseg / base %x / cycle %d\n", 225 __FUNCTION__ , process->pid, this->trdid, vseg_base, cycle ); 226 #endif 227 228 // take the lock protecting windows in write mode 229 remote_rwlock_wr_acquire( windows_lock_xp ); 230 231 // allocate a wid from allocator in FBF descriptor extension 232 uint32_t wid = bitmap_remote_alloc( windows_bitmap_xp , CONFIG_FBF_WINDOWS_MAX_NR ); 233 234 if( wid == 0xFFFFFFFF ) 235 { 236 printk("\n[ERROR] in %s / thread[%x,%x] cannot allocate buffer for window\n", 237 __FUNCTION__, process->pid, this->trdid ); 238 req.ptr = (void *)window; 239 kmem_free( &req ); 240 vmm_remove_vseg( process , vseg ); 241 return -1; 242 } 243 244 // initialize window descriptor 245 window->pid = process->pid; 246 window->wid = wid; 247 window->height = nlines; 248 window->width = npixels; 249 window->l_min = l_min; 250 window->p_min = p_min; 251 window->hidden = false; 252 window->buffer = (uint8_t *)vseg_base; 253 254 // register new window in xlist rooted in FBF extension 255 xlist_add_last( windows_root_xp , XPTR( 
local_cxy , &window->xlist ) ); 256 257 // build extended pointer on relevant entry in windows_tbl[] array 258 xptr_t windows_tbl_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_tbl[wid] ); 259 260 // register new window in windows_tbl[] stored in FBF extension 261 hal_remote_s64( windows_tbl_xp , XPTR( local_cxy , window ) ); 262 263 // release the lock protecting windows in write mode 264 remote_rwlock_wr_release( windows_lock_xp ); 265 266 #if DEBUG_DEV_FBF 267 cycle = (uint32_t)hal_get_cycles(); 268 if( DEBUG_DEV_FBF < cycle ) 269 printk("\n[%s] thread[%x,%x] exit / wid %d / buffer %x / cycle %d\n", 270 __FUNCTION__ , this->process->pid, this->trdid, wid , window->buffer, cycle ); 271 #endif 272 273 #if (DEBUG_DEV_FBF & 1) 274 hal_vmm_display( ref_xp , true ); 275 #endif 276 277 // return pointer on allocated buffer 278 *user_buffer = vseg_base; 279 280 return wid; 281 282 } // end dev_fbf_create_window() 283 284 //////////////////////////////////////////////////////////////////////////////////////// 285 // This static function is called by the dev_fbf_display() function. 286 // For a partial FBF line, identified by the <f_line>, <f_p_min>, <f_p_max> arguments 287 // in FBF reference, and for one remote window, identified by the <window_xp> argument, 288 // it updates the target buffer identified by <t_buffer>, and containing exactly 289 // (f_p_max - f_p_min) pixels. 290 // Depending on the actual overlap between the window and the <t_buffer> representing 291 // the partial FBF line, it moves up to (f_p_max - f_p_min) pixels from the window 292 // buffer to the target buffer. The number of moved pixels can be nul if no overlap. 293 //////////////////////////////////////////////////////////////////////////////////////// 294 // @ f_line : [in] line index in FBF reference (from 0 to fbf_height-1). 295 // @ f_p_min : [in] first pixel in FBF line. 296 // @ f_p_max : [in] last pixel in FBF line (excluded). 
297 // @ window_xp : [in] extended pointer on checked window . 298 // @ t_buffer : [out] local pointer on target buffer to be updated. 299 /////////////////////////////////////////////////////////////////////////////////////// 300 __attribute__ ((noinline)) static void handle_one_window( uint32_t f_line, 301 uint32_t f_p_min, 302 uint32_t f_p_max, 303 xptr_t window_xp, 304 uint8_t * t_buffer ) 305 { 306 // get remote window descriptor cluster and local pointer 307 cxy_t window_cxy = GET_CXY( window_xp ); 308 fbf_window_t * window_ptr = GET_PTR( window_xp ); 309 310 // get remote window min/max coordinates in FBF reference 311 uint32_t w_l_min = hal_remote_l32( XPTR( window_cxy , &window_ptr->l_min ) ); 312 uint32_t w_p_min = hal_remote_l32( XPTR( window_cxy , &window_ptr->p_min ) ); 313 uint32_t w_height = hal_remote_l32( XPTR( window_cxy , &window_ptr->height ) ); 314 uint32_t w_width = hal_remote_l32( XPTR( window_cxy , &window_ptr->width ) ); 315 uint32_t w_l_max = w_l_min + w_height; 316 uint32_t w_p_max = w_p_min + w_width; 317 318 // does nothing if partial FBF line does not overlap the window 319 if( (f_line < w_l_min) || (f_line >= w_l_max) || 320 (f_p_max < w_p_min) || (f_p_min >= w_p_max) ) return; 321 322 // get pointer on window buffer in user space 323 uint8_t * w_buffer = hal_remote_lpt( XPTR( window_cxy , &window_ptr->buffer ) ); 324 325 // get min & max indexes for pixels to be moved in FBF reference 326 uint32_t f_pixel_min = (f_p_min < w_p_min) ? w_p_min : f_p_min; 327 uint32_t f_pixel_max = (f_p_max < w_p_max) ? 
f_p_max : w_p_max; 328 329 // compute number of pixels to move from w_buffer to f_buffer 330 uint32_t npixels = f_pixel_max - f_pixel_min; 331 332 // compute offset in target buffer 333 uint32_t t_offset = f_pixel_min - f_p_min; 334 335 // compute line index in window 336 uint32_t w_line = f_line - w_l_min; 337 338 // compute offset in window buffer 339 uint32_t w_offset = (w_line * w_height) + f_pixel_min - w_p_min; 340 341 // move pixels from w_buffer (user space) to t_buffer in kernel space 342 hal_copy_from_uspace( XPTR( local_cxy , &t_buffer[t_offset] ), 343 &w_buffer[w_offset], 344 npixels ); 345 346 } // end handle_one_window() 347 348 //////////////////////////////////////////////////////////////////////////////////////// 349 // This static function is called by dev_fbf_refresh_window(), dev_fbf_move_window(), 350 // dev_fbf_resize_window(), and dev_fbf_delete_window(). It updates all lines of the 351 // window identified by the <window_xp>, <line_first>, and <line_last>> arguments. 352 // It scan all registered windows to take into account the overlap priorities defined 353 // by the windows xlist. It does not take the lock protecting the xlist, that must be 354 // taken by the calling function. 355 //////////////////////////////////////////////////////////////////////////////////////// 356 // @ window_xp : [in] extended pointer on window defining the FBF pixels to refresh. 357 // @ line_first : [in] first line index. 358 // @ line_last : [in] last line index (excluded). 
359 //////////////////////////////////////////////////////////////////////////////////////// 360 error_t fbf_update( xptr_t window_xp, 361 uint32_t line_first, 362 uint32_t line_last ) 363 { 364 uint32_t line; // iterator to scan the FBF lines 365 uint32_t pixel; // iterator to scan pixels in one FBF line 366 xptr_t iter_xp; // iterator to scan the list of windows 367 error_t error; 368 369 // this intermediate buffer stores one line in 370 // target window, to handle other windows overlap 371 uint8_t line_buffer[CONFIG_FBF_WINDOWS_MAX_WIDTH]; 372 373 // get pointer on calling thread and core lid 374 thread_t * this = CURRENT_THREAD; 375 376 // get window cluster and local pointer 377 cxy_t window_cxy = GET_CXY( window_xp ); 378 fbf_window_t * window_ptr = GET_PTR( window_xp ); 379 380 #if DEBUG_DEV_FBF 381 uint32_t wid = hal_remote_l32( XPTR( window_cxy , &window_ptr->wid ) ); 382 uint32_t lid = this->core->lid; 383 uint32_t cycle = (uint32_t)hal_get_cycles(); 384 if( DEBUG_DEV_FBF < cycle ) 385 printk("\n[%s] core[%x,%d] enter / wid %d / cycle %d\n", 386 __FUNCTION__, local_cxy, lid, wid, cycle ); 96 387 #endif 97 388 … … 101 392 chdev_t * fbf_ptr = GET_PTR( fbf_xp ); 102 393 103 // check fbf_xp definition 104 assert( (fbf_xp != XPTR_NULL) , "undefined FBF chdev descriptor" ); 394 // get frame buffer width 395 uint32_t fbf_width = hal_remote_l32( XPTR( fbf_cxy , &fbf_ptr->ext.fbf.width ) ); 396 397 // get pointer on driver command function 398 dev_cmd_t * cmd = hal_remote_lpt( XPTR( fbf_cxy , &fbf_ptr->cmd ) ); 399 400 // build extended pointers on windows xlist root 401 xptr_t windows_root_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_root ); 402 403 // get window size and coordinates 404 uint32_t p_min = hal_remote_l32( XPTR( window_cxy , &window_ptr->p_min ) ); 405 uint32_t l_min = hal_remote_l32( XPTR( window_cxy , &window_ptr->l_min ) ); 406 uint32_t w_pixels = hal_remote_l32( XPTR( window_cxy , &window_ptr->width ) ); 407 408 error = 0; 409 410 // loop on 
target window lines (FBF coordinates) 411 for( line = l_min + line_first ; line < (l_min + line_last) ; line++ ) 412 { 413 // reset the line buffer to default value 414 for( pixel = 0 ; pixel < w_pixels ; pixel++ ) line_buffer[pixel] = 127; 415 416 // loop on all windows 417 XLIST_FOREACH( windows_root_xp , iter_xp ) 418 { 419 // get pointers on remote window 420 xptr_t tgt_xp = XLIST_ELEMENT( iter_xp , fbf_window_t , xlist ); 421 fbf_window_t * tgt_ptr = GET_PTR( window_xp ); 422 cxy_t tgt_cxy = GET_CXY( window_xp ); 423 424 bool_t hidden = hal_remote_l32( XPTR( tgt_cxy , &tgt_ptr->hidden ) ); 425 426 // fill the line_buf for this window if not hidden 427 if( hidden == false ) handle_one_window( line, // line index 428 p_min, // pixel_min 429 p_min + w_pixels, // pixel_max 430 tgt_xp, // window_xp 431 line_buffer ); 432 } // end for windows 433 434 // compute offset in FBF 435 uint32_t fbf_offset = p_min + (line * fbf_width); 436 437 // register command in calling thread descriptor 438 this->fbf_cmd.dev_xp = fbf_xp; 439 this->fbf_cmd.type = FBF_DRIVER_KERNEL_WRITE; 440 this->fbf_cmd.buffer = line_buffer; 441 this->fbf_cmd.npixels = w_pixels; 442 this->fbf_cmd.offset = fbf_offset; 443 444 // call driver to display one line 445 cmd( XPTR( local_cxy , this ) ); 446 447 error |= this->fbf_cmd.error; 448 449 } // end for lines 450 451 #if DEBUG_DEV_FBF 452 cycle = (uint32_t)hal_get_cycles(); 453 if( DEBUG_DEV_FBF < cycle ) 454 printk("\n[%s] core[%x,%d] exit / wid %d / cycle %d\n", 455 __FUNCTION__, local_cxy, this->core->lid, wid, cycle ); 456 #endif 457 458 // return I/O operation status 459 return error; 460 461 } // end fbf_update() 462 463 ////////////////////////////////////////////// 464 error_t dev_fbf_delete_window( uint32_t wid ) 465 { 466 kmem_req_t req; 467 468 thread_t * this = CURRENT_THREAD; 469 process_t * process = this->process; 470 471 #if DEBUG_DEV_FBF 472 uint32_t cycle = (uint32_t)hal_get_cycles(); 473 if( DEBUG_DEV_FBF < cycle ) 474 
printk("\n[%s] thread[%x,%x] enters : wid %d / cycle %d\n", 475 __FUNCTION__ , process->pid, this->trdid, wid, cycle ); 476 #endif 477 478 // get cluster and pointers on FBF chdev 479 xptr_t fbf_xp = chdev_dir.fbf[0]; 480 cxy_t fbf_cxy = GET_CXY( fbf_xp ); 481 chdev_t * fbf_ptr = GET_PTR( fbf_xp ); 482 483 // build extended pointers on windows lock, and wid allocator 484 xptr_t windows_lock_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_lock ); 485 xptr_t wid_bitmap_xp = XPTR( fbf_cxy , fbf_ptr->ext.fbf.windows_bitmap ); 486 487 // build extended pointer on relevant entry in windows_tbl[] array 488 xptr_t windows_tbl_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_tbl[wid] ); 489 490 // get extended pointer on remote window descriptor 491 xptr_t window_xp = hal_remote_l64( windows_tbl_xp ); 492 493 if( window_xp == XPTR_NULL ) 494 { 495 printk("\n[ERROR] in %s / thread[%x,%x] / wid %d non registered\n", 496 __FUNCTION__, process->pid, this->trdid, wid ); 497 return -1; 498 } 499 500 // get cluster and local pointer on remote window 501 cxy_t window_cxy = GET_CXY( window_xp ); 502 fbf_window_t * window_ptr = GET_PTR( window_xp ); 503 504 // get process owner PID 505 pid_t owner_pid = hal_remote_l32( XPTR( window_cxy , &window_ptr->pid ) ); 506 507 // check caller PID / owner PID 508 if( owner_pid != process->pid ) 509 { 510 printk("\n[ERROR] in %s : caller PID (%x) != owner PID (%x)\n", 511 __FUNCTION__, process->pid , owner_pid ); 512 return -1; 513 } 514 515 // get associated buffer, and number of lines 516 uint8_t * buffer = hal_remote_lpt( XPTR( window_cxy , &window_ptr->buffer ) ); 517 uint32_t nlines = hal_remote_l32( XPTR( window_cxy , &window_ptr->height ) ); 518 519 // 1. take the lock protecting windows in write mode 520 remote_rwlock_wr_acquire( windows_lock_xp ); 521 522 // 2. update the FBF window 523 fbf_update( window_xp , 0 , nlines ); 524 525 // 3. 
remove the window from windows_tbl[] array 526 hal_remote_s64( windows_tbl_xp , XPTR_NULL ); 527 528 // 4. remove the window from xlist 529 xlist_unlink( XPTR( window_cxy , &window_ptr->xlist ) ); 530 531 // 5. release wid to bitmap 532 bitmap_remote_clear( wid_bitmap_xp , wid ); 533 534 // 6. release the lock protecting windows in write mode 535 remote_rwlock_wr_release( windows_lock_xp ); 536 537 // 7. release memory allocated for window descriptor 538 req.type = KMEM_KCM; 539 req.ptr = window_ptr; 540 kmem_remote_free( window_cxy , &req ); 541 542 // 8. release the associated vseg 543 vmm_global_delete_vseg( process , (intptr_t)buffer ); 544 545 #if DEBUG_DEV_FBF 546 cycle = (uint32_t)hal_get_cycles(); 547 if( DEBUG_DEV_FBF < cycle ) 548 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 549 __FUNCTION__ , process->pid, this->trdid, cycle ); 550 #endif 551 552 return 0; 553 554 } // end dev_fbf_delete_window() 555 556 //////////////////////////////////////////// 557 error_t dev_fbf_move_window( uint32_t wid, 558 uint32_t l_min, 559 uint32_t p_min ) 560 { 561 thread_t * this = CURRENT_THREAD; 562 process_t * process = this->process; 563 564 #if DEBUG_DEV_FBF 565 uint32_t cycle = (uint32_t)hal_get_cycles(); 566 if( DEBUG_DEV_FBF < cycle ) 567 printk("\n[%s] thread[%x,%x] enters : wid %d / l_min %d / p_min %d / cycle %d\n", 568 __FUNCTION__ , process->pid, this->trdid, wid, l_min, p_min, cycle ); 569 #endif 570 571 // get cluster and pointers on FBF chdev 572 xptr_t fbf_xp = chdev_dir.fbf[0]; 573 cxy_t fbf_cxy = GET_CXY( fbf_xp ); 574 chdev_t * fbf_ptr = GET_PTR( fbf_xp ); 575 576 // build extended pointers on windows lock and root 577 xptr_t windows_lock_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_lock ); 578 xptr_t windows_root_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_root ); 579 580 // build extended pointer on relevant entry in windows_tbl[] array 581 xptr_t windows_tbl_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_tbl[wid] ); 582 583 // get extended 
pointer on remote window descriptor 584 xptr_t window_xp = hal_remote_l64( windows_tbl_xp ); 585 586 if( window_xp == XPTR_NULL ) 587 { 588 printk("\n[ERROR] in %s / thread[%x,%x] / wid %d non registered\n", 589 __FUNCTION__, process->pid, this->trdid, wid ); 590 return -1; 591 } 592 593 // get cluster and local pointer for remote window 594 cxy_t window_cxy = GET_CXY( window_xp ); 595 fbf_window_t * window_ptr = GET_PTR( window_xp ); 596 597 // get process owner PID, coordinates, and number of lines 598 pid_t owner_pid = hal_remote_l32( XPTR( window_cxy , &window_ptr->pid ) ); 599 uint32_t p_zero = hal_remote_l32( XPTR( window_cxy , &window_ptr->p_min ) ); 600 uint32_t l_zero = hal_remote_l32( XPTR( window_cxy , &window_ptr->l_min ) ); 601 uint32_t nlines = hal_remote_l32( XPTR( window_cxy , &window_ptr->height ) ); 602 603 // check caller PID / owner PID 604 if( owner_pid != process->pid ) 605 { 606 printk("\n[ERROR] in %s : caller PID (%x) != owner PID (%x)\n", 607 __FUNCTION__, process->pid , owner_pid ); 608 return -1; 609 } 610 611 // does nothing if no change 612 if( (p_zero == p_min) && (l_zero == l_min) ) return 0; 613 614 // 1. take the lock protecting windows in write mode 615 remote_rwlock_wr_acquire( windows_lock_xp ); 616 617 #if ( DEBUG_DEV_FBF & 1 ) 618 printk("\n[%s] lock taken\n", __FUNCTION__ ); 619 #endif 620 621 // 2. gives the window the lowest priority 622 xptr_t xlist_entry_xp = XPTR( window_cxy , &window_ptr->xlist ); 623 xlist_unlink( xlist_entry_xp ); 624 xlist_add_first( windows_root_xp , xlist_entry_xp ); 625 626 #if ( DEBUG_DEV_FBF & 1 ) 627 printk("\n[%s] set low priority \n", __FUNCTION__ ); 628 #endif 629 630 // 3. set the "hidden" flag in window descriptor 631 hal_remote_s32( XPTR( window_cxy , &window_ptr->hidden ) , true ); 632 633 #if ( DEBUG_DEV_FBF & 1 ) 634 printk("\n[%s] hidden set\n", __FUNCTION__ ); 635 #endif 636 637 // 4. 
refresh the FBF for the current window position 638 fbf_update( window_xp , 0 , nlines ); 639 640 #if ( DEBUG_DEV_FBF & 1 ) 641 printk("\n[%s] refreshed old position\n", __FUNCTION__ ); 642 #endif 643 644 // 5. set the new coordinates in the window descriptor, 645 hal_remote_s32( XPTR( window_cxy , &window_ptr->l_min ), l_min ); 646 hal_remote_s32( XPTR( window_cxy , &window_ptr->p_min ), p_min ); 647 648 #if ( DEBUG_DEV_FBF & 1 ) 649 printk("\n[%s] l_min & p_min updated\n", __FUNCTION__ ); 650 #endif 651 652 // 6. gives the window the highest priority 653 xlist_unlink( xlist_entry_xp ); 654 xlist_add_last( windows_root_xp , xlist_entry_xp ); 655 656 #if ( DEBUG_DEV_FBF & 1 ) 657 printk("\n[%s] set high priority\n", __FUNCTION__ ); 658 #endif 659 660 // 7. reset the "hidden" flag in window descriptor 661 hal_remote_s32( XPTR( window_cxy , &window_ptr->hidden ) , false ); 662 663 #if ( DEBUG_DEV_FBF & 1 ) 664 printk("\n[%s] hidden reset\n", __FUNCTION__ ); 665 #endif 666 667 // 8. refresh the FBF for the new window position 668 fbf_update( window_xp , 0 , nlines ); 669 670 #if ( DEBUG_DEV_FBF & 1 ) 671 printk("\n[%s] refresh new position\n", __FUNCTION__ ); 672 #endif 673 674 // 9. 
release the lock protecting windows in write mode 675 remote_rwlock_wr_release( windows_lock_xp ); 676 677 #if DEBUG_DEV_FBF 678 cycle = (uint32_t)hal_get_cycles(); 679 if( DEBUG_DEV_FBF < cycle ) 680 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 681 __FUNCTION__ , process->pid, this->trdid, cycle ); 682 #endif 683 684 return 0; 685 686 } // end dev_fbf_move_window() 687 688 ///////////////////////////////////////////// 689 error_t dev_fbf_resize_window( uint32_t wid, 690 uint32_t width, 691 uint32_t height ) 692 { 693 thread_t * this = CURRENT_THREAD; 694 process_t * process = this->process; 695 696 #if DEBUG_DEV_FBF 697 uint32_t cycle = (uint32_t)hal_get_cycles(); 698 if( DEBUG_DEV_FBF < cycle ) 699 printk("\n[%s] thread[%x,%x] enters : wid %d / width %d / height %d / cycle %d\n", 700 __FUNCTION__ , process->pid , this->trdid , wid, width , height , cycle ); 701 #endif 702 703 // get cluster and pointers on FBF chdev 704 xptr_t fbf_xp = chdev_dir.fbf[0]; 705 cxy_t fbf_cxy = GET_CXY( fbf_xp ); 706 chdev_t * fbf_ptr = GET_PTR( fbf_xp ); 707 708 // build extended pointers on windows lock and root 709 xptr_t windows_lock_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_lock ); 710 xptr_t windows_root_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_root ); 711 712 // build extended pointer on relevant entry in windows_tbl[] array 713 xptr_t windows_tbl_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_tbl[wid] ); 714 715 // get extended pointer on remote window descriptor 716 xptr_t window_xp = hal_remote_l64( windows_tbl_xp ); 717 718 if( window_xp == XPTR_NULL ) 719 { 720 printk("\n[ERROR] in %s / thread[%x,%x] / wid %d non registered\n", 721 __FUNCTION__, process->pid, this->trdid, wid ); 722 return -1; 723 } 724 725 // get cluster and local pointer for remote window 726 cxy_t window_cxy = GET_CXY( window_xp ); 727 fbf_window_t * window_ptr = GET_PTR( window_xp ); 728 729 // get process owner PID, width, height, and buffer 730 pid_t owner_pid = hal_remote_l32( XPTR( 
window_cxy , &window_ptr->pid ) ); 731 uint32_t nlines = hal_remote_l32( XPTR( window_cxy , &window_ptr->height ) ); 732 uint32_t npixels = hal_remote_l32( XPTR( window_cxy , &window_ptr->width ) ); 733 void * base = hal_remote_lpt( XPTR( window_cxy , &window_ptr->buffer ) ); 734 735 // check caller PID / owner PID 736 if( owner_pid != process->pid ) 737 { 738 printk("\n[ERROR] in %s : caller PID (%x) != owner PID (%x)\n", 739 __FUNCTION__, process->pid , owner_pid ); 740 return -1; 741 } 742 743 // does nothing if no change 744 if( (width == npixels) && (height == nlines) ) return 0; 745 746 // compute old_size and new size 747 uint32_t old_size = nlines * npixels; 748 uint32_t new_size = width * height; 749 750 // 1. take the lock protecting windows in write mode 751 remote_rwlock_wr_acquire( windows_lock_xp ); 752 753 #if ( DEBUG_DEV_FBF & 1 ) 754 printk("\n[%s] lock taken\n", __FUNCTION__ ); 755 #endif 756 757 // 2. gives the window the lowest priority (remove, then add first) 758 xptr_t xlist_entry_xp = XPTR( window_cxy , &window_ptr->xlist ); 759 xlist_unlink( xlist_entry_xp ); 760 xlist_add_first( windows_root_xp , xlist_entry_xp ); 761 762 #if ( DEBUG_DEV_FBF & 1 ) 763 printk("\n[%s] set low priority\n", __FUNCTION__ ); 764 #endif 765 766 // 3. set the "hidden" flag in window descriptor 767 hal_remote_s32( XPTR( window_cxy , &window_ptr->hidden ) , true ); 768 769 #if ( DEBUG_DEV_FBF & 1 ) 770 printk("\n[%s] hidden set\n", __FUNCTION__ ); 771 #endif 772 773 // 4. refresh the FBF for the current window size 774 fbf_update( window_xp , 0 , nlines ); 775 776 #if ( DEBUG_DEV_FBF & 1 ) 777 printk("\n[%s] refreshed old window\n", __FUNCTION__ ); 778 #endif 779 780 // 5. 
set the new width & height in the window descriptor, 781 hal_remote_s32( XPTR( window_cxy , &window_ptr->width ), width ); 782 hal_remote_s32( XPTR( window_cxy , &window_ptr->height ), height ); 783 784 #if ( DEBUG_DEV_FBF & 1 ) 785 printk("\n[%s] width & height updated\n", __FUNCTION__ ); 786 #endif 787 788 // 6. resize vseg if required 789 vmm_global_resize_vseg( process, (intptr_t)base, (intptr_t)base, width * height ); 790 791 #if ( DEBUG_DEV_FBF & 1 ) 792 printk("\n[%s] vseg resized\n", __FUNCTION__ ); 793 #endif 794 795 // 7. fill buffer extension if required 796 if( new_size > old_size ) memset( base + old_size , 0 , new_size - old_size ); 797 798 #if ( DEBUG_DEV_FBF & 1 ) 799 printk("\n[%s] buffer extension initialized\n", __FUNCTION__ ); 800 #endif 801 802 // 8. gives the window the highest priority 803 xlist_unlink( xlist_entry_xp ); 804 xlist_add_last( windows_root_xp , xlist_entry_xp ); 805 806 #if ( DEBUG_DEV_FBF & 1 ) 807 printk("\n[%s] set high priority\n", __FUNCTION__ ); 808 #endif 809 810 // 9. reset the "hidden" flag in window descriptor 811 hal_remote_s32( XPTR( window_cxy , &window_ptr->hidden ) , false ); 812 813 #if ( DEBUG_DEV_FBF & 1 ) 814 printk("\n[%s] hidden reset\n", __FUNCTION__ ); 815 #endif 816 817 // 10. refresh the FBF for the new window position 818 fbf_update( window_xp , 0 , height ); 819 820 #if ( DEBUG_DEV_FBF & 1 ) 821 printk("\n[%s] refresh new position\n", __FUNCTION__ ); 822 #endif 823 824 // 11. 
release the lock protecting windows in write mode 825 remote_rwlock_wr_release( windows_lock_xp ); 826 827 #if DEBUG_DEV_FBF 828 cycle = (uint32_t)hal_get_cycles(); 829 if( DEBUG_DEV_FBF < cycle ) 830 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 831 __FUNCTION__ , process->pid, this->trdid, cycle ); 832 #endif 833 834 return 0; 835 836 } // end dev_fbf_resize_window() 837 838 /////////////////////////////////////////////// 839 error_t dev_fbf_refresh_window( uint32_t wid, 840 uint32_t line_first, 841 uint32_t line_last ) 842 { 843 // get local pointers on calling thread and process 844 thread_t * this = CURRENT_THREAD; 845 process_t * process = this->process; 846 847 #if DEBUG_DEV_FBF 848 uint32_t cycle = (uint32_t)hal_get_cycles(); 849 if( DEBUG_DEV_FBF < cycle ) 850 printk("\n[%s] thread[%x,%x] enters for wid %d / first %d / last %d / cycle %d\n", 851 __FUNCTION__ , process->pid, this->trdid, wid, line_first, line_last, cycle ); 852 #endif 853 854 // get cluster and pointers on FBF chdev 855 xptr_t fbf_xp = chdev_dir.fbf[0]; 856 cxy_t fbf_cxy = GET_CXY( fbf_xp ); 857 chdev_t * fbf_ptr = GET_PTR( fbf_xp ); 858 859 // build extended pointer on windows lock 860 xptr_t windows_lock_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_lock ); 861 862 // build extended pointer on relevant entry in windows_tbl[] array 863 xptr_t windows_tbl_xp = XPTR( fbf_cxy , &fbf_ptr->ext.fbf.windows_tbl[wid] ); 864 865 // get pointers on remote window descriptor 866 xptr_t window_xp = hal_remote_l64( windows_tbl_xp ); 867 cxy_t window_cxy = GET_CXY( window_xp ); 868 fbf_window_t * window_ptr = GET_PTR( window_xp ); 869 870 // check <wid> argument 871 if( window_xp == XPTR_NULL ) 872 { 873 printk("\n[ERROR] in %s / thread[%x,%x] / wid %d non registered\n", 874 __FUNCTION__, process->pid, this->trdid, wid ); 875 return -1; 876 } 877 878 // get process owner PID 879 pid_t owner_pid = hal_remote_l32( XPTR( window_cxy , &window_ptr->pid ) ); 880 881 // check caller PID / owner PID 882 if( 
owner_pid != process->pid ) 883 { 884 printk("\n[ERROR] in %s : caller PID (%x) != owner PID (%x)\n", 885 __FUNCTION__, process->pid , owner_pid ); 886 return -1; 887 } 888 889 // get number of lines in window 890 uint32_t nlines = hal_remote_l32( XPTR( window_cxy , &window_ptr->height ) ); 891 892 // check <line_first> and <line_last> arguments 893 if( (line_first >= nlines) || (line_last > nlines) || (line_first >= line_last) ) 894 { 895 printk("\n[ERROR] in %s : illegal (l_first %d , l_last %d) / height %d\n", 896 __FUNCTION__, line_first, line_last, nlines ); 897 return -1; 898 } 899 900 // take the lock protecting windows xlist in read mode 901 remote_rwlock_rd_acquire( windows_lock_xp ); 902 903 // update FBF 904 fbf_update( window_xp , line_first , line_last ); 905 906 // release the lock protecting windows xlist in write mode 907 remote_rwlock_rd_release( windows_lock_xp ); 908 909 #if DEBUG_DEV_FBF 910 cycle = (uint32_t)hal_get_cycles(); 911 if( DEBUG_DEV_FBF < cycle ) 912 printk("\n[%s] thread[%x,%x] exit for wid %d / cycle %d\n", 913 __FUNCTION__, process->pid, this->trdid, wid, cycle ); 914 #endif 915 916 return 0; 917 918 } // end dev_fbf_refresh_window() 919 920 /////////////////////////////////////////////// 921 // TODO Deprecated : january 2020 [AG] 922 /////////////////////////////////////////////// 923 error_t dev_fbf_move_data( bool_t is_write, 924 void * user_buffer, 925 uint32_t npixels, 926 uint32_t offset ) 927 { 928 // get pointer on calling thread 929 thread_t * this = CURRENT_THREAD; 930 931 #if DEBUG_DEV_FBF 932 uint32_t cycle = (uint32_t)hal_get_cycles(); 933 if( DEBUG_DEV_FBF < cycle ) 934 printk("\n[%s] thread[%x,%x] : buffer %x / npixels %d / offset %x / cycle %d\n", 935 __FUNCTION__ , this->process->pid, this->trdid, 936 user_buffer, npixels, offset, cycle ); 937 #endif 938 939 // get pointers on FBF chdev 940 xptr_t fbf_xp = chdev_dir.fbf[0]; 941 cxy_t fbf_cxy = GET_CXY( fbf_xp ); 942 chdev_t * fbf_ptr = GET_PTR( fbf_xp ); 105 943 
106 944 // get frame buffer width and height … … 108 946 uint32_t height = hal_remote_l32 ( XPTR( fbf_cxy , &fbf_ptr->ext.fbf.height ) ); 109 947 110 // check offset and length versus FBF size 111 assert( ((offset + length) <= (width * height)) , 112 "offset %d / length %d / width %d / height %d\n", offset, length, width, height ); 948 // check offset and npixels versus FBF size 949 if( ((offset + npixels) > (width * height)) ) 950 { 951 printk("\n[ERROR] in %s : offset (%d) + npixels (%d) / width (%d) / height (%d)\n", 952 __FUNCTION__, offset, npixels, width, height ); 953 return -1; 954 } 113 955 114 956 // register command in calling thread descriptor 115 957 this->fbf_cmd.dev_xp = fbf_xp; 116 this->fbf_cmd.type = cmd_type;958 this->fbf_cmd.type = is_write ? FBF_DRIVER_USER_WRITE : FBF_DRIVER_USER_READ; 117 959 this->fbf_cmd.buffer = user_buffer; 118 960 this->fbf_cmd.offset = offset; 119 this->fbf_cmd. length = length;961 this->fbf_cmd.npixels = npixels; 120 962 121 963 // get driver command function … … 130 972 cycle = (uint32_t)hal_get_cycles(); 131 973 if( DEBUG_DEV_FBF < cycle ) 132 printk("\n[%s] thread[%x,%x] completes %s / error = %d / cycle %d\n", 133 __FUNCTION__ , this->process->pid, this->trdid, 134 dev_fbf_cmd_str(cmd_type), error , cycle ); 974 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 975 __FUNCTION__ , this->process->pid, this->trdid, cycle ); 135 976 #endif 136 977 … … 139 980 140 981 } // end dev_fbf_move_data() 982 983 -
trunk/kernel/devices/dev_fbf.h
r647 r657 1 1 /* 2 * dev_fbf.h - FBF ( Block Device Controler) generic device API definition.2 * dev_fbf.h - FBF (Frame Buffer) generic device API definition. 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019 )4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 27 27 #include <hal_kernel_types.h> 28 28 #include <shared_fbf.h> 29 #include <remote_rwlock.h> 30 #include <bits.h> 29 31 30 32 /**** Forward declarations ****/ … … 33 35 34 36 /***************************************************************************************** 35 * Generic Frame Buffer Controlerdefinition37 * Frame Buffer Controler API definition 36 38 * 37 39 * This device provide access to an external graphic display, that is seen 38 40 * as a fixed size frame buffer, mapped in the kernel address space. 39 * The supported pixel encoding types are defined in the <shared_fbf.h> file. 40 * 41 * It supports three command types: 42 * GET_CONFIG : return frame buffer size and type. 43 * READ : move bytes from frame buffer to memory / deschedule the calling thread. 44 * WRITE : move bytes from memory to frame buffer / deschedule the calling thread. 45 * 46 * The READ and WRITE operations do not use the FBF device waiting queue, 47 * the server thread, and the IOC IRQ. The client thread does not deschedule: 48 * it registers the command in the thread descriptor, and calls directly the FBF driver. 49 * that makes a (user <-> kernel) memcpy. 41 * The only pixel encoding type in the current implementation is one byte per pixel 42 * (256 levels of gray). 43 * 44 * It supports a first API, for the user syscalls, implementing a simple windows manager. 45 * This windows manager allows any process to create and use for display one (or several) 46 * window(s). Each window defines a private buffer, dynamically allocated in user space, 47 * that can be directly accessed by the owner process. 
48 * These windows can be moved in the frame buffer, they can be resized, they can overlap 49 * other windows, but a window must be entirely contained in the frame buffer. 50 * 51 * To avoid contention, the window descriptor, and the associated user buffer are not 52 * allocated in the cluster containing the FBF chdev, but are distributed: each window 53 * is allocated in the cluster defined by the thread that required the window creation. 54 * 55 * Each window has a single process owner, but all the windows are registered in the FBF 56 * chdev as a windows_tbl[] array, indexed by the window identifier (wid), and each entry 57 * contains an extended pointer on the window descriptor. All windows are also registered 58 * in a trans-cluster xlist, defining the overlapping order (last window in xlist has the 59 * highest priority). 60 * 61 * To refresh a window <wid>, the owner process calls the dev_fbf_refresh_window() 62 * function, that sends parallel RPC_FBF_DISPLAY requests to other cores. All cores 63 * synchronously execute the dev_fbf_display() function. This function scan all windows 64 * to respect the overlaping order, and updates all pixels of the <wid> window. 50 65 * 51 * Note: As we don't use any external DMA to move data, but a purely software approach, 52 * there is no L2/L3 coherence issue. 66 * 1) This "syscall" API defines five syscalls : 67 * - FBF_GET_CONFIG : returns the FBF width, height, and pixel encoding type. 68 * - FBF_CREATE_WINDOW : create a new window , owned by the calling process. 69 * - FBF_DELETE_WINDOW : delete a registered window. 70 * - FBF_MOVE_WINDOW : move in FBF a registered window. 71 * - FBF_REFRESH_WINDOW : request refresh of a given window. 72 * 73 * These 5 operations do not use the FBF device waiting queue, the associated 74 * device thread and the FBF IRQ, as the client thread does NOT deschedule, 75 * and does NOT call the FBF driver. 
76 * 77 * 2) Two extra syscalls exist but are deprecated: 78 * - FBF_DIRECT_WRITE : move synchronously pixels from an user buffer to the FBF. 79 * - FBF_DIRECT_READ : move synchronously pixels from the FBF to an user buffer. 80 * 81 * For these deprecated operations, the client thread calls 82 * directly the driver to move data between the user buffer and the FBF. 83 * 84 * 3) The FBF device defines defines four command types to access FBF driver(s) : 85 * - FBF_DRIVER_KERNEL_WRITE : move pixels from a kernel window to the FBF. 86 * - FBF_DRIVER_KERNEL_READ : move pixels from the FBF to a kernel window. 87 * - FBF_DRIVER_USER_WRITE : move bytes from an user buffer to the FBF. 88 * - FBF_DRIVER_USER_READ : move bytes from the FBF to an user buffer. 89 * 90 * Note: As we don't use any external DMA to move data to or from the frame buffer, 91 * but only software memcpy, there is no L2/L3 coherence issue for this device. 92 * 53 93 *****************************************************************************************/ 54 94 … … 59 99 typedef struct fbf_extend_s 60 100 { 61 uint32_t width; /*! number of pixels per line. */ 62 uint32_t height; /*! total number of lines. */ 63 uint32_t subsampling; /*! pixel encoding type. */ 101 remote_rwlock_t windows_lock; /*! lock protecting windows xlist */ 102 xlist_entry_t windows_root; /*! root of the windows xlist */ 103 104 xptr_t windows_tbl[CONFIG_FBF_WINDOWS_MAX_NR]; /*! window desc. */ 105 bitmap_t windows_bitmap[CONFIG_FBF_WINDOWS_MAX_NR >> 5]; /*! wid allocator */ 106 107 uint32_t width; /*! number of pixels per line. */ 108 uint32_t height; /*! total number of lines. */ 109 uint32_t subsampling; /*! pixel encoding type. 
*/ 64 110 } 65 111 fbf_extend_t; … … 70 116 *****************************************************************************************/ 71 117 72 enum fbf_impl_e 118 typedef enum 73 119 { 74 120 IMPL_FBF_SCL = 0, … … 77 123 fbf_impl_t; 78 124 125 /****************************************************************************************** 126 * This structure defines the FBF command for all drivers implementing the FBF device. 127 *****************************************************************************************/ 128 129 typedef enum 130 { 131 FBF_DRIVER_USER_READ = 1, 132 FBF_DRIVER_USER_WRITE = 2, 133 FBF_DRIVER_KERNEL_READ = 3, 134 FBF_DRIVER_KERNEL_WRITE = 4, 135 } 136 fbf_driver_cmd_type_t; 137 79 138 typedef struct fbf_command_s 80 139 { 81 140 xptr_t dev_xp; /*! extended pointer on device descriptor */ 82 uint32_t type; /*! requested operation type.*/83 uint32_t length;/*! number of bytes. */84 uint32_t offset; /*! offset in frame buffer ( bytes)*/85 void * buffer; /*! pointer on memory buffer in user space*/141 uint32_t type; /*! requested driver operation type. */ 142 uint32_t npixels; /*! number of bytes. */ 143 uint32_t offset; /*! offset in frame buffer (pixels) */ 144 void * buffer; /*! pointer on memory buffer (kernel or user) */ 86 145 uint32_t error; /*! operation status (0 if success) */ 87 146 } 88 147 fbf_command_t; 89 148 90 91 /****************************************************************************************** 92 * This function returns a printable string for a given FBF command <cmd_type>. 93 ****************************************************************************************** 94 * @ cmd_type : FBF command type (defined in shared_fbf.h file). 149 /****************************************************************************************** 150 * This structure defines an FBF window descriptor, allocated to a given user process. 
151 * The window descriptor and the associated buffer are allocated in the cluster where 152 * is running the thread requesting the window creation. 153 * The <wid> allocator, the window_tbl[] array, and the root of the xlist of windows are 154 * imlemented in the FBF device extension. 155 *****************************************************************************************/ 156 157 typedef struct fbf_window_s 158 { 159 pid_t pid; /*! owner process identifier */ 160 uint32_t wid; /*! window identifier */ 161 uint32_t height; /*! number of lines in window */ 162 uint32_t width; /*! number of pixels per line in window */ 163 uint32_t l_min; /*! first line index in FBF */ 164 uint32_t p_min; /*! first pixel index in FBF */ 165 uint8_t * buffer; /*! pointer on buffer in user space */ 166 bool_t hidden; /*! no display on FBF when true */ 167 xlist_entry_t xlist; /*! member of registered FBF windows list */ 168 } 169 fbf_window_t; 170 171 /****************************************************************************************** 172 * This function returns a printable string for a given FBF user command <cmd_type>. 173 * WARNING : It must be kept consistent with the enum in the <shared_fbf.h> file 174 ****************************************************************************************** 175 * @ cmd_type : FBF user command type (defined in shared_fbf.h file). 95 176 * @ returns a string pointer. 96 177 *****************************************************************************************/ … … 100 181 * This function completes the FBF chdev descriptor initialisation. 101 182 * It calls the specific driver initialisation function, to initialise the hardware 102 * device and the chdev extension. It must be called by a local thread.183 * device, and the chdev extension. It must be called by a local thread. 103 184 ****************************************************************************************** 104 185 * @ chdev : pointer on FBF chdev descriptor. 
… … 107 188 108 189 /****************************************************************************************** 109 * This function returns the frame buffer size and type. 190 * This function implements the fbf_get_config() syscall, and returns the FBF 191 * size and type. It can be called by a client thread running in any cluster. 110 192 * It does NOT access the hardware, as the size and type have been registered 111 * in the chdev descriptor extension. 193 * in the chdev descriptor extension by the dev_fbf_init() function. 194 * It can be called by any thread running in any cluster. 112 195 ****************************************************************************************** 113 196 * @ width : [out] number of pixels per line. … … 120 203 121 204 /****************************************************************************************** 122 * This blocking function moves <length> bytes between the frame buffer, starting from 123 * byte defined by <offset>, and an user buffer defined by the <user_buffer> argument. 124 * It can be called by a client thread running in any cluster. 125 * The transfer direction are defined by the <cmd_type> argument. 205 * This function implements the fbf_create_window() syscall. 206 * It registers a new window in the windows_tbl[] array, and the windows list, 207 * registers in the reference cluster an ANON vseg, that will be mapped in local cluster. 208 * The window index <wid> is dynamically allocated. The owner is the calling process. 209 * The FBF window is defined by the <nlines>, <npixels>, <l_min>, <p_min> arguments. 210 * It can be called by any thread running in any cluster. As this vseg is not directly 211 * mapped to the frame buffer, the owner process can access this private buffer without 212 * syscall. As for any vseg, the physical memory is allocated on demand at each page fault. 213 * The created vseg base address in user space is returned in the <user_base> argument. 214 * 215 * Implementation note: 216 * 1. 
it allocates memory in the local cluster for the window, 217 * 2. it creates in the associated vseg, 218 * 3. it initializes the window descriptor, 219 * 4. it takes the lock protecting the windows in write mode, 220 * 5. it allocates a new <wid>, 221 * 6. it registers the window in the window_tbl[] array, 222 * 7. it registers the window in the windows list, 223 * 8. it releases the lock protecting windows. 224 * It does not call the FBF driver. 225 ****************************************************************************************** 226 * @ nlines : [in] number of lines in window. 227 * @ npixels : [in] number of pixels per line in window. 228 * @ l_min : [in] first pixel index in FBF. 229 * @ p_min : [in] first line index in FBF. 230 * @ user_base : [out] pointer on allocated buffer base in user space. 231 * @ return the <wid> index if success / returns -1 if failure 232 *****************************************************************************************/ 233 uint32_t dev_fbf_create_window( uint32_t nlines, 234 uint32_t npixels, 235 uint32_t l_min, 236 uint32_t p_min, 237 intptr_t * user_base ); 238 239 /****************************************************************************************** 240 * This function implements the fbf_delete_window() syscall to delete a FBF window, 241 * and release all memory allocated for this window and for the associated vseg. 242 * releases the memory allocated for the window buffer and for the window descriptor. 243 * It can be called by any thread running in any cluster. 244 * 245 * Implementation note: 246 * 1. it takes the lock protecting windows in write mode, 247 * 2. it set the hidden flag in deleted window descriptor, 248 * 3. it refresh the FBF window, 249 * 4. it removes the window from windows_tbl[] array, 250 * 5. it removes the window from xlist, 251 * 6. it releases the wid to bitmap, 252 * 7. it releases the lock protecting windows, 253 * 8. 
it releases the memory allocated for window descriptor, 254 * 9. it deletes the associated vseg in all clusters 255 * It does not call directly the FBF driver. 256 ****************************************************************************************** 257 * @ wid : [in] window index in window_tbl[]. 258 * @ returns 0 if success / returns -1 if wid not registered. 259 *****************************************************************************************/ 260 error_t dev_fbf_delete_window( uint32_t wid ); 261 262 /****************************************************************************************** 263 * This function implements the fbf_move_window() syscall. 264 * It moves a window identified by the <wid> argument to a new position in the FBF, 265 * defined by the <l_min> and <p_min> arguments. 266 * It can be called by any thread running in any cluster. 267 * 268 * Implementation note: 269 * 1. it takes the lock protecting windows in write mode, 270 * 2. it gives the modified window the lowest priority, 271 * 3. it set the "hidden" flag in window descriptor, 272 * 4. it refresh the FBF for the current window position, 273 * 5. it set the new coordinates in the window descriptor, 274 * 6. it gives the modified window the highest priority, 275 * 7. it reset the "hidden" flag in window descriptor, 276 * 8. it refresh the FBF for the new window position, 277 * 9. it releases the lock protecting windows, 278 * It does not call directly the FBF driver. 279 ****************************************************************************************** 280 * @ wid : [in] window index in window_tbl[]. 281 * @ l_min : [in] new first pixel index in FBF. 282 * @ p_min : [in] new first line index in FBF. 283 * @ returns 0 if success / returns -1 if illegal arguments. 
284 *****************************************************************************************/ 285 error_t dev_fbf_move_window( uint32_t wid, 286 uint32_t l_min, 287 uint32_t p_min ); 288 289 /****************************************************************************************** 290 * This function implements the fbf_resize_window() syscall. 291 * It changes the <width> and <height> of a window identified by the <wid> argument. 292 * It updates the associated vseg "size" if required, but does not change the vseg "base". 293 * When the new window buffer is larger than the existing one, it is 0 filled. 294 * It can be called by any thread running in any cluster. 295 * 296 * Implementation note: 297 * 1. it takes the lock protecting windows in write mode, 298 * 2. it gives the modified window the lowest priority, 299 * 3. it set the "hidden" flag in window descriptor, 300 * 4. it refresh the FBF for the current window, 301 * 5. it set the new size in the window descriptor, 302 * 6. it resizes the associated vseg if required, 303 * 7. if fill the window buffer extension with 0 if required, 304 * 8. it gives the modified window the highest priority, 305 * 9. it reset the "hidden" flag in window descriptor, 306 * 10. it refresh the FBF for the new window, 307 * 11. it releases the lock protecting windows, 308 * It does not call directly the FBF driver. 309 ****************************************************************************************** 310 * @ wid : [in] window index in window_tbl[]. 311 * @ width : [in] new number of pixels per line. 312 * @ height : [in] new number of lines. 313 * @ returns 0 if success / returns -1 if illegal arguments. 
314 *****************************************************************************************/ 315 error_t dev_fbf_resize_window( uint32_t wid, 316 uint32_t width, 317 uint32_t height ); 318 319 /****************************************************************************************** 320 * This function implements the fbf_refresh_window() syscall. 321 * It allows an owner process to signal the windows manager that some lines of a window 322 * identified by the <wid>, <line_min>, and <line_max> argument have been modified, and 323 * must be refreshed in the FBF. It scans all the registered FBF windows to respect the 324 * overlap order defined by the windows xlist. 325 * It can be called by any thread running in any cluster. 326 * 327 * Implementation note: 328 * 1. it takes the lock protecting windows in read mode, 329 * 2. it refresh the FBF, 330 * 3. it releases the lock protecting windows, 331 * It does not call directly the FBF driver. 332 ****************************************************************************************** 333 * @ wid : [in] window index in window_tbl[] 334 * @ line_first : [in] first line index in window. 335 * @ line_last : [in] last line index (excluded). 336 * @ returns 0 if success / returns -1 if wid not registered. 337 *****************************************************************************************/ 338 error_t dev_fbf_refresh_window( uint32_t wid, 339 uint32_t line_first, 340 uint32_t line_last ); 341 342 /****************************************************************************************** 343 * WARNING : This function is deprecated ( january 2020 [AG] ). It was defined 344 * to implement the fbf_read() and fbf_write() deprecated syscalls. 345 * 346 * It moves <length> bytes between the frame buffer, starting from pixel defined 347 * by the <offset> argument, and an user buffer defined by the <user_buffer> argument. 348 * The transfer direction are defined by the <is_write> argument. 
349 * An error is returned when <offset> + <npixels> is larger than the FBF size. 126 350 * The request is registered in the client thread descriptor, but the client thread is 127 351 * not descheduled, and calls directly the FBF driver. 128 ****************************************************************************************** 129 * @ cmd_type : FBF_READ / FBF_WRITE / FBF_SYNC_READ / FBF_SYN_WRITE. 130 * @ user_buffer : pointer on memory buffer in user space. 131 * @ length : number of bytes. 132 * @ offset : first byte in frame buffer. 133 * @ returns 0 if success / returns EINVAL if error. 134 *****************************************************************************************/ 135 error_t dev_fbf_move_data( uint32_t cmd_type, 352 * It can be called by a client thread running in any cluster. 353 ****************************************************************************************** 354 * @ is_write : [in] write FBF weh true / read FBF when false 355 * @ user_buffer : [in] pointer on memory buffer in user space. 356 * @ npixels : [in] number of bytes. 357 * @ offset : [in] first byte in frame buffer. 358 * @ returns 0 if success / returns -1 if error. 359 *****************************************************************************************/ 360 error_t dev_fbf_move_data( bool_t is_write, 136 361 void * user_buffer, 137 uint32_t length,362 uint32_t npixels, 138 363 uint32_t offset ); 139 364 -
trunk/kernel/devices/dev_ioc.c
r647 r657 2 2 * dev_ioc.c - IOC (Block Device Controler) generic device API implementation. 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019 )4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 37 37 extern chdev_directory_t chdev_dir; // allocated in kernel_init.c 38 38 39 //////////////////////////////////////// 40 char * dev_ioc_cmd_str( cmd_type_t cmd )39 //////////////////////////////////////////// 40 char * dev_ioc_cmd_str( ioc_cmd_type_t cmd ) 41 41 { 42 42 if ( cmd == IOC_READ ) return "READ"; … … 91 91 } // end dev_ioc_init() 92 92 93 /////////////////////////////////////////////// 94 error_t dev_ioc_move_data( uint32_t cmd_type, 93 //////////////////////////////////////////////////////////////////////////////////// 94 // This static function executes an asynchronous SYNC_READ or SYNC_WRITE request. 95 // thread in the IOC device waiting queue, activates the server thread, blocks on 96 // the THREAD_BLOCKED_IO condition and deschedules. 97 // The clent is re-activated by the server thread after IO operation completion. 
98 //////////////////////////////////////////////////////////////////////////////////// 99 static error_t dev_ioc_move( uint32_t cmd_type, 100 xptr_t buffer_xp, 101 uint32_t lba, 102 uint32_t count ) 103 { 104 thread_t * this = CURRENT_THREAD; // pointer on client thread 105 106 // get extended pointer on IOC chdev descriptor 107 xptr_t ioc_xp = chdev_dir.ioc[0]; 108 109 // check dev_xp 110 assert( (ioc_xp != XPTR_NULL) , "undefined IOC chdev descriptor" ); 111 112 // register command in client thread 113 this->ioc_cmd.dev_xp = ioc_xp; 114 this->ioc_cmd.type = cmd_type; 115 this->ioc_cmd.buf_xp = buffer_xp; 116 this->ioc_cmd.lba = lba; 117 this->ioc_cmd.count = count; 118 119 // register client thread in IOC queue, blocks and deschedules 120 chdev_register_command( ioc_xp ); 121 122 // return I/O operation status 123 return this->ioc_cmd.error; 124 125 } // end dev_ioc_move() 126 127 //////////////////////////////////////////////////////////////////////////////////// 128 // This static function executes a synchronous READ or WRITE request. 129 // It register the command in the client thread descriptor, and calls directly 130 // the driver cmd function. 
131 //////////////////////////////////////////////////////////////////////////////////// 132 error_t dev_ioc_sync_move( uint32_t cmd_type, 95 133 xptr_t buffer_xp, 96 134 uint32_t lba, … … 98 136 { 99 137 thread_t * this = CURRENT_THREAD; // pointer on client thread 100 101 #if ( DEBUG_DEV_IOC_RX || DEBUG_DEV_IOC_TX )102 uint32_t cycle = (uint32_t)hal_get_cycles();103 #endif104 105 // software L2/L3 cache coherence for memory buffer106 if( chdev_dir.iob )107 {108 if( (cmd_type == IOC_SYNC_READ) || (cmd_type == IOC_READ) )109 {110 dev_mmc_inval( buffer_xp , count<<9 );111 }112 else // (cmd_type == IOC_SYNC_WRITE) or (cmd_type == IOC_WRITE)113 {114 dev_mmc_sync ( buffer_xp , count<<9 );115 }116 }117 138 118 139 // get extended pointer on IOC chdev descriptor … … 129 150 this->ioc_cmd.count = count; 130 151 131 // for a synchronous acces, the driver is directly called by the client thread 132 if( (cmd_type == IOC_SYNC_READ) || (cmd_type == IOC_SYNC_WRITE) ) 133 { 134 135 #if DEBUG_DEV_IOC_RX 136 uint32_t cycle = (uint32_t)hal_get_cycles(); 137 if( (DEBUG_DEV_IOC_RX < cycle) && (cmd_type == IOC_SYNC_READ) ) 138 printk("\n[%s] thread[%x,%x] enters for SYNC_READ / lba %x / buffer[%x,%x] / cycle %d\n", 139 __FUNCTION__ , this->process->pid, this->trdid, lba, 140 GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle ); 141 #endif 142 143 #if DEBUG_DEV_IOC_TX 144 uint32_t cycle = (uint32_t)hal_get_cycles(); 145 if( (DEBUG_DEV_IOC_TX < cycle) && (cmd_type == IOC_SYNC_WRITE) ) 146 printk("\n[%s] thread[%x,%x] enters for SYNC_WRITE / lba %x / buffer[%x,%x] / cycle %d\n", 147 __FUNCTION__ , this->process->pid, this->trdid, lba, 148 GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle ); 149 #endif 150 // get driver command function 151 cxy_t ioc_cxy = GET_CXY( ioc_xp ); 152 chdev_t * ioc_ptr = GET_PTR( ioc_xp ); 153 dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->cmd ) ); 154 155 // call driver function 156 cmd( XPTR( local_cxy , this ) ); 157 158 #if DEBUG_DEV_IOC_RX 
159 if( (DEBUG_DEV_IOC_RX < cycle) && (cmd_type == IOC_SYNC_READ) ) 160 printk("\n[%s] thread[%x,%x] resumes for IOC_SYNC_READ\n", 161 __FUNCTION__, this->process->pid , this->trdid ) 162 #endif 163 164 #if DEBUG_DEV_IOC_TX 165 if( (DEBUG_DEV_IOC_RX < cycle) && (cmd_type == IOC_SYNC_WRITE) ) 166 printk("\n[%s] thread[%x,%x] resumes for IOC_SYNC_WRITE\n", 167 __FUNCTION__, this->process->pid , this->trdid ) 168 #endif 169 170 } 171 // for an asynchronous access, the client thread registers in the chdev waiting queue, 172 // activates server thread, blocks on THREAD_BLOCKED_IO and deschedules. 173 // It is re-activated by the server thread after IO operation completion. 174 else // (cmd_type == IOC_READ) || (cmd_type == IOC_WRITE) 175 { 176 177 #if DEBUG_DEV_IOC_RX 178 uint32_t cycle = (uint32_t)hal_get_cycles(); 179 if( (DEBUG_DEV_IOC_RX < cycle) && (cmd_type == IOC_READ) ) 180 printk("\n[%s] thread[%x,%x] enters for READ / lba %x / buffer[%x,%x] / cycle %d\n", 181 __FUNCTION__ , this->process->pid, this->trdid, lba, 182 GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle ); 183 #endif 184 185 #if DEBUG_DEV_IOC_TX 186 uint32_t cycle = (uint32_t)hal_get_cycles(); 187 if( (DEBUG_DEV_IOC_TX < cycle) && (cmd_type == IOC_WRITE) ) 188 printk("\n[%s] thread[%x,%x] enters for WRITE / lba %x / buffer[%x,%x] / cycle %d\n", 189 __FUNCTION__ , this->process->pid, this->trdid, lba, 190 GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle ); 191 #endif 192 chdev_register_command( ioc_xp ); 193 194 #if(DEBUG_DEV_IOC_RX ) 195 if( (DEBUG_DEV_IOC_RX < cycle) && (cmd_type == IOC_READ) ) 196 printk("\n[%s] thread[%x,%x] resumes for IOC_READ\n", 197 __FUNCTION__, this->process->pid , this->trdid ) 198 #endif 199 200 #if(DEBUG_DEV_IOC_TX & 1) 201 if( (DEBUG_DEV_IOC_RX < cycle) && (cmd_type == IOC_WRITE) ) 202 printk("\n[%s] thread[%x,%x] resumes for IOC_WRITE\n", 203 __FUNCTION__, this->process->pid , this->trdid ) 204 #endif 205 206 } 152 // get driver command function 153 cxy_t ioc_cxy = GET_CXY( 
ioc_xp ); 154 chdev_t * ioc_ptr = GET_PTR( ioc_xp ); 155 dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->cmd ) ); 156 157 // call driver function without blocking & descheduling 158 cmd( XPTR( local_cxy , this ) ); 207 159 208 160 // return I/O operation status 209 161 return this->ioc_cmd.error; 210 162 211 } // end dev_ioc_move_data() 212 213 163 } // end dev_ioc_sync_move() 164 165 /////////////////////////////////////////// 166 error_t dev_ioc_read( xptr_t buffer_xp, 167 uint32_t lba, 168 uint32_t count ) 169 { 170 171 #if DEBUG_DEV_IOC_RX 172 thread_t * this = CURRENT_THREAD; 173 uint32_t cycle = (uint32_t)hal_get_cycles(); 174 if( DEBUG_DEV_IOC_RX < cycle ) 175 printk("\n[%s] thread[%x,%x] enters IOC_READ / lba %x / buffer[%x,%x] / cycle %d\n", 176 __FUNCTION__, this->process->pid, this->trdid, lba, 177 GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle ); 178 #endif 179 180 // software L2/L3 cache coherence for memory buffer 181 if( chdev_dir.iob ) dev_mmc_inval( buffer_xp , count<<9 ); 182 183 // request an asynchronous transfer 184 error_t error = dev_ioc_move( IOC_READ, 185 buffer_xp, 186 lba, 187 count ); 188 #if(DEBUG_DEV_IOC_RX & 1) 189 cycle = (uint32_t)hal_get_cycles(); 190 if( DEBUG_DEV_IOC_RX < cycle ) 191 printk("\n[%s] thread[%x,%x] exit IOC_READ / cycle %d\n", 192 __FUNCTION__, this->process->pid , this->trdid , cycle ); 193 #endif 194 195 return error; 196 197 } // end dev_ioc_read() 198 199 /////////////////////////////////////////// 200 error_t dev_ioc_write( xptr_t buffer_xp, 201 uint32_t lba, 202 uint32_t count ) 203 { 204 205 #if DEBUG_DEV_IOC_TX 206 thread_t * this = CURRENT_THREAD; 207 uint32_t cycle = (uint32_t)hal_get_cycles(); 208 if( DEBUG_DEV_IOC_TX < cycle ) 209 printk("\n[%s] thread[%x,%x] enters IOC_WRITE / lba %x / buffer[%x,%x] / cycle %d\n", 210 __FUNCTION__, this->process->pid, this->trdid, lba, 211 GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle ); 212 #endif 213 214 // software L2/L3 cache coherence for
memory buffer 215 if( chdev_dir.iob ) dev_mmc_sync ( buffer_xp , count<<9 ); 216 217 // request a blocking, but asynchronous, transfer 218 error_t error = dev_ioc_move( IOC_WRITE, 219 buffer_xp, 220 lba, 221 count ); 222 #if(DEBUG_DEV_IOC_TX & 1) 223 cycle = (uint32_t)hal_get_cycles(); 224 if( DEBUG_DEV_IOC_TX < cycle ) 225 printk("\n[%s] thread[%x,%x] exit IOC_WRITE / cycle %d\n", 226 __FUNCTION__, this->process->pid , this->trdid , cycle ); 227 #endif 228 229 return error; 230 231 } // end dev_ioc_write() 232 233 234 /////////////////////////////////////////// 235 error_t dev_ioc_sync_read( xptr_t buffer_xp, 236 uint32_t lba, 237 uint32_t count ) 238 { 239 240 #if DEBUG_DEV_IOC_RX 241 thread_t * this = CURRENT_THREAD; 242 uint32_t cycle = (uint32_t)hal_get_cycles(); 243 if( DEBUG_DEV_IOC_RX < cycle ) 244 printk("\n[%s] thread[%x,%x] enters IOC_SYNC_READ / lba %x / buffer[%x,%x] / cycle %d\n", 245 __FUNCTION__, this->process->pid, this->trdid, lba, 246 GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle ); 247 #endif 248 249 // software L2/L3 cache coherence for memory buffer 250 if( chdev_dir.iob ) dev_mmc_inval( buffer_xp , count<<9 ); 251 252 // request a blocking synchronous transfer 253 error_t error = dev_ioc_sync_move( IOC_SYNC_READ, 254 buffer_xp, 255 lba, 256 count ); 257 #if(DEBUG_DEV_IOC_RX & 1) 258 cycle = (uint32_t)hal_get_cycles(); 259 if( DEBUG_DEV_IOC_RX < cycle ) 260 printk("\n[%s] thread[%x,%x] exit IOC_SYNC_READ / cycle %d\n", 261 __FUNCTION__, this->process->pid , this->trdid , cycle ); 262 #endif 263 264 return error; 265 266 } // end dev_ioc_sync_read() 267 268 ///////////////////////////////////////////////// 269 error_t dev_ioc_sync_write( xptr_t buffer_xp, 270 uint32_t lba, 271 uint32_t count ) 272 { 273 274 #if DEBUG_DEV_IOC_TX 275 thread_t * this = CURRENT_THREAD; 276 uint32_t cycle = (uint32_t)hal_get_cycles(); 277 if( DEBUG_DEV_IOC_TX < cycle ) 278 printk("\n[%s] thread[%x,%x] enters IOC_SYNC_WRITE / lba %x / buffer[%x,%x] / cycle %d\n", 279
__FUNCTION__, this->process->pid, this->trdid, lba, 280 GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle ); 281 #endif 282 283 // software L2/L3 cache coherence for memory buffer 284 if( chdev_dir.iob ) dev_mmc_sync ( buffer_xp , count<<9 ); 285 286 // request a blocking synchronous transfer 287 error_t error = dev_ioc_sync_move( IOC_SYNC_WRITE, 288 buffer_xp, 289 lba, 290 count ); 291 #if(DEBUG_DEV_IOC_TX & 1) 292 cycle = (uint32_t)hal_get_cycles(); 293 if( DEBUG_DEV_IOC_TX < cycle ) 294 printk("\n[%s] thread[%x,%x] exit IOC_SYNC_WRITE / cycle %d\n", 295 __FUNCTION__, this->process->pid , this->trdid , cycle ); 296 #endif 297 298 return error; 299 300 } // end dev_ioc_sync_write() 301 302 -
trunk/kernel/devices/dev_ioc.h
r647 r657 2 2 * dev_ioc.h - IOC (Block Device Controler) generic device API definition. 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019 )4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 45 45 * - SYNC_WRITE : move blocks from memory to device, with a busy waiting policy. 46 46 * 47 * For the heREAD or WRITE operations, the client thread is descheduled, and the work47 * For the READ or WRITE operations, the client thread is descheduled, and the work 48 48 * is done by the server thread associated to the IOC device: 49 49 * The client thread calls the dev_ioc_move_data() kernel functions that (i) registers … … 93 93 94 94 /****************************************************************************************** 95 * This defines the (implementation independant) command passed to the driver.95 * This structure defines the IOC command for all drivers implementing the IOC device. 96 96 *****************************************************************************************/ 97 97 … … 103 103 IOC_SYNC_WRITE = 3, 104 104 } 105 cmd_type_t;105 ioc_cmd_type_t; 106 106 107 107 typedef struct ioc_command_s … … 111 111 uint32_t lba; /*! first block index */ 112 112 uint32_t count; /*! number of blocks */ 113 xptr_t buf_xp; /*! extended pointer on memory buffer*/113 xptr_t buf_xp; /*! extended pointer on kernel memory buffer */ 114 114 uint32_t error; /*! operation status (0 if success) */ 115 115 } … … 122 122 * @ return pointer on string. 
123 123 *****************************************************************************************/ 124 char * dev_ioc_cmd_str( cmd_type_t cmd );124 char * dev_ioc_cmd_str( ioc_cmd_type_t cmd ); 125 125 126 126 /****************************************************************************************** … … 138 138 139 139 /****************************************************************************************** 140 * This blocking function moves <count> contiguous blocks of data between the block device 141 * starting from block defined by the <lba> argument and a kernel memory buffer, defined 142 * by the <buffer_xp> argument. The transfer direction and mode are defined by the 143 * <cmd_type> argument. The request is always registered in the calling thread descriptor. 144 * - In synchronous mode, the calling thread is not descheduled, and directly calls the 145 * IOC driver, polling the IOC status to detect transfer completion. 146 * - In asynchronous mode, the calling thread blocks and deschedules, and the IOC driver 147 * is called by the server thread associated to the IOC device. 148 ****************************************************************************************** 149 * @ cmd_type : IOC_READ / IOC_WRITE / IOC_SYNC_READ / IOC_SYN_WRITE. 150 * @ buffer_xp : extended pointer on kernel buffer in memory (must be block aligned). 151 * @ lba : first block index on device. 152 * @ count : number of blocks to transfer. 153 * @ returns 0 if success / returns -1 if error. 154 *****************************************************************************************/ 155 error_t dev_ioc_move_data( uint32_t cmd_type, 156 xptr_t buffer_xp, 140 * This blocking function register an asynchronous READ request : move <count> contiguous 141 * blocks from the block device, starting from block defined by the <lba> argument, to a 142 * kernel buffer defined by the <buffer_xp> argument. 
143 * It register the request in the client thread descriptor, it register the client thread 144 * in the IOC device queue, it blocks on the THREAD_BLOCKED_IO condition, and deschedules. 145 * It will be reactivated by the DEV server thread when the transfer is completed. 146 * It can be executed by a thread running in any cluster. 147 ****************************************************************************************** 148 * @ buffer_xp : extended pointer on kernel buffer in memory (must be block aligned). 149 * @ lba : first block index on device. 150 * @ count : number of blocks to transfer. 151 * @ returns 0 if success / returns -1 if error. 152 *****************************************************************************************/ 153 error_t dev_ioc_read( xptr_t buffer_xp, 154 uint32_t lba, 155 uint32_t count ); 156 157 /****************************************************************************************** 158 * This blocking function register an asynchronous WRITE request : move <count> contiguous 159 * blocks from a kernel buffer defined by the <buffer_xp> argument to the block device, 160 * starting from block defined by the <lba> argument. 161 * It register the request in the client thread descriptor, it register the client thread 162 * in the IOC device queue, it blocks on the THREAD_BLOCKED_IO condition, and deschedules. 163 * It will be reactivated by the DEV server thread when the transfer is completed. 164 * It can be executed by a thread running in any cluster. 165 ****************************************************************************************** 166 * @ buffer_xp : extended pointer on kernel buffer in memory (must be block aligned). 167 * @ lba : first block index on device. 168 * @ count : number of blocks to transfer. 169 * @ returns 0 if success / returns -1 if error. 
170 *****************************************************************************************/ 171 error_t dev_ioc_write( xptr_t buffer_xp, 172 uint32_t lba, 173 uint32_t count ); 174 175 /****************************************************************************************** 176 * This blocking function executes a synchronous SYNC_READ request : it moves <count> 177 * contiguous blocks of data from the block device, starting from block defined by the 178 * <lba> argument to a kernel memory buffer, defined by the <buffer_xp> argument. 179 * The request is registered in the calling thread descriptor, but the client thread calls 180 * directly the driver cmd function, that is also a blocking function returning only 181 * when the transfer is completed. 182 * It can be executed by a thread running in any cluster. 183 ****************************************************************************************** 184 * @ buffer_xp : extended pointer on kernel buffer in memory (must be block aligned). 185 * @ lba : first block index on device. 186 * @ count : number of blocks to transfer. 187 * @ returns 0 if success / returns -1 if error. 188 *****************************************************************************************/ 189 error_t dev_ioc_sync_read( xptr_t buffer_xp, 157 190 uint32_t lba, 158 191 uint32_t count ); 159 192 193 /****************************************************************************************** 194 * This blocking function executes a synchronous SYNC_WRITE request : it moves <count> 195 * contiguous blocks of data from a kernel memory buffer, defined by the <buffer_xp> 196 * argument to the block device, starting from block defined by the <lba> argument. 197 * The request is registered in the calling thread descriptor, but the client thread calls 198 * directly the driver cmd() function, that is also a blocking function returning only 199 * when the transfer is completed. 200 * It can be executed by a thread running in any cluster. 
201 ****************************************************************************************** 202 * @ buffer_xp : extended pointer on kernel buffer in memory (must be block aligned). 203 * @ lba : first block index on device. 204 * @ count : number of blocks to transfer. 205 * @ returns 0 if success / returns -1 if error. 206 *****************************************************************************************/ 207 error_t dev_ioc_sync_write( xptr_t buffer_xp, 208 uint32_t lba, 209 uint32_t count ); 210 160 211 #endif /* _DEV_IOC_H */ -
trunk/kernel/devices/dev_mmc.c
r647 r657 2 2 * dev_mmc.c - MMC (Memory Cache Controler) generic device API implementation. 3 3 * 4 * Author Alain Greiner (2016,2017,2018 )4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 176 176 177 177 ///////////////////////////////////////// 178 error_t dev_mmc_ set_error( cxy_t cxy,178 error_t dev_mmc_error_set( cxy_t cxy, 179 179 uint32_t index, 180 180 uint32_t wdata ) … … 185 185 // store command arguments in thread descriptor 186 186 this->mmc_cmd.dev_xp = chdev_dir.mmc[cxy]; 187 this->mmc_cmd.type = MMC_ SET_ERROR;187 this->mmc_cmd.type = MMC_ERROR_SET; 188 188 this->mmc_cmd.reg_index = index; 189 189 this->mmc_cmd.reg_ptr = &wdata; … … 194 194 195 195 ////////////////////////////////////////// 196 error_t dev_mmc_ get_error( cxy_t cxy,196 error_t dev_mmc_error_get( cxy_t cxy, 197 197 uint32_t index, 198 198 uint32_t * rdata ) … … 203 203 // store command arguments in thread descriptor 204 204 this->mmc_cmd.dev_xp = chdev_dir.mmc[cxy]; 205 this->mmc_cmd.type = MMC_ GET_ERROR;205 this->mmc_cmd.type = MMC_ERROR_GET; 206 206 this->mmc_cmd.reg_index = index; 207 207 this->mmc_cmd.reg_ptr = rdata; … … 210 210 return dev_mmc_access( this ); 211 211 } 212 213 ////////////////////////////////////////// //////////214 error_t dev_mmc_ get_instrumentation( cxy_t cxy,215 216 212 213 ////////////////////////////////////////// 214 error_t dev_mmc_instr_get( cxy_t cxy, 215 uint32_t index, 216 uint32_t * rdata ) 217 217 { 218 218 // get calling thread local pointer … … 221 221 // store command arguments in thread descriptor 222 222 this->mmc_cmd.dev_xp = chdev_dir.mmc[cxy]; 223 this->mmc_cmd.type = MMC_ GET_INSTRU;223 this->mmc_cmd.type = MMC_INSTR_GET; 224 224 this->mmc_cmd.reg_index = index; 225 225 this->mmc_cmd.reg_ptr = rdata; … … 228 228 return dev_mmc_access( this ); 229 229 } 230 -
trunk/kernel/devices/dev_mmc.h
r565 r657 2 2 * dev_mmc.h - MMC (Generic L2 cache controller) device API definition. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 34 34 * acting in all clusters containing a level 2 cache controller. 35 35 * 36 * It supports five command types: 36 * It supports three different services: 37 * 1) L2/L3 software cache-coherence operations. 38 * 2) error reporting for architecture specific addressing error. 39 * 3) architecture specific intrumentation registers access. 40 * 41 * It supports therefore five command types: 37 42 * - MMC_CC_INVAL : invalidate all cache lines covering a given buffer in L2 cache. 38 43 * - MMC_CC_SYNC : synchronize all cache lines covering a given buffer to L3 cache. 39 * - MMC_ GET_ERROR: return content of a given error signaling register.40 * - MMC_ SET_ERROR: set a given error signaling register.41 * - MMC_ GET_INSTRU: return content of a given instrumentation register.44 * - MMC_ERROR_GET : return content of a given error signaling register. 45 * - MMC_ERROR_SET : set a given error signaling register. 46 * - MMC_INSTR_GET : return content of a given instrumentation register. 42 47 * 43 48 * As all L2 caches can be accessed by any thread running in any cluster, a calling … … 73 78 MMC_CC_INVAL = 0, 74 79 MMC_CC_SYNC = 1, 75 MMC_ GET_ERROR= 2,76 MMC_ SET_ERROR= 3,77 MMC_ GET_INSTRU= 4,80 MMC_ERROR_GET = 2, 81 MMC_ERROR_SET = 3, 82 MMC_INSTR_GET = 4, 78 83 }; 79 84 … … 126 131 127 132 /***************************************************************************************** 128 * This function set a value in one error signaling MMCregister.133 * This function set a value in one (architecture specific) MMC_ERROR register. 129 134 * It can be executed by any thread in any cluster, because it uses remote accesses 130 135 * to access the L2 cache instrumentation interface in any cluster. 
… … 135 140 * @ return 0 if success / return EINVAL if failure 136 141 ****************************************************************************************/ 137 error_t dev_mmc_ set_error( cxy_t cxy,142 error_t dev_mmc_error_set( cxy_t cxy, 138 143 uint32_t index, 139 144 uint32_t wdata ); 140 145 141 146 /***************************************************************************************** 142 * This function returns the value contained in one error signaling MMC register.143 * It can be executed by any thread in any cluster, because it uses remote accesses144 * to access the L2 cache instrumentation interface in any cluster.147 * This function returns the value contained in one (architecture specific) MMC_ERROR 148 * register. It can be executed by any thread in any cluster, because it uses remote 149 * accesses to access the L2 cache instrumentation interface in any cluster. 145 150 ***************************************************************************************** 146 151 * @ cxy : MMC cluster identifier. … … 149 154 * @ return 0 if success / return EINVAL if failure 150 155 ****************************************************************************************/ 151 error_t dev_mmc_ get_error( cxy_t cxy,156 error_t dev_mmc_error_get( cxy_t cxy, 152 157 uint32_t index, 153 158 uint32_t * rdata ); 154 159 155 156 160 /***************************************************************************************** 157 * This function returns the value contained in one instrumentation MMC register.158 * It can be executed by any thread in any cluster, because it uses remote accesses159 * to access the L2 cache configuration interface in any cluster.161 * This function returns the value contained in one (architecture specific) MMC_INSTR 162 * register. It can be executed by any thread in any cluster, because it uses remote 163 * accesses to access the L2 cache instrumentation interface in any cluster. 
160 164 ***************************************************************************************** 161 165 * @ cxy : MMC cluster identifier. 162 * @ index : instrumentationregister index in MMC peripheral.166 * @ index : error register index in MMC peripheral. 163 167 * @ rdata : local pointer on buffer for returned value. 164 168 * @ return 0 if success / return EINVAL if failure 165 169 ****************************************************************************************/ 166 error_t dev_mmc_ get_instrumentation( cxy_t cxy,167 uint32_t index,168 169 170 error_t dev_mmc_instr_get( cxy_t cxy, 171 uint32_t index, 172 uint32_t * rdata ); 173 170 174 #endif /* _DEV_MMC_H_ */ -
trunk/kernel/devices/dev_nic.c
r647 r657 2 2 * dev_nic.c - NIC (Network Controler) generic device API implementation. 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019 )4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 24 24 #include <hal_kernel_types.h> 25 25 #include <hal_special.h> 26 #include <hal_uspace.h> 27 #include <remote_buf.h> 26 28 #include <printk.h> 27 29 #include <chdev.h> 28 30 #include <thread.h> 31 #include <socket.h> 29 32 #include <hal_drivers.h> 30 33 #include <dev_nic.h> 34 #include <vfs.h> 35 #include <shared_socket.h> 31 36 32 37 ///////////////////////////////////////////////////////////////////////////////////////// 33 // Extern global variables38 // Extern global variables 34 39 ///////////////////////////////////////////////////////////////////////////////////////// 35 40 36 41 extern chdev_directory_t chdev_dir; // allocated in kernel_init.c 42 43 //////////////////////////////////////////////////////////////////////////////////////////// 44 // This static function is used by the dev_nic_rx_handle_tcp() & dev_nic_tx_handle_tcp() 45 // functions to check acceptability of a given sequence number. It returns true when 46 // the <seq> argument is contained in a wrap-around window defined by the <min> and <max> 47 // arguments. The window wrap-around when (min > max). 48 //////////////////////////////////////////////////////////////////////////////////////////// 49 // @ seq : [in] value to be checked. 50 // @ min : [in] first base. 51 // @ max : [in] window size. 
52 //////////////////////////////////////////////////////////////////////////////////////////// 53 static inline bool_t is_in_window( uint32_t seq, 54 uint32_t min, 55 uint32_t max ) 56 { 57 if( max >= min ) // no wrap_around => only one window [min,max] 58 { 59 return( (seq >= min) && (seq <= max) ); 60 } 61 else // window wrap-around => two windows [min,0xFFFFFFFF] and [0,max] 62 { 63 return( (seq <= max) || (seq >= min) ); 64 } 65 } 66 67 //////////////////////////////////////////////////////////////////////////////////////////// 68 // this static function compute a channel index in range [0,nic_channelx[ from 69 // a remote IP address and remote port. 70 // TODO this function should be provided by the NIC driver. 71 //////////////////////////////////////////////////////////////////////////////////////////// 72 // @ addr : [in] IP address. 73 // @ port : [in] TCP/UDP port. 74 //////////////////////////////////////////////////////////////////////////////////////////// 75 static inline uint32_t dev_nic_channel_index( uint32_t addr, 76 uint16_t port ) 77 { 78 // get number of NIC channels 79 uint32_t nic_channels = LOCAL_CLUSTER->nb_nic_channels; 80 81 // compute NIC channel index 82 return ( ((addr ) & 0xFF) ^ 83 ((addr > 8 ) & 0xFF) ^ 84 ((addr > 16) & 0xFF) ^ 85 ((addr > 24) & 0xFF) ^ 86 ((port ) & 0xFF) ^ 87 ((port > 8 ) & 0xFF) ) % nic_channels; 88 } 89 90 //////////////////////////////////////////////////////////////////////////////////////// 91 // This static function computes the checksum for an IP packet header. 92 // The "checksum" field itself is not taken into account for this computation. 
93 //////////////////////////////////////////////////////////////////////////////////////// 94 // @ buffer : [in] pointer on IP packet header (20 bytes) 95 // @ return the checksum value on 16 bits 96 //////////////////////////////////////////////////////////////////////////////////////// 97 uint16_t dev_nic_ip_checksum( uint8_t * buffer ) 98 { 99 uint32_t i; 100 uint32_t cs; // 32 bits accumulator 101 uint16_t * buf; 102 103 buf = (uint16_t *)buffer; 104 105 // compute checksum 106 for( i = 0 , cs = 0 ; i < 10 ; i++ ) 107 { 108 if( i != 5 ) cs += buf[i]; 109 } 110 111 // one's complement 112 return ~cs; 113 } 114 115 //////////////////////////////////////////////////////////////////////////////////////// 116 // This static function computes the checksum for an UDP packet defined by 117 // the <buffer> and <size> arguments. 118 //////////////////////////////////////////////////////////////////////////////////////// 119 // @ buffer : [in] pointer on UDP packet base. 120 // @ size : [in] number of bytes in this packet (including header). 
121 // @ return the checksum value on 16 bits 122 //////////////////////////////////////////////////////////////////////////////////////// 123 uint16_t dev_nic_udp_checksum( uint8_t * buffer, 124 uint32_t size ) 125 { 126 uint32_t i; 127 uint32_t carry; 128 uint32_t cs; // 32 bits accumulator 129 uint16_t * buf; 130 uint32_t max; // number of uint16_t in packet 131 132 // compute max & buf 133 buf = (uint16_t *)buffer; 134 max = size >> 1; 135 136 // extend buffer[] if required 137 if( size & 1 ) 138 { 139 max++; 140 buffer[size] = 0; 141 } 142 143 // compute checksum for UDP packet 144 for( i = 0 , cs = 0 ; i < size ; i++ ) cs += buf[i]; 145 146 // handle carry 147 carry = (cs >> 16); 148 if( carry ) 149 { 150 cs += carry; 151 carry = (cs >> 16); 152 if( carry ) cs += carry; 153 } 154 155 // one's complement 156 return ~cs; 157 } 158 159 //////////////////////////////////////////////////////////////////////////////////////// 160 // This static function computes the checksum for a TCP segment defined by 161 // the <buffer> and <size> arguments. 162 // It includes the pseudo header defined by the <src_ip_addr>, <dst_ip_addr>, 163 // <size> arguments, and by the TCP_PROTOCOL code. 164 //////////////////////////////////////////////////////////////////////////////////////// 165 // @ buffer : [in] pointer on TCP segment base. 166 // @ size : [in] number of bytes in this segment (including header). 
167 // @ src_ip_addr : [in] source IP address (pseudo header) 168 // @ dst_ip_addr : [in] destination IP address (pseudo header) 169 // @ return the checksum value on 16 bits 170 //////////////////////////////////////////////////////////////////////////////////////// 171 uint16_t dev_nic_tcp_checksum( uint8_t * buffer, 172 uint32_t size, 173 uint32_t src_ip_addr, 174 uint32_t dst_ip_addr ) 175 { 176 uint32_t i; 177 uint32_t carry; 178 uint32_t cs; // 32 bits accumulator 179 uint16_t * buf; 180 uint32_t max; // number of uint16_t in segment 181 182 // compute max & buf 183 buf = (uint16_t *)buffer; 184 max = size >> 1; 185 186 // extend buffer[] if required 187 if( size & 1 ) 188 { 189 max++; 190 buffer[size] = 0; 191 } 192 193 // compute checksum for TCP segment 194 for( i = 0 , cs = 0 ; i < size ; i++ ) cs += buf[i]; 195 196 // complete checksum for pseudo-header 197 cs += src_ip_addr; 198 cs += dst_ip_addr; 199 cs += PROTOCOL_TCP; 200 cs += size; 201 202 // handle carry 203 carry = (cs >> 16); 204 if( carry ) 205 { 206 cs += carry; 207 carry = (cs >> 16); 208 if( carry ) cs += carry; 209 } 210 211 // one's complement 212 return ~cs; 213 } 37 214 38 215 ////////////////////////////////// … … 80 257 } // end dev_nic_init() 81 258 259 260 ///////////////////////////////////////////////////////////////////////////////////////// 261 // Functions implementing the SOCKET related syscalls 262 ///////////////////////////////////////////////////////////////////////////////////////// 263 264 ////////////////////////////////////// 265 int dev_nic_socket( uint32_t domain, 266 uint32_t type ) 267 { 268 uint32_t fdid; 269 socket_t * socket; 270 error_t error; 271 272 // allocate memory for the file descriptor and for the socket 273 error = socket_create( local_cxy, 274 domain, 275 type, 276 &socket, // unused here 277 &fdid ); 278 279 if( error ) return -1; 280 return fdid; 281 } 282 283 //////////////////////////////// 284 int dev_nic_bind( uint32_t fdid, 285 uint32_t addr, 
286 uint16_t port ) 287 { 288 vfs_inode_type_t type; 289 socket_t * socket; 290 uint32_t state; 291 292 thread_t * this = CURRENT_THREAD; 293 process_t * process = this->process; 294 295 // get pointers on file descriptor 296 xptr_t file_xp = process_fd_get_xptr( process , fdid ); 297 vfs_file_t * file_ptr = GET_PTR( file_xp ); 298 cxy_t file_cxy = GET_CXY( file_xp ); 299 300 // check file_xp 301 if( file_xp == XPTR_NULL ) 302 { 303 printk("\n[ERROR] in %s : undefined fdid %d", 304 __FUNCTION__, fdid ); 305 return -1; 306 } 307 308 type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) ); 309 socket = hal_remote_lpt( XPTR( file_cxy , &file_ptr->socket ) ); 310 311 // check file descriptor type 312 if( type != INODE_TYPE_SOCK ) 313 { 314 printk("\n[ERROR] in %s : illegal file type %s", 315 __FUNCTION__, vfs_inode_type_str( type ) ); 316 return -1; 317 } 318 319 state = (type == SOCK_STREAM) ? TCP_STATE_BOUND : UDP_STATE_BOUND; 320 321 // update the socket descriptor 322 hal_remote_s32( XPTR( file_cxy , &socket->local_addr ) , addr ); 323 hal_remote_s32( XPTR( file_cxy , &socket->local_port ) , port ); 324 hal_remote_s32( XPTR( file_cxy , &socket->state ) , state ); 325 326 return 0; 327 328 } // end dev_nic_bind() 329 330 ////////////////////////////////// 331 int dev_nic_listen( uint32_t fdid, 332 uint32_t max_pending ) 333 { 334 xptr_t file_xp; 335 vfs_file_t * file_ptr; 336 cxy_t file_cxy; 337 vfs_inode_type_t file_type; 338 socket_t * socket_ptr; 339 uint32_t socket_type; 340 uint32_t socket_state; 341 342 thread_t * this = CURRENT_THREAD; 343 process_t * process = this->process; 344 345 if( max_pending != 0 ) 346 { 347 printk("\n[WARNING] in %s : max_pending argument non supported\n", 348 __FUNCTION__ ); 349 } 350 351 // get pointers on file descriptor 352 file_xp = process_fd_get_xptr( process , fdid ); 353 file_ptr = GET_PTR( file_xp ); 354 file_cxy = GET_CXY( file_xp ); 355 356 // check file_xp 357 if( file_xp == XPTR_NULL ) 358 { 359 printk("\n[ERROR] in 
%s : undefined fdid %d", 360 __FUNCTION__, fdid ); 361 return -1; 362 } 363 364 file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) ); 365 socket_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->socket ) ); 366 367 // check file descriptor type 368 if( file_type != INODE_TYPE_SOCK ) 369 { 370 printk("\n[ERROR] in %s : illegal file type %s", 371 __FUNCTION__, vfs_inode_type_str(file_type) ); 372 return -1; 373 } 374 375 // get socket type and state 376 socket_type = hal_remote_l32( XPTR( file_cxy , &socket_ptr->type )); 377 socket_state = hal_remote_l32( XPTR( file_cxy , &socket_ptr->state )); 378 379 // check socket type 380 if( socket_type != SOCK_STREAM ) 381 { 382 printk("\n[ERROR] in %s : illegal socket type", 383 __FUNCTION__ ); 384 return -1; 385 } 386 387 // check socket state 388 if( socket_state != TCP_STATE_BOUND ) 389 { 390 printk("\n[ERROR] in %s : illegal socket state %s", 391 __FUNCTION__, socket_state_str(socket_state) ); 392 return -1; 393 } 394 395 // update socket.state 396 hal_remote_s32( XPTR( file_cxy , &socket_ptr->state ) , TCP_STATE_LISTEN ); 397 398 return 0; 399 400 } // end dev_nic_listen() 401 82 402 /////////////////////////////////// 83 error_t dev_nic_read( pkd_t * pkd ) 84 { 85 error_t error; 86 87 // get pointers on this NIC-RX kernel thread 88 thread_t * thread_ptr = CURRENT_THREAD; 89 xptr_t thread_xp = XPTR( local_cxy , thread_ptr ); 90 91 // get local pointer on core running this kernel thead 92 core_t * core = thread_ptr->core; 93 94 // check thread can yield 95 assert( (thread_ptr->busylocks == 0), 96 "cannot yield : busylocks = %d\n", thread_ptr->busylocks ); 403 int dev_nic_connect( uint32_t fdid, 404 uint32_t remote_addr, 405 uint16_t remote_port ) 406 { 407 vfs_inode_type_t file_type; 408 socket_t * socket; 409 uint32_t socket_state; // socket state 410 uint32_t socket_type; // socket type 411 uint32_t local_addr; // local IP address 412 uint32_t local_port; // local port 413 xptr_t tx_server_xp; // extended 
pointer on TX server thread 414 thread_t * tx_server_ptr; // local pointer on TX server thread 415 416 thread_t * this = CURRENT_THREAD; 417 process_t * process = this->process; 418 419 // get pointers on file descriptor 420 xptr_t file_xp = process_fd_get_xptr( process , fdid ); 421 vfs_file_t * file_ptr = GET_PTR( file_xp ); 422 cxy_t file_cxy = GET_CXY( file_xp ); 423 424 // check file_xp 425 if( file_xp == XPTR_NULL ) 426 { 427 printk("\n[ERROR] in %s : undefined fdid %d", 428 __FUNCTION__, fdid ); 429 return -1; 430 } 431 432 file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) ); 433 socket = hal_remote_lpt( XPTR( file_cxy , &file_ptr->socket ) ); 434 435 // check file descriptor type 436 if( file_type != INODE_TYPE_SOCK ) 437 { 438 printk("\n[ERROR] in %s : illegal file type %s", 439 __FUNCTION__, vfs_inode_type_str( file_type ) ); 440 return -1; 441 } 442 443 // get relevant socket infos 444 socket_type = hal_remote_l32( XPTR( file_cxy , &socket->type ) ); 445 socket_state = hal_remote_l32( XPTR( file_cxy , &socket->state ) ); 446 local_addr = hal_remote_l32( XPTR( file_cxy , &socket->local_addr ) ); 447 local_port = hal_remote_l32( XPTR( file_cxy , &socket->local_port ) ); 448 449 if( socket_type == SOCK_DGRAM ) // UDP 450 { 451 if( socket_state != UDP_STATE_BOUND ) 452 { 453 printk("\n[ERROR] in %s : illegal socket statea %s for CONNECT", 454 __FUNCTION__, socket_state_str(socket_state) ); 455 return -1; 456 } 457 } 458 else if( socket_type == SOCK_STREAM ) // TCP 459 { 460 if( socket_state != TCP_STATE_BOUND ) 461 { 462 printk("\n[ERROR] in %s : illegal socket state %s for CONNECT", 463 __FUNCTION__, socket_state_str(socket_state) ); 464 return -1; 465 } 466 } 467 else 468 { 469 printk("\n[ERROR] in %s : illegal socket type %d for CONNECT", 470 __FUNCTION__, socket_type ); 471 return -1; 472 } 473 474 // compute nic_channel index from remote_addr and remote_port 475 uint32_t nic_channel = dev_nic_channel_index( remote_addr , remote_port ); 476 
477 // link new socket to chdev servers 478 socket_link_to_servers( XPTR( file_cxy , socket ), 479 nic_channel ); 480 481 // update the socket descriptor 482 hal_remote_s32( XPTR( file_cxy , &socket->remote_addr ) , remote_addr ); 483 hal_remote_s32( XPTR( file_cxy , &socket->remote_port ) , remote_port ); 484 hal_remote_s32( XPTR( file_cxy , &socket->nic_channel ) , nic_channel ); 485 486 // the actual connection mechanism depends on socket type 487 // UDP : client thread directly updates the local socket state 488 // TCP : client thread request TX server thread to start the 3 steps handshake 489 490 if( socket_type == SOCK_DGRAM ) // UDP 491 { 492 // directly update the local socket state 493 hal_remote_s32( XPTR( file_cxy , &socket->state ) , UDP_STATE_CONNECT ); 494 } 495 else // TCP 496 { 497 // get pointers on NIC_TX[index] chdev 498 xptr_t tx_chdev_xp = chdev_dir.nic_tx[nic_channel]; 499 chdev_t * tx_chdev_ptr = GET_PTR( tx_chdev_xp ); 500 cxy_t tx_chdev_cxy = GET_CXY( tx_chdev_xp ); 501 502 // get pointers on NIC_TX[channel] server thread 503 tx_server_ptr = hal_remote_lpt( XPTR( tx_chdev_cxy , &tx_chdev_ptr->server )); 504 tx_server_xp = XPTR( tx_chdev_cxy , tx_server_ptr ); 505 506 // register command arguments in socket descriptor 507 hal_remote_s64( XPTR( file_cxy , &socket->tx_cmd ), 508 SOCKET_TX_CONNECT ); 509 510 // update the "tx_client" field in socket descriptor 511 hal_remote_s64( XPTR( file_cxy , &socket->tx_client ), 512 XPTR( local_cxy , this ) ); 513 514 // unblock NIC_TX server thread 515 thread_unblock( tx_server_xp , THREAD_BLOCKED_CLIENT ); 516 517 // block on THREAD_BLOCKED_IO condition and deschedules 518 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IO ); 519 sched_yield( "blocked in connect" ); 520 521 // reset the "tx_client" field in socket descriptor 522 hal_remote_s64( XPTR( file_cxy , &socket->tx_client ), 523 XPTR_NULL ); 524 } 525 526 return 0; 527 528 } // end dev_nic_connect() 529 530 
//////////////////////////////////// 531 int dev_nic_accept( uint32_t fdid, 532 uint32_t * remote_addr, 533 uint16_t * remote_port ) 534 { 535 xptr_t file_xp; // extended pointer on remote file 536 vfs_file_t * file_ptr; 537 cxy_t file_cxy; 538 vfs_inode_type_t file_type; // file descriptor type 539 socket_t * socket; // local pointer on remote waiting socket 540 uint32_t socket_type; // waiting socket type 541 uint32_t socket_state; // waiting socket state 542 uint32_t socket_domain; // waiting socket domain 543 uint32_t socket_local_addr; // waiting socket local IP address 544 uint32_t socket_local_port; // waiting socket local port 545 xptr_t crqq_xp; // extended pointer on socket.crqq queue 546 socket_t * new_socket; // local pointer on new socket 547 uint32_t new_fdid; // new socket file descriptor index 548 sockaddr_t new_sockaddr; // one request in crqq queue 549 uint32_t new_remote_addr; // new socket remote IP address 550 uint32_t new_remote_port; // new socket remote port 551 error_t error; 552 553 thread_t * this = CURRENT_THREAD; 554 process_t * process = this->process; 555 556 // get pointers on file descriptor 557 file_xp = process_fd_get_xptr( process , fdid ); 558 file_ptr = GET_PTR( file_xp ); 559 file_cxy = GET_CXY( file_xp ); 560 561 // check file_xp 562 if( file_xp == XPTR_NULL ) 563 { 564 printk("\n[ERROR] in %s : undefined fdid %d", 565 __FUNCTION__, fdid ); 566 return -1; 567 } 568 569 file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) ); 570 socket = hal_remote_lpt( XPTR( file_cxy , &file_ptr->socket ) ); 571 572 // check file descriptor type 573 if( file_type != INODE_TYPE_SOCK ) 574 { 575 printk("\n[ERROR] in %s : illegal file type %s / thread[%x,%x]\n", 576 __FUNCTION__, vfs_inode_type_str(file_type), process->pid, this->trdid ); 577 return -1; 578 } 579 580 // get socket type, domain, state, local_addr and local_port 581 socket_type = hal_remote_l32( XPTR( file_cxy , &socket->type )); 582 socket_state = hal_remote_l32( XPTR( 
file_cxy , &socket->state )); 583 socket_domain = hal_remote_l32( XPTR( file_cxy , &socket->domain )); 584 socket_local_addr = hal_remote_l32( XPTR( file_cxy , &socket->local_addr )); 585 socket_local_port = hal_remote_l32( XPTR( file_cxy , &socket->local_port )); 586 587 // check socket type 588 if( socket_type != SOCK_STREAM ) 589 { 590 printk("\n[ERROR] in %s : illegal socket type / thread[%x,%x]\n", 591 __FUNCTION__, process->pid , this->trdid ); 592 return -1; 593 } 594 595 // check socket state 596 if( socket_state != TCP_STATE_LISTEN ) 597 { 598 printk("\n[ERROR] in %s : illegal socket state %s / thread[%x,%x]\n", 599 __FUNCTION__, socket_state_str(socket_state), process->pid, this->trdid ); 600 return -1; 601 } 602 603 // select a cluster for the new socket 604 cxy_t new_cxy = cluster_random_select(); 605 606 // allocate memory for the new socket descriptor 607 error = socket_create( new_cxy, 608 socket_domain, 609 socket_type, 610 &new_socket, 611 &new_fdid ); 612 if( error ) 613 { 614 printk("\n[ERROR] in %s : cannot allocate new socket / thread[%x,%x]\n", 615 __FUNCTION__, process->pid, this->trdid ); 616 return -1; 617 } 618 619 // build extended pointer on socket.crqq 620 crqq_xp = XPTR( file_cxy , &socket->crqq ); 621 622 // blocks and deschedules if requests queue empty 623 if( remote_buf_status( crqq_xp ) == 0 ) 624 { 625 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IO ); 626 sched_yield( "socket.crqq queue empty"); 627 } 628 629 // extract first request from the socket.crqq queue 630 remote_buf_get_to_kernel( crqq_xp, 631 (uint8_t *)(&new_sockaddr), 632 sizeof(sockaddr_t) ); 633 634 new_remote_addr = new_sockaddr.s_addr; 635 new_remote_port = new_sockaddr.s_port; 636 637 // compute NIC channel index from remote_addr and remote_port 638 uint32_t nic_channel = dev_nic_channel_index( new_remote_addr , new_remote_port ); 639 640 // update new socket descriptor 641 new_socket->local_addr = hal_remote_l32(XPTR( file_cxy , &socket->local_addr 
)); 642 new_socket->local_port = hal_remote_l32(XPTR( file_cxy , &socket->local_port )); 643 new_socket->remote_addr = new_remote_addr; 644 new_socket->remote_port = new_remote_port; 645 new_socket->nic_channel = nic_channel; 646 647 // link new socket to chdev servers 648 socket_link_to_servers( XPTR( new_cxy , new_socket ), 649 nic_channel ); 650 // return success 651 *remote_addr = new_remote_addr; 652 *remote_port = new_remote_port; 653 654 return new_fdid; 655 656 } // end dev_nic_accept() 657 658 //////////////////////////////////////////////////////////////////////////////////////// 659 // This static and blocking function is called by the four functions : 660 // dev_nic_send() / dev_nic_recv() / dev_nic_sendto() / dev_nic_recvfrom(). 661 //////////////////////////////////////////////////////////////////////////////////////// 662 // Implementation note 663 // The behavior is very different for SEND & RECV : 664 // - For a SEND, the client thread checks that there is no TX command registered 665 // in the socket. It registers the command arguments in the socket descriptor 666 // (tx_client, tx_cmd, tx_buf, tx_len). Then the client thread unblocks the 667 // TX server thread from the BLOCKED_CLIENT condition, blocks itself on the 668 // BLOCKED_IO condition, and deschedules. It is unblocked by the TX server thread 669 // when the last byte has been sent (for UDP) or acknowledged (for TCP). 670 // When the client thread resumes, it reset the command in socket, and returns. 671 // - For a RECV, the client thread checks that there is no RX command registered 672 // in the socket. It registers itself in socket (rx_client). It checks the status 673 // of the receive buffer. It the rx_buf is empty, it blocks on the BLOCKED_IO 674 // condition, and deschedules. It is unblocked by the RX server thread when an UDP 675 // packet or TCP segment has been writen in the rx_buf. 
When it resumes, it moves 676 // the available data from the rx_buf to the user buffer, reset its registration 677 // in socket (reset the rx_buf for an UDP socket), and returns. 678 //////////////////////////////////////////////////////////////////////////////////////// 679 int dev_nic_register_cmd( bool_t is_send, 680 uint32_t fdid, 681 uint8_t * u_buf, 682 uint32_t length, 683 bool_t explicit, 684 uint32_t explicit_addr, 685 uint32_t explicit_port ) 686 { 687 vfs_inode_type_t file_type; // file descriptor type 688 socket_t * socket_ptr; // local pointer on socket descriptor 689 uint32_t socket_state; // current socket state 690 uint32_t socket_type; // socket type (UDP/TCP) 691 uint32_t nic_channel; // NIC channel for this socket 692 xptr_t socket_lock_xp; // extended pointer on socket lock 693 xptr_t file_xp; // extended pointer on file descriptor 694 vfs_file_t * file_ptr; 695 cxy_t file_cxy; 696 xptr_t chdev_xp; // extended pointer on NIC_TX[channel] chdev 697 chdev_t * chdev_ptr; 698 cxy_t chdev_cxy; 699 uint32_t remote_addr; 700 uint32_t remote_port; 701 uint32_t status; // number of bytes in rx_buf 702 int32_t moved_bytes; // total number of moved bytes (fot return) 703 xptr_t server_xp; // extended pointer on NIC_TX / NIC_RX server thread 704 thread_t * server_ptr; // local pointer on NIC_TX / NIC_RX server thread 705 706 thread_t * this = CURRENT_THREAD; 707 process_t * process = this->process; 708 709 // get pointers on file descriptor identifying the socket 710 file_xp = process_fd_get_xptr( process , fdid ); 711 file_ptr = GET_PTR( file_xp ); 712 file_cxy = GET_CXY( file_xp ); 713 714 if( file_xp == XPTR_NULL ) 715 { 716 printk("\n[ERROR] in %s : undefined fdid %d / thread%x,%x]\n", 717 __FUNCTION__, fdid , process->pid, this->trdid ); 718 return -1; 719 } 720 721 // get file type and socket pointer 722 file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) ); 723 724 // get local pointer on socket 725 socket_ptr = hal_remote_lpt( XPTR( 
file_cxy , &file_ptr->socket ) ); 726 727 // check file descriptor type 728 if( file_type != INODE_TYPE_SOCK ) 729 { 730 printk("\n[ERROR] in %s : illegal file type %s / fdid %d / thread%x,%x]\n", 731 __FUNCTION__, vfs_inode_type_str(file_type), fdid, process->pid, this->trdid ); 732 return -1; 733 } 734 735 // build extended pointer on file lock protecting socket 736 socket_lock_xp = XPTR( file_cxy , &file_ptr->lock ); 737 738 // take the socket lock 739 remote_rwlock_wr_acquire( socket_lock_xp ); 740 741 // get socket type, state, and channel 742 socket_type = hal_remote_l32( XPTR( file_cxy , &socket_ptr->type )); 743 socket_state = hal_remote_l32( XPTR( file_cxy , &socket_ptr->state )); 744 nic_channel = hal_remote_l32( XPTR( file_cxy , &socket_ptr->nic_channel )); 745 746 // check socket state / type 747 if( socket_type == SOCK_STREAM ) // TCP socket 748 { 749 if( socket_state != TCP_STATE_ESTAB ) 750 { 751 printk("\n[ERROR] in %s : illegal SEND/RECV for state %s / thread%x,%x]\n", 752 __FUNCTION__, socket_state_str(socket_state), process->pid, this->trdid ); 753 return -1; 754 } 755 756 if( explicit ) 757 { 758 // get remote IP address and type from socket descriptor 759 remote_addr = hal_remote_l32( XPTR( file_cxy , &socket_ptr->remote_addr )); 760 remote_port = hal_remote_l32( XPTR( file_cxy , &socket_ptr->remote_port )); 761 762 if( (remote_addr != explicit_addr) || (remote_port != explicit_port) ) 763 { 764 printk("\n[ERROR] in %s : wrong expliciy access / thread%x,%x]\n", 765 __FUNCTION__, process->pid, this->trdid ); 766 return -1; 767 } 768 } 769 } 770 else // UDP socket 771 { 772 if( explicit ) 773 { 774 if( socket_state == UDP_STATE_UNBOUND ) 775 { 776 printk("\n[ERROR] in %s : illegal SEND/RECV for state %s / thread%x,%x]\n", 777 __FUNCTION__, socket_state_str(socket_state), process->pid, this->trdid ); 778 return -1; 779 } 780 781 // update remote IP address and port into socket descriptor 782 hal_remote_s32( XPTR( file_cxy , 
&socket_ptr->remote_addr ), explicit_addr ); 783 hal_remote_s32( XPTR( file_cxy , &socket_ptr->remote_port ), explicit_port ); 784 } 785 else 786 { 787 if( socket_state != UDP_STATE_CONNECT ) 788 { 789 printk("\n[ERROR] in %s : illegal SEND/RECV for state %s / thread%x,%x]\n", 790 __FUNCTION__, socket_state_str(socket_state), process->pid, this->trdid ); 791 return -1; 792 } 793 } 794 } 795 796 /////////////////////////////////////////////////////// 797 if( is_send ) // SEND command 798 { 799 // build extended pointer on socket "tx_client" 800 xptr_t client_xp = XPTR( file_cxy , &socket_ptr->tx_client ); 801 802 // check no previous SEND command 803 xptr_t client = hal_remote_l64( client_xp ); 804 805 if( client != XPTR_NULL ) // release socket lock and return error 806 { 807 // release socket lock 808 remote_rwlock_wr_release( socket_lock_xp ); 809 810 // get previous thread cluster & local pointer 811 cxy_t prev_cxy = GET_CXY( client ); 812 thread_t * prev_ptr = GET_PTR( client ); 813 814 // get previous command type and trdid 815 uint32_t prev_cmd = hal_remote_l32( XPTR( prev_cxy , &prev_ptr->nic_cmd.type )); 816 uint32_t prev_tid = hal_remote_l32( XPTR( prev_cxy , &prev_ptr->trdid )); 817 818 printk("\n[ERROR] in %s : previous command %s for thread %x / thread%x,%x]\n", 819 __FUNCTION__, socket_cmd_str(prev_cmd), prev_tid, 820 process->pid, this->trdid ); 821 822 return -1; 823 } 824 825 // client thread registers in socket descriptor 826 hal_remote_s64( client_xp , XPTR( local_cxy , this ) ); 827 hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_cmd ) , SOCKET_TX_SEND ); 828 hal_remote_spt( XPTR( file_cxy , &socket_ptr->tx_buf ) , u_buf ); 829 hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_len ) , length ); 830 hal_remote_s32( XPTR( file_cxy , &socket_ptr->tx_todo ) , length ); 831 832 // release socket lock 833 remote_rwlock_wr_release( socket_lock_xp ); 834 835 // get pointers on relevant chdev 836 chdev_xp = chdev_dir.nic_tx[nic_channel]; 837 chdev_ptr = 
GET_PTR( chdev_xp ); 838 chdev_cxy = GET_CXY( chdev_xp ); 839 840 // get pointers on NIC_TX[channel] server thread 841 server_ptr = hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server )); 842 server_xp = XPTR( chdev_cxy , server_ptr ); 843 844 // unblocks the NIC_TX server thread 845 thread_unblock( server_xp , THREAD_BLOCKED_CLIENT ); 846 847 // client thread blocks itself and deschedules 848 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IO ); 849 sched_yield( "blocked in nic_io" ); 850 851 // take the socket lock when unblocked 852 remote_rwlock_wr_acquire( socket_lock_xp ); 853 854 // unlink client thread from socket 855 hal_remote_s64( client_xp , XPTR_NULL ); 856 857 // release socket lock 858 remote_rwlock_wr_release( socket_lock_xp ); 859 860 // exit waiting loop and return 861 return length; 862 863 } // end SEND 864 865 //////////////////////////////////////////////////////// 866 else // RECV command 867 { 868 // build extended pointers on socket "rx_client" 869 xptr_t client_xp = XPTR( file_cxy , &socket_ptr->rx_client ); 870 871 // check no previous RECV command 872 xptr_t client = hal_remote_l64( client_xp ); 873 874 if( client != XPTR_NULL ) // release socket lock and return error 875 { 876 // release socket lock 877 remote_rwlock_wr_release( socket_lock_xp ); 878 879 // get previous thread cluster & local pointer 880 cxy_t prev_cxy = GET_CXY( client ); 881 thread_t * prev_ptr = GET_PTR( client ); 882 883 // get previous command type and trdid 884 uint32_t prev_cmd = hal_remote_l32( XPTR( prev_cxy , &prev_ptr->nic_cmd.type )); 885 uint32_t prev_tid = hal_remote_l32( XPTR( prev_cxy , &prev_ptr->trdid )); 886 887 printk("\n[ERROR] in %s : previous command %s for thread %x / thread%x,%x]\n", 888 __FUNCTION__, socket_cmd_str(prev_cmd), prev_tid, process->pid, this->trdid ); 889 return -1; 890 } 891 892 // build extended pointer on "rx_buf" 893 xptr_t rx_buf_xp = XPTR( file_cxy , &socket_ptr->rx_buf ); 894 895 // get rx_buf status from socket 896 
status = remote_buf_status( rx_buf_xp ); 897 898 if( status == 0 ) // rx_buf empty => blocks and deschedules 899 { 900 // release socket lock 901 remote_rwlock_wr_release( socket_lock_xp ); 902 903 // client thread blocks itself and deschedules 904 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IO ); 905 sched_yield( "blocked in nic_io" ); 906 907 // take socket lock 908 remote_rwlock_wr_release( socket_lock_xp ); 909 } 910 911 // number of moved bytes cannot be larger than u_buf size 912 moved_bytes = ( length < status ) ? length : status; 913 914 // move data from kernel rx_buf to user u_buf 915 remote_buf_get_to_user( rx_buf_xp, 916 u_buf, 917 moved_bytes ); 918 919 // reset rx_buf for an UDP socket 920 if( socket_type == SOCK_DGRAM ) remote_buf_reset( rx_buf_xp ); 921 922 // unlink client thread from socket 923 hal_remote_s64( client_xp , XPTR_NULL ); 924 925 // release socket lock 926 remote_rwlock_wr_release( socket_lock_xp ); 927 928 // exit waiting loop and return 929 return moved_bytes; 930 931 } // end SEND 932 933 } // end dev_nic_register_cmd() 934 935 936 /////////////////////////////////// 937 int dev_nic_send( uint32_t fdid, 938 uint8_t * u_buf, 939 uint32_t length ) 940 { 941 #if DEBUG_DEV_NIC_TX 942 thread_t * this = CURRENT_THREAD; 943 process_t * process = this->process; 944 trdid_t trdid = this->trdid; 945 pid_t pid = process->pid; 946 uint32_t cycle = (uint32_t)hal_get_cycle(); 947 if (DEBUG_DEV_NIC_TX < cycle ) 948 printk("[%s] thread[%x,%x] enters : fdid %d / buf %x / length %d / cycle %d\n", 949 __FUNCTION__, pid, trdid, fdid, u_buf, length, cycle ); 950 #endif 951 952 error_t error = dev_nic_register_cmd( true, // SEND 953 fdid, 954 u_buf, 955 length, 956 false, 0, 0 ); // no explicit remote socket 957 #if DEBUG_DEV_NIC_TX 958 cycle = (uint32_t)hal_get_cycle(); 959 if (DEBUG_DEV_NIC_TX < cycle ) 960 printk("[%s] thread[%x,%x] exit : fdid %d / cycle %d\n", 961 __FUNCTION__, pid, trdid, cycle ); 962 #endif 963 964 return error; 965 
966 } // end dev_nic_send() 967 968 /////////////////////////////////// 969 int dev_nic_recv( uint32_t fdid, 970 uint8_t * u_buf, 971 uint32_t length ) 972 { 973 #if DEBUG_DEV_NIC_RX 974 thread_t * this = CURRENT_THREAD; 975 process_t * process = this->process; 976 trdid_t trdid = this->trdid; 977 pid_t pid = process->pid; 978 uint32_t cycle = (uint32_t)hal_get_cycle(); 979 if (DEBUG_DEV_NIC_RX < cycle ) 980 printk("[%s] thread[%x,%x] enters : fdid %d / buf %x / length %d / cycle %d\n", 981 __FUNCTION__, pid, trdid, fdid, u_buf, length, cycle ); 982 #endif 983 984 error_t error = dev_nic_register_cmd( false, // RECV 985 fdid, 986 u_buf, 987 length, 988 false, 0, 0 ); // no explicit remote socket 989 #if DEBUG_DEV_NIC_RX 990 cycle = (uint32_t)hal_get_cycle(); 991 if (DEBUG_DEV_NIC_RX < cycle ) 992 printk("[%s] thread[%x,%x] exit : fdid %d / cycle %d\n", 993 __FUNCTION__, pid, trdid, cycle ); 994 #endif 995 996 return error; 997 998 } // end dev_nic_recv() 999 1000 ///////////////////////////////////// 1001 int dev_nic_sendto( uint32_t fdid, 1002 uint8_t * u_buf, 1003 uint32_t length, 1004 uint32_t remote_addr, 1005 uint32_t remote_port ) 1006 { 1007 #if DEBUG_DEV_NIC_TX 1008 thread_t * this = CURRENT_THREAD; 1009 process_t * process = this->process; 1010 trdid_t trdid = this->trdid; 1011 pid_t pid = process->pid; 1012 uint32_t cycle = (uint32_t)hal_get_cycle(); 1013 if (DEBUG_DEV_NIC_TX < cycle ) 1014 printk("[%s] thread[%x,%x] enters : fdid %d / buf %x / length %d / cycle %d\n", 1015 __FUNCTION__, pid, trdid, fdid, u_buf, length, cycle ); 1016 #endif 1017 1018 error_t error = dev_nic_register_cmd( true, // SEND 1019 fdid, 1020 u_buf, 1021 length, 1022 true, // explicit remote socket 1023 remote_addr, 1024 remote_port ); 1025 #if DEBUG_DEV_NIC_TX 1026 cycle = (uint32_t)hal_get_cycle(); 1027 if (DEBUG_DEV_NIC_TX < cycle ) 1028 printk("[%s] thread[%x,%x] exit : fdid %d / cycle %d\n", 1029 __FUNCTION__, pid, trdid, cycle ); 1030 #endif 1031 1032 return error; 1033 1034 
} // end dev_nic_sendto() 1035 1036 /////////////////////////////////////// 1037 int dev_nic_recvfrom( uint32_t fdid, 1038 uint8_t * u_buf, 1039 uint32_t length, 1040 uint32_t remote_addr, 1041 uint32_t remote_port ) 1042 { 1043 #if DEBUG_DEV_NIC_RX 1044 thread_t * this = CURRENT_THREAD; 1045 process_t * process = this->process; 1046 trdid_t trdid = this->trdid; 1047 pid_t pid = process->pid; 1048 uint32_t cycle = (uint32_t)hal_get_cycle(); 1049 if (DEBUG_DEV_NIC_RX < cycle ) 1050 printk("[%s] thread[%x,%x] enters : fdid %d / buf %x / length %d / cycle %d\n", 1051 __FUNCTION__, pid, trdid, fdid, u_buf, length, cycle ); 1052 #endif 1053 1054 error_t error = dev_nic_register_cmd( false, // RECV 1055 fdid, 1056 u_buf, 1057 length, 1058 true, // explicit remote socket 1059 remote_addr, 1060 remote_port ); 1061 #if DEBUG_DEV_NIC_RX 1062 cycle = (uint32_t)hal_get_cycle(); 1063 if (DEBUG_DEV_NIC_RX < cycle ) 1064 printk("[%s] thread[%x,%x] exit : fdid %d / cycle %d\n", 1065 __FUNCTION__, pid, trdid, cycle ); 1066 #endif 1067 1068 return error; 1069 1070 } // end dev_nic_recvfrom() 1071 1072 1073 1074 1075 1076 1077 1078 1079 /////////////////////////////////////////////////////////////////////////////////////////// 1080 // Functions called by the NIC_RX server thread 1081 /////////////////////////////////////////////////////////////////////////////////////////// 1082 1083 ///////////////////////////////////////////////////////////////////////////////////////// 1084 // This static function is called by the NIC_RX[channel] server thread to register 1085 // a send request defined by the <flags> argument in the R2T queue specified by 1086 // the <queue_xp> argument. 1087 ///////////////////////////////////////////////////////////////////////////////////////// 1088 // @ queue_xp : [in] extended pointer on the R2T qeue descriptor. 1089 // @ flags : [in] flags to be set in the TCP segment. 
1090 ///////////////////////////////////////////////////////////////////////////////////////// 1091 static void dev_nic_rx_put_r2t_request( xptr_t queue_xp, 1092 uint32_t flags ) 1093 { 1094 while( 1 ) 1095 { 1096 error_t error = remote_buf_put_from_kernel( queue_xp, 1097 (uint8_t *)(&flags), 1098 1 ); 1099 1100 if( error ) sched_yield( "waiting R2T queue" ); 1101 else break; 1102 } 1103 1104 } // end dev_nic_rx_put_r2t_request() 1105 1106 /////////////////////////////////////////////////////////////////////////////////////////// 1107 // This static function is called by the dev_nic_rx_server() function. 1108 // It calls directly the NIC driver (with the READABLE command) and returns the status 1109 // of the NIC_RX queue identified by the <chdev> argument. 1110 // in the <readable> buffer. 1111 /////////////////////////////////////////////////////////////////////////////////////////// 1112 // @ chdev : [in] local pointer on NIC_TX chdev. 1113 // @ readable : [out] zero if queue empty. 1114 // @ returns 0 if success / returns -1 if failure in accessing NIC device. 1115 /////////////////////////////////////////////////////////////////////////////////////////// 1116 error_t dev_nic_rx_queue_readable( chdev_t * chdev, 1117 uint32_t * readable ) 1118 { 1119 thread_t * this = CURRENT_THREAD; 1120 1121 // initialize NIC_READABLE command in thread descriptor 1122 this->nic_cmd.dev_xp = XPTR( local_cxy , chdev ); 1123 this->nic_cmd.type = NIC_CMD_READABLE; 1124 1125 // call driver to test readable 1126 chdev->cmd( XPTR( local_cxy , this ) ); 1127 1128 // return status 1129 *readable = this->nic_cmd.status; 1130 1131 // return error 1132 return this->nic_cmd.error; 1133 } 1134 1135 /////////////////////////////////////////////////////////////////////////////////////////// 1136 // This static function is called by the dev_nic_rx_server() function. 
1137 // It moves one Ethernet packet from the NIC_RX_QUEUE identified the <chdev> argument, 1138 // to the 2K bytes kernel buffer identified by the <buffer> argument. The actual 1139 // Ethernet packet length is returned in the <length> argument. 1140 // It calls directly the NIC driver with the READ command, without registering in the 1141 // waiting queue, because only the NIC_RX server thread can access this NIC_RX_QUEUE. 1142 /////////////////////////////////////////////////////////////////////////////////////////// 1143 // @ chdev : [in] local pointer on NIC_TX chdev. 1144 // @ buffer : [in] local pointer on destination kernel buffer. 1145 // @ length : [out] Ethernet packet size in bytes. 1146 // @ returns 0 if success / returns -1 if failure in accessing NIC device. 1147 /////////////////////////////////////////////////////////////////////////////////////////// 1148 error_t dev_nic_rx_move_packet( chdev_t * chdev, 1149 uint8_t * k_buf, 1150 uint32_t * length ) 1151 { 1152 thread_t * this = CURRENT_THREAD; 97 1153 98 1154 #if DEBUG_DEV_NIC_RX 99 1155 uint32_t cycle = (uint32_t)hal_get_cycles(); 100 1156 if( DEBUG_DEV_NIC_RX < cycle ) 101 printk("\n[ DBG] %s : thread %x enters for packet %x in cluster %x\n",102 __FUNCTION__ , thread_ptr , pkd , local_cxy);1157 printk("\n[%s] thread[%x,%x] enters / cycle %d\n", 1158 __FUNCTION__, this->process->pid, this->trdid, cycle ); 103 1159 #endif 104 1160 105 // get pointer on NIC-RX chdev descriptor 106 uint32_t channel = thread_ptr->chdev->channel; 107 xptr_t dev_xp = chdev_dir.nic_rx[channel]; 108 cxy_t dev_cxy = GET_CXY( dev_xp ); 109 chdev_t * dev_ptr = (chdev_t *)GET_PTR( dev_xp ); 110 111 assert( (dev_xp != XPTR_NULL) , "undefined NIC chdev descriptor" ); 112 113 assert( (dev_cxy == local_cxy) , " chdev must be local" ); 114 115 // initialize command in thread descriptor 116 thread_ptr->nic_cmd.dev_xp = dev_xp; 117 118 // call driver to test readable 119 thread_ptr->nic_cmd.cmd = NIC_CMD_READABLE; 120 
dev_ptr->cmd( thread_xp ); 1161 // initialize NIC_READ command in thread descriptor 1162 this->nic_cmd.type = NIC_CMD_READ; 1163 this->nic_cmd.buffer = k_buf; 1164 1165 // call NIC driver 1166 chdev->cmd( XPTR( local_cxy , this ) ); 1167 1168 // returns packet length 1169 *length = this->nic_cmd.length; 121 1170 122 1171 // check error 123 error = thread_ptr->nic_cmd.error; 124 if( error ) return error; 125 126 // block and deschedule if queue non readable 127 if( thread_ptr->nic_cmd.status == false ) 128 { 129 // enable NIC-RX IRQ 130 dev_pic_enable_irq( core->lid , dev_xp ); 131 132 // block client thread on THREAD_BLOCKED_IO 133 thread_block( XPTR( local_cxy , thread_ptr ) , THREAD_BLOCKED_IO ); 134 135 // deschedule client thread 136 sched_yield("client blocked on I/O"); 137 138 // disable NIC-RX IRQ 139 dev_pic_disable_irq( core->lid , dev_xp ); 140 } 141 142 // call driver for actual read 143 thread_ptr->nic_cmd.cmd = NIC_CMD_READ; 144 thread_ptr->nic_cmd.buffer = pkd->buffer; 145 dev_ptr->cmd( thread_xp ); 146 147 // check error 148 error = thread_ptr->nic_cmd.error; 149 if( error ) return error; 150 151 // returns packet length 152 pkd->length = thread_ptr->nic_cmd.length; 1172 if( this->nic_cmd.error ) 1173 { 153 1174 154 1175 #if DEBUG_DEV_NIC_RX 155 1176 cycle = (uint32_t)hal_get_cycles(); 156 1177 if( DEBUG_DEV_NIC_RX < cycle ) 157 printk("\n[ DBG] %s : thread %x exit for packet %x in cluster %x\n",158 __FUNCTION__ , thread_ptr , pkd , local_cxy);1178 printk("\n[%s] thread[%x,%x] exit / ERROR in NIC_RX / cycle %d\n", 1179 __FUNCTION__, this->process->pid, this->trdid, cycle ); 159 1180 #endif 160 1181 1182 return -1; 1183 } 1184 else 1185 { 1186 1187 #if DEBUG_DEV_NIC_RX 1188 cycle = (uint32_t)hal_get_cycles(); 1189 if( DEBUG_DEV_NIC_RX < cycle ) 1190 printk("\n[%s] thread[%x,%x] exit / SUCCESS / cycle %d\n", 1191 __FUNCTION__, this->process->pid, this->trdid, cycle ); 1192 #endif 1193 1194 return 0; 1195 } 1196 1197 } // end dev_nic_rx_move_packet() 
1198 1199 /////////////////////////////////////////////////////////////////////////////////////////// 1200 // This static function is called by the dev_nic_rx_server() function. 1201 // It analyses an Ethernet frame contained in the kernel buffer defined 1202 // by the <buffer> argument, and returns in the <ip_length> argument the length 1203 // of the IP packet contained in the Ethernet packet payload. 1204 /////////////////////////////////////////////////////////////////////////////////////////// 1205 // @ buffer : [in] pointer on a received Ethernet packet 1206 // @ ip_length : [out] length of IP packet (in bytes). 1207 // @ return 0 if success / return -1 if illegal packet length. 1208 /////////////////////////////////////////////////////////////////////////////////////////// 1209 static error_t dev_nic_rx_check_eth( uint8_t * buffer, 1210 uint32_t * ip_length ) 1211 { 1212 uint32_t length = ((uint32_t)buffer[12] << 8) | (uint32_t)buffer[13]; 1213 1214 *ip_length = length; 1215 161 1216 return 0; 162 163 } // end dev_nic_read() 164 165 166 //////////////////////////////////// 167 error_t dev_nic_write( pkd_t * pkd ) 168 { 169 error_t error; 170 171 // get pointers on the NIC-TX kernel tread 172 thread_t * thread_ptr = CURRENT_THREAD; 173 xptr_t thread_xp = XPTR( local_cxy , thread_ptr ); 174 175 // get local pointer on core running this kernel thead 176 core_t * core = thread_ptr->core; 1217 } 1218 1219 /////////////////////////////////////////////////////////////////////////////////////////// 1220 // This static function analyses the IP packet contained in the kernel buffer 1221 // defined by the <buffer> argument, and returns in the <ip_src_addr>, <ip_dst_addr>, 1222 // <header_length> and <protocol> arguments the informations contained in the IP header. 1223 // It checks the IP packet length versus the value contained in Ethernet header. 1224 // It checks the IP header checksum. 
1225 /////////////////////////////////////////////////////////////////////////////////////////// 1226 // @ buffer : [in] pointer on the IP packet. 1227 // @ expected_length : [in] expected IP packet length (from Ethernet header). 1228 // @ ip_src_addr : [out] source IP address. 1229 // @ ip_dst_addr : [out] destination IP address. 1230 // @ protocol : [out] transport protocol type. 1231 // @ return 0 if success / return -1 if illegal packet. 1232 /////////////////////////////////////////////////////////////////////////////////////////// 1233 static error_t dev_nic_rx_check_ip( uint8_t * buffer, 1234 uint32_t expected_length, 1235 uint32_t * ip_src_addr, 1236 uint32_t * ip_dst_addr, 1237 uint32_t * trsp_protocol ) 1238 { 1239 uint32_t length = ((uint32_t)buffer[2] << 8) | (uint32_t)buffer[3]; 1240 1241 // discard packet if eth_length != ip_length 1242 if( length != expected_length ) 1243 { 1244 1245 #if DEBUG_NIC_DEV 1246 thread_t * this = CURRENT_THREAD; 1247 printk("\n[%s] thread[%x,%x] enters : length (%d) != expected_length (%d)\n", 1248 __FUNCTION__, this->process->pid, this->trdid, length, expected_length ); 1249 #endif 1250 1251 return -1; 1252 } 1253 1254 // compute IP header checksum 1255 uint32_t received_cs = (uint32_t)dev_nic_ip_checksum( buffer ); 1256 1257 // extract IP header checksum 1258 uint32_t computed_cs = ((uint32_t)buffer[10] << 8) | ((uint32_t)buffer[11]); 1259 1260 // discard packet if bad checksum 1261 if( received_cs != computed_cs ) 1262 { 1263 1264 #if DEBUG_NIC_DEV 1265 thread_t * this = CURRENT_THREAD; 1266 printk("\n[%s] thread[%x,%x] computed checksum (%d) != received checksum (%d)\n", 1267 __FUNCTION__, this->process->pid, this->trdid, computed_cs, received_cs ); 1268 #endif 1269 1270 return -1; 1271 } 1272 1273 1274 *ip_src_addr = ((uint32_t)buffer[12] << 24) | 1275 ((uint32_t)buffer[13] << 16) | 1276 ((uint32_t)buffer[14] << 8) | 1277 ((uint32_t)buffer[15] ) ; 1278 1279 *ip_dst_addr = ((uint32_t)buffer[16] << 24) | 1280 
((uint32_t)buffer[17] << 16) | 1281 ((uint32_t)buffer[18] << 8) | 1282 ((uint32_t)buffer[19] ) ; 1283 1284 *trsp_protocol = (uint32_t)buffer[9]; 1285 1286 return 0; 1287 } 1288 1289 /////////////////////////////////////////////////////////////////////////////////////////// 1290 // This static function analyses the UDP packet contained in the kernel buffer 1291 // defined by the <k_buf> and <k_length> arguments. 1292 // It checks the UDP checksum, and discard corrupted packets. 1293 // It scans the list of sockets attached to the NIC_RX chdev to find a matching socket, 1294 // and discard the received packet if no UDP socket found. 1295 // Finally, it copies the payload to the socket "rx_buf", as long as the packet payload 1296 // is not larger than the rx_buf. 1297 // It set the "rx_valid" flip-flop, and unblock the client thread when the last expected 1298 // byte has been received. 1299 /////////////////////////////////////////////////////////////////////////////////////////// 1300 // @ chdev : [in] local pointer on local NIC_RX chdev descriptor. 1301 // @ k_buf : [in] pointer on the UDP packet in local kernel buffer. 1302 // @ k_length : [in] number of bytes in buffer (including UDP header). 1303 // @ pkt_src_addr : [in] source IP address (from IP packet header). 1304 // @ pkt_dst_addr : [in] destination IP address (from IP packet header). 
1305 /////////////////////////////////////////////////////////////////////////////////////////// 1306 static void dev_nic_rx_handle_udp_packet( chdev_t * chdev, 1307 uint8_t * k_buf, 1308 uint32_t k_length, 1309 uint32_t pkt_src_addr, 1310 uint32_t pkt_dst_addr ) 1311 { 1312 xptr_t root_xp; // extended pointer on attached sockets list root 1313 xptr_t lock_xp; // extended pointer on chdev lock 1314 xptr_t iter_xp; // iterator on socket list 1315 xptr_t socket_xp; // extended pointer on socket descriptor 1316 cxy_t socket_cxy; 1317 socket_t * socket_ptr; 1318 uint32_t socket_type; // socket type 1319 uint32_t socket_state; // socket state 1320 uint32_t local_addr; // local IP address from socket 1321 uint32_t local_port; // local port from socket 1322 uint32_t remote_addr; // remote IP address from socket 1323 uint32_t remote_port; // remote port from socket 1324 bool_t match_socket; // matching socket found 1325 uint16_t checksum; // computed checksum 1326 uint16_t pkt_checksum; // received checksum 1327 xptr_t socket_rbuf_xp; // extended pointer on socket rx_buf 1328 xptr_t socket_lock_xp; // extended pointer on socket lock 1329 xptr_t socket_client_xp; // extended pointer on socket rx_client field 1330 xptr_t client_xp; // extended pointer on client thread descriptor 1331 uint32_t payload; // number of bytes in payload 1332 uint32_t status; // number of bytes in rx_buf 1333 uint32_t space; // number of free slots in rx_buf 1334 uint32_t moved_bytes; // number of bytes actually moved to rx_buf 1335 1336 // build extended pointers on list of sockets attached to NIC_RX chdev 1337 root_xp = XPTR( local_cxy , &chdev->wait_root ); 1338 lock_xp = XPTR( local_cxy , &chdev->wait_lock ); 1339 1340 // compute UDP packet checksum 1341 checksum = dev_nic_udp_checksum( k_buf , k_length ); 1342 1343 // get checksum from received packet header 1344 pkt_checksum = ((uint16_t)k_buf[6] << 8) | (uint16_t)k_buf[7]; 1345 1346 // discard corrupted packet 1347 if( pkt_checksum != 
checksum ) return; 1348 1349 // get src_port and dst_port from UDP header 1350 uint32_t pkt_src_port = ((uint32_t)k_buf[0] << 8) | (uint32_t)k_buf[1]; 1351 uint32_t pkt_dst_port = ((uint32_t)k_buf[2] << 8) | (uint32_t)k_buf[3]; 1352 1353 // discard unexpected packet 1354 if( xlist_is_empty( root_xp ) ) return; 1355 1356 // take the tock protecting the sockets list 1357 remote_busylock_acquire( lock_xp ); 1358 1359 match_socket = false; 1360 1361 // scan sockets list to find a match 1362 XLIST_FOREACH( root_xp , iter_xp ) 1363 { 1364 // get socket cluster and local pointer 1365 socket_xp = XLIST_ELEMENT( iter_xp , socket_t , rx_list ); 1366 socket_ptr = GET_PTR( socket_xp ); 1367 socket_cxy = GET_CXY( socket_xp ); 1368 1369 // get socket type 1370 socket_type = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->type )); 1371 socket_state = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->state )); 1372 1373 // skip TCP socket 1374 if( socket_type == SOCK_STREAM ) continue; 1375 1376 // get relevant info from socket descriptor 1377 local_addr = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->local_addr )); 1378 remote_addr = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->remote_addr )); 1379 local_port = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->local_port )); 1380 remote_port = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->remote_port )); 1381 1382 // compute matching 1383 bool_t local_match = (local_addr == pkt_dst_addr) && 1384 (local_port == pkt_dst_port); 1385 1386 bool_t remote_match = (remote_addr == pkt_src_addr) && 1387 (remote_port == pkt_src_port); 1388 1389 if (socket_state == UDP_STATE_CONNECT ) match_socket = local_match && remote_match; 1390 else match_socket = local_match; 1391 1392 // exit loop when socket found 1393 if( match_socket ) break; 1394 } 1395 1396 // release the lock protecting the sockets list 1397 remote_busylock_release( lock_xp ); 1398 1399 // discard unexpected packet 1400 if( match_socket == false ) return; 1401 1402 // build extended 
pointers on various socket fields 1403 socket_rbuf_xp = XPTR( socket_cxy , &socket_ptr->rx_buf ); 1404 socket_lock_xp = XPTR( socket_cxy , &socket_ptr->lock ); 1405 socket_client_xp = XPTR( socket_cxy , &socket_ptr->rx_client ); 1406 1407 // take the lock protecting the socket 1408 remote_rwlock_wr_acquire( socket_lock_xp ); 1409 1410 // get status & space from rx_buf 1411 status = remote_buf_status( socket_rbuf_xp ); 1412 space = NIC_RX_BUF_SIZE - status; 1413 1414 // get client thread 1415 client_xp = hal_remote_l64( socket_client_xp ); 1416 1417 // get number of bytes in payload 1418 payload = k_length - UDP_HEAD_LEN; 1419 1420 // compute number of bytes to move : min (space , seg_payload) 1421 moved_bytes = ( space < payload ) ? space : payload; 1422 1423 // move payload from kernel buffer to socket rx_buf 1424 remote_buf_put_from_kernel( socket_rbuf_xp, 1425 k_buf + UDP_HEAD_LEN, 1426 moved_bytes ); 1427 1428 // unblock client thread if registered 1429 if( client_xp != XPTR_NULL ) 1430 { 1431 thread_unblock( client_xp , THREAD_BLOCKED_IO ); 1432 } 1433 1434 // release the lock protecting the socket 1435 remote_rwlock_wr_release( socket_lock_xp ); 1436 1437 } // end dev_nic_rx_handle_udp_packet() 1438 1439 /////////////////////////////////////////////////////////////////////////////////////////// 1440 // This static function is called by the dev_nic_rx_server() function to handle one RX 1441 // TCP segment contained in a kernel buffer defined by the <k_buf> & <k_length> arguments. 1442 // It the received segment doesn't match an existing local socket, or is corrupted, 1443 // this faulty segment is discarded. 1444 /////////////////////////////////////////////////////////////////////////////////////////// 1445 // Implementation note: 1446 // 1) It checks the TCP checksum, and discard the corrupted segment. 
1447 // 2) It scans the list of sockets attached to the RX chdev, to find the socket 1448 // matching the TCP segment header, and discards the segment if no socket found. 1449 // 3) When a socket has been found, it takes the lock protecting the socket state, 1450 // because the socket is accessed by both the NIC_TX and NIC_RX server threads. 1451 // 4) Depending on the socket state, it handle the received segment, including the 1452 // SYN, FIN, ACK and RST flags. It updates the socket state when required, moves 1453 // data to the rx_buf when possible, and registers requests to the TX server 1454 // thread in the R2T queue attached to the socket, to insert control flags in the 1455 // TX stream, as required. 1456 // 5) Finally, it releases the lock protecting the socke and returns. 1457 /////////////////////////////////////////////////////////////////////////////////////////// 1458 // @ chdev : [in] local pointer on local NIC_RX chdev descriptor. 1459 // @ k_buf : [in] pointer on the TCP packet in local kernel buffer. 1460 // @ k_length : [in] number of bytes in buffer (including TCP header). 1461 // @ seg_src_addr : [in] source IP address (from IP packet header). 1462 // @ seg_dst_addr : [in] destination IP address (from IP packet header). 
1463 /////////////////////////////////////////////////////////////////////////////////////////// 1464 static void dev_nic_rx_handle_tcp_segment( chdev_t * chdev, 1465 uint8_t * k_buf, 1466 uint32_t k_length, 1467 uint32_t seg_src_addr, 1468 uint32_t seg_dst_addr ) 1469 { 1470 xptr_t root_xp; // extended pointer on attached sockets list root 1471 xptr_t lock_xp; // extended pointer on chdev lock 1472 xptr_t iter_xp; // iterator for these queues 1473 bool_t match_socket; // true if socket found 1474 xptr_t socket_xp; // extended pointer on matching socket descriptor 1475 cxy_t socket_cxy; 1476 socket_t * socket_ptr; 1477 uint32_t local_addr; // local IP address from socket 1478 uint32_t local_port; // local port from socket 1479 uint32_t remote_addr; // remote IP address from socket 1480 uint32_t remote_port; // remote port from socket 1481 uint32_t socket_state; // socket state 1482 uint32_t socket_type; // socket type 1483 uint32_t socket_tx_nxt; // next byte to send in TX stream 1484 uint32_t socket_tx_una; // first unacknowledged byte in TX stream 1485 uint32_t socket_rx_nxt; // next expected byte in RX stream 1486 uint32_t socket_rx_wnd; // current window value in RX stream 1487 xptr_t socket_lock_xp; // extended pointer on lock protecting socket state 1488 xptr_t socket_rx_buf_xp; // extended pointer on socket rx_buf 1489 xptr_t socket_r2tq_xp; // extended pointer on socket r2t queue 1490 xptr_t socket_client_xp; // extended pointer on socket rx_client thread 1491 uint16_t checksum; // computed TCP segment chechsum 1492 1493 // build extended pointer on xlist of all sockets attached to NIC_RX chdev 1494 root_xp = XPTR( local_cxy , &chdev->wait_root ); 1495 lock_xp = XPTR( local_cxy , &chdev->wait_lock ); 1496 1497 // get relevant infos from TCP segment header 1498 uint32_t seg_src_port = ((uint32_t)k_buf[0] << 8) | (uint32_t)k_buf[1]; 1499 uint32_t seg_dst_port = ((uint32_t)k_buf[2] << 8) | (uint32_t)k_buf[3]; 1500 1501 uint32_t seg_seq_num = 
((uint32_t)k_buf[4] << 24) | 1502 ((uint32_t)k_buf[5] << 16) | 1503 ((uint32_t)k_buf[6] << 8) | 1504 ((uint32_t)k_buf[7] ); 1505 1506 uint32_t seg_ack_num = ((uint32_t)k_buf[8] << 24) | 1507 ((uint32_t)k_buf[9] << 16) | 1508 ((uint32_t)k_buf[10] << 8) | 1509 ((uint32_t)k_buf[11] ); 1510 1511 uint8_t seg_hlen = k_buf[12] >> 2; // TCP header length in bytes 1512 1513 uint8_t seg_flags = k_buf[13]; 1514 1515 bool_t seg_ack_set = ((seg_flags & TCP_FLAG_ACK) != 0); 1516 bool_t seg_syn_set = ((seg_flags & TCP_FLAG_SYN) != 0); 1517 bool_t seg_fin_set = ((seg_flags & TCP_FLAG_FIN) != 0); 1518 bool_t seg_rst_set = ((seg_flags & TCP_FLAG_RST) != 0); 1519 1520 uint16_t seg_window = ((uint32_t)k_buf[14] << 8) | (uint32_t)k_buf[15]; 1521 1522 uint16_t seg_checksum = ((uint32_t)k_buf[16] << 8) | (uint32_t)k_buf[17]; 1523 1524 uint32_t seg_payload = k_length - seg_hlen; // number of bytes in payload 1525 1526 // 1. compute TCP checksum 1527 checksum = dev_nic_tcp_checksum( k_buf, 1528 k_length, 1529 seg_src_addr, 1530 seg_dst_addr ); 1531 1532 // discard segment if corrupted 1533 if( seg_checksum != checksum ) return; 1534 1535 match_socket = false; 1536 1537 // take the lock protecting the list of sockets 1538 remote_busylock_acquire( lock_xp ); 1539 1540 // 2. 
scan list of sockets to find a matching socket 1541 XLIST_FOREACH( root_xp , iter_xp ) 1542 { 1543 // get socket cluster and local pointer 1544 socket_xp = XLIST_ELEMENT( iter_xp , socket_t , rx_list ); 1545 socket_ptr = GET_PTR( socket_xp ); 1546 socket_cxy = GET_CXY( socket_xp ); 1547 1548 // get socket type and state 1549 socket_type = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->type )); 1550 socket_state = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->state )); 1551 1552 // skip UDP socket 1553 if( socket_type == SOCK_DGRAM ) continue; 1554 1555 // get relevant socket infos for matching 1556 local_addr = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->local_addr )); 1557 remote_addr = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->remote_addr )); 1558 local_port = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->local_port )); 1559 remote_port = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->remote_port )); 1560 1561 // compute matching condition 1562 // (in LISTEN state, remote_port and remote_addr can be unspecified) 1563 if( socket_state == TCP_STATE_LISTEN ) 1564 { 1565 match_socket = (local_addr == seg_dst_addr) && 1566 (local_port == seg_dst_port) ; 1567 } 1568 else 1569 { 1570 match_socket = (local_addr == seg_dst_addr) && 1571 (local_port == seg_dst_port) && 1572 (remote_addr == seg_src_addr) && 1573 (remote_port == seg_src_port) ; 1574 } 1575 1576 // exit loop if matching 1577 if( match_socket ) break; 1578 1579 } // end loop on sockets 1580 1581 // release the lock protecting the list of sockets 1582 remote_busylock_release( lock_xp ); 1583 1584 // discard segment if no matching socket found 1585 if( match_socket == false ) return; 1586 1587 // From here the actions depend on both the socket state, 1588 // and the received segment flags 1589 // - update socket state, 1590 // - move data to rx_buf, 1591 // - make a R2T request when required 1592 1593 // build extended pointers on various socket fields 1594 socket_lock_xp = XPTR( socket_cxy , 
&socket_ptr->lock ); 1595 socket_rx_buf_xp = XPTR( socket_cxy , &socket_ptr->rx_buf ); 1596 socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq ); 1597 socket_client_xp = XPTR( socket_cxy , &socket_ptr->rx_client ); 1598 1599 // 3. take the lock protecting the matching socket 1600 remote_rwlock_wr_acquire( socket_lock_xp ); 1601 1602 // get relevant socket infos from socket descriptor 1603 socket_state = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->state )); 1604 socket_rx_nxt = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->rx_nxt )); 1605 socket_rx_wnd = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->rx_wnd )); 1606 socket_tx_una = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_una )); 1607 socket_tx_nxt = hal_remote_l32(XPTR( socket_cxy , &socket_ptr->tx_nxt )); 1608 1609 switch( socket_state ) 1610 { 1611 ////////////////////// 1612 case TCP_STATE_LISTEN: 1613 { 1614 // [1] discard segment if RST flag 1615 if( seg_rst_set ) return; 1616 1617 // [2] send a RST & discard segment if ACK flag 1618 if( seg_ack_set ) 1619 { 1620 // set socket.tx_nxt to seg_ack_num 1621 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_nxt ), 1622 seg_ack_num ); 1623 1624 // make RST request to R2T queue 1625 dev_nic_rx_put_r2t_request( socket_r2tq_xp, 1626 TCP_FLAG_RST ); 1627 // discard segment 1628 break; 1629 } 1630 1631 // [3] handle SYN flag 1632 if( seg_syn_set ) 1633 { 1634 // set socket.rx_nxt to seg_seq_num + 1 1635 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 1636 seg_seq_num + 1 ); 1637 1638 // set socket.tx_nxt to ISS 1639 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_nxt ), 1640 TCP_ISS ); 1641 1642 // set socket.rx_irs to seg_seq_num 1643 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_irs ), 1644 seg_seq_num + 1 ); 1645 1646 // make SYN.ACK request to R2T queue 1647 dev_nic_rx_put_r2t_request( socket_r2tq_xp, 1648 TCP_FLAG_SYN | TCP_FLAG_ACK ); 1649 1650 // set socket.tx_nxt to ISS + 1 1651 hal_remote_s32( XPTR( socket_cxy , 
&socket_ptr->tx_nxt ), 1652 TCP_ISS + 1 ); 1653 1654 // set socket.tx_una to ISS 1655 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_una ), 1656 TCP_ISS ); 1657 1658 // update socket.state 1659 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1660 TCP_STATE_SYN_RCVD ); 1661 1662 // update socket.remote_addr 1663 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->remote_addr ), 1664 seg_src_addr ); 1665 1666 // update socket.remote_port 1667 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->remote_port ), 1668 seg_src_port ); 1669 } 1670 break; 1671 } 1672 //////////////////////// 1673 case TCP_STATE_SYN_SENT: 1674 { 1675 // [1] check ACK flag 1676 if( seg_ack_set ) 1677 { 1678 if( seg_ack_num != TCP_ISS + 1 ) // ACK not acceptable 1679 { 1680 // discard segment if RST 1681 if( seg_rst_set ) break; 1682 1683 // set socket.tx_nxt to seg_ack_num 1684 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_nxt ), 1685 seg_ack_num ); 1686 1687 // make an RST request to R2T queue 1688 dev_nic_rx_put_r2t_request( socket_r2tq_xp, 1689 TCP_FLAG_RST ); 1690 // discard segment 1691 break; 1692 } 1693 } 1694 1695 // [2] check RST flag 1696 if( seg_rst_set ) 1697 { 1698 // TODO signal "error: connection reset" to user 1699 1700 // update socket.state 1701 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1702 TCP_STATE_BOUND ); 1703 1704 // discard segment 1705 break; 1706 } 1707 1708 // [3] handle SYN flag when (no ACK or acceptable ACK, and no RST) 1709 if( seg_syn_set ) 1710 { 1711 // TODO Ne faut-il pas tester seg_seq_num ? 
1712 1713 if( seg_ack_set ) // received both SYN and ACK 1714 { 1715 // set socket.rx_nxt to seg_seq_num + 1 1716 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 1717 seg_seq_num + 1 ); 1718 1719 // set socket.tx_una to seg_ack_num 1720 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_una ), 1721 seg_ack_num ); 1722 1723 // set socket.rx_irs to seg_seq_num 1724 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_irs ), 1725 seg_seq_num ); 1726 1727 // update socket.state 1728 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1729 TCP_STATE_ESTAB ); 1730 1731 // make an ACK request to R2T queue 1732 dev_nic_rx_put_r2t_request( socket_r2tq_xp, 1733 TCP_FLAG_ACK ); 1734 } 1735 else // received SYN without ACK 1736 { 1737 // update socket.state 1738 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1739 TCP_STATE_SYN_RCVD ); 1740 1741 // set socket.tx_nxt to ISS 1742 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_nxt ), 1743 TCP_ISS ); 1744 1745 // make a SYN.ACK request to R2T queue 1746 dev_nic_rx_put_r2t_request( socket_r2tq_xp, 1747 TCP_FLAG_SYN | TCP_FLAG_ACK ); 1748 } 1749 } 1750 break; 1751 } 1752 //////////////////////// 1753 case TCP_STATE_SYN_RCVD: 1754 case TCP_STATE_ESTAB: 1755 case TCP_STATE_FIN_WAIT1: 1756 case TCP_STATE_FIN_WAIT2: 1757 case TCP_STATE_CLOSE_WAIT: 1758 case TCP_STATE_CLOSING: 1759 case TCP_STATE_LAST_ACK: 1760 case TCP_STATE_TIME_WAIT: 1761 { 1762 // [1] check sequence number 1763 1764 // compute min & max acceptable sequence numbers 1765 uint32_t seq_min = socket_rx_nxt; 1766 uint32_t seq_max = socket_rx_nxt + socket_rx_wnd - 1; 1767 1768 // compute sequence number for last byte in segment 1769 uint32_t seg_seq_last = seg_seq_num + seg_payload - 1; 1770 1771 if( (seg_seq_num != socket_rx_nxt) || // out_of_order 1772 (is_in_window( seg_seq_last, 1773 seq_min, 1774 seq_max ) == false) ) // out_of_window 1775 { 1776 // discard segment 1777 return; 1778 } 1779 1780 // [2] handle RST flag 1781 1782 if( seg_rst_set 
) 1783 { 1784 if( socket_state == TCP_STATE_SYN_RCVD ) 1785 { 1786 // TODO unblock all clients threads with "reset" responses 1787 } 1788 else if( (socket_state == TCP_STATE_ESTAB ) || 1789 (socket_state == TCP_STATE_FIN_WAIT1 ) || 1790 (socket_state == TCP_STATE_FIN_WAIT2 ) || 1791 (socket_state == TCP_STATE_CLOSE_WAIT) ) 1792 { 1793 // TODO all pending send & received commands 1794 // must receive "reset" responses 1795 1796 // TODO destroy the socket 1797 } 1798 else // all other states 1799 { 1800 1801 1802 } 1803 1804 // [3] handle security & precedence TODO ... someday 1805 1806 // [4] handle SYN flag 1807 1808 if( seg_syn_set ) // received SYN 1809 { 1810 // TODO signal error to user 1811 1812 // make an RST request to R2T queue 1813 dev_nic_rx_put_r2t_request( socket_r2tq_xp, 1814 TCP_FLAG_RST ); 1815 // update socket state 1816 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1817 TCP_STATE_BOUND ); 1818 } 1819 1820 // [5] handle ACK flag 1821 1822 if( seg_ack_set == false ) 1823 { 1824 // discard segment when ACK not set 1825 break; 1826 } 1827 else if( socket_state == TCP_STATE_SYN_RCVD ) 1828 { 1829 if( is_in_window( seg_ack_num , socket_tx_una , socket_tx_nxt ) ) 1830 { 1831 // update socket.state to ESTAB 1832 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1833 TCP_STATE_ESTAB ); 1834 } 1835 else // unacceptable ACK 1836 { 1837 // set socket.tx_nxt to seg_ack_num 1838 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 1839 seg_ack_num ); 1840 1841 // make an RST request to R2T queue 1842 dev_nic_rx_put_r2t_request( socket_r2tq_xp, 1843 TCP_FLAG_RST ); 1844 } 1845 } 1846 else if( (socket_state == TCP_STATE_ESTAB) || 1847 (socket_state == TCP_STATE_FIN_WAIT1) || 1848 (socket_state == TCP_STATE_FIN_WAIT1) || 1849 (socket_state == TCP_STATE_CLOSE_WAIT) || 1850 (socket_state == TCP_STATE_CLOSING) ) 1851 { 1852 if( is_in_window( seg_ack_num + 1 , socket_tx_una , socket_tx_nxt ) ) 1853 { 1854 // update socket.tx_una 1855 
hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_una ), 1856 seg_ack_num ); 1857 1858 // update socket.tx_wnd 1859 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_wnd ), 1860 seg_window ); 1861 } 1862 else // unacceptable ACK 1863 { 1864 // discard segment 1865 break; 1866 } 1867 1868 // specific for FIN_WAIT1 1869 if( socket_state == TCP_STATE_FIN_WAIT1 ) 1870 { 1871 if( seg_fin_set ) 1872 { 1873 // update socket.state 1874 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1875 TCP_STATE_FIN_WAIT2 ); 1876 } 1877 } 1878 1879 // specific for CLOSING 1880 if( socket_state == TCP_STATE_CLOSING ) 1881 { 1882 if( seg_ack_set ) 1883 { 1884 // update socket.state 1885 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1886 TCP_STATE_TIME_WAIT ); 1887 } 1888 else 1889 { 1890 // discard segment 1891 break; 1892 } 1893 } 1894 } 1895 else if( socket_state == TCP_STATE_LAST_ACK ) 1896 { 1897 if( seg_ack_set ) 1898 { 1899 // update socket.state 1900 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1901 TCP_STATE_TIME_WAIT ); 1902 } 1903 1904 } 1905 1906 // [6] handle URG flag TODO ... someday 1907 1908 // [7] Move DATA to rx_buf and unblock client thread 1909 1910 if( seg_payload ) 1911 { 1912 if( (socket_state == TCP_STATE_ESTAB) || 1913 (socket_state == TCP_STATE_FIN_WAIT1) || 1914 (socket_state == TCP_STATE_FIN_WAIT2) ) 1915 { 1916 // get number of bytes already stored in rx_buf 1917 uint32_t status = remote_buf_status( socket_rx_buf_xp ); 1918 1919 // compute empty space in rx_buf 1920 uint32_t space = NIC_RX_BUF_SIZE - status; 1921 1922 // compute number of bytes to move : min (space , seg_payload) 1923 uint32_t nbytes = ( space < seg_payload ) ? 
space : seg_payload; 1924 1925 // move payload from k_buf to rx_buf 1926 remote_buf_put_from_kernel( socket_rx_buf_xp, 1927 k_buf + seg_hlen, 1928 nbytes ); 1929 // update socket.rx_nxt 1930 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 1931 socket_rx_nxt + nbytes ); 1932 1933 // update socket.rx_wnd 1934 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_wnd ), 1935 socket_rx_wnd - nbytes ); 1936 1937 // make an ACK request to R2T queue 1938 dev_nic_rx_put_r2t_request( socket_r2tq_xp, 1939 TCP_FLAG_ACK ); 1940 1941 // get extended pointer on rx_client thread 1942 xptr_t client_xp = hal_remote_l64( socket_client_xp ); 1943 1944 // unblock client thread 1945 if( client_xp != XPTR_NULL ) 1946 { 1947 thread_unblock( client_xp , THREAD_BLOCKED_IO ); 1948 } 1949 } 1950 } 1951 1952 // [8] handle FIN flag 1953 1954 if( seg_fin_set ) 1955 { 1956 if( (socket_state == TCP_STATE_UNBOUND) || 1957 (socket_state == TCP_STATE_BOUND) || 1958 (socket_state == TCP_STATE_LISTEN) || 1959 (socket_state == TCP_STATE_SYN_SENT) ) 1960 { 1961 // discard segment 1962 break; 1963 } 1964 else // all other states 1965 { 1966 // TODO signal "connection closing" 1967 1968 // make an ACK request to R2T queue 1969 dev_nic_rx_put_r2t_request( socket_r2tq_xp, 1970 TCP_FLAG_ACK ); 1971 1972 // increment socket.rx_nxt 1973 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 1974 socket_rx_nxt + 1 ); 1975 1976 if( (socket_state == TCP_STATE_SYN_RCVD) || 1977 (socket_state == TCP_STATE_ESTAB) ) 1978 { 1979 // update socket.state 1980 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1981 TCP_STATE_TIME_WAIT ); 1982 } 1983 else if( socket_state == TCP_STATE_FIN_WAIT1 ) 1984 { 1985 if( seg_ack_set ) 1986 { 1987 // TODO start "time-wait" timer / turn off others timers 1988 1989 // update socket.state 1990 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 1991 TCP_STATE_TIME_WAIT ); 1992 } 1993 else 1994 { 1995 // update socket.state 1996 hal_remote_s32( XPTR( socket_cxy , 
&socket_ptr->state ), 1997 TCP_STATE_CLOSING ); 1998 } 1999 } 2000 else if( socket_state == TCP_STATE_FIN_WAIT2 ) 2001 { 2002 // TODO start "time-wait" timer / turn off other timers 2003 2004 // update socket.state 2005 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 2006 TCP_STATE_TIME_WAIT ); 2007 } 2008 else if( socket_state == TCP_STATE_TIME_WAIT ) 2009 { 2010 // TODO restart "time_wait" timer 2011 } 2012 } 2013 } // end if FIN 2014 } // end case sockets synchronized 2015 } // end switch socket state 2016 2017 // release the lock protecting socket 2018 remote_rwlock_wr_acquire( socket_lock_xp ); 2019 2020 } // end socket found 2021 2022 } // end dev_nic_rx_handle_tcp_segment() 2023 2024 2025 ///////////////////////////////////////// 2026 void dev_nic_rx_server( chdev_t * chdev ) 2027 { 2028 uint8_t k_buf[2048]; // kernel buffer for one ETH/IP/UDP packet 2029 2030 uint32_t pkt_src_addr; // packet source IP address 2031 uint32_t pkt_dst_addr; // packet destination IP address 2032 uint32_t trsp_protocol; // transport protocol (TCP / UDP) 2033 uint32_t eth_length; // size of Ethernet packet (bytes) 2034 uint32_t ip_length; // size of IP packet in bytes 2035 uint32_t nic_queue_readable; // NIC_RX queue non empty when true 2036 error_t error; 2037 2038 thread_t * this = CURRENT_THREAD; 2039 2040 // check chdev direction and type 2041 assert( (chdev->func == DEV_FUNC_NIC) && (chdev->is_rx == true) , 2042 "illegal chdev type or direction" ); 177 2043 178 2044 // check thread can yield 179 assert( (thread_ptr->busylocks == 0), 180 "cannot yield : busylocks = %d\n", thread_ptr->busylocks ); 2045 assert( (this->busylocks == 0), 2046 "cannot yield : busylocks = %d\n", this->busylocks ); 2047 2048 while( 1 ) 2049 { 2050 // check NIC_RX_QUEUE readable 2051 error = dev_nic_rx_queue_readable( chdev, 2052 &nic_queue_readable ); 2053 if( error ) 2054 { 2055 printk("\n[PANIC] in %s : cannot access NIC_TX[%d] queue\n", 2056 __FUNCTION__, chdev->channel ); 2057 } 2058 2059 
if( nic_queue_readable ) // NIC_TX_QUEUE non empty 2060 { 2061 // moves one Ethernet packet to kernel buffer 2062 error = dev_nic_rx_move_packet( chdev, 2063 k_buf, 2064 ð_length ); 2065 if( error ) 2066 { 2067 printk("\n[PANIC] in %s : cannot read the NIC_TX[%d] queue\n", 2068 __FUNCTION__, chdev->channel ); 2069 } 2070 2071 // analyse the ETH header 2072 error = dev_nic_rx_check_eth( k_buf, 2073 &ip_length ); 2074 2075 // discard packet if error reported by Ethernet layer 2076 if( error ) continue; 2077 2078 // analyse the IP header 2079 error = dev_nic_rx_check_ip( k_buf + ETH_HEAD_LEN, 2080 ip_length, 2081 &pkt_src_addr, 2082 &pkt_dst_addr, 2083 &trsp_protocol ); 2084 2085 // discard packet if error reported by IP layer 2086 if( error ) continue; 2087 2088 // call relevant transport protocol 2089 if( trsp_protocol == PROTOCOL_UDP ) 2090 { 2091 dev_nic_rx_handle_udp_packet( chdev, 2092 k_buf + ETH_HEAD_LEN + IP_HEAD_LEN, 2093 ip_length - IP_HEAD_LEN, 2094 pkt_src_addr, 2095 pkt_dst_addr ); 2096 } 2097 else if ( trsp_protocol == PROTOCOL_TCP) 2098 { 2099 dev_nic_rx_handle_tcp_segment( chdev, 2100 k_buf + ETH_HEAD_LEN + IP_HEAD_LEN, 2101 ip_length - IP_HEAD_LEN, 2102 pkt_src_addr, 2103 pkt_dst_addr ); 2104 } 2105 } 2106 else // block and deschedule if NIC_RX_QUEUE empty 2107 { 2108 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_ISR ); 2109 sched_yield( "waiting RX client" ); 2110 } 2111 2112 } // end of while loop 2113 2114 } // end dev_nic_rx_server() 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 /////////////////////////////////////////////////////////////////////////////////////////// 2126 // Functions used by the NIC_TX server thread 2127 /////////////////////////////////////////////////////////////////////////////////////////// 2128 2129 2130 /////////////////////////////////////////////////////////////////////////////////////////// 2131 // These static functions are called by the NIC_TX server thread to report the 2132 // completion 
(success or error) of a TX command. 2133 // - it print an error message in case of error. 2134 // - it updates the "tx_error" field in socket descriptor. 2135 // - it unblocks the client thread. 2136 /////////////////////////////////////////////////////////////////////////////////////////// 2137 // @ socket_xp : [in] extended pointer on socket 2138 // @ cmd_type : [in] SOCKET_TX_CONNECT / SOCKET_TX_SEND / SOCKET_TX_CLOSE 2139 // @ socket_state : [in] current socket state 2140 /////////////////////////////////////////////////////////////////////////////////////////// 2141 static void dev_nic_tx_report_error( xptr_t socket_xp, 2142 uint32_t cmd_type, 2143 uint32_t socket_state ) 2144 { 2145 printk("\n[ERROR] in %s : command %s in %s state\n", 2146 __FUNCTION__, socket_cmd_str(cmd_type), socket_state_str(socket_state) ); 2147 2148 // get socket thread cluster and local pointer 2149 socket_t * socket_ptr = GET_PTR( socket_xp ); 2150 cxy_t socket_cxy = GET_CXY( socket_xp ); 2151 2152 // set tx_error field in socket descriptor 2153 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_error ) , 1 ); 2154 2155 // get extended point on client thread 2156 xptr_t client_xp = hal_remote_l64( XPTR( socket_cxy , &socket_ptr->tx_client )); 2157 2158 // unblock the client thread 2159 thread_unblock( client_xp , THREAD_BLOCKED_IO ); 2160 } 2161 2162 //////////////////////////////////////////////////////////// 2163 static void dev_nic_tx_report_success( xptr_t socket_xp ) 2164 { 2165 // get socket thread cluster and local pointer 2166 socket_t * socket_ptr = GET_PTR( socket_xp ); 2167 cxy_t socket_cxy = GET_CXY( socket_xp ); 2168 2169 // set tx_error field in socket descriptor 2170 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_error ) , 0 ); 2171 2172 // get extended point on client thread 2173 xptr_t client_xp = hal_remote_l64( XPTR( socket_cxy , &socket_ptr->tx_client )); 2174 2175 // unblock the client thread 2176 thread_unblock( client_xp , THREAD_BLOCKED_IO ); 2177 } 2178 
2179 2180 2181 2182 2183 /////////////////////////////////////////////////////////////////////////////////////////// 2184 // This static function is called by the dev_nic_tx_server() function. 2185 // It calls directly the NIC driver (WRITABLE command) and returns the status 2186 // of the NIC_TX queue identified by the <chdev> argument. 2187 // in the <writable> buffer. 2188 /////////////////////////////////////////////////////////////////////////////////////////// 2189 // @ chdev : [in] local pointer on NIC_TX chdev. 2190 // @ length : [in] packet length in bytes. 2191 // @ writable : [out] zero if queue full. 2192 // @ returns 0 if success / returns -1 if failure in accessing NIC device. 2193 /////////////////////////////////////////////////////////////////////////////////////////// 2194 error_t dev_nic_tx_queue_writable( chdev_t * chdev, 2195 uint32_t length, 2196 uint32_t * writable ) 2197 { 2198 thread_t * this = CURRENT_THREAD; 2199 2200 // initialize READABLE command in thread descriptor 2201 this->nic_cmd.dev_xp = XPTR( local_cxy , chdev ); 2202 this->nic_cmd.type = NIC_CMD_WRITABLE; 2203 this->nic_cmd.length = length; 2204 2205 // call driver to test writable 2206 chdev->cmd( XPTR( local_cxy , this ) ); 2207 2208 // return status 2209 *writable = this->nic_cmd.status; 2210 2211 // return error 2212 return this->nic_cmd.error; 2213 2214 } // end dev_nic_tx_queue_writable 2215 2216 /////////////////////////////////////////////////////////////////////////////////////////// 2217 // This static function is called by the dev_nic_tx_server() function. 2218 // It moves one ETH/IP/UDP packet from the kernel buffer identified by the <buffer> and 2219 // <length> arguments to the NIC_TX_QUEUE identified the <chdev> argument. 2220 // It calls directly the NIC driver, without registering in a waiting queue, because 2221 // only this NIC_TX server thread can access this NIC_TX_QUEUE. 
2222 // 1) It checks NIC_TX_QUEUE status in a while loop, using the NIC_CMD_WRITABLE command. 2223 // As long as the queue is not writable, it blocks and deschedules. It is re-activated 2224 // by the NIC-TX ISR as soon as the queue changes status. 2225 // 2) When the queue is writable, it put the ETH/IP/UDP packet into the NIC_TX_QUEUE, 2226 // using the driver NIC_CMD_WRITE command. 2227 // Both commands are successively registered in this NIC-TX server thread descriptor 2228 // to be passed to the driver. 2229 /////////////////////////////////////////////////////////////////////////////////////////// 2230 // @ chdev : [in] local pointer on NIC_TX chdev. 2231 // @ buffer : [in] pointer on a local kernel buffer (2K bytes). 2232 // @ length : [in] actual Ethernet packet length in bytes. 2233 /////////////////////////////////////////////////////////////////////////////////////////// 2234 void dev_nic_tx_move_packet( chdev_t * chdev, 2235 uint8_t * buffer, 2236 uint32_t length ) 2237 { 2238 error_t error; 2239 uint32_t writable; 2240 2241 thread_t * this = CURRENT_THREAD; 2242 2243 // get extended pointers on server tread and chdev 2244 xptr_t thread_xp = XPTR( local_cxy , this ); 2245 xptr_t chdev_xp = XPTR( local_cxy , chdev ); 2246 2247 // get local pointer on core running this server thead 2248 core_t * core = this->core; 2249 2250 // check thread can yield 2251 assert( (this->busylocks == 0), 2252 "cannot yield : busylocks = %d\n", this->busylocks ); 181 2253 182 2254 #if DEBUG_DEV_NIC_RX 183 2255 uint32_t cycle = (uint32_t)hal_get_cycles(); 184 2256 if( DEBUG_DEV_NIC_RX < cycle ) 185 printk("\n[ DBG] %s : thread %x enters for packet %x in cluster %x\n",186 __FUNCTION__ , thread_ptr , pkd , local_cxy);2257 printk("\n[%s] thread[%x,%x] enters for packet %x / cycle %d\n", 2258 __FUNCTION__, this->process->pid, this->trdid, pkd, cycle ); 187 2259 #endif 188 2260 189 // get pointer on NIC-TX chdev descriptor 190 uint32_t channel = thread_ptr->chdev->channel; 191 
xptr_t dev_xp = chdev_dir.nic_tx[channel]; 192 cxy_t dev_cxy = GET_CXY( dev_xp ); 193 chdev_t * dev_ptr = (chdev_t *)GET_PTR( dev_xp ); 194 195 assert( (dev_xp != XPTR_NULL) , "undefined NIC chdev descriptor" ); 196 197 assert( (dev_cxy == local_cxy) , " chdev must be local" ); 198 199 // initialize command in thread descriptor 200 thread_ptr->nic_cmd.dev_xp = dev_xp; 201 202 // call driver to test writable 203 thread_ptr->nic_cmd.cmd = NIC_CMD_WRITABLE; 204 dev_ptr->cmd( thread_xp ); 205 206 // check error 207 error = thread_ptr->nic_cmd.error; 208 if( error ) return error; 209 210 // block and deschedule if queue non writable 211 if( thread_ptr->nic_cmd.status == false ) 212 { 213 // enable NIC-TX IRQ 214 dev_pic_enable_irq( core->lid ,dev_xp ); 215 216 // block client thread on THREAD_BLOCKED I/O condition 217 thread_block( XPTR( local_cxy , thread_ptr ) , THREAD_BLOCKED_IO ); 218 219 // deschedule client thread 220 sched_yield("client blocked on I/O"); 221 222 // disable NIC-TX IRQ 223 dev_pic_disable_irq( core->lid , dev_xp ); 224 } 225 226 // call driver for actual write 227 thread_ptr->nic_cmd.cmd = NIC_CMD_WRITE; 228 thread_ptr->nic_cmd.buffer = pkd->buffer; 229 thread_ptr->nic_cmd.length = pkd->length; 230 dev_ptr->cmd( thread_xp ); 231 232 // check error 233 error = thread_ptr->nic_cmd.error; 234 if( error ) return error; 2261 // check NIC_TX_QUEUE writable 2262 while( 1 ) 2263 { 2264 error = dev_nic_tx_queue_writable( chdev, 2265 length, 2266 &writable ); 2267 if( error ) 2268 { 2269 printk("\n[PANIC] in %s : cannot access NIC_TX queue\n", __FUNCTION__ ); 2270 return; 2271 } 2272 2273 if( writable == 0 ) // block & deschedule if non writable 2274 { 2275 // enable NIC-TX IRQ 2276 dev_pic_enable_irq( core->lid , chdev_xp ); 2277 2278 // block TX server thread 2279 thread_block( thread_xp , THREAD_BLOCKED_ISR ); 2280 2281 // deschedule TX server thread 2282 sched_yield("client blocked on NIC_TX queue full"); 2283 2284 // disable NIC-TX IRQ 2285 
dev_pic_disable_irq( core->lid , chdev_xp ); 2286 } 2287 else // exit loop if writable 2288 { 2289 break; 2290 } 2291 } 2292 2293 // initialize WRITE command in server thread descriptor 2294 this->nic_cmd.dev_xp = chdev_xp; 2295 this->nic_cmd.type = NIC_CMD_WRITE; 2296 this->nic_cmd.buffer = buffer; 2297 this->nic_cmd.length = length; 2298 2299 // call driver to move packet 2300 chdev->cmd( thread_xp ); 235 2301 236 2302 #if DEBUG_DEV_NIC_RX 237 2303 cycle = (uint32_t)hal_get_cycles(); 238 2304 if( DEBUG_DEV_NIC_RX < cycle ) 239 printk("\n[ DBG] %s : thread %x exit for packet %x in cluster%x\n",240 __FUNCTION__ , th read_ptr , pkd , local_cxy);2305 printk("\n[%s] thread[%x,%x] exit for packet %x\n", 2306 __FUNCTION__ , this->process->pid, this->trdid , pkd ); 241 2307 #endif 242 2308 243 return 0; 244 } // end dev_nic_write() 245 246 247 2309 return; 2310 2311 } // end dev_nic_tx_move_packet() 2312 2313 /////////////////////////////////////////////////////////////////////////////////////////// 2314 // This static function is called by the dev_nic_tx_server() function to build an UDP 2315 // header in the kernel buffer defined by the <k_buf> arguement, as specified by the 2316 // <socket_xp> argument. The <length> argument defines the number of bytes in payload. 2317 // It set the "src_port", "dst_port", "total_length" and "checksum" fields in UDP header. 2318 // The payload must be previouly loaded in the pernel buffer. 2319 /////////////////////////////////////////////////////////////////////////////////////////// 2320 // @ k_buf : [in] pointer on first byte of UDP header in kernel buffer. 2321 // @ socket_xp : [in] extended pointer on socket. 2322 // @ length : [in] number of bytes in payload. 
2323 /////////////////////////////////////////////////////////////////////////////////////////// 2324 void dev_nic_tx_build_udp_header( uint8_t * k_buf, 2325 xptr_t socket_xp, 2326 uint32_t length ) 2327 { 2328 uint16_t checksum; // checksum value 2329 uint32_t total_length; // total UDP packet length 2330 uint32_t local_addr; // local IP address 2331 uint32_t remote_addr; // remote IP address 2332 uint32_t local_port; // local port 2333 uint32_t remote_port; // remote port 2334 2335 // get socket cluster an local pointer 2336 socket_t * socket_ptr = GET_PTR( socket_xp ); 2337 cxy_t socket_cxy = GET_CXY( socket_xp ); 2338 2339 // get relevant infos from socket 2340 local_addr = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->local_addr )); 2341 remote_addr = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->remote_addr )); 2342 local_port = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->local_port )); 2343 remote_port = hal_remote_l32(XPTR(socket_cxy , &socket_ptr->remote_port )); 2344 2345 // compute UDP packet total length 2346 total_length = length + UDP_HEAD_LEN; 2347 2348 // set src_port and dst_port in header 2349 k_buf[0] = local_port >> 8; 2350 k_buf[1] = local_port; 2351 k_buf[2] = remote_port >> 8; 2352 k_buf[3] = remote_port; 2353 2354 // set packet length in header 2355 k_buf[4] = total_length >> 8; 2356 k_buf[5] = total_length; 2357 2358 // compute UDP packet checksum 2359 checksum = dev_nic_udp_checksum( k_buf , total_length ); 2360 2361 // set checksum 2362 k_buf[6] = checksum >> 8; 2363 k_buf[7] = checksum; 2364 2365 } // end dev_nic_tx_build_udp_header() 2366 2367 /////////////////////////////////////////////////////////////////////////////////////////// 2368 // This static function is called by the dev_nic_tx_server() function. 2369 // It builds a TCP header in the kernel buffer defined by the <k_buf> argument. 2370 // The payload must have been previouly registered in this buffer. 
2371 // The "local_addr", "local_port", "remote_addr", "remote_port", seq_num", "ack_num", 2372 // and "window" fields are obtained from the <socket_xp> argument. 2373 // The <length> argument defines the number of bytes in payload, and the <flags> argument 2374 // defines the flags to be set in TCP header. 2375 /////////////////////////////////////////////////////////////////////////////////////////// 2376 // @ k_buf : [in] pointer on first byte of TCP header in kernel buffer. 2377 // @ length : [in] number of bytes in payload. 2378 // @ socket_xp : [in] extended pointer on socket. 2379 // @ flags : [in] flags to be set in TCP header. 2380 /////////////////////////////////////////////////////////////////////////////////////////// 2381 void dev_nic_tx_build_tcp_header( uint8_t * k_buf, 2382 uint32_t length, 2383 xptr_t socket_xp, 2384 uint8_t flags ) 2385 { 2386 uint16_t checksum; // global segment checksum 2387 uint32_t total_length; // total UDP packet length 2388 uint32_t src_addr; // local IP address 2389 uint32_t dst_addr; // remote IP address 2390 uint16_t src_port; // local port 2391 uint16_t dst_port; // remote port 2392 uint32_t seq_num; // first byte of segment in TX stream 2393 uint32_t ack_num; // next expected byte in RX stream 2394 uint16_t window; // window of accepted segments in RX stream 2395 2396 // get socket cluster an local pointer 2397 socket_t * sock_ptr = GET_PTR( socket_xp ); 2398 cxy_t sock_cxy = GET_CXY( socket_xp ); 2399 2400 // get relevant infos from socket 2401 src_addr = hal_remote_l32(XPTR( sock_cxy , &sock_ptr->local_addr )); 2402 dst_addr = hal_remote_l32(XPTR( sock_cxy , &sock_ptr->remote_addr )); 2403 src_port = hal_remote_l32(XPTR( sock_cxy , &sock_ptr->local_port )); 2404 dst_port = hal_remote_l32(XPTR( sock_cxy , &sock_ptr->remote_port )); 2405 seq_num = hal_remote_l32(XPTR( sock_cxy , &sock_ptr->tx_nxt )); 2406 ack_num = hal_remote_l32(XPTR( sock_cxy , &sock_ptr->rx_nxt )); 2407 window = hal_remote_l32(XPTR( sock_cxy , 
&sock_ptr->rx_wnd )); 2408 2409 // compute TCP segment total length 2410 total_length = length + TCP_HEAD_LEN; 2411 2412 // set "src_port" and "dst_port" 2413 k_buf[0] = src_port >> 8; 2414 k_buf[1] = src_port; 2415 k_buf[2] = dst_port >> 8; 2416 k_buf[3] = dst_port; 2417 2418 // set "seq_num" 2419 k_buf[4] = seq_num >> 24; 2420 k_buf[5] = seq_num >> 16; 2421 k_buf[6] = seq_num >> 8; 2422 k_buf[7] = seq_num; 2423 2424 // set "ack_num" 2425 k_buf[8] = ack_num >> 24; 2426 k_buf[9] = ack_num >> 16; 2427 k_buf[10] = ack_num >> 8; 2428 k_buf[11] = ack_num; 2429 2430 // set "hlen" 2431 k_buf[12] = 5; 2432 2433 // set "flags" 2434 k_buf[13] = flags & 0x3F; 2435 2436 // set "window" 2437 k_buf[14] = window >> 8; 2438 k_buf[15] = window; 2439 2440 // reset "checksum" 2441 k_buf[16] = 0; 2442 k_buf[17] = 0; 2443 2444 // set "urgent_ptr" 2445 k_buf[18] = 0; 2446 k_buf[19] = 0; 2447 2448 // compute TCP segment checksum 2449 checksum = dev_nic_tcp_checksum( k_buf, 2450 total_length, 2451 src_addr, 2452 dst_addr ); 2453 // set "checksum" 2454 k_buf[16] = checksum >> 8; 2455 k_buf[17] = checksum; 2456 2457 } // end dev_nic_tx_build_tcp_header() 2458 2459 2460 /////////////////////////////////////////////////////////////////////////////////////////// 2461 // This static function is called by the dev_nic_tx_server() function. 2462 // It builds the IP header in the 20 first bytes of <buffer>. 2463 /////////////////////////////////////////////////////////////////////////////////////////// 2464 // @ buffer : pointer on first byte of IP header in kernel buffer 2465 // @ src_addr : source IP address. 2466 // @ dst_addr : destination IP address. 2467 // @ length : number of bytes in IP packet payload. 
2468 /////////////////////////////////////////////////////////////////////////////////////////// 2469 void dev_nic_tx_build_ip_header( uint8_t * buffer, 2470 uint32_t src_addr, 2471 uint32_t dst_addr, 2472 uint16_t length ) 2473 { 2474 uint16_t hcs; 2475 2476 uint16_t total = length + IP_HEAD_LEN; 2477 2478 buffer[0] = 0x45; // IPV4 / IHL = 20 bytes 2479 buffer[1] = 0; // DSCP / ECN 2480 buffer[2] = total >> 8; 2481 buffer[3] = total; 2482 2483 buffer[4] = 0x40; // Don't Fragment 2484 buffer[5] = 0; 2485 buffer[6] = 0; 2486 buffer[7] = 0; 2487 2488 buffer[8] = 0xFF; // TTL 2489 buffer[9] = 0x11; // UDP protocol 2490 2491 buffer[12] = src_addr >> 24; 2492 buffer[13] = src_addr >> 16; 2493 buffer[14] = src_addr >> 8; 2494 buffer[15] = src_addr; 2495 2496 buffer[16] = dst_addr >> 24; 2497 buffer[17] = dst_addr >> 16; 2498 buffer[18] = dst_addr >> 8; 2499 buffer[19] = dst_addr; 2500 2501 // compute IP header checksum 2502 hcs = dev_nic_ip_checksum( buffer ); 2503 2504 // set checksum 2505 buffer[10] = hcs >> 8; 2506 buffer[11] = hcs; 2507 2508 } // end dev_nic_tx_build_ip_header 2509 2510 /////////////////////////////////////////////////////////////////////////////////////////// 2511 // This static function is called by the dev_nic_tx_server() function. 2512 // It builds the Ethernet header in the 14 first bytes of <buffer>. 2513 /////////////////////////////////////////////////////////////////////////////////////////// 2514 // @ buffer : pointer on first byte of Ethernet header in kernel buffer 2515 // @ src_mac_54 : two MSB bytes in source MAC address. 2516 // @ src_mac_32 : two MED bytes in source MAC address. 2517 // @ src_mac_10 : two LSB bytes in source MAC address. 2518 // @ dst_mac_54 : two MSB bytes in destination MAC address. 2519 // @ dst_mac_32 : two MED bytes in destination MAC address. 2520 // @ dst_mac_10 : two LSB bytes in destination MAC address. 2521 // @ length : number of bytes in Ethernet frame payload. 
2522 /////////////////////////////////////////////////////////////////////////////////////////// 2523 void dev_nic_tx_build_eth_header( uint8_t * buffer, 2524 uint16_t src_mac_54, 2525 uint16_t src_mac_32, 2526 uint16_t src_mac_10, 2527 uint16_t dst_mac_54, 2528 uint16_t dst_mac_32, 2529 uint16_t dst_mac_10, 2530 uint32_t length ) 2531 { 2532 buffer[0] = dst_mac_54 >> 8; 2533 buffer[1] = dst_mac_54; 2534 buffer[2] = dst_mac_32 >> 8; 2535 buffer[3] = dst_mac_32; 2536 buffer[4] = dst_mac_10 >> 8; 2537 buffer[5] = dst_mac_10; 2538 2539 buffer[6] = src_mac_54 >> 8; 2540 buffer[7] = src_mac_54; 2541 buffer[8] = src_mac_32 >> 8; 2542 buffer[9] = src_mac_32; 2543 buffer[10] = src_mac_10 >> 8; 2544 buffer[11] = src_mac_10; 2545 2546 buffer[12] = length >> 8; 2547 buffer[13] = length; 2548 2549 } // end dev_nic_tx_build_eth_header() 2550 2551 /////////////////////////////////////////////////////////////////////////////////////////// 2552 // This static function is called by the dev_nic_tx_server() function to handle one 2553 // TX command, or one R2T request, registered in the socket identified by the <socket_xp> 2554 // argument. If there is one valid command, or if the R2T queue is non empty (for a TCP 2555 // socket), it builds an ETH/IP/UDP packet (or a ETH/IP/TCP segment), in the buffer 2556 // defined by the <k_buf> argument, and registers it in the NIC_TX queue defined by the 2557 // <chdev> argument. The supported commands are SOCKET_SEND/SOCKET_CONNECT/SOCKET_CLOSE. 2558 // It unblocks the client thread when the command is completed. 2559 /////////////////////////////////////////////////////////////////////////////////////////// 2560 // When there is a packet to send, it makes the following actions: 2561 // 1) it takes the lock protecting the socket state. 2562 // 2) it get the command arguments from client thread descriptor. 
2563 // 3) it build an UDP packet or a TCP segment, depending on both the command type, and 2564 // the socket state, updates the socket state, and unblocks the client thread. 2565 // 4) it release the lock protecting the socket. 2566 // 5) it build the IP header. 2567 // 6) it build the ETH header. 2568 // 7) it copies the packet in the NIC_TX queue. 2569 /////////////////////////////////////////////////////////////////////////////////////////// 2570 // @ socket_xp : [in] extended pointer on client socket. 2571 // @ k_buf : [in] local pointer on kernel buffer (2 Kbytes). 2572 // @ chdev : [in] local pointer on NIC_RX chdev. 2573 /////////////////////////////////////////////////////////////////////////////////////////// 2574 static void dev_nic_tx_handle_one_cmd( xptr_t socket_xp, 2575 uint8_t * k_buf, 2576 chdev_t * chdev ) 2577 { 2578 socket_t * socket_ptr; 2579 cxy_t socket_cxy; 2580 xptr_t client_xp; // extended pointer on client thread 2581 thread_t * client_ptr; 2582 cxy_t client_cxy; 2583 sock_cmd_t cmd; // NIC command type 2584 uint8_t * buf; // pointer on user buffer 2585 uint32_t len; // user buffer length 2586 uint32_t todo; // number of bytes not yet sent 2587 uint32_t socket_type; // socket type (UDP/TCP) 2588 uint32_t socket_state; // socket state 2589 xptr_t socket_lock_xp; // extended pointer on socket lock 2590 xptr_t socket_r2tq_xp; // extended pointer on R2T queue 2591 uint32_t src_ip_addr; // source IP address 2592 uint32_t dst_ip_addr; // destination IP address 2593 uint32_t tx_una; // next byte to be sent 2594 uint32_t tx_nxt; // first unacknowledged byte 2595 uint32_t nbytes; // number of bytes in UDP/TCP packet payload 2596 uint8_t * k_base; // pointer UDP/TCP packet in kernel buffer 2597 uint32_t trsp_length; // length of TCP/UDP packet 2598 uint8_t r2t_flags; // flags defined by one R2T queue request 2599 bool_t do_send; // build & send a packet when true 2600 2601 // get socket cluster and local pointer 2602 socket_cxy = GET_CXY( 
socket_xp ); 2603 socket_ptr = GET_PTR( socket_xp ); 2604 2605 // build extended pointer on socket lock and r2t queue 2606 socket_lock_xp = XPTR( socket_cxy , &socket_ptr->lock ); 2607 socket_r2tq_xp = XPTR( socket_cxy , &socket_ptr->r2tq ); 2608 2609 // 1. take lock protecting this socket 2610 remote_rwlock_wr_acquire( socket_lock_xp ); 2611 2612 // get pointers on TX client thread from socket 2613 client_xp = hal_remote_l64( XPTR( socket_cxy , &socket_ptr->tx_client )); 2614 client_cxy = GET_CXY( client_xp ); 2615 client_ptr = GET_PTR( client_xp ); 2616 2617 // check valid command 2618 if( client_xp != XPTR_NULL ) // valid command found 2619 { 2620 // 2. get command arguments from socket 2621 cmd = hal_remote_l32( XPTR(socket_cxy , &socket_ptr->tx_cmd )); 2622 buf = hal_remote_lpt( XPTR(socket_cxy , &socket_ptr->tx_buf )); 2623 len = hal_remote_l32( XPTR(socket_cxy , &socket_ptr->tx_len )); 2624 todo = hal_remote_l32( XPTR(socket_cxy , &socket_ptr->tx_todo )); 2625 2626 // get socket type and state 2627 socket_type = hal_remote_l32( XPTR(socket_cxy , &socket_ptr->type )); 2628 socket_state = hal_remote_l32( XPTR(socket_cxy , &socket_ptr->state )); 2629 2630 // 3. UDP : build UDP packet and update UDP socket state 2631 if( socket_type == SOCK_DGRAM ) 2632 { 2633 if( socket_state == UDP_STATE_UNBOUND ) 2634 { 2635 // report illegal command 2636 dev_nic_tx_report_error( socket_xp, cmd, socket_state ); 2637 2638 do_send = false; 2639 } 2640 else // BOUND or CONNECT state 2641 { 2642 if( cmd == SOCKET_TX_SEND ) 2643 { 2644 // compute payload length 2645 nbytes = ( PAYLOAD_MAX_LEN < todo ) ? 
PAYLOAD_MAX_LEN : todo; 2646 2647 // compute UDP packet base in kernel buffer 2648 k_base = k_buf + ETH_HEAD_LEN + IP_HEAD_LEN; 2649 2650 // move payload to kernel buffer 2651 hal_copy_from_uspace( XPTR(local_cxy , k_base + UDP_HEAD_LEN ), 2652 buf + (len - todo), 2653 nbytes ); 2654 // build UDP header 2655 dev_nic_tx_build_udp_header( k_base, 2656 socket_xp, 2657 nbytes ); 2658 2659 // update "tx_todo" in socket descriptor 2660 hal_remote_s32( XPTR(socket_cxy , socket_ptr->tx_todo), 2661 todo - nbytes ); 2662 2663 // unblock client thread when SEND command completed 2664 if( nbytes == todo ) 2665 { 2666 dev_nic_tx_report_success( socket_xp ); 2667 } 2668 2669 do_send = true; 2670 } 2671 else 2672 { 2673 // report illegal command 2674 dev_nic_tx_report_error( socket_xp, cmd, socket_state ); 2675 2676 do_send = false; 2677 } 2678 } 2679 2680 // compute transport packet length 2681 trsp_length = UDP_HEAD_LEN + nbytes; 2682 2683 } // end UDP 2684 2685 // 3. TCP : build TCP segment and update TCP socket state 2686 if( socket_type == SOCK_STREAM ) 2687 { 2688 // extract one request from TCP socket R2T queue if queue non empty 2689 if( remote_buf_status( socket_r2tq_xp ) ) 2690 { 2691 remote_buf_get_to_kernel( socket_r2tq_xp , &r2t_flags , 1 ); 2692 } 2693 else 2694 { 2695 r2t_flags = 0; 2696 } 2697 2698 ///////////////////////////////////// 2699 if( socket_state == TCP_STATE_ESTAB ) // connected TCP socket 2700 { 2701 if( cmd == SOCKET_TX_SEND ) 2702 { 2703 // get "tx_nxt", and "tx_una" from socket descriptor 2704 tx_nxt = hal_remote_l32( XPTR(socket_cxy , &socket_ptr->tx_nxt )); 2705 tx_una = hal_remote_l32( XPTR(socket_cxy , &socket_ptr->tx_una )); 2706 2707 // compute actual payload length 2708 nbytes = ( PAYLOAD_MAX_LEN < todo ) ? 
PAYLOAD_MAX_LEN : todo; 2709 2710 // compute TCP segment base in kernel buffer 2711 k_base = k_buf + ETH_HEAD_LEN + IP_HEAD_LEN; 2712 2713 // move payload to kernel buffer 2714 hal_copy_from_uspace( XPTR( local_cxy , k_base + TCP_HEAD_LEN ), 2715 buf + (len - todo), 2716 nbytes ); 2717 2718 // build TCP header 2719 dev_nic_tx_build_tcp_header( k_base, 2720 socket_xp, 2721 nbytes, // payload 2722 TCP_FLAG_ACK | r2t_flags ); // flags 2723 2724 // update "tx_todo" in socket descriptor 2725 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_todo ), 2726 todo - nbytes ); 2727 2728 // update "tx_nxt" in socket descriptor 2729 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_nxt ), 2730 tx_nxt + nbytes ); 2731 2732 // unblock client thread when SEND command completed 2733 if( (todo == 0) && (tx_nxt == tx_una) ) 2734 { 2735 dev_nic_tx_report_success( socket_xp ); 2736 } 2737 2738 do_send = true; 2739 } 2740 else if( cmd == SOCKET_TX_CLOSE ) 2741 { 2742 // build TCP FIN segment 2743 dev_nic_tx_build_tcp_header( k_base, 2744 socket_xp, 2745 0, // payload 2746 TCP_FLAG_FIN | r2t_flags ); // flags 2747 // update socket state 2748 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 2749 TCP_STATE_FIN_WAIT1 ); 2750 2751 do_send = true; 2752 } 2753 else // cmd == CONNECT 2754 { 2755 // report illegal command 2756 dev_nic_tx_report_error( socket_xp , cmd , socket_state ); 2757 2758 do_send = false; 2759 } 2760 } 2761 ////////////////////////////////////////// 2762 else if( socket_state == TCP_STATE_BOUND ) // unconnected TCP socket 2763 { 2764 if ( cmd == SOCKET_TX_CONNECT ) 2765 { 2766 // set socket.tx_nxt 2767 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_nxt ), 2768 TCP_ISS ); 2769 2770 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_nxt ), 0 ); 2771 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->rx_wnd ), 2772 NIC_RX_BUF_SIZE); 2773 2774 // build TCP SYN segment 2775 dev_nic_tx_build_tcp_header( k_base, 2776 socket_xp, 2777 0, // payload 2778 TCP_FLAG_SYN ); // 
flags 2779 // update socket state 2780 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 2781 TCP_STATE_SYN_SENT ); 2782 2783 do_send = true; 2784 } 2785 else // cmd == SEND / CLOSE 2786 { 2787 // report illegal command 2788 dev_nic_tx_report_error( socket_xp, cmd, socket_state ); 2789 2790 do_send = false; 2791 } 2792 } 2793 /////////////////////////////////////////// 2794 else if( socket_state == TCP_STATE_LISTEN ) // server wait connect 2795 { 2796 if( cmd == SOCKET_TX_CONNECT ) 2797 { 2798 // update socket.state 2799 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 2800 TCP_STATE_SYN_SENT ); 2801 2802 // set socket.tx_una 2803 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_una ), 2804 TCP_ISS ); 2805 2806 // set socket.tx_nxt 2807 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->tx_una ), 2808 TCP_ISS + 1 ); 2809 2810 // build TCP SYN segment 2811 dev_nic_tx_build_tcp_header( k_base, 2812 socket_xp, 2813 0, // payload 2814 TCP_FLAG_SYN ); // flags 2815 do_send = true; 2816 } 2817 else // cmd == CLOSE / SEND 2818 { 2819 // report illegal command 2820 dev_nic_tx_report_error( socket_xp, cmd, socket_state ); 2821 2822 do_send = false; 2823 } 2824 } 2825 ///////////////////////////////////////////// 2826 else if( socket_state == TCP_STATE_SYN_RCVD ) // socket wait ACK 2827 { 2828 if( cmd == SOCKET_TX_CLOSE ) 2829 { 2830 // build TCP FIN segment 2831 dev_nic_tx_build_tcp_header( k_base, 2832 socket_xp, 2833 0, // payload 2834 TCP_FLAG_FIN ); // flags 2835 // update socket state 2836 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 2837 TCP_STATE_FIN_WAIT1 ); 2838 2839 do_send = true; 2840 } 2841 else // SEND / CONNECT 2842 { 2843 // report illegal command 2844 dev_nic_tx_report_error( socket_xp, cmd, socket_state ); 2845 2846 do_send = false; 2847 } 2848 } 2849 //////////////////////////////////////////////// 2850 else if( socket_state == TCP_STATE_CLOSE_WAIT ) // wait local close() 2851 { 2852 if( cmd == SOCKET_TX_CLOSE ) 2853 { 2854 // 
build TCP FIN segment 2855 dev_nic_tx_build_tcp_header( k_base, 2856 socket_xp, 2857 0, // payload 2858 TCP_FLAG_FIN ); // flags 2859 // update socket state 2860 hal_remote_s32( XPTR( socket_cxy , &socket_ptr->state ), 2861 TCP_STATE_LAST_ACK ); 2862 2863 do_send = true; 2864 } 2865 else // SEND / CONNECT 2866 { 2867 // report illegal command 2868 dev_nic_tx_report_error( socket_xp, cmd, socket_state ); 2869 2870 do_send = false; 2871 } 2872 } 2873 //// 2874 else 2875 { 2876 // report illegal command 2877 dev_nic_tx_report_error( socket_xp, cmd, socket_state ); 2878 2879 do_send = false; 2880 } 2881 2882 // compute TCP segment length 2883 trsp_length = TCP_HEAD_LEN + nbytes; 2884 } 2885 } 2886 else // no valid command found 2887 { 2888 if( socket_type == SOCK_DGRAM ) // UDP socket 2889 { 2890 do_send = false; 2891 } 2892 else // TCP socket 2893 { 2894 if( remote_buf_status( socket_r2tq_xp ) == 0 ) // R2T queue empty 2895 { 2896 do_send = false; 2897 } 2898 else // pending request in R2T queue 2899 { 2900 // get one request from R2T queue 2901 remote_buf_get_to_kernel( socket_r2tq_xp , &r2t_flags , 1 ); 2902 2903 // build TCP header for an empty segment 2904 dev_nic_tx_build_tcp_header( k_base, 2905 socket_xp, 2906 0, // payload 2907 r2t_flags ); // flags 2908 do_send = true; 2909 } 2910 } 2911 } 2912 2913 // 4. release the lock protecting the socket 2914 remote_rwlock_wr_release( socket_lock_xp ); 2915 2916 // return if no packet to send 2917 if( do_send == false ) return; 2918 2919 // 5. build IP header 2920 dev_nic_tx_build_ip_header( k_buf + ETH_HEAD_LEN, 2921 src_ip_addr, 2922 dst_ip_addr, 2923 IP_HEAD_LEN + trsp_length ); 2924 2925 // 6. build ETH header 2926 dev_nic_tx_build_eth_header( k_buf, 2927 (uint16_t)SRC_MAC_54, 2928 (uint16_t)SRC_MAC_32, 2929 (uint16_t)SRC_MAC_10, 2930 (uint16_t)DST_MAC_54, 2931 (uint16_t)DST_MAC_32, 2932 (uint16_t)DST_MAC_10, 2933 ETH_HEAD_LEN + IP_HEAD_LEN + trsp_length ); 2934 2935 // 7. 
move packet to NIC_TX queue 2936 dev_nic_tx_move_packet( chdev, 2937 k_buf, 2938 ETH_HEAD_LEN + IP_HEAD_LEN + trsp_length ); 2939 2940 } // end dev_nic_tx_handle_one_cmd() 2941 2942 ///////////////////////////////////////// 2943 void dev_nic_tx_server( chdev_t * chdev ) 2944 { 2945 uint8_t k_buf[NIC_KERNEL_BUF_SIZE]; // buffer for one packet 2946 2947 xptr_t root_xp; // extended pointer on clients list root 2948 xptr_t lock_xp; // extended pointer on lock protecting this list 2949 xptr_t socket_xp; // extended pointer on on client socket 2950 socket_t * socket_ptr; 2951 cxy_t socket_cxy; 2952 xptr_t entry_xp; // extended pointer on socket tx_list entry 2953 2954 thread_t * this = CURRENT_THREAD; 2955 2956 // check chdev direction and type 2957 assert( (chdev->func == DEV_FUNC_NIC) && (chdev->is_rx == false) , 2958 "illegal chdev type or direction" ); 2959 2960 // check thread can yield 2961 assert( (this->busylocks == 0), 2962 "cannot yield : busylocks = %d\n", this->busylocks ); 2963 2964 // build extended pointer on client sockets lock & root 2965 lock_xp = XPTR( local_cxy , &chdev->wait_lock ); 2966 root_xp = XPTR( local_cxy , &chdev->wait_root ); 2967 2968 while( 1 ) // TX server infinite loop 2969 { 2970 // take the lock protecting the client sockets queue 2971 remote_busylock_acquire( lock_xp ); 2972 2973 /////////////// block and deschedule if no clients 2974 if( xlist_is_empty( root_xp ) == false ) 2975 { 2976 // release the lock protecting the TX client sockets queue 2977 remote_busylock_release( lock_xp ); 2978 2979 // block and deschedule 2980 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_CLIENT ); 2981 sched_yield( "waiting client" ); 2982 } 2983 ////////////// 2984 else 2985 { 2986 // get first client socket 2987 socket_xp = XLIST_FIRST( root_xp , socket_t , tx_list ); 2988 socket_cxy = GET_CXY( socket_xp ); 2989 socket_ptr = GET_PTR( socket_xp ); 2990 2991 // build extended pointer on socket xlist_entry 2992 entry_xp = XPTR( socket_cxy , 
&socket_ptr->tx_list ); 2993 2994 // remove this socket from the waiting queue 2995 xlist_unlink( entry_xp ); 2996 2997 // release the lock protecting the client sockets queue 2998 remote_busylock_release( lock_xp ); 2999 3000 // handle this TX client 3001 dev_nic_tx_handle_one_cmd( socket_xp, 3002 k_buf, 3003 chdev ); 3004 3005 // take the lock protecting the client sockets queue 3006 remote_busylock_acquire( lock_xp ); 3007 3008 // add this socket in last position of queue 3009 xlist_add_last( root_xp , entry_xp ); 3010 3011 // release the lock protecting the client sockets queue 3012 remote_busylock_release( lock_xp ); 3013 } 3014 } // end while 3015 } // end dev_nic_tx_server() 3016 3017 -
trunk/kernel/devices/dev_nic.h
r457 r657 2 2 * dev_nic.h - NIC (Network Controler) generic device API definition. 3 3 * 4 * Author Alain Greiner (2016,2017,2018 )4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 27 27 #include <kernel_config.h> 28 28 #include <hal_kernel_types.h> 29 #include <remote_busylock.h> 30 #include <remote_buf.h> 31 #include <xlist.h> 32 33 /**** Forward declarations ****/ 34 35 struct chdev_s; 29 36 30 37 /***************************************************************************************** 31 38 * Generic Network Interface Controler definition 32 39 * 33 * This device provide access to an external Gigabit Ethernet network controler. 34 * It assume that the NIC hardware peripheral handles two packets queues for sent (TX) 35 * and received (RX) packets. Packets are (Ethernet/IPV4). 40 * This device provides access to a generic Gigabit Ethernet network controler. 41 * It assumes that the NIC hardware peripheral handles two packets queues for sent (TX) 42 * and received (RX) packets. 43 * 44 * The supported protocols stack is : Ethernet / IPV4 / TCP or UDP 36 45 * 37 * The NIC device is handling an (infinite) stream of packets to or from the network. 46 * 1) hardware assumptions 47 * 48 * The NIC device is handling two (infinite) streams of packets to or from the network. 38 49 * It is the driver responsibility to move the RX packets from the NIC to the RX queue, 39 50 * and the TX packets from the TX queue to the NIC. 40 51 * 41 * AS the RX and TX queues are independant, there is one NIC-RX device descriptor 42 * to handle RX packets, and another NIC-TX device descriptor to handle TX packets. 43 * In order to improve throughput, the hardware NIC controller can optionnally implement 44 * multiple channels: 45 * - The RX channels are indexed by an hash key derived from the source IP address. 46 * - The TX channels are indexed by an hash key derived from the destination IP address. 
47 * These 2*N devices, and 2*N associated server threads, are distributed in 2*N clusters. 48 * The 2*N server threads implement the protocols stack. The RX server threads block 49 * and deschedule when the RX queue is empty. The TX server stack block and deschedule 50 * when the queue is full. 51 * 52 * It is the driver responsibily to re-activate a blocked server thread when 53 * the queue state is modified: not full for TX, or not empty for RX. 52 * AS the RX and TX queues are independant, there is one NIC_RX device descriptor 53 * to handle RX packets, and another NIC_TX device descriptor to handle TX packets. 54 * 55 * In order to improve throughput, the NIC controller can implement multiple (N) channels. 56 * In this case, the channel index is defined by an hash function computed from the remote 57 * IP address and port. This index is computed by the hardware for an RX packet, and is 58 * computed by the kernel for a TX packet, using a specific driver function. TODO ... 59 * The 2*N chdevs, and the associated server threads implementing the protocols stack, 60 * are distributed in 2*N different clusters. 61 * 62 * 2) User API 63 * 64 * On the user side, ALMOS-MKH implements the POSIX socket API. 65 * The kernel functions implementing the socket related syscalls are : 66 * - dev_nic_socket() : create a local socket registered in process fd_array[]. 67 * - dev_nic_bind() : attach a local IP address and port to a local socket. 68 * - dev_nic_listen() : local server makes a passive open. 69 * - dev_nic_connect() : local client makes an active open to a remote server. 70 * - dev_nic_accept() : local server accept a new remote client. 71 * - dev_nic_send() : send data on a connected socket. 72 * - dev_nic_recv() : receive data on a connected socket. 73 * - dev_nic_sendto() : send a packet to a remote (IP address/port). 74 * - dev_nic_recvfrom() : receive a paket from a remote (IP address/port). 
75 * - dev_nic_close() : close a socket 76 * 77 * 3) TX stream 78 * 79 * The internal API between the client threads and the TX server thread defines 80 * the 3 following commands: 81 * . SOCKET_TX_CONNECT : request to execute the 3 steps TCP connection handshake. 82 * . SOCKET_TX_SEND : send data to a remote socket (UDP or TCP). 83 * . SOCKET_TX_CLOSE : request to execute the 3 steps TCP close handshake. 84 * 85 * - These 3 commands are blocking for the client thread that registers the command in the 86 * socket descriptor, blocks on the BLOCKED_IO condition, and deschedules. 87 * - The TX server thread is acting as a multiplexer. It scans the list of attached sockets, 88 * to handle all valid commands: one UDP packet or TCP segment per iteration. 89 * It uses the user buffer defined by the client thread, and attached to socket descriptor, 90 * as a retransmission buffer. It blocks and deschedules on the BLOCKED_CLIENT condition, 91 * when there is no more active TX command registered in any socket. It is re-activated 92 * by the first client thread registering a new TX command in the socket descriptor. 93 * It unblocks a client thread only when a command is fully completed. It signals errors 94 * to the client thread using the tx_error field in socket descriptor. 95 * 96 * 4) RX stream 97 * 98 * The communication between the RX server thread and the client threads expecting data 99 * is done through receive buffers (one private buffer per socket) that are handled 100 * as single-writer / single reader-FIFOs, called rx_buf. 101 * - The RX server thread is acting as a demultiplexor: it handle one TCP segment or UDP 102 * packet per iteration, and register the data in the rx_buf of the socket matching 103 * the packet. It simply discard all packets that does not match a registered socket. 104 * When a client thread is registered in the socket descriptor, the RX server thread 105 * unblocks this client thread as soon as there is data available in rx_buf. 
106 * It blocks and deschedules on the BLOCKED_ISR condition when there are no more packets 107 * in the NIC_RX queue. It is unblocked by the hardware ISR. 108 * - The client thread simply accesses the rx_buf attached to socket descriptor, and consumes 109 * the available data when the rx_buf is non empty. It blocks on the BLOCKED_IO condition, 110 * and deschedules when the rx_buf is empty. 111 * 112 * 5) R2T queue 113 * 114 * To implement the TCP "3 steps handshake" protocol, the RX server thread can directly 115 * request the associated TX server thread to send control packets in the TX stream, 116 * using a dedicated R2T (RX to TX) FIFO stored in the socket descriptor. 117 * 118 * 6) NIC driver API 119 * 120 * The generic NIC device "driver" API defines the following commands to the NIC driver: 121 * - READABLE : returns true if at least one RX packet is available in RX queue. 122 * - WRITABLE : returns true if at least one empty slot is available in TX queue. 123 * - READ : consume one packet from the RX queue. 124 * - WRITE : produce one packet to the TX queue. 125 * All RX or TX packets are sent or received in standard 2 Kbytes kernel buffers, 126 * that are dynamically allocated by the protocols stack. 127 * 128 * The actual TX and RX queues structures depends on the hardware NIC implementation, 129 * and are defined in the HAL specific driver code. 54 130 * 55 131 * WARNING: the WTI mailboxes used by the driver to receive events from the hardware 56 132 * (available RX packet, or available free TX slot, for a given channel), must be 57 133 * statically allocated during the kernel initialisation phase, and must be 58 * routed to the cluster containing the associated device descriptor and server thread. 59 * to simplify the server thread re-activation. 60 * 61 * Finally, the generic NIC device API defines the following commands: 62 * - READABLE : returns true if at least one RX paquet is available in RX queue.
63 * - WRITABLE : returns true if atleast one empty slot is available in TX queue. 64 * - READ : consume one packet from the RX queue. 65 * - WRITE : produce one packet fto the TX queue. 66 * All RX or TX paquets are sent or received in standard 2 Kbytes kernel buffers, 67 * that are dynamically allocated by the protocols stack. The structure pkd_t 68 * defining a packet descriptor is defined below, and contain the buffer pointer 69 * and the actual Ethernet packet Length. 70 * 71 * The actual TX an RX queues structures depends on the hardware NIC implementation, 72 * and are defined in the driver code. 134 * routed to the cluster containing the associated TX/RX chdev and server thread. 135 * 73 136 *****************************************************************************************/ 74 137 … … 78 141 79 142 /****************************************************************************************** 80 * This defines the extension for the generic IOC device. 143 * Various constants used by the Protocols stack 144 *****************************************************************************************/ 145 146 #define SRC_MAC_54 0x54 147 #define SRC_MAC_32 0x32 148 #define SRC_MAC_10 0x10 149 #define DST_MAC_54 0x54 150 #define DST_MAC_32 0x32 151 #define DST_MAC_10 0x10 152 153 #define TCP_HEAD_LEN 20 154 #define UDP_HEAD_LEN 8 155 #define IP_HEAD_LEN 20 156 #define ETH_HEAD_LEN 14 157 158 #define PROTOCOL_UDP 0x11 159 #define PROTOCOL_TCP 0x06 160 161 #define TCP_ISS 0x10000 162 163 #define PAYLOAD_MAX_LEN 1500 // max payload for and UDP packet or a TCP segment 164 165 #define TCP_FLAG_FIN 0x01 166 #define TCP_FLAG_SYN 0x02 167 #define TCP_FLAG_RST 0x04 168 #define TCP_FLAG_PSH 0x08 169 #define TCP_FLAG_ACK 0x10 170 #define TCP_FLAG_URG 0x20 171 172 #define NIC_RX_BUF_SIZE 0x100000 // 1 Mbytes 173 #define NIC_R2T_QUEUE_SIZE 0x64 // smallest KCM size 174 #define NIC_CRQ_QUEUE_SIZE 0x8 // 8 * sizeof(sockaddr_t) = smallest KCM size 175 #define NIC_PKT_MAX_SIZE 
1500 // for Ethernet 176 #define NIC_KERNEL_BUF_SIZE 2000 // for on ETH/IP/TCP packet 177 178 /***************************************************************************************** 179 * This defines the extension for the generic NIC device. 81 180 * The actual queue descriptor depends on the implementation. 82 *****************************************************************************************/ 181 * 182 * WARNING : for all NIC_TX and NIC_RX chdevs, the xlist rooted in in the chdev 183 * ("wait_root" and "wait_lock" fields) is actually a list of sockets. 184 ****************************************************************************************/ 83 185 84 186 typedef struct nic_extend_s 85 187 { 86 void * queue; /*! local pointer on the packets queue descriptor (RX or TX)*/188 void * queue; /*! local pointer on NIC queue descriptor (RX or TX) */ 87 189 } 88 190 nic_extend_t; 89 191 90 /****************************************************************************************** 91 * This structure defines the Ethernet/IPV4 packet descriptor, that is sent to, 92 * or received from, the protocols stack. 93 *****************************************************************************************/ 94 95 typedef struct pkd_s 96 { 97 char * buffer; /*! local pointer on 2 Kbytes buffer containing packet */ 98 uint32_t length; /*! actual number of bytes */ 99 } 100 pkd_t; 101 102 /****************************************************************************************** 192 /***************************************************************************************** 103 193 * This enum defines the various implementations of the generic NIC peripheral. 104 194 * This array must be kept consistent with the define in the arch_info.h file. 
105 **************************************************************************************** */106 107 enum nic_impl_e195 ****************************************************************************************/ 196 197 typedef enum nic_impl_e 108 198 { 109 199 IMPL_NIC_CBF = 0, … … 112 202 nic_impl_t; 113 203 114 /****************************************************************************************** 115 * This defines the (implementation independant) command passed to the NIC driver. 116 *****************************************************************************************/ 204 /**************************************************************************************** 205 * This defines the (implementation independant) commands to access the NIC hardware. 206 * These commands are registered by the NIC_TX and NIC_RX server threads in the 207 * server thread descriptor, to be used by the NIC driver. 208 * The buffer is always a 2K bytes kernel buffer, containing an Ethernet packet. 209 ****************************************************************************************/ 117 210 118 211 typedef enum nic_cmd_e 119 212 { 120 NIC_CMD_WRITABLE = 0, /*! test TX queue not full (for a given length packet)*/121 NIC_CMD_WRITE = 1, /*! put one (given length) packet to TX queue*/122 NIC_CMD_READABLE = 2, /*! test RX queue not empty (for any length packet)*/123 NIC_CMD_READ = 3, /*! get one (any length) packet from RX queue*/213 NIC_CMD_WRITABLE = 10, /*! test TX queue not full (for a given packet length) */ 214 NIC_CMD_WRITE = 11, /*! put one (given length) packet to TX queue */ 215 NIC_CMD_READABLE = 12, /*! test RX queue not empty (for any packet length) */ 216 NIC_CMD_READ = 13, /*! get one (any length) packet from RX queue */ 124 217 } 125 218 nic_cmd_t; … … 127 220 typedef struct nic_command_s 128 221 { 129 xptr_t dev_xp; /*! extended pointer on device descriptor*/130 nic_cmd_t cmd; /*! requested operation type*/131 char * buffer; /*! 
local pointer on 2 Kbytes buffer containing packet*/132 uint32_t length; /*! actual number of bytes*/133 bool_t status; /*! return true if writable or readable (depend on command)*/134 uint32_t error; /*! return an error from the hardware (0 if no error)*/222 xptr_t dev_xp; /*! extended pointer on NIC chdev descriptor */ 223 nic_cmd_t type; /*! command type */ 224 uint8_t * buffer; /*! local pointer on buffer (kernel or user space) */ 225 uint32_t length; /*! number of bytes in buffer */ 226 uint32_t status; /*! return value (depends on command type) */ 227 uint32_t error; /*! return an error from the hardware (0 if no error) */ 135 228 } 136 229 nic_command_t; 230 231 /***************************************************************************************** 232 * This structure defines a socket descriptor. In order to parallelize the transfers, 233 * the set of all registered sockets is split in several subsets. 234 * The number of subsets is the number of NIC channels. 235 * The distribution key is computed from the (remote_addr/remote_port) couple. 236 * This computation is done by the NIC hardware for RX packets, 237 * and by the dev_nic_connect() function for the TX packets. 238 * 239 * A socket is attached to the NIC_TX[channel] & NIC_RX[channel] chdevs. 240 * Each socket descriptor allows the TX and TX server threads to access various buffers: 241 * - the user "send" buffer contains the data to be send by the TX server thread. 242 * - the kernel "receive" buffer contains the data received by the RX server thread. 243 * - the kernel "r2t" buffer allows the RX server thread to make direct requests 244 * to the associated TX server (to implement the TCP 3 steps handshake). 
245 * 246 * The synchronisation mechanism between the client threads and the server threads 247 * is different for TX and RX transfers: 248 * 249 * 1) For a TX transfer, there can exist only one client thread for a given socket, 250 * the transfer is always initiated by the local process, and all TX commands 251 * (CONNECT/SEND/CLOSE) are blocking for the client thread. The user buffer is 252 * used by TCP to handle retransmissions when required. 253 * The client thread registers the command in the thread descriptor, registers itself 254 * in the socket descriptor, unblocks the TX server thread from the BLOCKED_CLIENT 255 * condition, blocks itself on the BLOCKED_IO condition, and deschedules. 256 * When the command is completed, the TX server thread unblocks the client thread. 257 * The TX server blocks itself on the BLOCKED_CLIENT condition, when there is no 258 * pending commands and the R2T queue is empty. It is unblocked when a client 259 * registers a new command, or when the TX server thread registers a new request 260 * in the R2T queue. 261 * The tx_valid flip-flop is SET by the client thread to signal a valid command. 262 * It is RESET by the server thread when the command is completed: For a SEND, 263 * all bytes have been sent (UDP) or acknowledged (TCP). 264 * 265 * 2) For an RX transfer, there can exist only one client thread for a given socket, 266 * but the transfer is initiated by the remote process, and the RECV command 267 * is not really blocking: the data can arrive before the local RECV command is 268 * executed, and the server thread does not wait to receive all requested data 269 * to deliver data to client thread. Therefore each socket contains a receive 270 * buffer (rx_buf) handled as a single-writer/single-reader fifo. 271 * The client thread consumes data from the rx_buf when possible. It blocks on the 272 * BLOCKED_IO condition and deschedules when the rx_buf is empty.
273 * It is unblocked by the RX server thread when new data is available in the rx_buf. 274 * The RX server blocks itself on the BLOCKED_ISR condition When the NIC_RX packets 275 * queue is empty. It is unblocked by the hardware when new packets are available. 276 * 277 * Note : the socket domains and types are defined in the "shared_socket.h" file. 278 ****************************************************************************************/ 279 280 /****************************************************************************************** 281 * This function returns a printable string for a given NIC command <type>. 282 ****************************************************************************************** 283 * @ type : NIC command type 284 *****************************************************************************************/ 285 char * nic_cmd_str( uint32_t type ); 286 287 /****************************************************************************************** 288 * This function returns a printable string for a given socket <state>. 289 ****************************************************************************************** 290 * @ state : socket state 291 *****************************************************************************************/ 292 char * socket_state_str( uint32_t state ); 137 293 138 294 /****************************************************************************************** … … 143 299 * device and the specific data structures when required. 144 300 * It creates the associated server thread and allocates a WTI from local ICU. 301 * For a TX_NIC chedv, it allocates and initializes the R2T waiting queue used by the 302 * NIC_RX[channel] server to send direct requests to the NIC_TX[channel] server. 145 303 * It must de executed by a local thread. 
146 304 ****************************************************************************************** … … 149 307 void dev_nic_init( struct chdev_s * chdev ); 150 308 151 /****************************************************************************************** 152 * This blocking function must be called by the kernel thread running in the cluster 153 * containing the NIC_RX channel device descriptor. 154 * It read one packet (Ethernet/IPV4) from the NIC_RX queue associated to the NIC channel. 155 * It calls directly the NIC driver, without registering in a waiting queue, because 156 * only this NIC_RX thread can access this packets queue. 157 * 1) It test the packets queue status, using the NIC_CMD_WRITABLE command. 158 * If it is empty, it unmask the NIC-RX channel IRQ, blocks and deschedule. 159 * It is re-activated by the NIC-RX ISR (generated by the NIC) as soon as the queue 160 * becomes not empty. 161 * 2) if the queue is not empty, it get one packet, using the driver NIC_CMD_READ command. 162 * Both commands are successively registered in the NIC-RX server thread descriptor 163 * to be passed to the driver. 164 * 165 * WARNING : for a RX packet the initiator is the NIC hardware, and the protocols 166 * stack is traversed upward, from the point of view of function calls. 167 ****************************************************************************************** 168 * @ pkd : pointer on packet descriptor (expected). 169 * @ returns 0 if success / returns non zero if ENOMEM, or error reported from NIC. 170 *****************************************************************************************/ 171 error_t dev_nic_read( pkd_t * pkd ); 172 173 /****************************************************************************************** 174 * This blocking function must be called by the kernel thread running in the cluster 175 * containing the NIC_TX channel device descriptor. 
176 * It writes one packet (Ethernet/IPV4) to the NIC_RX queue associated to the NIC channel. 177 * It calls directly the NIC driver, without registering in a waiting queue, because 178 * only this NIC_TX thread can access this packets queue. 179 * 1) It test the packets queue status, using the NIC_CMD_READABLE command. 180 * If it is full, it unmask the NIC-TX channel IRQ, blocks and deschedule. 181 * It is re-activated by the NIC-TX ISR (generated by the NIC) as soon as the queue 182 * is not full. 183 * 2) If the queue is not empty, it put one packet, using the driver NIC_CMD_WRITE command. 184 * Both commands are successively registered in the NIC-TX server thread descriptor 185 * to be passed to the driver. 186 * 187 * WARNING : for a TX packet the initiator is the "client" thread, and the protocols 188 * stack is traversed downward from the point of view of function calls. 189 ****************************************************************************************** 190 * @ pkd : pointer on packet descriptor (to be filed). 191 * @ returns 0 if success / returns if length > 2K, undefined key, or error from NIC. 192 *****************************************************************************************/ 193 error_t dev_nic_write( pkd_t * pkd ); 194 195 196 /****************************************************************************************** 197 * This function is executed by the server thread associated to a NIC channel device 198 * descriptor (RX or TX). This thread is created by the dev_nic_init() function. 199 * It executes an infinite loop, handling one packet per iteration. 200 * 201 * -- For a TX channel -- 202 * 1) It allocates a 2 Kbytes buffer. 203 * 2) It copies the client TCP/UDP packet in this buffer. 204 * 3) It calls the IP layer to add the IP header. 205 * 4) It calls the ETH layer to add the ETH header. 206 * 5) It calls the dev_nic_write() blocking function to move the packet to the TX queue. 207 * 6) It releases the 2 Kbytes buffer. 
208 * 209 * When the waiting threads queue is empty, it blocks on the THREAD_BLOCKED_IO_CMD 210 * condition and deschedule. It is re-activated by a client thread registering a command. 211 * 212 * -- For a RX channel -- 213 * 1) It allocates a 2 Kbytes buffer. 214 * 2 It calls the dev_nic_read() blocking function to move the ETH packet to this buffer. 215 * 3) It calls the ETH layer to analyse the ETH header. 216 * 4) It calls the IP layer to analyse the IP header. TODO ??? 217 * 5) It calls the transport (TCP/UDP) layer. TODO ??? 218 * 5) It deliver the packet to the client thread. TODO ??? 219 * 6) It releases the 2 Kbytes buffer. 220 * 221 * When the RX packets queue is empty, it blocks on the THREAD_BLOCKED_IO_CMD 222 * condition and deschedule. It is re-activated by the NIC driver when this queue 223 * becomes non empty. 224 ****************************************************************************************** 225 * @ dev : local pointer on NIC chdev descriptor. 226 *****************************************************************************************/ 227 void dev_nic_server( struct chdev_s * chdev ); 228 309 310 /* functions implementing the socket API */ 311 312 /**************************************************************************************** 313 * This function implements the socket() syscall. 314 * This function allocates and intializes in the calling thread cluster: 315 * - a new socket descriptor, defined by the <domain> and <type> arguments, 316 * - a new file descriptor, associated to this socket, 317 * It registers the file descriptor in the reference process fd_array[], set 318 * the socket state to IDLE, and returns the <fdid> value. 319 **************************************************************************************** 320 * @ domain : [in] socket protocol family (AF_UNIX / AF_INET) 321 * @ type : [in] socket type (SOCK_DGRAM / SOCK_STREAM). 322 * @ return a file descriptor <fdid> if success / return -1 if failure. 
323 ***************************************************************************************/ 324 int dev_nic_socket( uint32_t domain, 325 uint32_t type ); 326 327 /**************************************************************************************** 328 * This function implements the bind() syscall. 329 * It initializes the "local_addr" and "local_port" fields in the socket 330 * descriptor identified by the <fdid> argument and set the socket state to BOUND. 331 * It can be called by a thread running in any cluster. 332 **************************************************************************************** 333 * @ fdid : [in] file descriptor identifying the socket. 334 * @ addr : [in] local IP address. 335 * @ port : [in] local port. 336 * @ return 0 if success / return -1 if failure. 337 ***************************************************************************************/ 338 int dev_nic_bind( uint32_t fdid, 339 uint32_t addr, 340 uint16_t port ); 341 342 /**************************************************************************************** 343 * This function implements the listen() syscall(). 344 * It is called by a (local) server process to specify the max size of the queue 345 * registering the (remote) client process connections, and set the socket identified 346 * by the <fdid> argument to LISTEN state. It applies only to sockets of type TCP. 347 * It can be called by a thread running in any cluster. 348 * TODO handle the <max_pending> argument... 349 **************************************************************************************** 350 * @ fdid : [in] file descriptor identifying the local server socket. 351 * @ max_pending : [in] max number of accepted remote client connections. 
352 ***************************************************************************************/ 353 int dev_nic_listen( uint32_t fdid, 354 uint32_t max_pending ); 355 356 /**************************************************************************************** 357 * This function implements the connect() syscall. 358 * It is used by a (local) client process to connect a local socket identified by 359 * the <fdid> argument, to a remote socket identified by the <remote_addr> and 360 * <remote_port> arguments. It can be used for both UDP and TCP sockets. 361 * It computes the nic_channel index from <remote_addr> and <remote_port> values, 362 * and initializes "remote_addr","remote_port", "nic_channel" in local socket. 363 * It registers the socket in the two lists of clients rooted in the NIC_RX[channel] 364 * and NIC_TX[channel] chdevs. It can be called by a thread running in any cluster. 365 * WARNING : the clients are the socket descriptors, and NOT the threads descriptors. 366 **************************************************************************************** 367 * Implementation Note: 368 * - For a TCP socket, it updates the "remote_addr", "remote_port", "nic_channel" fields 369 * in the socket descriptor defined by the <fdid> argument, and register this socket, 370 * in the lists of sockets attached to the NIC_TX and NIC_RX chdevs. 371 * Then, it registers a CONNECT command in the "nic_cmd" field ot the client thread 372 * descriptor to request the NIC_TX server thread to execute the 3 steps handshake, 373 * and updates the "tx_client" field in the socket descriptor. It unblocks the NIC_TX 374 * server thread, blocks on the THREAD_BLOCKED_IO condition and deschedules. 375 * - For an UDP socket, it simply updates "remote_addr", "remote_port", "nic_channel" 376 * in the socket descriptor defined by the <fdid> argument, and register this socket, 377 * in the lists of sockets attached to the NIC_TX and NIC_RX chdevs. 
378 * Then, it set the socket state to CONNECT, without unblocking the NIC_TX server 379 * thread, and without blocking itself. 380 * TODO : the nic_channel index computation must be done by a driver specific function. 381 **************************************************************************************** 382 * @ fdid : [in] file descriptor identifying the socket. 383 * @ remote_addr : [in] remote IP address. 384 * @ remote_port : [in] remote port. 385 * @ return 0 if success / return -1 if failure. 386 ***************************************************************************************/ 387 int dev_nic_connect( uint32_t fdid, 388 uint32_t remote_addr, 389 uint16_t remote_port ); 390 391 /**************************************************************************************** 392 * This function implements the accept() syscall(). 393 * It is executed by a server process, waiting for one (or several) client process(es) 394 * requesting a connection on a socket identified by the <fdid> argument. 395 * This socket was previouly created with socket(), bound to a local address with bind(), 396 * and is listening for connections after a listen(). 397 * This function extracts the first connection request on the CRQQ queue of pending 398 * requests, creates a new socket with the same properties as the existing socket, 399 * and allocates a new file descriptor for this new socket. 400 * If no pending connections are present on the queue, it blocks the caller until a 401 * connection is present. 402 * The new socket cannot accept more connections, but the original socket remains open. 403 * It returns the new socket <fdid>, and register in the <address> an <port> arguments 404 * the remote client IP address & port. It applies only to sockets of type SOCK_STREAM. 405 **************************************************************************************** 406 * @ fdid : [in] file descriptor identifying the listening socket. 407 * @ address : [out] server IP address. 
408 * @ port : [out] server port address length in bytes. 409 * @ return the new socket <fdid> if success / return -1 if failure 410 ***************************************************************************************/ 411 int dev_nic_accept( uint32_t fdid, 412 uint32_t * address, 413 uint16_t * port ); 414 415 /**************************************************************************************** 416 * This blocking function implements the send() syscall. 417 * It is used to send data stored in the user buffer, identified the <u_buf> and <length> 418 * arguments, to a connected (TCP or UDP) socket, identified by the <fdid> argument. 419 * The work is actually done by the NIC_TX server thread, and the synchronisation 420 * between the client and the server threads uses the "rx_valid" set/reset flip-flop: 421 * The client thread registers itself in the socket descriptor, registers in the queue 422 * rooted in the NIC_TX[index] chdev, set "rx_valid", unblocks the server thread, and 423 * finally blocks on THREAD_BLOCKED_IO, and deschedules. 424 * When the TX server thread completes the command (all data has been sent for an UDP 425 * socket, or acknowledeged for a TCP socket), the server thread reset "rx_valid" and 426 * unblocks the client thread. 427 * This function can be called by a thread running in any cluster. 428 * WARNING : This implementation does not support several concurent SEND/SENDTO commands 429 * on the same socket, as only one TX thread can register in a given socket. 430 **************************************************************************************** 431 * @ fdid : [in] file descriptor identifying the socket. 432 * @ u_buf : [in] pointer on buffer containing packet in user space. 433 * @ length : [in] packet size in bytes. 434 * @ return number of sent bytes if success / return -1 if failure. 
435 ***************************************************************************************/ 436 int dev_nic_send( uint32_t fdid, 437 uint8_t * u_buf, 438 uint32_t length ); 439 440 /**************************************************************************************** 441 * This blocking function implements the sendto() syscall. 442 * It registers the <remote_addr> and <remote_port> arguments in the local socket 443 * descriptor, and does the same thing as the dev_nic_send() function above, 444 * but can be called on an unconnected UDP socket. 445 **************************************************************************************** 446 * @ fdid : [in] file descriptor identifying the socket. 447 * @ u_buf : [in] pointer on buffer containing packet in user space. 448 * @ length : [in] packet size in bytes. 449 * @ remote_addr : [in] destination IP address. 450 * @ remote_port : [in] destination port. 451 * @ return number of sent bytes if success / return -1 if failure. 452 ***************************************************************************************/ 453 int dev_nic_sendto( uint32_t fdid, 454 uint8_t * u_buf, 455 uint32_t length, 456 uint32_t remote_addr, 457 uint32_t remote_port ); 458 459 /**************************************************************************************** 460 * This blocking function implements the recv() syscall. 461 * It is used to receive data that has been stored by the NIC_RX server thread in the 462 * rx_buf of a connected (TCP or UDP) socket, identified by the <fdid> argument. 463 * The synchronisation between the client and the server threads uses the "rx_valid" 464 * set/reset flip-flop: If "rx_valid" is set, the client simply moves the available 465 * data from the "rx_buf" to the user buffer identified by the <u_buf> and <length> 466 * arguments, and reset the "rx_valid" flip_flop. 
If "rx_valid" is not set, the client 467 * thread register itself in the socket descriptor, registers in the clients queue rooted 468 * in the NIC_RX[index] chdev, and finally blocks on THREAD_BLOCKED_IO, and deschedules. 469 * The client thread is re-activated by the RX server, that set the "rx_valid" flip-flop 470 * as soon as data is available in the "rcv_buf" (can be less than the user buffer size). 471 * This function can be called by a thread running in any cluster. 472 * WARNING : This implementation does not support several concurent RECV/RECVFROM 473 * commands on the same socket, as only one RX thread can register in a given socket. 474 **************************************************************************************** 475 * @ fdid : [in] file descriptor identifying the socket. 476 * @ u_buf : [in] pointer on buffer in user space. 477 * @ length : [in] buffer size in bytes. 478 * @ return number of received bytes if success / return -1 if failure. 479 ***************************************************************************************/ 480 int dev_nic_recv( uint32_t fdid, 481 uint8_t * u_buf, 482 uint32_t length ); 483 484 /**************************************************************************************** 485 * This blocking function implements the recvfrom() syscall. 486 * It registers the <remote_addr> and <remote_port> arguments in the local socket 487 * descriptor, and does the same thing as the dev_nic_recv() function above, 488 * but can be called on an unconnected UDP socket. 489 **************************************************************************************** 490 * @ fdid : [in] file descriptor identifying the socket. 491 * @ u_buf : [in] pointer on buffer containing packet in user space. 492 * @ length : [in] packet size in bytes. 493 * @ remote_addr : [in] destination IP address. 494 * @ remote_port : [in] destination port. 495 * @ return number of received bytes if success / return -1 if failure. 
 496 ***************************************************************************************/ 497 int dev_nic_recvfrom( uint32_t fdid, 498 uint8_t * u_buf, 499 uint32_t length, 500 uint32_t remote_addr, 501 uint32_t remote_port ); 502 503 504 /* Instrumentation functions */ 505 506 507 /****************************************************************************************** 508 * This instrumentation function displays on the TXT0 kernel terminal the content 509 * of the instrumentation registers contained in the NIC device. 510 *****************************************************************************************/ 511 void dev_nic_print_stats( void ); 512 513 /****************************************************************************************** 514 * This instrumentation function resets all instrumentation registers contained 515 * in the NIC device. 516 *****************************************************************************************/ 517 void dev_nic_clear_stats( void ); 518 519 520 /* Functions executed by the TX and RX server threads */ 521 522 /****************************************************************************************** 523 * This function is executed by the server thread associated to a NIC_TX[channel] chdev. 524 * This TX server thread is created by the dev_nic_init() function. 525 * It builds and sends UDP packets or TCP segments for all client threads registered in 526 * the NIC_TX[channel] chdev. The command types are (CONNECT / SEND / CLOSE), and the 527 * priority between clients is round-robin. It takes into account the request registered 528 * by the RX server thread in the R2T queue associated to the involved socket. 529 * When a command is completed, it unblocks the client thread. For a SEND command, the 530 * last byte must have been sent for an UDP socket, and it must have been acknowledged 531 * for a TCP socket. 
 532 * When the TX client threads queue is empty, it blocks on THREAD_BLOCKED_CLIENT 533 * condition and deschedules. It is re-activated by a client thread registering a command. 534 ****************************************************************************************** 535 * Implementation note: 536 * It executes an infinite loop in which it takes the lock protecting the clients list 537 * to build a "kleenex" list of currently registered clients. 538 * For each client registered in this "kleenex" list, it takes the lock protecting the 539 * socket state, builds one packet/segment in a local 2K bytes kernel buffer, calls the 540 * transport layer to add the UDP/TCP header, calls the IP layer to add the IP header, 541 * calls the ETH layer to add the ETH header, and moves the packet to the NIC_TX_QUEUE. 542 * Finally, it updates the socket state, and releases the socket lock. 543 ****************************************************************************************** 544 * @ chdev : [in] local pointer on one local NIC_TX[channel] chdev descriptor. 545 *****************************************************************************************/ 546 void dev_nic_tx_server( struct chdev_s * chdev ); 547 548 549 /****************************************************************************************** 550 * This function is executed by the server thread associated to a NIC_RX[channel] chdev. 551 * This RX server thread is created by the dev_nic_init() function. 552 * It handles all UDP packets or TCP segments received by the sockets attached to 553 * the NIC_RX[channel] chdev. It writes the received data in the socket rcv_buf, and 554 * unblocks the client thread waiting on a RECV command. 555 * To implement the three-step handshake required by a TCP connection, it posts direct 556 * requests to the TX server, using the R2T queue attached to the involved socket. 557 * It blocks on the THREAD_BLOCKED_ISR condition and deschedules when the NIC_RX_QUEUE 558 * is empty. 
It is re-activated by the NIC_RX_ISR, when the queue becomes non-empty. 559 ****************************************************************************************** 560 * Implementation note: 561 * It executes an infinite loop in which it extracts one packet from the NIC_RX_QUEUE 562 * of received packets, copies this packet in a local 2 kbytes kernel buffer, checks 563 * the Ethernet header, checks the IP header, calls the relevant (TCP or UDP) transport 564 * protocol that searches a matching socket for the received packet. It copies the payload 565 * to the relevant socket rcv_buf when the packet is acceptable, and unblocks the client 566 * thread. It discards the packet if no matching socket is found. 567 ****************************************************************************************** 568 * @ chdev : [in] local pointer on one local NIC_RX[channel] chdev descriptor. 569 *****************************************************************************************/ 570 void dev_nic_rx_server( struct chdev_s * chdev ); 229 571 230 572 #endif /* _DEV_NIC_H */ -
trunk/kernel/devices/dev_txt.c
r647 r657 133 133 } // end dev_txt_init() 134 134 135 //////////////////////////////////////////////////////////////////////////////////136 // This static function is called by dev_txt_read(), dev_txt_write() functions.137 ////////////////////////////////////i/////////////////////////////////////////////138 static error_t dev_txt_access( uint32_t type,139 uint32_t channel,140 char * buffer,141 uint32_t count )142 {143 xptr_t dev_xp;144 thread_t * this = CURRENT_THREAD;145 146 // check channel argument147 assert( (channel < CONFIG_MAX_TXT_CHANNELS) , "illegal channel index" );148 149 // get extended pointer on remote TXT chdev descriptor150 if( type == TXT_WRITE ) dev_xp = chdev_dir.txt_tx[channel];151 else dev_xp = chdev_dir.txt_rx[channel];152 153 assert( (dev_xp != XPTR_NULL) , "undefined TXT chdev descriptor" );154 155 // register command in calling thread descriptor156 this->txt_cmd.dev_xp = dev_xp;157 this->txt_cmd.type = type;158 this->txt_cmd.buf_xp = XPTR( local_cxy , buffer );159 this->txt_cmd.count = count;160 161 // register client thread in waiting queue, activate server thread162 // block client thread on THREAD_BLOCKED_IO and deschedule.163 // it is re-activated by the ISR signaling IO operation completion.164 chdev_register_command( dev_xp );165 166 // return I/O operation status from calling thread descriptor167 return this->txt_cmd.error;168 169 } // end dev_txt_access()170 171 135 ///////////////////////////////////////// 172 136 error_t dev_txt_write( uint32_t channel, … … 180 144 #endif 181 145 146 thread_t * this = CURRENT_THREAD; 147 182 148 #if DEBUG_DEV_TXT_TX 183 thread_t * this = CURRENT_THREAD;184 149 uint32_t cycle = (uint32_t)hal_get_cycles(); 185 150 if( DEBUG_DEV_TXT_TX < cycle ) … … 188 153 #endif 189 154 155 // check channel argument 156 assert( (channel < CONFIG_MAX_TXT_CHANNELS) , "illegal channel index" ); 157 158 // get pointers on chdev 159 xptr_t dev_xp = chdev_dir.txt_tx[channel]; 160 cxy_t dev_cxy = GET_CXY( dev_xp ); 161 
chdev_t * dev_ptr = GET_PTR( dev_xp ); 162 163 // check dev_xp 164 assert( (dev_xp != XPTR_NULL) , "undefined TXT chdev descriptor" ); 165 190 166 // If we use MTTY (vci_multi_tty), we do a synchronous write on TXT[0] 191 167 // If we use TTY (vci_tty_tsar), we do a standard asynchronous write 192 168 // TODO this is not very clean ... [AG] 193 169 194 // get pointers on chdev 195 xptr_t dev_xp = chdev_dir.txt_tx[0]; 196 cxy_t dev_cxy = GET_CXY( dev_xp ); 197 chdev_t * dev_ptr = GET_PTR( dev_xp ); 198 199 if( dev_ptr->impl == IMPL_TXT_MTY ) 170 if( dev_ptr->impl == IMPL_TXT_MTY ) 200 171 { 201 172 // get driver command function … … 216 187 else 217 188 { 218 // register command in chdev queue for an asynchronous access 219 error = dev_txt_access( TXT_WRITE , channel , buffer , count ); 189 // register command in calling thread descriptor 190 this->txt_cmd.dev_xp = dev_xp; 191 this->txt_cmd.type = TXT_WRITE; 192 this->txt_cmd.buf_xp = XPTR( local_cxy , buffer ); 193 this->txt_cmd.count = count; 194 195 // register client thread in waiting queue, activate server thread 196 // block client thread on THREAD_BLOCKED_IO and deschedule. 197 // it is re-activated by the ISR signaling IO operation completion. 
198 chdev_register_command( dev_xp ); 199 200 // get I/O operation status from calling thread descriptor 201 error = this->txt_cmd.error; 220 202 221 203 if( error ) … … 251 233 #endif 252 234 235 thread_t * this = CURRENT_THREAD; 236 253 237 #if DEBUG_DEV_TXT_RX 254 thread_t * this = CURRENT_THREAD;255 238 uint32_t cycle = (uint32_t)hal_get_cycles(); 256 239 if( DEBUG_DEV_TXT_RX < cycle ) … … 259 242 #endif 260 243 261 // register command in chdev queue for an asynchronous access 262 error = dev_txt_access( TXT_READ , channel , buffer , 1 ); 244 // check channel argument 245 assert( (channel < CONFIG_MAX_TXT_CHANNELS) , "illegal channel index" ); 246 247 // get pointers on chdev 248 xptr_t dev_xp = chdev_dir.txt_rx[channel]; 249 250 // check dev_xp 251 assert( (dev_xp != XPTR_NULL) , "undefined TXT chdev descriptor" ); 252 253 // register command in calling thread descriptor 254 this->txt_cmd.dev_xp = dev_xp; 255 this->txt_cmd.type = TXT_READ; 256 this->txt_cmd.buf_xp = XPTR( local_cxy , buffer ); 257 this->txt_cmd.count = 1; 258 259 // register client thread in waiting queue, activate server thread 260 // block client thread on THREAD_BLOCKED_IO and deschedule. 261 // it is re-activated by the ISR signaling IO operation completion. 262 chdev_register_command( dev_xp ); 263 264 // get I/O operation status from calling thread descriptor 265 error = this->txt_cmd.error; 263 266 264 267 if( error ) 265 268 { 266 printk("\n[ERROR] in %s : cannot get character/ cycle %d\n",267 __FUNCTION__, (uint32_t)hal_get_cycles() );269 printk("\n[ERROR] in %s : cannot write string %s / cycle %d\n", 270 __FUNCTION__, buffer, (uint32_t)hal_get_cycles() ); 268 271 } 269 272 -
trunk/kernel/fs/devfs.c
r637 r657 3 3 * 4 4 * Author Mohamed Lamine Karaoui (2014,2015) 5 * Alain Greiner (2016,2017,2018,2019 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) Sorbonne Universites … … 53 53 #endif 54 54 55 /////////////////////////////////// //56 devfs_ctx_t * devfs_ctx_alloc( void)55 /////////////////////////////////// 56 xptr_t devfs_ctx_alloc( cxy_t cxy ) 57 57 { 58 58 kmem_req_t req; … … 62 62 req.flags = AF_KERNEL | AF_ZERO; 63 63 64 return kmem_alloc( &req ); 64 // allocates devfs context from target cluster 65 return XPTR( cxy , kmem_remote_alloc( cxy , &req ) ); 65 66 } 66 67 67 68 ///////////////////////////////////////////// 68 void devfs_ctx_init( devfs_ctx_t * devfs_ctx,69 xptr_t 70 xptr_t 69 void devfs_ctx_init( xptr_t devfs_ctx_xp, 70 xptr_t devfs_dev_inode_xp, 71 xptr_t devfs_external_inode_xp ) 71 72 { 72 devfs_ctx->dev_inode_xp = devfs_dev_inode_xp; 73 devfs_ctx->external_inode_xp = devfs_external_inode_xp; 74 75 fs_context[FS_TYPE_DEVFS].extend = devfs_ctx; 73 // get cluster and local pointer on remote devfs context 74 devfs_ctx_t * devfs_ctx_ptr = GET_PTR( devfs_ctx_xp ); 75 cxy_t devfs_ctx_cxy = GET_CXY( devfs_ctx_xp ); 76 77 // set values in remote devfs context 78 hal_remote_s64( XPTR( devfs_ctx_cxy , &devfs_ctx_ptr->dev_inode_xp ), 79 devfs_dev_inode_xp ); 80 81 hal_remote_s64( XPTR( devfs_ctx_cxy , &devfs_ctx_ptr->external_inode_xp ), 82 devfs_external_inode_xp ); 83 84 // register devfs context in the remote fs_context array[] 85 hal_remote_spt( XPTR( devfs_ctx_cxy , &fs_context[FS_TYPE_DEVFS].extend ), 86 devfs_ctx_ptr ); 76 87 } 77 88 78 ////////////////////////////////////////////// ///79 void devfs_ctx_destroy( devfs_ctx_t * devfs_ctx)89 ////////////////////////////////////////////// 90 void devfs_ctx_destroy( xptr_t devfs_ctx_xp ) 80 91 { 81 92 kmem_req_t req; 82 93 94 // get cluster and local pointer on devfs context 95 devfs_ctx_t * devfs_ctx_ptr = GET_PTR( devfs_ctx_xp ); 96 cxy_t devfs_ctx_cxy = GET_CXY( 
devfs_ctx_xp ); 97 83 98 req.type = KMEM_KCM; 84 req.ptr = devfs_ctx; 85 kmem_free( &req ); 99 req.ptr = devfs_ctx_ptr; 100 101 // release devfs context descriptor to remote cluster 102 kmem_remote_free( devfs_ctx_cxy , &req ); 86 103 } 87 104 … … 95 112 vfs_inode_t * inode; 96 113 114 // get 115 97 116 // create DEVFS "dev" inode in cluster 0 98 117 error = vfs_add_child_in_parent( 0, // cxy … … 158 177 /////////////////////////////////////////////////// 159 178 void devfs_local_init( xptr_t devfs_dev_inode_xp, 160 xptr_t devfs_external_inode_xp, 161 xptr_t * devfs_internal_inode_xp ) 179 xptr_t devfs_external_inode_xp ) 162 180 { 181 xptr_t internal_inode_xp; // extended pointer on <internal> inode 182 vfs_inode_t * internal_inode_ptr; // local pointer on <internal> inode 163 183 char node_name[16]; 164 184 xptr_t chdev_xp; … … 168 188 vfs_inode_t * inode_ptr; 169 189 uint32_t channel; 170 xptr_t unused_xp; // required by add_child_in_parent()190 xptr_t unused_xp; // required by add_child_in_parent() 171 191 error_t error; 192 172 193 173 194 #if DEBUG_DEVFS_LOCAL_INIT … … 179 200 #endif 180 201 181 // create "internal"directory202 // create <internal> directory 182 203 snprintf( node_name , 16 , "internal_%x" , local_cxy ); 183 204 184 error = vfs_add_child_in_parent( local_cxy, 185 FS_TYPE_DEVFS, 186 devfs_dev_inode_xp, 187 node_name, 205 error = vfs_add_child_in_parent( local_cxy, // target cluster 206 FS_TYPE_DEVFS, // FS type 207 devfs_dev_inode_xp, // parent inode 208 node_name, // child name 188 209 &unused_xp, 189 devfs_internal_inode_xp );190 191 // set inode "type" field192 in ode_ptr = GET_PTR( *devfs_internal_inode_xp );193 in ode_ptr->type = INODE_TYPE_DEV;210 &internal_inode_xp ); // child inode 211 212 // set <internal> inode "type" field 213 internal_inode_ptr = GET_PTR( internal_inode_xp ); 214 internal_inode_ptr->type = INODE_TYPE_DEV; 194 215 195 216 // create dentries <.> and <..> in <internal> 196 error |= vfs_add_special_dentries( 
*devfs_internal_inode_xp,217 error |= vfs_add_special_dentries( internal_inode_xp, 197 218 devfs_dev_inode_xp ); 198 219 … … 226 247 error = vfs_add_child_in_parent( local_cxy, 227 248 FS_TYPE_DEVFS, 228 *devfs_internal_inode_xp,249 internal_inode_xp, 229 250 chdev_ptr->name, 230 251 &unused_xp, … … 270 291 error = vfs_add_child_in_parent( local_cxy, 271 292 FS_TYPE_DEVFS, 272 *devfs_internal_inode_xp,293 internal_inode_xp, 273 294 chdev_ptr->name, 274 295 &unused_xp, -
trunk/kernel/fs/devfs.h
r612 r657 3 3 * 4 4 * Authors Mohamed Lamine Karaoui (2014,2015) 5 * Alain Greiner (2016,2017 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) 2011,2012 UPMC Sorbonne Universites … 26 26 #define _DEVFS_H_ 27 27 28 28 29 ////////////////////////////////////////////////////////////////////////////////////////// 29 // The DEVFS File System contains inodes and dentries associated to all chdev descriptors30 // availablesin the architecture.30 // The DEVFS File System contains the inodes and dentries associated to the chdev 31 // descriptors available in the architecture. 32 // 32 33 // It is structured as a three levels tree structure : … 41 42 // 42 43 // The DEVFS extensions to the generic VFS are the following: 43 // 1) The vfs_ctx_t "extend" void* field is pointing on the devfs_ctx_t structure. 44 // This structure contains two extended pointers on the DEVFS "dev" directory inode, 45 // and on the "external" directory inode. 46 // 2) The vfs_inode_t "extend" void* field is pointing on the chdev descriptor. 44 // 1) The vfs_ctx_t "extend" field is pointing on the devfs_ctx_t structure. 45 // 2) The vfs_inode_t "extend" field is pointing on the chdev descriptor. 47 46 ////////////////////////////////////////////////////////////////////////////////////////// 48 47 … 57 56 } 58 57 devfs_ctx_t; 59 60 61 58 /***************************************************************************************** 62 * This function allocates memory from local cluster for a DEVFS context descriptor. 59 * This function allocates memory for a DEVFS context descriptor in cluster identified 60 * by the <cxy> argument. 63 61 ***************************************************************************************** 64 * @ return a pointer on the created context / return NULL if failure. 62 * @ cxy : [in] target cluster identifier. 63 * @ return an extended pointer on the created context / return NULL if failure. 
65 64 ****************************************************************************************/ 66 devfs_ctx_t * devfs_ctx_alloc( void);65 xptr_t devfs_ctx_alloc( cxy_t cxy ); 67 66 68 67 /***************************************************************************************** … … 70 69 * to the relevant VFS context in the local cluster. 71 70 ***************************************************************************************** 72 * @ devfs_ctx : localpointer on DEVFS context.71 * @ devfs_ctx : [in] extended pointer on DEVFS context. 73 72 * @ devfs_dev_inode_xp : [out] extended pointer on <dev> inode. 74 73 * @ devfs_external_inode_xp : [out] extended pointer on <external> inode. 75 74 ****************************************************************************************/ 76 void devfs_ctx_init( devfs_ctx_t * devfs_ctx,77 xptr_t 78 xptr_t 75 void devfs_ctx_init( xptr_t devfs_ctx_xp, 76 xptr_t devfs_dev_inode_xp, 77 xptr_t devfs_external_inode_xp ); 79 78 80 79 /***************************************************************************************** … … 83 82 * @ devfs_ctx : local pointer on DEVFS context. 84 83 ****************************************************************************************/ 85 void devfs_ctx_destroy( devfs_ctx_t * devfs_ctx);84 void devfs_ctx_destroy( xptr_t devfs_ctx_xp ); 86 85 87 86 /***************************************************************************************** 88 * This function start to create the DEVFS subtree. 89 * This function should be called once in the cluster containing the VFS parent inode. 90 * More precisely, it creates in cluster 0 the "dev" and "external" DEVFS directories. 91 * For each one, it creates the inode and link the associated dentry to parent inode. 92 * The DEVFS root inode is linked to the VFS parent inode identified by <parent_inode_xp>. 87 * This function starts to create the DEVFS subtree. 88 * This function creates in cluster 0 the "dev" and "external" DEVFS directories. 
 89 * For each one, it creates the inode and the associated dentry. The DEVFS root inode 90 * <dev> is linked to the VFS parent inode identified by <parent_inode_xp>. 93 91 ***************************************************************************************** 94 92 * @ parent_inode_xp : [in] extended pointer on the parent VFS inode. 95 93 * @ devfs_dev_inode_xp : [out] extended pointer on created <dev> inode. 96 94 * @ devfs_external_inode_xp : [out] extended pointer on created <external> inode. … 105 103 * 1. In each cluster (i), it creates the "internal" directory, 106 104 * linked to the DEVFS "dev" parent directory. 107 * 2. In each cluster (i), it creates - for each external chdev in cluster (i) -105 * 2. In each cluster (i), it creates, for each external chdev in cluster (i), 108 106 * a pseudo-file, linked to the DEVFS "external" parent directory. 109 * 3. In each cluster (i), it creates - for each internal chdev in cluster (i) -107 * 3. In each cluster (i), it creates, for each internal chdev in cluster (i), 110 108 * a pseudo-file, linked to the DEVFS "internal" parent directory. 111 109 ***************************************************************************************** 112 * @ devfs_dev_inode_xp : extended pointer on DEVFS root inode. 113 * @ devfs_external_inode_xp : extended pointer on DEVFS external inode. 114 * @ devfs_internal_inode_xp : [out] extended pointer on created <internal> inode. 110 * @ devfs_dev_inode_xp : [in] extended pointer on DEVFS root inode. 111 * @ devfs_external_inode_xp : [in] extended pointer on DEVFS external inode. 
115 112 ****************************************************************************************/ 116 113 void devfs_local_init( xptr_t devfs_dev_inode_xp, 117 xptr_t devfs_external_inode_xp, 118 xptr_t * devfs_internal_inode_xp ); 114 xptr_t devfs_external_inode_xp ); 119 115 120 116 /****************************************************************************************** -
trunk/kernel/fs/fatfs.c
r656 r657 2 2 * fatfs.c - FATFS file system API implementation. 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019 )4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 122 122 for( i = nbytes ; i > 0 ; i-- ) 123 123 { 124 res = (res<<8) | hal_remote_lb( buffer_xp +offset+i-1 );124 res = (res<<8) | hal_remote_lb( buffer_xp + offset + i-1 ); 125 125 } 126 126 } … … 129 129 for( i = 0 ; i < nbytes ; i++ ) 130 130 { 131 res = (res<<8) | hal_remote_lb( buffer_xp +offset+i );131 res = (res<<8) | hal_remote_lb( buffer_xp + offset + i ); 132 132 } 133 133 } … … 135 135 136 136 } // end fatfs_get_remote_record() 137 138 /* 137 139 138 140 ////////////////////////////////////////////////////////////////////////////////////////// … … 164 166 } // end fatfs_set_record() 165 167 168 */ 169 166 170 ////////////////////////////////////////////////////////////////////////////////////////// 167 171 // This function writes one, two, or four bytes from a 32 bits integer to a remote … … 185 189 for( i = nbytes ; i > 0 ; i-- ) 186 190 { 187 hal_remote_sb( (buffer_xp +offset+i-1) , (uint8_t)(value>>((i-1)<<3)) );191 hal_remote_sb( (buffer_xp + offset + i-1 ) , (uint8_t)(value>>((i-1)<<3)) ); 188 192 } 189 193 } … … 192 196 for( i = 0 ; i < nbytes ; i++ ) 193 197 { 194 hal_remote_sb( (buffer_xp +offset+i) , (uint8_t)(value>>((nbytes-1-i)<<3)) );198 hal_remote_sb( (buffer_xp + offset + i) , (uint8_t)(value>>((nbytes-1-i)<<3)) ); 195 199 } 196 200 } … … 470 474 cxy_t fat_cxy; // FAT cluster identifier 471 475 fatfs_ctx_t * fatfs_ctx; // local pointer on FATFS context in FAT cluster 472 xptr_t fat_mapper_xp; // extended pointer on FAT mapper473 476 mapper_t * fat_mapper_ptr; // local pointer on FAT mapper 474 477 uint32_t page_id; // current page index in FAT mapper … … 482 485 fatfs_ctx = GET_PTR( fatfs_ctx_xp ); 483 486 484 // get FAT mapper pointers from FATFS context 485 fat_mapper_xp = hal_remote_l64( XPTR( fat_cxy , 
&fatfs_ctx->fat_mapper_xp ) ); 486 fat_mapper_ptr = GET_PTR( fat_mapper_xp ); 487 488 // check FAT cluster 489 assert( (fat_cxy == GET_CXY( fat_mapper_xp )) , "unconsistent FAT cluster" ); 487 // get FAT mapper pointer from FATFS context 488 fat_mapper_ptr = hal_remote_lpt( XPTR( fat_cxy , &fatfs_ctx->fat_mapper ) ); 490 489 491 490 // build extended pointer on FAT mapper radix tree … … 575 574 576 575 // update the FS_INFO sector on IOC device 577 return dev_ioc_ move_data( IOC_SYNC_WRITE ,fs_info_buffer_xp , fs_info_lba , 1 );576 return dev_ioc_sync_write( fs_info_buffer_xp , fs_info_lba , 1 ); 578 577 579 578 } // end fatfs_update_ioc_fsinfo() … … 581 580 ////////////////////////////////////////////////////////////////////////////////////////// 582 581 // This static function decrements the "free_clusters" variable, and updates the 583 // "free_cluster_hint" variable in the FATFS context in FAT cluster, identified 584 // by the <fat_ctx_xp> argument, when a new <cluster> has been allocated from FAT. 582 // "free_cluster_hint" variable in the FATFS context in FAT cluster, when a new 583 // <cluster_id> has been allocated from the FAT. 584 // It synchronously updates the FS_INFO sector on the IOC device. 585 // The FATFS context in FAT cluster is identified by the <fat_ctx_xp> argument. 586 // It can be called by a thead running in any cluster. 585 587 // It scan all slots in the FAT mapper seen as an array of 32 bits words, looking for the 586 // first free slot larger than the <cluster> argument, to update "free_cluster_hint". 587 // It synchronously updates the FS_INFO sector on the IOC device. 588 // It can be called by a thead running in any cluster. 588 // first free slot larger than the <cluster_id>. 589 589 // The lock protecting exclusive access to the FAT must be taken by the calling function. 
590 590 ////////////////////////////////////////////////////////////////////////////////////////// 591 591 // @ fatfs_ctx_xp : extended pointer on FATFS context in FAT cluster. 592 // @ cluster 592 // @ cluster_id : recently allocated cluster index in FAT. 593 593 // @ return 0 if success, return -1 if the FS_INFO sector cannot be updated. 594 594 ////////////////////////////////////////////////////////////////////////////////////////// 595 595 static error_t fatfs_free_clusters_decrement( xptr_t fatfs_ctx_xp, 596 uint32_t cluster )596 uint32_t cluster_id ) 597 597 { 598 598 error_t error; 599 cxy_t fat_cxy; // FAT cluster identifier600 fatfs_ctx_t * fat_ctx_ptr; // local pointer on fatfs context in FAT cluster601 xptr_t fat_mapper_xp; // extendedpointer on FAT mapper602 xptr_t hint_xp; // extended pointer on "free_cluster_hint" shared variable603 xptr_t numb_xp; // extended pointer on "free_clusters" shared variable604 uint32_t numb; // "free_clusters" variable current value605 uint32_t hint; // "free_cluster_hint" variable current value606 uint32_t page_id; // page index in FAT mapper607 uint32_t slot_id; // slot index in one page of FAT (1024 slots per page)608 uint32_t page_max; // max number of pagesin FAT mapper609 xptr_t page_xp; // extended pointer on current page in FAT mapper610 xptr_t base_xp; // extended pointer on current page base611 xptr_t slot_xp; // extended pointer on current slot in FAT mapper599 cxy_t fat_cxy; // FAT cluster identifier 600 fatfs_ctx_t * fat_ctx_ptr; // local pointer on fatfs context in FAT cluster 601 mapper_t * fat_mapper_ptr; // local pointer on FAT mapper 602 xptr_t fat_mapper_xp; // extended pointer on FAT mapper 603 xptr_t hint_xp; // extended pointer on "free_cluster_hint" shared variable 604 xptr_t numb_xp; // extended pointer on "free_clusters" shared variable 605 uint32_t page_id; // page index in FAT mapper 606 uint32_t slot_id; // slot index in one page of FAT (1024 slots per page) 607 uint32_t page_max; // max number 
of pages in FAT mapper 608 xptr_t page_xp; // extended pointer on current page in FAT mapper 609 xptr_t base_xp; // extended pointer on current page base 610 xptr_t slot_xp; // extended pointer on current slot in FAT mapper 611 uint32_t found; // free slot found when non zero 612 612 613 613 #if DEBUG_FATFS_FREE_CLUSTERS … … 615 615 thread_t * this = CURRENT_THREAD; 616 616 if( DEBUG_FATFS_FREE_CLUSTERS < cycle ) 617 printk("\n[%s] thread[%x,%x] enter for allocated cluster %x / cycle %d\n",618 __FUNCTION__, this->process->pid, this->trdid, cluster , cycle );617 printk("\n[%s] thread[%x,%x] enter for allocated cluster_id %x / cycle %d\n", 618 __FUNCTION__, this->process->pid, this->trdid, cluster_id , cycle ); 619 619 #endif 620 620 … … 627 627 numb_xp = XPTR( fat_cxy , &fat_ctx_ptr->free_clusters ); 628 628 629 // update "free_clusters" value 630 numb = hal_remote_l32( numb_xp ) - 1; 631 hal_remote_s32( numb_xp , numb ); 632 633 // get extended pointer on FAT mapper 634 fat_mapper_xp = hal_remote_l64( XPTR( fat_cxy , &fat_ctx_ptr->fat_mapper_xp ) ); 635 636 // initialise variables to scan the FAT mapper 637 // and find the first free slot > cluster 638 page_id = (cluster + 1) >> 10; 639 slot_id = (cluster + 1) & 0x3FF; 629 // get pointers on FAT mapper from FATFS context 630 fat_mapper_ptr = hal_remote_lpt( XPTR( fat_cxy , &fat_ctx_ptr->fat_mapper ) ); 631 fat_mapper_xp = XPTR( fat_cxy , fat_mapper_ptr ); 632 633 // initialise the loop variables to scan the FAT mapper 634 page_id = (cluster_id + 1) >> 10; 635 slot_id = (cluster_id + 1) & 0x3FF; 640 636 page_max = hal_remote_l32( XPTR( fat_cxy, &fat_ctx_ptr->fat_sectors_count ) ) >> 3; 641 642 // scan FAT mapper / loop on pages 643 while ( page_id < page_max ) 637 found = 0; 638 639 // scan FAT mapper : first loop on pages 640 while ( (page_id < page_max) && (found == 0) ) 644 641 { 645 642 // get current page from mapper 646 page_xp = mapper_ remote_get_page( fat_mapper_xp , page_id );643 page_xp = 
mapper_get_fat_page( fat_mapper_xp , page_id ); 647 644 648 645 if( page_xp == XPTR_NULL ) … … 655 652 base_xp = ppm_page2base( page_xp ); 656 653 657 // scan FAT mapper /loop on slots658 while ( slot_id < 1024)654 // scan the FAT mapper : second loop on slots 655 while ( (slot_id < 1024) && (found == 0) ) 659 656 { 660 657 // get extended pointer on current slot … … 664 661 if ( hal_remote_l32( slot_xp ) == FREE_CLUSTER ) 665 662 { 666 // update "free_cluster_hint" value 667 hint = (page_id << 10) + slot_id - 1; 668 hal_remote_s32( hint_xp , hint ); 669 670 // update FS_INFO sector on IOC device 671 error = fatfs_update_ioc_fat( fatfs_ctx_xp, 672 page_id, 673 page_id ); 674 675 if( error ) 676 { 677 printk("\n[ERROR] in %s : cannot update FS_INFO on IOC\n", __FUNCTION__ ); 678 return -1; 679 } 663 // exit both loops 664 found = 1; 665 } 666 else 667 { 668 // update slot_id if not found 669 slot_id++; 670 } 671 } // end loop on slots 672 673 // update page_id & slot_id variables if not found 674 if( found == 0 ) 675 { 676 page_id++; 677 slot_id = 0; 678 } 679 } // end loop on pages 680 681 if( found ) // free cluster found 682 { 683 // update "free_clusters" and "free_cluster_hint" value in FATFS context 684 hal_remote_atomic_add( numb_xp , -1 ); 685 hal_remote_s32( hint_xp , (page_id << 10) + slot_id - 1 ); 686 687 // update FS_INFO sector on IOC device 688 error = fatfs_update_ioc_fsinfo( fatfs_ctx_xp ); 689 690 if( error ) 691 { 692 printk("\n[ERROR] in %s : cannot update FS_INFO on IOC\n", __FUNCTION__ ); 693 return -1; 694 } 680 695 681 696 #if DEBUG_FATFS_FREE_CLUSTERS 682 cycle = (uint32_t)hal_get_cycles(); 683 if( DEBUG_FATFS_FREE_CLUSTERS < (uint32_t)hal_get_cycles() ) 684 printk("\n[%s] thread[%x,%x] exit / hint %x / free %x / cycle %d\n", 697 if( DEBUG_FATFS_FREE_CLUSTERS < cycle ) 698 printk("\n[%s] thread[%x,%x] exit / hint %x / free %x\n", 685 699 __FUNCTION__, this->process->pid, this->trdid, 686 hal_remote_l32(hint_xp), hal_remote_l32(numb_xp), 
cycle ); 687 #endif 688 return 0; 689 } 690 691 // update slot_id 692 slot_id = 0; 693 694 } // end loop on slots 695 696 // update page_id & slot_id variables 697 page_id++; 698 slot_id = 0; 699 700 } // end loop on pages 701 702 // return error if no free cluster found 703 printk("\n[ERROR] in %s : No free cluster found\n", __FUNCTION__ ); 704 return -1; 700 hal_remote_l32(hint_xp), hal_remote_l32(numb_xp) ); 701 #endif 702 return 0; 703 } 704 else // free cluster not found 705 { 706 printk("\n[ERROR] in %s : No free cluster_id found\n", __FUNCTION__ ); 707 return -1; 708 } 705 709 706 710 } // end fatfs_free_clusters_decrement() … … 709 713 // This static function increments the "free_clusters" variable, and updates the 710 714 // "free_cluster_hint" variables in the FATFS context in FAT cluster, identified 711 // by the <fat_ctx_xp> argument, when a FATFS cluster isreleased.715 // by the <fat_ctx_xp> argument, when a FATFS <cluster_id> has been released. 712 716 // If the released cluster index is smaller than the current (hint) value, 713 717 // it set "free_cluster_hint" <= cluster. 714 // It does NOT update the FS_INFO sector on the IOC device. 718 // It does NOT update the FS_INFO sector on the IOC device, as this is done by the 719 // calling fatfs_release_inode() function. 715 720 // It can be called by a thead running in any cluster. 716 721 // The lock protecting exclusive access to the FAT must be taken by the calling function. 717 722 ////////////////////////////////////////////////////////////////////////////////////////// 718 723 // @ fatfs_ctx_xp : extended pointer on FATFS context in FAT cluster. 719 // @ cluster 724 // @ cluster_id : recently released cluster index in FAT. 720 725 // @ return 0 if success, return -1 if the FS_INFO sector cannot be updated. 
721 726 ////////////////////////////////////////////////////////////////////////////////////////// 722 727 static error_t fatfs_free_clusters_increment( xptr_t fatfs_ctx_xp, 723 uint32_t cluster ) 724 { 725 error_t error; 728 uint32_t cluster_id ) 729 { 726 730 cxy_t fat_cxy; // FAT cluster identifier 727 731 fatfs_ctx_t * fat_ctx_ptr; // local pointer on fatfs context in FAT cluster … … 735 739 thread_t * this = CURRENT_THREAD; 736 740 if( DEBUG_FATFS_FREE_CLUSTERS < cycle ) 737 printk("\n[%s] thread[%x,%x] enter for released cluster %x / cycle %d\n",738 __FUNCTION__, this->process->pid, this->trdid, cluster , cycle );741 printk("\n[%s] thread[%x,%x] enter for released cluster_id %x / cycle %d\n", 742 __FUNCTION__, this->process->pid, this->trdid, cluster_id , cycle ); 739 743 #endif 740 744 … … 753 757 // update "numb" and "hint" variables as required 754 758 numb++; 755 if ( (cluster - 1) < hint ) hint = cluster- 1;759 if ( (cluster_id - 1) < hint ) hint = cluster_id - 1; 756 760 757 761 // update free_clusters … … 759 763 hal_remote_s32( hint_xp , hint ); 760 764 761 // update FS_INFO sector on IOC device762 error = fatfs_update_ioc_fsinfo( fatfs_ctx_xp );763 764 if( error )765 {766 printk("\n[ERROR] in %s : cannot update FS_INFO on IOC\n", __FUNCTION__ );767 return -1;768 }769 770 765 #if DEBUG_FATFS_FREE_CLUSTERS 771 cycle = (uint32_t)hal_get_cycles(); 772 if( DEBUG_FATFS_FREE_CLUSTERS < (uint32_t)hal_get_cycles() ) 773 printk("\n[%s] thread[%x,%x] exit / hint %x / free %x / cycle %d\n", 766 if( DEBUG_FATFS_FREE_CLUSTERS < cycle ) 767 printk("\n[%s] thread[%x,%x] exit / hint %x / free %x\n", 774 768 __FUNCTION__, this->process->pid, this->trdid, 775 hal_remote_l32( hint_xp ), hal_remote_l32( numb_xp ) , cycle);769 hal_remote_l32( hint_xp ), hal_remote_l32( numb_xp ) ); 776 770 #endif 777 771 … … 853 847 854 848 ////////////////////////////////////////////////////////////////////////////////////////// 849 // This static function access the FAT mapper to 
allocate a new cluster in the FATFS, 850 // and returns in <searched_cluster_id> the FATFS cluster index of a free cluster. 851 // It updates the FAT mapper (handling miss from IOC device if required) : 852 // - if the <last_cluster_id> is zero, the new cluster is the first allocated cluster, 853 // and the <searched_cluster_id> FAT slot is set to END_OF_CHAIN_CLUSTER. 854 // - if the <last_cluster_id> argument is not zero, the new cluster is not the first, 855 // the <last_cluster_id> FAT slot is set to <searched_cluster_id>, 856 // the <searched_cluster_id> FAT slot is set to END_OF_CHAIN_CLUSTER. 857 // This function also updates the two "free_cluster_hint" and "free_clusters" variables 858 // stored in the FATFS context. It takes the rwlock stored in the FATFS context in the 859 // FAT cluster to get exclusive access to the FAT. 860 // This function synchronously updates the FAT region on IOC device. 861 // It can be called by a thread running in any cluster as it uses remote accesses. 862 ////////////////////////////////////////////////////////////////////////////////////////// 863 // @ last_cluster_id : [in] previous last cluster index. 864 // @ searched_cluster_id : [out] allocated cluster index. 865 // @ return 0 if success / return -1 if no more free clusters on IOC device. 
866 ////////////////////////////////////////////////////////////////////////////////////////// 867 static error_t fatfs_cluster_alloc( uint32_t last_cluster_id, 868 uint32_t * searched_cluster_id ) 869 { 870 error_t error; 871 uint32_t free_clusters; // total number of free clusters 872 uint32_t hint; // hint + 1 is the first free cluster 873 uint32_t new_cluster_id; // allocated cluster index in FAT 874 uint32_t new_page_id; // allocated cluster page index in FAT mapper 875 uint32_t new_slot_id; // allocated cluster slot index in page (1024 slots per page) 876 xptr_t new_page_xp; // extended pointer on FAT page for allocated cluster 877 xptr_t new_slot_xp; // extended pointer on allocated cluster slot in FAT 878 uint32_t last_page_id; // last cluster page index in FAT mapper 879 uint32_t last_slot_id; // last cluster slot index in page (1024 slots per page) 880 xptr_t last_slot_xp; // extended pointer on last cluster slot in FAT 881 xptr_t last_page_xp; // extended pointer on FAT page for last cluster 882 vfs_ctx_t * vfs_ctx; // local pointer on VFS context (same in all clusters) 883 fatfs_ctx_t * loc_fatfs_ctx; // local pointer on local FATFS context 884 fatfs_ctx_t * fat_fatfs_ctx; // local pointer on FATFS context in FAT cluster 885 mapper_t * fat_mapper_ptr; // local pointer on FAT mapper 886 xptr_t fat_mapper_xp; // extended pointer on FAT mapper 887 cxy_t fat_cxy; // FAT mapper cluster identifier 888 xptr_t lock_xp; // extended pointer on lock protecting free clusters info 889 xptr_t hint_xp; // extended pointer on free_cluster_hint in FAT cluster 890 xptr_t free_xp; // extended pointer on free_clusters_number in FAT cluster 891 892 #if DEBUG_FATFS_CLUSTER_ALLOC 893 uint32_t cycle = (uint32_t)hal_get_cycles(); 894 thread_t * this = CURRENT_THREAD; 895 if( DEBUG_FATFS_CLUSTER_ALLOC < cycle ) 896 printk("\n[%s] thread[%x,%x] enter / lats_cluster_id %x / cycle = %d\n", 897 __FUNCTION__, this->process->pid, this->trdid, last_cluster_id, cycle ); 898 #endif 899 
900 // get local pointer on VFS context (same in all clusters) 901 vfs_ctx = &fs_context[FS_TYPE_FATFS]; 902 903 // get local pointer on local FATFS context 904 loc_fatfs_ctx = vfs_ctx->extend; 905 906 // get FAT cluster 907 fat_cxy = CONFIG_VFS_ROOT_CXY; 908 909 // get pointers on FAT mapper 910 fat_mapper_ptr = loc_fatfs_ctx->fat_mapper; 911 fat_mapper_xp = XPTR( fat_cxy , fat_mapper_ptr ); 912 913 // get local pointer on FATFS context in FAT cluster 914 fat_fatfs_ctx = hal_remote_lpt( XPTR( fat_cxy , &vfs_ctx->extend ) ); 915 916 // build relevant extended pointers on free clusters info in FAT cluster 917 lock_xp = XPTR( fat_cxy , &fat_fatfs_ctx->lock ); 918 hint_xp = XPTR( fat_cxy , &fat_fatfs_ctx->free_cluster_hint ); 919 free_xp = XPTR( fat_cxy , &fat_fatfs_ctx->free_clusters ); 920 921 // take the FAT lock in write mode 922 remote_rwlock_wr_acquire( lock_xp ); 923 924 // get hint and free_clusters values from FATFS context in FAT cluster 925 hint = hal_remote_l32( hint_xp ); 926 free_clusters = hal_remote_l32( free_xp ); 927 928 #if (DEBUG_FATFS_CLUSTER_ALLOC & 1) 929 if( DEBUG_FATFS_CLUSTER_ALLOC < cycle ) 930 printk("\n[%s] thread[%x,%x] get free info : hint %x / free_clusters %x\n", 931 __FUNCTION__, this->process->pid, this->trdid, hint, free_clusters ); 932 #endif 933 934 // check "free_clusters" 935 if ( free_clusters == 0 ) 936 { 937 printk("\n[ERROR] in %s : no more free FATFS clusters\n", __FUNCTION__ ); 938 remote_rwlock_wr_release( lock_xp ); 939 return -1; 940 } 941 else if ( free_clusters < CONFIG_VFS_FREE_CLUSTERS_MIN ) 942 { 943 printk("\n[WARNING] in %s : only %d free FATFS clusters\n", 944 __FUNCTION__, CONFIG_VFS_FREE_CLUSTERS_MIN ); 945 } 946 947 // get new cluster, page & slot indexes in FAT 948 new_cluster_id = hint + 1; 949 new_page_id = new_cluster_id >> 10; 950 new_slot_id = new_cluster_id & 0x3FF; 951 952 // get relevant FAT page descriptor from FAT mapper 953 new_page_xp = mapper_get_fat_page( fat_mapper_xp , new_page_id ); 954 955 
if( new_page_xp == XPTR_NULL ) 956 { 957 printk("\n[ERROR] in %s : cannot acces FAT mapper\n", __FUNCTION__ ); 958 remote_rwlock_wr_release( lock_xp ); 959 return -1; 960 } 961 962 // build extended pointer on new cluster slot in FAT mapper 963 new_slot_xp = ppm_page2base( new_page_xp ) + (new_slot_id << 2); 964 965 // check selected cluster actually free 966 if( hal_remote_l32( new_slot_xp ) != FREE_CLUSTER ) 967 { 968 printk("\n[ERROR] in %s : selected cluster_id %x not free\n", 969 __FUNCTION__, new_cluster_id ); 970 remote_rwlock_wr_release( lock_xp ); 971 return -1; 972 } 973 974 // update new_cluster slot in FAT mapper 975 hal_remote_s32( new_slot_xp , END_OF_CHAIN_CLUSTER_MIN ); 976 977 // handle last_cluster_id argument if non zero 978 if( last_cluster_id ) 979 { 980 // get last cluster page & slot indexes in FAT 981 last_page_id = last_cluster_id >> 10; 982 last_slot_id = last_cluster_id & 0x3FF; 983 984 // get relevant FAT page descriptor from FAT mapper 985 last_page_xp = mapper_get_fat_page( fat_mapper_xp , last_page_id ); 986 987 if( last_page_xp == XPTR_NULL ) 988 { 989 printk("\n[ERROR] in %s : cannot acces FAT mapper\n", __FUNCTION__ ); 990 remote_rwlock_wr_release( lock_xp ); 991 return -1; 992 } 993 994 // build extended pointer on new cluster slot in FAT mapper 995 last_slot_xp = ppm_page2base( last_page_xp ) + (last_slot_id << 2); 996 997 // check last cluster actually end of chain 998 if( hal_remote_l32( last_slot_xp ) != END_OF_CHAIN_CLUSTER_MIN ) 999 { 1000 printk("\n[ERROR] in %s : last_cluster_id %x not END_OF_CHAIN\n", 1001 __FUNCTION__, last_cluster_id ); 1002 remote_rwlock_wr_release( lock_xp ); 1003 return -1; 1004 } 1005 1006 // update last_cluster slot in FAT mapper 1007 hal_remote_s32( last_slot_xp , new_cluster_id ); 1008 } 1009 else 1010 { 1011 last_page_xp = XPTR_NULL; 1012 } 1013 1014 // update the FAT new_page on device 1015 error = fatfs_move_page( new_page_xp , IOC_SYNC_WRITE ); 1016 1017 if( error ) 1018 { 1019 
printk("\n[ERROR] in %s : cannot update FAT on IOC device\n", __FUNCTION__ ); 1020 remote_rwlock_wr_release( lock_xp ); 1021 return -1; 1022 } 1023 1024 // update the FAT last_page on device when required 1025 if( (last_page_xp != XPTR_NULL) && (last_page_xp != new_page_xp) ) 1026 { 1027 error = fatfs_move_page( last_page_xp , IOC_SYNC_WRITE ); 1028 1029 if( error ) 1030 { 1031 printk("\n[ERROR] in %s : cannot update FAT on IOC device\n", __FUNCTION__ ); 1032 remote_rwlock_wr_release( lock_xp ); 1033 return -1; 1034 } 1035 } 1036 1037 // update free cluster info in FATFS context and in FS_INFO sector 1038 error = fatfs_free_clusters_decrement( XPTR( fat_cxy , fat_fatfs_ctx ) , new_cluster_id ); 1039 1040 if( error ) 1041 { 1042 printk("\n[ERROR] in %s : cannot update free cluster info\n", __FUNCTION__ ); 1043 remote_rwlock_wr_release( lock_xp ); 1044 return -1; 1045 } 1046 1047 // release FAT lock 1048 remote_rwlock_wr_release( lock_xp ); 1049 1050 #if DEBUG_FATFS_CLUSTER_ALLOC 1051 cycle = (uint32_t)hal_get_cycles(); 1052 if( DEBUG_FATFS_CLUSTER_ALLOC < cycle ) 1053 printk("\n[%s] thread[%x,%x] exit / allocated cluster_id %x in FAT / cycle %d\n", 1054 __FUNCTION__, this->process->pid, this->trdid, new_cluster_id, cycle ); 1055 #endif 1056 1057 *searched_cluster_id = new_cluster_id; 1058 return 0; 1059 1060 } // end fatfs_cluster_alloc() 1061 1062 1063 ////////////////////////////////////////////////////////////////////////////////////////// 855 1064 // This static function access the FAT (File Allocation Table), stored in the FAT mapper, 856 1065 // and returns in <searched_cluster_id> the FATFS cluster_id for a given page of a given 857 // inode, identified by the <searched_page_id> argument ,that is the page index in file1066 // inode, identified by the <searched_page_id> argument that is the page index in file 858 1067 // (i.e. the page index in file mapper). 
The entry point in the FAT is defined by the 859 1068 // <first_cluster_id> argument, that is the cluster_id of an already allocated cluster. … … 861 1070 // fatfs_inode extension), or any page of the file whose <first_page_id> argument 862 1071 // is smaller than the searched <first_page_id> argument. 863 // This function can be called by a thread running in any cluster, as it uses remote 864 // access primitives when the FAT mapper is remote. 1072 // This function can be called by a thread running in any cluster. 865 1073 // The FAT mapper being a WRITE-THROUGH cache, this function updates the FAT mapper 866 1074 // from informations stored on IOC device in case of miss when scanning the FAT mapper. … … 885 1093 uint32_t current_cluster_id; // index of cluster in FATFS 886 1094 xptr_t lock_xp; // extended pointer on FAT lock 1095 xptr_t fat_mapper_xp; // extended pointer on FAT mapper 1096 mapper_t * fat_mapper_ptr; // local pointer on FAT mapper 1097 cxy_t fat_cxy; // FAT cluster 1098 error_t error; 887 1099 888 1100 assert( (searched_page_id > first_page_id) , … … 893 1105 thread_t * this = CURRENT_THREAD; 894 1106 if( DEBUG_FATFS_GET_CLUSTER < cycle ) 895 printk("\n[%s] thread[%x,%x] enter / first_cluster_id %x / searched_page_id %d / cycle %d\n", 896 __FUNCTION__, this->process->pid, this->trdid, first_cluster_id, searched_page_id, cycle ); 1107 printk("\n[%s] thread[%x,%x] enter / frst_pid %x / frst_cid %x / srch_pid %d / cycle %d\n", 1108 __FUNCTION__, this->process->pid, this->trdid, 1109 first_page_id, first_cluster_id, searched_page_id, cycle ); 897 1110 #endif 898 1111 … … 903 1116 fatfs_ctx_t * loc_fatfs_ctx = vfs_ctx->extend; 904 1117 905 // get extended pointer and cluster on FAT mapper 906 xptr_t fat_mapper_xp = loc_fatfs_ctx->fat_mapper_xp; 907 cxy_t fat_cxy = GET_CXY( fat_mapper_xp ); 1118 // get FAT cluster 1119 fat_cxy = CONFIG_VFS_ROOT_CXY; 1120 1121 // get pointers on FAT mapper 1122 fat_mapper_ptr = loc_fatfs_ctx->fat_mapper; 1123 
fat_mapper_xp = XPTR( fat_cxy , fat_mapper_ptr ); 908 1124 909 1125 // get local pointer on FATFS context in FAT cluster … … 931 1147 932 1148 // get pointer on current page descriptor in FAT mapper 933 xptr_t current_page_xp = mapper_ remote_get_page( fat_mapper_xp , fat_page_index );1149 xptr_t current_page_xp = mapper_get_fat_page( fat_mapper_xp , fat_page_index ); 934 1150 935 1151 if( current_page_xp == XPTR_NULL ) … … 942 1158 943 1159 // get pointer on buffer containing the FAT mapper page 944 xptr_t base_xp = ppm_page2base( current_page_xp );945 uint32_t * buffer = (uint32_t *)GET_PTR( base_xp );1160 xptr_t base_xp = ppm_page2base( current_page_xp ); 1161 uint32_t * buffer = (uint32_t *)GET_PTR( base_xp ); 946 1162 947 1163 // get next_cluster_id from FAT slot 948 1164 uint32_t next_cluster_id = hal_remote_l32( XPTR( fat_cxy, &buffer[fat_slot_index] ) ); 949 1165 950 // allocate a new FAT cluster when there is no cluster 951 // allocated on device for the current page 1166 // allocate a new FAT cluster when END_OF_CHAIN found 952 1167 if( next_cluster_id >= END_OF_CHAIN_CLUSTER_MIN ) 953 1168 { … … 956 1171 957 1172 // allocate a new cluster_id (and update both FAT mapper and FAT on device). 
958 error _t error = fatfs_cluster_alloc( &next_cluster_id );959 1173 error = fatfs_cluster_alloc( current_cluster_id, 1174 &next_cluster_id ); 960 1175 if( error ) 961 1176 { … … 968 1183 #if (DEBUG_FATFS_GET_CLUSTER & 1) 969 1184 if( DEBUG_FATFS_GET_CLUSTER < cycle ) 970 printk("\n[%s] allocated a new cluster_id % din FATFS\n",1185 printk("\n[%s] allocated a new cluster_id %x in FATFS\n", 971 1186 __FUNCTION__, next_cluster_id ); 972 1187 #endif … … 977 1192 #if (DEBUG_FATFS_GET_CLUSTER & 1) 978 1193 if( DEBUG_FATFS_GET_CLUSTER < cycle ) 979 printk("\n[%s] traverse FAT / fat_page_index %d / fat_slot_index %d/ next_cluster_id %x\n",980 __FUNCTION__, fat_page_index, fat_slot_index, next_cluster_id );1194 printk("\n[%s] traverse FAT / current_cluster_id %x / next_cluster_id %x\n", 1195 __FUNCTION__, current_cluster_id , next_cluster_id ); 981 1196 #endif 982 1197 … … 991 1206 #if DEBUG_FATFS_GET_CLUSTER 992 1207 if( DEBUG_FATFS_GET_CLUSTER < cycle ) 993 printk("\n[%s] thread[%x,%x] exit / searched_cluster_id = %d\n", 994 __FUNCTION__, this->process->pid, this->trdid, current_cluster_id ); 1208 printk("\n[%s] thread[%x,%x] exit / frst_pid %d / frst_cid %x / srch_pid %d / srch_cid %x\n", 1209 __FUNCTION__, this->process->pid, this->trdid, 1210 first_page_id, first_cluster_id, searched_page_id, current_cluster_id ); 995 1211 #endif 996 1212 … … 1000 1216 } // end fatfs_get_cluster() 1001 1217 1002 1003 1004 1005 1006 //////////////////////////////////////////////////////////////////////////////////////////1007 // FATFS specific extern functions1008 //////////////////////////////////////////////////////////////////////////////////////////1009 1010 ///////////////////////////////////1011 void fatfs_display_ctx( cxy_t cxy )1012 {1013 // get pointer on local FATFS context1014 vfs_ctx_t * vfs_ctx = &fs_context[FS_TYPE_FATFS];1015 fatfs_ctx_t * ctx = hal_remote_lpt( XPTR( cxy , &vfs_ctx->extend ) );1016 1017 uint32_t fat_sectors = hal_remote_l32( XPTR( cxy , 
&ctx->fat_sectors_count ) );1018 uint32_t sector_size = hal_remote_l32( XPTR( cxy , &ctx->bytes_per_sector ) );1019 uint32_t sec_per_clus = hal_remote_l32( XPTR( cxy , &ctx->sectors_per_cluster ) );1020 uint32_t fat_lba = hal_remote_l32( XPTR( cxy , &ctx->fat_begin_lba ) );1021 uint32_t data_lba = hal_remote_l32( XPTR( cxy , &ctx->cluster_begin_lba ) );1022 uint32_t fsinfo_lba = hal_remote_l32( XPTR( cxy , &ctx->fs_info_lba ) );1023 uint32_t root_dir_clus = hal_remote_l32( XPTR( cxy , &ctx->root_dir_cluster ) );1024 uint32_t free_clusters = hal_remote_l32( XPTR( cxy , &ctx->free_clusters ) );1025 uint32_t free_cluster_hint = hal_remote_l32( XPTR( cxy , &ctx->free_cluster_hint ) );1026 xptr_t mapper_xp = hal_remote_l64( XPTR( cxy , &ctx->fat_mapper_xp ) );1027 void * fs_info_buffer = hal_remote_lpt( XPTR( cxy , &ctx->fs_info_buffer ) );1028 1029 printk("\n*** FAT context in cluster %x\n"1030 "- fat_sectors = %d\n"1031 "- sector size = %d\n"1032 "- cluster size = %d\n"1033 "- fat_lba = %x\n"1034 "- data_lba = %x\n"1035 "- fsinfo_lba = %x\n"1036 "- root_dir_cluster = %x\n"1037 "- free_clusters = %x\n"1038 "- free_cluster_hint = %x\n"1039 "- fat_mapper_ptr = %x\n"1040 "- fs_info_buffer = %x\n",1041 cxy,1042 fat_sectors,1043 sector_size,1044 sector_size * sec_per_clus,1045 fat_lba,1046 data_lba,1047 fsinfo_lba,1048 root_dir_clus,1049 free_clusters,1050 free_cluster_hint,1051 GET_PTR( mapper_xp ),1052 fs_info_buffer );1053 1054 } // end fatfs_ctx_display()1055 1056 //////////////////////////////////////////1057 void fatfs_display_fat( uint32_t page_id,1058 uint32_t min_slot,1059 uint32_t nb_slots )1060 {1061 uint32_t line;1062 1063 // compute number of lines to display1064 uint32_t min_line = min_slot >> 3;1065 uint32_t max_line = (min_slot + nb_slots - 1) >> 3;1066 1067 // get pointer on local FATFS context1068 vfs_ctx_t * vfs_ctx = &fs_context[FS_TYPE_FATFS];1069 fatfs_ctx_t * loc_fatfs_ctx = (fatfs_ctx_t *)vfs_ctx->extend;1070 1071 // get pointers on FAT mapper (in 
FAT cluster)1072 xptr_t mapper_xp = loc_fatfs_ctx->fat_mapper_xp;1073 cxy_t mapper_cxy = GET_CXY( mapper_xp );1074 1075 // get pointer on FATFS context in FAT cluster1076 fatfs_ctx_t * fat_fatfs_ctx = hal_remote_lpt( XPTR( mapper_cxy , &vfs_ctx->extend ) );1077 1078 // get current value of hint and free_clusters1079 uint32_t hint = hal_remote_l32( XPTR( mapper_cxy , &fat_fatfs_ctx->free_cluster_hint ) );1080 uint32_t free = hal_remote_l32( XPTR( mapper_cxy , &fat_fatfs_ctx->free_clusters ) );1081 1082 // get extended pointer on requested page descriptor in FAT mapper1083 xptr_t page_xp = mapper_remote_get_page( mapper_xp , page_id );1084 1085 // get pointers on requested page base1086 xptr_t base_xp = ppm_page2base( page_xp );1087 void * base = GET_PTR( base_xp );1088 1089 printk("\n***** FAT mapper / cxy %x / page_id %d / base %x / free_clusters %x / hint %x\n",1090 mapper_cxy, page_id, base, free, hint );1091 1092 for( line = min_line ; line <= max_line ; line++ )1093 {1094 printk("%d : %X | %X | %X | %X | %X | %X | %X | %X\n", (line<<3),1095 hal_remote_l32( base_xp + ((line<<5) ) ),1096 hal_remote_l32( base_xp + ((line<<5) + 4 ) ),1097 hal_remote_l32( base_xp + ((line<<5) + 8 ) ),1098 hal_remote_l32( base_xp + ((line<<5) + 12 ) ),1099 hal_remote_l32( base_xp + ((line<<5) + 16 ) ),1100 hal_remote_l32( base_xp + ((line<<5) + 20 ) ),1101 hal_remote_l32( base_xp + ((line<<5) + 24 ) ),1102 hal_remote_l32( base_xp + ((line<<5) + 28 ) ) );1103 }1104 1105 } // end fatfs_display_fat()1106 1107 1108 1109 ///////////////////////////////////////////////////////////////////////////////////////1110 // Generic API : the following functions are called by the kernel VFS1111 // and must be defined by all supported file systems.1112 ///////////////////////////////////////////////////////////////////////////////////////1113 1114 /////////////////////////////////////1115 fatfs_ctx_t * fatfs_ctx_alloc( void )1116 {1117 kmem_req_t req;1118 req.type = KMEM_KCM;1119 req.order = 
bits_log2( sizeof(fatfs_ctx_t) );1120 req.flags = AF_KERNEL | AF_ZERO;1121 1122 return kmem_alloc( &req );1123 }1124 1125 //////////////////////////////////////////////1126 void fatfs_ctx_init( fatfs_ctx_t * fatfs_ctx )1127 {1128 error_t error;1129 kmem_req_t req;1130 uint8_t * buffer;1131 xptr_t buffer_xp;1132 1133 #if DEBUG_FATFS_CTX_INIT1134 uint32_t cycle = (uint32_t)hal_get_cycles();1135 thread_t * this = CURRENT_THREAD;1136 if( DEBUG_FATFS_CTX_INIT < cycle )1137 printk("\n[%s] thread[%x,%x] enter for fatfs_ctx = %x / cycle %d\n",1138 __FUNCTION__ , this->process->pid, this->trdid, fatfs_ctx , cycle );1139 #endif1140 1141 // check argument1142 assert( (fatfs_ctx != NULL) , "pointer on FATFS context is NULL" );1143 1144 // check only cluster 0 does FATFS initialization1145 assert( (local_cxy == 0) , "only cluster 0 can initialize FATFS");1146 1147 // allocate a permanent 512 bytes buffer to store1148 // - temporarily the BOOT sector1149 // - permanently the FS_INFO sector1150 req.type = KMEM_KCM;1151 req.order = 9; // 512 bytes1152 req.flags = AF_KERNEL | AF_ZERO;1153 buffer = kmem_alloc( &req );1154 1155 if( buffer == NULL )1156 {1157 printk("\n[PANIC] in %s : cannot allocate buffer\n", __FUNCTION__ );1158 hal_core_sleep();1159 }1160 1161 buffer_xp = XPTR( local_cxy , buffer );1162 1163 // load the BOOT record from device1164 error = dev_ioc_move_data( IOC_SYNC_READ , buffer_xp , 0 , 1 );1165 1166 if ( error )1167 {1168 printk("\n[PANIC] in %s : cannot access boot record\n", __FUNCTION__ );1169 hal_core_sleep();1170 }1171 1172 #if (DEBUG_FATFS_CTX_INIT & 0x1)1173 if( DEBUG_FATFS_CTX_INIT < cycle )1174 putb( "boot record", buffer , 256 );1175 #endif1176 1177 // get sector size from boot record1178 uint32_t sector_size = fatfs_get_record( BPB_BYTSPERSEC , buffer );1179 if ( sector_size != 512 )1180 {1181 printk("\n[PANIC] in %s : sector size must be 512 bytes\n", __FUNCTION__ );1182 hal_core_sleep();1183 }1184 1185 // get cluster size from boot record1186 
uint32_t nb_sectors = fatfs_get_record( BPB_SECPERCLUS , buffer );1187 if ( nb_sectors != 8 )1188 {1189 printk("\n[PANIC] in %s : cluster size must be 8 sectors\n", __FUNCTION__ );1190 hal_core_sleep();1191 }1192 1193 // get number of FAT copies from boot record1194 uint32_t nb_fats = fatfs_get_record( BPB_NUMFATS , buffer );1195 if ( nb_fats != 1 )1196 {1197 printk("\n[PANIC] in %s : number of FAT copies must be 1\n", __FUNCTION__ );1198 hal_core_sleep();1199 }1200 1201 // get number of sectors in FAT from boot record1202 uint32_t fat_sectors = fatfs_get_record( BPB_FAT32_FATSZ32 , buffer );1203 if ( (fat_sectors & 0xF) != 0 )1204 {1205 printk("\n[PANIC] in %s : FAT size not multiple of 16 sectors\n", __FUNCTION__ );1206 hal_core_sleep();1207 }1208 1209 // get root cluster from boot record1210 uint32_t root_cluster = fatfs_get_record( BPB_FAT32_ROOTCLUS , buffer );1211 if ( root_cluster != 2 )1212 {1213 printk("\n[PANIC] in %s : root cluster index must be 2\n", __FUNCTION__ );1214 hal_core_sleep();1215 }1216 1217 // get FAT lba from boot record1218 uint32_t fat_lba = fatfs_get_record( BPB_RSVDSECCNT , buffer );1219 1220 // get FS_INFO sector lba from boot record1221 uint32_t fs_info_lba = fatfs_get_record( BPB_FAT32_FSINFO , buffer );1222 1223 // load the FS_INFO record from device1224 error = dev_ioc_move_data( IOC_SYNC_READ , buffer_xp , fs_info_lba , 1 );1225 1226 if ( error )1227 {1228 printk("\n[PANIC] in %s : cannot access FS_INFO record\n", __FUNCTION__ );1229 hal_core_sleep();1230 }1231 1232 // get free_clusters number from FS_INFO record1233 uint32_t free_clusters = fatfs_get_record( FS_FREE_CLUSTERS , buffer );1234 if ( free_clusters >= fat_sectors << 7 )1235 {1236 printk("\n[PANIC] in %s : unconsistent free_clusters\n", __FUNCTION__ );1237 hal_core_sleep();1238 }1239 1240 // get free_cluster_hint from FS_INFO record1241 uint32_t free_cluster_hint = fatfs_get_record( FS_FREE_CLUSTER_HINT , buffer );1242 1243 if ( free_cluster_hint >= fat_sectors << 7 
)1244 {1245 printk("\n[PANIC] in %s : unconsistent free_cluster_hint\n", __FUNCTION__ );1246 hal_core_sleep();1247 }1248 1249 // allocate a mapper for the FAT itself1250 mapper_t * fat_mapper = mapper_create( FS_TYPE_FATFS );1251 if ( fat_mapper == NULL )1252 {1253 printk("\n[PANIC] in %s : no memory for FAT mapper\n", __FUNCTION__ );1254 hal_core_sleep();1255 }1256 1257 // the inode field is NULL for the FAT mapper1258 fat_mapper->inode = NULL;1259 1260 // initialize the FATFS context1261 fatfs_ctx->fat_begin_lba = fat_lba;1262 fatfs_ctx->fat_sectors_count = fat_sectors;1263 fatfs_ctx->bytes_per_sector = sector_size;1264 fatfs_ctx->sectors_per_cluster = nb_sectors;1265 fatfs_ctx->cluster_begin_lba = fat_lba + fat_sectors;1266 fatfs_ctx->root_dir_cluster = 2;1267 fatfs_ctx->fat_mapper_xp = XPTR( local_cxy , fat_mapper );1268 fatfs_ctx->fs_info_lba = fs_info_lba;1269 fatfs_ctx->free_clusters = free_clusters;1270 fatfs_ctx->free_cluster_hint = free_cluster_hint;1271 fatfs_ctx->fs_info_buffer = buffer;1272 1273 remote_rwlock_init( XPTR( local_cxy , &fatfs_ctx->lock ) , LOCK_FATFS_FAT );1274 1275 #if (DEBUG_FATFS_CTX_INIT & 0x1)1276 if( DEBUG_FATFS_CTX_INIT < cycle )1277 fatfs_ctx_display( fatfs_ctx );1278 #endif1279 1280 #if DEBUG_FATFS_CTX_INIT1281 cycle = (uint32_t)hal_get_cycles();1282 if( DEBUG_FATFS_CTX_INIT < cycle )1283 printk("\n[%s] thread[%x,%x] exit for fatfs_ctx = %x / cycle %d\n",1284 __FUNCTION__, this->process->pid, this->trdid, fatfs_ctx, cycle );1285 #endif1286 1287 } // end fatfs_ctx_init()1288 1289 /////////////////////////////////////////////////1290 void fatfs_ctx_destroy( fatfs_ctx_t * fatfs_ctx )1291 {1292 kmem_req_t req;1293 req.type = KMEM_KCM;1294 req.ptr = fatfs_ctx;1295 kmem_free( &req );1296 }1297 1298 ///////////////////////////////////////////////1299 error_t fatfs_add_dentry( vfs_inode_t * inode,1300 vfs_dentry_t * dentry )1301 {1302 error_t error;1303 uint32_t length; // dentry name length1304 uint32_t nb_lfn; // number or required 
LFN1305 char sfn[11]; // buffer for SFN name1306 uint8_t checksum; // name checksum1307 mapper_t * mapper; // loal pointer on parent inode mapper1308 xptr_t mapper_xp; // extended pointer on parent inode mapper1309 xptr_t child_xp; // extended pointer on child inode1310 cxy_t child_cxy; // child inode cluster1311 vfs_inode_t * child_ptr; // child inode local pointer1312 uint32_t size; // child inode size1313 uint32_t type; // child inode type1314 uint32_t cluster; // child inode cluster index1315 1316 #if DEBUG_FATFS_ADD_DENTRY1317 char dir_name[CONFIG_VFS_MAX_NAME_LENGTH];1318 uint32_t cycle = (uint32_t)hal_get_cycles();1319 thread_t * this = CURRENT_THREAD;1320 vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name );1321 if( DEBUG_FATFS_ADD_DENTRY < cycle )1322 printk("\n[%s] thread[%x,%x] enter / parent <%s> / child <%s> / cycle %d\n",1323 __FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, cycle );1324 #endif1325 1326 // check arguments1327 assert( (inode != NULL) , "inode pointer is NULL\n" );1328 assert( (dentry != NULL) , "dentry pointer is NULL\n" );1329 assert( (inode->mapper != NULL ) , "mapper pointer is NULL\n" );1330 1331 // get pointers on directory mapper1332 mapper = inode->mapper;1333 mapper_xp = XPTR( local_cxy , mapper );1334 1335 // get extended pointers on remote child inode1336 child_xp = dentry->child_xp;1337 child_cxy = GET_CXY( child_xp );1338 child_ptr = GET_PTR( child_xp );1339 1340 // get relevant infos from child inode1341 type = hal_remote_l32( XPTR( child_cxy , &child_ptr->type ) );1342 size = hal_remote_l32( XPTR( child_cxy , &child_ptr->size ) );1343 cluster = (uint32_t)(intptr_t)hal_remote_lpt( XPTR( child_cxy , &child_ptr->extend ) );1344 1345 // analyse dentry name1346 error = fatfs_name_format( dentry->name,1347 &length,1348 &nb_lfn,1349 sfn,1350 &checksum );1351 if ( error )1352 {1353 printk("\n[ERROR] in %s : dentry name > 31 bytes\n", __FUNCTION__ );1354 return -1;1355 }1356 1357 // Search end of 
directory with two embedded loops:1358 // - scan the pages in the mapper1359 // - scan the entries in each page to find NO_MORE_ENTRY1360 1361 xptr_t page_xp; // extended pointer on page descriptor1362 xptr_t base_xp; // extended pointer on page base1363 uint8_t * base; // local pointer on page base (array of bytes)1364 uint32_t page_id = 0; // page index in mapper1365 uint32_t offset = 0; // position in page1366 uint32_t found = 0; // NO_MORE_ENTRY found1367 1368 // loop on pages in mapper1369 while ( found == 0 )1370 {1371 // get extended pointer on page descriptor in mapper1372 page_xp = mapper_remote_get_page( mapper_xp , page_id );1373 1374 if ( page_xp == XPTR_NULL )1375 {1376 printk("\n[ERROR] in %s : cannot extend directory mapper\n", __FUNCTION__ );1377 return -1;1378 }1379 1380 // get pointer on page base1381 base_xp = ppm_page2base( page_xp );1382 base = GET_PTR( base_xp );1383 1384 // loop on directory entries in this page1385 while ( (offset < 4096) && (found == 0) )1386 {1387 if ( fatfs_get_record( LDIR_ORD, (base + offset) ) == NO_MORE_ENTRY )1388 {1389 found = 1;1390 }1391 else1392 {1393 offset = offset + 32;1394 }1395 } // end loop on entries1396 1397 if ( found == 0 )1398 {1399 page_id++;1400 offset = 0;1401 }1402 } // end loop on pages1403 1404 // Modify the directory mapper: depending on the name length,1405 // the new child requires to write (3, 4, or 5) directory entries.1406 // To actually register the new child, we use a 5 steps FSM1407 // (one state per entry to be written), that is traversed as:1408 // LFN3 -> LFN2 -> LFN1 -> NORMAL -> NOMORE1409 // At most two pages are modified:1410 // - the page containing the NO_MORE_ENTRY is always modified1411 // - the following page can be modified if the name spread on to pages.1412 1413 char * name = dentry->name;1414 1415 uint32_t step; // FSM state1416 1417 if ( nb_lfn == 1 ) step = 3;1418 else if ( nb_lfn == 2 ) step = 4;1419 else if ( nb_lfn == 3 ) step = 5;1420 1421 uint8_t * entry; // 
pointer on directory entry to be written1422 uint32_t i; // byte index in one 32 bytes directory1423 uint32_t c; // character index in name1424 1425 while ( step )1426 {1427 // when the new child is split on two pages,1428 // we need to access a new page in mapper1429 if ( offset >= 4096 )1430 {1431 // copy the modified page to IOC device1432 fatfs_move_page( page_xp , IOC_SYNC_WRITE );1433 1434 // get the next page in FAT mapper1435 page_xp = mapper_remote_get_page( mapper_xp , page_id + 1 );1436 1437 if ( page_xp == XPTR_NULL )1438 {1439 printk("\n[ERROR] in %s : cannot extend directory mapper\n", __FUNCTION__ );1440 return -1;1441 }1442 1443 // get pointer on page base1444 base_xp = ppm_page2base( page_xp );1445 base = GET_PTR( base_xp );1446 1447 // update offset1448 offset = 0;1449 }1450 1451 // compute directory entry address1452 entry = base + offset;1453 1454 #if (DEBUG_FATFS_ADD_DENTRY & 1)1455 cycle = (uint32_t)hal_get_cycles();1456 if( DEBUG_FATFS_ADD_DENTRY < cycle )1457 printk("\n[%s] FSM step = %d / offset = %x / nb_lfn = %d / cycle %d\n",1458 __FUNCTION__, step, offset, nb_lfn, cycle );1459 #endif1460 1461 // write 32 bytes (one directory entry) per iteration1462 switch ( step )1463 {1464 case 5: // write LFN3 entry1465 {1466 c = 26;1467 // scan the 32 bytes in dir_entry1468 for ( i = 0 ; i < 32 ; i++ )1469 {1470 if (i == 0)1471 {1472 if ( nb_lfn == 3) entry[i] = 0x43;1473 else entry[i] = 0x03;1474 }1475 else if ( ( ((i >= 1 ) && (i<=10) && ((i&1)==1)) ||1476 ((i >= 14) && (i<=25) && ((i&1)==0)) ||1477 ((i >= 28) && (i<=31) && ((i&1)==0)) ) &&1478 ( c < length ) )1479 {1480 entry[i] = name[c];1481 c++;1482 }1483 else if (i == 11) entry[i] = 0x0F;1484 else if (i == 13) entry[i] = checksum;1485 else entry[i] = 0x00;1486 }1487 step--;1488 break;1489 }1490 case 4: // write LFN2 entry1491 {1492 c = 13;1493 // scan the 32 bytes in dir_entry1494 for ( i = 0 ; i < 32 ; i++ )1495 {1496 if (i == 0)1497 {1498 if ( nb_lfn == 2) entry[i] = 0x42;1499 else entry[i] 
= 0x02;1500 }1501 else if ( ( ((i >= 1 ) && (i<=10) && ((i&1)==1)) ||1502 ((i >= 14) && (i<=25) && ((i&1)==0)) ||1503 ((i >= 28) && (i<=31) && ((i&1)==0)) ) &&1504 ( c < length ) )1505 {1506 entry[i] = name[c];1507 c++;1508 }1509 else if (i == 11) entry[i] = 0x0F;1510 else if (i == 13) entry[i] = checksum;1511 else entry[i] = 0x00;1512 }1513 step--;1514 break;1515 }1516 case 3: // Write LFN1 entry1517 {1518 c = 0;1519 // scan the 32 bytes in dir_entry1520 for ( i = 0 ; i < 32 ; i++ )1521 {1522 if (i == 0)1523 {1524 if ( nb_lfn == 1) entry[i] = 0x41;1525 else entry[i] = 0x01;1526 }1527 else if ( ( ((i >= 1 ) && (i<=10) && ((i&1)==1)) ||1528 ((i >= 14) && (i<=25) && ((i&1)==0)) ||1529 ((i >= 28) && (i<=31) && ((i&1)==0)) ) &&1530 ( c < length ) )1531 {1532 entry[i] = name[c];1533 c++;1534 }1535 else if (i == 11) entry[i] = 0x0F;1536 else if (i == 13) entry[i] = checksum;1537 else entry[i] = 0x00;1538 }1539 step--;1540 break;1541 }1542 case 2: // write NORMAL entry1543 {1544 // scan the 32 bytes in dir_entry1545 for ( i = 0 ; i < 32 ; i++ )1546 {1547 if ( i < 11 ) // 8.3 SFN1548 {1549 entry[i] = sfn[i];1550 }1551 else if (i == 11) // ATTR1552 {1553 if (type == INODE_TYPE_DIR) entry[i] = 0x10;1554 else entry[i] = 0x20;1555 }1556 else if (i == 20) entry[i] = cluster>>16; // cluster.B21557 else if (i == 21) entry[i] = cluster>>24; // cluster.B31558 else if (i == 26) entry[i] = cluster>>0; // cluster.B01559 else if (i == 27) entry[i] = cluster>>8; // cluster.B11560 else if (i == 28) entry[i] = size>>0; // size.B01561 else if (i == 29) entry[i] = size>>8; // size.B11562 else if (i == 30) entry[i] = size>>16; // size.B21563 else if (i == 31) entry[i] = size>>24; // size.B31564 else entry[i] = 0x00;1565 }1566 1567 // update the "extend" field in dentry descriptor1568 dentry->extend = (void*)(intptr_t)(((page_id<<12) + offset)>>5);1569 1570 step--;1571 break;1572 }1573 case 1: // write NOMORE entry1574 {1575 entry [0] = 0x00;1576 step--;1577 break;1578 }1579 } // end switch 
step1580 1581 offset += 32;1582 1583 } // exit while1584 1585 #if (DEBUG_FATFS_ADD_DENTRY & 1)1586 cycle = (uint32_t)hal_get_cycles();1587 if( DEBUG_FATFS_ADD_DENTRY < cycle )1588 printk("\n[%s] thread[%x,%x] before IOC access / cycle %d\n",1589 __FUNCTION__, this->process->pid, this->trdid, cycle );1590 #endif1591 1592 // copy the modified page to the IOC device1593 fatfs_move_page( page_xp , IOC_SYNC_WRITE );1594 1595 #if DEBUG_FATFS_ADD_DENTRY1596 cycle = (uint32_t)hal_get_cycles();1597 if( DEBUG_FATFS_ADD_DENTRY < cycle )1598 printk("\n[%s] thread[%x,%x] exit / parent <%s> / child <%s> / cycle %d\n",1599 __FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, cycle );1600 #endif1601 1602 return 0;1603 1604 } // end fatfs_add_dentry()1605 1606 //////////////////////////////////////////////////1607 error_t fatfs_remove_dentry( vfs_inode_t * inode,1608 vfs_dentry_t * dentry )1609 {1610 xptr_t mapper_xp; // extended pointer on mapper1611 mapper_t * mapper; // local pointer on mapper1612 xptr_t page_xp; // extended pointer on mapper page descriptor1613 xptr_t base_xp; // extended pointer on mapper page base1614 uint8_t * base; // local pointer on mapper page base1615 1616 #if DEBUG_FATFS_REMOVE_DENTRY1617 char dir_name[CONFIG_VFS_MAX_NAME_LENGTH];1618 uint32_t cycle = (uint32_t)hal_get_cycles();1619 thread_t * this = CURRENT_THREAD;1620 vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name );1621 if( DEBUG_FATFS_REMOVE_DENTRY < cycle )1622 printk("\n[%s] thread[%x,%x] enter / parent <%s> / child <%s> / cycle %d\n",1623 __FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, cycle );1624 #endif1625 1626 // check arguments1627 assert( (inode != NULL) , "inode pointer is NULL\n" );1628 assert( (dentry != NULL) , "dentry pointer is NULL\n" );1629 assert( (inode->type == INODE_TYPE_DIR) , "inode is not a directory\n" );1630 assert( (inode->mapper != NULL ) , "mapper pointer is NULL\n" );1631 1632 // get pointers on directory mapper1633 
mapper = inode->mapper;1634 mapper_xp = XPTR( local_cxy , mapper );1635 1636 // compute number of LFN entries1637 uint32_t nb_lfn;1638 uint32_t name_length = strlen( dentry->name );1639 1640 if ( name_length <= 13 ) nb_lfn = 1;1641 else if ( name_length <= 26 ) nb_lfn = 2;1642 else nb_lfn = 3;1643 1644 // we must invalidate (2, 3 or 4) 32 bytes entries:1645 // the NORMAL entry (registered in dentry->extend) and all preceding LFN entries1646 // At most two pages are modified:1647 // - the page containing the NORMAL entry is always modified.1648 // - the preceding page is modified when the name spread on two pages.1649 1650 // get 32 bytes directory entry index from dentry->extend1651 uint32_t dentry_id = (uint32_t)(intptr_t)dentry->extend;1652 1653 // get page index and offset in parent directory mapper1654 uint32_t page_id = dentry_id >> 7;1655 uint32_t offset = (dentry_id & 0x7F)<<5;1656 1657 #if DEBUG_FATFS_REMOVE_DENTRY & 11658 if( DEBUG_FATFS_REMOVE_DENTRY < cycle )1659 printk("\n[%s] dentry_id %x / page_id %x / offset %x\n",1660 __FUNCTION__, dentry_id, page_id, offset );1661 #endif1662 1663 // get extended pointer on page descriptor from parent directory mapper1664 page_xp = mapper_remote_get_page( mapper_xp , page_id );1665 1666 if ( page_xp == XPTR_NULL )1667 {1668 printk("\n[ERROR] in %s : cannot extend directory mapper\n", __FUNCTION__ );1669 return -1;1670 }1671 1672 // get pointers on page base1673 base_xp = ppm_page2base( page_xp );1674 base = GET_PTR( base_xp );1675 1676 // invalidate NORMAL entry in directory cache1677 base[offset] = 0xE5;1678 1679 // invalidate LFN entries1680 while ( nb_lfn )1681 {1682 if (offset == 0) // we must load page (page_id - 1)1683 {1684 1685 // check page_id1686 assert( (page_id > 0), "page_id and offset cannot be both 0\n" );1687 1688 // copy the modified page to the IOC device1689 fatfs_move_page( page_xp , IOC_SYNC_WRITE );1690 1691 // get extended pointer on page descriptor from parent directory mapper1692 page_xp = 
mapper_remote_get_page( mapper_xp , page_id );1693 1694 if ( page_xp == XPTR_NULL )1695 {1696 printk("\n[ERROR] in %s : cannot access directory mapper\n", __FUNCTION__ );1697 return -1;1698 }1699 1700 // get pointers on page base1701 base_xp = ppm_page2base( page_xp );1702 base = GET_PTR( base_xp );1703 1704 // update offset1705 offset = 4096;1706 }1707 1708 offset = offset - 32;1709 1710 // check for LFN entry1711 assert( (fatfs_get_record( DIR_ATTR, base + offset ) == ATTR_LONG_NAME_MASK ),1712 "this directory entry must be a LFN\n");1713 1714 // invalidate LFN entry1715 base[offset] = 0xE5;1716 1717 nb_lfn--;1718 }1719 1720 // copy the modified page to the IOC device1721 fatfs_move_page( page_xp , IOC_SYNC_WRITE );1722 1723 1724 #if DEBUG_FATFS_REMOVE_DENTRY1725 cycle = (uint32_t)hal_get_cycles();1726 if( DEBUG_FATFS_REMOVE_DENTRY < cycle )1727 printk("\n[%s] thread[%x,%x] exit / parent %s / child %s / cycle %d\n",1728 __FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, cycle );1729 #endif1730 1731 return 0;1732 1733 } // end fatfs_remove_dentry1734 1735 1736 1218 ////////////////////////////////////////////////////////////////////////////////////////////// 1737 // This static function scan the pages of a mapper containing a FAT32 directory, identified 1738 // by the <mapper> argument, to find the directory entry identified by the <name> argument, 1739 // and return a pointer on the directory entry, described as and array of 32 bytes, and the 1740 // index of this entry in the FAT32 mapper, seen as an array of 32 bytes entries. 1741 // It is called by the fatfs_new_dentry() and fatfs_update_dentry() functions. 1742 // It must be called by a thread running in the cluster containing the mapper. 
1219 // This static function scan the pages of a directory mapper, identified by the <mapper_xp> 1220 // argument, to find the directory entry identified by the <name> argument, and returns 1221 // a pointer on the directory entry, described as an array of 32 bytes, and the index of 1222 // this entry in the FAT32 mapper, seen as an array of 32 bytes entries. 1223 // It makes a local copy of each directory entry to reduce the number of remote accesses. 1224 // It is called by the fatfs_new_dentry_from_mapper() function. 1225 // It can be called by a thread running in any cluster. 1743 1226 ////////////////////////////////////////////////////////////////////////////////////////////// 1744 // @ mapper : [in] localpointer on directory mapper.1227 // @ mapper_xp : [in] extended pointer on directory mapper. 1745 1228 // @ name : [in] searched directory entry name. 1746 1229 // @ entry : [out] buffer for the pointer on the 32 bytes directory entry (when found). … … 1748 1231 // @ return 0 if found / return 1 if not found / return -1 if mapper access error. 
1749 1232 ////////////////////////////////////////////////////////////////////////////////////////////// 1750 static error_t fatfs_scan_directory( mapper_t * mapper,1233 static error_t fatfs_scan_directory( xptr_t mapper_xp, 1751 1234 char * name, 1752 1235 uint8_t ** entry, 1753 1236 uint32_t * index ) 1754 1237 { 1755 // Two embedded loops to scan the directory mapper: 1756 // - scan the parent directory mapper pages 1757 // - scan the directory entries in each 4 Kbytes page 1758 1759 // check parent_inode and child_inode 1760 assert( (mapper != NULL) , "mapper pointer is NULL\n" ); 1761 assert( (name != NULL ), "child name is undefined\n" ); 1762 assert( (entry != NULL ), "entry buffer undefined\n" ); 1238 uint8_t buf[32]; // local buffer for one FAT32 directory entry 1239 1240 // check arguments 1241 assert( (mapper_xp != XPTR_NULL) , "mapper pointer is NULL\n" ); 1242 assert( (name != NULL ) , "child name is undefined\n" ); 1763 1243 1764 1244 #if DEBUG_FATFS_SCAN_DIRECTORY 1765 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1766 uint32_t cycle = (uint32_t)hal_get_cycles(); 1767 thread_t * this = CURRENT_THREAD; 1768 vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , parent_name ); 1245 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1246 uint32_t cycle = (uint32_t)hal_get_cycles(); 1247 thread_t * this = CURRENT_THREAD; 1248 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 1249 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 1250 vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ); 1251 vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , parent_name ); 1769 1252 if( DEBUG_FATFS_SCAN_DIRECTORY < cycle ) 1770 1253 printk("\n[%s] thread[%x,%x] enter to search child <%s> in parent <%s> / cycle %d\n", … … 1777 1260 char lfn2[16]; // buffer for one partial cname 1778 1261 char lfn3[16]; // buffer for one partial cname 1779 xptr_t mapper_xp; // extended pointer on mapper descriptor 1780 xptr_t page_xp; // extended pointer on page descriptor 
1781 xptr_t base_xp; // extended pointer on page base 1782 uint8_t * base; // local pointer on page base 1262 xptr_t page_xp; // extended pointer on one page descriptor 1263 xptr_t base_xp; // extended pointer on one page base 1783 1264 uint8_t attr; // directory entry ATTR field 1784 1265 uint8_t ord; // directory entry ORD field … … 1789 1270 uint32_t offset = 0; // byte offset in page 1790 1271 1791 mapper_xp = XPTR( local_cxy , mapper ); 1272 // Two embedded loops to scan the directory mapper: 1273 // - scan the parent directory mapper pages 1274 // - scan the directory entries in each 4 Kbytes page 1792 1275 1793 1276 // scan the mapper pages … … 1795 1278 { 1796 1279 // get one page 1797 page_xp = mapper_ remote_get_page( mapper_xp , page_id );1280 page_xp = mapper_get_page( mapper_xp , page_id ); 1798 1281 1799 1282 if( page_xp == XPTR_NULL) … … 1804 1287 // get page base 1805 1288 base_xp = ppm_page2base( page_xp ); 1806 base = (uint8_t *)GET_PTR( base_xp ); 1807 1808 #if (DEBUG_FATFS_SCAN_DIRECTORY & 0x1) 1289 1290 #if (DEBUG_FATFS_SCAN_DIRECTORY & 1) 1809 1291 if( DEBUG_FATFS_SCAN_DIRECTORY < cycle ) 1810 1292 mapper_display_page( mapper_xp , page_xp , 256 ); … … 1813 1295 while( (offset < 4096) && (found == 0) ) 1814 1296 { 1815 attr = fatfs_get_record( DIR_ATTR , base + offset ); 1816 ord = fatfs_get_record( LDIR_ORD , base + offset ); 1297 // makes a local copy of current directory entry (32 bytes) 1298 hal_remote_memcpy( XPTR( local_cxy , buf ) , base_xp + offset , 32 ); 1299 1300 // get attr and ord from local buffer 1301 attr = fatfs_get_record( DIR_ATTR , buf ); 1302 ord = fatfs_get_record( LDIR_ORD , buf ); 1817 1303 1818 1304 if (ord == NO_MORE_ENTRY) // no more entry => break … … 1832 1318 seq = ord & 0x3; 1833 1319 lfn = (seq > lfn) ? 
seq : lfn; 1834 if ( seq == 1 ) fatfs_get_name_from_long( b ase + offset, lfn1 );1835 else if ( seq == 2 ) fatfs_get_name_from_long( b ase + offset, lfn2 );1836 else if ( seq == 3 ) fatfs_get_name_from_long( b ase + offset, lfn3 );1320 if ( seq == 1 ) fatfs_get_name_from_long( buf , lfn1 ); 1321 else if ( seq == 2 ) fatfs_get_name_from_long( buf , lfn2 ); 1322 else if ( seq == 3 ) fatfs_get_name_from_long( buf , lfn3 ); 1837 1323 offset = offset + 32; 1838 1324 } … … 1842 1328 if ( lfn == 0 ) 1843 1329 { 1844 fatfs_get_name_from_short( b ase + offset, cname );1330 fatfs_get_name_from_short( buf , cname ); 1845 1331 } 1846 1332 else if ( lfn == 1 ) … … 1863 1349 if ( strcmp( name , cname ) == 0 ) 1864 1350 { 1351 uint8_t * base = GET_PTR( base_xp ); 1865 1352 *entry = base + offset; 1866 *index = ( (page_id<<12) + offset)>>5;1867 found 1353 *index = ( (page_id << 12) + offset ) >> 5; 1354 found = 1; 1868 1355 } 1869 1356 offset = offset + 32; 1870 1357 lfn = 0; 1871 1358 } 1359 1872 1360 } // end loop on directory entries in page 1873 1361 … … 1881 1369 1882 1370 #if DEBUG_FATFS_SCAN_DIRECTORY 1883 cycle = (uint32_t)hal_get_cycles();1884 1371 if( DEBUG_FATFS_SCAN_DIRECTORY < cycle ) 1885 1372 printk("\n[%s] thread[%x,%x] exit / found child <%s> in <%s>\n", … … 1892 1379 1893 1380 #if DEBUG_FATFS_SCAN_DIRECTORY 1894 cycle = (uint32_t)hal_get_cycles();1895 1381 if( DEBUG_FATFS_SCAN_DIRECTORY < cycle ) 1896 1382 printk("\n[%s] thread[%x,%x] exit / child <%s> in <%s> not found\n", … … 1909 1395 1910 1396 1911 1912 ///////////////////////////////////////////////////// 1913 error_t fatfs_new_dentry( vfs_inode_t * parent_inode, 1914 char * name, 1915 xptr_t child_inode_xp ) 1916 { 1917 uint8_t * entry; // pointer on FAT32 directory entry (array of 32 bytes) 1918 uint32_t index; // index of FAT32 directory entry in mapper 1919 mapper_t * mapper; // pointer on directory mapper 1920 uint32_t cluster; // directory entry cluster 1921 uint32_t size; // directory entry size 1922 
bool_t is_dir; // directory entry type (file/dir) 1923 xptr_t root_xp; // extended pointer on root of parent dentries 1924 xptr_t iter_xp; // iterator for this list 1925 cxy_t child_inode_cxy; // child inode cluster 1926 vfs_inode_t * child_inode_ptr; // child inode local pointer 1927 xptr_t dentry_xp; // extended pointer on searched dentry descriptor 1928 cxy_t dentry_cxy; // cluster identifier of dentry (must be local_cxy) 1929 vfs_dentry_t * dentry_ptr; // local pointer 1930 error_t error; 1931 1932 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1933 1934 // check arguments 1935 assert( (parent_inode != NULL) , "parent_inode is NULL\n" ); 1936 assert( (name != NULL) , "name is NULL\n" ); 1937 assert( (child_inode_xp != XPTR_NULL ) , "child_inode is NULL\n" ); 1938 1939 // get child inode cluster and local pointer 1940 child_inode_cxy = GET_CXY( child_inode_xp ); 1941 child_inode_ptr = GET_PTR( child_inode_xp ); 1942 1943 // build extended pointer on root of list of parent dentries 1944 root_xp = XPTR( child_inode_cxy , &child_inode_ptr->parents ); 1945 1946 // check child inode has at least one parent 1947 assert( (xlist_is_empty( root_xp ) == false ), "child inode must have one parent\n"); 1948 1949 #if DEBUG_FATFS_NEW_DENTRY 1397 ////////////////////////////////////////////////////////////////////////////////////////// 1398 // FATFS debug functions 1399 ////////////////////////////////////////////////////////////////////////////////////////// 1400 1401 /////////////////////////////////// 1402 void fatfs_display_ctx( cxy_t cxy ) 1403 { 1404 // get pointer on local FATFS context 1405 vfs_ctx_t * vfs_ctx = &fs_context[FS_TYPE_FATFS]; 1406 fatfs_ctx_t * ctx = hal_remote_lpt( XPTR( cxy , &vfs_ctx->extend ) ); 1407 1408 uint32_t fat_sectors = hal_remote_l32( XPTR( cxy , &ctx->fat_sectors_count ) ); 1409 uint32_t sector_size = hal_remote_l32( XPTR( cxy , &ctx->bytes_per_sector ) ); 1410 uint32_t sec_per_clus = hal_remote_l32( XPTR( cxy , &ctx->sectors_per_cluster ) ); 
1411 uint32_t fat_lba = hal_remote_l32( XPTR( cxy , &ctx->fat_begin_lba ) ); 1412 uint32_t data_lba = hal_remote_l32( XPTR( cxy , &ctx->cluster_begin_lba ) ); 1413 uint32_t fsinfo_lba = hal_remote_l32( XPTR( cxy , &ctx->fs_info_lba ) ); 1414 uint32_t root_dir_clus = hal_remote_l32( XPTR( cxy , &ctx->root_dir_cluster ) ); 1415 uint32_t free_clusters = hal_remote_l32( XPTR( cxy , &ctx->free_clusters ) ); 1416 uint32_t free_cluster_hint = hal_remote_l32( XPTR( cxy , &ctx->free_cluster_hint ) ); 1417 void * fat_mapper = hal_remote_lpt( XPTR( cxy , &ctx->fat_mapper ) ); 1418 void * fs_info_buffer = hal_remote_lpt( XPTR( cxy , &ctx->fs_info_buffer ) ); 1419 1420 printk("\n*** FAT context in cluster %x\n" 1421 "- fat_sectors = %d\n" 1422 "- sector size = %d\n" 1423 "- cluster size = %d\n" 1424 "- fat_lba = %x\n" 1425 "- data_lba = %x\n" 1426 "- fsinfo_lba = %x\n" 1427 "- root_dir_cluster = %x\n" 1428 "- free_clusters = %x\n" 1429 "- free_cluster_hint = %x\n" 1430 "- fat_mapper = %x\n" 1431 "- fs_info_buffer = %x\n", 1432 cxy, 1433 fat_sectors, 1434 sector_size, 1435 sector_size * sec_per_clus, 1436 fat_lba, 1437 data_lba, 1438 fsinfo_lba, 1439 root_dir_clus, 1440 free_clusters, 1441 free_cluster_hint, 1442 fat_mapper, 1443 fs_info_buffer ); 1444 1445 } // end fatfs_display_ctx() 1446 1447 ////////////////////////////////////////// 1448 void fatfs_display_fat( uint32_t min_slot, 1449 uint32_t nb_slots ) 1450 { 1451 // one FAT mapper page contains 1024 slots = 128 lines of 8 slots 1452 1453 uint32_t page_id; 1454 uint32_t line; 1455 cxy_t fat_cxy; // FAT cluster 1456 mapper_t * mapper; // local pointer on FAT mapper 1457 xptr_t mapper_xp; // extended pointer on fat_mapper 1458 uint32_t min_cluster_id; // index of min slot to be displayed 1459 uint32_t min_page_id; // index of min page to be displayed 1460 uint32_t min_line_id; // index of min line in min page ( < 128 ) 1461 uint32_t max_cluster_id; // index of max slot to be displayed 1462 uint32_t max_page_id; // index of 
max page to be displayed 1463 uint32_t max_line_id; // index of max line in max page ( < 128 ) 1464 1465 // compute min values 1466 min_cluster_id = min_slot & 0xFFFFFFF8; 1467 min_line_id = (min_cluster_id & 0x3FF) >> 3; 1468 min_page_id = min_cluster_id >> 10; 1469 1470 // compute max values 1471 max_cluster_id = min_slot + nb_slots - 1; 1472 max_line_id = (max_cluster_id & 0x3FF) >> 3; 1473 max_page_id = max_cluster_id >> 10; 1474 1475 // get pointer on local FATFS context 1476 vfs_ctx_t * vfs_ctx = &fs_context[FS_TYPE_FATFS]; 1477 fatfs_ctx_t * loc_fatfs_ctx = (fatfs_ctx_t *)vfs_ctx->extend; 1478 1479 // get FAT cluster 1480 fat_cxy = CONFIG_VFS_ROOT_CXY; 1481 1482 // get pointers on FAT mapper (in FAT cluster) 1483 mapper = loc_fatfs_ctx->fat_mapper; 1484 mapper_xp = XPTR( fat_cxy , mapper ); 1485 1486 // get pointer on FATFS context in FAT cluster 1487 fatfs_ctx_t * fat_fatfs_ctx = hal_remote_lpt( XPTR( fat_cxy , &vfs_ctx->extend ) ); 1488 1489 // get current value of hint and free_clusters 1490 uint32_t hint = hal_remote_l32( XPTR( fat_cxy , &fat_fatfs_ctx->free_cluster_hint ) ); 1491 uint32_t free = hal_remote_l32( XPTR( fat_cxy , &fat_fatfs_ctx->free_clusters ) ); 1492 1493 printk("\n***** FAT mapper / cxy %x / free_clusters %x / hint %x\n", fat_cxy, free, hint ); 1494 1495 // scan all pages as required by min_page_id and max_page_id 1496 for( page_id = min_page_id ; page_id <= max_page_id ; page_id++ ) 1497 { 1498 // get extended pointer on requested page descriptor in FAT mapper 1499 xptr_t page_xp = mapper_get_fat_page( mapper_xp , page_id ); 1500 1501 // get extended pointer on requested page base 1502 xptr_t base_xp = ppm_page2base( page_xp ); 1503 1504 // compute min_line & max_line in current page 1505 uint32_t min_line = (page_id == min_page_id) ? min_line_id : 0; 1506 uint32_t max_line = (page_id == max_page_id) ? 
max_line_id : 127; 1507 1508 // loop on lines in current page 1509 for( line = min_line ; line <= max_line ; line++ ) 1510 { 1511 printk("%x : %X | %X | %X | %X | %X | %X | %X | %X\n", 1512 (page_id << 10) + (line <<3 ), 1513 hal_remote_l32( base_xp + ((line<<5) ) ), 1514 hal_remote_l32( base_xp + ((line<<5) + 4 ) ), 1515 hal_remote_l32( base_xp + ((line<<5) + 8 ) ), 1516 hal_remote_l32( base_xp + ((line<<5) + 12 ) ), 1517 hal_remote_l32( base_xp + ((line<<5) + 16 ) ), 1518 hal_remote_l32( base_xp + ((line<<5) + 20 ) ), 1519 hal_remote_l32( base_xp + ((line<<5) + 24 ) ), 1520 hal_remote_l32( base_xp + ((line<<5) + 28 ) ) ); 1521 } 1522 } 1523 } // end fatfs_display_fat() 1524 1525 ///////////////////////////////////// 1526 error_t fatfs_check_free_info( void ) 1527 { 1528 error_t error; 1529 fatfs_ctx_t * fatfs_ctx_ptr; // local pointer on fatfs context in cluster 0 1530 uint32_t ctx_free_clusters; // number of free clusters from fatfs context 1531 uint32_t ctx_free_cluster_hint; // free cluster hint from fatfs context 1532 uint32_t ioc_free_clusters; // number of free clusters from fatfs context 1533 uint32_t ioc_free_cluster_hint; // free cluster hint from fatfs context 1534 uint32_t fs_info_lba; // lba of FS_INFO sector on IOC device 1535 uint8_t * fs_info_buffer; // local pointer on FS_INFO buffer in cluster 0 1536 xptr_t fs_info_buffer_xp; // extended pointer on FS_INFO buffer in cluster 0 1537 uint8_t tmp_buf[512]; // 512 bytes temporary buffer 1538 xptr_t tmp_buf_xp; // extended pointer on temporary buffer 1539 1540 #if DEBUG_FATFS_SYNC_FSINFO 1950 1541 uint32_t cycle = (uint32_t)hal_get_cycles(); 1951 1542 thread_t * this = CURRENT_THREAD; 1952 vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name ); 1953 if( DEBUG_FATFS_NEW_DENTRY < cycle ) 1543 if( DEBUG_FATFS_SYNC_FSINFO < cycle ) 1544 printk("\n[%s] thread[%x,%x] enter / cycle %d\n", 1545 __FUNCTION__ , this->process->pid, this->trdid, cycle ); 1546 #endif 1547 1548 // get pointer on fatfs 
context in cluster 0 1549 fatfs_ctx_ptr = hal_remote_lpt( XPTR( 0 , &fs_context[FS_TYPE_FATFS].extend ) ); 1550 1551 // get "free_clusters" and "free_cluster_hint" from fatfs context in cluster 0 1552 ctx_free_clusters = hal_remote_l32( XPTR( 0 , &fatfs_ctx_ptr->free_clusters ) ); 1553 ctx_free_cluster_hint = hal_remote_l32( XPTR( 0 , &fatfs_ctx_ptr->free_cluster_hint ) ); 1554 1555 // get fs_info_lba 1556 fs_info_lba = hal_remote_l32( XPTR( 0 , &fatfs_ctx_ptr->fs_info_lba ) ); 1557 1558 // build extended pointer on temporary buffer 1559 tmp_buf_xp = XPTR( local_cxy , tmp_buf ); 1560 1561 // copy FS_INFO sector from IOC to local buffer 1562 error = dev_ioc_sync_read( tmp_buf_xp , fs_info_lba , 1 ); 1563 1564 if ( error ) 1565 { 1566 printk("\n[ERROR] in %s : cannot access FS_INFO on IOC device\n", __FUNCTION__ ); 1567 return -1; 1568 } 1569 1570 // get current values of "free_clusters" and "free_cluster_hint" from FS_INFO on IOC 1571 ioc_free_clusters = fatfs_get_remote_record( FS_FREE_CLUSTERS , tmp_buf_xp ); 1572 ioc_free_cluster_hint = fatfs_get_remote_record( FS_FREE_CLUSTER_HINT , tmp_buf_xp ); 1573 1574 #if DEBUG_FATFS_SYNC_FSINFO 1575 if( DEBUG_FATFS_SYNC_FSINFO < cycle ) 1576 printk("\n[%s] thread[%x,%x] / ctx_free %x / ioc_free %x / ctx_hint %x / ioc_hint %x\n", 1577 __FUNCTION__ , this->process->pid, this->trdid, 1578 ctx_free_clusters, ioc_free_clusters, ctx_free_cluster_hint, ioc_free_cluster_hint ); 1579 #endif 1580 1581 // check values 1582 if( (ioc_free_clusters != ctx_free_clusters) || 1583 (ioc_free_cluster_hint != ctx_free_cluster_hint) ) 1584 { 1585 printk("\n[WARNING] in %s : unconsistent free clusters info\n" 1586 " ioc_free %x / ctx_free %x / ioc_hint %x / ctx_hint %x\n", 1587 __FUNCTION__, ioc_free_clusters, ctx_free_clusters, 1588 ioc_free_cluster_hint, ctx_free_cluster_hint ); 1589 1590 // get pointers on FS_INFO buffer in cluster 0 1591 fs_info_buffer = hal_remote_lpt( XPTR( 0 , &fatfs_ctx_ptr->fs_info_buffer ) ); 1592 fs_info_buffer_xp = 
XPTR( 0 , fs_info_buffer ); 1593 1594 // update FS_INFO buffer in cluster 0 1595 fatfs_set_remote_record(FS_FREE_CLUSTERS ,fs_info_buffer_xp,ctx_free_clusters ); 1596 fatfs_set_remote_record(FS_FREE_CLUSTER_HINT,fs_info_buffer_xp,ctx_free_cluster_hint); 1597 1598 // update the FS_INFO sector on IOC device 1599 error = dev_ioc_sync_write( fs_info_buffer_xp , fs_info_lba , 1 ); 1600 1601 if ( error ) 1602 { 1603 printk("\n[ERROR] in %s : cannot update FS_INFO on IOC device\n", __FUNCTION__ ); 1604 return -1; 1605 } 1606 } 1607 1608 #if DEBUG_FATFS_SYNC_FSINFO 1609 cycle = (uint32_t)hal_get_cycles(); 1610 if( DEBUG_FATFS_SYNC_FSINFO < cycle ) 1611 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 1612 __FUNCTION__ , this->process->pid, this->trdid, cycle ); 1613 #endif 1614 1615 return 0; 1616 1617 } // end fatfs_check_free_info() 1618 1619 1620 1621 1622 1623 /////////////////////////////////////////////////////////////////////////////////////// 1624 // Generic API : the following functions are called by the kernel VFS 1625 // and must be defined by all supported file systems. 
1626 /////////////////////////////////////////////////////////////////////////////////////// 1627 1628 1629 ///////////////////////////////////// 1630 xptr_t fatfs_ctx_alloc( cxy_t cxy ) 1631 { 1632 kmem_req_t req; 1633 1634 // allocate memory from remote cluster 1635 req.type = KMEM_KCM; 1636 req.order = bits_log2( sizeof(fatfs_ctx_t) ); 1637 req.flags = AF_KERNEL | AF_ZERO; 1638 1639 return XPTR( cxy , kmem_remote_alloc( cxy , &req ) ); 1640 1641 } //end faffs_ctx_alloc() 1642 1643 /////////////////////////////////////////////// 1644 error_t fatfs_ctx_init( xptr_t fatfs_ctx_xp ) 1645 { 1646 error_t error; 1647 kmem_req_t req; 1648 cxy_t cxy; // FATFS context cluster identifier 1649 fatfs_ctx_t * fatfs_ctx_ptr; // local pointer on FATFS context 1650 uint8_t * buffer; // local pointer on 512 bytes buffer 1651 xptr_t buffer_xp; // extended pointer on 512 bytes buffer 1652 xptr_t fat_mapper_xp; // extended pointer on FAT mapper 1653 mapper_t * fat_mapper; // local pointer on FAT mapper 1654 1655 // get FATFS context cluster and local pointer 1656 cxy = GET_CXY( fatfs_ctx_xp ); 1657 fatfs_ctx_ptr = GET_PTR( fatfs_ctx_xp ); 1658 1659 #if DEBUG_FATFS_CTX_INIT 1660 uint32_t cycle = (uint32_t)hal_get_cycles(); 1661 thread_t * this = CURRENT_THREAD; 1662 if( DEBUG_FATFS_CTX_INIT < cycle ) 1663 printk("\n[%s] thread[%x,%x] enter for fatfs_ctx (%x,%x) / cycle %d\n", 1664 __FUNCTION__ , this->process->pid, this->trdid, cxy, fatfs_ctx_ptr , cycle ); 1665 #endif 1666 1667 // allocate a 512 bytes buffer in remote cluster, used to store 1668 // temporarily the BOOT sector, and permanently the FS_INFO sector 1669 req.type = KMEM_KCM; 1670 req.order = 9; // 512 bytes 1671 req.flags = AF_KERNEL | AF_ZERO; 1672 buffer = kmem_remote_alloc( cxy , &req ); 1673 1674 if( buffer == NULL ) 1675 { 1676 printk("\n[PANIC] in %s : cannot allocate buffer in cluster %x\n", 1677 __FUNCTION__ , cxy ); 1678 return -1; 1679 } 1680 1681 // build extended pointer on buffer 1682 buffer_xp = XPTR( cxy , 
buffer ); 1683 1684 // load the BOOT record from device 1685 error = dev_ioc_sync_read( buffer_xp , 0 , 1 ); 1686 1687 if ( error ) 1688 { 1689 printk("\n[PANIC] in %s : cannot access boot record\n", __FUNCTION__ ); 1690 return -1; 1691 } 1692 1693 #if (DEBUG_FATFS_CTX_INIT & 0x1) 1694 uint8_t bs[256]; 1695 hal_remote_memcpy( XPTR( local_cxy , bs ) , buffer_xp , 256 ); 1696 if( DEBUG_FATFS_CTX_INIT < cycle ) 1697 putb( "boot record", bs , 256 ); 1698 #endif 1699 1700 // get sector size from boot record 1701 uint32_t sector_size = fatfs_get_remote_record( BPB_BYTSPERSEC , buffer_xp ); 1702 1703 if ( sector_size != 512 ) 1704 { 1705 printk("\n[PANIC] in %s : sector size must be 512 bytes\n", __FUNCTION__ ); 1706 return -1; 1707 } 1708 1709 // get cluster size from boot record 1710 uint32_t nb_sectors = fatfs_get_remote_record( BPB_SECPERCLUS , buffer_xp ); 1711 1712 if ( nb_sectors != 8 ) 1713 { 1714 printk("\n[PANIC] in %s : cluster size must be 8 sectors\n", __FUNCTION__ ); 1715 return -1; 1716 } 1717 1718 // get number of FAT copies from boot record 1719 uint32_t nb_fats = fatfs_get_remote_record( BPB_NUMFATS , buffer_xp ); 1720 1721 if ( nb_fats != 1 ) 1722 { 1723 printk("\n[PANIC] in %s : number of FAT copies must be 1\n", __FUNCTION__ ); 1724 return -1; 1725 } 1726 1727 // get number of sectors in FAT from boot record 1728 uint32_t fat_sectors = fatfs_get_remote_record( BPB_FAT32_FATSZ32 , buffer_xp ); 1729 1730 if ( (fat_sectors & 0xF) != 0 ) 1731 { 1732 printk("\n[PANIC] in %s : FAT size not multiple of 16 sectors\n", __FUNCTION__ ); 1733 return -1; 1734 } 1735 1736 // get root cluster from boot record 1737 uint32_t root_cluster = fatfs_get_remote_record( BPB_FAT32_ROOTCLUS , buffer_xp ); 1738 1739 if ( root_cluster != 2 ) 1740 { 1741 printk("\n[PANIC] in %s : root cluster index must be 2\n", __FUNCTION__ ); 1742 return -1; 1743 } 1744 1745 // get FAT lba from boot record 1746 uint32_t fat_lba = fatfs_get_remote_record( BPB_RSVDSECCNT , buffer_xp ); 1747 1748 
// get FS_INFO sector lba from boot record 1749 uint32_t fs_info_lba = fatfs_get_remote_record( BPB_FAT32_FSINFO , buffer_xp ); 1750 1751 // load the FS_INFO record from device 1752 error = dev_ioc_sync_read( buffer_xp , fs_info_lba , 1 ); 1753 1754 if ( error ) 1755 { 1756 printk("\n[PANIC] in %s : cannot access FS_INFO record\n", __FUNCTION__ ); 1757 return -1; 1758 } 1759 1760 // get free_clusters number from FS_INFO record 1761 uint32_t free_clusters = fatfs_get_remote_record( FS_FREE_CLUSTERS , buffer_xp ); 1762 1763 if ( free_clusters >= fat_sectors << 7 ) 1764 { 1765 printk("\n[PANIC] in %s : unconsistent free_clusters\n", __FUNCTION__ ); 1766 return -1; 1767 } 1768 1769 // get free_cluster_hint from FS_INFO record 1770 uint32_t free_hint = fatfs_get_remote_record( FS_FREE_CLUSTER_HINT , buffer_xp ); 1771 1772 if ( free_hint >= fat_sectors << 7 ) 1773 { 1774 printk("\n[PANIC] in %s : unconsistent free_cluster_hint\n", __FUNCTION__ ); 1775 return -1; 1776 } 1777 1778 // allocate a mapper for the FAT in remote cluster 1779 fat_mapper_xp = mapper_create( cxy , FS_TYPE_FATFS ); 1780 1781 // get local pointer on FAT mapper 1782 fat_mapper = GET_PTR( fat_mapper_xp ); 1783 1784 if ( fat_mapper == NULL ) 1785 { 1786 printk("\n[PANIC] in %s : no memory for FAT mapper in cluster %x\n", 1787 __FUNCTION__ , cxy ); 1788 return -1; 1789 } 1790 1791 #if (DEBUG_FATFS_CTX_INIT & 0x1) 1792 if( DEBUG_FATFS_CTX_INIT < cycle ) 1793 printk("\n[%s] sector_size %d / nb_sectors %d / fat_sectors %x / hint %x\n", 1794 __FUNCTION__, sector_size, nb_sectors, fat_sectors, free_hint ); 1795 #endif 1796 1797 // the inode field is NULL for the FAT mapper 1798 hal_remote_spt( XPTR( cxy , &fat_mapper->inode ) , NULL ); 1799 1800 // initialize the FATFS context 1801 hal_remote_s32( XPTR( cxy , &fatfs_ctx_ptr->fat_begin_lba ), fat_lba ); 1802 hal_remote_s32( XPTR( cxy , &fatfs_ctx_ptr->fat_sectors_count ), fat_sectors ); 1803 hal_remote_s32( XPTR( cxy , &fatfs_ctx_ptr->bytes_per_sector ), 
sector_size ); 1804 hal_remote_s32( XPTR( cxy , &fatfs_ctx_ptr->sectors_per_cluster ), nb_sectors ); 1805 hal_remote_s32( XPTR( cxy , &fatfs_ctx_ptr->cluster_begin_lba ), fat_lba + fat_sectors ); 1806 hal_remote_s32( XPTR( cxy , &fatfs_ctx_ptr->root_dir_cluster ), 2 ); 1807 hal_remote_spt( XPTR( cxy , &fatfs_ctx_ptr->fat_mapper ), fat_mapper ); 1808 hal_remote_s32( XPTR( cxy , &fatfs_ctx_ptr->fs_info_lba ), fs_info_lba ); 1809 hal_remote_s32( XPTR( cxy , &fatfs_ctx_ptr->free_clusters ), free_clusters ); 1810 hal_remote_s32( XPTR( cxy , &fatfs_ctx_ptr->free_cluster_hint ), free_hint ); 1811 hal_remote_spt( XPTR( cxy , &fatfs_ctx_ptr->fs_info_buffer ), buffer ); 1812 1813 // initialize FATFS lock 1814 remote_rwlock_init( XPTR( cxy , &fatfs_ctx_ptr->lock ) , LOCK_FATFS_FAT ); 1815 1816 #if DEBUG_FATFS_CTX_INIT 1817 if( DEBUG_FATFS_CTX_INIT < cycle ) 1818 printk("\n[%s] thread[%x,%x] exit for fatfs_ctx (%x,%x)\n", 1819 __FUNCTION__, this->process->pid, this->trdid, cxy, fatfs_ctx_ptr ); 1820 #endif 1821 1822 return 0; 1823 1824 } // end fatfs_ctx_init() 1825 1826 ////////////////////////////////////////////// 1827 void fatfs_ctx_destroy( xptr_t fatfs_ctx_xp ) 1828 { 1829 kmem_req_t req; 1830 mapper_t * fat_mapper; 1831 uint8_t * fs_info_buffer; 1832 1833 // get FATFS context cluster and local pointer 1834 fatfs_ctx_t * fatfs_ctx_ptr = GET_PTR( fatfs_ctx_xp ); 1835 cxy_t fatfs_ctx_cxy = GET_CXY( fatfs_ctx_xp ); 1836 1837 // get pointer on FAT mapper 1838 fat_mapper = hal_remote_lpt( XPTR( fatfs_ctx_cxy , &fatfs_ctx_ptr->fat_mapper ) ); 1839 1840 // release FAT mapper 1841 mapper_destroy( XPTR( fatfs_ctx_cxy , fat_mapper ) ); 1842 1843 // get pointer on FS_INFO buffer 1844 fs_info_buffer = hal_remote_lpt( XPTR( fatfs_ctx_cxy , &fatfs_ctx_ptr->fs_info_buffer ) ); 1845 1846 // release FS_INFO buffer 1847 req.type = KMEM_KCM; 1848 req.ptr = fs_info_buffer; 1849 kmem_remote_free( fatfs_ctx_cxy , &req ); 1850 1851 // release FATFS context descriptor 1852 req.type = KMEM_KCM; 
1853 req.ptr = fatfs_ctx_ptr; 1854 kmem_remote_free( fatfs_ctx_cxy , &req ); 1855 1856 } // end fatfs_ctx_destroy() 1857 1858 ///////////////////////////////////////////////////////// 1859 error_t fatfs_add_dentry( xptr_t parent_inode_xp, 1860 vfs_dentry_t * dentry_ptr ) 1861 { 1862 error_t error; 1863 vfs_inode_t * parent_inode_ptr; // parent inode local pointer 1864 cxy_t parent_cxy; // pparent inode cluster 1865 xptr_t child_inode_xp; // extended pointer on child inode 1866 cxy_t child_cxy; // child inode cluster 1867 vfs_inode_t * child_inode_ptr; // child inode local pointer 1868 uint32_t length; // dentry name length 1869 uint32_t nb_lfn; // number or required LFN 1870 char sfn[11]; // buffer for SFN name 1871 uint8_t checksum; // name checksum 1872 mapper_t * mapper_ptr; // local pointer on parent directory mapper 1873 xptr_t mapper_xp; // extended pointer on parent directory mapper 1874 uint32_t size; // child inode size 1875 uint32_t type; // child inode type 1876 void * extend; // child inode extension 1877 uint32_t cluster_id; // child inode first cluster_id in FATFS 1878 1879 char child_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1880 1881 uint8_t buf[32]; // local buffer for one FAT32 directory entry 1882 1883 // check arguments 1884 assert( (parent_inode_xp != XPTR_NULL) , "parent_inode_xp argument is NULL\n" ); 1885 assert( (dentry_ptr != NULL) , "dentry_ptr argument is NULL\n" ); 1886 1887 // get directory inode cluster and local pointer 1888 parent_cxy = GET_CXY( parent_inode_xp ); 1889 parent_inode_ptr = GET_PTR( parent_inode_xp ); 1890 1891 // get extended pointers on child inode 1892 child_inode_xp = hal_remote_l64( XPTR( parent_cxy , &dentry_ptr->child_xp ) ); 1893 child_cxy = GET_CXY( child_inode_xp ); 1894 child_inode_ptr = GET_PTR( child_inode_xp ); 1895 1896 // get a local copy of the child name 1897 vfs_inode_get_name( child_inode_xp , child_name ); 1898 1899 #if DEBUG_FATFS_ADD_DENTRY 1900 uint32_t cycle = (uint32_t)hal_get_cycles(); 1901 thread_t 
* this = CURRENT_THREAD; 1902 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; 1903 vfs_inode_get_name( parent_inode_xp , parent_name ); 1904 if( DEBUG_FATFS_ADD_DENTRY < cycle ) 1905 printk("\n[%s] thread[%x,%x] enter for <%s> in <%s> directory / cycle %d\n", 1906 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name, cycle ); 1907 #endif 1908 1909 // get pointers on parent directory mapper 1910 mapper_ptr = hal_remote_lpt( XPTR( parent_cxy , &parent_inode_ptr->mapper ) ); 1911 mapper_xp = XPTR( parent_cxy , mapper_ptr ); 1912 1913 #if (DEBUG_FATFS_ADD_DENTRY & 1) 1914 mapper_display_page( mapper_xp , 0 , 512 ); 1915 #endif 1916 1917 // get relevant infos from child inode 1918 type = hal_remote_l32( XPTR( child_cxy , &child_inode_ptr->type ) ); 1919 size = hal_remote_l32( XPTR( child_cxy , &child_inode_ptr->size ) ); 1920 extend = hal_remote_lpt( XPTR( child_cxy , &child_inode_ptr->extend ) ); 1921 cluster_id = (uint32_t)(intptr_t)extend; 1922 1923 // analyse child name 1924 error = fatfs_name_format( child_name, 1925 &length, 1926 &nb_lfn, 1927 sfn, 1928 &checksum ); 1929 if ( error ) 1930 { 1931 printk("\n[ERROR] in %s : dentry name > 31 bytes\n", __FUNCTION__ ); 1932 return -1; 1933 } 1934 1935 // Search end of directory with two embedded loops: 1936 // - scan the pages in the mapper 1937 // - scan the entries in each page to find NO_MORE_ENTRY 1938 1939 xptr_t page_xp; // extended pointer on page descriptor 1940 xptr_t base_xp; // extended pointer on page base 1941 1942 // initialise loop variables 1943 uint32_t page_id = 0; // page index in mapper 1944 uint32_t offset = 0; // position in page 1945 uint32_t found = 0; // NO_MORE_ENTRY found 1946 1947 // loop on pages in mapper 1948 while ( found == 0 ) 1949 { 1950 // get extended pointer on page descriptor in mapper 1951 page_xp = mapper_get_page( mapper_xp , page_id ); 1952 1953 if ( page_xp == XPTR_NULL ) 1954 { 1955 printk("\n[ERROR] in %s : cannot extend directory mapper\n", 1956 __FUNCTION__ 
); 1957 return -1; 1958 } 1959 1960 // get pointer on page base 1961 base_xp = ppm_page2base( page_xp ); 1962 1963 // loop on directory entries in this page 1964 while ( (offset < 4096) && (found == 0) ) 1965 { 1966 if ( fatfs_get_remote_record( LDIR_ORD, (base_xp + offset) ) == NO_MORE_ENTRY ) 1967 { 1968 found = 1; 1969 } 1970 else 1971 { 1972 offset = offset + 32; 1973 } 1974 } // end loop on entries 1975 1976 if ( found == 0 ) 1977 { 1978 page_id++; 1979 offset = 0; 1980 } 1981 } // end loop on pages 1982 1983 #if (DEBUG_FATFS_ADD_DENTRY & 1) 1984 if( DEBUG_FATFS_ADD_DENTRY < cycle ) 1985 printk("\n[%s] thread[%x,%x] found NO_MORE_ENTRY : page_id %d / offset %d\n", 1986 __FUNCTION__, this->process->pid, this->trdid, page_id, offset ); 1987 #endif 1988 1989 // Modify the directory mapper: depending on the name length, 1990 // the new child requires to write (3, 4, or 5) directory entries. 1991 // We build one complete directory entry in a local buffer 1992 // before copying it the remote mapper. We use use a 5 steps FSM 1993 // (one state per entry to be written), that is traversed as : 1994 // LFN3 -> LFN2 -> LFN1 -> NORMAL -> NOMORE 1995 // At most two pages are modified: 1996 // - the page containing the NO_MORE_ENTRY is always modified 1997 // - the following page can be modified if the name spread on two pages. 
1998 1999 uint32_t step; // FSM state 2000 2001 if ( nb_lfn == 1 ) step = 3; 2002 else if ( nb_lfn == 2 ) step = 4; 2003 else if ( nb_lfn == 3 ) step = 5; 2004 2005 uint32_t i; // byte index in one 32 bytes directory 2006 uint32_t c; // character index in name 2007 2008 while ( step ) 2009 { 2010 // this block is only executed when the new name spread 2011 // on two pages, and we need to access a new page in mapper 2012 if ( offset >= 4096 ) 2013 { 2014 // copy the modified page to IOC device 2015 error = fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 2016 2017 if ( error ) 2018 { 2019 printk("\n[ERROR] in %s : cannot update directory on device\n", 2020 __FUNCTION__ ); 2021 return -1; 2022 } 2023 2024 // get the next page in directory mapper 2025 page_xp = mapper_get_page( mapper_xp , page_id + 1 ); 2026 2027 if ( page_xp == XPTR_NULL ) 2028 { 2029 printk("\n[ERROR] in %s : cannot extend directory mapper\n", 2030 __FUNCTION__ ); 2031 return -1; 2032 } 2033 2034 // get pointer on page base 2035 base_xp = ppm_page2base( page_xp ); 2036 2037 // update offset 2038 offset = 0; 2039 } 2040 2041 #if (DEBUG_FATFS_ADD_DENTRY & 1) 2042 cycle = (uint32_t)hal_get_cycles(); 2043 if( DEBUG_FATFS_ADD_DENTRY < cycle ) 2044 printk("\n[%s] FSM step = %d / offset = %x / nb_lfn = %d / cycle %d\n", 2045 __FUNCTION__, step, offset, nb_lfn, cycle ); 2046 #endif 2047 2048 // write one FATFS directory entry (32 bytes) per iteration 2049 switch ( step ) 2050 { 2051 case 5: // write LFN3 entry 2052 { 2053 c = 26; 2054 2055 // write 32 bytes in local buf 2056 for ( i = 0 ; i < 32 ; i++ ) 2057 { 2058 if (i == 0) 2059 { 2060 if ( nb_lfn == 3) buf[i] = 0x43; 2061 else buf[i] = 0x03; 2062 } 2063 else if ( ( ((i >= 1 ) && (i<=10) && ((i&1)==1)) || 2064 ((i >= 14) && (i<=25) && ((i&1)==0)) || 2065 ((i >= 28) && (i<=31) && ((i&1)==0)) ) && 2066 ( c < length ) ) 2067 { 2068 buf[i] = child_name[c]; 2069 c++; 2070 } 2071 else if (i == 11) buf[i] = 0x0F; 2072 else if (i == 13) buf[i] = checksum; 2073 else 
buf[i] = 0x00; 2074 } 2075 2076 // copy 32 bytes from local buffer to remote mapper 2077 hal_remote_memcpy( base_xp + offset , XPTR( local_cxy , buf ) , 32 ); 2078 2079 step--; 2080 break; 2081 } 2082 case 4: // write LFN2 entry 2083 { 2084 c = 13; 2085 2086 // write 32 bytes in local buf 2087 for ( i = 0 ; i < 32 ; i++ ) 2088 { 2089 if (i == 0) 2090 { 2091 if ( nb_lfn == 2) buf[i] = 0x42; 2092 else buf[i] = 0x02; 2093 } 2094 else if ( ( ((i >= 1 ) && (i<=10) && ((i&1)==1)) || 2095 ((i >= 14) && (i<=25) && ((i&1)==0)) || 2096 ((i >= 28) && (i<=31) && ((i&1)==0)) ) && 2097 ( c < length ) ) 2098 { 2099 buf[i] = child_name[c]; 2100 c++; 2101 } 2102 else if (i == 11) buf[i] = 0x0F; 2103 else if (i == 13) buf[i] = checksum; 2104 else buf[i] = 0x00; 2105 } 2106 2107 // copy 32 bytes from local buffer to remote mapper 2108 hal_remote_memcpy( base_xp + offset , XPTR( local_cxy , buf ) , 32 ); 2109 2110 step--; 2111 break; 2112 } 2113 case 3: // Write LFN1 entry 2114 { 2115 c = 0; 2116 2117 // write 32 bytes in local buf 2118 for ( i = 0 ; i < 32 ; i++ ) 2119 { 2120 if (i == 0) 2121 { 2122 if ( nb_lfn == 1) buf[i] = 0x41; 2123 else buf[i] = 0x01; 2124 } 2125 else if ( ( ((i >= 1 ) && (i<=10) && ((i&1)==1)) || 2126 ((i >= 14) && (i<=25) && ((i&1)==0)) || 2127 ((i >= 28) && (i<=31) && ((i&1)==0)) ) && 2128 ( c < length ) ) 2129 { 2130 buf[i] = child_name[c]; 2131 c++; 2132 } 2133 else if (i == 11) buf[i] = 0x0F; 2134 else if (i == 13) buf[i] = checksum; 2135 else buf[i] = 0x00; 2136 } 2137 2138 // copy 32 bytes from local buffer to remote mapper 2139 hal_remote_memcpy( base_xp + offset , XPTR( local_cxy , buf ) , 32 ); 2140 2141 step--; 2142 break; 2143 } 2144 case 2: // write NORMAL entry 2145 { 2146 // write 32 bytes in local buf 2147 for ( i = 0 ; i < 32 ; i++ ) 2148 { 2149 if ( i < 11 ) // 8.3 SFN 2150 { 2151 buf[i] = sfn[i]; 2152 } 2153 else if (i == 11) // ATTR 2154 { 2155 if (type == INODE_TYPE_DIR) buf[i] = 0x10; 2156 else buf[i] = 0x20; 2157 } 2158 else if (i == 20) 
buf[i] = cluster_id>>16; // cluster.B2 2159 else if (i == 21) buf[i] = cluster_id>>24; // cluster.B3 2160 else if (i == 26) buf[i] = cluster_id>>0; // cluster.B0 2161 else if (i == 27) buf[i] = cluster_id>>8; // cluster.B1 2162 else if (i == 28) buf[i] = size>>0; // size.B0 2163 else if (i == 29) buf[i] = size>>8; // size.B1 2164 else if (i == 30) buf[i] = size>>16; // size.B2 2165 else if (i == 31) buf[i] = size>>24; // size.B3 2166 else buf[i] = 0x00; 2167 } 2168 2169 // copy 32 bytes from local buffer to remote mapper 2170 hal_remote_memcpy( base_xp + offset , XPTR( local_cxy , buf ) , 32 ); 2171 2172 // set the dentry "extend" field 2173 hal_remote_spt( XPTR( parent_cxy , &dentry_ptr->extend ), 2174 (void *)(intptr_t)(((page_id << 12) + offset) >> 5 ) ); 2175 step--; 2176 break; 2177 } 2178 case 1: // write NOMORE entry 2179 { 2180 hal_remote_s32( base_xp + offset , 0 ); 2181 2182 step--; 2183 break; 2184 } 2185 2186 } // end switch step 2187 2188 offset += 32; 2189 2190 } // end while 2191 2192 // copy the modified page to the IOC device 2193 error = fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 2194 2195 if ( error ) 2196 { 2197 printk("\n[ERROR] in %s : cannot update directory on device\n", 2198 __FUNCTION__ ); 2199 return -1; 2200 } 2201 2202 #if DEBUG_FATFS_ADD_DENTRY 2203 cycle = (uint32_t)hal_get_cycles(); 2204 if( DEBUG_FATFS_ADD_DENTRY < cycle ) 2205 printk("\n[%s] thread[%x,%x] exit for <%s> in <%s> directory\n", 2206 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name ); 2207 #endif 2208 2209 #if (DEBUG_FATFS_ADD_DENTRY & 1) 2210 mapper_display_page( mapper_xp , 0 , 512 ); 2211 #endif 2212 2213 return 0; 2214 2215 } // end fatfs_add_dentry() 2216 2217 //////////////////////////////////////////////////////////// 2218 error_t fatfs_remove_dentry( xptr_t parent_inode_xp, 2219 vfs_dentry_t * dentry_ptr ) 2220 { 2221 error_t error; 2222 vfs_inode_t * parent_inode_ptr; // parent inode local pointer 2223 cxy_t parent_cxy; // pparent inode 
cluster 2224 xptr_t child_inode_xp; // extended pointer on child inode 2225 cxy_t child_cxy; // child inode cluster 2226 vfs_inode_t * child_inode_ptr; // child inode local pointer 2227 xptr_t mapper_xp; // extended pointer on mapper 2228 mapper_t * mapper_ptr; // local pointer on mapper 2229 xptr_t page_xp; // extended pointer on mapper page descriptor 2230 xptr_t base_xp; // extended pointer on mapper page base 2231 uint32_t dentry_id; // FAT32 directory entry index 2232 uint32_t page_id; // page index in directory mapper 2233 uint32_t offset; // offset in this mapper page 2234 2235 char child_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2236 2237 // check arguments 2238 assert( (parent_inode_xp != XPTR_NULL) , "parent_inode_xp argument is NULL\n" ); 2239 assert( (dentry_ptr != NULL) , "dentry_ptr argument is NULL\n" ); 2240 2241 // get directory inode cluster and local pointer 2242 parent_cxy = GET_CXY( parent_inode_xp ); 2243 parent_inode_ptr = GET_PTR( parent_inode_xp ); 2244 2245 // get extended pointers on child inode 2246 child_inode_xp = hal_remote_l64( XPTR( parent_cxy , &dentry_ptr->child_xp ) ); 2247 child_cxy = GET_CXY( child_inode_xp ); 2248 child_inode_ptr = GET_PTR( child_inode_xp ); 2249 2250 // get a local copy of the child name 2251 vfs_inode_get_name( child_inode_xp , child_name ); 2252 2253 #if DEBUG_FATFS_REMOVE_DENTRY 2254 uint32_t cycle = (uint32_t)hal_get_cycles(); 2255 thread_t * this = CURRENT_THREAD; 2256 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2257 vfs_inode_get_name( parent_inode_xp , parent_name ); 2258 if( DEBUG_FATFS_REMOVE_DENTRY < cycle ) 2259 printk("\n[%s] thread[%x,%x] enter for <%s> in <%s> directory / cycle %d\n", 2260 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name, cycle ); 2261 #endif 2262 2263 // get pointers on directory mapper 2264 mapper_ptr = hal_remote_lpt( XPTR( parent_cxy , &parent_inode_ptr->mapper ) ); 2265 mapper_xp = XPTR( parent_cxy , mapper_ptr ); 2266 2267 // compute number of LFN entries 
2268 uint32_t nb_lfn; 2269 uint32_t name_length = strlen( child_name ); 2270 2271 if ( name_length <= 13 ) nb_lfn = 1; 2272 else if ( name_length <= 26 ) nb_lfn = 2; 2273 else nb_lfn = 3; 2274 2275 // We must invalidate (2,3,4) 32 bytes entries: 2276 // - the NORMAL entry, registered in dentry->extend. 2277 // - the (1,2,3) preceding LFN entries. 2278 // At most two pages are modified: 2279 // - the page containing the NORMAL entry is always modified. 2280 // - the preceding page is modified when the name spread on two pages. 2281 2282 // get NORMAL entry index from dentry extension 2283 dentry_id = (uint32_t)(intptr_t)hal_remote_lpt( XPTR( parent_cxy , &dentry_ptr->extend ) ); 2284 2285 // get page index and offset in parent directory mapper 2286 page_id = dentry_id >> 7; 2287 offset = (dentry_id & 0x7F)<<5; 2288 2289 #if DEBUG_FATFS_REMOVE_DENTRY & 1 2290 if( DEBUG_FATFS_REMOVE_DENTRY < cycle ) 2291 printk("\n[%s] dentry_id %x / page_id %x / offset %x\n", 2292 __FUNCTION__, dentry_id, page_id, offset ); 2293 #endif 2294 2295 // get extended pointer on page descriptor 2296 page_xp = mapper_get_page( mapper_xp , page_id ); 2297 2298 if ( page_xp == XPTR_NULL ) 2299 { 2300 printk("\n[ERROR] in %s : cannot access directory mapper\n", __FUNCTION__ ); 2301 return -1; 2302 } 2303 2304 // get extended pointer on page base 2305 base_xp = ppm_page2base( page_xp ); 2306 2307 // invalidate NORMAL entry in directory cache 2308 hal_remote_sb( base_xp + offset , 0xE5 ); 2309 2310 // invalidate LFN entries 2311 while ( nb_lfn ) 2312 { 2313 // this block is only executed when the removed name 2314 // spread on two mapper pages 2315 if (offset == 0) // we must load page (page_id - 1) 2316 { 2317 // copy the modified page to the IOC device 2318 error = fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 2319 2320 if ( error ) 2321 { 2322 printk("\n[ERROR] in %s : cannot update directory on device\n", 2323 __FUNCTION__ ); 2324 return -1; 2325 } 2326 2327 // get extended pointer on page 
descriptor 2328 page_xp = mapper_get_page( mapper_xp , page_id ); 2329 2330 if ( page_xp == XPTR_NULL ) 2331 { 2332 printk("\n[ERROR] in %s : cannot access directory mapper\n", __FUNCTION__ ); 2333 return -1; 2334 } 2335 2336 // get extended pointer on page base 2337 base_xp = ppm_page2base( page_xp ); 2338 2339 // update offset 2340 offset = 4096; 2341 } 2342 2343 offset = offset - 32; 2344 2345 // check for LFN entry 2346 assert( (fatfs_get_remote_record( DIR_ATTR, base_xp + offset ) == ATTR_LONG_NAME_MASK ), 2347 "this directory entry must be a LFN\n"); 2348 2349 // invalidate LFN entry 2350 hal_remote_sb( base_xp + offset , 0xE5 ); 2351 2352 nb_lfn--; 2353 } 2354 2355 // copy the modified page to the IOC device 2356 error = fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 2357 2358 if ( error ) 2359 { 2360 printk("\n[ERROR] in %s : cannot update directory on device\n", 2361 __FUNCTION__ ); 2362 return -1; 2363 } 2364 2365 #if DEBUG_FATFS_REMOVE_DENTRY 2366 cycle = (uint32_t)hal_get_cycles(); 2367 if( DEBUG_FATFS_REMOVE_DENTRY < cycle ) 2368 printk("\n[%s] thread[%x,%x] exit for <%s> in <%s> directory\n", 2369 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name ); 2370 #endif 2371 2372 return 0; 2373 2374 } // end fatfs_remove_dentry 2375 2376 ///////////////////////////////////////////////////////////////////// 2377 error_t fatfs_new_dentry_from_mapper( xptr_t parent_inode_xp, 2378 vfs_dentry_t * dentry_ptr ) 2379 { 2380 uint32_t cluster_id; // directory entry first FATFS cluster 2381 uint32_t size; // directory entry size 2382 bool_t is_dir; // directory entry type (file/dir) 2383 cxy_t parent_cxy; // parent inode cluster 2384 vfs_inode_t * parent_inode_ptr; // parent inode local pointer 2385 mapper_t * parent_mapper; // pointer on parent directory mapper 2386 xptr_t child_inode_xp; // extended pointer on child inode 2387 cxy_t child_cxy; // child inode cluster 2388 vfs_inode_t * child_inode_ptr; // child inode local pointer 2389 error_t error; 
2390 2391 uint8_t buf[32]; // FAT32 directory entry local copy 2392 2393 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; // local parent name copy 2394 char child_name[CONFIG_VFS_MAX_NAME_LENGTH]; // local child name copy 2395 2396 // check arguments 2397 assert( (parent_inode_xp != XPTR_NULL) , "parent_inode_xp is NULL\n" ); 2398 assert( (dentry_ptr != NULL ) , "dentry_ptr is NULL\n" ); 2399 2400 // get parent inode cluster and local pointer 2401 parent_cxy = GET_CXY( parent_inode_xp ); 2402 parent_inode_ptr = GET_PTR( parent_inode_xp ); 2403 2404 // get child inode cluster and pointers 2405 child_inode_xp = hal_remote_l64( XPTR( parent_cxy , &dentry_ptr->child_xp ) ); 2406 child_cxy = GET_CXY( child_inode_xp ); 2407 child_inode_ptr = GET_PTR( child_inode_xp ); 2408 2409 // get child and parent names 2410 vfs_inode_get_name( parent_inode_xp , parent_name ); 2411 vfs_inode_get_name( child_inode_xp , child_name ); 2412 2413 #if DEBUG_FATFS_NEW_DENTRY_FROM 2414 uint32_t cycle = (uint32_t)hal_get_cycles(); 2415 thread_t * this = CURRENT_THREAD; 2416 if( DEBUG_FATFS_NEW_DENTRY_FROM < cycle ) 1954 2417 printk("\n[%s] thread[%x,%x] enter for child <%s> in parent <%s> / cycle %d\n", 1955 __FUNCTION__, this->process->pid, this->trdid, name , parent_name , cycle );2418 __FUNCTION__, this->process->pid, this->trdid, child_name , parent_name , cycle ); 1956 2419 #endif 1957 2420 1958 2421 // get local pointer on parent mapper 1959 mapper = parent_inode->mapper; 1960 1961 // get pointer and index in mapper for searched directory entry 1962 error = fatfs_scan_directory( mapper, name , &entry , &index ); 1963 1964 // return non fatal error if not found 1965 if( error ) 1966 { 1967 vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name ); 1968 printk("\n[ERROR] in %s : cannot find <%s> entry in <%s> directory mapper\n", 1969 __FUNCTION__, name , parent_name, name ); 1970 return -1; 1971 } 1972 1973 1974 // get relevant infos from FAT32 directory entry 1975 cluster = 
(fatfs_get_record( DIR_FST_CLUS_HI , entry ) << 16) | 1976 (fatfs_get_record( DIR_FST_CLUS_LO , entry ) ) ; 1977 is_dir = (fatfs_get_record( DIR_ATTR , entry ) & ATTR_DIRECTORY); 1978 size = fatfs_get_record( DIR_FILE_SIZE , entry ); 1979 1980 // scan list of parent dentries to search the parent_inode 1981 bool_t found = false; 1982 XLIST_FOREACH( root_xp , iter_xp ) 1983 { 1984 // get pointers on dentry 1985 dentry_xp = XLIST_ELEMENT( iter_xp , vfs_dentry_t , parents ); 1986 dentry_cxy = GET_CXY( dentry_xp ); 1987 dentry_ptr = GET_PTR( dentry_xp ); 1988 1989 // get local pointer on current parent directory inode 1990 vfs_inode_t * current = hal_remote_lpt( XPTR( dentry_cxy , &dentry_ptr->parent ) ); 1991 1992 // check if current parent is the searched parent 1993 if( XPTR( dentry_cxy , current ) == XPTR( local_cxy , parent_inode ) ) 1994 { 1995 found = true; 1996 break; 1997 } 1998 } 1999 2000 if( found == false ) 2001 { 2002 vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name ); 2003 printk("\n[ERROR] in %s : cannot find <%s> directory in list of parents for <%s>\n", 2004 __FUNCTION__, parent_name, name ); 2005 return -1; 2006 } 2422 parent_mapper = hal_remote_lpt( XPTR( parent_cxy , &parent_inode_ptr->mapper ) ); 2423 2424 // try to get pointer and index of directory entry in mapper 2425 uint8_t * entry = NULL; 2426 uint32_t index = 0; 2427 2428 error = fatfs_scan_directory( XPTR( parent_cxy , parent_mapper ), 2429 child_name, 2430 &entry, 2431 &index ); 2432 2433 // an error can be non fatal, for a new (created) entry 2434 if( error ) return -1; 2435 2436 // get local copy of found directory entry 2437 hal_remote_memcpy( XPTR( local_cxy , buf ), 2438 XPTR( parent_cxy , entry ), 32 ); 2439 2440 // get relevant infos from directory entry 2441 cluster_id = (fatfs_get_record( DIR_FST_CLUS_HI , buf ) << 16) | 2442 (fatfs_get_record( DIR_FST_CLUS_LO , buf ) ) ; 2443 is_dir = (fatfs_get_record( DIR_ATTR , buf ) & ATTR_DIRECTORY ); 2444 size = 
fatfs_get_record( DIR_FILE_SIZE , buf ); 2007 2445 2008 2446 // update the child inode "type", "size", and "extend" fields 2009 2447 vfs_inode_type_t type = (is_dir) ? INODE_TYPE_DIR : INODE_TYPE_FILE; 2010 2448 2011 hal_remote_s32( XPTR( child_ inode_cxy , &child_inode_ptr->type ) , type );2012 hal_remote_s32( XPTR( child_ inode_cxy , &child_inode_ptr->size ) , size );2013 hal_remote_s32( XPTR( child_ inode_cxy , &child_inode_ptr->extend ) , cluster);2449 hal_remote_s32( XPTR( child_cxy , &child_inode_ptr->type ) , type ); 2450 hal_remote_s32( XPTR( child_cxy , &child_inode_ptr->size ) , size ); 2451 hal_remote_s32( XPTR( child_cxy , &child_inode_ptr->extend ) , cluster_id ); 2014 2452 2015 2453 // update the dentry "extend" field 2016 dentry_ptr->extend = (void *)(intptr_t)index;2017 2018 #if DEBUG_FATFS_NEW_DENTRY 2454 hal_remote_spt( XPTR( parent_cxy , &dentry_ptr->extend ) , (void *)(intptr_t)index ); 2455 2456 #if DEBUG_FATFS_NEW_DENTRY_FROM 2019 2457 cycle = (uint32_t)hal_get_cycles(); 2020 if( DEBUG_FATFS_NEW_DENTRY < cycle ) 2021 printk("\n[%s] thread[%x,%x] exit for <%s> in <%s> / cluster_id %x / size %d / cycle %d\n", 2022 __FUNCTION__, this->process->pid, this->trdid, name, parent_name, cluster, size, cycle ); 2023 #endif 2024 2025 2026 #if (DEBUG_FATFS_NEW_DENTRY & 1) 2027 if( DEBUG_FATFS_NEW_DENTRY < cycle ) 2028 { 2029 fatfs_display_fat( 0 , 0 , 64 ); 2030 fatfs_display_fat( cluster >> 10 , (cluster & 0x3FF) , 32 ); 2031 } 2458 if( DEBUG_FATFS_NEW_DENTRY_FROM < cycle ) 2459 printk("\n[%s] thread[%x,%x] exit for <%s> in <%s> / cluster_id %x / size %d\n", 2460 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name, cluster_id, size ); 2461 #endif 2462 2463 #if (DEBUG_FATFS_NEW_DENTRY_FROM & 1) 2464 if( DEBUG_FATFS_NEW_DENTRY_FROM < cycle ) 2465 fatfs_display_fat( cluster_id , 32 ); 2032 2466 #endif 2033 2467 2034 2468 return 0; 2035 2469 2036 } // end fatfs_new_dentry() 2037 2038 ////////////////////////////////////////////////// 2039 
error_t fatfs_update_dentry( vfs_inode_t * inode, 2040 vfs_dentry_t * dentry, 2041 uint32_t size ) 2042 { 2043 uint8_t * entry; // pointer on FAT32 directory entry (array of 32 bytes) 2044 uint32_t index; // index of FAT32 directory entry in mapper 2045 mapper_t * mapper; // pointer on directory mapper 2046 error_t error; 2047 2048 char dir_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2470 } // end fatfs_new_dentry_from_mapper() 2471 2472 /////////////////////////////////////////////////////////////////// 2473 error_t fatfs_new_dentry_to_mapper( xptr_t parent_inode_xp, 2474 vfs_dentry_t * dentry_ptr ) 2475 { 2476 uint32_t cluster_id; // directory entry cluster 2477 cxy_t parent_cxy; // parent inode cluster identifier 2478 vfs_inode_t * parent_inode_ptr; // child inode local pointer 2479 xptr_t child_inode_xp; // extended pointer on child inode 2480 cxy_t child_cxy; // child inode cluster identifier 2481 vfs_inode_t * child_inode_ptr; // child inode local pointer 2482 error_t error; 2483 2484 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2485 char child_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2049 2486 2050 2487 // check arguments 2051 assert( (inode != NULL) , "inode is NULL\n" ); 2052 assert( (dentry != NULL) , "dentry is NULL\n" ); 2488 assert( (parent_inode_xp != XPTR_NULL) , "parent_inode_xp argument is NULL\n" ); 2489 assert( (dentry_ptr != NULL) , "dentry_ptr argument NULL\n" ); 2490 2491 // get child inode cluster and local pointer 2492 parent_cxy = GET_CXY( parent_inode_xp ); 2493 parent_inode_ptr = GET_PTR( parent_inode_xp ); 2494 2495 // get child inode cluster and pointers 2496 child_inode_xp = hal_remote_l64( XPTR( parent_cxy , &dentry_ptr->child_xp ) ); 2497 child_cxy = GET_CXY( child_inode_xp ); 2498 child_inode_ptr = GET_PTR( child_inode_xp ); 2499 2500 // get child and parent names 2501 vfs_inode_get_name( parent_inode_xp , parent_name ); 2502 vfs_inode_get_name( child_inode_xp , child_name ); 2503 2504 #if DEBUG_FATFS_NEW_DENTRY_TO_MAP 2505 uint32_t cycle = 
(uint32_t)hal_get_cycles(); 2506 thread_t * this = CURRENT_THREAD; 2507 if( DEBUG_FATFS_NEW_DENTRY_TO_MAP < cycle ) 2508 printk("\n[%s] thread[%x,%x] enter for child <%s> in parent <%s> / cycle %d\n", 2509 __FUNCTION__, this->process->pid, this->trdid, child_name , parent_name , cycle ); 2510 #endif 2511 2512 // 1. allocate one FATFS cluster (update FAT and FSINFO) 2513 error = fatfs_cluster_alloc( 0 , &cluster_id ); 2514 2515 if( error ) 2516 { 2517 printk("\n[ERROR] in %s : cannot find a free cluster_id\n", 2518 __FUNCTION__ ); 2519 return -1; 2520 } 2521 2522 // 2. register cluster_id in inode descriptor 2523 hal_remote_spt( XPTR( child_cxy , &child_inode_ptr->extend ), 2524 (void*)(intptr_t)cluster_id ); 2525 2526 // 3. introduce dentry in the directory mapper 2527 error = fatfs_add_dentry( parent_inode_xp , dentry_ptr ); 2528 2529 if( error ) 2530 { 2531 printk("\n[ERROR] in %s : cannot update parent directory mapper\n", 2532 __FUNCTION__ ); 2533 // TODO release cluster_id [AG] 2534 return -1; 2535 } 2536 2537 #if DEBUG_FATFS_NEW_DENTRY_TO_MAP 2538 if( DEBUG_FATFS_NEW_DENTRY_TO_MAP < cycle ) 2539 printk("\n[%s] thread[%x,%x] exit for <%s> in <%s> / cluster_id %x\n", 2540 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name, cluster_id ); 2541 #endif 2542 2543 return 0; 2544 2545 } // end fatfs_new_dentry_to mapper() 2546 2547 2548 //////////////////////////////////////////////////////////// 2549 error_t fatfs_update_dentry( xptr_t parent_inode_xp, 2550 vfs_dentry_t * dentry_ptr ) 2551 { 2552 cxy_t parent_cxy; // parent directory cluster identifier 2553 vfs_inode_t * parent_inode_ptr; // extended pointer on parent directory inode 2554 mapper_t * parent_mapper_ptr; // local pointer on parent directory mapper 2555 xptr_t parent_mapper_xp; // extended pointer on parent directory mapper 2556 xptr_t child_inode_xp; // extended pointer on child inode 2557 cxy_t child_cxy; // child inode cluster identifier 2558 vfs_inode_t * child_inode_ptr; // 
extended pointer on child inode 2559 2560 uint32_t current_size; // current size in directory mapper 2561 uint32_t new_size; // new size (from child inode) 2562 2563 uint32_t entry_id; // directory entry index in parent directory mapper 2564 uint32_t page_id; // page_index in parent directory mapper 2565 uint32_t offset; // directory entry offset in page 2566 xptr_t page_xp; // extended pointer on page descriptor 2567 xptr_t base_xp; // extended pointer on page base 2568 xptr_t entry_xp; // extended pointer on directory entry 2569 2570 error_t error; 2571 2572 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2573 char child_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2574 2575 // check arguments 2576 assert( (parent_inode_xp != XPTR_NULL) , "parent_inode_xp argument is NULL\n" ); 2577 assert( (dentry_ptr != NULL) , "dentry_ptr argument is NULL\n" ); 2578 2579 // get parent directory cluster ans local pointer 2580 parent_inode_ptr = GET_PTR( parent_inode_xp ); 2581 parent_cxy = GET_CXY( parent_inode_xp ); 2582 2583 // get extended pointer on child inode 2584 child_inode_xp = hal_remote_l64( XPTR( parent_cxy , &dentry_ptr->child_xp ) ); 2585 2586 // get child and parent names 2587 vfs_inode_get_name( parent_inode_xp , parent_name ); 2588 vfs_inode_get_name( child_inode_xp , child_name ); 2053 2589 2054 2590 #if DEBUG_FATFS_UPDATE_DENTRY 2055 2591 uint32_t cycle = (uint32_t)hal_get_cycles(); 2056 2592 thread_t * this = CURRENT_THREAD; 2057 vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name );2058 2593 if( DEBUG_FATFS_UPDATE_DENTRY < cycle ) 2059 printk("\n[%s] thread[%x,%x] enter for <%s/%s> / size %d / cycle %d\n", 2060 __FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, size, cycle ); 2061 #endif 2062 2063 // get local pointer on mapper 2064 mapper = inode->mapper; 2065 2066 // get pointer and index in mapper for searched directory entry 2067 error = fatfs_scan_directory( mapper, dentry->name , &entry , &index ); 2068 2069 if( error ) 2070 { 2071 
vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name ); 2072 printk("\n[ERROR] in %s : cannot find <%s> in parent mapper <%s>\n", 2073 __FUNCTION__, dentry->name, dir_name ); 2074 return -1; 2075 } 2076 2077 // get current size value 2078 uint32_t current_size = fatfs_get_record( DIR_FILE_SIZE , entry ); 2594 printk("\n[%s] thread[%x,%x] enter for <%s> in <%s> / new_size %d / cycle %d\n", 2595 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name, new_size, cycle ); 2596 #endif 2597 2598 // get child inode cluster and local pointer 2599 child_cxy = GET_CXY( child_inode_xp ); 2600 child_inode_ptr = GET_PTR( child_inode_xp ); 2601 2602 // get size from child inode 2603 new_size = hal_remote_l32( XPTR( child_cxy , &child_inode_ptr->size ) ); 2604 2605 // get local and extended pointers on parent directory mapper 2606 parent_mapper_ptr = hal_remote_lpt( XPTR( parent_cxy , &parent_inode_ptr->mapper ) ); 2607 parent_mapper_xp = XPTR( parent_cxy , parent_mapper_ptr ); 2608 2609 // get directory entry index from dentry extension 2610 entry_id = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &dentry_ptr->extend ) ); 2611 2612 // get page index and offset in parent directory mapper 2613 page_id = entry_id >> 7; 2614 offset = (entry_id & 0x7F) << 5; 2615 2616 // get extended pointers on page descriptor and page base 2617 page_xp = mapper_get_page( parent_mapper_xp , page_id ); 2618 base_xp = ppm_page2base( page_xp ); 2619 2620 // build extended pointer on directory entry 2621 entry_xp = base_xp + offset; 2622 2623 // get current size from directory mapper 2624 current_size = fatfs_get_remote_record( DIR_FILE_SIZE , entry_xp ); 2079 2625 2080 2626 // update dentry in mapper & device only if required 2081 if( size != current_size )2627 if( new_size != current_size ) 2082 2628 { 2083 2629 // set size field in FAT32 directory entry 2084 fatfs_set_record( DIR_FILE_SIZE , entry , size ); 2085 2086 // get pointer on modified page base 2087 void * base = (void 
*)((intptr_t)entry & (~CONFIG_PPM_PAGE_MASK)); 2088 2089 // get extended pointer on modified page descriptor 2090 xptr_t page_xp = ppm_base2page( XPTR( local_cxy , base ) ); 2091 2092 // synchronously update the modified page on device 2630 fatfs_set_remote_record( DIR_FILE_SIZE , entry_xp , new_size ); 2631 2632 // synchronously update the modified mapper page on device 2093 2633 error = fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 2094 2634 2095 2635 if( error ) 2096 2636 { 2097 vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name );2098 2637 printk("\n[ERROR] in %s : cannot update parent directory <%s> on device\n", 2099 __FUNCTION__, dir_name );2638 __FUNCTION__, parent_name ); 2100 2639 return -1; 2101 2640 } … … 2105 2644 cycle = (uint32_t)hal_get_cycles(); 2106 2645 if( DEBUG_FATFS_UPDATE_DENTRY < cycle ) 2107 printk("\n[%s] thread[%x,%x] exit / updated size for <%s/%s>/ cycle %d\n",2108 __FUNCTION__, this->process->pid, this->trdid, dir_name, dentry->name, cycle );2646 printk("\n[%s] thread[%x,%x] exit for <%s> in <%s> directory / size %d / cycle %d\n", 2647 __FUNCTION__, this->process->pid, this->trdid, parent_name, child->name, new_size, cycle ); 2109 2648 #endif 2110 2649 … … 2168 2707 { 2169 2708 // get one page from mapper 2170 page_xp = mapper_ remote_get_page( mapper_xp , page_id );2709 page_xp = mapper_get_page( mapper_xp , page_id ); 2171 2710 2172 2711 if( page_xp == XPTR_NULL) return -1; … … 2273 2812 } // end fatfs_get_user_dir() 2274 2813 2275 /////////////////////////////////////////////// 2276 error_t fatfs_sync_inode( vfs_inode_t * inode ) 2277 { 2814 /////////////////////////////////////////// 2815 error_t fatfs_sync_inode( xptr_t inode_xp ) 2816 { 2817 cxy_t inode_cxy; // remote inode cluster 2818 vfs_inode_t * inode_ptr; // remote inode local pointer 2819 mapper_t * mapper; // remote inode mapper local pointer 2820 uint32_t size; // remote inode size in bytes 2821 uint32_t type; // remote inode type 2822 xptr_t rt_xp; // extended 
pointer on mapper radix tree 2823 uint32_t npages; // number of pages in mapper 2824 uint32_t page_id; // current page index in mapper 2825 xptr_t page_xp; // extended pointer on current page 2826 page_t * page_ptr; // local pointer on current page 2827 uint32_t flags; // current page flags 2828 error_t error; 2278 2829 2279 2830 // check inode pointer and cluster index 2280 assert( (inode != NULL) , "inode pointer undefined\n" ); 2281 assert( (inode->mapper != NULL ) , "mapper pointer undefined\n" ); 2282 assert( (inode->type == INODE_TYPE_FILE) , "inode must be a file\n" ); 2831 assert( (inode_xp != XPTR_NULL) , "inode pointer undefined\n" ); 2283 2832 2284 2833 #if DEBUG_FATFS_SYNC_INODE … … 2286 2835 uint32_t cycle = (uint32_t)hal_get_cycles(); 2287 2836 thread_t * this = CURRENT_THREAD; 2288 vfs_inode_get_name( XPTR( local_cxy , inode ), name );2837 vfs_inode_get_name( inode_xp , name ); 2289 2838 if( DEBUG_FATFS_SYNC_INODE < cycle ) 2290 2839 printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n", … … 2292 2841 #endif 2293 2842 2294 error_t error; 2295 mapper_t * mapper; 2296 page_t * page; 2297 uint32_t page_id; 2298 2299 // get mapper from inode 2300 mapper = inode->mapper; 2301 2302 // compute max number of pages in mapper from file size 2303 uint32_t size = inode->size; 2304 uint32_t pages = size >> CONFIG_PPM_PAGE_SHIFT; 2305 if( size & CONFIG_PPM_PAGE_MASK ) pages++; 2843 // get inode cluster and local pointer 2844 inode_cxy = GET_CXY( inode_xp ); 2845 inode_ptr = GET_PTR( inode_xp ); 2846 2847 //get inode mapper pointer 2848 mapper = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 2849 2850 assert( (mapper != NULL) , "mapper pointer is NULL\n" ); 2851 2852 // get inode type and size 2853 size = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->size ) ); 2854 type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) ); 2855 2856 assert( (type == INODE_TYPE_FILE) , "inode is not a file\n" ); 2857 2858 // compute number of pages 2859 npages = 
size >> CONFIG_PPM_PAGE_SHIFT; 2860 if( size & CONFIG_PPM_PAGE_MASK ) npages++; 2306 2861 2307 // get pointeron mapper radix tree2308 grdxt_t * rt = &mapper->rt;2862 // build pointers on mapper radix tree 2863 rt_xp = XPTR( inode_cxy , &mapper->rt ); 2309 2864 2310 2865 // scan all pages 2311 for( page_id = 0 ; page_id < pages ; page_id++ )2866 for( page_id = 0 ; page_id < npages ; page_id++ ) 2312 2867 { 2313 2868 // get page descriptor from mapper 2314 page = grdxt_lookup( rt, page_id );2869 page_xp = grdxt_remote_lookup( rt_xp , page_id ); 2315 2870 2316 2871 // check all existing pages 2317 if ( page != NULL ) 2318 { 2319 if ( page->flags & PG_DIRTY ) 2872 if ( page_xp != XPTR_NULL ) 2873 { 2874 // get page cluster and local pointer 2875 page_ptr = GET_PTR( page_xp ); 2876 2877 // get page flags 2878 flags = hal_remote_l32( XPTR( inode_cxy , &page_ptr->flags ) ); 2879 2880 if ( flags & PG_DIRTY ) 2320 2881 { 2321 2882 2322 2883 #if (DEBUG_FATFS_SYNC_INODE & 1) 2323 2884 if( DEBUG_FATFS_SYNC_INODE < cycle ) 2324 printk("\n[%s] thread[%x,%x] synchronizes page %d from<%s> mapper to IOC device\n",2885 printk("\n[%s] thread[%x,%x] synchronizes page %d of <%s> mapper to IOC device\n", 2325 2886 __FUNCTION__, page_id, name ); 2326 2887 #endif 2327 // build extended pointer on page descriptor2328 xptr_t page_xp = XPTR( local_cxy , page );2329 2330 2888 // move page from mapper to device 2331 2889 error = fatfs_move_page( page_xp , IOC_WRITE ); … … 2342 2900 cycle = (uint32_t)hal_get_cycles(); 2343 2901 if( DEBUG_FATFS_SYNC_INODE < cycle ) 2344 printk("\n[%s] thread[%x,%x] exit for <%s> / cycle %d\n",2345 __FUNCTION__ , this->process->pid, this->trdid, name , cycle);2902 printk("\n[%s] thread[%x,%x] exit for <%s>\n", 2903 __FUNCTION__ , this->process->pid, this->trdid, name ); 2346 2904 #endif 2347 2905 … … 2349 2907 2350 2908 } // end fatfs_sync_inode() 2351 2352 2353 2354 2355 2356 2909 2357 2910 ////////////////////////////// 2358 2911 error_t fatfs_sync_fat( void ) 
2359 2912 { 2913 2914 fatfs_ctx_t * fatfs_ctx; 2915 cxy_t fat_cxy; 2916 mapper_t * mapper_ptr; 2917 xptr_t mapper_xp; 2918 uint32_t start_page_id; 2919 uint32_t found_page_id; 2920 page_t * page_ptr; 2921 xptr_t page_xp; 2922 uint32_t flags; 2923 error_t error; 2360 2924 2361 2925 #if DEBUG_FATFS_SYNC_FAT … … 2366 2930 __FUNCTION__ , this->process->pid, this->trdid, cycle ); 2367 2931 #endif 2368 2369 uint32_t page_id; 2370 error_t error; 2371 2372 // get FAT mapper pointers an cluster 2373 fatfs_ctx_t * fatfs_ctx = fs_context[FS_TYPE_FATFS].extend; 2374 xptr_t mapper_xp = fatfs_ctx->fat_mapper_xp; 2375 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 2376 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 2377 2378 // compute max number of 4 Kbytes pages in FAT mapper 2379 // TODO : this could be improved (see fatfs.h) [AG] 2380 uint32_t pages = fatfs_ctx->fat_sectors_count >> 3; 2381 2932 2933 // get FAT cluster 2934 fat_cxy = CONFIG_VFS_ROOT_CXY; 2935 2936 // get FAT mapper pointers 2937 fatfs_ctx = fs_context[FS_TYPE_FATFS].extend; 2938 mapper_ptr = fatfs_ctx->fat_mapper; 2939 mapper_xp = XPTR( fat_cxy , mapper_ptr ); 2940 2382 2941 // get pointers on remote FAT mapper radix tree 2383 2942 grdxt_t * rt_ptr = &mapper_ptr->rt; 2384 xptr_t rt_xp = XPTR( mapper_cxy , rt_ptr ); 2385 2386 // scan all pages 2387 for( page_id = 0 ; page_id < pages ; page_id++ ) 2388 { 2389 // get extended pointer on page descriptor from FAT mapper 2390 xptr_t page_xp = grdxt_remote_lookup( rt_xp , page_id ); 2391 2392 // check all existing pages 2393 if ( page_xp != XPTR_NULL ) 2394 { 2395 page_t * page_ptr = GET_PTR( page_xp ); 2396 uint32_t flags = hal_remote_l32( XPTR( mapper_cxy , &page_ptr->flags ) ); 2397 2398 if ( flags & PG_DIRTY ) 2399 { 2943 xptr_t rt_xp = XPTR( fat_cxy , rt_ptr ); 2944 2945 // initialise page_id 2946 start_page_id = 0; 2947 2948 // scan FAT mapper 2949 while( 1 ) 2950 { 2951 // get one page 2952 page_xp = grdxt_remote_get_first( rt_xp , start_page_id , &found_page_id ); 
2953 2954 // exit loop when no more page found 2955 if ( page_xp != XPTR_NULL ) break; 2956 2957 // get page flags 2958 page_ptr = GET_PTR( page_xp ); 2959 flags = hal_remote_l32( XPTR( fat_cxy , &page_ptr->flags ) ); 2960 2961 if ( flags & PG_DIRTY ) 2962 { 2400 2963 2401 2964 #if (DEBUG_FATFS_SYNC_FAT & 1) … … 2404 2967 __FUNCTION__, page_id ); 2405 2968 #endif 2406 // move page from mapper to device 2407 error = fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 2408 2409 if ( error ) return -1; 2410 2411 // reset page dirty flag 2412 ppm_page_undo_dirty( page_xp ); 2413 } 2414 } 2969 // move page from mapper to device 2970 error = fatfs_move_page( page_xp , IOC_SYNC_WRITE ); 2971 2972 if ( error ) return -1; 2973 2974 // reset page dirty flag 2975 ppm_page_undo_dirty( page_xp ); 2976 } 2977 2978 // update loop variable 2979 start_page_id = found_page_id + 1; 2980 2415 2981 } // end loop on pages 2416 2982 … … 2418 2984 cycle = (uint32_t)hal_get_cycles(); 2419 2985 if( DEBUG_FATFS_SYNC_FAT < cycle ) 2420 printk("\n[%s] thread[%x,%x] exit / cycle %d\n",2421 __FUNCTION__ , this->process->pid, this->trdid , cycle);2986 printk("\n[%s] thread[%x,%x] exit\n", 2987 __FUNCTION__ , this->process->pid, this->trdid ); 2422 2988 #endif 2423 2989 … … 2425 2991 2426 2992 } // end fatfs_sync_fat() 2427 2428 ////////////////////////////////////2429 error_t fatfs_sync_free_info( void )2430 {2431 error_t error;2432 fatfs_ctx_t * fatfs_ctx_ptr; // local pointer on fatfs context in cluster 02433 uint32_t ctx_free_clusters; // number of free clusters from fatfs context2434 uint32_t ctx_free_cluster_hint; // free cluster hint from fatfs context2435 uint32_t ioc_free_clusters; // number of free clusters from fatfs context2436 uint32_t ioc_free_cluster_hint; // free cluster hint from fatfs context2437 uint32_t fs_info_lba; // lba of FS_INFO sector on IOC device2438 uint8_t * fs_info_buffer; // local pointer on FS_INFO buffer in cluster 02439 xptr_t fs_info_buffer_xp; // extended pointer on 
FS_INFO buffer in cluster 02440 uint8_t tmp_buf[512]; // 512 bytes temporary buffer2441 xptr_t tmp_buf_xp; // extended pointer on temporary buffer2442 2443 #if DEBUG_FATFS_SYNC_FSINFO2444 uint32_t cycle = (uint32_t)hal_get_cycles();2445 thread_t * this = CURRENT_THREAD;2446 if( DEBUG_FATFS_SYNC_FSINFO < cycle )2447 printk("\n[%s] thread[%x,%x] enter / cycle %d\n",2448 __FUNCTION__ , this->process->pid, this->trdid, cycle );2449 #endif2450 2451 // get pointer on fatfs context in cluster 02452 fatfs_ctx_ptr = hal_remote_lpt( XPTR( 0 , &fs_context[FS_TYPE_FATFS].extend ) );2453 2454 // get "free_clusters" and "free_cluster_hint" from fatfs context in cluster 02455 ctx_free_clusters = hal_remote_l32( XPTR( 0 , &fatfs_ctx_ptr->free_clusters ) );2456 ctx_free_cluster_hint = hal_remote_l32( XPTR( 0 , &fatfs_ctx_ptr->free_cluster_hint ) );2457 2458 // get fs_info_lba2459 fs_info_lba = hal_remote_l32( XPTR( 0 , &fatfs_ctx_ptr->fs_info_lba ) );2460 2461 // build extended pointer on temporary buffer2462 tmp_buf_xp = XPTR( local_cxy , tmp_buf );2463 2464 // copy FS_INFO sector from IOC to local buffer2465 error = dev_ioc_move_data( IOC_SYNC_READ , tmp_buf_xp , fs_info_lba , 1 );2466 2467 if ( error )2468 {2469 printk("\n[ERROR] in %s : cannot access FS_INFO on IOC device\n", __FUNCTION__ );2470 return -1;2471 }2472 2473 // get current values of "free_clusters" and "free_cluster_hint" from FS_INFO on IOC2474 ioc_free_clusters = fatfs_get_remote_record( FS_FREE_CLUSTERS , tmp_buf_xp );2475 ioc_free_cluster_hint = fatfs_get_remote_record( FS_FREE_CLUSTER_HINT , tmp_buf_xp );2476 2477 #if DEBUG_FATFS_SYNC_FSINFO2478 if( DEBUG_FATFS_SYNC_FSINFO < cycle )2479 printk("\n[%s] thread[%x,%x] / ctx_free %x / ioc_free %x / ctx_hint %x / ioc_hint %x\n",2480 __FUNCTION__ , this->process->pid, this->trdid,2481 ctx_free_clusters, ioc_free_clusters, ctx_free_cluster_hint, ioc_free_cluster_hint );2482 #endif2483 2484 // check values2485 if( (ioc_free_clusters != ctx_free_clusters) ||2486 
(ioc_free_cluster_hint != ctx_free_cluster_hint) )2487 {2488 printk("\n[WARNING] in %s : unconsistent free clusters info\n"2489 " ioc_free %x / ctx_free %x / ioc_hint %x / ctx_hint %x\n",2490 __FUNCTION__, ioc_free_clusters, ctx_free_clusters,2491 ioc_free_cluster_hint, ctx_free_cluster_hint );2492 2493 // get pointers on FS_INFO buffer in cluster 02494 fs_info_buffer = hal_remote_lpt( XPTR( 0 , &fatfs_ctx_ptr->fs_info_buffer ) );2495 fs_info_buffer_xp = XPTR( 0 , fs_info_buffer );2496 2497 // update FS_INFO buffer in cluster 02498 fatfs_set_remote_record(FS_FREE_CLUSTERS ,fs_info_buffer_xp,ctx_free_clusters );2499 fatfs_set_remote_record(FS_FREE_CLUSTER_HINT,fs_info_buffer_xp,ctx_free_cluster_hint);2500 2501 // update the FS_INFO sector on IOC device2502 error = dev_ioc_move_data( IOC_SYNC_WRITE , fs_info_buffer_xp , fs_info_lba , 1 );2503 2504 if ( error )2505 {2506 printk("\n[ERROR] in %s : cannot update FS_INFO on IOC device\n", __FUNCTION__ );2507 return -1;2508 }2509 }2510 2511 #if DEBUG_FATFS_SYNC_FSINFO2512 cycle = (uint32_t)hal_get_cycles();2513 if( DEBUG_FATFS_SYNC_FSINFO < cycle )2514 printk("\n[%s] thread[%x,%x] exit / cycle %d\n",2515 __FUNCTION__ , this->process->pid, this->trdid, cycle );2516 #endif2517 2518 return 0;2519 2520 } // end fatfs_sync_free_info()2521 2522 //////////////////////////////////////////////////////////2523 error_t fatfs_cluster_alloc( uint32_t * searched_cluster )2524 {2525 error_t error;2526 uint32_t page_id; // page index in FAT mapper2527 uint32_t slot_id; // slot index in page (1024 slots per page)2528 uint32_t cluster; // first free cluster index in FAT2529 uint32_t free_clusters; // total number of free clusters2530 vfs_ctx_t * vfs_ctx; // local pointer on VFS context (same in all clusters)2531 fatfs_ctx_t * loc_fatfs_ctx; // local pointer on local FATFS context2532 fatfs_ctx_t * fat_fatfs_ctx; // local pointer on FATFS context in FAT cluster2533 xptr_t fat_mapper_xp; // extended pointer on FAT mapper2534 cxy_t fat_cxy; 
// Fat mapper cluster identifier2535 xptr_t page_xp; // extended pointer on current page descriptor in mapper2536 xptr_t slot_xp; // extended pointer on FAT slot defined by hint2537 xptr_t lock_xp; // extended pointer on lock protecting free clusters info2538 xptr_t hint_xp; // extended pointer on free_cluster_hint in FAT cluster2539 xptr_t free_xp; // extended pointer on free_clusters_number in FAT cluster2540 2541 #if DEBUG_FATFS_CLUSTER_ALLOC2542 uint32_t cycle = (uint32_t)hal_get_cycles();2543 thread_t * this = CURRENT_THREAD;2544 if( DEBUG_FATFS_CLUSTER_ALLOC < cycle )2545 printk("\n[%s] thread[%x,%x] enter / cycle = %d\n",2546 __FUNCTION__, this->process->pid, this->trdid, cycle );2547 #endif2548 2549 // get local pointer on VFS context (same in all clusters)2550 vfs_ctx = &fs_context[FS_TYPE_FATFS];2551 2552 // get local pointer on local FATFS context2553 loc_fatfs_ctx = vfs_ctx->extend;2554 2555 // get extended pointer on FAT mapper2556 fat_mapper_xp = loc_fatfs_ctx->fat_mapper_xp;2557 2558 // get FAT cluster2559 fat_cxy = GET_CXY( fat_mapper_xp );2560 2561 // get local pointer on FATFS context in FAT cluster2562 fat_fatfs_ctx = hal_remote_lpt( XPTR( fat_cxy , &vfs_ctx->extend ) );2563 2564 // build relevant extended pointers on free clusters info in mapper cluster2565 lock_xp = XPTR( fat_cxy , &fat_fatfs_ctx->lock );2566 hint_xp = XPTR( fat_cxy , &fat_fatfs_ctx->free_cluster_hint );2567 free_xp = XPTR( fat_cxy , &fat_fatfs_ctx->free_clusters );2568 2569 // take the FAT lock in write mode2570 remote_rwlock_wr_acquire( lock_xp );2571 2572 // get hint and free_clusters values from FATFS context in FAT cluster2573 cluster = hal_remote_l32( hint_xp ) + 1;2574 free_clusters = hal_remote_l32( free_xp );2575 2576 #if (DEBUG_FATFS_CLUSTER_ALLOC & 1)2577 if( DEBUG_FATFS_CLUSTER_ALLOC < cycle )2578 printk("\n[%s] thread[%x,%x] get free info : hint %x / free_clusters %x\n",2579 __FUNCTION__, this->process->pid, this->trdid, (cluster - 1), free_clusters );2580 
#endif2581 2582 // check "free_clusters"2583 if ( free_clusters == 0 )2584 {2585 printk("\n[ERROR] in %s : no more free FATFS clusters\n", __FUNCTION__ );2586 remote_rwlock_wr_release( lock_xp );2587 return -1;2588 }2589 else if ( free_clusters < CONFIG_VFS_FREE_CLUSTERS_MIN )2590 {2591 printk("\n[WARNING] in %s : only %n free FATFS clusters\n",2592 __FUNCTION__, CONFIG_VFS_FREE_CLUSTERS_MIN );2593 }2594 2595 // get page index & slot index for selected cluster2596 page_id = cluster >> 10;2597 slot_id = cluster & 0x3FF;2598 2599 // get relevant page descriptor from FAT mapper2600 page_xp = mapper_remote_get_page( fat_mapper_xp , page_id );2601 2602 if( page_xp == XPTR_NULL )2603 {2604 printk("\n[ERROR] in %s : cannot acces FAT mapper\n", __FUNCTION__ );2605 remote_rwlock_wr_release( lock_xp );2606 return -1;2607 }2608 2609 // build extended pointer on selected cluster slot in FAT mapper2610 slot_xp = ppm_page2base( page_xp ) + (slot_id << 2);2611 2612 // check selected cluster actually free2613 if( hal_remote_l32( slot_xp ) != FREE_CLUSTER )2614 {2615 printk("\n[ERROR] in %s : selected cluster %x not free\n", __FUNCTION__, cluster );2616 remote_rwlock_wr_release( lock_xp );2617 return -1;2618 }2619 2620 // update free cluster info in FATFS context and in FS_INFO sector2621 error = fatfs_free_clusters_decrement( XPTR( fat_cxy , fat_fatfs_ctx ) , cluster );2622 2623 if( error )2624 {2625 printk("\n[ERROR] in %s : cannot update free cluster info\n", __FUNCTION__ );2626 remote_rwlock_wr_release( lock_xp );2627 return -1;2628 }2629 2630 // update FAT mapper2631 hal_remote_s32( slot_xp , END_OF_CHAIN_CLUSTER_MAX );2632 2633 // we don't mark the FAT mapper page as dirty,2634 // because we synchronously update FAT on IOC device2635 error = fatfs_move_page( page_xp , IOC_SYNC_WRITE );2636 2637 if( error )2638 {2639 printk("\n[ERROR] in %s : cannot update FAT on IOC device\n", __FUNCTION__ );2640 remote_rwlock_wr_release( lock_xp );2641 return -1;2642 }2643 2644 // release 
FAT lock2645 remote_rwlock_wr_release( lock_xp );2646 2647 #if DEBUG_FATFS_CLUSTER_ALLOC2648 cycle = (uint32_t)hal_get_cycles();2649 if( DEBUG_FATFS_CLUSTER_ALLOC < cycle )2650 printk("\n[%s] thread[%x,%x] exit / allocated cluster %x in FAT / cycle %d\n",2651 __FUNCTION__, this->process->pid, this->trdid, cluster, cycle );2652 #endif2653 2654 *searched_cluster = cluster;2655 return 0;2656 2657 } // end fatfs_cluster_alloc()2658 2993 2659 2994 ////////////////////////////////////////////// 2660 2995 error_t fatfs_release_inode( xptr_t inode_xp ) 2661 2996 { 2662 vfs_ctx_t * vfs_ctx; // local pointer on VFS context (same in all clusters).2663 fatfs_ctx_t * loc_fatfs_ctx; // local pointer on local FATFS context2664 cxy_t fat_cxy; // FAT cluster identifier2665 xptr_t fatfs_ctx_xp; // extended pointer on FATFScontext in FAT cluster2666 fatfs_ctx_t * fatfs_ctx_ptr; // local pointer on FATFS context in FAT cluster2667 xptr_t fat_mapper_xp; // extended pointer on FAT mapper2668 xptr_t lock_xp; // extended pointer on lock protecting FAT.2669 xptr_t first_xp; // extended pointer on inode extension2670 uint32_t first_cluster ; // first cluster index for released inode2671 vfs_inode_t * inode_ptr; // local pointer on target inode2672 cxy_t inode_cxy; // target inode cluster identifier2997 vfs_ctx_t * vfs_ctx; // local pointer on VFS context (same in all clusters) 2998 cxy_t fat_cxy; // FAT cluster identifier 2999 fatfs_ctx_t * fatfs_ctx_ptr; // local pointer on FATFS context in FAT cluster 3000 xptr_t fatfs_ctx_xp; // extended pointer on FATFS-context in FAT cluster 3001 mapper_t * fat_mapper_ptr; // local pointer on FAT mapper 3002 xptr_t fat_mapper_xp; // extended pointer on FAT mapper 3003 xptr_t lock_xp; // extended pointer on lock protecting FAT. 
3004 xptr_t first_xp; // extended pointer on inode extension 3005 uint32_t first_cluster_id; // first cluster index for released inode 3006 vfs_inode_t * inode_ptr; // local pointer on target inode 3007 cxy_t inode_cxy; // target inode cluster identifier 2673 3008 error_t error; 2674 3009 … … 2680 3015 inode_cxy = GET_CXY( inode_xp ); 2681 3016 2682 // get first_cluster from inode extension2683 first_xp = XPTR( inode_cxy , &inode_ptr->extend );2684 first_cluster = (uint32_t)(intptr_t)hal_remote_lpt( first_xp );3017 // get first_cluster_id from inode extension 3018 first_xp = XPTR( inode_cxy , &inode_ptr->extend ); 3019 first_cluster_id = (uint32_t)(intptr_t)hal_remote_lpt( first_xp ); 2685 3020 2686 3021 // check first cluster index 2687 assert( (first_cluster != 0) , "inode extend is NULL\n" );3022 assert( (first_cluster_id != 0) , "inode extend is NULL\n" ); 2688 3023 2689 3024 #if DEBUG_FATFS_RELEASE_INODE … … 2693 3028 vfs_inode_get_name( inode_xp , name ); 2694 3029 if( DEBUG_FATFS_RELEASE_INODE < cycle ) 2695 printk("\n[%s] thread[%x,%x] enter for <%s> / first_cluster %x / cycle %d\n",2696 __FUNCTION__ , this->process->pid, this->trdid, name, first_cluster , cycle );3030 printk("\n[%s] thread[%x,%x] enter for <%s> / first_cluster_id %x / cycle %d\n", 3031 __FUNCTION__ , this->process->pid, this->trdid, name, first_cluster_id, cycle ); 2697 3032 #endif 2698 3033 … … 2700 3035 vfs_ctx = &fs_context[FS_TYPE_FATFS]; 2701 3036 2702 // get local pointer on local FATFS context 2703 loc_fatfs_ctx = vfs_ctx->extend; 2704 2705 // get FAT mapper cluster 2706 fat_mapper_xp = loc_fatfs_ctx->fat_mapper_xp; 2707 fat_cxy = GET_CXY( fat_mapper_xp ); 3037 // get FAT cluster 3038 fat_cxy = CONFIG_VFS_ROOT_CXY; 3039 3040 // get pointers on FATFS context in FAT cluster 3041 fatfs_ctx_ptr = hal_remote_lpt( XPTR( fat_cxy , &vfs_ctx->extend ) ); 3042 fatfs_ctx_xp = XPTR( fat_cxy , fatfs_ctx_ptr ); 3043 3044 // get FAT mapper pointers 3045 fat_mapper_ptr = hal_remote_lpt( XPTR( 
fat_cxy , &fatfs_ctx_ptr->fat_mapper ) ); 3046 fat_mapper_xp = XPTR( fat_cxy , fat_mapper_ptr ); 2708 3047 2709 // get pointers on FATFS context in FAT cluster 2710 fatfs_ctx_ptr = hal_remote_lpt( XPTR( fat_cxy , &vfs_ctx->extend ) ); 2711 fatfs_ctx_xp = XPTR( fat_cxy , fatfs_ctx_ptr ); 2712 2713 // get extended pointer on FAT lock in FAT cluster 3048 // build extended pointer on FAT lock in FAT cluster 2714 3049 lock_xp = XPTR( fat_cxy , &fatfs_ctx_ptr->lock ); 2715 3050 … … 2727 3062 if ( fatfs_recursive_release( fat_mapper_xp, 2728 3063 fatfs_ctx_xp, 2729 first_cluster ,3064 first_cluster_id, 2730 3065 &dirty_page_min, 2731 3066 &dirty_page_max ) ) … … 2791 3126 } // end fatfs_release_inode() 2792 3127 2793 //////////////////////////////////////////// 2794 error_t fatfs_move_page( xptr_t page_xp, 2795 cmd_type_t cmd_type ) 2796 { 2797 error_t error; 3128 ///////////////////////////////////////////////// 3129 error_t fatfs_move_page( xptr_t page_xp, 3130 ioc_cmd_type_t cmd_type ) 3131 { 3132 error_t error = 0; 3133 2798 3134 vfs_inode_t * inode_ptr; 2799 3135 mapper_t * mapper_ptr; … … 2829 3165 #if DEBUG_FATFS_MOVE_PAGE 2830 3166 if( DEBUG_FATFS_MOVE_PAGE < cycle ) 2831 printk("\n[%s] thread[%x,%x] enters for %s /page %d in FAT mapper / cycle %d\n",3167 printk("\n[%s] thread[%x,%x] enters %s for page %d in FAT mapper / cycle %d\n", 2832 3168 __FUNCTION__, this->process->pid, this->trdid, dev_ioc_cmd_str(cmd_type), page_id, cycle ); 2833 3169 #endif … … 2836 3172 2837 3173 // access IOC device 2838 error = dev_ioc_move_data( cmd_type , buffer_xp , lba , 8 ); 3174 if (cmd_type == IOC_SYNC_WRITE) error = dev_ioc_sync_write( buffer_xp, lba, 8 ); 3175 else if(cmd_type == IOC_SYNC_READ ) error = dev_ioc_sync_read( buffer_xp, lba, 8 ); 3176 else 3177 { 3178 printk("\n[ERROR] in %s : illegal asynchronous FAT access\n", __FUNCTION__ ); 3179 } 2839 3180 2840 3181 if( error ) 2841 3182 { 2842 printk("\n[ERROR] in %s : cannot access device\n", __FUNCTION__ );3183 
printk("\n[ERROR] in %s : cannot access IOC device\n", __FUNCTION__ ); 2843 3184 return -1; 2844 3185 } … … 2846 3187 #if DEBUG_FATFS_MOVE_PAGE 2847 3188 if( DEBUG_FATFS_MOVE_PAGE < cycle ) 2848 printk("\n[%s] thread[%x,%x] exit /page %d in FAT mapper\n",2849 __FUNCTION__, this->process->pid, this->trdid, page_id, cycle);3189 printk("\n[%s] thread[%x,%x] exit %s for page %d in FAT mapper\n", 3190 __FUNCTION__, this->process->pid, this->trdid, dev_ioc_cmd_str(cmd_type), page_id ); 2850 3191 #endif 2851 3192 … … 2856 3197 2857 3198 #if DEBUG_FATFS_MOVE_PAGE 2858 vfs_inode_get_name( XPTR( page_cxy , inode_ptr ) , name );2859 3199 if( DEBUG_FATFS_MOVE_PAGE < cycle ) 2860 printk("\n[%s] thread[%x,%x] enters for %s / page %d in <%s> mapper/ cycle %d\n", 2861 __FUNCTION__, this->process->pid, this->trdid, 2862 dev_ioc_cmd_str( cmd_type ), page_id, name, cycle ); 3200 { 3201 vfs_inode_get_name( XPTR( page_cxy , inode_ptr ) , name ); 3202 printk("\n[%s] thread[%x,%x] enters %s for page %d in <%s> mapper / cycle %d\n", 3203 __FUNCTION__, this->process->pid, this->trdid, 3204 dev_ioc_cmd_str( cmd_type ), page_id, name, cycle ); 3205 } 2863 3206 #endif 2864 3207 … … 2893 3236 uint32_t lba = fatfs_lba_from_cluster( fatfs_ctx , searched_cluster_id ); 2894 3237 2895 // access IOC device to move 8 blocks 2896 error = dev_ioc_move_data( cmd_type , buffer_xp , lba , 8 ); 3238 // access IOC device 3239 if (cmd_type == IOC_WRITE ) error = dev_ioc_write( buffer_xp, lba, 8 ); 3240 else if(cmd_type == IOC_READ ) error = dev_ioc_read( buffer_xp, lba, 8 ); 3241 else if(cmd_type == IOC_SYNC_READ ) error = dev_ioc_sync_read( buffer_xp, lba, 8 ); 3242 else if(cmd_type == IOC_SYNC_WRITE) error = dev_ioc_sync_write( buffer_xp, lba, 8 ); 3243 else 3244 { 3245 printk("\n[ERROR] in %s : illegal cmd_type\n", __FUNCTION__ ); 3246 } 2897 3247 2898 3248 if( error ) … … 2904 3254 #if DEBUG_FATFS_MOVE_PAGE 2905 3255 if( DEBUG_FATFS_MOVE_PAGE < cycle ) 2906 vfs_inode_get_name( XPTR( page_cxy, inode_ptr ) 
, name ); 2907 printk("\n[%s] thread[%x,%x] exit / page %d in <%s> mapper / cluster_id %x\n", 2908 __FUNCTION__, this->process->pid, this->trdid, page_id, name, searched_cluster_id ); 3256 { 3257 printk("\n[%s] thread[%x,%x] exit %s for page %d in <%s> mapper / cluster_id %x\n", 3258 __FUNCTION__, this->process->pid, this->trdid, 3259 dev_ioc_cmd_str( cmd_type ), page_id, name, searched_cluster_id ); 3260 } 3261 #endif 3262 3263 #if (DEBUG_FATFS_MOVE_PAGE & 1) 3264 if( DEBUG_FATFS_MOVE_PAGE < cycle ) 3265 fatfs_display_fat( searched_cluster_id , 64 ); 2909 3266 #endif 2910 3267 -
trunk/kernel/fs/fatfs.h
r656 r657 2 2 * fatfs.h - FATFS file system API definition. 3 3 * 4 * Author Mohamed Lamine Karaoui (2014,2015) 5 * Alain Greiner (2016,2017,2018) 4 * Author Alain Greiner (2016,2017,2018,2019,2020) 6 5 * 7 6 * Copyright (c) UPMC Sorbonne Universites … … 27 26 28 27 #include <hal_kernel_types.h> 29 #include <remote_ queuelock.h>28 #include <remote_rwlock.h> 30 29 #include <vfs.h> 31 30 #include <dev_ioc.h> … … 36 35 * 37 36 * The FATFS specific extensions to the generic VFS are the following: 38 * 1) The vfs_ctx_t "extend" field is a void* pointing on the fatfs_ctx_t structure. 39 * This structure contains various general informations such as the total 40 * number of sectors in FAT region, the number of bytes per sector, the number 41 * of sectors per cluster, the lba of FAT region, the lba of data region, or the 42 * cluster index for the root directory. It contains also an extended pointer 43 * on the FAT mapper. 37 * 1) The vfs_ctx_t "extend" field contains a local pointer on the local 38 * fatfs_ctx_t structure. 39 * 44 40 * 2) The vfs_inode_t "extend" contains, for each inode, 45 * the first FAT32 cluster_id (after cast to intptr). 46 * 3) The vfs_dentry_t "extend" field contains, for each dentry, the entry index 47 * in the FATFS directory (32 bytes per FATFS directory entry). 41 * the first FATFS cluster_id (after cast to intptr). 42 * 43 * 3) The vfs_dentry_t "extend" field contains a local pointer on the local 44 * FATFS directory entry (32 bytes) in the directory mapper. 
48 45 * 49 46 * In the FAT32 File System, the File Allocation Table is is actually an array … … 57 54 *****************************************************************************************/ 58 55 59 ///////////////////////////////////////////////////////////////////////////////////////////60 61 56 /*************** Partition Boot Sector Format **********************************/ 62 57 // offset | length … … 182 177 * This structure defines a FATFS specific context extension to the VFS context. 183 178 * This fatfs context is replicated in all clusters. 179 * It contains read-only informations such as the total number of sectors in FAT region, 180 * the number of bytes per sector, the number of sectors per cluster, the lba of FAT, 181 * the lba of data region, the cluster_id for the root directory, and an extended 182 * pointer on the FAT mapper. 184 183 * 185 184 * WARNING 1 : All access to the FAT are protected by a remote_rwlock. … … 189 188 * functions to modify the FAT in both the FAT mapper and on IOC device. 190 189 * 191 * WARNING 2 : Most fields are constant values, but the <free_cluster_hint>,192 * < free_clusters>, <lock>, and the <fs_info_buffer> are shared variables,193 * that can be modified by any thread running in any cluster. The <fs_info_buffer>194 * contains a copy of the FS_INFO sector, and is only allocated in the FAT cluster195 * (cluster 0).It is used to synchronously update the free clusters info on IOC device.190 * WARNING 2 : Most fields are constant values, but <free_cluster_hint>, <free_clusters>, 191 * <lock>, and the buffer pointed by the <fs_info_xp> are shared variables, that can 192 * modified by any thread running in any cluster. The <fs_info_buffer> contains 193 * a copy of the FS_INFO sector, and is only allocated in the FAT cluster. 194 * It is used to synchronously update the free clusters info on IOC device. 196 195 * => For all these variables, only the values stored in the FAT cluster must be used. 
197 196 ****************************************************************************************/ … … 207 206 uint32_t fs_info_lba; /*! lba of FS_INFO sector */ 208 207 uint32_t root_dir_cluster; /*! cluster index for root directory */ 209 xptr_t fat_mapper_xp; /*! extended pointer on FAT mapper*/208 struct mapper_s * fat_mapper; /*! local pointer on FAT mapper */ 210 209 211 210 /* shared variables (only the copy in FAT cluster must be used) */ 212 uint32_t free_cluster_hint; /*! cluster[hint+1] is the first free*/213 uint32_t free_clusters; /*! free clusters number*/211 uint32_t free_cluster_hint; /*! free_cluster_hint + 1 is first free */ 212 uint32_t free_clusters; /*! number of free clusters */ 214 213 remote_rwlock_t lock; /*! exclusive access to FAT */ 215 214 uint8_t * fs_info_buffer; /*! local pointer on FS_INFO buffer */ … … 224 223 * This debug function display the content of the FATFS context copy in cluster 225 224 * identified by the <cxy> argument. 226 * This function can be called by a thread running in any cluster.225 * This function can be called by any thread running in any cluster. 227 226 ***************************************************************************************** 228 227 * @ cxy : target cluster identifier. 
… … 232 231 /***************************************************************************************** 233 232 * This debug function access the FAT mapper to display the current FAT state, 234 * as defined by the < page_id>, <min_slot>, and <nb_slots> arguments.235 * It loads the missing pages from IOC to mapper if required.236 * This function can be called by a thread running in any cluster.237 * ****************************************************************************************238 * @ page_id : page index in FAT mapper (one page is 4 Kbytes = 1024 slots).239 * @ min_slot : first slot in page240 * @ nb_slots : number of slots (one slot is 4 bytes).241 * ***************************************************************************************/242 void fatfs_display_fat( uint32_t page_id, 243 233 * as defined by the <min_slot>, and <nb_slots> arguments. 234 * It display as many lines (8 slots par line) as required to display <nb_slots>, 235 * starting from the <min_slot>. The displayed slots can spread on several FAT mapper 236 * pages. It loads the missing pages from IOC to mapper if required. 237 * This function can be called by any thread running in any cluster. 238 ***************************************************************************************** 239 * @ min_slot : first FATFS cluster index. 240 * @ nb_slots : number of slots (one slot is 4 bytes. 241 ****************************************************************************************/ 242 void fatfs_display_fat( uint32_t min_slot, 244 243 uint32_t nb_slots ); 245 244 245 /***************************************************************************************** 246 * This function checks the current values of the "free_clusters" and "free_cluster_hint" 247 * variables in the FS_INFO sector on IOC, versus the values stored in the fatfs context. 248 * As these values are synchronously updated on IOC device at each modification, 249 * it does nothing if the values are equal. 
It updates the FS_INFO sector on IOC device, 250 * and displays a warning message on TXT0 if they are not equal. 251 * This function can be called by any thread running in any cluster. 252 ***************************************************************************************** 253 * @ return 0 if success / return -1 if failure during IOC device access. 254 ****************************************************************************************/ 255 error_t fatfs_check_free_info( void ); 246 256 247 257 ////////////////////////////////////////////////////////////////////////////////////////// … … 251 261 252 262 /***************************************************************************************** 253 * This fuction allocates memory from local cluster for a FATFS context descriptor. 254 ***************************************************************************************** 255 * @ return a pointer on the created context / return NULL if failure. 256 ****************************************************************************************/ 257 fatfs_ctx_t * fatfs_ctx_alloc( void ); 258 259 /***************************************************************************************** 260 * This function access the boot device, and initialises the local FATFS context, 261 * from informations contained in the boot record. This initialisation includes the 262 * creation of the FAT mapper in cluster 0. 263 ***************************************************************************************** 264 * @ vfs_ctx : local pointer on VFS context for FATFS. 265 ****************************************************************************************/ 266 void fatfs_ctx_init( fatfs_ctx_t * fatfs_ctx ); 267 268 /***************************************************************************************** 269 * This function releases memory dynamically allocated for the FATFS context extension. 
270 ***************************************************************************************** 271 * @ vfs_ctx : local pointer on VFS context. 272 ****************************************************************************************/ 273 void fatfs_ctx_destroy( fatfs_ctx_t * fatfs_ctx ); 263 * This fuction allocates memory for a FATFS context descriptor in a cluster 264 * identified by the <cxy> argument. 265 ***************************************************************************************** 266 * @ cxy : target cluster identifier. 267 * @ return an extended pointer on the created context / return XPTR_NULL if failure. 268 ****************************************************************************************/ 269 xptr_t fatfs_ctx_alloc( cxy_t cxy ); 270 271 /***************************************************************************************** 272 * This fuction initialize a fatfs context identified by the <fatfs_ctx_xp> argument 273 * from informations found in the IOC device boot record. This initialisation includes 274 * allocation of the FS_INFO buffer and creation of the FAT mapper in the same cluster. 275 ***************************************************************************************** 276 * @ fatfs_ctx_xp : extended pointer on fatfs context. 277 * @ return 0 if success / return -1 if failure. 278 ****************************************************************************************/ 279 error_t fatfs_ctx_init( xptr_t fatfs_ctx_xp ); 280 281 /***************************************************************************************** 282 * This function releases memory dynamically allocated for the FATFS context. 283 ***************************************************************************************** 284 * @ vfs_ctx_xp : extended pointer on FATFS context. 
285 ****************************************************************************************/ 286 void fatfs_ctx_destroy( xptr_t fatfs_ctx_xp ); 274 287 275 288 /***************************************************************************************** 276 289 * This function implements the generic vfs_fs_add_dentry() function for the FATFS. 277 290 ***************************************************************************************** 278 * This function updates a directory mapper identified by the <inode> argument 279 * to add a new directory entry identified by the <dentry> argument. 291 * This function introduces in a directory mapper identified by the <parent_inode_xp> 292 * argument a new directory entry identified by the <dentry_ptr> argument. 293 * The dentry descriptor and the associated inode descriptor must have been previously 294 * allocated, initialized, and registered in the Inode Tree. 295 * The dentry descriptor defines the "name" field. 296 * The inode descriptor defines the "type", "size", and "cluster_id" fields. 297 * The "extension field" in dentry descriptor is set : index in the FAT32 directory. 280 298 * All modified pages in the directory mapper are synchronously updated on IOC device. 281 * It must be called by a thread running in the cluster containing the directory inode.299 * This function can be called by any thread running in any cluster. 282 300 * 283 301 * Implementation note : this function works in two steps: … … 285 303 * to find the end of directory (NO_MORE_ENTRY marker). 
286 304 * - Then it writes 3, 4, or 5 directory entries (depending on the name length), using 287 * a 5 steps FSM (one state per entry to be written), updates on IOC device the288 * modified pages , and updates the dentry extension field, that must contain289 * the dentry index in FATFS directory.290 * ****************************************************************************************291 * @ inode : local pointer on directory inode.292 * @ dentry : local pointer on dentry.293 * @ return 0 if success / return ENOENT if not found, or EIO if no access to IOC device.294 ****************************************************************************************/ 295 error_t fatfs_add_dentry( struct vfs_inode_s * inode,296 struct vfs_dentry_s * dentry );305 * a 5 steps FSM (one state per entry to be written), and updates on IOC device the 306 * modified pages. 307 ***************************************************************************************** 308 * @ parent_inode_xp : [in] extended pointer on parent directory inode. 309 * @ dentry_ptr : [in] local pointer on dentry (in parent directory cluster). 310 * @ index : [out] index of the new entry in the FAT32 directory. 311 * @ return 0 if success / return -1 if failure. 312 ****************************************************************************************/ 313 error_t fatfs_add_dentry( xptr_t parent_inode_xp, 314 struct vfs_dentry_s * dentry_ptr ); 297 315 298 316 /***************************************************************************************** 299 317 * This function implements the generic vfs_fs_remove_dentry() function for the FATFS. 
300 318 ***************************************************************************************** 301 * This function updates a directory identified by the < inode> argument302 * to remove a directory entry identified by the <dentry > argument.319 * This function updates a directory identified by the <parent_inode_xp> argument 320 * to remove a directory entry identified by the <dentry_ptr> argument. 303 321 * All modified pages in directory mapper are synchronously updated on IOC device. 304 * It must be called by a thread running in the cluster containing the inode.322 * This function can be called by any thread running in any cluster. 305 323 * 306 324 * Implementation note: this function uses the dentry extension to directly access … … 308 326 * updates the modified pages on IOC device. 309 327 ***************************************************************************************** 310 * @ inode : local pointer on directory inode. 311 * @ dentry : local pointer on dentry. 312 * @ return 0 if success / return ENOENT if not found, or EIO if no access to IOC device. 313 ****************************************************************************************/ 314 error_t fatfs_remove_dentry( struct vfs_inode_s * inode, 315 struct vfs_dentry_s * dentry ); 316 317 /***************************************************************************************** 318 * This function implements the generic vfs_fs_new_dentry() function for the FATFS. 319 ***************************************************************************************** 320 * It scan a parent directory mapper, identified by the <parent_inode> argument to find 321 * a directory entry identified by the <name> argument. In case of success, it completes 322 * initialization the inode/dentry couple, identified by the <child_inode_xp> argument. 323 * The child inode descriptor, and the associated dentry descriptor must have been 324 * previously allocated by the caller. 
328 * @ parent_inode_xp : [in] extended pointer on parent directory inode. 329 * @ dentry_ptr : [in] local pointer on dentry (in parent directory cluster). 330 * @ return 0 if success / return -1 if failure. 331 ****************************************************************************************/ 332 error_t fatfs_remove_dentry( xptr_t parent_inode_xp, 333 struct vfs_dentry_s * dentry_ptr ); 334 335 /***************************************************************************************** 336 * This function implements the generic vfs_fs_new_dentry_from_mapper() for the FATFS. 337 ***************************************************************************************** 338 * It scan a parent directory mapper, identified by the <parent_inode_xp> argument 339 * to find a directory entry name defined by the <dentry_ptr> argument, and completes 340 * the initialization of the dentry and the associated child_inode descriptors, 341 * from informations found in the parent directory mapper : 325 342 * - It set the "type", "size", and "extend" fields in the child inode descriptor. 326 343 * - It set the " extend" field in the dentry descriptor. 327 * It must be called by a thread running in the cluster containing the parent inode. 328 ***************************************************************************************** 329 * @ parent_inode : local pointer on parent inode (directory). 330 * @ name : child name. 331 * @ child_inode_xp : extended pointer on remote child inode (file or directory). 344 * The child inode descriptor, and the dentry descriptor must have been previously 345 * allocated and introduced in the Inode Tree. 346 * This function can be called by any thread running in any cluster. 347 ***************************************************************************************** 348 * @ parent_inode_xp : extended pointer on parent inode (directory). 349 * @ dentry_ptr : local pointer on new dentry (in parent inode cluster). 
350 * @ return 0 if success / return -1 if failure. 351 ****************************************************************************************/ 352 error_t fatfs_new_dentry_from_mapper( xptr_t parent_inode_xp, 353 struct vfs_dentry_s * dentry_ptr ); 354 355 /***************************************************************************************** 356 * This function implements the generic vfs_fs_new_dentry_to_mapper() for the FATFS. 357 ***************************************************************************************** 358 * This function introduces a brand new dentry identified by the <dentry_ptr> argument 359 * in the mapper of a directory identified by the <parent_inode_xp> argument. 360 * It is called by the vfs_lookup() function. 361 * The child inode descriptor, and the dentry descriptor must have been previously 362 * allocated and introduced in the Inode Tree. The dentry descriptor contains the name. 363 * 1. It allocates a new FATFS cluster_id, 364 * 2. It registers the allocated cluster_id in the child inode extension, 365 * 3. It add a new entry (32 bytes) in the directory mapper, 366 * This function can be called by any thread running in any cluster. 367 ***************************************************************************************** 368 * @ parent_inode_xp : [in] extended pointer on parent inode (directory). 369 * @ dentry_ptr : [in] local pointer on dentry (in parent inode cluster). 370 * @ return 0 if success / return -1 if failure. 371 ****************************************************************************************/ 372 error_t fatfs_new_dentry_to_mapper( xptr_t parent_inode_xp, 373 struct vfs_dentry_s * dentry_ptr ); 374 375 /***************************************************************************************** 376 * This function implements the generic vfs_fs_update_dentry() function for the FATFS. 
377 ***************************************************************************************** 378 * It update the "size" of a directory entry identified by the <dentry_ptr> argument in 379 * the mapper of a directory identified by the <parent_inode_xp> argument, from the 380 * value registered in the inode descriptor. 381 * It scan the directory mapper to find the entry such as name == dentry_ptr->name. 382 * It set the "size" field in the directory mapper, and updates the modified directory 383 * page on the IOC device. 384 * This function can be called by any thread running in any cluster. 385 * 386 * TODO the current implementation uses the fatfs_scan_directory to access the 387 * FAT32 directory by name. We can access directly this directory entry if we use 388 * the dentry "extend" field... 389 ***************************************************************************************** 390 * @ parent_inode_xp : extended pointer on inode (directory). 391 * @ dentry_ptr : local pointer on dentry (in parent directory cluster). 332 392 * @ return 0 if success / return -1 if child not found. 333 393 ****************************************************************************************/ 334 error_t fatfs_new_dentry( struct vfs_inode_s * parent_inode, 335 char * name, 336 xptr_t child_inode_xp ); 337 338 /***************************************************************************************** 339 * This function implements the generic vfs_fs_update_dentry() function for the FATFS. 340 ***************************************************************************************** 341 * It update the size of a directory entry identified by the <dentry> argument in 342 * the mapper of a directory identified by the <inode> argument, as defined by the 343 * <size> argument. 344 * It scan the mapper to find the entry identified by the dentry "name" field. 345 * It set the "size" field in the in the directory mapper AND marks the page as DIRTY. 
346 * It must be called by a thread running in the cluster containing the directory inode. 347 ***************************************************************************************** 348 * @ inode : local pointer on inode (directory). 349 * @ dentry : local pointer on dentry (for name). 350 * @ size : new size value. 351 * @ return 0 if success / return ENOENT if child not found. 352 ****************************************************************************************/ 353 error_t fatfs_update_dentry( struct vfs_inode_s * inode, 354 struct vfs_dentry_s * dentry, 355 uint32_t size ); 394 error_t fatfs_update_dentry( xptr_t parent_inode_xp, 395 struct vfs_dentry_s * dentry_ptr ); 356 396 357 397 /***************************************************************************************** … … 367 407 * the Inode Tree is dynamically created, and all dirent fields are documented in the 368 408 * dirent array. Otherwise, only the dentry name is documented. 369 * It must be called by a thread running in the cluster containing the directory inode. 370 ***************************************************************************************** 371 * @ inode : [in] local pointer on directory inode. 409 * 410 * WARNING : It must be called by a thread running in the cluster containing the 411 * target directory inode. 412 ***************************************************************************************** 413 * @ parent_inode_xp : [in] extended pointer on directory inode. 372 414 * @ array : [in] local pointer on array of dirents. 373 415 * @ max_dirent : [in] max number of slots in dirent array. … … 390 432 ***************************************************************************************** 391 433 * It updates the FATFS on the IOC device for a given inode identified by 392 * the <inode > argument. It scan all pages registered in the associated mapper,434 * the <inode_xp> argument. 
It scan all pages registered in the associated mapper, 393 435 * and copies from mapper to device each page marked as dirty. 394 436 * WARNING : The target <inode> cannot be a directory, because all modifications in a 395 437 * directory are synchronously done on the IOC device by the two fatfs_add_dentry() 396 438 * and fatfs_remove_dentry() functions. 397 ***************************************************************************************** 398 * @ inode : local pointer on inode. 399 * @ return 0 if success / return -1 if failure during IOC device access. 400 ****************************************************************************************/ 401 error_t fatfs_sync_inode( struct vfs_inode_s * inode ); 439 * This function can be called by any thread running in any cluster. 440 ***************************************************************************************** 441 * @ inode_xp : extended pointer on inode. 442 * @ return 0 if success / return -1 if failure. 443 ****************************************************************************************/ 444 error_t fatfs_sync_inode( xptr_t inode_xp ); 402 445 403 446 /***************************************************************************************** … … 407 450 * It scan all clusters registered in the FAT mapper, and copies from mapper to device 408 451 * each page marked as dirty. 409 * 410 * TODO : the current implementation check ALL pages in the FAT region, even if most 411 * pages are empty, and not copied in mapper. It is sub-optimal. 412 * A solution is to maintain in the FAT context two "dirty_min" and "dirty_max" 413 * variables defining the smallest/largest dirty page index in FAT mapper... 452 * This function can be called by any thread running in any cluster. 453 * 454 * Implementation note : this function uses the grdxt_remote_get_first() function 455 * to test only the pages actually registered in the FAT mapper. 
414 456 ***************************************************************************************** 415 457 * @ return 0 if success / return -1 if failure during IOC device access. … … 418 460 419 461 /***************************************************************************************** 420 * This function implements the generic vfs_fs_sync_fsinfo() function for the FATFS.421 *****************************************************************************************422 * It checks the current values of the "free_clusters" and "free_cluster_hint" variables423 * in the FS_INFO sector on IOC, versus the values stored in the fatfs context.424 * As these values are synchronously updated on IOC device at each modification,425 * it does nothing if the values are equal. It updates the FS_INFO sector on IOC device,426 * and displays a warning message on TXT0 if they are not equal.427 * This function can be called by any thread running in any cluster.428 *****************************************************************************************429 * @ return 0 if success / return -1 if failure during IOC device access.430 ****************************************************************************************/431 error_t fatfs_sync_free_info( void );432 433 /*****************************************************************************************434 * This function implements the generic vfs_fs_cluster_alloc() function for the FATFS.435 *****************************************************************************************436 * It access the FAT (File allocation table), stored in the FAT mapper, and returns437 * in <searched_cluster> the FATFS cluster index of a free cluster.438 * It can be called by a thread running in any cluster, as it uses remote access439 * primitives when the FAT mapper is remote. It takes the rwlock stored in the FATFS440 * context located in the same cluster as the FAT mapper itself, to get exclusive441 * access to the FAT. 
It uses and updates the <free_cluster_hint> and <free_clusters>442 * variables stored in this FATFS context.443 * - it updates the <free_cluster_hint> and <free_clusters> variables in FATFS context.444 * - it updates the FAT mapper (handling miss from IOC device if required).445 * - it synchronously updates the FAT region on IOC device.446 * - it returns the allocated cluster index.447 *****************************************************************************************448 * @ searched_cluster_id : [out] allocated FATFS cluster index.449 * @ return 0 if success / return -1 if no more free clusters on IOC device.450 ****************************************************************************************/451 error_t fatfs_cluster_alloc( uint32_t * searched_cluster_id );452 453 /*****************************************************************************************454 462 * This function implements the generic vfs_fs_release_inode() function for the FATFS. 455 463 ***************************************************************************************** 456 * This function is used to remove a given file or directory from FATFS thefile system.464 * This function is used to remove a given file or directory from the FATFS file system. 457 465 * It releases all clusters allocated to a file/directory identified by the <inode_xp> 458 466 * argument. All released clusters are marked FREE_CLUSTER in the FAT mapper. … … 462 470 * synchronously update all modified pages in the FAT mapper to the IOC device. 463 471 * Finally the FS-INFO sector on the IOC device is updated. 472 * This function can be called by any thread running in any cluster. 464 473 ***************************************************************************************** 465 474 * @ inode_xp : extended pointer on inode. … … 478 487 * - For a regular file, it scan the FAT mapper to get the cluster_id on IOC device, 479 488 * and read/write this cluster. 
480 * Itcan be called by any thread running in any cluster.489 * This function can be called by any thread running in any cluster. 481 490 * 482 491 * WARNING : For the FAT mapper, the inode field in the mapper MUST be NULL, as this 483 492 * is used to indicate that the corresponding mapper is the FAT mapper. 484 *485 * TODO : In this first implementation, the entry point in the FAT to get the cluster_id486 * is always the cluster_id of the first page, registered in the inode extension.487 * This can introduce a quadratic cost when trying of acessing all pages of a488 * big file. An optimisation would be to introduce in the inode extension two489 * new fields <other_page_id> & <other_cluster_id>, defining a second entry point490 * in the FAT.491 493 ***************************************************************************************** 492 494 * @ page_xp : extended pointer on page descriptor. … … 494 496 * @ return 0 if success / return EIO if error during device access. 495 497 ****************************************************************************************/ 496 error_t fatfs_move_page( xptr_t page_xp,497 cmd_type_t cmd_type );498 error_t fatfs_move_page( xptr_t page_xp, 499 ioc_cmd_type_t cmd_type ); 498 500 499 501 -
trunk/kernel/fs/ramfs.h
r188 r657 3 3 * 4 4 * Authors Mohamed Lamine Karaoui (2014,2015) 5 * Alain Greiner (2016,2017 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 29 29 // The RAMFS File System does not uses any external device to store data. 30 30 // It stores the dynamically created files and directories in the VFS mappers. 31 // The ramfs_read_page() and ramfs_write_page() functions should never be used.32 31 // The RAMFS cannot be used as the root File System. 33 32 // -
trunk/kernel/fs/vfs.c
r656 r657 3 3 * 4 4 * Author Mohamed Lamine Karaoui (2015) 5 * Alain Greiner (2016,2017,2018,2019 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 33 33 #include <xhtab.h> 34 34 #include <string.h> 35 #include <rpc.h>36 35 #include <errno.h> 37 36 #include <kmem.h> … … 59 58 ////////////////////////////////////////////////////////////////////////////////////////// 60 59 61 /////////////////////////////////////// /62 void vfs_ctx_init( vfs_fs_type_t type,63 uint32_t attr,60 /////////////////////////////////////// 61 void vfs_ctx_init( cxy_t cxy, 62 vfs_fs_type_t fs_type, 64 63 uint32_t total_clusters, 65 64 uint32_t cluster_size, … … 67 66 void * extend ) 68 67 { 69 vfs_ctx_t * vfs_ctx = &fs_context[type]; 70 71 vfs_ctx->type = type; 72 vfs_ctx->attr = attr; 73 vfs_ctx->total_clusters = total_clusters; 74 vfs_ctx->cluster_size = cluster_size; 75 vfs_ctx->vfs_root_xp = vfs_root_xp; 76 vfs_ctx->extend = extend; 77 78 busylock_init( &vfs_ctx->lock , LOCK_VFS_CTX ); 79 80 bitmap_init( vfs_ctx->bitmap , BITMAP_SIZE(CONFIG_VFS_MAX_INODES) ); 81 } 82 83 //////////////////////////////////////////// 84 error_t vfs_ctx_inum_alloc( vfs_ctx_t * ctx, 68 // get pointer on relevant VFS context (same in all clusters) 69 vfs_ctx_t * vfs_ctx_ptr = &fs_context[fs_type]; 70 71 // initialise VFS context fields 72 hal_remote_s32( XPTR( cxy , &vfs_ctx_ptr->type ) , fs_type ); 73 hal_remote_s32( XPTR( cxy , &vfs_ctx_ptr->total_clusters ) , total_clusters ); 74 hal_remote_s32( XPTR( cxy , &vfs_ctx_ptr->cluster_size ) , cluster_size ); 75 hal_remote_s64( XPTR( cxy , &vfs_ctx_ptr->vfs_root_xp ) , vfs_root_xp ); 76 hal_remote_spt( XPTR( cxy , &vfs_ctx_ptr->extend ) , extend ); 77 78 // initialize VFS context lock 79 remote_busylock_init( XPTR( cxy , &vfs_ctx_ptr->lock ) , LOCK_VFS_CTX ); 80 81 // initialize inum allocator 82 bitmap_remote_init( XPTR( cxy , &vfs_ctx_ptr->bitmap ), 83 BITMAP_SIZE(CONFIG_VFS_MAX_INODES) ); 84 85 } // end 
vfs_ctx_init() 86 87 /////////////////////////////////////////////// 88 error_t vfs_ctx_inum_alloc( xptr_t ctx_xp, 85 89 uint32_t * inum ) 86 90 { 91 // get context cluster and local pointer 92 cxy_t ctx_cxy = GET_CXY( ctx_xp ); 93 vfs_ctx_t * ctx_ptr = GET_PTR( ctx_xp ); 94 95 // build extended pointer on lock protecting the inum allocator 96 xptr_t lock_xp = XPTR( ctx_cxy , &ctx_ptr->lock ); 97 98 // build extended pointer on inum bitmap 99 xptr_t bitmap_xp = XPTR( ctx_cxy , &ctx_ptr->bitmap ); 100 87 101 // get lock on inum allocator 88 busylock_acquire( &ctx->lock);102 remote_busylock_acquire( lock_xp ); 89 103 90 104 // get lid from local inum allocator 91 uint32_t lid = bitmap_ ffc( ctx->bitmap , CONFIG_VFS_MAX_INODES );105 uint32_t lid = bitmap_remote_ffc( bitmap_xp , CONFIG_VFS_MAX_INODES ); 92 106 93 107 if( lid == 0xFFFFFFFF ) // no more free slot => error 94 108 { 95 109 // release lock 96 busylock_release( &ctx->lock);110 remote_busylock_release( lock_xp ); 97 111 98 112 // return error 99 return 1;113 return -1; 100 114 } 101 115 else // found => return inum 102 116 { 103 117 // set slot allocated 104 bitmap_ set( ctx->bitmap , lid );118 bitmap_remote_set( bitmap_xp , lid ); 105 119 106 120 // release lock 107 busylock_release( &ctx->lock);121 remote_busylock_release( lock_xp ); 108 122 109 123 // return inum … … 111 125 return 0; 112 126 } 113 } 114 115 //////////////////////////////////////////// 116 void vfs_ctx_inum_release( vfs_ctx_t * ctx, 117 uint32_t inum ) 118 { 119 bitmap_clear( ctx->bitmap , inum & 0xFFFF ); 120 } 127 } // end vfs_ctx_inum_alloc() 128 129 ///////////////////////////////////////////// 130 void vfs_ctx_inum_release( xptr_t ctx_xp, 131 uint32_t inum ) 132 { 133 // get context cluster and local pointer 134 cxy_t ctx_cxy = GET_CXY( ctx_xp ); 135 vfs_ctx_t * ctx_ptr = GET_PTR( ctx_xp ); 136 137 // build extended pointer on inum bitmap 138 xptr_t bitmap_xp = XPTR( ctx_cxy , &ctx_ptr->bitmap ); 139 140 // build extended pointer on 
lock 141 xptr_t lock_xp = XPTR( ctx_cxy , &ctx_ptr->lock ); 142 143 // get lock 144 remote_busylock_acquire( lock_xp ); 145 146 bitmap_remote_clear( bitmap_xp , inum & 0xFFFF ); 147 148 // release lock 149 remote_busylock_release( lock_xp ); 150 151 } // end vfs_ctx_inum_release() 121 152 122 153 ////////////////////////////////////////////////////////////////////////////////////////// … … 140 171 } 141 172 142 //////////////////////////////////////////////////// 143 error_t vfs_inode_create( vfs_fs_type_t fs_type, 173 //////////////////////////////////////////////// 174 error_t vfs_inode_create( cxy_t cxy, 175 vfs_fs_type_t fs_type, 144 176 uint32_t attr, 145 177 uint32_t rights, … … 148 180 xptr_t * inode_xp ) 149 181 { 150 mapper_t * mapper; // associated mapper( to be allocated)151 vfs_inode_t * inode; // inode descriptor (to be allocated)152 153 uint32_t inum; // inode identifier (to be allocated)154 vfs_ctx_t * ctx; // file system context155 kmem_req_t req; // request to kernel memory allocator182 xptr_t mapper_xp; // extended pointer on associated mapper 183 mapper_t * mapper_ptr; // local pointer on associated mapper 184 vfs_inode_t * inode_ptr; // local pointer on allocated inode 185 uint32_t inum; // inode identifier (to be allocated) 186 vfs_ctx_t * ctx; // file system context 187 kmem_req_t req; // request to kernel memory allocator 156 188 error_t error; 157 189 … … 166 198 } 167 199 168 // allocate inum169 error = vfs_ctx_inum_alloc( ctx , &inum );170 171 if( error )172 {173 printk("\n[ERROR] in %s : cannot allocate inum\n", __FUNCTION__ );174 return -1;175 }176 177 // allocate memory for mapper178 mapper = mapper_create( fs_type );179 180 if( mapper == NULL )181 {182 printk("\n[ERROR] in %s : cannot allocate mapper\n", __FUNCTION__ );183 vfs_ctx_inum_release( ctx , inum );184 return ENOMEM;185 }186 187 200 // check inode descriptor contained in one page 188 201 assert( (sizeof(vfs_inode_t) <= CONFIG_PPM_PAGE_SIZE), 189 202 "inode descriptor must fit 
in one page" ); 190 203 204 // allocate inum 205 error = vfs_ctx_inum_alloc( XPTR( cxy , ctx ) , &inum ); 206 207 if( error ) 208 { 209 printk("\n[ERROR] in %s : cannot allocate inum\n", __FUNCTION__ ); 210 return -1; 211 } 212 213 // allocate memory for mapper in cluster cxy 214 mapper_xp = mapper_create( cxy , fs_type ); 215 216 if( mapper_xp == XPTR_NULL ) 217 { 218 printk("\n[ERROR] in %s : cannot allocate mapper\n", __FUNCTION__ ); 219 vfs_ctx_inum_release( XPTR( cxy , ctx ) , inum ); 220 return -1; 221 } 222 223 mapper_ptr = GET_PTR( mapper_xp ); 224 191 225 // allocate one page for VFS inode descriptor 192 // because the embedded "children xhtab footprint193 req.type = KMEM_PPM;194 req.order = 0;195 req.flags = AF_KERNEL | AF_ZERO;196 inode = kmem_alloc(&req );197 198 if( inode == NULL )226 // because the embedded "children" xhtab footprint 227 req.type = KMEM_PPM; 228 req.order = 0; 229 req.flags = AF_KERNEL | AF_ZERO; 230 inode_ptr = kmem_remote_alloc( cxy , &req ); 231 232 if( inode_ptr == NULL ) 199 233 { 200 234 printk("\n[ERROR] in %s : cannot allocate inode descriptor\n", __FUNCTION__ ); 201 vfs_ctx_inum_release( ctx, inum );202 mapper_destroy( mapper );235 vfs_ctx_inum_release( XPTR( cxy , ctx ) , inum ); 236 mapper_destroy( mapper_xp ); 203 237 return -1; 204 238 } 205 239 240 // initialise inode field in mapper 241 hal_remote_spt( XPTR( cxy , &mapper_ptr->inode ) , inode_ptr ); 242 206 243 // initialize inode descriptor 207 inode->type = INODE_TYPE_FILE; // default value 208 inode->inum = inum; 209 inode->attr = attr; 210 inode->rights = rights; 211 inode->uid = uid; 212 inode->gid = gid; 213 inode->ctx = ctx; 214 inode->mapper = mapper; 215 inode->extend = NULL; 216 inode->links = 0; 217 218 // initialise inode field in mapper 219 mapper->inode = inode; 220 244 hal_remote_s32( XPTR( cxy , &inode_ptr->type ) , INODE_TYPE_FILE ); // default value 245 hal_remote_s32( XPTR( cxy , &inode_ptr->inum ) , inum ); 246 hal_remote_s32( XPTR( cxy , 
&inode_ptr->attr ) , attr ); 247 hal_remote_s32( XPTR( cxy , &inode_ptr->rights ) , rights ); 248 hal_remote_s32( XPTR( cxy , &inode_ptr->links ) , 0 ); 249 hal_remote_s32( XPTR( cxy , &inode_ptr->uid ) , uid ); 250 hal_remote_s32( XPTR( cxy , &inode_ptr->gid ) , gid ); 251 hal_remote_spt( XPTR( cxy , &inode_ptr->ctx ) , ctx ); 252 hal_remote_spt( XPTR( cxy , &inode_ptr->mapper ) , mapper_ptr ); 253 hal_remote_spt( XPTR( cxy , &inode_ptr->extend ) , NULL ); 254 221 255 // initialize chidren dentries xhtab 222 xhtab_init( &inode->children, XHTAB_DENTRY_TYPE );256 xhtab_init( XPTR( cxy , &inode_ptr->children ) , XHTAB_DENTRY_TYPE ); 223 257 224 258 // initialize parents dentries xlist 225 xlist_root_init( XPTR( local_cxy , &inode->parents ) );259 xlist_root_init( XPTR( cxy , &inode_ptr->parents ) ); 226 260 227 261 // initialize lock protecting size 228 remote_rwlock_init( XPTR( local_cxy , &inode->size_lock ), LOCK_VFS_SIZE );262 remote_rwlock_init( XPTR( cxy , &inode_ptr->size_lock ), LOCK_VFS_SIZE ); 229 263 230 264 // initialise lock protecting inode tree traversal 231 remote_rwlock_init( XPTR( local_cxy , &inode->main_lock ), LOCK_VFS_MAIN );265 remote_rwlock_init( XPTR( cxy , &inode_ptr->main_lock ), LOCK_VFS_MAIN ); 232 266 233 267 // return extended pointer on inode 234 *inode_xp = XPTR( local_cxy , inode);268 *inode_xp = XPTR( cxy , inode_ptr ); 235 269 236 270 #if DEBUG_VFS_INODE_CREATE … … 238 272 thread_t * this = CURRENT_THREAD; 239 273 if( DEBUG_VFS_INODE_CREATE < cycle ) 240 printk("\n[%s] thread[%x,%x] created inode (%x,%x) / c ycle %d\n",241 __FUNCTION__, this->process->pid, this->trdid, local_cxy, inode, cycle );274 printk("\n[%s] thread[%x,%x] created inode (%x,%x) / ctx %x / fs_type %d / cycle %d\n", 275 __FUNCTION__, this->process->pid, this->trdid, cxy, inode_ptr, ctx, ctx->type, cycle ); 242 276 #endif 243 277 … … 246 280 } // end vfs_inode_create() 247 281 248 ///////////////////////////////////////////// 249 void vfs_inode_destroy( 
vfs_inode_t * inode ) 250 { 282 ////////////////////////////////////////// 283 void vfs_inode_destroy( xptr_t inode_xp ) 284 { 285 // get cluster and local pointer 286 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); 287 cxy_t inode_cxy = GET_CXY( inode_xp ); 288 251 289 // release memory allocated for mapper 252 mapper_destroy( inode->mapper);253 254 // release memory allocate for inode descriptor290 mapper_destroy( XPTR( inode_cxy , &inode_ptr->mapper ) ); 291 292 // release memory allocated for inode descriptor 255 293 kmem_req_t req; 256 294 req.type = KMEM_PPM; 257 req.ptr = inode ;258 kmem_ free(&req );295 req.ptr = inode_ptr; 296 kmem_remote_free( inode_cxy , &req ); 259 297 260 298 } // end vfs_inode_destroy() … … 339 377 340 378 // get inode cluster and local pointer 379 inode_ptr = GET_PTR( inode_xp ); 341 380 inode_cxy = GET_CXY( inode_xp ); 342 inode_ptr = GET_PTR( inode_xp );343 381 344 382 // build extended pointer on parents dentries root … … 367 405 } // end vfs_inode_get_name() 368 406 369 /////////////////////////////////////////////////////// 370 error_t vfs_inode_load_all_pages( vfs_inode_t * inode ) 371 { 372 373 assert( (inode != NULL) , "inode pointer is NULL" ); 374 407 //////////////////////////////////////////////////// 408 error_t vfs_inode_load_all_pages( xptr_t inode_xp ) 409 { 375 410 uint32_t page_id; 376 411 xptr_t page_xp; 377 412 378 mapper_t * mapper = inode->mapper; 379 uint32_t size = inode->size; 380 381 assert( (mapper != NULL) , "mapper pointer is NULL" ); 413 414 // get inode cluster and local pointer 415 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); 416 cxy_t inode_cxy = GET_CXY( inode_xp ); 417 418 // get pointer on mapper and size 419 mapper_t * mapper = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 420 uint32_t size = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->size ) ); 382 421 383 422 #if DEBUG_VFS_INODE_LOAD_ALL 423 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 384 424 uint32_t cycle = 
(uint32_t)hal_get_cycles(); 385 425 thread_t * this = CURRENT_THREAD; 386 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 387 vfs_inode_get_name( XPTR( local_cxy , inode ) , name ); 426 vfs_inode_get_name( inode_xp , name ); 388 427 if( DEBUG_VFS_INODE_LOAD_ALL < cycle ) 389 428 printk("\n[%s] thread[%x,%x] enter for <%s> in cluster %x / cycle %d\n", 390 __FUNCTION__, this->process->pid, this->trdid, name, local_cxy, cycle );429 __FUNCTION__, this->process->pid, this->trdid, name, inode_cxy, cycle ); 391 430 #endif 392 431 … … 400 439 // If the mage is missing, this function allocates the missing page, 401 440 // and load the page from IOC device into mapper 402 page_xp = mapper_ remote_get_page( XPTR( local_cxy , mapper ), page_id );441 page_xp = mapper_get_page( XPTR( inode_cxy , mapper ), page_id ); 403 442 404 443 if( page_xp == XPTR_NULL ) return -1; … … 408 447 cycle = (uint32_t)hal_get_cycles(); 409 448 if( DEBUG_VFS_INODE_LOAD_ALL < cycle ) 410 printk("\n[%s] thread[%x,%x] exit for <% x> in cluster %x / cycle %d\n",411 __FUNCTION__, this->process->pid, this->trdid, name, local_cxy, cycle);449 printk("\n[%s] thread[%x,%x] exit for <%s> in cluster %x\n", 450 __FUNCTION__, this->process->pid, this->trdid, name, inode_cxy ); 412 451 #endif 413 452 … … 425 464 vfs_inode_get_name( inode_xp , name ); 426 465 466 // get inode cluster and local pointer 427 467 cxy_t inode_cxy = GET_CXY( inode_xp ); 428 468 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); … … 461 501 ////////////////////////////////////////////////////////////////////////////////////////// 462 502 463 /////////////////////////////////////////////////// 464 error_t vfs_dentry_create( vfs_fs_type_t fs_type, 503 /////////////////////////////////////////////// 504 error_t vfs_dentry_create( cxy_t cxy, 505 vfs_fs_type_t fs_type, 465 506 char * name, 466 507 xptr_t * dentry_xp ) 467 508 { 468 vfs_ctx_t * ctx; // context descriptor 469 vfs_dentry_t * dentry; // dentry descriptor (to be allocated) 470 kmem_req_t req; 
// request to kernel memory allocator 509 kmem_req_t req; // request to kernel memory allocator 510 vfs_ctx_t * ctx = NULL; // context descriptor 511 vfs_dentry_t * dentry_ptr; // dentry descriptor (to be allocated) 512 513 #if DEBUG_VFS_DENTRY_CREATE 514 thread_t * this = CURRENT_THREAD; 515 uint32_t cycle = (uint32_t)hal_get_cycles(); 516 if( DEBUG_VFS_DENTRY_CREATE < cycle ) 517 printk("\n[%s] thread[%x,%x] enters for <%s> / fs_type %x / cycle %d\n", 518 __FUNCTION__, this->process->pid, this->trdid, name, fs_type, cycle ); 519 #endif 471 520 472 521 // get pointer on context … … 476 525 else 477 526 { 478 ctx = NULL;527 printk("\n[ERROR] in %s undefined fs_type %d\n", __FUNCTION__, fs_type ); 479 528 return -1; 480 529 } … … 483 532 uint32_t length = strlen( name ); 484 533 485 if( length >= CONFIG_VFS_MAX_NAME_LENGTH ) return EINVAL;534 if( length >= CONFIG_VFS_MAX_NAME_LENGTH ) return -1; 486 535 487 536 // allocate memory for dentry descriptor 488 req.type = KMEM_KCM;489 req.order = bits_log2( sizeof(vfs_dentry_t) );490 req.flags = AF_KERNEL | AF_ZERO;491 dentry = kmem_alloc(&req );492 493 if( dentry == NULL )537 req.type = KMEM_KCM; 538 req.order = bits_log2( sizeof(vfs_dentry_t) ); 539 req.flags = AF_KERNEL | AF_ZERO; 540 dentry_ptr = kmem_remote_alloc( cxy , &req ); 541 542 if( dentry_ptr == NULL ) 494 543 { 495 544 printk("\n[ERROR] in %s : cannot allocate dentry descriptor\n", … … 499 548 500 549 // initialize dentry descriptor 501 dentry->ctx = ctx; 502 dentry->length = length; 503 dentry->extend = NULL; 504 strcpy( dentry->name , name ); 550 hal_remote_spt( XPTR( cxy , &dentry_ptr->ctx ) , ctx ); 551 hal_remote_s32( XPTR( cxy , &dentry_ptr->length ) , length ); 552 hal_remote_spt( XPTR( cxy , &dentry_ptr->extend ) , NULL ); 553 554 // register name 555 hal_remote_strcpy( XPTR( cxy, dentry_ptr->name ), 556 XPTR( local_cxy, name ) ); 505 557 506 558 // return extended pointer on dentry 507 *dentry_xp = XPTR( local_cxy , dentry);559 *dentry_xp = XPTR( 
cxy , dentry_ptr ); 508 560 509 561 #if DEBUG_VFS_DENTRY_CREATE 510 thread_t * this = CURRENT_THREAD;511 uint32_t cycle = (uint32_t)hal_get_cycles();512 562 if( DEBUG_VFS_DENTRY_CREATE < cycle ) 513 printk("\n[%s] thread[%x,%x] created dentry <%s> : (%x,%x) / cycle %d\n",514 __FUNCTION__, this->process->pid, this->trdid, name, local_cxy, dentry, cycle);563 printk("\n[%s] thread[%x,%x] exit for <%s> / dentry (%x,%x)\n", 564 __FUNCTION__, this->process->pid, this->trdid, name, cxy, dentry_ptr ); 515 565 #endif 516 566 … … 519 569 } // end vfs_dentry_create() 520 570 521 //////////////////////////////////////////////// 522 void vfs_dentry_destroy( vfs_dentry_t * dentry ) 523 { 571 //////////////////////////////////////////// 572 void vfs_dentry_destroy( xptr_t dentry_xp ) 573 { 574 // get cluster and local pointer 575 vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp ); 576 cxy_t dentry_cxy = GET_CXY( dentry_xp ); 577 524 578 // release memory allocated to dentry 525 579 kmem_req_t req; 526 580 req.type = KMEM_KCM; 527 req.ptr = dentry ;528 kmem_ free(&req );581 req.ptr = dentry_ptr; 582 kmem_remote_free( dentry_cxy , &req ); 529 583 530 584 } // end vfs_dentry_destroy() … … 536 590 537 591 ///////////////////////////////////////////// 538 error_t vfs_file_create( vfs_inode_t * inode,539 uint32_t 540 xptr_t 541 { 542 vfs_file_t * file ;592 error_t vfs_file_create( xptr_t inode_xp, 593 uint32_t attr, 594 xptr_t * file_xp ) 595 { 596 vfs_file_t * file_ptr; 543 597 kmem_req_t req; 598 uint32_t type; 599 mapper_t * mapper; 600 vfs_ctx_t * ctx; 601 602 // get inode cluster and local pointer 603 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); 604 cxy_t inode_cxy = GET_CXY( inode_xp ); 544 605 545 606 #if DEBUG_VFS_FILE_CREATE … … 547 608 uint32_t cycle = (uint32_t)hal_get_cycles(); 548 609 if( DEBUG_VFS_OPEN < cycle ) 549 printk("\n[%s] thread[%x,%x] enter for inode %x in cluster %x/ cycle %d\n",550 __FUNCTION__, this->process->pid, this->trdid, inode , local_cxy, cycle );610 
printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) / cycle %d\n", 611 __FUNCTION__, this->process->pid, this->trdid, inode_cxy, inode_ptr, cycle ); 551 612 #endif 552 613 … … 555 616 req.order = bits_log2( sizeof(vfs_file_t) ); 556 617 req.flags = AF_KERNEL | AF_ZERO; 557 file = kmem_alloc( &req ); 558 559 if( file == NULL ) return ENOMEM; 618 file_ptr = kmem_remote_alloc( inode_cxy , &req ); 619 620 if( file_ptr == NULL ) return -1; 621 622 // get type, ctx and mapper from inode descriptor 623 type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) ); 624 ctx = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->ctx ) ); 625 mapper = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 560 626 561 627 // initializes new file descriptor 562 file->gc = 0; 563 file->type = inode->type; 564 file->attr = attr; 565 file->offset = 0; 566 file->refcount = 1; 567 file->inode = inode; 568 file->ctx = inode->ctx; 569 file->mapper = inode->mapper; 570 571 remote_rwlock_init( XPTR( local_cxy , &file->lock ), LOCK_VFS_FILE ); 572 573 *file_xp = XPTR( local_cxy , file ); 628 hal_remote_s32( XPTR( inode_cxy , &file_ptr->type ) , type ); 629 hal_remote_s32( XPTR( inode_cxy , &file_ptr->attr ) , attr ); 630 hal_remote_s32( XPTR( inode_cxy , &file_ptr->offset ) , 0 ); 631 hal_remote_s32( XPTR( inode_cxy , &file_ptr->refcount ) , 1 ); 632 hal_remote_spt( XPTR( inode_cxy , &file_ptr->inode ) , inode_ptr ); 633 hal_remote_spt( XPTR( inode_cxy , &file_ptr->ctx ) , ctx ); 634 hal_remote_spt( XPTR( inode_cxy , &file_ptr->mapper ) , mapper ); 635 636 remote_rwlock_init( XPTR( inode_cxy , &file_ptr->lock ), LOCK_VFS_FILE ); 637 638 *file_xp = XPTR( inode_cxy , file_ptr ); 574 639 575 640 #if DEBUG_VFS_FILE_CREATE 576 641 cycle = (uint32_t)hal_get_cycles(); 577 642 if( DEBUG_VFS_OPEN < cycle ) 578 printk("\n[%s] thread[%x,%x] created file %x in cluster %x / cycle %d\n",579 __FUNCTION__, this->process->pid, this->trdid, file, local_cxy, cycle );643 printk("\n[%s] thread[%x,%x] created 
file (%x,%x) %x\n", 644 __FUNCTION__, this->process->pid, this->trdid, inode_cxy, file_ptr, cycle ); 580 645 #endif 581 646 … … 584 649 } // end vfs_file_create() 585 650 586 /////////////////////////////////////////// 587 void vfs_file_destroy( vfs_file_t * file ) 588 { 651 //////////////////////////////////////// 652 void vfs_file_destroy( xptr_t file_xp ) 653 { 654 // get file cluster and local pointer 655 vfs_file_t * file_ptr = GET_PTR( file_xp ); 656 cxy_t file_cxy = GET_CXY( file_xp ); 657 658 // release file descriptor 589 659 kmem_req_t req; 590 660 req.type = KMEM_KCM; 591 req.ptr = file ;592 kmem_ free(&req );661 req.ptr = file_ptr; 662 kmem_remote_free( file_cxy , &req ); 593 663 594 664 #if DEBUG_VFS_CLOSE 595 665 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 596 vfs_file_get_name( XPTR( local_cxy , file ), name );666 vfs_file_get_name( file_xp , name ); 597 667 thread_t * this = CURRENT_THREAD; 598 668 uint32_t cycle = (uint32_t)hal_get_cycles(); 599 669 if( DEBUG_VFS_CLOSE < cycle ) 600 670 printk("\n[%s] thread[%x,%x] deleted file <%s> in cluster %x / cycle %d\n", 601 __FUNCTION__, this->process->pid, this->trdid, name, local_cxy, cycle );671 __FUNCTION__, this->process->pid, this->trdid, name, file_cxy, cycle ); 602 672 #endif 603 673 … … 738 808 739 809 // create a new file descriptor in cluster containing inode 740 if( inode_cxy == local_cxy ) // target cluster is local 741 { 742 error = vfs_file_create( inode_ptr , file_attr , &file_xp ); 743 } 744 else // target cluster is remote 745 { 746 rpc_vfs_file_create_client( inode_cxy , inode_ptr , file_attr , &file_xp , &error ); 747 } 810 error = vfs_file_create( inode_xp , file_attr , &file_xp ); 748 811 749 812 if( error ) return error; … … 764 827 cycle = (uint32_t)hal_get_cycles(); 765 828 if( DEBUG_VFS_OPEN < cycle ) 766 printk("\n[%s] thread[%x,%x] exit for <%s> / fdid %d / cxy %x / cycle %d\n", 767 __FUNCTION__, process->pid, this->trdid, path, file_id, GET_CXY( file_xp ), cycle ); 829 printk("\n[%s] 
thread[%x,%x] exit for <%s> / fdid %d / file(%x,%x) / cycle %d\n", 830 __FUNCTION__, process->pid, this->trdid, path, file_id, 831 GET_CXY( file_xp ), GET_PTR( file_xp ), cycle ); 768 832 #endif 769 833 … … 880 944 __FUNCTION__ , this->process->pid, nbytes ); 881 945 else 882 printk("\n[%s] thread[%x,%x] exit / %d bytes moved from buffer to mapper / size %d\n",883 __FUNCTION__ , this->process->pid, nbytes , hal_remote_l32(XPTR(file_cxy,&inode->size)));946 printk("\n[%s] thread[%x,%x] exit / %d bytes moved from buffer to mapper\n", 947 __FUNCTION__ , this->process->pid, nbytes ); 884 948 } 885 949 #endif … … 1035 1099 { 1036 1100 cxy_t file_cxy; // cluster containing the file descriptor. 1037 vfs_file_t * file_ptr; // local po nter on file descriptor1101 vfs_file_t * file_ptr; // local pointer on file descriptor 1038 1102 cxy_t owner_cxy; // process owner cluster 1039 1103 pid_t pid; // process identifier … … 1076 1140 1077 1141 // copy all dirty pages from mapper to device 1078 if( file_cxy == local_cxy ) 1079 { 1080 error = mapper_sync( mapper_ptr ); 1081 } 1082 else 1083 { 1084 rpc_mapper_sync_client( file_cxy, 1085 mapper_ptr, 1086 &error ); 1087 } 1142 error = mapper_sync( XPTR( file_cxy , mapper_ptr ) ); 1088 1143 1089 1144 if( error ) … … 1128 1183 1129 1184 // update dentry size in parent directory mapper 1130 if( parent_cxy == local_cxy ) 1131 { 1132 error = vfs_fs_update_dentry( parent_inode_ptr, 1133 parent_dentry_ptr, 1134 size ); 1135 } 1136 else 1137 { 1138 rpc_vfs_fs_update_dentry_client( parent_cxy, 1139 parent_inode_ptr, 1140 parent_dentry_ptr, 1141 size, 1142 &error ); 1143 } 1185 error = vfs_fs_update_dentry( XPTR( parent_cxy , parent_inode_ptr ), 1186 parent_dentry_ptr ); 1144 1187 1145 1188 if( error ) … … 1159 1202 1160 1203 // copy all dirty pages from parent mapper to device 1161 if( parent_cxy == local_cxy ) 1162 { 1163 error = mapper_sync( parent_mapper_ptr ); 1164 } 1165 else 1166 { 1167 rpc_mapper_sync_client( parent_cxy, 1168 
parent_mapper_ptr, 1169 &error ); 1170 } 1204 error = mapper_sync( XPTR( parent_cxy , parent_mapper_ptr ) ); 1171 1205 1172 1206 if( error ) … … 1222 1256 //////// 4) release memory allocated to file descriptor in remote cluster 1223 1257 1224 if( file_cxy == local_cxy ) // file cluster is local 1225 { 1226 vfs_file_destroy( file_ptr ); 1227 } 1228 else // file cluster is local 1229 { 1230 rpc_vfs_file_destroy_client( file_cxy , file_ptr ); 1231 } 1258 vfs_file_destroy( file_xp ); 1232 1259 1233 1260 #if DEBUG_VFS_CLOSE … … 1320 1347 1321 1348 // 2. create one new dentry in parent cluster 1322 if( parent_cxy == local_cxy ) 1323 { 1324 error = vfs_dentry_create( parent_fs_type, 1325 last_name, 1326 &dentry_xp ); 1327 } 1328 else 1329 { 1330 rpc_vfs_dentry_create_client( parent_cxy, 1331 parent_fs_type, 1332 last_name, 1333 &dentry_xp, 1334 &error ); 1335 } 1336 1349 error = vfs_dentry_create( parent_cxy, 1350 parent_fs_type, 1351 last_name, 1352 &dentry_xp ); 1337 1353 if( error ) 1338 1354 { … … 1361 1377 inode_cxy = cluster_random_select(); 1362 1378 1363 if( inode_cxy == local_cxy ) // target cluster is local 1364 { 1365 error = vfs_inode_create( parent_fs_type, 1366 attr, 1367 rights, 1368 uid, 1369 gid, 1370 &inode_xp ); 1371 } 1372 else // target cluster is remote 1373 { 1374 rpc_vfs_inode_create_client( inode_cxy, 1375 parent_fs_type, 1376 attr, 1377 rights, 1378 uid, 1379 gid, 1380 &inode_xp, 1381 &error ); 1382 } 1383 1379 // create inode 1380 error = vfs_inode_create( inode_cxy, 1381 parent_fs_type, 1382 attr, 1383 rights, 1384 uid, 1385 gid, 1386 &inode_xp ); 1384 1387 if( error ) 1385 1388 { 1386 1389 remote_rwlock_wr_release( lock_xp ); 1387 1390 printk("\n[ERROR] in %s : cannot create new inode in cluster %x for <%s>\n", 1388 __FUNCTION__ , inode_cxy , path ); 1389 if( parent_cxy == local_cxy ) vfs_dentry_destroy( dentry_ptr ); 1390 else rpc_vfs_dentry_destroy_client( parent_cxy , dentry_ptr ); 1391 __FUNCTION__ , inode_cxy , path ); 1392 
vfs_dentry_destroy( dentry_xp ); 1391 1393 return -1; 1392 1394 } … … 1434 1436 remote_rwlock_wr_release( lock_xp ); 1435 1437 printk("\n[ERROR] in %s : cannot create new inode in cluster %x for <%s>\n", 1436 __FUNCTION__ , inode_cxy , path ); 1437 if( parent_cxy == local_cxy ) vfs_dentry_destroy( dentry_ptr ); 1438 else rpc_vfs_dentry_destroy_client( parent_cxy , dentry_ptr ); 1438 __FUNCTION__ , inode_cxy , path ); 1439 vfs_dentry_destroy( dentry_xp ); 1439 1440 return -1; 1440 1441 } … … 1445 1446 // 8. update parent directory mapper 1446 1447 // and synchronize the parent directory on IOC device 1447 if (parent_cxy == local_cxy) 1448 { 1449 error = vfs_fs_add_dentry( parent_ptr, 1450 dentry_ptr ); 1451 } 1452 else 1453 { 1454 rpc_vfs_fs_add_dentry_client( parent_cxy, 1455 parent_ptr, 1456 dentry_ptr, 1457 &error ); 1458 } 1459 1448 error = vfs_fs_add_dentry( parent_xp, 1449 dentry_ptr ); 1460 1450 if( error ) 1461 1451 { … … 1589 1579 { 1590 1580 // 1. create one new dentry 1591 if( new_parent_cxy == local_cxy ) 1592 { 1593 error = vfs_dentry_create( inode_fs_type, 1594 new_name, 1595 &dentry_xp ); 1596 } 1597 else 1598 { 1599 rpc_vfs_dentry_create_client( new_parent_cxy, 1600 inode_fs_type, 1601 new_name, 1602 &dentry_xp, 1603 &error ); 1604 } 1605 1581 error = vfs_dentry_create( new_parent_cxy, 1582 inode_fs_type, 1583 new_name, 1584 &dentry_xp ); 1606 1585 if( error ) 1607 1586 { … … 1643 1622 // and synchronize the parent directory on IOC device 1644 1623 if (new_parent_cxy == local_cxy) 1645 { 1646 error = vfs_fs_add_dentry( new_parent_ptr, 1647 dentry_ptr ); 1648 } 1649 else 1650 { 1651 rpc_vfs_fs_add_dentry_client( new_parent_cxy, 1652 new_parent_ptr, 1653 dentry_ptr, 1654 &error ); 1655 } 1624 error = vfs_fs_add_dentry( new_parent_xp, 1625 dentry_ptr ); 1656 1626 if( error ) 1657 1627 { … … 1703 1673 vfs_fs_type_t fs_type; // File system type 1704 1674 1705 char name[CONFIG_VFS_MAX_NAME_LENGTH]; // name of link to remove 1675 char 
child_name[CONFIG_VFS_MAX_NAME_LENGTH]; // name of link to remove 1676 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; // name of parent directory 1706 1677 1707 1678 thread_t * this = CURRENT_THREAD; … … 1731 1702 VFS_LOOKUP_PARENT, 1732 1703 &parent_xp, 1733 name );1704 child_name ); 1734 1705 if( error ) 1735 1706 { 1736 1707 remote_rwlock_wr_release( lock_xp ); 1737 1708 printk("\n[ERROR] in %s : cannot get parent inode for <%s> in <%s>\n", 1738 __FUNCTION__, name, path );1709 __FUNCTION__, child_name, path ); 1739 1710 return -1; 1740 1711 } 1741 1742 // get parent inode cluster and local pointer 1712 1713 // get parent inode name, cluster and local pointer 1714 vfs_inode_get_name( parent_xp , parent_name ); 1743 1715 parent_cxy = GET_CXY( parent_xp ); 1744 1716 parent_ptr = GET_PTR( parent_xp ); 1745 1717 1746 1718 #if( DEBUG_VFS_UNLINK & 1 ) 1747 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH];1748 vfs_inode_get_name( parent_xp , parent_name );1749 1719 if( DEBUG_VFS_UNLINK < cycle ) 1750 1720 printk("\n[%s] thread[%x,%x] : parent inode <%s> is (%x,%x)\n", … … 1756 1726 1757 1727 // try to get extended pointer on dentry from Inode Tree 1758 dentry_xp = xhtab_lookup( children_xp , name );1728 dentry_xp = xhtab_lookup( children_xp , child_name ); 1759 1729 1760 // when dentry not found in Inode Tree, try to get it from inode tree1730 // when dentry not found in Inode Tree, try to get it from mapper 1761 1731 1762 1732 if( dentry_xp == XPTR_NULL ) // miss target dentry in Inode Tree … … 1765 1735 #if( DEBUG_VFS_UNLINK & 1 ) 1766 1736 if( DEBUG_VFS_UNLINK < cycle ) 1767 printk("\n[%s] thread[%x,%x] : inode <%s> not found => scan parent mapper \n",1768 __FUNCTION__, process->pid, this->trdid, name );1737 printk("\n[%s] thread[%x,%x] : inode <%s> not found => scan parent mapper <%s>\n", 1738 __FUNCTION__, process->pid, this->trdid, child_name , parent_name ); 1769 1739 #endif 1770 1740 // get parent inode FS type … … 1779 1749 fs_type, 1780 1750 parent_xp, 1781 
name,1751 child_name, 1782 1752 &dentry_xp, 1783 1753 &inode_xp ); 1784 1754 if( error ) 1785 1755 { 1786 printk("\n[ERROR] in %s : cannot create inode <%s> in path <%s>\n", 1787 __FUNCTION__ , name, path ); 1788 1789 vfs_remove_child_from_parent( dentry_xp ); 1756 printk("\n[ERROR] in %s : cannot create inode <%s> in Inode Tree\n", 1757 __FUNCTION__ , child_name ); 1790 1758 return -1; 1791 1759 } … … 1797 1765 // scan parent mapper to find the missing dentry, and complete 1798 1766 // initialisation of new dentry and new inode descriptors In Inode Tree 1799 if( parent_cxy == local_cxy ) 1767 error = vfs_fs_new_dentry_from_mapper( parent_xp, 1768 dentry_ptr ); 1769 if ( error ) 1800 1770 { 1801 error = vfs_fs_new_dentry( parent_ptr, 1802 name, 1803 inode_xp ); 1804 } 1805 else 1806 { 1807 rpc_vfs_fs_new_dentry_client( parent_cxy, 1808 parent_ptr, 1809 name, 1810 inode_xp, 1811 &error ); 1812 } 1813 1814 if ( error ) // dentry not found in parent mapper 1815 { 1816 printk("\n[ERROR] in %s : cannot get dentry <%s> in path <%s>\n", 1817 __FUNCTION__ , name, path ); 1771 printk("\n[ERROR] in %s : cannot get entry <%s> in parent <%s> mapper\n", 1772 __FUNCTION__ , child_name, parent_name ); 1818 1773 return -1; 1819 1774 } … … 1822 1777 if( DEBUG_VFS_UNLINK < cycle ) 1823 1778 printk("\n[%s] thread[%x,%x] : created missing inode & dentry <%s> in cluster %x\n", 1824 __FUNCTION__, process->pid, this->trdid, name, inode_cxy );1779 __FUNCTION__, process->pid, this->trdid, child_name, inode_cxy ); 1825 1780 #endif 1826 1781 … … 1856 1811 if( DEBUG_VFS_UNLINK < cycle ) 1857 1812 printk("\n[%s] thread[%x,%x] : unlink inode <%s> / type %s / %d links\n", 1858 __FUNCTION__, process->pid, this->trdid, name, vfs_inode_type_str(inode_type), inode_links ); 1813 __FUNCTION__, process->pid, this->trdid, child_name, 1814 vfs_inode_type_str(inode_type), inode_links ); 1859 1815 #endif 1860 1816 … … 1898 1854 // 2. 
update parent directory mapper 1899 1855 // and synchronize the parent directory on IOC device 1900 if (parent_cxy == local_cxy) 1901 { 1902 error = vfs_fs_remove_dentry( parent_ptr, 1903 dentry_ptr ); 1904 } 1905 else 1906 { 1907 rpc_vfs_fs_remove_dentry_client( parent_cxy, 1908 parent_ptr, 1909 dentry_ptr, 1910 &error ); 1911 } 1912 1856 error = vfs_fs_remove_dentry( parent_xp, 1857 dentry_ptr ); 1913 1858 if( error ) 1914 1859 { … … 2172 2117 2173 2118 ////////////////////////////////////////////////////////////////////////// 2174 // This staticfunction is called by the vfs_display() function.2119 // This recursive function is called by the vfs_display() function. 2175 2120 // that is supposed to take the TXT0 lock. 2176 2121 ////////////////////////////////////////////////////////////////////////// … … 2184 2129 uint32_t inode_size; 2185 2130 uint32_t inode_attr; 2186 uint32_t inode_dirty;2187 2131 void * inode_extd; 2188 2132 … … 2234 2178 2235 2179 // compute dirty 2236 inode_dirty = ((inode_attr & INODE_ATTR_DIRTY) != 0);2180 // inode_dirty = ((inode_attr & INODE_ATTR_DIRTY) != 0); unused [AG] dec 2019 2237 2181 2238 2182 // display inode 2239 nolock_printk("%s<%s> : %s / extd %x / %d bytes / dirty %d /cxy %x / inode %x / mapper %x\n",2183 nolock_printk("%s<%s> : %s / extd %x / %d bytes / cxy %x / inode %x / mapper %x\n", 2240 2184 indent_str[indent], name, vfs_inode_type_str( inode_type ), (uint32_t)inode_extd, 2241 inode_size, inode_ dirty, inode_cxy, inode_ptr, mapper_ptr );2185 inode_size, inode_cxy, inode_ptr, mapper_ptr ); 2242 2186 2243 2187 // scan directory entries when current inode is a directory … … 2328 2272 2329 2273 // print header 2330 nolock_printk("\n***** file systemstate\n\n");2274 nolock_printk("\n***** current VFS state\n\n"); 2331 2275 2332 2276 // call recursive function … … 2484 2428 vfs_inode_t * parent_ptr; // local pointer on parent inode 2485 2429 xptr_t dentry_xp; // extended pointer on dentry 2430 vfs_dentry_t * dentry_ptr; // 
local pointer on dentry 2486 2431 xptr_t child_xp; // extended pointer on child inode 2487 2432 cxy_t child_cxy; // cluster for child inode … … 2532 2477 last = false; 2533 2478 child_xp = XPTR_NULL; 2479 child_cxy = 0; 2480 child_ptr = NULL; 2534 2481 2535 2482 // loop on nodes in pathname … … 2570 2517 &child_xp ); 2571 2518 2572 // get child inode local pointer and cluster 2573 child_ptr = GET_PTR( child_xp ); 2574 child_cxy = GET_CXY( child_xp ); 2575 2576 if( found == false ) // not found in Inode Tree 2519 if( found == false ) // child not found in Inode Tree 2577 2520 { 2578 2521 // when a inode is not found in the Inode Tree: … … 2606 2549 #endif 2607 2550 // get parent inode FS type 2608 ctx_ptr = hal_remote_lpt( XPTR( parent_cxy ,&parent_ptr->ctx ) );2551 ctx_ptr = hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->ctx ) ); 2609 2552 fs_type = hal_remote_l32( XPTR( parent_cxy , &ctx_ptr->type ) ); 2610 2553 … … 2626 2569 } 2627 2570 2628 // get child inode local pointer 2629 child_ptr = GET_PTR( child_xp ); 2571 // get child inode and dentry local pointers 2572 child_ptr = GET_PTR( child_xp ); 2573 dentry_ptr = GET_PTR( dentry_xp ); 2630 2574 2631 2575 #if (DEBUG_VFS_LOOKUP & 1) … … 2636 2580 // scan parent mapper to find the missing dentry, and complete 2637 2581 // the initialisation of dentry and child inode descriptors 2638 if( parent_cxy == local_cxy ) 2639 { 2640 error = vfs_fs_new_dentry( parent_ptr, 2641 name, 2642 child_xp ); 2643 } 2644 else 2645 { 2646 rpc_vfs_fs_new_dentry_client( parent_cxy, 2647 parent_ptr, 2648 name, 2649 child_xp, 2650 &error ); 2651 } 2652 2653 // when the missing dentry is not in the parent mapper, 2654 // a new dentry must be registered in parent directory mapper 2655 if ( error ) 2582 error = vfs_fs_new_dentry_from_mapper( parent_xp, 2583 dentry_ptr ); 2584 2585 if ( error ) // an error can be fatal or non-fatal : 2656 2586 { 2657 2587 if ( last && create ) // add a brand new dentry in parent directory 2658 2588 { 2659 
error = vfs_new_dentry_init( parent_xp, 2660 dentry_xp, 2661 child_xp ); 2589 error = vfs_fs_new_dentry_to_mapper( parent_xp, 2590 dentry_ptr ); 2662 2591 if ( error ) 2663 2592 { 2664 printk("\n[ERROR] in %s : cannot init inode <%s> in path <%s>\n",2665 __FUNCTION__, name , pathname);2593 printk("\n[ERROR] in %s : cannot add dentry <%s> in parent dir\n", 2594 __FUNCTION__, name ); 2666 2595 vfs_remove_child_from_parent( dentry_xp ); 2667 2596 return -1; … … 2674 2603 vfs_inode_display( child_xp ); 2675 2604 #endif 2676 2677 2678 2605 } 2679 2606 else // not last or not create => error … … 2704 2631 if( type == INODE_TYPE_DIR ) 2705 2632 { 2706 if( child_cxy == local_cxy ) 2707 { 2708 error = vfs_inode_load_all_pages( child_ptr ); 2709 } 2710 else 2711 { 2712 rpc_vfs_inode_load_all_pages_client( child_cxy, 2713 child_ptr, 2714 &error ); 2715 } 2633 error = vfs_inode_load_all_pages( child_xp ); 2634 2716 2635 if ( error ) 2717 2636 { … … 2731 2650 } 2732 2651 } 2733 else // child directly found in inode tree2652 else // child found in Inode Tree 2734 2653 { 2654 // get child inode local pointer and cluster 2655 child_ptr = GET_PTR( child_xp ); 2656 child_cxy = GET_CXY( child_xp ); 2735 2657 2736 2658 #if (DEBUG_VFS_LOOKUP & 1) … … 2759 2681 // } 2760 2682 2761 // take lock on child inode and release lock on parent2762 // vfs_inode_lock( child_xp );2763 // vfs_inode_unlock( parent_xp );2764 2765 2683 // exit when last 2766 2684 if ( last ) // last inode in path => return relevant info … … 2792 2710 } 2793 2711 } 2794 else // not the last inode in path => update loop variables2712 else // not the last node in path => update loop variables 2795 2713 { 2796 2714 parent_xp = child_xp; 2797 2715 current = next; 2798 2716 } 2799 } 2717 } // end while loop on nodes in pathname 2718 2719 #if ( DEBUG_VFS_LOOKUP & 1 ) 2720 if( DEBUG_VFS_LOOKUP < cycle ) 2721 vfs_display( root_xp ); 2722 #endif 2800 2723 2801 2724 return 0; 2802 2725 2803 2726 } // end vfs_lookup() 2804 2805 
////////////////////////////////////////////////2806 error_t vfs_new_dentry_init( xptr_t parent_xp,2807 xptr_t dentry_xp,2808 xptr_t child_xp )2809 {2810 error_t error;2811 uint32_t cluster_id;2812 uint32_t child_type;2813 uint32_t child_size;2814 2815 #if DEBUG_VFS_NEW_DENTRY_INIT2816 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH];2817 char child_name[CONFIG_VFS_MAX_NAME_LENGTH];2818 vfs_inode_get_name( parent_xp , parent_name );2819 vfs_inode_get_name( child_xp , child_name );2820 uint32_t cycle = (uint32_t)hal_get_cycles();2821 thread_t * this = CURRENT_THREAD;2822 if( DEBUG_VFS_NEW_DENTRY_INIT < cycle )2823 printk("\n[%s] thread[%x,%x] enter / parent <%s> / child <%s> / cycle %d\n",2824 __FUNCTION__ , this->process->pid, this->trdid, parent_name, child_name, cycle );2825 #endif2826 2827 // get parent inode cluster and local pointer2828 cxy_t parent_cxy = GET_CXY( parent_xp );2829 vfs_inode_t * parent_ptr = GET_PTR( parent_xp );2830 2831 // get dentry local pointer2832 vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp );2833 2834 // get child inode cluster and local pointer2835 cxy_t child_cxy = GET_CXY( child_xp );2836 vfs_inode_t * child_ptr = GET_PTR( child_xp );2837 2838 // 1. allocate one free cluster_id in file system to child inode,2839 // and update the File Allocation Table in both the FAT mapper and IOC device.2840 // It depends on the child inode FS type.2841 vfs_ctx_t * ctx = hal_remote_lpt( XPTR( child_cxy , &child_ptr->ctx ) );2842 2843 error = vfs_fs_cluster_alloc( ctx->type,2844 &cluster_id );2845 if ( error )2846 {2847 printk("\n[ERROR] in %s : cannot find a free VFS cluster_id\n",2848 __FUNCTION__ );2849 return -1;2850 }2851 2852 #if( DEBUG_VFS_NEW_DENTRY_INIT & 1)2853 if( DEBUG_VFS_NEW_DENTRY_INIT < cycle )2854 printk("\n[%s] thread[%x,%x] allocated FS cluster_id %x to <%s>\n",2855 __FUNCTION__ , this->process->pid, this->trdid, cluster_id, child_name );2856 #endif2857 2858 // 2. 
update the child inode descriptor size and extend2859 child_type = hal_remote_l32( XPTR( child_cxy , &child_ptr->type ) );2860 child_size = 0;2861 2862 hal_remote_s32( XPTR( child_cxy , &child_ptr->size ) , child_size );2863 hal_remote_spt( XPTR( child_cxy , &child_ptr->extend ) , (void*)(intptr_t)cluster_id );2864 2865 // 3. update the parent inode mapper, and2866 // update the dentry extension if required2867 if( local_cxy == parent_cxy )2868 {2869 error = vfs_fs_add_dentry( parent_ptr,2870 dentry_ptr );2871 }2872 else2873 {2874 rpc_vfs_fs_add_dentry_client( parent_cxy,2875 parent_ptr,2876 dentry_ptr,2877 &error );2878 }2879 if ( error )2880 {2881 printk("\n[ERROR] in %s : cannot register child in parent directory\n",2882 __FUNCTION__ );2883 return -1;2884 }2885 2886 #if DEBUG_VFS_NEW_DENTRY_INIT2887 cycle = (uint32_t)hal_get_cycles();2888 if( DEBUG_VFS_NEW_DENTRY_INIT < cycle )2889 printk("\n[%s] thread[%x,%x] exit / parent <%s> / child <%s> / cycle %d\n",2890 __FUNCTION__ , this->process->pid, this->trdid, parent_name, child_name, cycle );2891 #endif2892 2893 return 0;2894 2895 } // end vfs_new_dentry_init()2896 2727 2897 2728 /////////////////////////////////////////////////// … … 2906 2737 xptr_t dentry_xp; // extended pointer on dentry (used for . and ..) 2907 2738 vfs_dentry_t * dentry_ptr; // local pointer on dentry (used for . and ..) 
2908 2909 // xptr_t parents_root_xp; // extended pointer on inode "parents" field2910 // xptr_t parents_entry_xp; // extended pointer on dentry "parents" field2911 2739 xptr_t children_xhtab_xp; // extended pointer on inode "children" field 2912 2740 xptr_t children_entry_xp; // extended pointer on dentry "children" field … … 2924 2752 #endif 2925 2753 2926 // get newdirectory cluster and local pointer2754 // get child directory cluster and local pointer 2927 2755 child_cxy = GET_CXY( child_xp ); 2928 2756 child_ptr = GET_PTR( child_xp ); … … 2933 2761 2934 2762 //////////////////////////// create <.> dentry ////////////////////// 2935 if( child_cxy == local_cxy ) 2936 { 2937 error = vfs_dentry_create( fs_type, 2938 ".", 2939 &dentry_xp ); 2940 } 2941 else 2942 { 2943 rpc_vfs_dentry_create_client( child_cxy, 2944 fs_type, 2945 ".", 2946 &dentry_xp, 2947 &error ); 2948 } 2763 error = vfs_dentry_create( child_cxy, 2764 fs_type, 2765 ".", 2766 &dentry_xp ); 2949 2767 if( error ) 2950 2768 { … … 2967 2785 children_xhtab_xp = XPTR( child_cxy , &child_ptr->children ); 2968 2786 children_entry_xp = XPTR( child_cxy , &dentry_ptr->children ); 2787 2969 2788 error = xhtab_insert( children_xhtab_xp , "." 
, children_entry_xp ); 2789 2970 2790 if( error ) 2971 2791 { … … 2974 2794 return -1; 2975 2795 } 2976 2977 2796 2978 // don't register <.> dentry in child_inode xlist of parents2979 // parents_root_xp = XPTR( child_cxy , &child_ptr->parents );2980 // parents_entry_xp = XPTR( child_cxy , &dentry_ptr->parents );2981 // xlist_add_first( parents_root_xp , parents_entry_xp );2982 // hal_remote_atomic_add( XPTR( child_cxy , &child_ptr->links ) , 1 );2983 2984 2797 // update "parent" and "child_xp" fields in <.> dentry 2985 2798 hal_remote_s64( XPTR( child_cxy , &dentry_ptr->child_xp ) , child_xp ); … … 2997 2810 if( child_xp != parent_xp ) 2998 2811 { 2999 if( child_cxy == local_cxy ) 3000 { 3001 error = vfs_fs_add_dentry( child_ptr, 3002 dentry_ptr ); 3003 } 3004 else 3005 { 3006 rpc_vfs_fs_add_dentry_client( child_cxy, 3007 child_ptr, 3008 dentry_ptr, 3009 &error ); 3010 } 2812 error = vfs_fs_add_dentry( child_xp, 2813 dentry_ptr ); 3011 2814 if( error ) 3012 2815 { … … 3026 2829 3027 2830 ///////////////////////////// create <..> dentry /////////////////////// 3028 if( child_cxy == local_cxy ) 3029 { 3030 error = vfs_dentry_create( fs_type, 3031 "..", 3032 &dentry_xp ); 3033 } 3034 else 3035 { 3036 rpc_vfs_dentry_create_client( child_cxy, 3037 fs_type, 3038 "..", 3039 &dentry_xp, 3040 &error ); 3041 } 2831 error = vfs_dentry_create( child_cxy, 2832 fs_type, 2833 "..", 2834 &dentry_xp ); 3042 2835 if( error ) 3043 2836 { … … 3085 2878 if( child_xp != parent_xp ) 3086 2879 { 3087 if( child_cxy == local_cxy ) 3088 { 3089 error = vfs_fs_add_dentry( child_ptr, 3090 dentry_ptr ); 3091 } 3092 else 3093 { 3094 rpc_vfs_fs_add_dentry_client( child_cxy, 3095 child_ptr, 3096 dentry_ptr, 3097 &error ); 3098 } 2880 error = vfs_fs_add_dentry( child_xp, 2881 dentry_ptr ); 3099 2882 if( error ) 3100 2883 { … … 3259 3042 3260 3043 3261 ////////////////////////////////////////////////////////////// //////3044 ////////////////////////////////////////////////////////////// 3262 3045 
error_t vfs_add_child_in_parent( cxy_t child_cxy, 3263 3046 vfs_fs_type_t fs_type, … … 3296 3079 3297 3080 // 1. create dentry in parent cluster 3298 if( parent_cxy == local_cxy ) // parent cluster is local 3299 { 3300 error = vfs_dentry_create( fs_type, 3301 name, 3302 &new_dentry_xp ); 3303 } 3304 else // parent cluster is remote 3305 { 3306 rpc_vfs_dentry_create_client( parent_cxy, 3307 fs_type, 3308 name, 3309 &new_dentry_xp, 3310 &error ); 3311 } 3312 3081 error = vfs_dentry_create( parent_cxy, 3082 fs_type, 3083 name, 3084 &new_dentry_xp ); 3313 3085 if( error ) 3314 3086 { … … 3334 3106 uint32_t gid = 0; 3335 3107 3336 if( child_cxy == local_cxy ) // child cluster is local 3337 { 3338 error = vfs_inode_create( fs_type, 3339 attr, 3340 mode, 3341 uid, 3342 gid, 3343 &new_inode_xp ); 3344 } 3345 else // child cluster is remote 3346 { 3347 rpc_vfs_inode_create_client( child_cxy, 3348 fs_type, 3349 attr, 3350 mode, 3351 uid, 3352 gid, 3353 &new_inode_xp, 3354 &error ); 3355 } 3356 3108 error = vfs_inode_create( child_cxy, 3109 fs_type, 3110 attr, 3111 mode, 3112 uid, 3113 gid, 3114 &new_inode_xp ); 3357 3115 if( error ) 3358 3116 { … … 3360 3118 __FUNCTION__ , child_cxy ); 3361 3119 3362 if( parent_cxy == local_cxy ) vfs_dentry_destroy( new_dentry_ptr ); 3363 else rpc_vfs_dentry_destroy_client( parent_cxy , new_dentry_ptr ); 3120 vfs_dentry_destroy( new_dentry_xp ); 3364 3121 return -1; 3365 3122 } … … 3374 3131 #endif 3375 3132 3376 3377 3133 // 3. 
register new_dentry in new_inode xlist of parents 3378 3134 parents_root_xp = XPTR( child_cxy , &new_inode_ptr->parents ); … … 3457 3213 3458 3214 // delete dentry descriptor 3459 if( parent_cxy == local_cxy ) 3460 { 3461 vfs_dentry_destroy( dentry_ptr ); 3462 } 3463 else 3464 { 3465 rpc_vfs_dentry_destroy_client( parent_cxy, 3466 dentry_ptr ); 3467 } 3215 vfs_dentry_destroy( dentry_xp ); 3468 3216 3469 3217 // delete child_inode descriptor if last link 3470 if( links == 1 ) 3471 { 3472 if( child_cxy == local_cxy ) 3473 { 3474 vfs_inode_destroy( child_inode_ptr ); 3475 } 3476 else 3477 { 3478 rpc_vfs_inode_destroy_client( child_cxy , child_inode_ptr ); 3479 } 3480 } 3218 if( links == 1 ) vfs_inode_destroy( child_inode_xp ); 3481 3219 3482 3220 } // end vfs_remove_child_from_parent() … … 3489 3227 ////////////////////////////////////////////////////////////////////////////////////////// 3490 3228 3491 ////////////////////////////////////////////// 3492 error_t vfs_fs_ move_page( xptr_t page_xp,3493 cmd_type_t cmd_type)3229 /////////////////////////////////////////////////// 3230 error_t vfs_fs_add_dentry( xptr_t inode_xp, 3231 vfs_dentry_t * dentry_ptr ) 3494 3232 { 3495 3233 error_t error = 0; 3496 3234 3497 assert( (page_xp != XPTR_NULL) , "page pointer is NULL" ); 3498 3499 page_t * page_ptr = GET_PTR( page_xp ); 3500 cxy_t page_cxy = GET_CXY( page_xp ); 3501 3502 // get local pointer on page mapper 3503 mapper_t * mapper = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) ); 3504 3505 assert( (mapper != NULL) , "no mapper for page" ); 3235 assert( (inode_xp != XPTR_NULL) , "inode_xp argument is NULL" ); 3236 assert( (dentry_ptr != NULL ) , "dentry_ptr argument is NULL" ); 3237 3238 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); 3239 cxy_t inode_cxy = GET_CXY( inode_xp ); 3240 3241 // get inode mapper 3242 mapper_t * mapper = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 3243 3244 assert( (mapper != NULL) , "mapper pointer is NULL") 3506 3245 3507 
3246 // get FS type 3508 vfs_fs_type_t fs_type = hal_remote_l32( XPTR( page_cxy , &mapper->type ) );3247 vfs_fs_type_t fs_type = hal_remote_l32( XPTR( inode_cxy , &mapper->fs_type ) ); 3509 3248 3510 3249 // call relevant FS function 3511 3250 if( fs_type == FS_TYPE_FATFS ) 3512 3251 { 3513 error = fatfs_ move_page( page_xp , cmd_type);3252 error = fatfs_add_dentry( inode_xp , dentry_ptr ); 3514 3253 } 3515 3254 else if( fs_type == FS_TYPE_RAMFS ) 3516 3255 { 3517 assert( false , "should not be called for RAMFS\n" );3256 error = 0; // does nothing for RAMFS 3518 3257 } 3519 3258 else if( fs_type == FS_TYPE_DEVFS ) 3520 3259 { 3521 assert( false , "should not be called for DEVFS\n" );3260 error = 0; // does nothing for DEVFS 3522 3261 } 3523 3262 else … … 3528 3267 return error; 3529 3268 3530 } // end vfs_fs_ move_page()3531 3532 //////////////////////////////////////////////// 3533 error_t vfs_fs_ add_dentry( vfs_inode_t * inode,3534 vfs_dentry_t * dentry)3269 } // end vfs_fs_add_dentry() 3270 3271 ////////////////////////////////////////////////////// 3272 error_t vfs_fs_remove_dentry( xptr_t inode_xp, 3273 vfs_dentry_t * dentry_ptr ) 3535 3274 { 3536 3275 error_t error = 0; 3537 3276 3538 assert( (inode != NULL) , "inode pointer is NULL" ); 3539 assert( (dentry != NULL) , "dentry pointer is NULL" ); 3540 3541 mapper_t * mapper = inode->mapper; 3542 3543 assert( (mapper != NULL) , "mapper pointer is NULL" ); 3277 assert( (inode_xp != XPTR_NULL) , "inode_xp argument is NULL" ); 3278 assert( (dentry_ptr != NULL ) , "dentry_ptr argument is NULL" ); 3279 3280 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); 3281 cxy_t inode_cxy = GET_CXY( inode_xp ); 3282 3283 // get inode mapper 3284 mapper_t * mapper = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 3285 3286 assert( (mapper != NULL) , "mapper pointer is NULL") 3544 3287 3545 3288 // get FS type 3546 vfs_fs_type_t fs_type = mapper->type;3289 vfs_fs_type_t fs_type = hal_remote_l32( XPTR( inode_cxy , 
&mapper->fs_type ) ); 3547 3290 3548 3291 // call relevant FS function 3549 3292 if( fs_type == FS_TYPE_FATFS ) 3550 3293 { 3551 error = fatfs_add_dentry( inode , dentry ); 3294 error = fatfs_remove_dentry( inode_xp , dentry_ptr ); 3295 3552 3296 } 3553 3297 else if( fs_type == FS_TYPE_RAMFS ) … … 3566 3310 return error; 3567 3311 3568 } // end vfs_fs_ add_dentry()3569 3570 /////////////////////////////////////////////////// 3571 error_t vfs_fs_ remove_dentry( vfs_inode_t * inode,3572 vfs_dentry_t * dentry)3312 } // end vfs_fs_remove_dentry() 3313 3314 /////////////////////////////////////////////////////////////// 3315 error_t vfs_fs_new_dentry_from_mapper( xptr_t inode_xp, 3316 vfs_dentry_t * dentry_ptr ) 3573 3317 { 3574 3318 error_t error = 0; 3575 3319 3576 assert( (inode != NULL) , "inode pointer is NULL" ); 3577 assert( (dentry != NULL) , "dentry pointer is NULL" ); 3578 3579 mapper_t * mapper = inode->mapper; 3580 3581 assert( (mapper != NULL) , "mapper pointer is NULL" ); 3320 assert( (inode_xp != XPTR_NULL) , "inode_xp argument is NULL" ); 3321 assert( (dentry_ptr != NULL ) , "dentry_ptr argument is NULL" ); 3322 3323 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); 3324 cxy_t inode_cxy = GET_CXY( inode_xp ); 3325 3326 // get inode mapper 3327 mapper_t * mapper = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 3328 3329 assert( (mapper != NULL) , "mapper pointer is NULL") 3582 3330 3583 3331 // get FS type 3584 vfs_fs_type_t fs_type = mapper->type;3332 vfs_fs_type_t fs_type = hal_remote_l32( XPTR( inode_cxy , &mapper->fs_type ) ); 3585 3333 3586 3334 // call relevant FS function 3587 3335 if( fs_type == FS_TYPE_FATFS ) 3588 3336 { 3589 error = fatfs_ remove_dentry( inode , dentry );3337 error = fatfs_new_dentry_from_mapper( inode_xp , dentry_ptr ); 3590 3338 } 3591 3339 else if( fs_type == FS_TYPE_RAMFS ) 3592 3340 { 3593 error = 0; // does nothing for RAMFS3341 assert( false , "should not be called for RAMFS" ); 3594 3342 } 3595 3343 else if( 
fs_type == FS_TYPE_DEVFS ) 3596 3344 { 3597 error = 0; // does nothing for DEVFS3345 assert( false , "should not be called for DEVFS" ); 3598 3346 } 3599 3347 else … … 3604 3352 return error; 3605 3353 3606 } // end vfs_fs_remove_dentry() 3607 3608 //////////////////////////////////////////////// 3609 error_t vfs_fs_new_dentry( vfs_inode_t * parent, 3610 char * name, 3611 xptr_t child_xp ) 3354 } // end vfs_fs_new_dentry_from_mapper() 3355 3356 /////////////////////////////////////////////////////////////// 3357 error_t vfs_fs_new_dentry_to_mapper( xptr_t inode_xp, 3358 vfs_dentry_t * dentry_ptr ) 3612 3359 { 3613 3360 error_t error = 0; 3614 3361 3615 // check arguments 3616 assert( (parent != NULL) , "parent pointer is NULL"); 3617 assert( (child_xp != XPTR_NULL) , "child pointer is NULL"); 3618 3619 // get parent inode FS type 3620 vfs_fs_type_t fs_type = parent->ctx->type; 3362 assert( (inode_xp != XPTR_NULL) , "inode_xp argument is NULL" ); 3363 assert( (dentry_ptr != NULL ) , "dentry_ptr argument is NULL" ); 3364 3365 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); 3366 cxy_t inode_cxy = GET_CXY( inode_xp ); 3367 3368 // get inode mapper 3369 mapper_t * mapper = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 3370 3371 assert( (mapper != NULL) , "mapper pointer is NULL") 3372 3373 // get FS type 3374 vfs_fs_type_t fs_type = hal_remote_l32( XPTR( inode_cxy , &mapper->fs_type ) ); 3621 3375 3622 3376 // call relevant FS function 3623 3377 if( fs_type == FS_TYPE_FATFS ) 3624 3378 { 3625 error = fatfs_new_dentry ( parent , name , child_xp);3379 error = fatfs_new_dentry_to_mapper( inode_xp , dentry_ptr ); 3626 3380 } 3627 3381 else if( fs_type == FS_TYPE_RAMFS ) … … 3640 3394 return error; 3641 3395 3642 } // end vfs_fs_new_dentry() 3643 3644 /////////////////////////////////////////////////// 3645 error_t vfs_fs_update_dentry( vfs_inode_t * inode, 3646 vfs_dentry_t * dentry, 3647 uint32_t size ) 3396 } // end vfs_fs_new_dentry_to_mapper() 3397 3398 
////////////////////////////////////////////////////// 3399 error_t vfs_fs_update_dentry( xptr_t inode_xp, 3400 vfs_dentry_t * dentry_ptr ) 3648 3401 { 3649 3402 error_t error = 0; 3650 3403 3651 // check arguments 3652 assert( (inode != NULL) , "inode pointer is NULL"); 3653 assert( (dentry != NULL) , "dentry pointer is NULL"); 3654 3655 // get parent inode FS type 3656 vfs_fs_type_t fs_type = inode->ctx->type; 3404 assert( (inode_xp != XPTR_NULL) , "inode_xp argument is NULL" ); 3405 assert( (dentry_ptr != NULL ) , "dentry_ptr argument is NULL" ); 3406 3407 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); 3408 cxy_t inode_cxy = GET_CXY( inode_xp ); 3409 3410 // get inode mapper 3411 mapper_t * mapper = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 3412 3413 assert( (mapper != NULL) , "mapper pointer is NULL") 3414 3415 // get FS type 3416 vfs_fs_type_t fs_type = hal_remote_l32( XPTR( inode_cxy , &mapper->fs_type ) ); 3657 3417 3658 3418 // call relevant FS function 3659 3419 if( fs_type == FS_TYPE_FATFS ) 3660 3420 { 3661 error = fatfs_update_dentry( inode , dentry , size);3421 error = fatfs_update_dentry( inode_xp , dentry_ptr ); 3662 3422 } 3663 3423 else if( fs_type == FS_TYPE_RAMFS ) … … 3739 3499 } // end vfs_fs_get_user_dir() 3740 3500 3741 ///////////////////////////////////////////// ///3742 error_t vfs_fs_sync_inode( vfs_inode_t * inode)3501 ///////////////////////////////////////////// 3502 error_t vfs_fs_sync_inode( xptr_t inode_xp ) 3743 3503 { 3744 3504 error_t error = 0; 3745 3505 3746 // check arguments 3747 assert( (inode != NULL) , "inode pointer is NULL"); 3748 3749 // get inode FS type 3750 vfs_fs_type_t fs_type = inode->ctx->type; 3506 assert( (inode_xp != XPTR_NULL) , "inode_xp argument is NULL"); 3507 3508 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); 3509 cxy_t inode_cxy = GET_CXY( inode_xp ); 3510 3511 // get inode mapper 3512 mapper_t * mapper = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 3513 3514 assert( (mapper 
!= NULL) , "mapper pointer is NULL") 3515 3516 // get FS type 3517 vfs_fs_type_t fs_type = hal_remote_l32( XPTR( inode_cxy , &mapper->fs_type ) ); 3751 3518 3752 3519 // call relevant FS function 3753 3520 if( fs_type == FS_TYPE_FATFS ) 3754 3521 { 3755 error = fatfs_sync_inode( inode );3522 error = fatfs_sync_inode( inode_xp ); 3756 3523 } 3757 3524 else if( fs_type == FS_TYPE_RAMFS ) … … 3799 3566 } // end vfs_fs_sync_fat() 3800 3567 3801 //////////////////////////////////////////////////////3802 error_t vfs_fs_sync_free_info( vfs_fs_type_t fs_type )3803 {3804 error_t error = 0;3805 3806 // call relevant FS function3807 if( fs_type == FS_TYPE_FATFS )3808 {3809 error = fatfs_sync_free_info();3810 }3811 else if( fs_type == FS_TYPE_RAMFS )3812 {3813 assert( false , "should not be called for RAMFS" );3814 }3815 else if( fs_type == FS_TYPE_DEVFS )3816 {3817 assert( false , "should not be called for DEVFS" );3818 }3819 else3820 {3821 assert( false , "undefined file system type" );3822 }3823 3824 return error;3825 3826 } // end vfs_fs_sync_fat()3827 3828 /////////////////////////////////////////////////3829 error_t vfs_fs_cluster_alloc( uint32_t fs_type,3830 uint32_t * cluster )3831 {3832 error_t error = 0;3833 3834 // call relevant FS function3835 if( fs_type == FS_TYPE_FATFS )3836 {3837 error = fatfs_cluster_alloc( cluster );3838 }3839 else if( fs_type == FS_TYPE_RAMFS )3840 {3841 assert( false , "should not be called for RAMFS" );3842 }3843 else if( fs_type == FS_TYPE_DEVFS )3844 {3845 assert( false , "should not be called for DEVFS" );3846 }3847 else3848 {3849 assert( false , "undefined file system type" );3850 }3851 3852 return error;3853 3854 } // end vfs_fs_cluster_alloc()3855 3856 3568 //////////////////////////////////////////////// 3857 3569 error_t vfs_fs_release_inode( xptr_t inode_xp ) … … 3859 3571 error_t error = 0; 3860 3572 3861 assert( (inode_xp != XPTR_NULL) , "inode pointeris NULL")3573 assert( (inode_xp != XPTR_NULL) , "inode_xp argument is NULL") 
3862 3574 3863 3575 vfs_inode_t * inode_ptr = GET_PTR( inode_xp ); 3864 3576 cxy_t inode_cxy = GET_CXY( inode_xp ); 3865 3577 3866 // get local pointer on pagemapper3578 // get local pointer on mapper 3867 3579 mapper_t * mapper = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 3868 3580 … … 3870 3582 3871 3583 // get FS type from mapper 3872 vfs_fs_type_t fs_type = hal_remote_l32( XPTR( inode_cxy , &mapper-> type ) );3584 vfs_fs_type_t fs_type = hal_remote_l32( XPTR( inode_cxy , &mapper->fs_type ) ); 3873 3585 3874 3586 // call relevant FS function … … 3894 3606 } // end vfs_fs_release_inode() 3895 3607 3896 3608 ////////////////////////////////////////////////// 3609 error_t vfs_fs_move_page( xptr_t page_xp, 3610 ioc_cmd_type_t cmd_type ) 3611 { 3612 error_t error = 0; 3613 3614 assert( (page_xp != XPTR_NULL) , "page pointer is NULL" ); 3615 3616 page_t * page_ptr = GET_PTR( page_xp ); 3617 cxy_t page_cxy = GET_CXY( page_xp ); 3618 3619 // get local pointer on mapper 3620 mapper_t * mapper = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) ); 3621 3622 assert( (mapper != NULL) , "no mapper for page" ); 3623 3624 // get FS type 3625 vfs_fs_type_t fs_type = hal_remote_l32( XPTR( page_cxy , &mapper->fs_type ) ); 3626 3627 // call relevant FS function 3628 if( fs_type == FS_TYPE_FATFS ) 3629 { 3630 error = fatfs_move_page( page_xp , cmd_type ); 3631 } 3632 else if( fs_type == FS_TYPE_RAMFS ) 3633 { 3634 assert( false , "should not be called for RAMFS\n" ); 3635 } 3636 else if( fs_type == FS_TYPE_DEVFS ) 3637 { 3638 assert( false , "should not be called for DEVFS\n" ); 3639 } 3640 else 3641 { 3642 assert( false , "undefined file system type" ); 3643 } 3644 3645 return error; 3646 3647 } // end vfs_fs_move_page() 3648 3649 -
trunk/kernel/fs/vfs.h
r656 r657 3 3 * 4 4 * Author Mohamed Lamine Karaoui (2014,2015) 5 * Alain Greiner (2016,2017,2018 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 55 55 struct vseg_s; 56 56 struct page_s; 57 struct ksock_s; 57 58 58 59 /****************************************************************************************** … … 72 73 * The <extend> field is a pointer on the FS specific context extension. 73 74 * This extension is dynamically allocated by kernel_init in all clusters. 74 * In each cluster, both this VFS context and the FS specific context are handled as75 * private by the local OS intance.75 * In each cluster, the inum allocator can be accessed by any thread runing 76 * in any cluster, and is therefore protected by a remote_busylock. 76 77 *****************************************************************************************/ 77 78 78 79 typedef enum 79 80 { 80 FS_TYPE_ DEVFS = 0,81 FS_TYPE_ FATFS = 1,82 FS_TYPE_ RAMFS = 2,81 FS_TYPE_RAMFS = 0, 82 FS_TYPE_DEVFS = 1, 83 FS_TYPE_FATFS = 2, 83 84 84 85 FS_TYPES_NR = 3, … … 95 96 typedef struct vfs_ctx_s 96 97 { 97 vfs_fs_type_t type; /*! File System type */ 98 uint32_t attr; /*! global attributes for all files in FS */ 99 uint32_t total_clusters; /*! total number of clusters on device */ 100 uint32_t cluster_size; /*! cluster size on device (bytes) */ 101 xptr_t vfs_root_xp; /*! extended pointer on VFS root inode */ 102 busylock_t lock; /*! lock protecting inum allocator */ 103 uint32_t bitmap[BITMAP_SIZE(CONFIG_VFS_MAX_INODES)]; /* inum allocator */ 104 void * extend; /*! FS specific context extension */ 98 vfs_fs_type_t type; /*! File System type */ 99 uint32_t total_clusters; /*! total number of clusters on device */ 100 uint32_t cluster_size; /*! cluster size on device (bytes) */ 101 xptr_t vfs_root_xp; /*! extended pointer on VFS root inode */ 102 remote_busylock_t lock; /*! 
lock protecting inum allocator */ 103 uint32_t bitmap[BITMAP_SIZE(CONFIG_VFS_MAX_INODES)]; /* inum allocator */ 104 void * extend; /*! FS specific context extension */ 105 105 } 106 106 vfs_ctx_t; … … 109 109 * This structure define a VFS inode. 110 110 * An inode can have several children dentries (if it is a directory), an can have several 111 * parents dentries (if it has sseveral aliases links):111 * parents dentries (if it has several aliases links): 112 112 * - The "parents" field is the root of the xlist of parents dentries, and the "links" 113 113 * fiels define the number of aliases parent dentries. only a FILE inode can have … … 145 145 typedef enum 146 146 { 147 INODE_ATTR_DIRTY = 0x01, /* modified versus the value on device*/148 INODE_ATTR_INLOAD = 0x04, /* loading from device in progress*/149 INODE_ATTR_NEW = 0x08, /* not saved on device yet*/147 INODE_ATTR_DIRTY = 0x01, /*! modified versus the value on device */ 148 INODE_ATTR_INLOAD = 0x04, /*! loading from device in progress */ 149 INODE_ATTR_NEW = 0x08, /*! not saved on device yet */ 150 150 } 151 151 vfs_inode_attr_t; … … 193 193 194 194 /****************************************************************************************** 195 Rpt* This structure defines a directory entry.195 * This structure defines a directory entry. 196 196 * A dentry contains the name of a remote file/dir, an extended pointer on the 197 197 * inode representing this file/dir, a local pointer on the inode representing … … 225 225 typedef enum 226 226 { 227 FD_ATTR_READ_ENABLE = 0x01, /*! read access possible */228 FD_ATTR_WRITE_ENABLE = 0x02, /*! write access possible */229 FD_ATTR_APPEND = 0x04, /*! append on each write */230 FD_ATTR_CLOSE_EXEC = 0x08, /*! close file on exec */231 FD_ATTR_SYNC = 0x10, /*! synchronise FS on each write */232 FD_ATTR_IS_DIR = 0x20, /*! this is a directory */227 FD_ATTR_READ_ENABLE = 0x01, /*! read access possible */ 228 FD_ATTR_WRITE_ENABLE = 0x02, /*! 
write access possible */ 229 FD_ATTR_APPEND = 0x04, /*! append on each write */ 230 FD_ATTR_CLOSE_EXEC = 0x08, /*! close file on exec */ 231 FD_ATTR_SYNC = 0x10, /*! synchronise FS on each write */ 232 FD_ATTR_IS_DIR = 0x20, /*! this is a directory */ 233 233 } 234 234 vfs_file_attr_t; … … 237 237 { 238 238 struct vfs_ctx_s * ctx; /*! local pointer on FS context. */ 239 uint32_t gc; /*! generation counter */240 239 vfs_file_attr_t attr; /*! file attributes bit vector (see above) */ 241 240 vfs_inode_type_t type; /*! same type as inode */ … … 245 244 struct mapper_s * mapper; /*! associated file cache */ 246 245 struct vfs_inode_s * inode; /*! local pointer on associated inode */ 246 struct socket_s * socket; /*! local pointer on associated socket */ 247 247 void * extend; /*! FS specific extension */ 248 248 } … … 255 255 256 256 /****************************************************************************************** 257 * This function initialise a (statically allocated) VFS context in local cluster. 258 ****************************************************************************************** 257 * This function initialises a (statically allocated) VFS context in cluster identified 258 * by the <cxy> argument. 259 * It is called by the kernel_init() function. 260 ****************************************************************************************** 261 * @ cxy : target cluster identifier. 259 262 * @ fs_type : file system type. 260 * @ attr : global attributes (for all files in FS.261 263 * @ total_clusters : total number of clusters on device. 262 264 * @ cluster_size : cluster size on device (bytes). … … 264 266 * @ extend : fs_type_specific extension. 
265 267 *****************************************************************************************/ 266 void vfs_ctx_init( vfs_fs_type_t type,267 uint32_t attr,268 void vfs_ctx_init( cxy_t cxy, 269 vfs_fs_type_t type, 268 270 uint32_t total_clusters, 269 271 uint32_t cluster_size, … … 277 279 * - the 16 LSB bits contains the local inode identifier : lid 278 280 ****************************************************************************************** 279 * @ ctx : localpointer on file system context.280 * @ inum : [ou ] buffer for allocated inode identifier.281 * @ ctx_xp : [in] extended pointer on file system context. 282 * @ inum : [out] buffer for allocated inode identifier. 281 283 * @ return 0 if success / return non-zero if error. 282 284 *****************************************************************************************/ 283 error_t vfs_ctx_inum_alloc( vfs_ctx_t * ctx,284 uint32_t 285 error_t vfs_ctx_inum_alloc( xptr_t ctx_xp, 286 uint32_t * inum ); 285 287 286 288 /****************************************************************************************** 287 289 * This function release an inode identifier. 288 290 ****************************************************************************************** 289 * @ ctx : localpointer on file system context.290 * @ inum : released inode identifier.291 *****************************************************************************************/ 292 void vfs_ctx_inum_release( vfs_ctx_t * ctx,291 * @ ctx_xp : [in] extended pointer on file system context. 292 * @ inum : [in] released inode identifier. 
293 *****************************************************************************************/ 294 void vfs_ctx_inum_release( xptr_t ctx_xp, 293 295 uint32_t inum ); 294 296 … … 305 307 306 308 /****************************************************************************************** 307 * This function allocates memory from local cluster for an inode descriptor and the 308 * associated mapper, and partially initialise this inode from arguments values. 309 * This function allocates memory in cluster identified by the <cxy> argument 310 * for an inode descriptor and for the associated mapper, and partially initialise 311 * this inode from arguments values. 309 312 * It does NOT link it to the Inode Tree, as this is done by add_child_in_parent(). 310 * It must called by a local thread. Use the RPC_INODE_CREATE if client thread is remote.311 ****************************************************************************************** 312 * @ fs_type : file system type.313 * @ inode_type : inodetype.314 * @ attr : inode attributes.315 * @ rights : inode access rights.316 * @ uid : user owner ID.317 * @ gid : group owner ID.313 * It can be called by any thread running in any cluster. 314 ****************************************************************************************** 315 * @ cxy : [in] target cluster identifier 316 * @ fs_type : [in] file system type. 317 * @ attr : [in] inode attributes. 318 * @ rights : [in] inode access rights. 319 * @ uid : [in] user owner ID. 320 * @ gid : [in] group owner ID. 318 321 * @ inode_xp : [out] buffer for extended pointer on created inode. 319 * @ return 0 if success / return ENOMEM or EINVAL if error. 320 *****************************************************************************************/ 321 error_t vfs_inode_create( vfs_fs_type_t fs_type, 322 * @ return 0 if success / return -1 if error. 
323 *****************************************************************************************/ 324 error_t vfs_inode_create( cxy_t cxy, 325 vfs_fs_type_t fs_type, 322 326 uint32_t attr, 323 327 uint32_t rights, … … 330 334 * all memory allocated to the mapper (both mapper descriptor and radix tree). 331 335 * The mapper should not contain any dirty page (should be synchronized before deletion). 332 * It must be executed by a thread running in the cluster containing the inode. 333 * Use the rpc_vfs_inode_destroy_client() function if required. 336 * It can be called by any thread running in any cluster. 334 337 ****************************************************************************************** 335 * @ inode : localpointer on inode descriptor.336 *****************************************************************************************/ 337 void vfs_inode_destroy( vfs_inode_t * inode);338 * @ inode_xp : extended pointer on inode descriptor. 339 *****************************************************************************************/ 340 void vfs_inode_destroy( xptr_t inode_xp ); 338 341 339 342 /****************************************************************************************** 340 343 * This function returns the <size> of a file/dir from a remote inode, 341 344 * taking the remote_rwlock protecting <size> in READ_MODE. 345 * It can be called by any thread running in any cluster. 342 346 ***************************************************************************************** 343 347 * @ inode_xp : extended pointer on the remote inode. … … 350 354 * It takes the rwlock protecting the file size in WRITE_MODE, and set the "size" field 351 355 * when the current size is smaller than the requested <size> argument. 356 * It can be called by any thread running in any cluster. 352 357 ***************************************************************************************** 353 358 * @ inode_xp : extended pointer on the remote inode. 
… … 358 363 359 364 /****************************************************************************************** 360 * This function takes the main lock of a remote inode .365 * This function takes the main lock of a remote inode identified by the <inode_xp>. 361 366 * This lock protects all inode fields, including the children dentries. 367 * It can be called by any thread running in any cluster. 362 368 ***************************************************************************************** 363 369 * @ inode_xp : extended pointer on the remote inode. … … 366 372 367 373 /****************************************************************************************** 368 * This function releases the main lock of a remote inode .374 * This function releases the main lock of a remote inode identified by <inode_xp>. 369 375 * This lock protects all inode fields, including the children dentries. 376 * It can be called by any thread running in any cluster. 370 377 ***************************************************************************************** 371 378 * @ inode_xp : extended pointer on the remote inode. … … 377 384 * argument to a local buffer identified by the <name> argument. 378 385 * The local buffer size must be at least CONFIG_VFS_MAX_NAME_LENGTH. 386 * It can be called by any thread running in any cluster. 379 387 ****************************************************************************************** 380 388 * @ inode_xp : extended pointer on the remote inode. … … 385 393 386 394 /****************************************************************************************** 387 * This function accesses successively all pages of a file identified by the <inode >,395 * This function accesses successively all pages of a file identified by the <inode_xp>, 388 396 * argument, to force misses, and load all pages into mapper.
389 397 * The target inode can be a directory or a file, but this function is mainly used 390 398 * to prefetch a complete directory to the mapper. 391 * It must be executed by a thread running in the cluster containing the inode.392 399 * This function does NOT take any lock. 393 ****************************************************************************************** 394 * @ inode : local pointer on inode. 400 * It can be called by any thread running in any cluster. 401 ****************************************************************************************** 402 * @ inode_xp : extended pointer on inode. 395 403 * @ return 0 if success / return -1 if device access failure. 396 404 *****************************************************************************************/ 397 error_t vfs_inode_load_all_pages( vfs_inode_t * inode);398 399 /****************************************************************************************** 400 * This debug function displays the current state of an inode descriptor identified by401 * the <inode_xp> argument.405 error_t vfs_inode_load_all_pages( xptr_t inode_xp ); 406 407 /****************************************************************************************** 408 * This debug function displays the current state of an inode descriptor. 409 * It can be called by any thread running in any cluster. 402 410 *****************************************************************************************/ 403 411 void vfs_inode_display( xptr_t inode_xp ); … … 408 416 409 417 /****************************************************************************************** 410 * This function allocates memory from local cluster for a dentry descriptor, 411 * initialises it from arguments values, and returns the extended pointer on dentry. 412 * It must called by a local thread. Use the RPC_DENTRY_CREATE if client thread is remote.
413 ****************************************************************************************** 418 * This function allocates memory in cluster identified by the <cxy> argument 419 * for a dentry descriptor, initialises it from arguments values, and returns 420 * in <dentry_xp> the extended pointer on dentry. 421 * It can be called by any thread running in any cluster. 422 ****************************************************************************************** 423 * @ cxy : [in] target cluster identifier 414 424 * @ fs_type : [in] file system type. 415 425 * @ name : [in] directory entry file/dir name. … … 417 427 * @ return 0 if success / return ENOMEM or EINVAL if error. 418 428 *****************************************************************************************/ 419 error_t vfs_dentry_create( vfs_fs_type_t fs_type, 429 error_t vfs_dentry_create( cxy_t cxy, 430 vfs_fs_type_t fs_type, 420 431 char * name, 421 432 xptr_t * dentry_xp ); … … 424 435 * This function removes the dentry from the parent inode xhtab, and releases the memory 425 436 * allocated to the dentry descriptor. 426 * It must be executed by a thread running in the cluster containing the dentry. 427 * Use the RPC_DENTRY_DESTROY if required. 437 * It can be called by any thread running in any cluster. 428 438 ****************************************************************************************** 429 * @ dentry : [in] localpointer on dentry descriptor.430 *****************************************************************************************/ 431 void vfs_dentry_destroy( vfs_dentry_t * dentry);439 * @ dentry_xp : [in] extended pointer on dentry descriptor. 
440 *****************************************************************************************/ 441 void vfs_dentry_destroy( xptr_t dentry_xp ); 432 442 433 443 … … 437 447 438 448 /****************************************************************************************** 439 * This function allocates memory and initializes a new local file descriptor. 440 * It must be executed in the cluster containing the inode. 441 * If the client thread is not running in the owner cluster, it must use the 442 * rpc_vfs_file_create_client() function. 449 * This function allocates memory and initializes a new file descriptor in the 450 * cluster defined by the <inode_xp> argument. 451 * It can be called by any thread running in any cluster. 443 452 ****************************************************************************************** 444 * @ inode : localpointer on associated inode.445 * @ attr :file descriptor attributes.446 * @ file_xp : [out] buffer for extended pointer on created file descriptor.453 * @ inode_xp : [in] extended pointer on associated inode. 454 * @ attr : [in] file descriptor attributes. 455 * @ file_xp : [out] buffer for extended pointer on created file descriptor. 447 456 * @ return 0 if success / return ENOMEM if error. 448 457 *****************************************************************************************/ 449 error_t vfs_file_create( vfs_inode_t * inode,458 error_t vfs_file_create( xptr_t inode_xp, 450 459 uint32_t attr, 451 460 xptr_t * file_xp ); 452 461 453 462 /****************************************************************************************** 454 * This function releases memory allocated to a local file descriptor.455 * It must be executed by a thread running in the cluster containing the inode,456 * and the file refcount must be zero. Use the RPC_VFS_FILE_DESTROY if required.463 * This function releases memory allocated to file descriptor identified 464 * by the <file_xp> argument. 
465 * It can be called by any thread running in any cluster. 457 466 ****************************************************************************************** 458 * @ file : localpointer on file descriptor.459 *****************************************************************************************/ 460 void vfs_file_destroy( vfs_file_t * file);467 * @ file_xp : [in] extended pointer on file descriptor. 468 *****************************************************************************************/ 469 void vfs_file_destroy( xptr_t file_xp ); 461 470 462 471 /****************************************************************************************** 463 472 * These functions increment (resp. decrement) the count field in a remote file 464 473 * descriptor, using a remote_atomic access. 474 ***************************************************************************************** 475 * @ file_xp : extended pointer on file descriptor. 465 476 *****************************************************************************************/ 466 477 void vfs_file_count_up ( xptr_t file_xp ); … … 472 483 * The local buffer size must be at least CONFIG_VFS_MAX_NAME_LENGTH. 473 484 ***************************************************************************************** 474 * @ file_xp :extended pointer on the remote inode.475 * @ name : local buffer pointer.476 *************************************************************************************** **/485 * @ inode_xp : [in] extended pointer on the remote inode. 486 * @ name : [out] local string.
487 ***************************************************************************************/ 477 488 void vfs_file_get_name( xptr_t inode_xp, 478 489 char * name ); … … 596 607 597 608 /****************************************************************************************** 598 * This function is called by the vfs_lookup() function when a new (dentry/inode) must 599 * be created from scratch and introduced in both the parent mapper and the IOC device. 600 * The dentry and inode descriptors must have been previously created by the caller. 601 * 1. It allocates one cluster from the relevant FS, updates the FAT mapper, 602 * and synchronously update the IOC device). 603 * 2. It set the "size", and "extend" fields in child inode descriptor. 604 * The size is 4096 for a directory, the size is 0 for a file. 605 * 3. It updates the parent directory mapper to introduce the new child, 606 * and synchronously update the IOC device. 607 * 4. It set the "extend" field in dentry descriptor. 608 * It can be called by a thread running in any cluster. 609 ****************************************************************************************** 610 * @ parent_xp : extended pointer on parent inode descriptor. 611 * @ dentry_xp : extended pointer on new dentry descriptor. 612 * @ child_xp : extended pointer on child inode descriptor. 613 * @ return 0 if success / -1 if failure. 614 *****************************************************************************************/ 615 error_t vfs_new_dentry_init( xptr_t parent_xp, 616 xptr_t dentry_xp, 617 xptr_t child_xp ); 618 619 /****************************************************************************************** 620 * This function creates in the directory identified by the <child_xp> argument, 609 * This function creates in the directory identified by the <child_inode_xp> argument, 621 610 * the two special dentries <.> and <..>. The parent directory inode is defined 622 * by the <parent_ xp> argument. 
The two dentries are introduced in the Inode Tree.611 * by the <parent_inode_xp> argument. The two dentries are introduced in the Inode Tree. 623 612 * They are also introduced in the child directory mapper, and the IOC device is updated. 624 613 * This function is called by all functions creating a brand new directory : vfs_mkdir(), … … 629 618 * @ return 0 if success / -1 if failure. 630 619 *****************************************************************************************/ 631 error_t vfs_add_special_dentries( xptr_t child_ xp,632 xptr_t parent_ xp );620 error_t vfs_add_special_dentries( xptr_t child_inode_xp, 621 xptr_t parent_inode_xp ); 633 622 634 623 /****************************************************************************************** … … 893 882 894 883 /****************************************************************************************** 895 * These functions define the VFS "FS" API toa specific File System896 *****************************************************************************************/ 897 898 /****************************************************************************************** 899 * This function makes the I/O operation to move one page identified by the <page_xp>900 * argument to/from the IOC device from/to the mapper, as defined by the <cmd_type>.884 * These functions define the VFS "FS" API to access a specific File System 885 *****************************************************************************************/ 886 887 /****************************************************************************************** 888 * This function introduces in a directory mapper identified by the <parent_inode_xp> 889 * argument, a new entry identified by the <dentry_ptr> argument. 901 890 * Depending on the file system type, it calls the proper, FS specific function. 902 * It is used in case of MISS on the mapper (read), or when a dirty page in the mapper 903 * must be updated in the File System (write). 
904 * The mapper pointer, and the page index in file are obtained from the page descriptor. 905 * It can be executed by any thread running in any cluster. 906 * This function does NOT take any lock. 907 ****************************************************************************************** 908 * @ page_xp : extended pointer on page descriptor (for mapper and page_id). 909 * @ cmd_type : IOC_READ / IOC_WRITE / IOC_SYNC_READ / IOC_SYNC_WRITE 910 * @ returns 0 if success / return -1 if device access failure. 911 *****************************************************************************************/ 912 error_t vfs_fs_move_page( xptr_t page_xp, 913 cmd_type_t cmd_type ); 914 915 /****************************************************************************************** 916 * This function updates the mapper associated to a directory inode identified by the 917 * <parent> argument, to add a new entry identified by the <dentry> argument. 918 * The directory inode descriptor and the dentry descriptor are in the same cluster. 919 * Depending on the file system type, it calls the proper, FS specific function. 920 * It also updates the dentry descriptor and/or the inode descriptor extensions 921 * as required by the specific file system type. 891 * All informations related to the new directory must be contained in the dentry 892 * descriptor, or in the associated child inode descriptor. 893 * The dentry descriptor "extend" field is updated as required by the specific FS. 922 894 * Finally, it synchronously updates the parent directory on IOC device. 923 * 924 * It must be executed by a thread running in the cluster containing the parent directory. 925 * It can be the RPC_VFS_FS_ADD_DENTRY. This function does NOT take any lock. 895 * This function can be called by any thread running in any cluster. 926 896 ****************************************************************************************** 927 897 * @ parent : local pointer on parent (directory) inode. 
… … 929 899 * @ return 0 if success / return -1 if device access failure. 930 900 *****************************************************************************************/ 931 error_t vfs_fs_add_dentry( vfs_inode_t * parent, 932 vfs_dentry_t * dentry ); 933 934 /****************************************************************************************** 935 * This function updates the mapper associated to a directory inode identified by the 936 * <parent> argument, to remove an entry identified by the <dentry> argument. 937 * The directory inode descriptor and the dentry descriptor are in the same cluster. 901 error_t vfs_fs_add_dentry( xptr_t parent_inode_xp, 902 vfs_dentry_t * dentry_ptr ); 903 904 /****************************************************************************************** 905 * This function removes from a directory mapper identified by the <parent_inode_xp> 906 * argument, an entry identified by the <dentry_ptr> argument. 938 907 * Depending on the file system type, it calls the proper, FS specific function. 939 908 * Finally, it synchronously updates the parent directory on IOC device. 940 * 941 * Depending on the file system type, it calls the relevant, FS specific function. 942 * It must be executed by a thread running in the cluster containing the parent directory. 943 * It can be the RPC_VFS_FS_REMOVE_DENTRY. This function does NOT take any lock. 944 ****************************************************************************************** 945 * @ parent : local pointer on parent (directory) inode. 946 * @ dentry : local pointer on dentry containing name. 909 * This function can be called by any thread running in any cluster. 910 ****************************************************************************************** 911 * @ parent_inode_xp : extended pointer on parent (directory) inode. 912 * @ dentry_ptr : local pointer on dentry containing name. 947 913 * @ return 0 if success / return -1 if device access failure. 
948 914 *****************************************************************************************/ 949 error_t vfs_fs_remove_dentry( vfs_inode_t * parent, 950 vfs_dentry_t * dentry ); 951 952 /****************************************************************************************** 953 * This function scan the mapper of an an existing parent inode directory, identified by 954 * the <parent> argument to find a directory entry identified by the <name> argument, 955 * and updates both the child inode descriptor, identified by the <child_xp> argument, 956 * and the associated dentry descriptor : 915 error_t vfs_fs_remove_dentry( xptr_t parent_inode_xp, 916 vfs_dentry_t * dentry_ptr ); 917 918 /****************************************************************************************** 919 * This function scan a directory mapper, identified by the <parent_inode_xp> argument 920 * to find a directory entry identified by the <dentry_ptr> argument, 921 * and updates both the child inode descriptor, and the associated dentry descriptor: 957 922 * - It set the "size", "type", and "extend" fields in inode descriptor. 958 923 * - It set the "extend" field in dentry descriptor. 959 * It is called by the vfs_lookup() function in case of miss. 960 * 924 * It is called by the vfs_lookup() function in case of miss, and does NOT take any lock. 961 925 * Depending on the file system type, it calls the relevant, FS specific function. 962 * It must be called by a thread running in the cluster containing the parent inode. 963 * It can be the RPC_VFS_FS_NEW_DENTRY. This function does NOT take any lock. 964 ****************************************************************************************** 965 * @ parent : local pointer on parent inode (directory). 966 * @ name : child name. 967 * @ child_xp : extended pointer on remote child inode (file or directory) 926 * This function can be called by any thread running in any cluster. 
927 ****************************************************************************************** 928 * @ parent_inode_xp : extended pointer on parent inode (directory). 929 * @ dentry_ptr : local pointer on new entry (in parent inode cluster). 968 930 * @ return 0 if success / return -1 if dentry not found. 969 931 *****************************************************************************************/ 970 error_t vfs_fs_new_dentry( vfs_inode_t * parent, 971 char * name, 972 xptr_t child_xp ); 973 974 /****************************************************************************************** 975 * This function scan the mapper of an an existing inode directory, identified by 976 * the <inode> argument, to find a directory entry identified by the <dentry> argument, 977 * and update the size for this directory entry in mapper, as defined by <size>. 932 error_t vfs_fs_new_dentry_from_mapper( xptr_t parent_inode_xp, 933 vfs_dentry_t * dentry_ptr ); 934 935 /***************************************************************************************** 936 * This function introduces a brand new dentry identified by the <dentry_ptr> argument 937 * in the mapper of a directory identified by the <parent_inode_xp> argument. 938 * It is called by the vfs_lookup() function, and does NOT take any lock. 939 * The child inode descriptor, and the dentry descriptor must have been previously 940 * allocated and introduced in the Inode Tree. The dentry descriptor contains the name. 941 * Depending on the file system type, it calls the relevant, FS specific function. 942 * This function can be called by any thread running in any cluster. 943 ***************************************************************************************** 944 * @ parent_inode_xp : [in] extended pointer on parent inode (directory). 945 * @ dentry_ptr : [in] local pointer on dentry (in parent inode cluster). 946 * @ return 0 if success / return -1 if failure. 
947 ****************************************************************************************/ 948 error_t vfs_fs_new_dentry_to_mapper( xptr_t parent_inode_xp, 949 vfs_dentry_t * dentry_ptr ); 950 951 /****************************************************************************************** 952 * This function updates the "size" field of a directory entry identified by the 953 * <dentry_ptr> argument in a directory mapper identified by the <parent_inode_xp> 954 * from the value contained in the inode descriptor. 978 955 * The parent directory on device is synchronously updated. 979 956 * It is called by the vfs_close() function. 980 *981 957 * Depending on the file system type, it calls the relevant, FS specific function. 982 * It must be called by a thread running in the cluster containing the parent inode. 983 * It can be the RPC_VFS_FS_UPDATE_DENTRY. This function does NOT take any lock. 984 ****************************************************************************************** 985 * @ parent : local pointer on parent inode (directory). 986 * @ dentry : local pointer on dentry. 987 * @ size : new size value (bytes). 988 * @ return 0 if success / return ENOENT if not found. 989 *****************************************************************************************/ 990 error_t vfs_fs_update_dentry( vfs_inode_t * inode, 991 vfs_dentry_t * dentry, 992 uint32_t size ); 958 * This function can be called by any thread running in any cluster. 959 ****************************************************************************************** 960 * @ parent_inode_xp : local pointer on parent inode (directory). 961 * @ dentry_ptr : local pointer on dentry (in parent directory cluster). 962 * @ return 0 if success / return -1 if not found. 
963 *****************************************************************************************/ 964 error_t vfs_fs_update_dentry( xptr_t parent_inode_xp, 965 vfs_dentry_t * dentry_ptr ); 993 966 994 967 /****************************************************************************************** … … 1002 975 * the Inode Tree is dynamically created, and all dirent fields are documented in the 1003 976 * dirent array. Otherwise, only the dentry name is documented. 1004 * 977 * This function does NOT take any lock. 1005 978 * Depending on the file system type, it calls the relevant, FS specific function. 1006 * It must be called by a thread running in the cluster containing the parent inode. 1007 * This function does NOT take any lock. 979 * 980 * WARNING : this function must be called by a thread running in the cluster containing 981 * the target directory inode. 1008 982 ****************************************************************************************** 1009 983 * @ inode : [in] local pointer on directory inode. … … 1031 1005 * directory are synchronously done on the IOC device by the two vfs_fs_add_dentry() 1032 1006 * and vfs_fs_remove_dentry() functions. 1033 *1034 1007 * Depending on the file system type, it calls the relevant, FS specific function. 1035 * It must be called by a thread running in the inodecluster.1008 * This function can be called by any thread running in any cluster. 1036 1009 ***************************************************************************************** 1037 * @ inode : localpointer on inode.1038 * @ return 0 if success / return EIO if failure during device access.1010 * @ inode_xp : remote pointer on inode. 1011 * @ return 0 if success / return -1 if failure.
1039 1012 ****************************************************************************************/ 1040 error_t vfs_fs_sync_inode( struct vfs_inode_s * inode);1013 error_t vfs_fs_sync_inode( xptr_t inode_xp ); 1041 1014 1042 1015 /***************************************************************************************** … … 1044 1017 * for the FAT itself. It scan all clusters registered in the FAT mapper, and copies 1045 1018 * to device each page marked as dirty. 1046 *1047 1019 * Depending on the file system type, it calls the relevant, FS specific function. 1048 1020 * It can be called by a thread running in any cluster. … … 1053 1025 error_t vfs_fs_sync_fat( vfs_fs_type_t fs_type ); 1054 1026 1055 /*****************************************************************************************1056 * This function updates the free clusters info on the IOC device for the FS defined1057 * by the <fs_type> argument.1058 *1059 * Depending on the file system type, it calls the relevant, FS specific function.1060 * It can be called by a thread running in any cluster.1061 *****************************************************************************************1062 * @ fs_type : specific file system type.1063 * @ return 0 if success / return EIO if failure during device access.1064 ****************************************************************************************/1065 error_t vfs_fs_sync_free_info( vfs_fs_type_t fs_type );1066 1067 /******************************************************************************************1068 * This function allocates a free cluster from the FS identified by the <fs_type>1069 * argument. 
It updates the selected FS File Allocation Table.1070 *1071 * Depending on the file system type, it calls the relevant, FS specific function.1072 * It can be called by a thread running in any cluster.1073 ******************************************************************************************1074 * @ fs_type : [in] File System type.1075 * @ cluster : [out] cluster index in File system.1076 * @ return 0 if success / return -1 if no free cluster1077 *****************************************************************************************/1078 error_t vfs_fs_cluster_alloc( uint32_t fs_type,1079 uint32_t * cluster );1080 1081 1027 /****************************************************************************************** 1082 1028 * This function makes all I/O operations required to release all clusters allocated … … 1084 1030 * Depending on the file system type, it calls the proper, FS specific function. 1085 1031 * It is called by the vfs_unlink() function. 1086 * It can be executed by a thread running in any cluster.1087 1032 * This function does NOT take any lock. 1033 * Depending on the file system type, it calls the relevant, FS specific function. 1034 * This function can be executed by any thread running in any cluster. 1088 1035 ****************************************************************************************** 1089 1036 * @ inode_xp : extended pointer on inode. … … 1092 1039 error_t vfs_fs_release_inode( xptr_t inode_xp ); 1093 1040 1041 /****************************************************************************************** 1042 * This function makes the I/O operation to move one page identified by the <page_xp> 1043 * argument to/from the IOC device from/to the mapper, as defined by the <cmd_type>. 1044 * It is used in case of MISS on the mapper (read), or when a dirty page in the mapper 1045 * must be updated in the File System (write). 1046 * The mapper pointer, and the page index in file are obtained from the page descriptor. 
1047 * This function does NOT take any lock. 1048 * Depending on the file system type, it calls the proper, FS specific function. 1049 * This function can be executed by any thread running in any cluster. 1050 ****************************************************************************************** 1051 * @ page_xp : extended pointer on page descriptor (for mapper and page_id). 1052 * @ cmd_type : IOC_READ / IOC_WRITE / IOC_SYNC_READ / IOC_SYNC_WRITE 1053 * @ returns 0 if success / return -1 if device access failure. 1054 *****************************************************************************************/ 1055 error_t vfs_fs_move_page( xptr_t page_xp, 1056 ioc_cmd_type_t cmd_type ); 1057 1094 1058 1095 1059 #endif /* _VFS_H_ */ -
trunk/kernel/kern/chdev.c
r635 r657 1 1 /* 2 * chdev.c - channel device descriptor operationsimplementation.2 * chdev.c - channel device API implementation. 3 3 * 4 * Authors Alain Greiner (2016 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 148 148 #endif 149 149 150 thread_t * this = CURRENT_THREAD; 150 thread_t * this = CURRENT_THREAD; 151 xptr_t client_xp = XPTR( local_cxy , this ); 151 152 152 153 // get chdev cluster and local pointer … … 197 198 lock_xp = XPTR( chdev_cxy , &chdev_ptr->wait_lock ); 198 199 199 // The following actions executein critical section,200 // because the lock_acquire / lock_release :201 // (1) take the lock protecting the chdev state200 // The following actions are executed in critical section, 201 // (because the busylock_acquire / busylock_release) 202 // (1) take the lock protecting the waiting queue 202 203 // (2) register client thread in server queue 203 204 // (3) unblock the server thread and block client thread … … 224 225 225 226 // 3. 
client thread unblocks server thread and blocks itself 226 thread_unblock( server_xp , THREAD_BLOCKED_ IDLE);227 thread_block( XPTR( local_cxy , CURRENT_THREAD ), THREAD_BLOCKED_IO );227 thread_unblock( server_xp , THREAD_BLOCKED_CLIENT ); 228 thread_block( client_xp , THREAD_BLOCKED_IO ); 228 229 229 230 #if (DEBUG_CHDEV_CMD_TX & 1) … … 300 301 server = CURRENT_THREAD; 301 302 302 // build extended pointer on root of client threads queue303 // build extended pointer on root & lock of client threads queue 303 304 root_xp = XPTR( local_cxy , &chdev->wait_root ); 304 305 // build extended pointer on lock protecting client threads queue306 305 lock_xp = XPTR( local_cxy , &chdev->wait_lock ); 307 306 … … 351 350 #endif 352 351 // block 353 thread_block( XPTR( local_cxy , server ) , THREAD_BLOCKED_ IDLE);352 thread_block( XPTR( local_cxy , server ) , THREAD_BLOCKED_CLIENT ); 354 353 355 354 // deschedule … … 358 357 else // waiting queue not empty 359 358 { 359 // get pointers on first client thread 360 client_xp = XLIST_FIRST( root_xp , thread_t , wait_list ); 361 client_cxy = GET_CXY( client_xp ); 362 client_ptr = GET_PTR( client_xp ); 363 364 // remove this client thread from chdev waiting queue 365 xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) ); 366 360 367 // release lock protecting the waiting queue 361 368 remote_busylock_release( lock_xp ); 362 363 // get extended pointer on first client thread364 client_xp = XLIST_FIRST( root_xp , thread_t , wait_list );365 366 // get client thread cluster and local pointer367 client_cxy = GET_CXY( client_xp );368 client_ptr = GET_PTR( client_xp );369 369 370 370 #if( DEBUG_CHDEV_SERVER_TX || DEBUG_CHDEV_SERVER_RX ) … … 402 402 // unblock client thread when driver returns 403 403 thread_unblock( client_xp , THREAD_BLOCKED_IO ); 404 405 // get the lock protecting the waiting queue406 remote_busylock_acquire( lock_xp );407 408 // remove this client thread from chdev waiting queue409 xlist_unlink( XPTR( client_cxy , 
&client_ptr->wait_list ) );410 411 // release lock protecting the waiting queue412 remote_busylock_release( lock_xp );413 404 414 405 #if DEBUG_CHDEV_SERVER_RX -
trunk/kernel/kern/chdev.h
r625 r657 1 1 /* 2 * chdev.h - channel device (chdev) descriptor definition.2 * chdev.h - channel device (chdev) descriptor and API definition. 3 3 * 4 * Authors Alain Greiner (2016 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 36 36 #include <dev_txt.h> 37 37 38 /**************************************************************************************** **38 /**************************************************************************************** 39 39 * Channel Device descriptor definition 40 40 * … … 42 42 * independant) Channel Device descriptor (in brief "chdev"). 43 43 * ALMOS-MKH supports multi-channels peripherals, and defines one separated chdev 44 * descriptor for each channel (and for each RX/TX direction for theNIC and TXT devices).44 * descriptor for each channel (and for each RX/TX direction for NIC and TXT devices). 45 45 * Each chdev contains a trans-clusters waiting queue, registering the "client threads", 46 46 * and an associated "server thread", handling these requests. … … 50 50 * - the server cluster, containing the chdev and the server thread, 51 51 * - the I/O cluster, containing the physical device. 52 *************************************************************************************** **/52 ***************************************************************************************/ 53 53 54 54 /**** Forward declarations ****/ … … 58 58 struct boot_info_s; 59 59 60 /**************************************************************************************** **61 * These macros extract the functionality and theimplementation from the peripheral type.62 *************************************************************************************** **/60 /**************************************************************************************** 61 * These macros extract functionality and implementation from the peripheral type. 
62 ***************************************************************************************/ 63 63 64 64 #define FUNC_FROM_TYPE( type ) ((uint32_t)(type>>16)) 65 65 #define IMPL_FROM_TYPE( type ) ((uint32_t)(type & 0x0000FFFF)) 66 66 67 /**************************************************************************************** **67 /**************************************************************************************** 68 68 * This define the generic prototypes for the three functions that must be defined 69 69 * by the drivers implementing a generic device: … … 73 73 * The "cmd", "isr", and "aux" driver functions are registered in the generic chdev 74 74 * descriptor at kernel init, and are called to start and complete an I/O operation. 75 *************************************************************************************** **/75 ***************************************************************************************/ 76 76 77 77 typedef void (dev_ini_t) ( xptr_t dev ); … … 80 80 typedef void (dev_aux_t) ( void * args ); 81 81 82 /**************************************************************************************** **82 /**************************************************************************************** 83 83 * This enum defines the supported generic device types. 84 84 * These types are functionnal types: all (architecture specific) implementations … … 89 89 * distributed LAPIC controler, but it does not exist as a chdev in the kernel, 90 90 * as it is hidden in the driver associated to the PIC device. 91 *************************************************************************************** **/91 ***************************************************************************************/ 92 92 93 93 enum dev_func_type … … 116 116 * For each device type ***, the specific extension is defined in the "dev_***.h" file. 117 117 * 118 * NOTE :For most chdevs, the busylock is used to protect the waiting queue changes,118 * NOTE . 
For most chdevs, the busylock is used to protect the waiting queue changes, 119 119 * when a thread register in this queue, or is removed after service. 120 * This busylock is also used to protect direct access to the kernel TXT0 terminal 121 * (without using the server thread). 120 * . This busylock is also used to protect direct access to the shared 121 * kernel TXT0 terminal, that does not use the waiting queue. 122 * . For the NIC chdevs it is also used to protect registration (or removal) of a 123 * socket in the list of attached sockets rooted in NIC device extension. 122 124 *****************************************************************************************/ 123 125 … … 125 127 { 126 128 uint32_t func; /*! peripheral functionnal type */ 127 uint32_t impl; /*! peripheral i nplementation subtype*/129 uint32_t impl; /*! peripheral implementation type */ 128 130 uint32_t channel; /*! channel index */ 129 131 bool_t is_rx; /*! relevant for NIC and TXT peripherals */ … … 185 187 chdev_directory_t; 186 188 187 /**************************************************************************************** **189 /**************************************************************************************** 188 190 * This function display relevant values for a chdev descriptor. 189 **************************************************************************************** **191 **************************************************************************************** 190 192 * @ chdev : pointer on chdev. 
191 *************************************************************************************** **/193 ***************************************************************************************/ 192 194 void chdev_print( chdev_t * chdev ); 193 195 194 /**************************************************************************************** **196 /**************************************************************************************** 195 197 * This function returns a printable string for a device functionnal types. 196 **************************************************************************************** **198 **************************************************************************************** 197 199 * @ func_type : functionnal type. 198 200 * @ return pointer on string. 199 *************************************************************************************** **/201 ***************************************************************************************/ 200 202 char * chdev_func_str( uint32_t func_type ); 201 203 202 /**************************************************************************************** **204 /**************************************************************************************** 203 205 * This function allocates memory and initializes a chdev descriptor in local cluster, 204 206 * from arguments values. It should be called by a local thread. 205 207 * The device specific fields are initialised later. 206 **************************************************************************************** **208 **************************************************************************************** 207 209 * @ func : functionnal type. 208 210 * @ impl : implementation type. … … 211 213 * @ base : extended pointer on peripheral segment base. 212 214 * @ return a local pointer on created chdev / return NULL if failure. 
213 *************************************************************************************** **/215 ***************************************************************************************/ 214 216 chdev_t * chdev_create( uint32_t func, 215 217 uint32_t impl, … … 218 220 xptr_t base ); 219 221 220 /****************************************************************************************** 221 * This function registers the calling thread in the waiting queue of a remote 222 * chdev descriptor, activates (i.e. unblock) the server thread associated to chdev, 223 * and blocks itself on the THREAD_BLOCKED_IO condition. 224 ****************************************************************************************** 222 /**************************************************************************************** 223 * This generid function is executed by an user thread requesting an IOC or TXT chdev 224 * service. It registers the calling thread in the waiting queue of a the remote 225 * chdev descriptor identified by the <chdev_xp> argument. 226 * It activates (i.e. unblocks) the server thread associated to chdev, blocks itself, 227 * and deschedule. It is supposed to be re-activated by the server thread. 228 * It can be called by a thread running in any cluster. 229 * It cannot be used for a NIC or FBF chdev, because it is only convenient for one shot 230 * I/O operations, as the server thread removes the client thread from the waiting 231 * queue when it starts to execute the command. 232 **************************************************************************************** 233 * Implementation Note: 234 * The following actions are executed in a critical section, thanks to the 235 * busylock_acquire / busylock_release mechanism : 236 * 1) it takes the lock protecting the waiting queue. 237 * 2) it registers client thread in the server queue. 238 * 3) it unblocks the server thread from the THREAD_BLOCKED_CLIENT condition. 
239 * 4) it blocks the client thread on the THREAD_BLOCKED_IO condition. 240 * 5) it send an IPI to the core running the server thread to force scheduling. 241 * 6) it releases the lock protecting waiting queue. 242 * 7) it deschedules. 243 **************************************************************************************** 225 244 * @ chdev_xp : extended pointer on remote chdev descriptor. 226 *************************************************************************************** **/245 ***************************************************************************************/ 227 246 void chdev_register_command( xptr_t chdev_xp ); 228 247 229 /****************************************************************************************** 230 * This function is executed by the server thread associated to a chdev descriptor. 231 * It executes an infinite loop to handle sequencially all commands registered by the 232 * client threads in the device waiting queue, until the queue is empty. 233 * The driver CMD function being blocking, these functions return only when the command 248 /**************************************************************************************** 249 * This generic function is executed by the server thread associated to an IOC or TXT 250 * chdev identified by the <chdev> argument, to execute all commands registered in the 251 * waiting queue attached to this chev. 252 * When the clients queue is empty, the server thread blocks on the THREAD_BLOCKED_CLIENT 253 * condition and deschedules. It is supposed to be re-activated by a client thread 254 * registering a new command. 255 * It cannot be used for a NIC or FBF chdev, because it is only convenient for one shot 256 * I/O operations, as the server thread removes the client thread from the waiting 257 * queue when it starts to execute the command. 
258 **************************************************************************************** 259 * Implementation Note: 260 * All driver CMD functions are supposed to be blocking, and return only when the command 234 261 * is completed. These functions can use either a busy waiting policy, or a descheduling 235 * policy, blocking on the THREAD_BLOCKED_I O_ISR condition, and reactivated by the ISR.236 * When the waiting queue is empty, the server thread blocks on the THREAD_BLOCKED_IO_CMD237 * condition and deschedule. It is re-activated by a client thread registering a command.238 **************************************************************************************** **262 * policy, blocking on the THREAD_BLOCKED_ISR condition. In the descheduling scenario, 263 * the server thread is supposed to be reactivated by the ISR attached to the hardware 264 * interrupts signaling command completion. 265 **************************************************************************************** 239 266 * @ chdev : local pointer on device descriptor. 240 *************************************************************************************** **/267 ***************************************************************************************/ 241 268 void chdev_server_func( chdev_t * chdev ); 242 269 243 /**************************************************************************************** **270 /**************************************************************************************** 244 271 * This function returns an extended pointer on the chdev associated to a pseudo file 245 272 * descriptor (type INODE_TYPE_DEV) identified by the <file_xp> argument. 246 273 * It can be called by a thread running in any cluster. 247 274 * It enters kernel panic if the inode has not the expected type. 
248 **************************************************************************************** **275 **************************************************************************************** 249 276 * @ file_xp : extended pointer on the pseudo file descriptor. 250 277 * @ return an extended pointer on chdev. 251 *************************************************************************************** **/278 ***************************************************************************************/ 252 279 xptr_t chdev_from_file( xptr_t file_xp ); 253 280 254 /**************************************************************************************** **281 /**************************************************************************************** 255 282 * This function displays the local copy of the external chdevs directory. 256 283 * (global variable replicated in all clusters) 257 *************************************************************************************** **/284 ***************************************************************************************/ 258 285 void chdev_dir_display( void ); 259 286 260 /**************************************************************************************** **287 /**************************************************************************************** 261 288 * This function displays the list of threads registered in the queue associated 262 289 * to the chdev identified by the <chdev_xp>. 263 **************************************************************************************** **290 **************************************************************************************** 264 291 * # root_xp : extended pointer 265 *************************************************************************************** **/292 ***************************************************************************************/ 266 293 void chdev_queue_display( xptr_t chdev_xp ); 267 294 -
trunk/kernel/kern/cluster.c
r637 r657 4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018,2019 )6 * Alain Greiner (2016,2017,2018,2019,2020) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/kern/cluster.h
r637 r657 4 4 * authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018,2019 )6 * Alain Greiner (2016,2017,2018,2019,2019,2020) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 197 197 198 198 /****************************************************************************************** 199 * This function (pseudo) randomly selectsa valid cluster.199 * This function selects (pseudo) randomly a valid cluster. 200 200 * It is called by the vfs_cluster_lookup() function to place a new (missing) inode. 201 201 * It is called by the vmm_page_allocate() function to place a distributed vseg page. 202 * It is called by the dev_nic_accept() function to place a new server socket. 202 203 ****************************************************************************************** 203 204 * @ returns the selected cluster identifier. -
trunk/kernel/kern/core.c
r564 r657 54 54 // initialize scheduler 55 55 sched_init( core ); 56 } 57 58 ////////////////////// 59 lid_t core_lid( void ) 60 { 61 uint32_t i; 62 63 // get pointer on local cluser descriptor 64 cluster_t * cluster = LOCAL_CLUSTER; 65 66 // get core gid from hardware register 67 gid_t gid = hal_get_gid(); 68 69 // makes an associative search in core_tbl[] from gid 70 for( i = 0 ; i < cluster->cores_nr ; i++ ) 71 { 72 if( gid == cluster->core_tbl[i].gid ) return i; 73 } 74 75 assert( false , "core not found" ); 56 76 } 57 77 -
trunk/kernel/kern/core.h
r564 r657 83 83 84 84 /*************************************************************************************** 85 * This function returns the calling core local index (lid), making an associative 86 * in the local core_tbl[] array based on the hardwired (gid). 87 *************************************************************************************** 88 * @ returns always the lid value. 89 **************************************************************************************/ 90 lid_t core_lid( void ); 91 92 /*************************************************************************************** 85 93 * This function returns a pseudo random number from the core descriptor 86 94 * private random generator. -
trunk/kernel/kern/do_syscall.c
r647 r657 112 112 sys_get_thread_info, // 55 113 113 sys_fbf, // 56 114 sys_socket, // 57 114 115 }; 115 116 … … 181 182 case SYS_GET_THREAD_INFO: return "GET_THREAD_INFO"; // 55 182 183 case SYS_FBF: return "FBF"; // 56 184 case SYS_SOCKET: return "SOCKET"; // 57 183 185 184 186 default: return "undefined"; -
trunk/kernel/kern/kernel_init.c
r651 r657 3 3 * 4 4 * Authors : Mohamed Lamine Karaoui (2015) 5 * Alain Greiner (2016,2017,2018,2019 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) Sorbonne Universites … … 136 136 "VMM_STACK", // 3 137 137 "VMM_MMAP", // 4 138 " VFS_CTX",// 5139 "K CM_STATE", // 6140 " KHM_STATE",// 7141 "HTAB_STATE", // 8 142 138 "KCM_STATE", // 5 139 "KHM_STATE", // 6 140 "HTAB_STATE", // 7 141 142 "VFS_CTX", // 8 143 143 "PPM_FREE", // 9 144 144 "THREAD_JOIN", // 10 … … 172 172 "VFS_MAIN", // 34 173 173 "FATFS_FAT", // 35 174 "FBF_WINDOWS", // 36 174 175 }; 175 176 … … 874 875 // @ cxy : [out] cluster identifier. 875 876 // @ lid : [out] core global identifier (hardware). 876 // @ return 0 if success / return EINVALif not found.877 // @ return 0 if success / return -1 if not found. 877 878 /////////////////////////////////////////////////////////////////////////////////////////// 878 879 static error_t __attribute__ ((noinline)) get_core_identifiers( boot_info_t * info, … … 898 899 } 899 900 } 900 return EINVAL; 901 } 902 903 904 905 906 907 ///////////////////////////////// 908 // kleenex debug function 909 ///////////////////////////////// 910 void display_fat( uint32_t step ) 911 { 912 fatfs_ctx_t * fatfs_ctx = fs_context[FS_TYPE_FATFS].extend; 913 if( fatfs_ctx != NULL ) 914 { 915 printk("\n[%s] step %d at cycle %d\n", __FUNCTION__, step, (uint32_t)hal_get_cycles() ); 916 xptr_t mapper_xp = fatfs_ctx->fat_mapper_xp; 917 mapper_display_page( mapper_xp , 0 , 128 ); 918 } 919 else 920 { 921 printk("\n[%s] step %d : fatfs context not initialized\n", __FUNCTION__, step ); 922 } 901 return -1; 923 902 } 924 903 … … 949 928 xptr_t devfs_dev_inode_xp; // extended pointer on DEVFS dev inode 950 929 xptr_t devfs_external_inode_xp; // extended pointer on DEVFS external inode 951 xptr_t devfs_internal_inode_xp; // extended pointer on DEVFS internal inode952 930 953 931 error_t error; … … 1139 1117 #endif 1140 1118 1141 #if ( DEBUG_KERNEL_INIT & 1 )1119 #if 
CONFIG_INSTRUMENTATION_CHDEVS 1142 1120 if( (core_lid == 0) & (local_cxy == 0) ) 1143 1121 chdev_dir_display(); … … 1145 1123 1146 1124 ///////////////////////////////////////////////////////////////////////////////// 1147 // STEP 6 : all cores enable IPI (Inter Procesor Interrupt), 1148 // all cores unblock the idle thread, and register it in scheduler. 1149 // core[0] in cluster[0] creates the VFS root inode. 1150 // It access the boot device to initialize the file system context. 1125 // STEP 6 : All cores enable IPI (Inter Procesor Interrupt), 1126 // All cores unblock the idle thread, and register it in scheduler. 1127 // The core[0] in cluster defined by the CONFIG_VFS_ROOT_CXY parameter, 1128 // access the IOC device to initialize the VFS for the FS identified 1129 // by the CONFIG_VFS_ROOT_IS_*** parameter. It does the following 1130 // actions in the VFS_ROOT cluster : 1131 // 1. allocate and initialize the selected FS context, 1132 // 2. create and initializes the VFS root inodes, 1133 // 3. initialize the VFS context for FATFS (in fs_context[] array), 1134 // 4. create the <.> and <..> dentries in VFS root directory, 1135 // 5. register the VFS root inode in process_zero descriptor, 1136 // 6. allocate the DEVFS context, 1137 // 7. initialize the VFS context for DEVFS (in fs_context[] array), 1138 // 8. create the <dev> and <external> inodes, 1139 // 9. initialize the DEVFS context. 
1151 1140 ///////////////////////////////////////////////////////////////////////////////// 1152 1141 … … 1159 1148 core->scheduler.idle = thread; 1160 1149 1161 // core[O] in cluster[0]creates the VFS root1162 if( (core_lid == 0) && (local_cxy == 0) )1150 // core[O] in VFS_ROOT cluster creates the VFS root 1151 if( (core_lid == 0) && (local_cxy == CONFIG_VFS_ROOT_CXY ) ) 1163 1152 { 1164 vfs_root_inode_xp = XPTR_NULL;1165 1166 1153 // Only FATFS is supported yet, 1167 // other File System can be introduced here1154 // TODO other File System can be introduced below 1168 1155 if( CONFIG_VFS_ROOT_IS_FATFS ) 1169 1156 { 1170 // 1. allocate memory for FATFS context in cluster 01171 fatfs_ctx_t * fatfs_ctx = fatfs_ctx_alloc();1172 1173 if( fatfs_ctx ==NULL )1157 // 1. allocate memory and initialize FATFS context in VFS_ROOT cluster 1158 xptr_t fatfs_ctx_xp = fatfs_ctx_alloc( CONFIG_VFS_ROOT_CXY ); 1159 1160 if( fatfs_ctx_xp == XPTR_NULL ) 1174 1161 { 1175 printk("\n[PANIC] in %s : cannot create FATFS context in cluster 0\n",1176 __FUNCTION__ );1162 printk("\n[PANIC] in %s : cannot allocate FATFS context in cluster %x\n", 1163 __FUNCTION__ , CONFIG_VFS_ROOT_CXY ); 1177 1164 hal_core_sleep(); 1178 1165 } 1179 1166 1180 // 2. access boot device to initialize FATFS context 1181 fatfs_ctx_init( fatfs_ctx ); 1182 1183 // 3. get various informations from FATFS context 1184 uint32_t root_dir_cluster = fatfs_ctx->root_dir_cluster; 1185 uint32_t cluster_size = fatfs_ctx->bytes_per_sector * 1186 fatfs_ctx->sectors_per_cluster; 1187 uint32_t total_clusters = fatfs_ctx->fat_sectors_count << 7; 1188 1189 // 4. 
create VFS root inode in cluster 0 1190 error = vfs_inode_create( FS_TYPE_FATFS, // fs_type 1191 0, // attr 1192 0, // rights 1193 0, // uid 1194 0, // gid 1195 &vfs_root_inode_xp ); // return 1167 // initialise FATFS context in VFS_ROOT cluster from IOC device (boot_record) 1168 error = fatfs_ctx_init( fatfs_ctx_xp ); 1169 1196 1170 if( error ) 1197 1171 { 1198 printk("\n[PANIC] in %s : cannot create VFS root inode in cluster 0\n",1199 __FUNCTION__ );1172 printk("\n[PANIC] in %s : cannot initialize FATFS context in cluster %x\n", 1173 __FUNCTION__ , CONFIG_VFS_ROOT_CXY ); 1200 1174 hal_core_sleep(); 1201 1175 } 1202 1176 1203 // 5. update FATFS root inode "type" and "extend" fields 1204 cxy_t vfs_root_cxy = GET_CXY( vfs_root_inode_xp ); 1205 vfs_inode_t * vfs_root_ptr = GET_PTR( vfs_root_inode_xp ); 1206 hal_remote_s32( XPTR( vfs_root_cxy , &vfs_root_ptr->type ), INODE_TYPE_DIR ); 1207 hal_remote_spt( XPTR( vfs_root_cxy , &vfs_root_ptr->extend ), 1177 #if( DEBUG_KERNEL_INIT & 1 ) 1178 printk("\n[%s] initialized FATFS context in cluster %x\n", 1179 __FUNCTION__, CONFIG_VFS_ROOT_CXY ); 1180 #endif 1181 1182 // get various informations from FATFS context 1183 fatfs_ctx_t * fatfs_ctx_ptr = GET_PTR( fatfs_ctx_xp ); 1184 1185 uint32_t root_dir_cluster = hal_remote_l32( XPTR( CONFIG_VFS_ROOT_CXY, 1186 &fatfs_ctx_ptr->root_dir_cluster ) ); 1187 1188 uint32_t bytes_per_sector = hal_remote_l32( XPTR( CONFIG_VFS_ROOT_CXY, 1189 &fatfs_ctx_ptr->bytes_per_sector ) ); 1190 1191 uint32_t sectors_per_cluster = hal_remote_l32( XPTR( CONFIG_VFS_ROOT_CXY, 1192 &fatfs_ctx_ptr->sectors_per_cluster ) ); 1193 1194 uint32_t cluster_size = bytes_per_sector * sectors_per_cluster; 1195 1196 uint32_t fat_sectors_count = hal_remote_l32( XPTR( CONFIG_VFS_ROOT_CXY, 1197 &fatfs_ctx_ptr->fat_sectors_count ) ) << 7; 1198 1199 uint32_t total_clusters = fat_sectors_count << 7; 1200 1201 // 2. 
create VFS root inode in VFS_ROOT cluster 1202 // TODO define attr, rights, uid, gid 1203 error = vfs_inode_create( CONFIG_VFS_ROOT_CXY, // target cluster 1204 FS_TYPE_FATFS, // fs_type 1205 0, // attr 1206 0, // rights 1207 0, // uid 1208 0, // gid 1209 &vfs_root_inode_xp ); // return 1210 if( error ) 1211 { 1212 printk("\n[PANIC] in %s : cannot create VFS root inode in cluster %x\n", 1213 __FUNCTION__ , CONFIG_VFS_ROOT_CXY ); 1214 hal_core_sleep(); 1215 } 1216 1217 #if( DEBUG_KERNEL_INIT & 1 ) 1218 vfs_inode_t * root_inode = GET_PTR( vfs_root_inode_xp ); 1219 printk("\n[%s] created </> root inode %x in cluster %x / ctx %x\n", 1220 __FUNCTION__, root_inode, CONFIG_VFS_ROOT_CXY, root_inode->ctx ); 1221 #endif 1222 1223 // update FATFS root inode "type" and "extend" fields 1224 vfs_inode_t * vfs_root_inode_ptr = GET_PTR( vfs_root_inode_xp ); 1225 1226 hal_remote_s32( XPTR( CONFIG_VFS_ROOT_CXY , &vfs_root_inode_ptr->type ), 1227 INODE_TYPE_DIR ); 1228 1229 hal_remote_spt( XPTR( CONFIG_VFS_ROOT_CXY , &vfs_root_inode_ptr->extend ), 1208 1230 (void*)(intptr_t)root_dir_cluster ); 1209 1231 1210 // 6. initialize the generic VFS context for FATFS 1211 vfs_ctx_init( FS_TYPE_FATFS, // fs type 1212 0, // attributes: unused 1213 total_clusters, // number of clusters 1214 cluster_size, // bytes 1215 vfs_root_inode_xp, // VFS root 1216 fatfs_ctx ); // extend 1232 // 3. 
initialize the VFS context for FATFS in VFS_ROOT cluster 1233 vfs_ctx_init( CONFIG_VFS_ROOT_CXY, // target cluster 1234 FS_TYPE_FATFS, // fs type 1235 total_clusters, // number of clusters 1236 cluster_size, // bytes 1237 vfs_root_inode_xp, // VFS root 1238 fatfs_ctx_ptr ); // extend 1239 1240 #if( DEBUG_KERNEL_INIT & 1 ) 1241 vfs_ctx_t * vfs_for_fatfs_ctx = &fs_context[FS_TYPE_FATFS]; 1242 printk("\n[%s] initialized VFS_for_FATFS context in cluster %x / ctx %x / fs_type %d\n", 1243 __FUNCTION__, CONFIG_VFS_ROOT_CXY, vfs_for_fatfs_ctx, vfs_for_fatfs_ctx->type ); 1244 #endif 1217 1245 } 1218 1246 else 1219 1247 { 1220 printk("\n[PANIC] in %s : unsupported VFS type in cluster 0\n",1221 __FUNCTION__ );1248 printk("\n[PANIC] in %s : unsupported VFS type in cluster %x\n", 1249 __FUNCTION__ , CONFIG_VFS_ROOT_CXY ); 1222 1250 hal_core_sleep(); 1223 1251 } 1224 1252 1225 // create the <.> and <..> dentries in VFS root directory1253 // 4. create the <.> and <..> dentries in VFS root directory 1226 1254 // the VFS root parent inode is the VFS root inode itself 1227 1255 vfs_add_special_dentries( vfs_root_inode_xp, 1228 1256 vfs_root_inode_xp ); 1229 1257 1230 // register VFS root inode in process_zero descriptor of cluster 0 1231 process_zero.vfs_root_xp = vfs_root_inode_xp; 1232 process_zero.cwd_xp = vfs_root_inode_xp; 1258 // 5. register VFS root inode in target cluster process_zero descriptor 1259 hal_remote_s64( XPTR( CONFIG_VFS_ROOT_CXY , &process_zero.vfs_root_xp ), 1260 vfs_root_inode_xp ); 1261 hal_remote_s64( XPTR( CONFIG_VFS_ROOT_CXY , &process_zero.cwd_xp ), 1262 vfs_root_inode_xp ); 1263 1264 // 6. allocate memory for DEVFS context in VFS_ROOT cluster 1265 xptr_t devfs_ctx_xp = devfs_ctx_alloc( CONFIG_VFS_ROOT_CXY ); 1266 1267 if( devfs_ctx_xp == XPTR_NULL ) 1268 { 1269 printk("\n[PANIC] in %s : cannot create DEVFS context in cluster %x\n", 1270 __FUNCTION__ , CONFIG_VFS_ROOT_CXY ); 1271 hal_core_sleep(); 1272 } 1273 1274 // 7. 
initialize the VFS context for DEVFS in VFS_ROOT cluster 1275 vfs_ctx_init( CONFIG_VFS_ROOT_CXY, // target cluster 1276 FS_TYPE_DEVFS, // fs type 1277 0, // total_clusters: unused 1278 0, // cluster_size: unused 1279 vfs_root_inode_xp, // VFS root 1280 GET_PTR( devfs_ctx_xp ) ); // extend 1281 1282 #if( DEBUG_KERNEL_INIT & 1 ) 1283 vfs_ctx_t * vfs_for_devfs_ctx = &fs_context[FS_TYPE_DEVFS]; 1284 printk("\n[%s] initialized VFS_for_DEVFS context in cluster %x / ctx %x / fs_type %d\n", 1285 __FUNCTION__, CONFIG_VFS_ROOT_CXY, vfs_for_devfs_ctx, vfs_for_devfs_ctx->type ); 1286 #endif 1287 1288 // 8. create "dev" and "external" inodes (directories) 1289 devfs_global_init( vfs_root_inode_xp, 1290 &devfs_dev_inode_xp, 1291 &devfs_external_inode_xp ); 1292 1293 // 9. initializes DEVFS context in VFS_ROOT cluster 1294 devfs_ctx_init( devfs_ctx_xp, 1295 devfs_dev_inode_xp, 1296 devfs_external_inode_xp ); 1233 1297 } 1234 1298 … … 1240 1304 1241 1305 #if DEBUG_KERNEL_INIT 1242 if( (core_lid == 0) & (local_cxy == 0) ) 1243 printk("\n[%s] exit barrier 6 : VFS root (%x,%x) in cluster 0 / cycle %d\n", 1244 __FUNCTION__, GET_CXY(process_zero.vfs_root_xp), 1245 GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() ); 1246 #endif 1247 1248 ///////////////////////////////////////////////////////////////////////////////// 1249 // STEP 7 : In all other clusters than cluster[0], the core[0] allocates memory 1250 // for the selected FS context, and initialise the local FS context and 1251 // the local VFS context from values stored in cluster 0. 1252 // They get the VFS root inode extended pointer from cluster 0. 
1253 ///////////////////////////////////////////////////////////////////////////////// 1254 1255 if( (core_lid == 0) && (local_cxy != 0) ) 1306 if( (core_lid == 0) & (local_cxy == CONFIG_VFS_ROOT_CXY) ) 1307 printk("\n[%s] exit barrier 6 : VFS root inode (%x) created in cluster (%x) / cycle %d\n", 1308 __FUNCTION__, GET_CXY(vfs_root_inode_xp), 1309 GET_PTR(vfs_root_inode_xp), (uint32_t)hal_get_cycles() ); 1310 #endif 1311 1312 ///////////////////////////////////////////////////////////////////////////////// 1313 // STEP 7 : In all clusters other than the VFS_ROOT cluster, the core[0] makes 1314 // the following local actions to complete the VFS initialisation : 1315 // 1. allocate a local context for the selected FS extension, 1316 // 2. copy FS context from VFS_ROOT cluster to local cluster, 1317 // 3. copy VFS_for_FATFS context from VFS_ROOT cluster to local cluster, 1318 // 4. allocate a local context for the DEVFS extension, 1319 // 5. copy DEVFS context from VFS_ROOT cluster to local cluster, 1320 // 6. update the local "root_inode_xp" field in process_zero. 1321 ///////////////////////////////////////////////////////////////////////////////// 1322 1323 if( (core_lid == 0) && (local_cxy != CONFIG_VFS_ROOT_CXY) ) 1256 1324 { 1257 // File System must be FATFS in this implementation,1258 // but other File System can be introduced here1325 // only FATFS is supported yet 1326 // TODO other File System can be introduced below 1259 1327 if( CONFIG_VFS_ROOT_IS_FATFS ) 1260 1328 { 1261 // 1. allocate memory for local FATFS context 1262 fatfs_ctx_t * local_fatfs_ctx = fatfs_ctx_alloc(); 1263 1264 // check memory 1265 if( local_fatfs_ctx == NULL ) 1329 // 1. allocate a local FATFS context extension 1330 xptr_t local_fatfs_ctx_xp = fatfs_ctx_alloc( local_cxy ); 1331 1332 if( local_fatfs_ctx_xp == XPTR_NULL ) 1266 1333 { 1267 1334 printk("\n[PANIC] in %s : cannot create FATFS context in cluster %x\n", … … 1270 1337 } 1271 1338 1272 // 2. 
get local pointer on VFS context for FATFS 1273 vfs_ctx_t * vfs_ctx = &fs_context[FS_TYPE_FATFS]; 1274 1275 // 3. get local pointer on FATFS context in cluster 0 1276 fatfs_ctx_t * remote_fatfs_ctx = hal_remote_lpt( XPTR( 0 , &vfs_ctx->extend ) ); 1277 1278 // 4. copy FATFS context from cluster 0 to local cluster 1279 hal_remote_memcpy( XPTR( local_cxy , local_fatfs_ctx ), 1280 XPTR( 0 , remote_fatfs_ctx ), sizeof(fatfs_ctx_t) ); 1281 1282 // 5. copy VFS context from cluster 0 to local cluster 1283 hal_remote_memcpy( XPTR( local_cxy , vfs_ctx ), 1284 XPTR( 0 , vfs_ctx ), sizeof(vfs_ctx_t) ); 1285 1286 // 6. update extend field in local copy of VFS context 1287 vfs_ctx->extend = local_fatfs_ctx; 1288 1289 if( ((fatfs_ctx_t *)vfs_ctx->extend)->sectors_per_cluster != 8 ) 1290 { 1291 printk("\n[PANIC] in %s : illegal FATFS context in cluster %x\n", 1292 __FUNCTION__ , local_cxy ); 1293 hal_core_sleep(); 1294 } 1295 } 1296 1297 // get extended pointer on VFS root inode from cluster 0 1298 vfs_root_inode_xp = hal_remote_l64( XPTR( 0 , &process_zero.vfs_root_xp ) ); 1299 1300 // update local process_zero descriptor 1339 // get local pointer on VFS_for_FATFS context (same in all clusters) 1340 vfs_ctx_t * vfs_fat_ctx_ptr = &fs_context[FS_TYPE_FATFS]; 1341 1342 // build extended pointer on VFS_for_FATFS "extend" field in VFS_ROOT cluster 1343 xptr_t fatfs_extend_xp = XPTR( CONFIG_VFS_ROOT_CXY , &vfs_fat_ctx_ptr->extend ); 1344 1345 // get local pointer on FATFS context in VFS_ROOT cluster 1346 fatfs_ctx_t * remote_fatfs_ctx_ptr = hal_remote_lpt( fatfs_extend_xp ); 1347 1348 // build extended pointer on FATFS context in VFS_ROOT cluster 1349 xptr_t remote_fatfs_ctx_xp = XPTR( CONFIG_VFS_ROOT_CXY , remote_fatfs_ctx_ptr ); 1350 1351 // 2. 
copy FATFS context from VFS_ROOT cluster to local cluster 1352 hal_remote_memcpy( local_fatfs_ctx_xp, 1353 remote_fatfs_ctx_xp, 1354 sizeof(fatfs_ctx_t) ); 1355 1356 // build extended pointer on remote VFS_for_FATFS context 1357 xptr_t remote_vfs_ctx_xp = XPTR( CONFIG_VFS_ROOT_CXY , vfs_fat_ctx_ptr ); 1358 1359 // build extended pointer on local VFS_for_FATFS context 1360 xptr_t local_vfs_ctx_xp = XPTR( local_cxy , vfs_fat_ctx_ptr ); 1361 1362 // 3. copy VFS_for_FATFS context from VFS_ROOT cluster to local cluster 1363 hal_remote_memcpy( local_vfs_ctx_xp, 1364 remote_vfs_ctx_xp, 1365 sizeof(vfs_ctx_t) ); 1366 1367 // update "extend" field in local VFS_for_FATFS context 1368 vfs_fat_ctx_ptr->extend = GET_PTR( local_fatfs_ctx_xp ); 1369 1370 // check local FATFS and VFS context copies 1371 assert( (((fatfs_ctx_t *)vfs_fat_ctx_ptr->extend)->sectors_per_cluster == 8), 1372 "illegal FATFS context in cluster %x\n", local_cxy ); 1373 1374 } 1375 else 1376 { 1377 printk("\n[PANIC] in %s : unsupported VFS type in cluster %x\n", 1378 __FUNCTION__ , local_cxy ); 1379 hal_core_sleep(); 1380 } 1381 1382 // 4. allocate a local DEVFS context extension, 1383 xptr_t local_devfs_ctx_xp = devfs_ctx_alloc( local_cxy ); 1384 1385 // get local pointer on VFS_for_DEVFS context (same in all clusters) 1386 vfs_ctx_t * vfs_dev_ctx_ptr = &fs_context[FS_TYPE_DEVFS]; 1387 1388 // build extended pointer on VFS_for_DEVFS extend field in VFS_ROOT cluster 1389 xptr_t remote_extend_xp = XPTR( CONFIG_VFS_ROOT_CXY , &vfs_dev_ctx_ptr->extend ); 1390 1391 // get local pointer on DEVFS context in VFS_ROOT cluster 1392 devfs_ctx_t * remote_devfs_ctx_ptr = hal_remote_lpt( remote_extend_xp ); 1393 1394 // build extended pointer on FATFS context in VFS_ROOT cluster 1395 xptr_t remote_devfs_ctx_xp = XPTR( CONFIG_VFS_ROOT_CXY , remote_devfs_ctx_ptr ); 1396 1397 // 5. 
copy DEVFS context from VFS_ROOT cluster to local cluster 1398 hal_remote_memcpy( local_devfs_ctx_xp, 1399 remote_devfs_ctx_xp, 1400 sizeof(devfs_ctx_t) ); 1401 1402 // update "extend" field in local VFS_for_DEVFS context 1403 vfs_dev_ctx_ptr->extend = GET_PTR( local_devfs_ctx_xp ); 1404 1405 // get extended pointer on VFS root inode from VFS_ROOT cluster 1406 vfs_root_inode_xp = hal_remote_l64( XPTR( CONFIG_VFS_ROOT_CXY, 1407 &process_zero.vfs_root_xp ) ); 1408 1409 // 6. update local process_zero descriptor 1301 1410 process_zero.vfs_root_xp = vfs_root_inode_xp; 1302 1411 process_zero.cwd_xp = vfs_root_inode_xp; … … 1310 1419 1311 1420 #if DEBUG_KERNEL_INIT 1312 if( (core_lid == 0) & (local_cxy == 1) ) 1313 printk("\n[%s] exit barrier 7 : VFS root (%x,%x) in cluster 1 / cycle %d\n", 1314 __FUNCTION__, GET_CXY(process_zero.vfs_root_xp), 1315 GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() ); 1316 #endif 1317 1318 ///////////////////////////////////////////////////////////////////////////////// 1319 // STEP 8 : core[0] in cluster 0 makes the global DEVFS initialisation: 1320 // It initializes the DEVFS context, and creates the DEVFS 1321 // "dev" and "external" inodes in cluster 0. 1322 ///////////////////////////////////////////////////////////////////////////////// 1323 1324 if( (core_lid == 0) && (local_cxy == 0) ) 1421 if( (core_lid == 0) & (local_cxy == 0) ) 1422 printk("\n[%s] exit barrier 7 : VFS & DEVFS contexts replicated in all clusters / cycle %d\n", 1423 __FUNCTION__ , (uint32_t)hal_get_cycles() ); 1424 #endif 1425 1426 ///////////////////////////////////////////////////////////////////////////////// 1427 // STEP 8 : In all clusters in parallel, core[0] completes DEVFS initialization. 1428 // Each core[0] creates the local DEVFS "internal" directory, 1429 // and creates the pseudo-files for chdevs placed in local cluster. 
1430 ///////////////////////////////////////////////////////////////////////////////// 1431 1432 if( core_lid == 0 ) 1325 1433 { 1326 // 1. allocate memory for DEVFS context extension in cluster 0 1327 devfs_ctx_t * devfs_ctx = devfs_ctx_alloc(); 1328 1329 if( devfs_ctx == NULL ) 1330 { 1331 printk("\n[PANIC] in %s : cannot create DEVFS context in cluster 0\n", 1332 __FUNCTION__ , local_cxy ); 1333 hal_core_sleep(); 1334 } 1335 1336 // 2. initialize the DEVFS entry in the vfs_context[] array 1337 vfs_ctx_init( FS_TYPE_DEVFS, // fs type 1338 0, // attributes: unused 1339 0, // total_clusters: unused 1340 0, // cluster_size: unused 1341 vfs_root_inode_xp, // VFS root 1342 devfs_ctx ); // extend 1343 1344 // 3. create "dev" and "external" inodes (directories) 1345 devfs_global_init( process_zero.vfs_root_xp, 1346 &devfs_dev_inode_xp, 1347 &devfs_external_inode_xp ); 1348 1349 // 4. initializes DEVFS context extension 1350 devfs_ctx_init( devfs_ctx, 1351 devfs_dev_inode_xp, 1352 devfs_external_inode_xp ); 1353 } 1434 // get local pointer on local DEVFS context 1435 devfs_ctx_t * ctx = fs_context[FS_TYPE_DEVFS].extend; 1436 1437 // populate DEVFS in all clusters 1438 devfs_local_init( ctx->dev_inode_xp, 1439 ctx->external_inode_xp ); 1440 } 1354 1441 1355 1442 ///////////////////////////////////////////////////////////////////////////////// … … 1361 1448 #if DEBUG_KERNEL_INIT 1362 1449 if( (core_lid == 0) & (local_cxy == 0) ) 1363 printk("\n[%s] exit barrier 8 : DEVFS root initialized in cluster 0 / cycle %d\n", 1364 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1365 #endif 1366 1367 ///////////////////////////////////////////////////////////////////////////////// 1368 // STEP 9 : In all clusters in parallel, core[0] completes DEVFS initialization. 1369 // Each core[0] get the "dev" and "external" extended pointers from 1370 // values stored in cluster(0), creates the DEVFS "internal" directory, 1371 // and creates the pseudo-files for all chdevs in local cluster. 
1372 ///////////////////////////////////////////////////////////////////////////////// 1373 1374 if( core_lid == 0 ) 1375 { 1376 // get extended pointer on "extend" field of VFS context for DEVFS in cluster 0 1377 xptr_t extend_xp = XPTR( 0 , &fs_context[FS_TYPE_DEVFS].extend ); 1378 1379 // get pointer on DEVFS context in cluster 0 1380 devfs_ctx_t * devfs_ctx = hal_remote_lpt( extend_xp ); 1381 1382 devfs_dev_inode_xp = hal_remote_l64( XPTR( 0 , &devfs_ctx->dev_inode_xp ) ); 1383 devfs_external_inode_xp = hal_remote_l64( XPTR( 0 , &devfs_ctx->external_inode_xp ) ); 1384 1385 // populate DEVFS in all clusters 1386 devfs_local_init( devfs_dev_inode_xp, 1387 devfs_external_inode_xp, 1388 &devfs_internal_inode_xp ); 1389 } 1390 1391 ///////////////////////////////////////////////////////////////////////////////// 1392 if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ), 1393 (info->x_size * info->y_size) ); 1394 barrier_wait( &local_barrier , info->cores_nr ); 1395 ///////////////////////////////////////////////////////////////////////////////// 1396 1397 #if DEBUG_KERNEL_INIT 1398 if( (core_lid == 0) & (local_cxy == 0) ) 1399 printk("\n[%s] exit barrier 9 : DEVFS initialized in cluster 0 / cycle %d\n", 1450 printk("\n[%s] exit barrier 8 : DEVFS initialized in all clusters / cycle %d\n", 1400 1451 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1401 1452 #endif … … 1407 1458 1408 1459 ///////////////////////////////////////////////////////////////////////////////// 1409 // STEP 10: core[0] in cluster 0 creates the first user process (process_init).1410 // This include the first userprocess VMM (GPT and VSL) creation.1411 // 1460 // STEP 9 : core[0] in cluster 0 creates the first user process (process_init). 1461 // This include the process VMM (GPT and VSL) creation. 1462 // Finally, it prints the ALMOS-MKH banner. 
1412 1463 ///////////////////////////////////////////////////////////////////////////////// 1413 1464 … … 1419 1470 #if DEBUG_KERNEL_INIT 1420 1471 if( (core_lid == 0) & (local_cxy == 0) ) 1421 printk("\n[%s] exit barrier 10: process_init created in cluster 0 / cycle %d\n",1472 printk("\n[%s] exit barrier 9 : process_init created in cluster 0 / cycle %d\n", 1422 1473 __FUNCTION__, (uint32_t)hal_get_cycles() ); 1423 1474 #endif -
trunk/kernel/kern/printk.h
r625 r657 2 2 * printk.h - Kernel Log & debug messages API definition. 3 3 * 4 * authors Alain Greiner (2016,2017,2018 )4 * authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 123 123 124 124 /********************************************************************************** 125 * This function displays a non-formated message on TXT0 terminal.125 * This debug function displays a non-formated message on TXT0 terminal. 126 126 * This function is actually used to debug the assembly level kernel functions. 127 127 ********************************************************************************** … … 131 131 132 132 /********************************************************************************** 133 * This function displays a 32 bits value in hexadecimal on TXT0 terminal.133 * This debug function displays a 32 bits value in hexadecimal on TXT0 terminal. 134 134 * This function is actually used to debug the assembly level kernel functions. 135 135 ********************************************************************************** … … 139 139 140 140 /********************************************************************************** 141 * This function displays a 32 bits signed value in decimal on TXT0 terminal.141 * This debug function displays a 32 bits signed value in decimal on TXT0 terminal. 142 142 * This function is actually used to debug the assembly level kernel functions. 143 143 ********************************************************************************** … … 147 147 148 148 /********************************************************************************** 149 * This function displays a 64 bits value in hexadecimal on TXT0 terminal.149 * This debug function displays a 64 bits value in hexadecimal on TXT0 terminal. 150 150 * This function is actually used to debug the assembly level kernel functions. 
151 151 ********************************************************************************** … … 158 158 * array of bytes defined by <buffer> and <size> arguments (16 bytes per line). 159 159 * The <string> argument is displayed before the buffer content. 160 * The line format is an address fol owed by 16 (hexa) bytes.160 * The line format is an address followed by 16 (hexa) bytes. 161 161 ********************************************************************************** 162 162 * @ string : buffer name or identifier. -
trunk/kernel/kern/process.c
r651 r657 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018,2019 )6 * Alain Greiner (2016,2017,2018,2019,2020) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 1103 1103 xptr_t xp; 1104 1104 1105 // get referenceprocess cluster and local pointer1105 // get target process cluster and local pointer 1106 1106 process_t * process_ptr = GET_PTR( process_xp ); 1107 1107 cxy_t process_cxy = GET_CXY( process_xp ); 1108 1108 1109 // check client process is reference process1109 // check target process is reference process 1110 1110 assert( (process_xp == hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp ) ) ), 1111 1111 "client process must be reference process\n" ); … … 1158 1158 1159 1159 } // end process_fd_register() 1160 1161 ///////////////////////////////////////////// 1162 void process_fd_remove( xptr_t process_xp, 1163 uint32_t fdid ) 1164 { 1165 pid_t pid; // target process PID 1166 lpid_t lpid; // target process LPID 1167 xptr_t iter_xp; // iterator for list of process copies 1168 xptr_t copy_xp; // extended pointer on process copy 1169 process_t * copy_ptr; // local pointer on process copy 1170 cxy_t copy_cxy; // process copy cluster identifier 1171 1172 // check process_xp argument 1173 assert( (process_xp != XPTR_NULL), "process_xp argument cannot be XPTR_NULL"); 1174 1175 // get target process cluster and local pointer 1176 process_t * process_ptr = GET_PTR( process_xp ); 1177 cxy_t process_cxy = GET_CXY( process_xp ); 1178 1179 // get target process pid and lpid 1180 pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid) ); 1181 lpid = LPID_FROM_PID( pid ); 1182 1183 // get process descriptor in owner cluster and in reference cluster 1184 xptr_t owner_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->owner_xp )); 1185 xptr_t ref_xp = hal_remote_l64( XPTR( process_cxy , &process_ptr->ref_xp )); 1186 1187 // check target process in in owner cluster 1188 
assert( (process_xp == owner_xp), "target process must be in owner process\n" ); 1189 1190 #if DEBUG_PROCESS_FD_REMOVE 1191 uint32_t cycle = (uint32_t)hal_get_cycles(); 1192 thread_t * this = CURRENT_THREAD; 1193 if( DEBUG_PROCESS_FD_REMOVE < cycle ) 1194 printk("\n[%s] thread[%x,%x] enter for fdid %d in process %x / cycle %d\n", 1195 __FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle ); 1196 #endif 1197 1198 // build extended pointers on list_of_copies root and lock (in owner cluster) 1199 xptr_t copies_root_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_root[lpid] ); 1200 xptr_t copies_lock_xp = XPTR( process_cxy , &LOCAL_CLUSTER->pmgr.copies_lock[lpid] ); 1201 1202 // get reference process cluster and local pointer 1203 process_t * ref_ptr = GET_PTR( ref_xp ); 1204 cxy_t ref_cxy = GET_CXY( ref_xp ); 1205 1206 // build extended pointer on lock protecting reference fd_array 1207 xptr_t fd_lock_xp = XPTR( ref_cxy , &ref_ptr->fd_array.lock ); 1208 1209 // take lock protecting reference fd_array 1210 remote_queuelock_acquire( fd_lock_xp ); 1211 1212 // take the lock protecting the list of copies 1213 remote_queuelock_acquire( copies_lock_xp ); 1214 1215 // loop on list of process copies 1216 XLIST_FOREACH( copies_root_xp , iter_xp ) 1217 { 1218 // get pointers on process copy 1219 copy_xp = XLIST_ELEMENT( iter_xp , process_t , copies_list ); 1220 copy_ptr = GET_PTR( copy_xp ); 1221 copy_cxy = GET_CXY( copy_xp ); 1222 1223 // release the fd_array entry in process copy 1224 hal_remote_s64( XPTR( copy_cxy , ©_ptr->fd_array.array[fdid] ), XPTR_NULL ); 1225 } 1226 1227 // release the lock protecting reference fd_array 1228 remote_queuelock_release( fd_lock_xp ); 1229 1230 // release the lock protecting the list of copies 1231 remote_queuelock_release( copies_lock_xp ); 1232 1233 #if DEBUG_PROCESS_FD_REMOVE 1234 cycle = (uint32_t)hal_get_cycles(); 1235 if( DEBUG_PROCESS_FD_REMOVE < cycle ) 1236 printk("\n[%s] thread[%x,%x] exit for fdid %d in process %x 
/ cycle %d\n", 1237 __FUNCTION__, this->process->pid, this->trdid, fdid, pid, cycle ); 1238 #endif 1239 1240 } // end process_fd_remove() 1160 1241 1161 1242 //////////////////////////////////////////////// -
trunk/kernel/kern/process.h
r651 r657 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018,2019 )6 * Alain Greiner (2016,2017,2018,2019,2020) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 70 70 /********************************************************************************************* 71 71 * This structure defines an array of extended pointers on the open file descriptors 72 * for a given process. We use an extended pointer because the open file descriptor 73 * isalways stored in the same cluster as the inode associated to the file.72 * for a given process. We use an extended pointer because the open file descriptors 73 * are always stored in the same cluster as the inode associated to the file. 74 74 * A free entry in this array contains the XPTR_NULL value. 75 75 * The array size is defined by the CONFIG_PROCESS_FILE_MAX_NR parameter. … … 79 79 * - the fd_array[] in a process copy is simply a cache containing a subset of the 80 80 * open files to speed the fdid to xptr translation, but the "lock" and "current 81 * fields are not used. 82 * - all modifications made by the process_fd_remove() are done in reference cluster 83 * and reported in all process_copies. 81 * fields are not significant for these copies. 82 * - the modifications made by the process_fd_remove() function are done in the 83 * reference cluster in all process_copies. 84 * - The modifications made by the process_fd_register() function are done in the 85 * reference cluster, and in the cluster containing the calling thread. 
84 86 ********************************************************************************************/ 85 87 … … 436 438 437 439 /********************************************************************************************* 438 * This function allocates a free slot in the fd_array of the reference process 440 * This function allocates a free slot in the fd_array of the reference process descriptor 439 441 * identified by the <process_xp> argument, register the <file_xp> argument in the 440 442 * allocated slot, and return the slot index in the <fdid> buffer. 441 * It can be called by any thread in any cluster, because it uses remote access442 * primitives to access the reference process descriptor.443 * Note: we must use the reference process descriptor, because the reference fd_array is 444 * contained in the reference cluster. It can be called by any thread in any cluster. 443 445 * It takes the lock protecting the reference fd_array against concurrent accesses. 444 446 ********************************************************************************************* … … 454 456 /********************************************************************************************* 455 457 * This function uses as many remote accesses as required, to reset an entry in fd_array[], 456 * in all clusters containing a copy. The entry is identified by the <fdid> argument. 457 * This function must be executed by a thread running in reference cluster, that contains 458 * the complete list of process descriptors copies. 458 * identified by the <fdid> argument, in all clusters containing a copy of the 459 * process descriptor, identified by the <process_xp> argument. 460 * Note: we must use the owner process descriptor, because only this owner cluster contains 461 * the list of process copies. It can be called by any thread in any cluster. 459 462 * It takes the lock protecting the reference fd_array against concurrent accesses. 
460 * TODO this function is not implemented yet.461 463 ********************************************************************************************* 462 464 * @ process : [in] pointer on the local process descriptor. 463 465 * @ fdid : [in] file descriptor index in the fd_array. 464 466 ********************************************************************************************/ 465 void process_fd_remove( process_t * process,466 uint32_t 467 void process_fd_remove( xptr_t process_xp, 468 uint32_t fdid ); 467 469 468 470 /********************************************************************************************* -
trunk/kernel/kern/rpc.c
r641 r657 2 2 * rpc.c - RPC operations implementation. 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019 )4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 59 59 &rpc_thread_user_create_server, // 6 60 60 &rpc_thread_kernel_create_server, // 7 61 &rpc_ vfs_fs_update_dentry_server,// 861 &rpc_undefined, // 8 62 62 &rpc_process_sigaction_server, // 9 63 63 64 &rpc_vfs_inode_create_server, // 10 65 &rpc_vfs_inode_destroy_server, // 11 66 &rpc_vfs_dentry_create_server, // 12 67 &rpc_vfs_dentry_destroy_server, // 13 68 &rpc_vfs_file_create_server, // 14 69 &rpc_vfs_file_destroy_server, // 15 70 &rpc_vfs_fs_new_dentry_server, // 16 71 &rpc_vfs_fs_add_dentry_server, // 17 72 &rpc_vfs_fs_remove_dentry_server, // 18 73 &rpc_vfs_inode_load_all_pages_server, // 19 74 75 &rpc_undefined, // 20 76 &rpc_undefined, // 21 77 &rpc_undefined, // 22 78 &rpc_undefined, // 23 79 &rpc_mapper_sync_server, // 24 80 &rpc_vmm_resize_vseg_server, // 25 81 &rpc_vmm_remove_vseg_server, // 26 82 &rpc_vmm_create_vseg_server, // 27 83 &rpc_vmm_set_cow_server, // 28 84 &rpc_undefined, // 29 64 &rpc_undefined, // 10 65 &rpc_undefined, // 11 66 &rpc_undefined, // 12 67 &rpc_undefined, // 13 68 &rpc_undefined, // 14 69 &rpc_vmm_resize_vseg_server, // 15 70 &rpc_vmm_remove_vseg_server, // 16 71 &rpc_vmm_create_vseg_server, // 17 72 &rpc_vmm_set_cow_server, // 18 73 &rpc_undefined, // 19 85 74 }; 86 75 … … 95 84 "THREAD_USER_CREATE", // 6 96 85 "THREAD_KERNEL_CREATE", // 7 97 " VFS_FS_UPDATE_DENTRY",// 886 "FBF_DISPLAY", // 8 98 87 "PROCESS_SIGACTION", // 9 99 88 100 "VFS_INODE_CREATE", // 10 101 "VFS_INODE_DESTROY", // 11 102 "VFS_DENTRY_CREATE", // 12 103 "VFS_DENTRY_DESTROY", // 13 104 "VFS_FILE_CREATE", // 14 105 "VFS_FILE_DESTROY", // 15 106 "VFS_FS_NEW_DENTRY", // 16 107 "VFS_FS_ADD_DENTRY", // 17 108 "VFS_FS_REMOVE_DENTRY", // 18 109 "VFS_INODE_LOAD_ALL_PAGES", // 19 110 111 "VMM_GLOBAL_RESIZE_VSEG", // 20 112 "VMM_GLOBAL_UPDATE_PTE", // 
21 113 "undefined_22", // 22 114 "undefined_23", // 23 115 "MAPPER_SYNC", // 24 116 "undefined_25", // 25 117 "VMM_REMOVE_VSEG", // 26 118 "VMM_CREATE_VSEG", // 27 119 "VMM_SET_COW", // 28 120 "undefined_29", // 29 89 "undefined_10", // 10 90 "undefined_11", // 11 91 "undefined_12", // 12 92 "undefined_13", // 13 93 "undefined_14", // 14 94 "VMM_RESIZE_VSEG", // 15 95 "VMM_REMOVE_VSEG", // 16 96 "VMM_CREATE_VSEG", // 17 97 "VMM_SET_COW", // 18 98 "undefined_19", // 19 121 99 }; 122 100 … … 423 401 424 402 425 ///////////////////////////////////////////////////////////////////////////////////////// 426 // [0] RPC_PMEM_GET_PAGES deprecated [AG] May 2019 427 ///////////////////////////////////////////////////////////////////////////////////////// 428 429 /* 430 /////////////////////////////////////////////// 431 void rpc_pmem_get_pages_client( cxy_t cxy, 432 uint32_t order, // in 433 page_t ** page ) // out 434 { 435 #if DEBUG_RPC_PMEM_GET_PAGES 436 thread_t * this = CURRENT_THREAD; 437 uint32_t cycle = (uint32_t)hal_get_cycles(); 438 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 439 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 440 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 441 #endif 442 443 uint32_t responses = 1; 444 445 // initialise RPC descriptor header 446 rpc_desc_t rpc; 447 rpc.index = RPC_PMEM_GET_PAGES; 448 rpc.blocking = true; 449 rpc.rsp = &responses; 450 451 // set input arguments in RPC descriptor 452 rpc.args[0] = (uint64_t)order; 453 454 // register RPC request in remote RPC fifo 455 rpc_send( cxy , &rpc ); 456 457 // get output arguments from RPC descriptor 458 *page = (page_t *)(intptr_t)rpc.args[1]; 459 460 #if DEBUG_RPC_PMEM_GET_PAGES 461 cycle = (uint32_t)hal_get_cycles(); 462 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 463 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 464 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 465 #endif 466 } 467 468 
/////////////////////////////////////////// 469 void rpc_pmem_get_pages_server( xptr_t xp ) 470 { 471 #if DEBUG_RPC_PMEM_GET_PAGES 472 thread_t * this = CURRENT_THREAD; 473 uint32_t cycle = (uint32_t)hal_get_cycles(); 474 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 475 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 476 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 477 #endif 478 479 // get client cluster identifier and pointer on RPC descriptor 480 cxy_t cxy = GET_CXY( xp ); 481 rpc_desc_t * desc = GET_PTR( xp ); 482 483 // get input arguments from client RPC descriptor 484 uint32_t order = (uint32_t)hal_remote_l64( XPTR( cxy , &desc->args[0] ) ); 485 486 // call local pmem allocator 487 page_t * page = ppm_alloc_pages( order ); 488 489 // set output arguments into client RPC descriptor 490 hal_remote_s64( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page ); 491 492 #if DEBUG_RPC_PMEM_GET_PAGES 493 cycle = (uint32_t)hal_get_cycles(); 494 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 495 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 496 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 497 #endif 498 } 499 */ 500 501 ///////////////////////////////////////////////////////////////////////////////////////// 502 // [1] RPC_PMEM_RELEASE_PAGES deprecated [AG] may 2019 503 ///////////////////////////////////////////////////////////////////////////////////////// 504 505 /* 506 ////////////////////////////////////////////////// 507 void rpc_pmem_release_pages_client( cxy_t cxy, 508 page_t * page ) // out 509 { 510 #if DEBUG_RPC_PMEM_RELEASE_PAGES 511 thread_t * this = CURRENT_THREAD; 512 uint32_t cycle = (uint32_t)hal_get_cycles(); 513 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 514 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 515 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 516 #endif 517 518 uint32_t responses = 1; 519 520 // initialise RPC descriptor header 
521 rpc_desc_t rpc; 522 rpc.index = RPC_PMEM_RELEASE_PAGES; 523 rpc.blocking = true; 524 rpc.rsp = &responses; 525 526 // set input arguments in RPC descriptor 527 rpc.args[0] = (uint64_t)(intptr_t)page; 528 529 // register RPC request in remote RPC fifo 530 rpc_send( cxy , &rpc ); 531 532 #if DEBUG_RPC_PMEM_RELEASE_PAGES 533 cycle = (uint32_t)hal_get_cycles(); 534 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 535 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 536 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 537 #endif 538 } 539 540 /////////////////////////////////////////////// 541 void rpc_pmem_release_pages_server( xptr_t xp ) 542 { 543 #if DEBUG_RPC_PMEM_RELEASE_PAGES 544 thread_t * this = CURRENT_THREAD; 545 uint32_t cycle = (uint32_t)hal_get_cycles(); 546 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 547 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 548 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 549 #endif 550 551 // get client cluster identifier and pointer on RPC descriptor 552 cxy_t cxy = GET_CXY( xp ); 553 rpc_desc_t * desc = GET_PTR( xp ); 554 555 // get input arguments from client RPC descriptor 556 page_t * page = (page_t *)(intptr_t)hal_remote_l64( XPTR( cxy , &desc->args[0] ) ); 557 558 // release memory to local pmem 559 kmem_req_t req; 560 req.type = KMEM_PPM; 561 req.ptr = page; 562 kmem_free( &req ); 563 564 #if DEBUG_RPC_PMEM_RELEASE_PAGES 565 cycle = (uint32_t)hal_get_cycles(); 566 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 567 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 568 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 569 #endif 570 } 571 */ 572 573 ///////////////////////////////////////////////////////////////////////////////////////// 574 // [2] RPC_PPM_DISPLAY deprecated [AG] May 2019 575 ///////////////////////////////////////////////////////////////////////////////////////// 576 577 /* 578 
///////////////////////////////////////// 579 void rpc_ppm_display_client( cxy_t cxy ) 580 { 581 #if DEBUG_RPC_PPM_DISPLAY 582 thread_t * this = CURRENT_THREAD; 583 uint32_t cycle = (uint32_t)hal_get_cycles(); 584 if( cycle > DEBUG_RPC_PPM_DISPLAY ) 585 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 586 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 587 #endif 588 589 uint32_t responses = 1; 590 591 // initialise RPC descriptor header 592 rpc_desc_t rpc; 593 rpc.index = RPC_PPM_DISPLAY; 594 rpc.blocking = true; 595 rpc.rsp = &responses; 596 597 // register RPC request in remote RPC fifo 598 rpc_send( cxy , &rpc ); 599 600 #if DEBUG_RPC_PPM_DISPLAY 601 cycle = (uint32_t)hal_get_cycles(); 602 if( cycle > DEBUG_RPC_PPM_DISPLAY ) 603 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 604 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 605 #endif 606 } 607 608 //////////////////////////////////////////////////////////////////// 609 void rpc_ppm_display_server( xptr_t __attribute__((__unused__)) xp ) 610 { 611 #if DEBUG_RPC_PPM_DISPLAY 612 thread_t * this = CURRENT_THREAD; 613 uint32_t cycle = (uint32_t)hal_get_cycles(); 614 if( cycle > DEBUG_RPC_PPM_DISPLAY ) 615 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 616 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 617 #endif 618 619 // call local kernel function 620 ppm_display(); 621 622 #if DEBUG_RPC_PPM_DISPLAY 623 cycle = (uint32_t)hal_get_cycles(); 624 if( cycle > DEBUG_RPC_PPM_DISPLAY ) 625 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 626 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 627 #endif 628 } 629 */ 630 631 ///////////////////////////////////////////////////////////////////////////////////////// 632 // [3] Marshaling functions attached to RPC_PROCESS_MAKE_FORK 403 /***************************************************************************************/ 404 
/************ Marshaling functions ordered by increasing index *************************/ 405 /***************************************************************************************/ 406 407 408 ///////////////////////////////////////////////////////////////////////////////////////// 409 // [0] undefined 410 ///////////////////////////////////////////////////////////////////////////////////////// 411 412 ///////////////////////////////////////////////////////////////////////////////////////// 413 // [1] undefined 414 ///////////////////////////////////////////////////////////////////////////////////////// 415 416 ///////////////////////////////////////////////////////////////////////////////////////// 417 // [2] undefined 418 ///////////////////////////////////////////////////////////////////////////////////////// 419 420 421 ///////////////////////////////////////////////////////////////////////////////////////// 422 // [3] Marshaling function attached to RPC_MAKE_FORK 633 423 ///////////////////////////////////////////////////////////////////////////////////////// 634 424 … … 1073 863 1074 864 ///////////////////////////////////////////////////////////////////////////////////////// 1075 // [8] Marshaling functions attached to RPC_VFS_FS_UPDATE_DENTRY 1076 ///////////////////////////////////////////////////////////////////////////////////////// 1077 1078 ///////////////////////////////////////////////////////// 1079 void rpc_vfs_fs_update_dentry_client( cxy_t cxy, 1080 vfs_inode_t * inode, 1081 vfs_dentry_t * dentry, 1082 uint32_t size, 1083 error_t * error ) 1084 { 1085 #if DEBUG_RPC_VFS_FS_UPDATE_DENTRY 1086 thread_t * this = CURRENT_THREAD; 1087 uint32_t cycle = (uint32_t)hal_get_cycles(); 1088 if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY ) 1089 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1090 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1091 #endif 1092 1093 uint32_t responses = 1; 1094 1095 // initialise RPC 
descriptor header 1096 rpc_desc_t rpc; 1097 rpc.index = RPC_VFS_FS_UPDATE_DENTRY; 1098 rpc.blocking = true; 1099 rpc.rsp = &responses; 1100 1101 // set input arguments in RPC descriptor 1102 rpc.args[0] = (uint64_t)(intptr_t)inode; 1103 rpc.args[1] = (uint64_t)(intptr_t)dentry; 1104 rpc.args[2] = (uint64_t)size; 1105 1106 // register RPC request in remote RPC fifo 1107 rpc_send( cxy , &rpc ); 1108 1109 // get output values from RPC descriptor 1110 *error = (error_t)rpc.args[3]; 1111 1112 #if DEBUG_RPC_VFS_FS_UPDATE_DENTRY 1113 cycle = (uint32_t)hal_get_cycles(); 1114 if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY ) 1115 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1116 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1117 #endif 1118 } 1119 1120 ///////////////////////////////////////////////// 1121 void rpc_vfs_fs_update_dentry_server( xptr_t xp ) 1122 { 1123 #if DEBUG_RPC_VFS_FS_UPDATE_DENTRY 1124 thread_t * this = CURRENT_THREAD; 1125 uint32_t cycle = (uint32_t)hal_get_cycles(); 1126 if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY ) 1127 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1128 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1129 #endif 1130 1131 error_t error; 1132 vfs_inode_t * inode; 1133 vfs_dentry_t * dentry; 1134 uint32_t size; 1135 1136 // get client cluster identifier and pointer on RPC descriptor 1137 cxy_t client_cxy = GET_CXY( xp ); 1138 rpc_desc_t * desc = GET_PTR( xp ); 1139 1140 // get input arguments 1141 inode = (vfs_inode_t*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 1142 dentry = (vfs_dentry_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[1])); 1143 size = (uint32_t) hal_remote_l64(XPTR(client_cxy , &desc->args[2])); 1144 1145 // call the kernel function 1146 error = vfs_fs_update_dentry( inode , dentry , size ); 1147 1148 // set output argument 1149 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1150 1151 #if 
DEBUG_RPC_VFS_FS_UPDATE_DENTRY 1152 cycle = (uint32_t)hal_get_cycles(); 1153 if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY ) 1154 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1155 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1156 #endif 1157 } 865 // [8] undefined 866 ///////////////////////////////////////////////////////////////////////////////////////// 1158 867 1159 868 ///////////////////////////////////////////////////////////////////////////////////////// … … 1197 906 process_action_str( action ), pid, cycle ); 1198 907 #endif 1199 } // end rpc_process_sigaction_client()908 } 1200 909 1201 910 ////////////////////////////////////////////// … … 1241 950 process_action_str( action ), pid, cycle ); 1242 951 #endif 1243 } // end rpc_process_sigaction_server() 1244 1245 ///////////////////////////////////////////////////////////////////////////////////////// 1246 // [10] Marshaling functions attached to RPC_VFS_INODE_CREATE 1247 ///////////////////////////////////////////////////////////////////////////////////////// 1248 1249 ///////////////////////////////////////////////////// 1250 void rpc_vfs_inode_create_client( cxy_t cxy, 1251 uint32_t fs_type, // in 1252 uint32_t attr, // in 1253 uint32_t rights, // in 1254 uint32_t uid, // in 1255 uint32_t gid, // in 1256 xptr_t * inode_xp, // out 1257 error_t * error ) // out 1258 { 1259 #if DEBUG_RPC_VFS_INODE_CREATE 1260 thread_t * this = CURRENT_THREAD; 1261 uint32_t cycle = (uint32_t)hal_get_cycles(); 1262 if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) 1263 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1264 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1265 #endif 1266 1267 uint32_t responses = 1; 1268 1269 // initialise RPC descriptor header 1270 rpc_desc_t rpc; 1271 rpc.index = RPC_VFS_INODE_CREATE; 1272 rpc.blocking = true; 1273 rpc.rsp = &responses; 1274 1275 // set input arguments in RPC descriptor 1276 rpc.args[0] = 
(uint64_t)fs_type; 1277 rpc.args[1] = (uint64_t)attr; 1278 rpc.args[2] = (uint64_t)rights; 1279 rpc.args[3] = (uint64_t)uid; 1280 rpc.args[4] = (uint64_t)gid; 1281 1282 // register RPC request in remote RPC fifo 1283 rpc_send( cxy , &rpc ); 1284 1285 // get output values from RPC descriptor 1286 *inode_xp = (xptr_t)rpc.args[5]; 1287 *error = (error_t)rpc.args[6]; 1288 1289 #if DEBUG_RPC_VFS_INODE_CREATE 1290 cycle = (uint32_t)hal_get_cycles(); 1291 if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) 1292 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1293 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1294 #endif 1295 } 1296 1297 ///////////////////////////////////////////// 1298 void rpc_vfs_inode_create_server( xptr_t xp ) 1299 { 1300 #if DEBUG_RPC_VFS_INODE_CREATE 1301 thread_t * this = CURRENT_THREAD; 1302 uint32_t cycle = (uint32_t)hal_get_cycles(); 1303 if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) 1304 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1305 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1306 #endif 1307 1308 uint32_t fs_type; 1309 uint32_t attr; 1310 uint32_t rights; 1311 uint32_t uid; 1312 uint32_t gid; 1313 xptr_t inode_xp; 1314 error_t error; 1315 1316 // get client cluster identifier and pointer on RPC descriptor 1317 cxy_t client_cxy = GET_CXY( xp ); 1318 rpc_desc_t * desc = GET_PTR( xp ); 1319 1320 // get input arguments from client rpc descriptor 1321 fs_type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1322 attr = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1323 rights = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); 1324 uid = (uid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) ); 1325 gid = (gid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) ); 1326 1327 // call local kernel function 1328 error = vfs_inode_create( fs_type, 1329 attr, 1330 rights, 1331 uid, 1332 gid, 1333 &inode_xp ); 1334 1335 
// set output arguments 1336 hal_remote_s64( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)inode_xp ); 1337 hal_remote_s64( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error ); 1338 1339 #if DEBUG_RPC_VFS_INODE_CREATE 1340 cycle = (uint32_t)hal_get_cycles(); 1341 if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) 1342 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1343 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1344 #endif 1345 } 1346 1347 ///////////////////////////////////////////////////////////////////////////////////////// 1348 // [11] Marshaling functions attached to RPC_VFS_INODE_DESTROY 1349 ///////////////////////////////////////////////////////////////////////////////////////// 1350 1351 ///////////////////////////////////////////////////////////// 1352 void rpc_vfs_inode_destroy_client( cxy_t cxy, 1353 struct vfs_inode_s * inode ) 1354 { 1355 #if DEBUG_RPC_VFS_INODE_DESTROY 1356 thread_t * this = CURRENT_THREAD; 1357 uint32_t cycle = (uint32_t)hal_get_cycles(); 1358 if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) 1359 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1360 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1361 #endif 1362 1363 uint32_t responses = 1; 1364 1365 // initialise RPC descriptor header 1366 rpc_desc_t rpc; 1367 rpc.index = RPC_VFS_INODE_DESTROY; 1368 rpc.blocking = true; 1369 rpc.rsp = &responses; 1370 1371 // set input arguments in RPC descriptor 1372 rpc.args[0] = (uint64_t)(intptr_t)inode; 1373 1374 // register RPC request in remote RPC fifo 1375 rpc_send( cxy , &rpc ); 1376 1377 #if DEBUG_RPC_VFS_INODE_DESTROY 1378 cycle = (uint32_t)hal_get_cycles(); 1379 if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) 1380 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1381 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1382 #endif 1383 } 1384 1385 ////////////////////////////////////////////// 1386 void rpc_vfs_inode_destroy_server( xptr_t 
xp ) 1387 { 1388 #if DEBUG_RPC_VFS_INODE_DESTROY 1389 thread_t * this = CURRENT_THREAD; 1390 uint32_t cycle = (uint32_t)hal_get_cycles(); 1391 if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) 1392 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1393 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1394 #endif 1395 1396 vfs_inode_t * inode; 1397 1398 // get client cluster identifier and pointer on RPC descriptor 1399 cxy_t client_cxy = GET_CXY( xp ); 1400 rpc_desc_t * desc = GET_PTR( xp ); 1401 1402 // get argument "inode" from client RPC descriptor 1403 inode = (vfs_inode_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1404 1405 // call local kernel function 1406 vfs_inode_destroy( inode ); 1407 1408 #if DEBUG_RPC_VFS_INODE_DESTROY 1409 cycle = (uint32_t)hal_get_cycles(); 1410 if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) 1411 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1412 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1413 #endif 1414 } 1415 1416 ///////////////////////////////////////////////////////////////////////////////////////// 1417 // [12] Marshaling functions attached to RPC_VFS_DENTRY_CREATE 1418 ///////////////////////////////////////////////////////////////////////////////////////// 1419 1420 ////////////////////////////////////////////////////////////// 1421 void rpc_vfs_dentry_create_client( cxy_t cxy, 1422 uint32_t type, // in 1423 char * name, // in 1424 xptr_t * dentry_xp, // out 1425 error_t * error ) // out 1426 { 1427 #if DEBUG_RPC_VFS_DENTRY_CREATE 1428 thread_t * this = CURRENT_THREAD; 1429 uint32_t cycle = (uint32_t)hal_get_cycles(); 1430 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1431 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1432 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1433 #endif 1434 1435 uint32_t responses = 1; 1436 1437 // initialise RPC descriptor header 1438 rpc_desc_t rpc; 1439 rpc.index = 
RPC_VFS_DENTRY_CREATE; 1440 rpc.blocking = true; 1441 rpc.rsp = &responses; 1442 1443 // set input arguments in RPC descriptor 1444 rpc.args[0] = (uint64_t)type; 1445 rpc.args[1] = (uint64_t)(intptr_t)name; 1446 1447 // register RPC request in remote RPC fifo 1448 rpc_send( cxy , &rpc ); 1449 1450 // get output values from RPC descriptor 1451 *dentry_xp = (xptr_t)rpc.args[2]; 1452 *error = (error_t)rpc.args[3]; 1453 1454 #if DEBUG_RPC_VFS_DENTRY_CREATE 1455 cycle = (uint32_t)hal_get_cycles(); 1456 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1457 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1458 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1459 #endif 1460 } 1461 1462 ////////////////////////////////////////////// 1463 void rpc_vfs_dentry_create_server( xptr_t xp ) 1464 { 1465 #if DEBUG_RPC_VFS_DENTRY_CREATE 1466 thread_t * this = CURRENT_THREAD; 1467 uint32_t cycle = (uint32_t)hal_get_cycles(); 1468 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1469 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1470 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1471 #endif 1472 1473 uint32_t type; 1474 char * name; 1475 xptr_t dentry_xp; 1476 error_t error; 1477 char name_copy[CONFIG_VFS_MAX_NAME_LENGTH]; 1478 1479 // get client cluster identifier and pointer on RPC descriptor 1480 cxy_t client_cxy = GET_CXY( xp ); 1481 rpc_desc_t * desc = GET_PTR( xp ); 1482 1483 // get arguments "name", "type", and "parent" from client RPC descriptor 1484 type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1485 name = (char *)(intptr_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1486 1487 // makes a local copy of name 1488 hal_remote_strcpy( XPTR( local_cxy , name_copy ), 1489 XPTR( client_cxy , name ) ); 1490 1491 // call local kernel function 1492 error = vfs_dentry_create( type, 1493 name_copy, 1494 &dentry_xp ); 1495 // set output arguments 1496 hal_remote_s64( XPTR( client_cxy , 
&desc->args[2] ) , (uint64_t)dentry_xp ); 1497 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1498 1499 #if DEBUG_RPC_VFS_DENTRY_CREATE 1500 cycle = (uint32_t)hal_get_cycles(); 1501 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1502 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1503 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1504 #endif 1505 } 1506 1507 ///////////////////////////////////////////////////////////////////////////////////////// 1508 // [13] Marshaling functions attached to RPC_VFS_DENTRY_DESTROY 1509 ///////////////////////////////////////////////////////////////////////////////////////// 1510 1511 /////////////////////////////////////////////////////// 1512 void rpc_vfs_dentry_destroy_client( cxy_t cxy, 1513 vfs_dentry_t * dentry ) 1514 { 1515 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1516 thread_t * this = CURRENT_THREAD; 1517 uint32_t cycle = (uint32_t)hal_get_cycles(); 1518 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1519 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1520 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1521 #endif 1522 1523 uint32_t responses = 1; 1524 1525 // initialise RPC descriptor header 1526 rpc_desc_t rpc; 1527 rpc.index = RPC_VFS_DENTRY_DESTROY; 1528 rpc.blocking = true; 1529 rpc.rsp = &responses; 1530 1531 // set input arguments in RPC descriptor 1532 rpc.args[0] = (uint64_t)(intptr_t)dentry; 1533 1534 // register RPC request in remote RPC fifo 1535 rpc_send( cxy , &rpc ); 1536 1537 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1538 cycle = (uint32_t)hal_get_cycles(); 1539 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1540 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1541 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1542 #endif 1543 } 1544 1545 /////////////////////////////////////////////// 1546 void rpc_vfs_dentry_destroy_server( xptr_t xp ) 1547 { 1548 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1549 
thread_t * this = CURRENT_THREAD; 1550 uint32_t cycle = (uint32_t)hal_get_cycles(); 1551 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1552 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1553 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1554 #endif 1555 1556 vfs_dentry_t * dentry; 1557 1558 // get client cluster identifier and pointer on RPC descriptor 1559 cxy_t client_cxy = GET_CXY( xp ); 1560 rpc_desc_t * desc = GET_PTR( xp ); 1561 1562 // get arguments "dentry" from client RPC descriptor 1563 dentry = (vfs_dentry_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1564 1565 // call local kernel function 1566 vfs_dentry_destroy( dentry ); 1567 1568 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1569 cycle = (uint32_t)hal_get_cycles(); 1570 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1571 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1572 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1573 #endif 1574 } 1575 1576 1577 ///////////////////////////////////////////////////////////////////////////////////////// 1578 // [14] Marshaling functions attached to RPC_VFS_FILE_CREATE 1579 ///////////////////////////////////////////////////////////////////////////////////////// 1580 1581 ////////////////////////////////////////////////////////////// 1582 void rpc_vfs_file_create_client( cxy_t cxy, 1583 struct vfs_inode_s * inode, // in 1584 uint32_t file_attr, // in 1585 xptr_t * file_xp, // out 1586 error_t * error ) // out 1587 { 1588 #if DEBUG_RPC_VFS_FILE_CREATE 1589 thread_t * this = CURRENT_THREAD; 1590 uint32_t cycle = (uint32_t)hal_get_cycles(); 1591 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 1592 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1593 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1594 #endif 1595 1596 uint32_t responses = 1; 1597 1598 // initialise RPC descriptor header 1599 rpc_desc_t rpc; 1600 rpc.index = RPC_VFS_FILE_CREATE; 1601 
rpc.blocking = true; 1602 rpc.rsp = &responses; 1603 1604 // set input arguments in RPC descriptor 1605 rpc.args[0] = (uint64_t)(intptr_t)inode; 1606 rpc.args[1] = (uint64_t)file_attr; 1607 1608 // register RPC request in remote RPC fifo 1609 rpc_send( cxy , &rpc ); 1610 1611 // get output values from RPC descriptor 1612 *file_xp = (xptr_t)rpc.args[2]; 1613 *error = (error_t)rpc.args[3]; 1614 1615 #if DEBUG_RPC_VFS_FILE_CREATE 1616 cycle = (uint32_t)hal_get_cycles(); 1617 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 1618 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1619 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1620 #endif 1621 } 1622 1623 //////////////////////////////////////////// 1624 void rpc_vfs_file_create_server( xptr_t xp ) 1625 { 1626 #if DEBUG_RPC_VFS_FILE_CREATE 1627 thread_t * this = CURRENT_THREAD; 1628 uint32_t cycle = (uint32_t)hal_get_cycles(); 1629 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 1630 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1631 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1632 #endif 1633 1634 uint32_t file_attr; 1635 vfs_inode_t * inode; 1636 xptr_t file_xp; 1637 error_t error; 1638 1639 // get client cluster identifier and pointer on RPC descriptor 1640 cxy_t client_cxy = GET_CXY( xp ); 1641 rpc_desc_t * desc = GET_PTR( xp ); 1642 1643 // get arguments "file_attr" and "inode" from client RPC descriptor 1644 inode = (vfs_inode_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1645 file_attr = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1646 1647 // call local kernel function 1648 error = vfs_file_create( inode, 1649 file_attr, 1650 &file_xp ); 1651 1652 // set output arguments 1653 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)file_xp ); 1654 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1655 1656 #if DEBUG_RPC_VFS_FILE_CREATE 1657 cycle = 
(uint32_t)hal_get_cycles(); 1658 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 1659 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1660 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1661 #endif 1662 } 1663 1664 ///////////////////////////////////////////////////////////////////////////////////////// 1665 // [15] Marshaling functions attached to RPC_VFS_FILE_DESTROY 1666 ///////////////////////////////////////////////////////////////////////////////////////// 1667 1668 /////////////////////////////////////////////////// 1669 void rpc_vfs_file_destroy_client( cxy_t cxy, 1670 vfs_file_t * file ) 1671 { 1672 #if DEBUG_RPC_VFS_FILE_DESTROY 1673 thread_t * this = CURRENT_THREAD; 1674 uint32_t cycle = (uint32_t)hal_get_cycles(); 1675 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1676 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1677 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1678 #endif 1679 1680 uint32_t responses = 1; 1681 1682 // initialise RPC descriptor header 1683 rpc_desc_t rpc; 1684 rpc.index = RPC_VFS_FILE_DESTROY; 1685 rpc.blocking = true; 1686 rpc.rsp = &responses; 1687 1688 // set input arguments in RPC descriptor 1689 rpc.args[0] = (uint64_t)(intptr_t)file; 1690 1691 // register RPC request in remote RPC fifo 1692 rpc_send( cxy , &rpc ); 1693 1694 #if DEBUG_RPC_VFS_FILE_DESTROY 1695 cycle = (uint32_t)hal_get_cycles(); 1696 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1697 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1698 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1699 #endif 1700 } 1701 1702 ///////////////////////////////////////////// 1703 void rpc_vfs_file_destroy_server( xptr_t xp ) 1704 { 1705 #if DEBUG_RPC_VFS_FILE_DESTROY 1706 thread_t * this = CURRENT_THREAD; 1707 uint32_t cycle = (uint32_t)hal_get_cycles(); 1708 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1709 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1710 __FUNCTION__, 
this->process->pid, this->trdid, this->core->lid , cycle ); 1711 #endif 1712 1713 vfs_file_t * file; 1714 1715 // get client cluster identifier and pointer on RPC descriptor 1716 cxy_t client_cxy = GET_CXY( xp ); 1717 rpc_desc_t * desc = GET_PTR( xp ); 1718 1719 // get arguments "dentry" from client RPC descriptor 1720 file = (vfs_file_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1721 1722 // call local kernel function 1723 vfs_file_destroy( file ); 1724 1725 #if DEBUG_RPC_VFS_FILE_DESTROY 1726 cycle = (uint32_t)hal_get_cycles(); 1727 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1728 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1729 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1730 #endif 1731 } 1732 1733 ///////////////////////////////////////////////////////////////////////////////////////// 1734 // [16] Marshaling functions attached to RPC_VFS_FS_GET_DENTRY 1735 ///////////////////////////////////////////////////////////////////////////////////////// 1736 1737 ///////////////////////////////////////////////////////// 1738 void rpc_vfs_fs_new_dentry_client( cxy_t cxy, 1739 vfs_inode_t * parent_inode, // in 1740 char * name, // in 1741 xptr_t child_inode_xp, // in 1742 error_t * error ) // out 1743 { 1744 #if DEBUG_RPC_VFS_FS_NEW_DENTRY 1745 thread_t * this = CURRENT_THREAD; 1746 uint32_t cycle = (uint32_t)hal_get_cycles(); 1747 if( cycle > DEBUG_RPC_VFS_FS_NEW_DENTRY ) 1748 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1749 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1750 #endif 1751 1752 uint32_t responses = 1; 1753 1754 // initialise RPC descriptor header 1755 rpc_desc_t rpc; 1756 rpc.index = RPC_VFS_FS_NEW_DENTRY; 1757 rpc.blocking = true; 1758 rpc.rsp = &responses; 1759 1760 // set input arguments in RPC descriptor 1761 rpc.args[0] = (uint64_t)(intptr_t)parent_inode; 1762 rpc.args[1] = (uint64_t)(intptr_t)name; 1763 rpc.args[2] = 
(uint64_t)child_inode_xp; 1764 1765 // register RPC request in remote RPC fifo 1766 rpc_send( cxy , &rpc ); 1767 1768 // get output values from RPC descriptor 1769 *error = (error_t)rpc.args[3]; 1770 1771 #if DEBUG_RPC_VFS_FS_NEW_DENTRY 1772 cycle = (uint32_t)hal_get_cycles(); 1773 if( cycle > DEBUG_RPC_VFS_FS_NEW_DENTRY ) 1774 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1775 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1776 #endif 1777 } 1778 1779 ////////////////////////////////////////////// 1780 void rpc_vfs_fs_new_dentry_server( xptr_t xp ) 1781 { 1782 #if DEBUG_RPC_VFS_FS_NEW_DENTRY 1783 thread_t * this = CURRENT_THREAD; 1784 uint32_t cycle = (uint32_t)hal_get_cycles(); 1785 if( cycle > DEBUG_RPC_VFS_FS_NEW_DENTRY ) 1786 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1787 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1788 #endif 1789 1790 error_t error; 1791 vfs_inode_t * parent; 1792 xptr_t child_xp; 1793 char * name; 1794 1795 char name_copy[CONFIG_VFS_MAX_NAME_LENGTH]; 1796 1797 // get client cluster identifier and pointer on RPC descriptor 1798 cxy_t client_cxy = GET_CXY( xp ); 1799 rpc_desc_t * desc = GET_PTR( xp ); 1800 1801 // get arguments "parent", "name", and "child_xp" 1802 parent = (vfs_inode_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 1803 name = (char*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[1])); 1804 child_xp = (xptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[2])); 1805 1806 // get name local copy 1807 hal_remote_strcpy( XPTR( local_cxy , name_copy ) , 1808 XPTR( client_cxy , name ) ); 1809 1810 // call the kernel function 1811 error = vfs_fs_new_dentry( parent , name_copy , child_xp ); 1812 1813 // set output argument 1814 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1815 1816 #if DEBUG_RPC_VFS_FS_NEW_DENTRY 1817 cycle = (uint32_t)hal_get_cycles(); 1818 if( cycle > 
DEBUG_RPC_VFS_FS_NEW_DENTRY ) 1819 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1820 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1821 #endif 1822 } 1823 1824 ///////////////////////////////////////////////////////////////////////////////////////// 1825 // [17] Marshaling function attached to RPC_VFS_FS_ADD_DENTRY 1826 ///////////////////////////////////////////////////////////////////////////////////////// 1827 1828 void rpc_vfs_fs_add_dentry_client( cxy_t cxy, 1829 vfs_inode_t * parent, // in 1830 vfs_dentry_t * dentry, // in 1831 error_t * error ) // out 1832 { 1833 #if DEBUG_RPC_VFS_FS_ADD_DENTRY 1834 thread_t * this = CURRENT_THREAD; 1835 uint32_t cycle = (uint32_t)hal_get_cycles(); 1836 if( cycle > DEBUG_RPC_VFS_FS_ADD_DENTRY ) 1837 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1838 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1839 #endif 1840 1841 uint32_t responses = 1; 1842 1843 // initialise RPC descriptor header 1844 rpc_desc_t rpc; 1845 rpc.index = RPC_VFS_FS_ADD_DENTRY; 1846 rpc.blocking = true; 1847 rpc.rsp = &responses; 1848 1849 // set input arguments in RPC descriptor 1850 rpc.args[0] = (uint64_t)(intptr_t)parent; 1851 rpc.args[1] = (uint64_t)(intptr_t)dentry; 1852 1853 // register RPC request in remote RPC fifo 1854 rpc_send( cxy , &rpc ); 1855 1856 // get output values from RPC descriptor 1857 *error = (error_t)rpc.args[2]; 1858 1859 #if DEBUG_RPC_VFS_FS_ADD_DENTRY 1860 cycle = (uint32_t)hal_get_cycles(); 1861 if( cycle > DEBUG_RPC_VFS_FS_ADD_DENTRY ) 1862 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1863 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1864 #endif 1865 } 1866 1867 ////////////////////////////////////////////// 1868 void rpc_vfs_fs_add_dentry_server( xptr_t xp ) 1869 { 1870 #if DEBUG_RPC_VFS_FS_ADD_DENTRY 1871 thread_t * this = CURRENT_THREAD; 1872 uint32_t cycle = (uint32_t)hal_get_cycles(); 1873 if( 
cycle > DEBUG_RPC_VFS_FS_ADD_DENTRY ) 1874 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1875 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1876 #endif 1877 1878 error_t error; 1879 vfs_inode_t * parent; 1880 vfs_dentry_t * dentry; 1881 1882 // get client cluster identifier and pointer on RPC descriptor 1883 cxy_t client_cxy = GET_CXY( xp ); 1884 rpc_desc_t * desc = GET_PTR( xp ); 1885 1886 // get input arguments 1887 parent = (vfs_inode_t*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 1888 dentry = (vfs_dentry_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[1])); 1889 1890 // call the kernel function 1891 error = vfs_fs_add_dentry( parent , dentry ); 1892 1893 // set output argument 1894 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)error ); 1895 1896 #if DEBUG_RPC_VFS_FS_ADD_DENTRY 1897 cycle = (uint32_t)hal_get_cycles(); 1898 if( cycle > DEBUG_RPC_VFS_FS_ADD_DENTRY ) 1899 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1900 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1901 #endif 1902 } 1903 1904 ///////////////////////////////////////////////////////////////////////////////////////// 1905 // [18] Marshaling function attached to RPC_VFS_FS_REMOVE_DENTRY 1906 ///////////////////////////////////////////////////////////////////////////////////////// 1907 1908 void rpc_vfs_fs_remove_dentry_client( cxy_t cxy, 1909 vfs_inode_t * parent, // in 1910 vfs_dentry_t * dentry, // in 1911 error_t * error ) // out 1912 { 1913 #if DEBUG_RPC_VFS_FS_REMOVE_DENTRY 1914 thread_t * this = CURRENT_THREAD; 1915 uint32_t cycle = (uint32_t)hal_get_cycles(); 1916 if( cycle > DEBUG_RPC_VFS_FS_REMOVE_DENTRY ) 1917 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1918 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1919 #endif 1920 1921 uint32_t responses = 1; 1922 1923 // initialise RPC descriptor header 1924 rpc_desc_t rpc; 
1925 rpc.index = RPC_VFS_FS_REMOVE_DENTRY; 1926 rpc.blocking = true; 1927 rpc.rsp = &responses; 1928 1929 // set input arguments in RPC descriptor 1930 rpc.args[0] = (uint64_t)(intptr_t)parent; 1931 rpc.args[1] = (uint64_t)(intptr_t)dentry; 1932 1933 // register RPC request in remote RPC fifo 1934 rpc_send( cxy , &rpc ); 1935 1936 // get output values from RPC descriptor 1937 *error = (error_t)rpc.args[2]; 1938 1939 #if DEBUG_RPC_VFS_FS_REMOVE_DENTRY 1940 cycle = (uint32_t)hal_get_cycles(); 1941 if( cycle > DEBUG_RPC_VFS_FS_REMOVE_DENTRY ) 1942 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1943 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1944 #endif 1945 } 1946 1947 ///////////////////////////////////////////////// 1948 void rpc_vfs_fs_remove_dentry_server( xptr_t xp ) 1949 { 1950 #if DEBUG_RPC_VFS_FS_REMOVE_DENTRY 1951 thread_t * this = CURRENT_THREAD; 1952 uint32_t cycle = (uint32_t)hal_get_cycles(); 1953 if( cycle > DEBUG_RPC_VFS_FS_REMOVE_DENTRY ) 1954 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1955 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1956 #endif 1957 1958 error_t error; 1959 vfs_inode_t * parent; 1960 vfs_dentry_t * dentry; 1961 1962 // get client cluster identifier and pointer on RPC descriptor 1963 cxy_t client_cxy = GET_CXY( xp ); 1964 rpc_desc_t * desc = GET_PTR( xp ); 1965 1966 // get input arguments 1967 parent = (vfs_inode_t*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 1968 dentry = (vfs_dentry_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[1])); 1969 1970 // call the kernel function 1971 error = vfs_fs_remove_dentry( parent , dentry ); 1972 1973 // set output argument 1974 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)error ); 1975 1976 #if DEBUG_RPC_VFS_FS_REMOVE_DENTRY 1977 cycle = (uint32_t)hal_get_cycles(); 1978 if( cycle > DEBUG_RPC_VFS_FS_REMOVE_DENTRY ) 1979 printk("\n[%s] thread[%x,%x] on core %d 
exit / cycle %d\n", 1980 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1981 #endif 1982 } 1983 1984 ///////////////////////////////////////////////////////////////////////////////////////// 1985 // [19] Marshaling functions attached to RPC_VFS_INODE_LOAD_ALL_PAGES 1986 ///////////////////////////////////////////////////////////////////////////////////////// 1987 1988 //////////////////////////////////////////////////////////// 1989 void rpc_vfs_inode_load_all_pages_client( cxy_t cxy, 1990 vfs_inode_t * inode, // in 1991 error_t * error ) // out 1992 { 1993 #if DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES 1994 thread_t * this = CURRENT_THREAD; 1995 uint32_t cycle = (uint32_t)hal_get_cycles(); 1996 if( cycle > DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES ) 1997 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1998 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1999 #endif 2000 2001 uint32_t responses = 1; 2002 2003 // initialise RPC descriptor header 2004 rpc_desc_t rpc; 2005 rpc.index = RPC_VFS_INODE_LOAD_ALL_PAGES; 2006 rpc.blocking = true; 2007 rpc.rsp = &responses; 2008 2009 // set input arguments in RPC descriptor 2010 rpc.args[0] = (uint64_t)(intptr_t)inode; 2011 2012 // register RPC request in remote RPC fifo 2013 rpc_send( cxy , &rpc ); 2014 2015 // get output values from RPC descriptor 2016 *error = (error_t)rpc.args[1]; 2017 2018 #if DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES 2019 cycle = (uint32_t)hal_get_cycles(); 2020 if( cycle > DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES ) 2021 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2022 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2023 #endif 2024 } 2025 2026 ///////////////////////////////////////////////////// 2027 void rpc_vfs_inode_load_all_pages_server( xptr_t xp ) 2028 { 2029 #if DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES 2030 thread_t * this = CURRENT_THREAD; 2031 uint32_t cycle = (uint32_t)hal_get_cycles(); 2032 if( cycle > 
DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES ) 2033 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2034 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2035 #endif 2036 2037 error_t error; 2038 vfs_inode_t * inode; 2039 2040 // get client cluster identifier and pointer on RPC descriptor 2041 cxy_t client_cxy = GET_CXY( xp ); 2042 rpc_desc_t * desc = GET_PTR( xp ); 2043 2044 // get input argument 2045 inode = (vfs_inode_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 2046 2047 // call the kernel function 2048 error = vfs_inode_load_all_pages( inode ); 2049 2050 // set output argument 2051 hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 2052 2053 #if DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES 2054 cycle = (uint32_t)hal_get_cycles(); 2055 if( cycle > DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES ) 2056 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2057 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2058 #endif 2059 } 2060 2061 ///////////////////////////////////////////////////////////////////////////////////////// 2062 // [20] RPC_VMM_GET_VSEG deprecated [AG] sept 2019 2063 ///////////////////////////////////////////////////////////////////////////////////////// 2064 2065 /* 2066 ////////////////////////////////////////////////// 2067 void rpc_vmm_get_vseg_client( cxy_t cxy, 2068 process_t * process, // in 2069 intptr_t vaddr, // in 2070 xptr_t * vseg_xp, // out 2071 error_t * error ) // out 2072 { 2073 #if DEBUG_RPC_VMM_GET_VSEG 2074 thread_t * this = CURRENT_THREAD; 2075 uint32_t cycle = (uint32_t)hal_get_cycles(); 2076 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 2077 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2078 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2079 #endif 2080 2081 uint32_t responses = 1; 2082 2083 // initialise RPC descriptor header 2084 rpc_desc_t rpc; 2085 rpc.index = RPC_VMM_GET_VSEG; 2086 rpc.blocking = true; 2087 
rpc.rsp = &responses; 2088 2089 // set input arguments in RPC descriptor 2090 rpc.args[0] = (uint64_t)(intptr_t)process; 2091 rpc.args[1] = (uint64_t)vaddr; 2092 2093 // register RPC request in remote RPC fifo 2094 rpc_send( cxy , &rpc ); 2095 2096 // get output argument from rpc descriptor 2097 *vseg_xp = rpc.args[2]; 2098 *error = (error_t)rpc.args[3]; 2099 2100 #if DEBUG_RPC_VMM_GET_VSEG 2101 cycle = (uint32_t)hal_get_cycles(); 2102 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 2103 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2104 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2105 #endif 2106 } 2107 2108 ///////////////////////////////////////// 2109 void rpc_vmm_get_vseg_server( xptr_t xp ) 2110 { 2111 #if DEBUG_RPC_VMM_GET_VSEG 2112 thread_t * this = CURRENT_THREAD; 2113 uint32_t cycle = (uint32_t)hal_get_cycles(); 2114 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 2115 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2116 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2117 #endif 2118 2119 process_t * process; 2120 intptr_t vaddr; 2121 vseg_t * vseg_ptr; 2122 xptr_t vseg_xp; 2123 error_t error; 2124 2125 // get client cluster identifier and pointer on RPC descriptor 2126 cxy_t client_cxy = GET_CXY( xp ); 2127 rpc_desc_t * desc = GET_PTR( xp ); 2128 2129 // get input argument from client RPC descriptor 2130 process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 2131 vaddr = (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 2132 2133 // call local kernel function 2134 error = vmm_get_vseg( process , vaddr , &vseg_ptr ); 2135 2136 // set output arguments to client RPC descriptor 2137 vseg_xp = XPTR( local_cxy , vseg_ptr ); 2138 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)vseg_xp ); 2139 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 2140 2141 #if DEBUG_RPC_VMM_GET_VSEG 2142 cycle = 
(uint32_t)hal_get_cycles(); 2143 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 2144 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2145 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2146 #endif 2147 } 2148 */ 2149 2150 ///////////////////////////////////////////////////////////////////////////////////////// 2151 // [21] undefined 2152 ///////////////////////////////////////////////////////////////////////////////////////// 2153 2154 ///////////////////////////////////////////////////////////////////////////////////////// 2155 // [22] RPC_KCM_ALLOC deprecated [AG] sept 2019 2156 ///////////////////////////////////////////////////////////////////////////////////////// 2157 2158 /* 2159 ////////////////////////////////////////// 2160 void rpc_kcm_alloc_client( cxy_t cxy, 2161 uint32_t kmem_type, // in 2162 xptr_t * buf_xp ) // out 2163 { 2164 #if DEBUG_RPC_KCM_ALLOC 2165 thread_t * this = CURRENT_THREAD; 2166 uint32_t cycle = (uint32_t)hal_get_cycles(); 2167 if( cycle > DEBUG_RPC_KCM_ALLOC ) 2168 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2169 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2170 #endif 2171 2172 uint32_t responses = 1; 2173 2174 // initialise RPC descriptor header 2175 rpc_desc_t rpc; 2176 rpc.index = RPC_KCM_ALLOC; 2177 rpc.blocking = true; 2178 rpc.rsp = &responses; 2179 2180 // set input arguments in RPC descriptor 2181 rpc.args[0] = (uint64_t)kmem_type; 2182 2183 // register RPC request in remote RPC fifo 2184 rpc_send( cxy , &rpc ); 2185 2186 // get output arguments from RPC descriptor 2187 *buf_xp = (xptr_t)rpc.args[1]; 2188 2189 #if DEBUG_RPC_KCM_ALLOC 2190 cycle = (uint32_t)hal_get_cycles(); 2191 if( cycle > DEBUG_RPC_KCM_ALLOC ) 2192 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2193 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2194 #endif 2195 } 2196 2197 ////////////////////////////////////// 2198 void rpc_kcm_alloc_server( 
xptr_t xp ) 2199 { 2200 #if DEBUG_RPC_KCM_ALLOC 2201 thread_t * this = CURRENT_THREAD; 2202 uint32_t cycle = (uint32_t)hal_get_cycles(); 2203 if( cycle > DEBUG_RPC_KCM_ALLOC ) 2204 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2205 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2206 #endif 2207 2208 // get client cluster identifier and pointer on RPC descriptor 2209 cxy_t client_cxy = GET_CXY( xp ); 2210 rpc_desc_t * desc = GET_PTR( xp ); 2211 2212 // get input argument "kmem_type" from client RPC descriptor 2213 uint32_t kmem_type = (uint32_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 2214 2215 // allocates memory for kcm 2216 kmem_req_t req; 2217 req.type = kmem_type; 2218 req.flags = AF_ZERO; 2219 void * buf_ptr = kmem_alloc( &req ); 2220 2221 // set output argument 2222 xptr_t buf_xp = XPTR( local_cxy , buf_ptr ); 2223 hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)buf_xp ); 2224 2225 #if DEBUG_RPC_KCM_ALLOC 2226 cycle = (uint32_t)hal_get_cycles(); 2227 if( cycle > DEBUG_RPC_KCM_ALLOC ) 2228 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2229 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2230 #endif 2231 } 2232 */ 2233 2234 ///////////////////////////////////////////////////////////////////////////////////////// 2235 // [23] RPC_KCM_FREE deprecated [AG] sept 2019 2236 ///////////////////////////////////////////////////////////////////////////////////////// 2237 2238 /* 2239 ///////////////////////////////////////// 2240 void rpc_kcm_free_client( cxy_t cxy, 2241 void * buf, // in 2242 uint32_t kmem_type ) // in 2243 { 2244 #if DEBUG_RPC_KCM_FREE 2245 thread_t * this = CURRENT_THREAD; 2246 uint32_t cycle = (uint32_t)hal_get_cycles(); 2247 if( cycle > DEBUG_RPC_KCM_FREE ) 2248 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2249 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2250 #endif 2251 2252 uint32_t responses 
= 1; 2253 2254 // initialise RPC descriptor header 2255 rpc_desc_t rpc; 2256 rpc.index = RPC_KCM_FREE; 2257 rpc.blocking = true; 2258 rpc.rsp = &responses; 2259 2260 // set input arguments in RPC descriptor 2261 rpc.args[0] = (uint64_t)(intptr_t)buf; 2262 rpc.args[1] = (uint64_t)kmem_type; 2263 2264 // register RPC request in remote RPC fifo 2265 rpc_send( cxy , &rpc ); 2266 2267 #if DEBUG_RPC_KCM_FREE 2268 cycle = (uint32_t)hal_get_cycles(); 2269 if( cycle > DEBUG_RPC_KCM_FREE ) 2270 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2271 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2272 #endif 2273 } 2274 2275 ///////////////////////////////////// 2276 void rpc_kcm_free_server( xptr_t xp ) 2277 { 2278 #if DEBUG_RPC_KCM_FREE 2279 thread_t * this = CURRENT_THREAD; 2280 uint32_t cycle = (uint32_t)hal_get_cycles(); 2281 if( cycle > DEBUG_RPC_KCM_FREE ) 2282 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2283 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2284 #endif 2285 2286 // get client cluster identifier and pointer on RPC descriptor 2287 cxy_t client_cxy = GET_CXY( xp ); 2288 rpc_desc_t * desc = GET_PTR( xp ); 2289 2290 // get input arguments "buf" and "kmem_type" from client RPC descriptor 2291 void * buf = (void *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 2292 uint32_t kmem_type = (uint32_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 2293 2294 // releases memory 2295 kmem_req_t req; 2296 req.type = kmem_type; 2297 req.ptr = buf; 2298 kmem_free( &req ); 2299 2300 #if DEBUG_RPC_KCM_FREE 2301 cycle = (uint32_t)hal_get_cycles(); 2302 if( cycle > DEBUG_RPC_KCM_FREE ) 2303 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2304 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2305 #endif 2306 } 2307 */ 2308 2309 ///////////////////////////////////////////////////////////////////////////////////////// 2310 // [24] 
Marshaling functions attached to RPC_MAPPER_SYNC 2311 ///////////////////////////////////////////////////////////////////////////////////////// 2312 2313 /////////////////////////////////////////////////// 2314 void rpc_mapper_sync_client( cxy_t cxy, 2315 struct mapper_s * mapper, 2316 error_t * error ) 2317 { 2318 #if DEBUG_RPC_MAPPER_SYNC 2319 thread_t * this = CURRENT_THREAD; 2320 uint32_t cycle = (uint32_t)hal_get_cycles(); 2321 if( cycle > DEBUG_RPC_MAPPER_SYNC ) 2322 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2323 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2324 #endif 2325 2326 uint32_t responses = 1; 2327 2328 // initialise RPC descriptor header 2329 rpc_desc_t rpc; 2330 rpc.index = RPC_MAPPER_SYNC; 2331 rpc.blocking = true; 2332 rpc.rsp = &responses; 2333 2334 // set input arguments in RPC descriptor 2335 rpc.args[0] = (uint64_t)(intptr_t)mapper; 2336 2337 // register RPC request in remote RPC fifo 2338 rpc_send( cxy , &rpc ); 2339 2340 // get output values from RPC descriptor 2341 *error = (error_t)rpc.args[1]; 2342 2343 #if DEBUG_RPC_MAPPER_SYNC 2344 cycle = (uint32_t)hal_get_cycles(); 2345 if( cycle > DEBUG_RPC_MAPPER_SYNC ) 2346 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2347 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2348 #endif 2349 } 2350 2351 //////////////////////////////////////// 2352 void rpc_mapper_sync_server( xptr_t xp ) 2353 { 2354 #if DEBUG_RPC_MAPPER_SYNC 2355 thread_t * this = CURRENT_THREAD; 2356 uint32_t cycle = (uint32_t)hal_get_cycles(); 2357 if( cycle > DEBUG_RPC_MAPPER_SYNC ) 2358 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2359 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2360 #endif 2361 2362 mapper_t * mapper; 2363 error_t error; 2364 2365 // get client cluster identifier and pointer on RPC descriptor 2366 cxy_t client_cxy = GET_CXY( xp ); 2367 rpc_desc_t * desc = GET_PTR( xp ); 2368 2369 
// get arguments from client RPC descriptor 2370 mapper = (mapper_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 2371 2372 // call local kernel function 2373 error = mapper_sync( mapper ); 2374 2375 // set output argument to client RPC descriptor 2376 hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 2377 2378 #if DEBUG_RPC_MAPPER_SYNC 2379 cycle = (uint32_t)hal_get_cycles(); 2380 if( cycle > DEBUG_RPC_MAPPER_SYNC ) 2381 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2382 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2383 #endif 2384 } 2385 2386 ///////////////////////////////////////////////////////////////////////////////////////// 2387 // [25] Marshaling functions attached to RPC_VMM_RESIZE_VSEG 952 } 953 954 ///////////////////////////////////////////////////////////////////////////////////////// 955 // [10] undefined 956 ///////////////////////////////////////////////////////////////////////////////////////// 957 958 ///////////////////////////////////////////////////////////////////////////////////////// 959 // [11] undefined 960 ///////////////////////////////////////////////////////////////////////////////////////// 961 962 ///////////////////////////////////////////////////////////////////////////////////////// 963 // [12] undefined 964 ///////////////////////////////////////////////////////////////////////////////////////// 965 966 ///////////////////////////////////////////////////////////////////////////////////////// 967 // [13] undefined 968 ///////////////////////////////////////////////////////////////////////////////////////// 969 970 ///////////////////////////////////////////////////////////////////////////////////////// 971 // [14] undefined 972 ///////////////////////////////////////////////////////////////////////////////////////// 973 974 ///////////////////////////////////////////////////////////////////////////////////////// 975 // [15] Marshaling 
functions attached to RPC_VMM_RESIZE_VSEG 2388 976 ///////////////////////////////////////////////////////////////////////////////////////// 2389 977 … … 2478 1066 2479 1067 ///////////////////////////////////////////////////////////////////////////////////////// 2480 // [ 26] Marshaling functions attached to RPC_VMM_REMOVE_VSEG1068 // [16] Marshaling functions attached to RPC_VMM_REMOVE_VSEG 2481 1069 ///////////////////////////////////////////////////////////////////////////////////////// 2482 1070 … … 2560 1148 2561 1149 ///////////////////////////////////////////////////////////////////////////////////////// 2562 // [ 27] Marshaling functions attached to RPC_VMM_CREATE_VSEG1150 // [17] Marshaling functions attached to RPC_VMM_CREATE_VSEG 2563 1151 ///////////////////////////////////////////////////////////////////////////////////////// 2564 1152 … … 2662 1250 2663 1251 ///////////////////////////////////////////////////////////////////////////////////////// 2664 // [ 28] Marshaling functions attached to RPC_VMM_SET_COW1252 // [18] Marshaling functions attached to RPC_VMM_SET_COW 2665 1253 ///////////////////////////////////////////////////////////////////////////////////////// 2666 1254 … … 2730 1318 } 2731 1319 1320 1321 1322 ///////////////////////////////////////////////////////////////////////////////////////// 1323 ///////////////////////////////////////////////////////////////////////////////////////// 1324 // DEPRECATED RPCs 1325 ///////////////////////////////////////////////////////////////////////////////////////// 1326 ///////////////////////////////////////////////////////////////////////////////////////// 1327 1328 /* 1329 1330 ///////////////////////////////////////////////////////////////////////////////////////// 1331 // [0] RPC_PMEM_GET_PAGES deprecated [AG] May 2019 1332 ///////////////////////////////////////////////////////////////////////////////////////// 1333 void rpc_pmem_get_pages_client( cxy_t cxy, 1334 uint32_t order, // in 1335 
page_t ** page ) // out 1336 { 1337 #if DEBUG_RPC_PMEM_GET_PAGES 1338 thread_t * this = CURRENT_THREAD; 1339 uint32_t cycle = (uint32_t)hal_get_cycles(); 1340 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 1341 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1342 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1343 #endif 1344 1345 uint32_t responses = 1; 1346 1347 // initialise RPC descriptor header 1348 rpc_desc_t rpc; 1349 rpc.index = RPC_PMEM_GET_PAGES; 1350 rpc.blocking = true; 1351 rpc.rsp = &responses; 1352 1353 // set input arguments in RPC descriptor 1354 rpc.args[0] = (uint64_t)order; 1355 1356 // register RPC request in remote RPC fifo 1357 rpc_send( cxy , &rpc ); 1358 1359 // get output arguments from RPC descriptor 1360 *page = (page_t *)(intptr_t)rpc.args[1]; 1361 1362 #if DEBUG_RPC_PMEM_GET_PAGES 1363 cycle = (uint32_t)hal_get_cycles(); 1364 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 1365 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1366 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1367 #endif 1368 } 1369 1370 /////////////////////////////////////////// 1371 void rpc_pmem_get_pages_server( xptr_t xp ) 1372 { 1373 #if DEBUG_RPC_PMEM_GET_PAGES 1374 thread_t * this = CURRENT_THREAD; 1375 uint32_t cycle = (uint32_t)hal_get_cycles(); 1376 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 1377 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1378 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1379 #endif 1380 1381 // get client cluster identifier and pointer on RPC descriptor 1382 cxy_t cxy = GET_CXY( xp ); 1383 rpc_desc_t * desc = GET_PTR( xp ); 1384 1385 // get input arguments from client RPC descriptor 1386 uint32_t order = (uint32_t)hal_remote_l64( XPTR( cxy , &desc->args[0] ) ); 1387 1388 // call local pmem allocator 1389 page_t * page = ppm_alloc_pages( order ); 1390 1391 // set output arguments into client RPC descriptor 1392 hal_remote_s64( XPTR( cxy , 
&desc->args[1] ) , (uint64_t)(intptr_t)page ); 1393 1394 #if DEBUG_RPC_PMEM_GET_PAGES 1395 cycle = (uint32_t)hal_get_cycles(); 1396 if( cycle > DEBUG_RPC_PMEM_GET_PAGES ) 1397 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1398 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1399 #endif 1400 } 1401 1402 ///////////////////////////////////////////////////////////////////////////////////////// 1403 // [1] RPC_PMEM_RELEASE_PAGES deprecated [AG] may 2019 1404 ///////////////////////////////////////////////////////////////////////////////////////// 1405 void rpc_pmem_release_pages_client( cxy_t cxy, 1406 page_t * page ) // out 1407 { 1408 #if DEBUG_RPC_PMEM_RELEASE_PAGES 1409 thread_t * this = CURRENT_THREAD; 1410 uint32_t cycle = (uint32_t)hal_get_cycles(); 1411 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 1412 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1413 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1414 #endif 1415 1416 uint32_t responses = 1; 1417 1418 // initialise RPC descriptor header 1419 rpc_desc_t rpc; 1420 rpc.index = RPC_PMEM_RELEASE_PAGES; 1421 rpc.blocking = true; 1422 rpc.rsp = &responses; 1423 1424 // set input arguments in RPC descriptor 1425 rpc.args[0] = (uint64_t)(intptr_t)page; 1426 1427 // register RPC request in remote RPC fifo 1428 rpc_send( cxy , &rpc ); 1429 1430 #if DEBUG_RPC_PMEM_RELEASE_PAGES 1431 cycle = (uint32_t)hal_get_cycles(); 1432 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 1433 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1434 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1435 #endif 1436 } 1437 1438 /////////////////////////////////////////////// 1439 void rpc_pmem_release_pages_server( xptr_t xp ) 1440 { 1441 #if DEBUG_RPC_PMEM_RELEASE_PAGES 1442 thread_t * this = CURRENT_THREAD; 1443 uint32_t cycle = (uint32_t)hal_get_cycles(); 1444 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 1445 printk("\n[%s] 
thread[%x,%x] on core %d enter / cycle %d\n", 1446 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1447 #endif 1448 1449 // get client cluster identifier and pointer on RPC descriptor 1450 cxy_t cxy = GET_CXY( xp ); 1451 rpc_desc_t * desc = GET_PTR( xp ); 1452 1453 // get input arguments from client RPC descriptor 1454 page_t * page = (page_t *)(intptr_t)hal_remote_l64( XPTR( cxy , &desc->args[0] ) ); 1455 1456 // release memory to local pmem 1457 kmem_req_t req; 1458 req.type = KMEM_PPM; 1459 req.ptr = page; 1460 kmem_free( &req ); 1461 1462 #if DEBUG_RPC_PMEM_RELEASE_PAGES 1463 cycle = (uint32_t)hal_get_cycles(); 1464 if( cycle > DEBUG_RPC_PMEM_RELEASE_PAGES ) 1465 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1466 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1467 #endif 1468 } 1469 1470 ///////////////////////////////////////////////////////////////////////////////////////// 1471 // [2] RPC_PPM_DISPLAY deprecated [AG] May 2019 1472 ///////////////////////////////////////////////////////////////////////////////////////// 1473 void rpc_ppm_display_client( cxy_t cxy ) 1474 { 1475 #if DEBUG_RPC_PPM_DISPLAY 1476 thread_t * this = CURRENT_THREAD; 1477 uint32_t cycle = (uint32_t)hal_get_cycles(); 1478 if( cycle > DEBUG_RPC_PPM_DISPLAY ) 1479 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1480 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1481 #endif 1482 1483 uint32_t responses = 1; 1484 1485 // initialise RPC descriptor header 1486 rpc_desc_t rpc; 1487 rpc.index = RPC_PPM_DISPLAY; 1488 rpc.blocking = true; 1489 rpc.rsp = &responses; 1490 1491 // register RPC request in remote RPC fifo 1492 rpc_send( cxy , &rpc ); 1493 1494 #if DEBUG_RPC_PPM_DISPLAY 1495 cycle = (uint32_t)hal_get_cycles(); 1496 if( cycle > DEBUG_RPC_PPM_DISPLAY ) 1497 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1498 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, 
cycle ); 1499 #endif 1500 } 1501 1502 //////////////////////////////////////////////////////////////////// 1503 void rpc_ppm_display_server( xptr_t __attribute__((__unused__)) xp ) 1504 { 1505 #if DEBUG_RPC_PPM_DISPLAY 1506 thread_t * this = CURRENT_THREAD; 1507 uint32_t cycle = (uint32_t)hal_get_cycles(); 1508 if( cycle > DEBUG_RPC_PPM_DISPLAY ) 1509 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1510 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1511 #endif 1512 1513 // call local kernel function 1514 ppm_display(); 1515 1516 #if DEBUG_RPC_PPM_DISPLAY 1517 cycle = (uint32_t)hal_get_cycles(); 1518 if( cycle > DEBUG_RPC_PPM_DISPLAY ) 1519 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1520 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1521 #endif 1522 } 1523 1524 ///////////////////////////////////////////////////////////////////////////////////////// 1525 // [8] RPC_VFS_FS_UPDATE_DENTRY deprecated [AG] dec 2019 1526 ///////////////////////////////////////////////////////////////////////////////////////// 1527 void rpc_vfs_fs_update_dentry_client( cxy_t cxy, 1528 vfs_inode_t * inode, 1529 vfs_dentry_t * dentry, 1530 uint32_t size, 1531 error_t * error ) 1532 { 1533 #if DEBUG_RPC_VFS_FS_UPDATE_DENTRY 1534 thread_t * this = CURRENT_THREAD; 1535 uint32_t cycle = (uint32_t)hal_get_cycles(); 1536 if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY ) 1537 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1538 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1539 #endif 1540 1541 uint32_t responses = 1; 1542 1543 // initialise RPC descriptor header 1544 rpc_desc_t rpc; 1545 rpc.index = RPC_VFS_FS_UPDATE_DENTRY; 1546 rpc.blocking = true; 1547 rpc.rsp = &responses; 1548 1549 // set input arguments in RPC descriptor 1550 rpc.args[0] = (uint64_t)(intptr_t)inode; 1551 rpc.args[1] = (uint64_t)(intptr_t)dentry; 1552 rpc.args[2] = (uint64_t)size; 1553 1554 // register RPC 
request in remote RPC fifo 1555 rpc_send( cxy , &rpc ); 1556 1557 // get output values from RPC descriptor 1558 *error = (error_t)rpc.args[3]; 1559 1560 #if DEBUG_RPC_VFS_FS_UPDATE_DENTRY 1561 cycle = (uint32_t)hal_get_cycles(); 1562 if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY ) 1563 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1564 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1565 #endif 1566 } 1567 1568 ///////////////////////////////////////////////// 1569 void rpc_vfs_fs_update_dentry_server( xptr_t xp ) 1570 { 1571 #if DEBUG_RPC_VFS_FS_UPDATE_DENTRY 1572 thread_t * this = CURRENT_THREAD; 1573 uint32_t cycle = (uint32_t)hal_get_cycles(); 1574 if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY ) 1575 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1576 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1577 #endif 1578 1579 error_t error; 1580 vfs_inode_t * inode; 1581 vfs_dentry_t * dentry; 1582 uint32_t size; 1583 1584 // get client cluster identifier and pointer on RPC descriptor 1585 cxy_t client_cxy = GET_CXY( xp ); 1586 rpc_desc_t * desc = GET_PTR( xp ); 1587 1588 // get input arguments 1589 inode = (vfs_inode_t*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 1590 dentry = (vfs_dentry_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[1])); 1591 size = (uint32_t) hal_remote_l64(XPTR(client_cxy , &desc->args[2])); 1592 1593 // call the kernel function 1594 error = vfs_fs_update_dentry( inode , dentry , size ); 1595 1596 // set output argument 1597 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1598 1599 #if DEBUG_RPC_VFS_FS_UPDATE_DENTRY 1600 cycle = (uint32_t)hal_get_cycles(); 1601 if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY ) 1602 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1603 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1604 #endif 1605 } 1606 1607 2732 1608 
///////////////////////////////////////////////////////////////////////////////////////// 2733 1609 // [29] RPC_VMM_DISPLAY deprecated [AG] June 2019 2734 1610 ///////////////////////////////////////////////////////////////////////////////////////// 2735 2736 /*2737 /////////////////////////////////////////////2738 1611 void rpc_hal_vmm_display_client( cxy_t cxy, 2739 1612 process_t * process, … … 2804 1677 } 2805 1678 1679 ///////////////////////////////////////////////////////////////////////////////////////// 1680 // [10] to RPC_VFS_INODE_CREATE deprecated [AG] dec 2019 1681 ///////////////////////////////////////////////////////////////////////////////////////// 1682 void rpc_vfs_inode_create_client( cxy_t cxy, 1683 uint32_t fs_type, // in 1684 uint32_t attr, // in 1685 uint32_t rights, // in 1686 uint32_t uid, // in 1687 uint32_t gid, // in 1688 xptr_t * inode_xp, // out 1689 error_t * error ) // out 1690 { 1691 #if DEBUG_RPC_VFS_INODE_CREATE 1692 thread_t * this = CURRENT_THREAD; 1693 uint32_t cycle = (uint32_t)hal_get_cycles(); 1694 if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) 1695 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1696 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1697 #endif 1698 1699 uint32_t responses = 1; 1700 1701 // initialise RPC descriptor header 1702 rpc_desc_t rpc; 1703 rpc.index = RPC_VFS_INODE_CREATE; 1704 rpc.blocking = true; 1705 rpc.rsp = &responses; 1706 1707 // set input arguments in RPC descriptor 1708 rpc.args[0] = (uint64_t)fs_type; 1709 rpc.args[1] = (uint64_t)attr; 1710 rpc.args[2] = (uint64_t)rights; 1711 rpc.args[3] = (uint64_t)uid; 1712 rpc.args[4] = (uint64_t)gid; 1713 1714 // register RPC request in remote RPC fifo 1715 rpc_send( cxy , &rpc ); 1716 1717 // get output values from RPC descriptor 1718 *inode_xp = (xptr_t)rpc.args[5]; 1719 *error = (error_t)rpc.args[6]; 1720 1721 #if DEBUG_RPC_VFS_INODE_CREATE 1722 cycle = (uint32_t)hal_get_cycles(); 1723 if( cycle > 
DEBUG_RPC_VFS_INODE_CREATE ) 1724 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1725 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1726 #endif 1727 } 1728 1729 ///////////////////////////////////////////// 1730 void rpc_vfs_inode_create_server( xptr_t xp ) 1731 { 1732 #if DEBUG_RPC_VFS_INODE_CREATE 1733 thread_t * this = CURRENT_THREAD; 1734 uint32_t cycle = (uint32_t)hal_get_cycles(); 1735 if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) 1736 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1737 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1738 #endif 1739 1740 uint32_t fs_type; 1741 uint32_t attr; 1742 uint32_t rights; 1743 uint32_t uid; 1744 uint32_t gid; 1745 xptr_t inode_xp; 1746 error_t error; 1747 1748 // get client cluster identifier and pointer on RPC descriptor 1749 cxy_t client_cxy = GET_CXY( xp ); 1750 rpc_desc_t * desc = GET_PTR( xp ); 1751 1752 // get input arguments from client rpc descriptor 1753 fs_type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1754 attr = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1755 rights = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) ); 1756 uid = (uid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) ); 1757 gid = (gid_t) hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) ); 1758 1759 // call local kernel function 1760 error = vfs_inode_create( fs_type, 1761 attr, 1762 rights, 1763 uid, 1764 gid, 1765 &inode_xp ); 1766 1767 // set output arguments 1768 hal_remote_s64( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)inode_xp ); 1769 hal_remote_s64( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error ); 1770 1771 #if DEBUG_RPC_VFS_INODE_CREATE 1772 cycle = (uint32_t)hal_get_cycles(); 1773 if( cycle > DEBUG_RPC_VFS_INODE_CREATE ) 1774 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1775 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1776 #endif 
1777 } 1778 1779 ///////////////////////////////////////////////////////////////////////////////////////// 1780 // [11] to RPC_VFS_INODE_DESTROY deprecated [AG] dec 2019 1781 ///////////////////////////////////////////////////////////////////////////////////////// 1782 void rpc_vfs_inode_destroy_client( cxy_t cxy, 1783 struct vfs_inode_s * inode ) 1784 { 1785 #if DEBUG_RPC_VFS_INODE_DESTROY 1786 thread_t * this = CURRENT_THREAD; 1787 uint32_t cycle = (uint32_t)hal_get_cycles(); 1788 if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) 1789 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1790 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1791 #endif 1792 1793 uint32_t responses = 1; 1794 1795 // initialise RPC descriptor header 1796 rpc_desc_t rpc; 1797 rpc.index = RPC_VFS_INODE_DESTROY; 1798 rpc.blocking = true; 1799 rpc.rsp = &responses; 1800 1801 // set input arguments in RPC descriptor 1802 rpc.args[0] = (uint64_t)(intptr_t)inode; 1803 1804 // register RPC request in remote RPC fifo 1805 rpc_send( cxy , &rpc ); 1806 1807 #if DEBUG_RPC_VFS_INODE_DESTROY 1808 cycle = (uint32_t)hal_get_cycles(); 1809 if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) 1810 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1811 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1812 #endif 1813 } 1814 1815 ////////////////////////////////////////////// 1816 void rpc_vfs_inode_destroy_server( xptr_t xp ) 1817 { 1818 #if DEBUG_RPC_VFS_INODE_DESTROY 1819 thread_t * this = CURRENT_THREAD; 1820 uint32_t cycle = (uint32_t)hal_get_cycles(); 1821 if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) 1822 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1823 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1824 #endif 1825 1826 vfs_inode_t * inode; 1827 1828 // get client cluster identifier and pointer on RPC descriptor 1829 cxy_t client_cxy = GET_CXY( xp ); 1830 rpc_desc_t * desc = GET_PTR( xp ); 1831 1832 // get 
argument "inode" from client RPC descriptor 1833 inode = (vfs_inode_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1834 1835 // call local kernel function 1836 vfs_inode_destroy( inode ); 1837 1838 #if DEBUG_RPC_VFS_INODE_DESTROY 1839 cycle = (uint32_t)hal_get_cycles(); 1840 if( cycle > DEBUG_RPC_VFS_INODE_DESTROY ) 1841 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1842 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1843 #endif 1844 } 1845 1846 ///////////////////////////////////////////////////////////////////////////////////////// 1847 // [12] RPC_VFS_DENTRY_CREATE deprecated [AG] dec 2019 1848 ///////////////////////////////////////////////////////////////////////////////////////// 1849 void rpc_vfs_dentry_create_client( cxy_t cxy, 1850 uint32_t type, // in 1851 char * name, // in 1852 xptr_t * dentry_xp, // out 1853 error_t * error ) // out 1854 { 1855 #if DEBUG_RPC_VFS_DENTRY_CREATE 1856 thread_t * this = CURRENT_THREAD; 1857 uint32_t cycle = (uint32_t)hal_get_cycles(); 1858 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1859 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1860 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1861 #endif 1862 1863 uint32_t responses = 1; 1864 1865 // initialise RPC descriptor header 1866 rpc_desc_t rpc; 1867 rpc.index = RPC_VFS_DENTRY_CREATE; 1868 rpc.blocking = true; 1869 rpc.rsp = &responses; 1870 1871 // set input arguments in RPC descriptor 1872 rpc.args[0] = (uint64_t)type; 1873 rpc.args[1] = (uint64_t)(intptr_t)name; 1874 1875 // register RPC request in remote RPC fifo 1876 rpc_send( cxy , &rpc ); 1877 1878 // get output values from RPC descriptor 1879 *dentry_xp = (xptr_t)rpc.args[2]; 1880 *error = (error_t)rpc.args[3]; 1881 1882 #if DEBUG_RPC_VFS_DENTRY_CREATE 1883 cycle = (uint32_t)hal_get_cycles(); 1884 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1885 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1886 
__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1887 #endif 1888 } 1889 1890 ////////////////////////////////////////////// 1891 void rpc_vfs_dentry_create_server( xptr_t xp ) 1892 { 1893 #if DEBUG_RPC_VFS_DENTRY_CREATE 1894 thread_t * this = CURRENT_THREAD; 1895 uint32_t cycle = (uint32_t)hal_get_cycles(); 1896 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1897 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1898 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1899 #endif 1900 1901 uint32_t type; 1902 char * name; 1903 xptr_t dentry_xp; 1904 error_t error; 1905 char name_copy[CONFIG_VFS_MAX_NAME_LENGTH]; 1906 1907 // get client cluster identifier and pointer on RPC descriptor 1908 cxy_t client_cxy = GET_CXY( xp ); 1909 rpc_desc_t * desc = GET_PTR( xp ); 1910 1911 // get arguments "name", "type", and "parent" from client RPC descriptor 1912 type = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1913 name = (char *)(intptr_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 1914 1915 // makes a local copy of name 1916 hal_remote_strcpy( XPTR( local_cxy , name_copy ), 1917 XPTR( client_cxy , name ) ); 1918 1919 // call local kernel function 1920 error = vfs_dentry_create( type, 1921 name_copy, 1922 &dentry_xp ); 1923 // set output arguments 1924 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)dentry_xp ); 1925 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1926 1927 #if DEBUG_RPC_VFS_DENTRY_CREATE 1928 cycle = (uint32_t)hal_get_cycles(); 1929 if( cycle > DEBUG_RPC_VFS_DENTRY_CREATE ) 1930 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1931 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1932 #endif 1933 } 1934 1935 ///////////////////////////////////////////////////////////////////////////////////////// 1936 // [13] RPC_VFS_DENTRY_DESTROY deprecated [AG] dec 2019 1937 
///////////////////////////////////////////////////////////////////////////////////////// 1938 void rpc_vfs_dentry_destroy_client( cxy_t cxy, 1939 vfs_dentry_t * dentry ) 1940 { 1941 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1942 thread_t * this = CURRENT_THREAD; 1943 uint32_t cycle = (uint32_t)hal_get_cycles(); 1944 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1945 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1946 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1947 #endif 1948 1949 uint32_t responses = 1; 1950 1951 // initialise RPC descriptor header 1952 rpc_desc_t rpc; 1953 rpc.index = RPC_VFS_DENTRY_DESTROY; 1954 rpc.blocking = true; 1955 rpc.rsp = &responses; 1956 1957 // set input arguments in RPC descriptor 1958 rpc.args[0] = (uint64_t)(intptr_t)dentry; 1959 1960 // register RPC request in remote RPC fifo 1961 rpc_send( cxy , &rpc ); 1962 1963 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1964 cycle = (uint32_t)hal_get_cycles(); 1965 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1966 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1967 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1968 #endif 1969 } 1970 1971 /////////////////////////////////////////////// 1972 void rpc_vfs_dentry_destroy_server( xptr_t xp ) 1973 { 1974 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1975 thread_t * this = CURRENT_THREAD; 1976 uint32_t cycle = (uint32_t)hal_get_cycles(); 1977 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1978 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 1979 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 1980 #endif 1981 1982 vfs_dentry_t * dentry; 1983 1984 // get client cluster identifier and pointer on RPC descriptor 1985 cxy_t client_cxy = GET_CXY( xp ); 1986 rpc_desc_t * desc = GET_PTR( xp ); 1987 1988 // get arguments "dentry" from client RPC descriptor 1989 dentry = (vfs_dentry_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 1990 1991 // call local 
kernel function 1992 vfs_dentry_destroy( dentry ); 1993 1994 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1995 cycle = (uint32_t)hal_get_cycles(); 1996 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1997 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 1998 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 1999 #endif 2000 } 2001 2002 2003 ///////////////////////////////////////////////////////////////////////////////////////// 2004 // [14] RPC_VFS_FILE_CREATE deprecated [AG] dec 2019 2005 ///////////////////////////////////////////////////////////////////////////////////////// 2006 void rpc_vfs_file_create_client( cxy_t cxy, 2007 struct vfs_inode_s * inode, // in 2008 uint32_t file_attr, // in 2009 xptr_t * file_xp, // out 2010 error_t * error ) // out 2011 { 2012 #if DEBUG_RPC_VFS_FILE_CREATE 2013 thread_t * this = CURRENT_THREAD; 2014 uint32_t cycle = (uint32_t)hal_get_cycles(); 2015 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 2016 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2017 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2018 #endif 2019 2020 uint32_t responses = 1; 2021 2022 // initialise RPC descriptor header 2023 rpc_desc_t rpc; 2024 rpc.index = RPC_VFS_FILE_CREATE; 2025 rpc.blocking = true; 2026 rpc.rsp = &responses; 2027 2028 // set input arguments in RPC descriptor 2029 rpc.args[0] = (uint64_t)(intptr_t)inode; 2030 rpc.args[1] = (uint64_t)file_attr; 2031 2032 // register RPC request in remote RPC fifo 2033 rpc_send( cxy , &rpc ); 2034 2035 // get output values from RPC descriptor 2036 *file_xp = (xptr_t)rpc.args[2]; 2037 *error = (error_t)rpc.args[3]; 2038 2039 #if DEBUG_RPC_VFS_FILE_CREATE 2040 cycle = (uint32_t)hal_get_cycles(); 2041 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 2042 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2043 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 2044 #endif 2045 } 2046 2047 //////////////////////////////////////////// 2048 
void rpc_vfs_file_create_server( xptr_t xp ) 2049 { 2050 #if DEBUG_RPC_VFS_FILE_CREATE 2051 thread_t * this = CURRENT_THREAD; 2052 uint32_t cycle = (uint32_t)hal_get_cycles(); 2053 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 2054 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2055 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2056 #endif 2057 2058 uint32_t file_attr; 2059 vfs_inode_t * inode; 2060 xptr_t file_xp; 2061 error_t error; 2062 2063 // get client cluster identifier and pointer on RPC descriptor 2064 cxy_t client_cxy = GET_CXY( xp ); 2065 rpc_desc_t * desc = GET_PTR( xp ); 2066 2067 // get arguments "file_attr" and "inode" from client RPC descriptor 2068 inode = (vfs_inode_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 2069 file_attr = (uint32_t) hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 2070 2071 // call local kernel function 2072 error = vfs_file_create( inode, 2073 file_attr, 2074 &file_xp ); 2075 2076 // set output arguments 2077 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)file_xp ); 2078 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 2079 2080 #if DEBUG_RPC_VFS_FILE_CREATE 2081 cycle = (uint32_t)hal_get_cycles(); 2082 if( cycle > DEBUG_RPC_VFS_FILE_CREATE ) 2083 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2084 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 2085 #endif 2086 } 2087 2088 ///////////////////////////////////////////////////////////////////////////////////////// 2089 // [15] RPC_VFS_FILE_DESTROY deprecated [AG] dec 2019 2090 ///////////////////////////////////////////////////////////////////////////////////////// 2091 void rpc_vfs_file_destroy_client( cxy_t cxy, 2092 vfs_file_t * file ) 2093 { 2094 #if DEBUG_RPC_VFS_FILE_DESTROY 2095 thread_t * this = CURRENT_THREAD; 2096 uint32_t cycle = (uint32_t)hal_get_cycles(); 2097 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 2098 printk("\n[%s] 
thread[%x,%x] on core %d enter / cycle %d\n", 2099 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2100 #endif 2101 2102 uint32_t responses = 1; 2103 2104 // initialise RPC descriptor header 2105 rpc_desc_t rpc; 2106 rpc.index = RPC_VFS_FILE_DESTROY; 2107 rpc.blocking = true; 2108 rpc.rsp = &responses; 2109 2110 // set input arguments in RPC descriptor 2111 rpc.args[0] = (uint64_t)(intptr_t)file; 2112 2113 // register RPC request in remote RPC fifo 2114 rpc_send( cxy , &rpc ); 2115 2116 #if DEBUG_RPC_VFS_FILE_DESTROY 2117 cycle = (uint32_t)hal_get_cycles(); 2118 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 2119 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2120 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 2121 #endif 2122 } 2123 2124 ///////////////////////////////////////////// 2125 void rpc_vfs_file_destroy_server( xptr_t xp ) 2126 { 2127 #if DEBUG_RPC_VFS_FILE_DESTROY 2128 thread_t * this = CURRENT_THREAD; 2129 uint32_t cycle = (uint32_t)hal_get_cycles(); 2130 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 2131 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2132 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2133 #endif 2134 2135 vfs_file_t * file; 2136 2137 // get client cluster identifier and pointer on RPC descriptor 2138 cxy_t client_cxy = GET_CXY( xp ); 2139 rpc_desc_t * desc = GET_PTR( xp ); 2140 2141 // get arguments "dentry" from client RPC descriptor 2142 file = (vfs_file_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 2143 2144 // call local kernel function 2145 vfs_file_destroy( file ); 2146 2147 #if DEBUG_RPC_VFS_FILE_DESTROY 2148 cycle = (uint32_t)hal_get_cycles(); 2149 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 2150 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2151 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 2152 #endif 2153 } 2154 2155 
///////////////////////////////////////////////////////////////////////////////////////// 2156 // [16] RPC_VFS_FS_GET_DENTRY deprecated [AG] dec 2019 2157 ///////////////////////////////////////////////////////////////////////////////////////// 2158 void rpc_vfs_fs_new_dentry_client( cxy_t cxy, 2159 vfs_inode_t * parent_inode, // in 2160 char * name, // in 2161 xptr_t child_inode_xp, // in 2162 error_t * error ) // out 2163 { 2164 #if DEBUG_RPC_VFS_FS_NEW_DENTRY 2165 thread_t * this = CURRENT_THREAD; 2166 uint32_t cycle = (uint32_t)hal_get_cycles(); 2167 if( cycle > DEBUG_RPC_VFS_FS_NEW_DENTRY ) 2168 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2169 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2170 #endif 2171 2172 uint32_t responses = 1; 2173 2174 // initialise RPC descriptor header 2175 rpc_desc_t rpc; 2176 rpc.index = RPC_VFS_FS_NEW_DENTRY; 2177 rpc.blocking = true; 2178 rpc.rsp = &responses; 2179 2180 // set input arguments in RPC descriptor 2181 rpc.args[0] = (uint64_t)(intptr_t)parent_inode; 2182 rpc.args[1] = (uint64_t)(intptr_t)name; 2183 rpc.args[2] = (uint64_t)child_inode_xp; 2184 2185 // register RPC request in remote RPC fifo 2186 rpc_send( cxy , &rpc ); 2187 2188 // get output values from RPC descriptor 2189 *error = (error_t)rpc.args[3]; 2190 2191 #if DEBUG_RPC_VFS_FS_NEW_DENTRY 2192 cycle = (uint32_t)hal_get_cycles(); 2193 if( cycle > DEBUG_RPC_VFS_FS_NEW_DENTRY ) 2194 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2195 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2196 #endif 2197 } 2198 2199 ////////////////////////////////////////////// 2200 void rpc_vfs_fs_new_dentry_server( xptr_t xp ) 2201 { 2202 #if DEBUG_RPC_VFS_FS_NEW_DENTRY 2203 thread_t * this = CURRENT_THREAD; 2204 uint32_t cycle = (uint32_t)hal_get_cycles(); 2205 if( cycle > DEBUG_RPC_VFS_FS_NEW_DENTRY ) 2206 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2207 __FUNCTION__, 
this->process->pid, this->trdid, this->core->lid , cycle ); 2208 #endif 2209 2210 error_t error; 2211 vfs_inode_t * parent; 2212 xptr_t child_xp; 2213 char * name; 2214 2215 char name_copy[CONFIG_VFS_MAX_NAME_LENGTH]; 2216 2217 // get client cluster identifier and pointer on RPC descriptor 2218 cxy_t client_cxy = GET_CXY( xp ); 2219 rpc_desc_t * desc = GET_PTR( xp ); 2220 2221 // get arguments "parent", "name", and "child_xp" 2222 parent = (vfs_inode_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 2223 name = (char*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[1])); 2224 child_xp = (xptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[2])); 2225 2226 // get name local copy 2227 hal_remote_strcpy( XPTR( local_cxy , name_copy ) , 2228 XPTR( client_cxy , name ) ); 2229 2230 // call the kernel function 2231 error = vfs_fs_new_dentry( parent , name_copy , child_xp ); 2232 2233 // set output argument 2234 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 2235 2236 #if DEBUG_RPC_VFS_FS_NEW_DENTRY 2237 cycle = (uint32_t)hal_get_cycles(); 2238 if( cycle > DEBUG_RPC_VFS_FS_NEW_DENTRY ) 2239 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2240 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2241 #endif 2242 } 2243 2244 ///////////////////////////////////////////////////////////////////////////////////////// 2245 // [17] RPC_VFS_FS_ADD_DENTRY deprecated [AG] dec 2019 2246 ///////////////////////////////////////////////////////////////////////////////////////// 2247 void rpc_vfs_fs_add_dentry_client( cxy_t cxy, 2248 vfs_inode_t * parent, // in 2249 vfs_dentry_t * dentry, // in 2250 error_t * error ) // out 2251 { 2252 #if DEBUG_RPC_VFS_FS_ADD_DENTRY 2253 thread_t * this = CURRENT_THREAD; 2254 uint32_t cycle = (uint32_t)hal_get_cycles(); 2255 if( cycle > DEBUG_RPC_VFS_FS_ADD_DENTRY ) 2256 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2257 __FUNCTION__, this->process->pid, 
this->trdid, this->core->lid , cycle ); 2258 #endif 2259 2260 uint32_t responses = 1; 2261 2262 // initialise RPC descriptor header 2263 rpc_desc_t rpc; 2264 rpc.index = RPC_VFS_FS_ADD_DENTRY; 2265 rpc.blocking = true; 2266 rpc.rsp = &responses; 2267 2268 // set input arguments in RPC descriptor 2269 rpc.args[0] = (uint64_t)(intptr_t)parent; 2270 rpc.args[1] = (uint64_t)(intptr_t)dentry; 2271 2272 // register RPC request in remote RPC fifo 2273 rpc_send( cxy , &rpc ); 2274 2275 // get output values from RPC descriptor 2276 *error = (error_t)rpc.args[2]; 2277 2278 #if DEBUG_RPC_VFS_FS_ADD_DENTRY 2279 cycle = (uint32_t)hal_get_cycles(); 2280 if( cycle > DEBUG_RPC_VFS_FS_ADD_DENTRY ) 2281 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2282 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2283 #endif 2284 } 2285 2286 ////////////////////////////////////////////// 2287 void rpc_vfs_fs_add_dentry_server( xptr_t xp ) 2288 { 2289 #if DEBUG_RPC_VFS_FS_ADD_DENTRY 2290 thread_t * this = CURRENT_THREAD; 2291 uint32_t cycle = (uint32_t)hal_get_cycles(); 2292 if( cycle > DEBUG_RPC_VFS_FS_ADD_DENTRY ) 2293 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2294 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2295 #endif 2296 2297 error_t error; 2298 vfs_inode_t * parent; 2299 vfs_dentry_t * dentry; 2300 2301 // get client cluster identifier and pointer on RPC descriptor 2302 cxy_t client_cxy = GET_CXY( xp ); 2303 rpc_desc_t * desc = GET_PTR( xp ); 2304 2305 // get input arguments 2306 parent = (vfs_inode_t*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 2307 dentry = (vfs_dentry_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[1])); 2308 2309 // call the kernel function 2310 error = vfs_fs_add_dentry( parent , dentry ); 2311 2312 // set output argument 2313 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)error ); 2314 2315 #if DEBUG_RPC_VFS_FS_ADD_DENTRY 2316 cycle = 
(uint32_t)hal_get_cycles(); 2317 if( cycle > DEBUG_RPC_VFS_FS_ADD_DENTRY ) 2318 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2319 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2320 #endif 2321 } 2322 2323 ///////////////////////////////////////////////////////////////////////////////////////// 2324 // [18] RPC_VFS_FS_REMOVE_DENTRY deprecated [AG] dec 2019 2325 ///////////////////////////////////////////////////////////////////////////////////////// 2326 void rpc_vfs_fs_remove_dentry_client( cxy_t cxy, 2327 vfs_inode_t * parent, // in 2328 vfs_dentry_t * dentry, // in 2329 error_t * error ) // out 2330 { 2331 #if DEBUG_RPC_VFS_FS_REMOVE_DENTRY 2332 thread_t * this = CURRENT_THREAD; 2333 uint32_t cycle = (uint32_t)hal_get_cycles(); 2334 if( cycle > DEBUG_RPC_VFS_FS_REMOVE_DENTRY ) 2335 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2336 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2337 #endif 2338 2339 uint32_t responses = 1; 2340 2341 // initialise RPC descriptor header 2342 rpc_desc_t rpc; 2343 rpc.index = RPC_VFS_FS_REMOVE_DENTRY; 2344 rpc.blocking = true; 2345 rpc.rsp = &responses; 2346 2347 // set input arguments in RPC descriptor 2348 rpc.args[0] = (uint64_t)(intptr_t)parent; 2349 rpc.args[1] = (uint64_t)(intptr_t)dentry; 2350 2351 // register RPC request in remote RPC fifo 2352 rpc_send( cxy , &rpc ); 2353 2354 // get output values from RPC descriptor 2355 *error = (error_t)rpc.args[2]; 2356 2357 #if DEBUG_RPC_VFS_FS_REMOVE_DENTRY 2358 cycle = (uint32_t)hal_get_cycles(); 2359 if( cycle > DEBUG_RPC_VFS_FS_REMOVE_DENTRY ) 2360 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2361 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2362 #endif 2363 } 2364 2365 ///////////////////////////////////////////////// 2366 void rpc_vfs_fs_remove_dentry_server( xptr_t xp ) 2367 { 2368 #if DEBUG_RPC_VFS_FS_REMOVE_DENTRY 2369 thread_t * this = 
CURRENT_THREAD; 2370 uint32_t cycle = (uint32_t)hal_get_cycles(); 2371 if( cycle > DEBUG_RPC_VFS_FS_REMOVE_DENTRY ) 2372 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2373 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2374 #endif 2375 2376 error_t error; 2377 vfs_inode_t * parent; 2378 vfs_dentry_t * dentry; 2379 2380 // get client cluster identifier and pointer on RPC descriptor 2381 cxy_t client_cxy = GET_CXY( xp ); 2382 rpc_desc_t * desc = GET_PTR( xp ); 2383 2384 // get input arguments 2385 parent = (vfs_inode_t*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 2386 dentry = (vfs_dentry_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[1])); 2387 2388 // call the kernel function 2389 error = vfs_fs_remove_dentry( parent , dentry ); 2390 2391 // set output argument 2392 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)error ); 2393 2394 #if DEBUG_RPC_VFS_FS_REMOVE_DENTRY 2395 cycle = (uint32_t)hal_get_cycles(); 2396 if( cycle > DEBUG_RPC_VFS_FS_REMOVE_DENTRY ) 2397 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2398 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2399 #endif 2400 } 2401 2402 ///////////////////////////////////////////////////////////////////////////////////////// 2403 // [19] RPC_VFS_INODE_LOAD_ALL_PAGES deprecated [AG] dec 2019 2404 ///////////////////////////////////////////////////////////////////////////////////////// 2405 void rpc_vfs_inode_load_all_pages_client( cxy_t cxy, 2406 vfs_inode_t * inode, // in 2407 error_t * error ) // out 2408 { 2409 #if DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES 2410 thread_t * this = CURRENT_THREAD; 2411 uint32_t cycle = (uint32_t)hal_get_cycles(); 2412 if( cycle > DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES ) 2413 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2414 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2415 #endif 2416 2417 uint32_t responses = 1; 2418 2419 // 
initialise RPC descriptor header 2420 rpc_desc_t rpc; 2421 rpc.index = RPC_VFS_INODE_LOAD_ALL_PAGES; 2422 rpc.blocking = true; 2423 rpc.rsp = &responses; 2424 2425 // set input arguments in RPC descriptor 2426 rpc.args[0] = (uint64_t)(intptr_t)inode; 2427 2428 // register RPC request in remote RPC fifo 2429 rpc_send( cxy , &rpc ); 2430 2431 // get output values from RPC descriptor 2432 *error = (error_t)rpc.args[1]; 2433 2434 #if DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES 2435 cycle = (uint32_t)hal_get_cycles(); 2436 if( cycle > DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES ) 2437 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2438 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2439 #endif 2440 } 2441 2442 ///////////////////////////////////////////////////// 2443 void rpc_vfs_inode_load_all_pages_server( xptr_t xp ) 2444 { 2445 #if DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES 2446 thread_t * this = CURRENT_THREAD; 2447 uint32_t cycle = (uint32_t)hal_get_cycles(); 2448 if( cycle > DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES ) 2449 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2450 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2451 #endif 2452 2453 error_t error; 2454 vfs_inode_t * inode; 2455 2456 // get client cluster identifier and pointer on RPC descriptor 2457 cxy_t client_cxy = GET_CXY( xp ); 2458 rpc_desc_t * desc = GET_PTR( xp ); 2459 2460 // get input argument 2461 inode = (vfs_inode_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[0])); 2462 2463 // call the kernel function 2464 error = vfs_inode_load_all_pages( inode ); 2465 2466 // set output argument 2467 hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 2468 2469 #if DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES 2470 cycle = (uint32_t)hal_get_cycles(); 2471 if( cycle > DEBUG_RPC_VFS_INODE_LOAD_ALL_PAGES ) 2472 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2473 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , 
cycle ); 2474 #endif 2475 } 2476 2477 ///////////////////////////////////////////////////////////////////////////////////////// 2478 // [20] RPC_VMM_GET_VSEG deprecated [AG] sept 2019 2479 ///////////////////////////////////////////////////////////////////////////////////////// 2480 void rpc_vmm_get_vseg_client( cxy_t cxy, 2481 process_t * process, // in 2482 intptr_t vaddr, // in 2483 xptr_t * vseg_xp, // out 2484 error_t * error ) // out 2485 { 2486 #if DEBUG_RPC_VMM_GET_VSEG 2487 thread_t * this = CURRENT_THREAD; 2488 uint32_t cycle = (uint32_t)hal_get_cycles(); 2489 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 2490 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2491 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2492 #endif 2493 2494 uint32_t responses = 1; 2495 2496 // initialise RPC descriptor header 2497 rpc_desc_t rpc; 2498 rpc.index = RPC_VMM_GET_VSEG; 2499 rpc.blocking = true; 2500 rpc.rsp = &responses; 2501 2502 // set input arguments in RPC descriptor 2503 rpc.args[0] = (uint64_t)(intptr_t)process; 2504 rpc.args[1] = (uint64_t)vaddr; 2505 2506 // register RPC request in remote RPC fifo 2507 rpc_send( cxy , &rpc ); 2508 2509 // get output argument from rpc descriptor 2510 *vseg_xp = rpc.args[2]; 2511 *error = (error_t)rpc.args[3]; 2512 2513 #if DEBUG_RPC_VMM_GET_VSEG 2514 cycle = (uint32_t)hal_get_cycles(); 2515 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 2516 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2517 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2518 #endif 2519 } 2520 2521 ///////////////////////////////////////// 2522 void rpc_vmm_get_vseg_server( xptr_t xp ) 2523 { 2524 #if DEBUG_RPC_VMM_GET_VSEG 2525 thread_t * this = CURRENT_THREAD; 2526 uint32_t cycle = (uint32_t)hal_get_cycles(); 2527 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 2528 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2529 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2530 
#endif 2531 2532 process_t * process; 2533 intptr_t vaddr; 2534 vseg_t * vseg_ptr; 2535 xptr_t vseg_xp; 2536 error_t error; 2537 2538 // get client cluster identifier and pointer on RPC descriptor 2539 cxy_t client_cxy = GET_CXY( xp ); 2540 rpc_desc_t * desc = GET_PTR( xp ); 2541 2542 // get input argument from client RPC descriptor 2543 process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 2544 vaddr = (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 2545 2546 // call local kernel function 2547 error = vmm_get_vseg( process , vaddr , &vseg_ptr ); 2548 2549 // set output arguments to client RPC descriptor 2550 vseg_xp = XPTR( local_cxy , vseg_ptr ); 2551 hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)vseg_xp ); 2552 hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 2553 2554 #if DEBUG_RPC_VMM_GET_VSEG 2555 cycle = (uint32_t)hal_get_cycles(); 2556 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 2557 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2558 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2559 #endif 2560 } 2561 2562 ///////////////////////////////////////////////////////////////////////////////////////// 2563 // [22] RPC_KCM_ALLOC deprecated [AG] sept 2019 2564 ///////////////////////////////////////////////////////////////////////////////////////// 2565 void rpc_kcm_alloc_client( cxy_t cxy, 2566 uint32_t kmem_type, // in 2567 xptr_t * buf_xp ) // out 2568 { 2569 #if DEBUG_RPC_KCM_ALLOC 2570 thread_t * this = CURRENT_THREAD; 2571 uint32_t cycle = (uint32_t)hal_get_cycles(); 2572 if( cycle > DEBUG_RPC_KCM_ALLOC ) 2573 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2574 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2575 #endif 2576 2577 uint32_t responses = 1; 2578 2579 // initialise RPC descriptor header 2580 rpc_desc_t rpc; 2581 rpc.index = RPC_KCM_ALLOC; 2582 rpc.blocking = true; 2583 rpc.rsp = 
&responses; 2584 2585 // set input arguments in RPC descriptor 2586 rpc.args[0] = (uint64_t)kmem_type; 2587 2588 // register RPC request in remote RPC fifo 2589 rpc_send( cxy , &rpc ); 2590 2591 // get output arguments from RPC descriptor 2592 *buf_xp = (xptr_t)rpc.args[1]; 2593 2594 #if DEBUG_RPC_KCM_ALLOC 2595 cycle = (uint32_t)hal_get_cycles(); 2596 if( cycle > DEBUG_RPC_KCM_ALLOC ) 2597 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2598 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2599 #endif 2600 } 2601 2602 ////////////////////////////////////// 2603 void rpc_kcm_alloc_server( xptr_t xp ) 2604 { 2605 #if DEBUG_RPC_KCM_ALLOC 2606 thread_t * this = CURRENT_THREAD; 2607 uint32_t cycle = (uint32_t)hal_get_cycles(); 2608 if( cycle > DEBUG_RPC_KCM_ALLOC ) 2609 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2610 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2611 #endif 2612 2613 // get client cluster identifier and pointer on RPC descriptor 2614 cxy_t client_cxy = GET_CXY( xp ); 2615 rpc_desc_t * desc = GET_PTR( xp ); 2616 2617 // get input argument "kmem_type" from client RPC descriptor 2618 uint32_t kmem_type = (uint32_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 2619 2620 // allocates memory for kcm 2621 kmem_req_t req; 2622 req.type = kmem_type; 2623 req.flags = AF_ZERO; 2624 void * buf_ptr = kmem_alloc( &req ); 2625 2626 // set output argument 2627 xptr_t buf_xp = XPTR( local_cxy , buf_ptr ); 2628 hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)buf_xp ); 2629 2630 #if DEBUG_RPC_KCM_ALLOC 2631 cycle = (uint32_t)hal_get_cycles(); 2632 if( cycle > DEBUG_RPC_KCM_ALLOC ) 2633 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2634 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2635 #endif 2636 } 2637 2638 ///////////////////////////////////////////////////////////////////////////////////////// 2639 // [23] RPC_KCM_FREE 
deprecated [AG] sept 2019 2640 ///////////////////////////////////////////////////////////////////////////////////////// 2641 void rpc_kcm_free_client( cxy_t cxy, 2642 void * buf, // in 2643 uint32_t kmem_type ) // in 2644 { 2645 #if DEBUG_RPC_KCM_FREE 2646 thread_t * this = CURRENT_THREAD; 2647 uint32_t cycle = (uint32_t)hal_get_cycles(); 2648 if( cycle > DEBUG_RPC_KCM_FREE ) 2649 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2650 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2651 #endif 2652 2653 uint32_t responses = 1; 2654 2655 // initialise RPC descriptor header 2656 rpc_desc_t rpc; 2657 rpc.index = RPC_KCM_FREE; 2658 rpc.blocking = true; 2659 rpc.rsp = &responses; 2660 2661 // set input arguments in RPC descriptor 2662 rpc.args[0] = (uint64_t)(intptr_t)buf; 2663 rpc.args[1] = (uint64_t)kmem_type; 2664 2665 // register RPC request in remote RPC fifo 2666 rpc_send( cxy , &rpc ); 2667 2668 #if DEBUG_RPC_KCM_FREE 2669 cycle = (uint32_t)hal_get_cycles(); 2670 if( cycle > DEBUG_RPC_KCM_FREE ) 2671 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2672 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2673 #endif 2674 } 2675 2676 ///////////////////////////////////// 2677 void rpc_kcm_free_server( xptr_t xp ) 2678 { 2679 #if DEBUG_RPC_KCM_FREE 2680 thread_t * this = CURRENT_THREAD; 2681 uint32_t cycle = (uint32_t)hal_get_cycles(); 2682 if( cycle > DEBUG_RPC_KCM_FREE ) 2683 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2684 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2685 #endif 2686 2687 // get client cluster identifier and pointer on RPC descriptor 2688 cxy_t client_cxy = GET_CXY( xp ); 2689 rpc_desc_t * desc = GET_PTR( xp ); 2690 2691 // get input arguments "buf" and "kmem_type" from client RPC descriptor 2692 void * buf = (void *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 2693 uint32_t kmem_type = 
(uint32_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) ); 2694 2695 // releases memory 2696 kmem_req_t req; 2697 req.type = kmem_type; 2698 req.ptr = buf; 2699 kmem_free( &req ); 2700 2701 #if DEBUG_RPC_KCM_FREE 2702 cycle = (uint32_t)hal_get_cycles(); 2703 if( cycle > DEBUG_RPC_KCM_FREE ) 2704 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2705 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2706 #endif 2707 } 2708 2709 ///////////////////////////////////////////////////////////////////////////////////////// 2710 // [24] Marshaling functions attached to RPC_MAPPER_SYNC 2711 ///////////////////////////////////////////////////////////////////////////////////////// 2712 void rpc_mapper_sync_client( cxy_t cxy, 2713 struct mapper_s * mapper, 2714 error_t * error ) 2715 { 2716 #if DEBUG_RPC_MAPPER_SYNC 2717 thread_t * this = CURRENT_THREAD; 2718 uint32_t cycle = (uint32_t)hal_get_cycles(); 2719 if( cycle > DEBUG_RPC_MAPPER_SYNC ) 2720 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2721 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2722 #endif 2723 2724 uint32_t responses = 1; 2725 2726 // initialise RPC descriptor header 2727 rpc_desc_t rpc; 2728 rpc.index = RPC_MAPPER_SYNC; 2729 rpc.blocking = true; 2730 rpc.rsp = &responses; 2731 2732 // set input arguments in RPC descriptor 2733 rpc.args[0] = (uint64_t)(intptr_t)mapper; 2734 2735 // register RPC request in remote RPC fifo 2736 rpc_send( cxy , &rpc ); 2737 2738 // get output values from RPC descriptor 2739 *error = (error_t)rpc.args[1]; 2740 2741 #if DEBUG_RPC_MAPPER_SYNC 2742 cycle = (uint32_t)hal_get_cycles(); 2743 if( cycle > DEBUG_RPC_MAPPER_SYNC ) 2744 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2745 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2746 #endif 2747 } 2748 2749 //////////////////////////////////////// 2750 void rpc_mapper_sync_server( xptr_t xp ) 2751 { 2752 #if 
DEBUG_RPC_MAPPER_SYNC 2753 thread_t * this = CURRENT_THREAD; 2754 uint32_t cycle = (uint32_t)hal_get_cycles(); 2755 if( cycle > DEBUG_RPC_MAPPER_SYNC ) 2756 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2757 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2758 #endif 2759 2760 mapper_t * mapper; 2761 error_t error; 2762 2763 // get client cluster identifier and pointer on RPC descriptor 2764 cxy_t client_cxy = GET_CXY( xp ); 2765 rpc_desc_t * desc = GET_PTR( xp ); 2766 2767 // get arguments from client RPC descriptor 2768 mapper = (mapper_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) ); 2769 2770 // call local kernel function 2771 error = mapper_sync( mapper ); 2772 2773 // set output argument to client RPC descriptor 2774 hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 2775 2776 #if DEBUG_RPC_MAPPER_SYNC 2777 cycle = (uint32_t)hal_get_cycles(); 2778 if( cycle > DEBUG_RPC_MAPPER_SYNC ) 2779 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2780 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2781 #endif 2782 } 2783 2806 2784 */ 2785 2786 2787 2788 /* 2789 //////////////////////////////////////////////////// 2790 void rpc_fbf_display_client( cxy_t cxy, 2791 xptr_t window_xp, 2792 uint32_t cores, 2793 error_t * error ) 2794 { 2795 #if DEBUG_RPC_FBF_DISPLAY 2796 uint32_t cycle = (uint32_t)hal_get_cycles(); 2797 thread_t * this = CURRENT_THREAD; 2798 if( DEBUG_RPC_FBF_DISPLAY < cycle ) 2799 printk("\n[%s] thread[%x,%x] on core %d : enter / cycle %d\n", 2800 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 2801 #endif 2802 2803 uint32_t responses = 1; 2804 rpc_desc_t rpc; 2805 2806 // initialise RPC descriptor header 2807 rpc.index = RPC_FBF_DISPLAY; 2808 rpc.blocking = true; 2809 rpc.rsp = &responses; 2810 2811 // set input arguments in RPC descriptor 2812 rpc.args[0] = (uint64_t)window_xp; 2813 rpc.args[1] = (uint64_t)cores; 
2814 2815 // register RPC request in remote RPC fifo 2816 rpc_send( cxy , &rpc ); 2817 2818 // get output argument from RPC descriptor 2819 *error = (error_t)rpc.args[2]; 2820 2821 #if DEBUG_RPC_FBF_DISPLAY 2822 cycle = (uint32_t)hal_get_cycles(); 2823 if( DEBUG_RPC_FBF_DISPLAY < cycle ) 2824 printk("\n[%s] thread[%x,%x] on core %d : exit / cycle %d\n", 2825 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 2826 #endif 2827 } 2828 2829 //////////////////////////////////////// 2830 void rpc_fbf_display_server( xptr_t xp ) 2831 { 2832 // get client cluster identifier and pointer on RPC descriptor 2833 cxy_t client_cxy = GET_CXY( xp ); 2834 rpc_desc_t * desc = GET_PTR( xp ); 2835 2836 // get arguments from RPC descriptor 2837 xptr_t window_xp = (xptr_t )hal_remote_l64( XPTR(client_cxy , &desc->args[0]) ); 2838 uint32_t ncores = (uint32_t)hal_remote_l64( XPTR(client_cxy , &desc->args[1]) ); 2839 2840 #if DEBUG_RPC_FBF_DISPLAY 2841 uint32_t cycle = (uint32_t)hal_get_cycles(); 2842 thread_t * this = CURRENT_THREAD; 2843 if( DEBUG_RPC_FBF_DISPLAY < cycle ) 2844 printk("\n[%s] thread[%x,%x] on core %d : enter / cycle %d\n", 2845 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 2846 #endif 2847 2848 // call relevant kernel function 2849 error_t error = dev_fbf_parallel_display( window_xp , ncores ); 2850 2851 // WARNING : for parallel RPCs, the return error argument is shared 2852 hal_remote_atomic_or( XPTR( client_cxy , &desc->args[2] ) , error ); 2853 2854 #if DEBUG_RPC_FBF_DISPLAY 2855 cycle = (uint32_t)hal_get_cycles(); 2856 if( DEBUG_RPC_FBF_DISPLAY < cycle ) 2857 printk("\n[%s] thread[%x,%x] on core %d : exit / cycle %d\n", 2858 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 2859 #endif 2860 } 2861 */ 2862 2863 -
trunk/kernel/kern/rpc.h
r641 r657 2 2 * rpc.h - RPC (Remote Procedure Call) operations definition. 3 3 * 4 * Author Alain Greiner (2016,2017,2018 )4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 68 68 RPC_THREAD_USER_CREATE = 6, 69 69 RPC_THREAD_KERNEL_CREATE = 7, 70 RPC_ VFS_FS_UPDATE_DENTRY= 8,70 RPC_UNDEFINED_8 = 8, 71 71 RPC_PROCESS_SIGACTION = 9, 72 72 73 RPC_VFS_INODE_CREATE = 10, 74 RPC_VFS_INODE_DESTROY = 11, 75 RPC_VFS_DENTRY_CREATE = 12, 76 RPC_VFS_DENTRY_DESTROY = 13, 77 RPC_VFS_FILE_CREATE = 14, 78 RPC_VFS_FILE_DESTROY = 15, 79 RPC_VFS_FS_NEW_DENTRY = 16, 80 RPC_VFS_FS_ADD_DENTRY = 17, 81 RPC_VFS_FS_REMOVE_DENTRY = 18, 82 RPC_VFS_INODE_LOAD_ALL_PAGES = 19, 83 84 RPC_UNDEFINED_20 = 20, // 85 RPC_UNDEFINED_21 = 21, // 86 RPC_UNDEFINED_22 = 22, // 87 RPC_UNDEFINED_23 = 23, // 88 RPC_MAPPER_SYNC = 24, 89 RPC_VMM_RESIZE_VSEG = 25, 90 RPC_VMM_REMOVE_VSEG = 26, 91 RPC_VMM_CREATE_VSEG = 27, 92 RPC_VMM_SET_COW = 28, 93 RPC_UNDEFINED_29 = 29, // 94 95 RPC_MAX_INDEX = 30, 73 RPC_UNDEFINED_10 = 10, // 74 RPC_UNDEFINED_11 = 11, // 75 RPC_UNDEFINED_12 = 12, // 76 RPC_UNDEFINED_13 = 13, // 77 RPC_UNDEFINED_14 = 14, // 78 RPC_VMM_RESIZE_VSEG = 15, 79 RPC_VMM_REMOVE_VSEG = 16, 80 RPC_VMM_CREATE_VSEG = 17, 81 RPC_VMM_SET_COW = 18, 82 RPC_UNDEFINED_19 = 19, // 83 84 RPC_MAX_INDEX = 20, 96 85 } 97 86 rpc_index_t; … … 288 277 289 278 /*********************************************************************************** 290 * [8] The RPC_VFS_FS_UPDATE_DENTRY allows a client thread to request a remote 291 * cluster to update the <size> field of a directory entry in the mapper of a 292 * remote directory inode, identified by the <inode> local pointer. 293 * The target entry name is identified by the <dentry> local pointer. 294 *********************************************************************************** 295 * @ cxy : server cluster identifier. 296 * @ inode : [in] local pointer on remote directory inode. 
297 * @ dentry : [in] local pointer on remote dentry. 298 * @ size : [in] new size value. 299 * @ error : [out] error status (0 if success). 300 **********************************************************************************/ 301 void rpc_vfs_fs_update_dentry_client( cxy_t cxy, 302 struct vfs_inode_s * inode, 303 struct vfs_dentry_s * dentry, 304 uint32_t size, 305 error_t * error ); 306 307 void rpc_vfs_fs_update_dentry_server( xptr_t xp ); 279 * [8] undefined 280 **********************************************************************************/ 308 281 309 282 /*********************************************************************************** 310 283 * [9] The RPC_PROCESS_SIGACTION allows a client thread to request a remote cluster 311 284 * to execute a given sigaction, defined by the <action_type> for a given process, 312 * identified by the <pid> argument. 285 * identified by the <pid> argument. When this RPC is used in parallel mode, 286 * the rpc_process_sigaction_client() function is not used. 313 287 *********************************************************************************** 314 288 * @ cxy : server cluster identifier. … … 323 297 324 298 /*********************************************************************************** 325 * [10] The RPC_VFS_INODE_CREATE creates an inode and the associated mapper in a 326 * remote cluster. The parent dentry must have been previously created. 327 * It returns an extended pointer on the created inode. 328 *********************************************************************************** 329 * @ cxy : server cluster identifier. 330 * @ fs_type : [in] file system type. 331 * @ inode_type : [in] file system type. 332 * @ attr : [in] inode attributes. 333 * @ rights : [in] access rights 334 * @ uid : [in] user ID 335 * @ gid : [in] group ID 336 * @ inode_xp : [out] buffer for extended pointer on created inode. 337 * @ error : [out] error status (0 if success). 
338 **********************************************************************************/ 339 void rpc_vfs_inode_create_client( cxy_t cxy, 340 uint32_t fs_type, 341 uint32_t attr, 342 uint32_t rights, 343 uint32_t uid, 344 uint32_t gid, 345 xptr_t * inode_xp, 346 error_t * error ); 347 348 void rpc_vfs_inode_create_server( xptr_t xp ); 299 * [10] undefined 300 **********************************************************************************/ 349 301 350 302 /*********************************************************************************** 351 * [11] The RPC_VFS_INODE_DESTROY releases memory allocated for an inode descriptor 352 * and for the associated mapper in a remote cluster. 353 *********************************************************************************** 354 * @ cxy : server cluster identifier 355 * @ inode : [in] local pointer on inode. 356 **********************************************************************************/ 357 void rpc_vfs_inode_destroy_client( cxy_t cxy, 358 struct vfs_inode_s * inode ); 359 360 void rpc_vfs_inode_destroy_server( xptr_t xp ); 303 * [11] undefined 304 **********************************************************************************/ 361 305 362 306 /*********************************************************************************** 363 * [12] The RPC_VFS_DENTRY_CREATE creates a dentry in a remote cluster. 364 * It returns an extended pointer on the created dentry. 365 *********************************************************************************** 366 * @ cxy : server cluster identifier 367 * @ type : [in] file system type. 368 * @ name : [in] directory entry name. 369 * @ dentry_xp : [out] buffer for extended pointer on created dentry. 370 * @ error : [out] error status (0 if success). 
371 **********************************************************************************/ 372 void rpc_vfs_dentry_create_client( cxy_t cxy, 373 uint32_t type, 374 char * name, 375 xptr_t * dentry_xp, 376 error_t * error ); 377 378 void rpc_vfs_dentry_create_server( xptr_t xp ); 307 * [12] undefined 308 **********************************************************************************/ 379 309 380 310 /*********************************************************************************** 381 * [13] The RPC_VFS_DENTRY_DESTROY remove a denfry from the parent inode XHTAB, 382 * and releases memory allocated for the dentry descriptor in a remote cluster. 383 *********************************************************************************** 384 * @ cxy : server cluster identifier 385 * @ dentry : [in] local pointer on dentry. 386 **********************************************************************************/ 387 void rpc_vfs_dentry_destroy_client( cxy_t cxy, 388 struct vfs_dentry_s * dentry ); 389 390 void rpc_vfs_dentry_destroy_server( xptr_t xp ); 391 392 /*********************************************************************************** 393 * [14] The RPC_VFS_FILE_CREATE creates a file descriptor in a remote cluster. 394 * It returns an extended pointer on the created file structure. 395 *********************************************************************************** 396 * @ cxy : server cluster identifier 397 * @ inode : [in] local pointer on parent inode. 398 * @ file_attr : [in] new file attributes. 399 * @ file_xp : [out] buffer for extended pointer on created file. 400 * @ error : [out] error status (0 if success). 
401 **********************************************************************************/ 402 void rpc_vfs_file_create_client( cxy_t cxy, 403 struct vfs_inode_s * inode, 404 uint32_t file_attr, 405 xptr_t * file_xp, 406 error_t * error ); 407 408 void rpc_vfs_file_create_server( xptr_t xp ); 409 410 /*********************************************************************************** 411 * [15] The RPC_VFS_FILE_DESTROY releases memory allocated for a file descriptor 412 * in a remote cluster. 413 *********************************************************************************** 414 * @ cxy : server cluster identifier 415 * @ file : [in] local pointer on file. 416 **********************************************************************************/ 417 void rpc_vfs_file_destroy_client( cxy_t cxy, 418 struct vfs_file_s * file ); 419 420 void rpc_vfs_file_destroy_server( xptr_t xp ); 421 422 /*********************************************************************************** 423 * [16] The RPC_VFS_FS_GET_DENTRY calls the vfs_fs_new_dentry() 424 * function in a remote cluster containing a parent inode directory to scan the 425 * associated mapper, find a directory entry identified by its name, and update 426 * both the - existing - child inode and dentry. 427 *********************************************************************************** 428 * @ cxy : server cluster identifier 429 * @ parent_inode : [in] local pointer on parent inode. 430 * @ name : [in] local pointer on child name (in client cluster). 431 * @ child_inode_xp : [in] extended pointer on child inode (in another cluster). 432 * @ error : [out] error status (0 if success). 
433 **********************************************************************************/ 434 void rpc_vfs_fs_new_dentry_client( cxy_t cxy, 435 struct vfs_inode_s * parent_inode, 436 char * name, 437 xptr_t child_inode_xp, 438 error_t * error ); 439 440 void rpc_vfs_fs_new_dentry_server( xptr_t xp ); 441 442 /*********************************************************************************** 443 * [17] The RPC_VFS_FS_ADD_DENTRY calls the vfs_fs_add_dentry() function in a 444 * remote cluster containing a directory inode and mapper, to add a new dentry 445 * in the mapper of this directory. 446 *********************************************************************************** 447 * @ cxy : server cluster identifier 448 * @ parent : [in] local pointer on directory inode. 449 * @ dentry : [in] local pointer on dentry. 450 * @ error : [out] error status (0 if success). 451 **********************************************************************************/ 452 void rpc_vfs_fs_add_dentry_client( cxy_t, 453 struct vfs_inode_s * parent, 454 struct vfs_dentry_s * dentry, 455 error_t * error ); 456 457 void rpc_vfs_fs_add_dentry_server( xptr_t xp ); 458 459 /*********************************************************************************** 460 * [18] The RPC_VFS_FS_REMOVE_DENTRY calls the vfs_fs_remove_dentry() function in a 461 * remote cluster containing a directory inode and mapper, to remove a dentry from 462 * the mapper of this directory. 463 *********************************************************************************** 464 * @ cxy : server cluster identifier 465 * @ parent : [in] local pointer on directory inode. 466 * @ dentry : [in] local pointer on dentry. 467 * @ error : [out] error status (0 if success). 
468 **********************************************************************************/ 469 void rpc_vfs_fs_remove_dentry_client( cxy_t, 470 struct vfs_inode_s * parent, 471 struct vfs_dentry_s * dentry, 472 error_t * error ); 473 474 void rpc_vfs_fs_remove_dentry_server( xptr_t xp ); 475 476 /*********************************************************************************** 477 * [19] The RPC_VFS_INODE_LOAD_ALL_PAGES calls the vfs_inode_load_all_pages() 478 * function a remote cluster containing an inode to load all pages in the 479 * associated mapper. 480 *********************************************************************************** 481 * @ cxy : server cluster identifier 482 * @ inode : [in] local pointer on inode in server cluster. 483 * @ error : [out] error status (0 if success). 484 **********************************************************************************/ 485 void rpc_vfs_inode_load_all_pages_client( cxy_t cxy, 486 struct vfs_inode_s * inode, 487 error_t * error ); 488 489 void rpc_vfs_inode_load_all_pages_server( xptr_t xp ); 490 491 /*********************************************************************************** 492 * [20] undefined 493 **********************************************************************************/ 494 495 /*********************************************************************************** 496 * [21] undefined 497 **********************************************************************************/ 498 499 /*********************************************************************************** 500 * [22] undefined 501 **********************************************************************************/ 502 503 /*********************************************************************************** 504 * [23] undefined 505 **********************************************************************************/ 506 507 /*********************************************************************************** 508 * [24] The RPC_MAPPER_SYNC 
allows a client thread to synchronize on disk 509 * all dirty pages of a remote mapper. 510 *********************************************************************************** 511 * @ cxy : server cluster identifier. 512 * @ mapper : [in] local pointer on mapper in server cluster. 513 * @ error : [out] error status (0 if success). 514 **********************************************************************************/ 515 void rpc_mapper_sync_client( cxy_t cxy, 516 struct mapper_s * mapper, 517 error_t * error ); 518 519 void rpc_mapper_sync_server( xptr_t xp ); 520 521 /*********************************************************************************** 522 * [25] The RPC_VMM_RESIZE_VSEG allows a client thread to request a remote cluster 311 * [13] undefined 312 **********************************************************************************/ 313 314 /*********************************************************************************** 315 * [14] undefined 316 **********************************************************************************/ 317 318 /*********************************************************************************** 319 * [15] The RPC_VMM_RESIZE_VSEG allows a client thread to request a remote cluster 523 320 * to resize a vseg identified by the <base> argument in a process descriptor 524 321 * identified by the <pid> argument, as defined by the <new_base> and <new_size> … … 540 337 541 338 /*********************************************************************************** 542 * [ 26] The RPC_VMM_REMOVE_VSEG allows a client thread to request a remote cluster339 * [16] The RPC_VMM_REMOVE_VSEG allows a client thread to request a remote cluster 543 340 * to delete a vseg identified by the <vaddr> argument in a process descriptor 544 341 * identified by the <pid> argument. 
… … 556 353 557 354 /*********************************************************************************** 558 * [ 27] The RPC_VMM_CREATE_VSEG allows a client thread to request the remote355 * [17] The RPC_VMM_CREATE_VSEG allows a client thread to request the remote 559 356 * reference cluster of a given process to allocate and register in the reference 560 357 * process VMM a new vseg descriptor. … … 587 384 588 385 /*********************************************************************************** 589 * [ 28] The RPC_VMM_SET_COW allows a client thread to request the remote reference386 * [18] The RPC_VMM_SET_COW allows a client thread to request the remote reference 590 387 * cluster to set the COW flag and reset the WRITABLE flag of all GPT entries for 591 388 * the DATA, MMAP and REMOTE vsegs of process identified by the <process> argument. … … 602 399 603 400 /*********************************************************************************** 604 * [ 29] undefined401 * [19] undefined 605 402 **********************************************************************************/ 606 403 -
trunk/kernel/kern/thread.h
r651 r657 3 3 * 4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018,2019 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 81 81 **************************************************************************************/ 82 82 83 #define THREAD_BLOCKED_GLOBAL 0x0001 /*! thread deactivated / wait activation */ 84 #define THREAD_BLOCKED_IO 0x0002 /*! thread wait IO operation completion */ 85 #define THREAD_BLOCKED_MAPPER 0x0004 /*! thread wait mapper */ 86 #define THREAD_BLOCKED_EXIT 0x0008 /*! thread blocked in join / wait exit */ 87 #define THREAD_BLOCKED_JOIN 0x0010 /*! thread blocked in exit / wait join */ 88 #define THREAD_BLOCKED_SEM 0x0020 /*! thread wait semaphore */ 89 #define THREAD_BLOCKED_PAGE 0x0040 /*! thread wait page access */ 90 #define THREAD_BLOCKED_IDLE 0x0080 /*! thread RPC wait RPC_FIFO non empty */ 91 #define THREAD_BLOCKED_USERSYNC 0x0100 /*! thread wait (cond/mutex/barrier) */ 92 #define THREAD_BLOCKED_RPC 0x0200 /*! thread wait RPC completion */ 93 #define THREAD_BLOCKED_ISR 0x0400 /*! thread DEV wait ISR */ 94 #define THREAD_BLOCKED_WAIT 0x0800 /*! thread wait child process termination */ 95 #define THREAD_BLOCKED_LOCK 0x1000 /*! thread wait queuelock or rwlock */ 83 #define THREAD_BLOCKED_GLOBAL 0x0001 /*! ANY : deactivated / wait activation */ 84 #define THREAD_BLOCKED_IO 0x0002 /*! USR : wait IO operation completion */ 85 #define THREAD_BLOCKED_MAPPER 0x0004 /*! ??? : wait mapper */ 86 #define THREAD_BLOCKED_EXIT 0x0008 /*! USR : blocked in join / wait exit */ 87 #define THREAD_BLOCKED_JOIN 0x0010 /*! USR : blocked in exit / wait join */ 88 #define THREAD_BLOCKED_SEM 0x0020 /*! USR : wait semaphore */ 89 #define THREAD_BLOCKED_PAGE 0x0040 /*! ??? : wait page access */ 90 #define THREAD_BLOCKED_IDLE 0x0080 /*! RPC : RPC_FIFO non empty */ 91 #define THREAD_BLOCKED_USERSYNC 0x0100 /*! 
USR : wait cond / mutex / barrier */ 92 #define THREAD_BLOCKED_RPC 0x0200 /*! ANY : RPC completion */ 93 #define THREAD_BLOCKED_ISR 0x0400 /*! DEV : wait hardware IRQ */ 94 #define THREAD_BLOCKED_WAIT 0x0800 /*! USR : wait child process termination */ 95 #define THREAD_BLOCKED_LOCK 0x1000 /*! ANY : wait queuelock or rwlock */ 96 #define THREAD_BLOCKED_CLIENT 0x2000 /*! DEV : client threads queue non empty */ 96 97 97 98 /*************************************************************************************** … … 190 191 list_entry_t wait_list; /*! member of a local waiting queue */ 191 192 xlist_entry_t wait_xlist; /*! member of a trans-cluster waiting queue */ 193 xlist_entry_t tmp_xlist; /*! member of a trans-cluster kleenex queue */ 192 194 193 195 uint32_t busylocks; /*! number of taken busylocks */ -
trunk/kernel/kernel_config.h
r656 r657 61 61 #define DEBUG_DEV_PIC 0 62 62 63 #define DEBUG_DEVFS_GLOBAL_INIT 0 63 #define DEBUG_DEVFS_GLOBAL_INIT 0 64 64 #define DEBUG_DEVFS_LOCAL_INIT 0 65 65 #define DEBUG_DEVFS_MOVE 0 … … 121 121 122 122 #define DEBUG_MAPPER_GET_PAGE 0 123 #define DEBUG_MAPPER_GET_FAT_PAGE 0 123 124 #define DEBUG_MAPPER_HANDLE_MISS 0 124 125 #define DEBUG_MAPPER_MOVE_KERNEL 0 … … 152 153 #define DEBUG_RPC_SERVER_GENERIC 0 153 154 154 #define DEBUG_RPC_ MAPPER_MOVE_USER0155 #define DEBUG_RPC_FBF_DISPLAY 0 155 156 #define DEBUG_RPC_PROCESS_MAKE_FORK 0 156 157 #define DEBUG_RPC_PROCESS_SIGACTION 0 157 158 #define DEBUG_RPC_THREAD_USER_CREATE 0 158 159 #define DEBUG_RPC_THREAD_KERNEL_CREATE 0 159 #define DEBUG_RPC_VFS_DENTRY_CREATE 0 160 #define DEBUG_RPC_VFS_DENTRY_DESTROY 0 161 #define DEBUG_RPC_VFS_DEVICE_GET_DENTRY 0 162 #define DEBUG_RPC_VFS_FILE_CREATE 0 163 #define DEBUG_RPC_VFS_FILE_DESTROY 0 164 #define DEBUG_RPC_VFS_FS_NEW_DENTRY 0 165 #define DEBUG_RPC_VFS_FS_ADD_DENTRY 0 166 #define DEBUG_RPC_VFS_INODE_CREATE 0 167 #define DEBUG_RPC_VFS_INODE_DESTROY 0 160 #define DEBUG_RPC_USER_DIR_CREATE 0 161 #define DEBUG_RPC_USER_DIR_DESTROY 0 168 162 #define DEBUG_RPC_VMM_CREATE_VSEG 0 169 #define DEBUG_RPC_VMM_ GET_PTE0170 #define DEBUG_RPC_VMM_ GET_VSEG0171 #define DEBUG_RPC_VMM_ DELETE_VSEG0163 #define DEBUG_RPC_VMM_RESIZE_VSEG 0 164 #define DEBUG_RPC_VMM_REMOVE_VSEG 0 165 #define DEBUG_RPC_VMM_SET_COW 0 172 166 173 167 #define DEBUG_RWLOCK_TYPE 0 // lock type 0 is undefined => no debug … … 246 240 #define DEBUG_VFS_FILE_CREATE 0 247 241 #define DEBUG_VFS_GET_PATH 0 248 #define DEBUG_VFS_INODE_CREATE 0 242 #define DEBUG_VFS_INODE_CREATE 0 249 243 #define DEBUG_VFS_INODE_LOAD_ALL 0 250 244 #define DEBUG_VFS_KERNEL_MOVE 0 … … 290 284 #define LOCK_VMM_STACK 3 // local (B) protect VMM stack vseg allocator 291 285 #define LOCK_VMM_MMAP 4 // local (B) protect VMM mmap vseg allocator 292 #define LOCK_ VFS_CTX 5 // local (B) protect vfs contextstate293 #define LOCK_K CM_STATE 6 // 
local (B) protect KCM allocator state294 #define LOCK_ KHM_STATE 7 // local (B) protect KHM allocatorstate295 #define LOCK_HTAB_STATE 8 // local (B) protect a local htab state 296 286 #define LOCK_KCM_STATE 5 // local (B) protect KCM allocator state 287 #define LOCK_KHM_STATE 6 // local (B) protect KHM allocator state 288 #define LOCK_HTAB_STATE 7 // local (B) protect a local htab state 289 290 #define LOCK_VFS_CTX 8 // remote (B) protect vfs context state 297 291 #define LOCK_PPM_FREE 9 // remote (B) protect PPM allocator free_pages lists 298 292 #define LOCK_THREAD_JOIN 10 // remote (B) protect join/exit between two threads 299 #define LOCK_XHTAB_STATE 11 // remote (B) protect 293 #define LOCK_XHTAB_STATE 11 // remote (B) protect a distributed xhtab state 300 294 #define LOCK_CHDEV_QUEUE 12 // remote (B) protect chdev threads waiting queue 301 295 #define LOCK_CHDEV_TXT0 13 // remote (B) protect access to kernel terminal TXT0 … … 325 319 #define LOCK_VFS_FILE 33 // remote (RW) protect file descriptor state 326 320 #define LOCK_VFS_MAIN 34 // remote (RW) protect vfs traversal (in root inode) 327 #define LOCK_FATFS_FAT 35 // remote (RW) protect exclusive access to the FATFS FAT 321 #define LOCK_FATFS_FAT 35 // remote (RW) protect exclusive access to the VFS FAT 322 #define LOCK_FBF_WINDOWS 36 // remote (RW) protect FBF windows set 328 323 329 324 //////////////////////////////////////////////////////////////////////////////////////////// … … 383 378 //////////////////////////////////////////////////////////////////////////////////////////// 384 379 380 #define CONFIG_VFS_ROOT_CXY 0 // VFS_ROOT and FAT mapper cluster 381 385 382 #define CONFIG_VFS_MAX_INODES 128 // max number of inodes per cluster 386 383 #define CONFIG_VFS_MAX_NAME_LENGTH 56 // dentry name max length (bytes) … … 397 394 398 395 //////////////////////////////////////////////////////////////////////////////////////////// 396 // FBF WINDOWS 397 
//////////////////////////////////////////////////////////////////////////////////////////// 398 399 #define CONFIG_FBF_WINDOWS_MAX_NR 64 // max number of windows 400 #define CONFIG_FBF_WINDOWS_MAX_WIDTH 1024 // max number of pixels in FBF line 401 #define CONFIG_FBF_WINDOWS_MAX_HEIGHT 1024 // max number of lines in FBF 402 403 //////////////////////////////////////////////////////////////////////////////////////////// 399 404 // DQDT 400 405 //////////////////////////////////////////////////////////////////////////////////////////// 401 406 402 407 #define CONFIG_DQDT_LEVELS_NR 5 403 #define CONFIG_DQDT_TICKS_PER_QUANTUM 1 // number of ticks between updates404 408 405 409 //////////////////////////////////////////////////////////////////////////////////////////// … … 414 418 //////////////////////////////////////////////////////////////////////////////////////////// 415 419 416 #define CONFIG_SCHED_TICK_MS_PERIOD 10000 // number of milliseconds per period417 #define CONFIG_SCHED_TICKS_PER_QUANTUM 1 // number of ticks between scheduling418 #define CONFIG_SCHED_MAX_THREADS_NR 32 // max number of threads per core419 #define CONFIG_SCHED_IDLE_MODE_SLEEP 0 // idle thread use sleep mode if non 0420 #define CONFIG_SCHED_TICK_MS_PERIOD 10000 // number of milliseconds per period 421 #define CONFIG_SCHED_TICKS_PER_QUANTUM 1 // number of ticks between scheduling 422 #define CONFIG_SCHED_MAX_THREADS_NR 32 // max number of threads per core 423 #define CONFIG_SCHED_IDLE_MODE_SLEEP 0 // idle thread use sleep mode if non 0 420 424 421 425 //////////////////////////////////////////////////////////////////////////////////////////// … … 423 427 //////////////////////////////////////////////////////////////////////////////////////////// 424 428 425 #define CONFIG_THREADS_MAX_PER_CLUSTER 32 // max threads per cluster per process426 #define CONFIG_THREAD_DESC_SIZE 0x4000 // thread desc size (with kernel stack)427 #define CONFIG_THREAD_DESC_ORDER 2 // ln( number of 4K pages )429 #define 
CONFIG_THREADS_MAX_PER_CLUSTER 32 // max threads per cluster per process 430 #define CONFIG_THREAD_DESC_SIZE 0x4000 // thread desc size (with kernel stack) 431 #define CONFIG_THREAD_DESC_ORDER 2 // ln( number of 4K pages ) 428 432 429 433 //////////////////////////////////////////////////////////////////////////////////////////// … … 433 437 #define CONFIG_REMOTE_FIFO_SLOTS 16 434 438 #define CONFIG_REMOTE_FIFO_MAX_ITERATIONS 1024 435 #define CONFIG_RPC_THREADS_MAX 4 // max number of RPC threads per core439 #define CONFIG_RPC_THREADS_MAX 4 // max number of RPC threads per core 436 440 437 441 //////////////////////////////////////////////////////////////////////////////////////////// … … 439 443 //////////////////////////////////////////////////////////////////////////////////////////// 440 444 441 #define CONFIG_VMM_VSPACE_SIZE 0x100000 // virtual space : 4 Gbytes442 443 #define CONFIG_VMM_UTILS_BASE 0x000200 // UTILS zone base : 2 Mbytes444 #define CONFIG_VMM_ELF_BASE 0x000400 // ELF zone base : 4 Mbytes445 #define CONFIG_VMM_HEAP_BASE 0x040000 // HEAP zone base : 32 Mbytes446 #define CONFIG_VMM_STACK_BASE 0x0C0000 // STACK zone base : 3 Gbytes447 448 #define CONFIG_VMM_ARGS_SIZE 0x000004 // args vseg size : 16 Kbytes449 #define CONFIG_VMM_ENVS_SIZE 0x000008 // envs vseg size : 32 Kbytes450 #define CONFIG_VMM_STACK_SIZE 0x001000 // single stack vseg size : 16 Mbytes445 #define CONFIG_VMM_VSPACE_SIZE 0x100000 // virtual space : 4 Gbytes 446 447 #define CONFIG_VMM_UTILS_BASE 0x000200 // UTILS zone base : 2 Mbytes 448 #define CONFIG_VMM_ELF_BASE 0x000400 // ELF zone base : 4 Mbytes 449 #define CONFIG_VMM_HEAP_BASE 0x040000 // HEAP zone base : 32 Mbytes 450 #define CONFIG_VMM_STACK_BASE 0x0C0000 // STACK zone base : 3 Gbytes 451 452 #define CONFIG_VMM_ARGS_SIZE 0x000004 // args vseg size : 16 Kbytes 453 #define CONFIG_VMM_ENVS_SIZE 0x000008 // envs vseg size : 32 Kbytes 454 #define CONFIG_VMM_STACK_SIZE 0x001000 // single stack vseg size : 16 Mbytes 451 455 452 456 
#define CONFIG_VMM_HEAP_MAX_ORDER 18 // max size of MMAP vseg : 1 Gbytes … … 455 459 //////////////////////////////////////////////////////////////////////////////////////////// 456 460 457 #define CONFIG_PPM_PAGE_SIZE 4096 // physical page size (bytes)458 #define CONFIG_PPM_PAGE_SHIFT 12 // physical page shift (bits)459 #define CONFIG_PPM_PAGE_MASK 0x00000FFF // physical page mask460 #define CONFIG_PPM_MAX_ORDER 16 // ln(total number of pages per cluster)461 #define CONFIG_PPM_HEAP_ORDER 10 // ln(number of heap pages per cluster)462 #define CONFIG_PPM_MAX_RSVD 32 // max reserved zones on the machine461 #define CONFIG_PPM_PAGE_SIZE 4096 // physical page size (bytes) 462 #define CONFIG_PPM_PAGE_SHIFT 12 // physical page shift (bits) 463 #define CONFIG_PPM_PAGE_MASK 0x00000FFF // physical page mask 464 #define CONFIG_PPM_MAX_ORDER 16 // ln(total number of pages per cluster) 465 #define CONFIG_PPM_HEAP_ORDER 10 // ln(number of heap pages per cluster) 466 #define CONFIG_PPM_MAX_RSVD 32 // max reserved zones on the machine 463 467 464 468 #define CONFIG_PPM_PAGE_ALIGNED __attribute__((aligned(CONFIG_PPM_PAGE_SIZE))) … … 470 474 #define CONFIG_INSTRUMENTATION_SYSCALLS 0 471 475 #define CONFIG_INSTRUMENTATION_PGFAULTS 0 472 #define CONFIG_INSTRUMENTATION_FOOTPRINT 0 476 #define CONFIG_INSTRUMENTATION_FOOTPRINT 1 477 #define CONFIG_INSTRUMENTATION_CHDEVS 0 473 478 #define CONFIG_INSTRUMENTATION_GPT 0 474 479 -
trunk/kernel/libk/bits.c
r635 r657 1 1 /* 2 * bits.c - bit s manipulation functionsimplementation2 * bits.c - bitmap API implementation 3 3 * 4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018,2019 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 26 26 #include <bits.h> 27 27 28 ////////////////////////////////////////////////////////////////////////////// 29 ////////////// local access functions /////////////////////////////// 30 ////////////////////////////////////////////////////////////////////////////// 31 28 32 //////////////////////////////////// 29 33 void bitmap_init( bitmap_t * bitmap, … … 42 46 uint32_t index ) 43 47 { 44 uint32_t word = index / 32;45 uint32_t bit = index % 32;48 uint32_t word = index >> 5; 49 uint32_t bit = index & 0x1F; 46 50 47 51 bitmap[word] |= ( 1 << bit ); … … 52 56 uint32_t index ) 53 57 { 54 uint32_t word = index / 32;55 uint32_t bit = index % 32;58 uint32_t word = index >> 5; 59 uint32_t bit = index & 0x1F; 56 60 57 61 bitmap[word] &= ~( 1 << bit ); … … 62 66 uint32_t index ) 63 67 { 64 uint32_t word = index / 32;65 uint32_t bit = index % 32;68 uint32_t word = index >> 5; 69 uint32_t bit = index & 0x1F; 66 70 67 71 return (bitmap[word] & ( 1 << bit )) != 0; 68 72 } 73 74 ///////////////////////////////////////// 75 uint32_t bitmap_alloc( bitmap_t * bitmap, 76 uint32_t size ) 77 { 78 uint32_t max_word; 79 uint32_t max_bit; 80 uint32_t word; 81 uint32_t bit; 82 83 if( size ) 84 { 85 max_word = ( (size-1) >>5 ) + 1; 86 87 for( word = 0 ; word < max_word ; word++ ) 88 { 89 max_bit = (word == (max_word - 1)) ? 
(size & 0x1F) : 32; 90 91 if(bitmap[word] != 0XFFFFFFFF) 92 { 93 for(bit = 0 ; bit < max_bit ; bit++) 94 { 95 if( (bitmap[word] & (1 << bit)) == 0 ) 96 { 97 bitmap[word] |= (1 << bit); 98 return (word*32 + bit); 99 } 100 } 101 } 102 } 103 } 104 105 return -1; 106 107 } // end bitmap_alloc() 69 108 70 109 ////////////////////////////////////////// … … 207 246 { 208 247 uint32_t max_word; 248 uint32_t max_bit; 209 249 uint32_t word; 210 250 uint32_t bit; … … 216 256 for( word = 0 ; word < max_word ; word++ ) 217 257 { 258 max_bit = (word == (max_word - 1)) ? (size & 0x1F) : 32; 259 218 260 if(bitmap[word] != 0) 219 261 { 220 for(bit = 0 ; bit < 32; bit++)262 for(bit = 0 ; bit < max_bit ; bit++) 221 263 { 222 264 if( bitmap[word] & (1 << bit) ) return (word*32 + bit); … … 235 277 { 236 278 uint32_t max_word; 279 uint32_t max_bit; 237 280 uint32_t word; 238 281 uint32_t bit; … … 244 287 for( word = 0 ; word < max_word ; word++ ) 245 288 { 289 max_bit = (word == (max_word - 1)) ? (size & 0x1F) : 32; 290 246 291 if(bitmap[word] != 0XFFFFFFFF) 247 292 { 248 for(bit = 0 ; bit < 32; bit++)293 for(bit = 0 ; bit < max_bit ; bit++) 249 294 { 250 295 if( (bitmap[word] & (1 << bit)) == 0 ) return (word*32 + bit); … … 258 303 } // bitmap_ffc() 259 304 305 306 ////////////////////////////////////////////////////////////////////////////// 307 ////////////// remote access functions /////////////////////////////// 308 ////////////////////////////////////////////////////////////////////////////// 309 310 //////////////////////////////////////////// 311 void bitmap_remote_init( xptr_t bitmap_xp, 312 uint32_t len ) 313 { 314 bitmap_t * bitmap_ptr = GET_PTR( bitmap_xp ); 315 cxy_t bitmap_cxy = GET_CXY( bitmap_xp ); 316 317 uint32_t word; 318 uint32_t nwords = BITMAP_SIZE( len ); 319 320 for( word = 0 ; word < nwords ; word++ ) 321 { 322 hal_remote_s32( XPTR( bitmap_cxy , &bitmap_ptr[word] ) , 0 ); 323 } 324 } 325 326 //////////////////////////////////////////////////// 327 inline void 
bitmap_remote_set( xptr_t bitmap_xp, 328 uint32_t index ) 329 { 330 bitmap_t * bitmap_ptr = GET_PTR( bitmap_xp ); 331 cxy_t bitmap_cxy = GET_CXY( bitmap_xp ); 332 333 uint32_t word = index / 32; 334 uint32_t bit = index % 32; 335 336 hal_remote_atomic_or( XPTR( bitmap_cxy , &bitmap_ptr[word] ) , (1 <<bit) ); 337 } 338 339 ////////////////////////////////////////////////////// 340 inline void bitmap_remote_clear( xptr_t bitmap_xp, 341 uint32_t index ) 342 { 343 bitmap_t * bitmap_ptr = GET_PTR( bitmap_xp ); 344 cxy_t bitmap_cxy = GET_CXY( bitmap_xp ); 345 346 uint32_t word = index / 32; 347 uint32_t bit = index % 32; 348 349 hal_remote_atomic_and( XPTR( bitmap_cxy , &bitmap_ptr[word] ) , ~(1 <<bit) ); 350 } 351 352 /////////////////////////////////////////////////// 353 uint32_t bitmap_remote_alloc( xptr_t bitmap_xp, 354 uint32_t size ) 355 { 356 uint32_t max_word; 357 uint32_t max_bit; 358 uint32_t word; 359 uint32_t bit; 360 xptr_t word_xp; 361 uint32_t value; 362 363 bitmap_t * bitmap_ptr = GET_PTR( bitmap_xp ); 364 cxy_t bitmap_cxy = GET_CXY( bitmap_xp ); 365 366 if( size ) 367 { 368 max_word = ( (size-1) >>5 ) + 1; 369 370 for( word = 0 ; word < max_word ; word++ ) 371 { 372 max_bit = (word == (max_word - 1)) ? 
(size & 0x1F) : 32; 373 374 word_xp = XPTR( bitmap_cxy , &bitmap_ptr[word] ); 375 376 value = hal_remote_l32( word_xp ); 377 378 if( value != 0XFFFFFFFF ) 379 { 380 for(bit = 0 ; bit < max_bit ; bit++) 381 { 382 if( (value & (1 << bit)) == 0 ) 383 { 384 hal_remote_s32( word_xp , value | (1 << bit) ); 385 return (word*32 + bit); 386 } 387 } 388 } 389 } 390 } 391 392 return -1; 393 394 } // end bitmap_alloc() 395 396 /////////////////////////////////////////////// 397 uint32_t bitmap_remote_ffc( xptr_t bitmap_xp, 398 uint32_t size ) 399 { 400 uint32_t max_word; 401 uint32_t max_bit; 402 uint32_t word; 403 uint32_t bit; 404 uint32_t value; 405 406 bitmap_t * bitmap_ptr = GET_PTR( bitmap_xp ); 407 cxy_t bitmap_cxy = GET_CXY( bitmap_xp ); 408 409 if( size ) 410 { 411 max_word = ( (size-1) >>5 ) + 1; 412 413 for( word = 0 ; word < max_word ; word++ ) 414 { 415 max_bit = (word == (max_word - 1)) ? (size & 0x1F) : 32; 416 417 value = hal_remote_l32( XPTR( bitmap_cxy , &bitmap_ptr[word] ) ); 418 419 if( value != 0xFFFFFFFF ) 420 { 421 for(bit = 0 ; bit < max_bit ; bit++) 422 { 423 if( (value & (1 << bit)) == 0 ) return (word*32 + bit); 424 } 425 } 426 } 427 } 428 429 return -1; 430 431 } // bitmap_remote_ffc() 432 433 -
trunk/kernel/libk/bits.h
r635 r657 1 1 /* 2 * bits.h - bit s manipulation helper functions2 * bits.h - bitmap API definition 3 3 * 4 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018,2019 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 28 28 #include <kernel_config.h> 29 29 #include <hal_kernel_types.h> 30 31 /********************************************************************************************* 32 * These macros are NOT used by the bitmap, but can be useful in other contexts... [AG] 33 *********************************************************************************************/ 34 35 #define ARROUND_UP(val, size) (((val) & ((size) -1)) ? ((val) & ~((size)-1)) + (size) : (val)) 36 #define ARROUND_DOWN(val, size) ((val) & ~((size) - 1)) 37 38 #define ABS(x) (((x) < 0) ? -(x) : (x)) 39 #define MIN(x,y) (((x) < (y)) ? (x) : (y)) 40 #define MAX(x,y) (((x) < (y)) ? (y) : (x)) 30 #include <hal_remote.h> 31 32 /********************************************************************************************** 33 * This file defines the API to access a generic bitmap, that can be local or remote. 34 * It is implemented as an array of uint32_t words. 35 * The number of entries in this array is statically defined at compile time 36 * and defines the max number of items that can be registered in the bitmap. 37 * The remote accesses are used in the VFS by the inum allocator. 38 *********************************************************************************************/ 39 40 typedef uint32_t bitmap_t; 41 42 /********************************************************************************************** 43 * This macro returns the number of 32 bits words required to register <size> entries. 44 *********************************************************************************************/ 45 46 #define BITMAP_SIZE(size) ( ((size) & 31) ? 
(((size)>>5) + 1) : ((size)>>5) ) 41 47 42 48 /********************************************************************************************** … … 44 50 * It returns 0xFFFFFFFF if data is larger than 0x80000000. 45 51 *********************************************************************************************/ 52 46 53 #define POW2_ROUNDUP(data) ( (data <= 0x00000001) ? 0x00000001 : \ 47 54 (data <= 0x00000002) ? 0x00000002 : \ … … 77 84 (data <= 0x80000000) ? 0x80000000 : 0xFFFFFFFF ) 78 85 79 /********************************************************************************************** 80 * This macro returns the number of 32 bits words required to register <size> entries. 81 *********************************************************************************************/ 82 83 #define BITMAP_SIZE(size) ( ((size) & 31) ? (((size)>>5) + 1) : ((size)>>5) ) 84 85 typedef uint32_t bitmap_t; 86 87 /********************************************************************************************* 88 * This function reset all bits in a bitmap. (array ot 32 bits words). 86 /********************************************************************************************* 87 * These macros are NOT used by the bitmap, but are useful in other contexts... [AG] 88 *********************************************************************************************/ 89 90 #define ARROUND_UP(val, size) (((val) & ((size) -1)) ? ((val) & ~((size)-1)) + (size) : (val)) 91 #define ARROUND_DOWN(val, size) ((val) & ~((size) - 1)) 92 93 #define ABS(x) (((x) < 0) ? -(x) : (x)) 94 #define MIN(x,y) (((x) < (y)) ? (x) : (y)) 95 #define MAX(x,y) (((x) < (y)) ? (y) : (x)) 96 97 /********************************************************************************************* 98 * This function reset all bits in a local or remote bitmap. 89 99 ********************************************************************************************* 90 100 * @ bitmap : pointer on first word in the bitmap. 
91 * @ len : number of bits to reset. 92 ********************************************************************************************/ 93 void bitmap_init( bitmap_t * bitmap, 94 uint32_t len ); 95 96 /********************************************************************************************* 97 * This function set a specific bit in a bitmap. 101 * @ size : number of bits in bitmap. 102 ********************************************************************************************/ 103 extern void bitmap_init( bitmap_t * bitmap, 104 uint32_t size ); 105 106 extern void bitmap_remote_init( xptr_t bitmap_xp, 107 uint32_t size ); 108 109 /********************************************************************************************* 110 * These functions set a specific bit in a local or remote bitmap. 98 111 ********************************************************************************************* 99 112 * @ bitmap : pointer on the bitmap … … 103 116 uint32_t index ); 104 117 105 /********************************************************************************************* 106 * This function clear a specific bit in a bitmap. 118 extern inline void bitmap_remote_set( xptr_t bitmap_xp, 119 uint32_t index ); 120 121 /********************************************************************************************* 122 * These functions clear a specific bit in a local or remote bitmap. 107 123 ********************************************************************************************* 108 124 * @ bitmap : pointer on the bitmap … … 112 128 uint32_t index ); 113 129 114 /********************************************************************************************* 115 * This function returns a specific bit in a bitmap. 
130 extern inline void bitmap_remote_clear( xptr_t bitmap_xp, 131 uint32_t index ); 132 133 /********************************************************************************************* 134 * These functions search the first bit non-set in a local or remote bitmap, in the 135 * range [0 , size-1], set this bit, and return the index of the found bit. 136 * The lock protecting the bitmap must be taken by the caller. 137 ********************************************************************************************* 138 * @ bitmap : pointer on the bitmap. 139 * @ size : number of bits to scan. 140 * @ returns index of found bit / returns 0xFFFFFFFF if not found. 141 ********************************************************************************************/ 142 extern uint32_t bitmap_alloc( bitmap_t * bitmap, 143 uint32_t size ); 144 145 extern uint32_t bitmap_remote_alloc( xptr_t bitmap_xp, 146 uint32_t size ); 147 148 /********************************************************************************************* 149 * This function returns the index of aa specific bit in a bitmap. 116 150 ********************************************************************************************* 117 151 * @ bitmap : pointer on the bitmap … … 179 213 180 214 /********************************************************************************************* 181 * This function returns the index of first bit cleared in a bitmap, starting from bit 0. 215 * These functions return the index of first bit cleared in a local or remote bitmap, 216 * starting from bit 0. 182 217 ********************************************************************************************* 183 218 * @ bitmap : pointer on the bitmap … … 187 222 extern uint32_t bitmap_ffc( bitmap_t * bitmap, 188 223 uint32_t size ); 224 225 extern uint32_t bitmap_remote_ffc( xptr_t bitmap_xp, 226 uint32_t size ); 189 227 190 228 /********************************************************************************************* -
trunk/kernel/libk/elf.c
r651 r657 224 224 error_t error; 225 225 226 // get file name for error reporting and debug226 // get file cluster and local pointer 227 227 cxy_t file_cxy = GET_CXY( file_xp ); 228 228 vfs_file_t * file_ptr = GET_PTR( file_xp ); 229 230 // get file name for error reporting and debug 229 231 vfs_inode_t * inode = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) ); 230 232 vfs_inode_get_name( XPTR( file_cxy , inode ) , name ); -
trunk/kernel/libk/grdxt.c
r656 r657 332 332 //////////////////////////////////////////////////////////////////////////////////////// 333 333 334 //////////////////////////////////////////// 335 error_t grdxt_remote_init( xptr_t rt_xp, 336 uint32_t ix1_width, 337 uint32_t ix2_width, 338 uint32_t ix3_width ) 339 { 340 void ** root; 341 kmem_req_t req; 342 343 // get cluster and local pointer 344 cxy_t rt_cxy = GET_CXY( rt_xp ); 345 grdxt_t * rt_ptr = GET_PTR( rt_xp ); 346 347 // initialize widths 348 hal_remote_s32( XPTR( rt_cxy , &rt_ptr->ix1_width ) , ix1_width ); 349 hal_remote_s32( XPTR( rt_cxy , &rt_ptr->ix2_width ) , ix2_width ); 350 hal_remote_s32( XPTR( rt_cxy , &rt_ptr->ix3_width ) , ix3_width ); 351 352 // allocates first level array 353 req.type = KMEM_KCM; 354 req.order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 ); 355 req.flags = AF_KERNEL | AF_ZERO; 356 root = kmem_remote_alloc( rt_cxy , &req ); 357 358 if( root == NULL ) 359 { 360 printk("\n[ERROR] in %s : cannot allocate first level array\n", __FUNCTION__); 361 return -1; 362 } 363 364 // register first level array in rt descriptor 365 hal_remote_spt( XPTR( rt_cxy , &rt_ptr->root ) , root ); 366 367 return 0; 368 369 } // end grdxt_remote_init() 370 371 ////////////////////////////////////////// 372 void grdxt_remote_destroy( xptr_t rt_xp ) 373 { 374 kmem_req_t req; 375 376 uint32_t w1; 377 uint32_t w2; 378 uint32_t w3; 379 380 uint32_t ix1; 381 uint32_t ix2; 382 uint32_t ix3; 383 384 void ** ptr1; 385 void ** ptr2; 386 void ** ptr3; 387 388 // get cluster and local pointer 389 cxy_t rt_cxy = GET_CXY( rt_xp ); 390 grdxt_t * rt_ptr = GET_PTR( rt_xp ); 391 392 // get widths 393 w1 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix1_width ) ); 394 w2 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix2_width ) ); 395 w3 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix3_width ) ); 396 397 // get ptr1 398 ptr1 = hal_remote_lpt( XPTR( rt_cxy , &rt_ptr->root ) ); 399 400 for( ix1=0 ; ix1 < (uint32_t)(1 << w1) ; ix1++ ) 401 { 402 // get ptr2 403 
ptr2 = hal_remote_lpt( XPTR( rt_cxy , &ptr1[ix1] ) ); 404 405 if( ptr2 == NULL ) continue; 406 407 for( ix2=0 ; ix2 < (uint32_t)(1 << w2) ; ix2++ ) 408 { 409 // get ptr2 410 ptr3 = hal_remote_lpt( XPTR( rt_cxy , &ptr2[ix2] ) ); 411 412 if( ptr3 == NULL ) continue; 413 414 for( ix3=0 ; ix3 < (uint32_t)(1 << w3) ; ix3++ ) 415 { 416 if( ptr3[ix3] != NULL ) 417 { 418 printk("\n[WARNING] in %s : ptr3[%d][%d][%d] non empty\n", 419 __FUNCTION__, ix1, ix2, ix3 ); 420 } 421 } 422 423 // release level 3 array 424 req.type = KMEM_KCM; 425 req.ptr = ptr3; 426 kmem_remote_free( rt_cxy , &req ); 427 } 428 429 // release level 2 array 430 req.type = KMEM_KCM; 431 req.ptr = ptr2; 432 kmem_remote_free( rt_cxy , &req ); 433 } 434 435 // release level 1 array 436 req.type = KMEM_KCM; 437 req.ptr = ptr1; 438 kmem_remote_free( rt_cxy , &req ); 439 440 } // end grdxt_remote_destroy() 441 334 442 ////////////////////////////////////////////// 335 443 error_t grdxt_remote_insert( xptr_t rt_xp, … … 464 572 465 573 //////////////////////////////////////////// 466 void *grdxt_remote_remove( xptr_t rt_xp,574 xptr_t grdxt_remote_remove( xptr_t rt_xp, 467 575 uint32_t key ) 468 576 { … … 489 597 // get ptr2 490 598 void ** ptr2 = hal_remote_lpt( XPTR( rt_cxy , &ptr1[ix1] ) ); 491 if( ptr2 == NULL ) return NULL;599 if( ptr2 == NULL ) return XPTR_NULL; 492 600 493 601 // get ptr3 494 602 void ** ptr3 = hal_remote_lpt( XPTR( rt_cxy , &ptr2[ix2] ) ); 495 if( ptr3 == NULL ) return NULL;603 if( ptr3 == NULL ) return XPTR_NULL; 496 604 497 605 // get value … … 502 610 hal_fence(); 503 611 504 return value;612 return XPTR( rt_cxy , value ); 505 613 506 614 } // end grdxt_remote_remove() … … 523 631 524 632 // compute indexes 525 uint32_t 526 uint32_t 527 uint32_t 633 uint32_t ix1 = key >> (w2 + w3); // index in level 1 array 634 uint32_t ix2 = (key >> w3) & ((1 << w2) -1); // index in level 2 array 635 uint32_t ix3 = key & ((1 << w3) - 1); // index in level 3 array 528 636 529 637 // get ptr1 … … 546 
654 547 655 } // end grdxt_remote_lookup() 656 657 //////////////////////////////////////////////// 658 xptr_t grdxt_remote_get_first( xptr_t rt_xp, 659 uint32_t start_key, 660 uint32_t * found_key ) 661 { 662 uint32_t ix1; 663 uint32_t ix2; 664 uint32_t ix3; 665 666 void ** ptr1; // local base address of remote first level array 667 void ** ptr2; // local base address of remote second level array 668 void ** ptr3; // local base address of remote third level array 669 670 // get cluster and local pointer on remote rt descriptor 671 grdxt_t * rt_ptr = GET_PTR( rt_xp ); 672 cxy_t rt_cxy = GET_CXY( rt_xp ); 673 674 // get widths 675 uint32_t w1 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix1_width ) ); 676 uint32_t w2 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix2_width ) ); 677 uint32_t w3 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix3_width ) ); 678 679 // Check key value 680 assert( ((start_key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", start_key ); 681 682 // compute min indexes 683 uint32_t min1 = start_key >> (w2 + w3); 684 uint32_t min2 = (start_key >> w3) & ((1 << w2) -1); 685 uint32_t min3 = start_key & ((1 << w3) - 1); 686 687 // compute max indexes 688 uint32_t max1 = 1 << w1; 689 uint32_t max2 = 1 << w2; 690 uint32_t max3 = 1 << w3; 691 692 // get ptr1 693 ptr1 = hal_remote_lpt( XPTR( rt_cxy , &rt_ptr->root ) ); 694 695 for( ix1 = min1 ; ix1 < max1 ; ix1++ ) 696 { 697 ptr2 = hal_remote_lpt( XPTR( rt_cxy , &ptr1[ix1] ) ); 698 if( ptr2 == NULL ) continue; 699 700 for( ix2 = min2 ; ix2 < max2 ; ix2++ ) 701 { 702 ptr3 = hal_remote_lpt( XPTR( rt_cxy , &ptr2[ix2] ) ); 703 if( ptr3 == NULL ) continue; 704 705 for( ix3 = min3 ; ix3 < max3 ; ix3++ ) 706 { 707 void * item = hal_remote_lpt( XPTR( rt_cxy , &ptr3[ix3] ) ); 708 709 if( item == NULL ) continue; 710 else 711 { 712 *found_key = (ix1 << (w2+w3)) | (ix2 << w3) | ix3; 713 return XPTR( rt_cxy , item ); 714 } 715 } 716 } 717 } 718 719 return XPTR_NULL; 720 721 } // end grdxt_remote_get_first() 548 722 549 
723 /////////////////////////i///////////////// -
trunk/kernel/libk/grdxt.h
r656 r657 2 2 * grdxt.h - Three-levels Generic Radix-tree definition. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019 )4 * Authors Alain Greiner (2016,2017,2018,2019,2019) 5 5 * 6 6 * Copyright UPMC Sorbonne Universites … … 124 124 /******************************************************************************************* 125 125 * This function scan all radix-tree entries in increasing key order, starting from 126 * the value defined by the <start_key> argument, and returna pointer on the first valid127 * registered item, and the found item key value.126 * the key defined by the <start_key> argument. It returns a pointer on the first valid 127 * registered item, and returns in the <found_key> buffer the found item key value. 128 128 * It must be called by a local thread. 129 129 ******************************************************************************************* … … 142 142 143 143 /******************************************************************************************* 144 * This function initialises the radix-tree descriptor, 145 * and allocates memory for the first level array of pointers. 146 * It can be called by any thread running in any cluster 147 ******************************************************************************************* 148 * @ rt_xp : extended pointer on the radix-tree descriptor. 149 * @ ix1_width : number of bits in ix1 field 150 * @ ix2_width : number of bits in ix2 field 151 * @ ix3_width : number of bits in ix3 field 152 * @ returns 0 if success / returns ENOMEM if no more memory. 153 ******************************************************************************************/ 154 error_t grdxt_remote_init( xptr_t rt_xp, 155 uint32_t ix1_width, 156 uint32_t ix2_width, 157 uint32_t ix3_width ); 158 159 /******************************************************************************************* 160 * This function releases all memory allocated to the radix-tree infrastructure. 
161 * A warning message is printed on the kernel TXT0 if the radix tree is not empty. 162 * It can be called by any thread running in any cluster 163 ******************************************************************************************* 164 * @ rt_xp : extended pointer on the radix-tree descriptor. 165 ******************************************************************************************/ 166 void grdxt_remote_destroy( xptr_t rt_xp ); 167 168 /******************************************************************************************* 144 169 * This function insert a new item in a - possibly remote - radix tree. 145 170 * It dynamically allocates memory for new second and third level arrays if required. 171 * It can be called by any thread running in any cluster 146 172 ******************************************************************************************* 147 173 * @ rt_xp : extended pointer on the radix-tree descriptor. … … 157 183 * This function removes an item identified by its key from a - possibly remote - radix 158 184 * tree, and returns a local pointer on the removed item. No memory is released. 185 * It can be called by a thread running in any cluster 159 186 ******************************************************************************************* 160 187 * @ rt_xp : pointer on the radix-tree descriptor. 161 188 * @ key : key value. 162 * @ returns local pointer on removed item if success / returnsNULL if failure.163 ******************************************************************************************/ 164 void *grdxt_remote_remove( xptr_t rt_xp,189 * @ returns extended pointer on removed item if success / returns XPTR_NULL if failure. 190 ******************************************************************************************/ 191 xptr_t grdxt_remote_remove( xptr_t rt_xp, 165 192 uint32_t key ); 166 193 … … 169 196 * on the item identified by the <key> argument, from the radix tree identified by 170 197 * the <rt_xp> remote pointer. 
198 * It can be called by a thread running in any cluster 171 199 ******************************************************************************************* 172 200 * @ rt_xp : extended pointer on the radix-tree descriptor. 173 201 * @ key : key value. 174 * @ returns an extended pointer on found item if success / returns XPTR_NULL if failure.202 * @ returns extended pointer on found item if success / returns XPTR_NULL if not found. 175 203 ******************************************************************************************/ 176 204 xptr_t grdxt_remote_lookup( xptr_t rt_xp, … … 178 206 179 207 /******************************************************************************************* 208 * This function scan all radix-tree entries of a - possibly remote - radix tree <rt_xp>, 209 * in increasing key order, starting from the key defined by the <start_key> argument. 210 * It returns an extended pointer on the first valid registered item, and returns in the 211 * <found_key> buffer the found item key value. 212 * It can be called by a thread running in any cluster 213 ******************************************************************************************* 214 * @ rt_xp : extended pointer on the radix-tree descriptor. 215 * @ start_key : key starting value for the scan. 216 * @ found_key : [out] buffer for found key value. 217 * @ return xptr on first valid item if found / return XPTR_NULL if no item found. 218 ******************************************************************************************/ 219 xptr_t grdxt_remote_get_first( xptr_t rt_xp, 220 uint32_t start_key, 221 uint32_t * found_key ); 222 223 /******************************************************************************************* 180 224 * This function displays the current content of a possibly remote radix_tree. 
225 * It can be called by a thread running in any cluster 181 226 ******************************************************************************************* 182 227 * @ rt : extended pointer on the radix-tree descriptor. -
trunk/kernel/libk/remote_fifo.c
r563 r657 65 65 watchdog = 0; 66 66 67 // get write slot index with atomic increment 67 // get write slot index with atomic increment QQQQQQQQQQ 68 68 wr_id = hal_remote_atomic_add( XPTR( fifo_cxy , &fifo_ptr->wr_id ) , 1 ); 69 69 -
trunk/kernel/libk/remote_rwlock.h
r629 r657 1 1 /* 2 * remote_rwlock.h - kernel remote read/write lock definition.2 * remote_rwlock.h - kernel remote read/write lock definition. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,20 19)4 * Authors Alain Greiner (2016,2017,2018,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/libk/xhtab.c
r635 r657 53 53 54 54 return (name[0] % XHASHTAB_SIZE); 55 /*56 uint32_t index = 0;57 while( *name )58 {59 index = index + (*(name++) ^ index);60 }61 return index % XHASHTAB_SIZE;62 */63 64 55 } 65 56 … … 128 119 //////////////////////////////////////////////////////////////////////////////////////// 129 120 130 ////////////////////////////////////////// 131 void xhtab_init( x htab_t * xhtab,121 ///////////////////////////////////////////// 122 void xhtab_init( xptr_t xhtab_xp, 132 123 xhtab_item_type_t type ) 133 124 { 134 uint32_t i; 125 uint32_t i; 126 127 // get cluster and local pointer 128 xhtab_t * ptr = GET_PTR( xhtab_xp ); 129 cxy_t cxy = GET_CXY( xhtab_xp ); 135 130 136 131 // initialize lock 137 remote_busylock_init( XPTR( local_cxy , &xhtab->lock), LOCK_XHTAB_STATE ); 138 139 xhtab->items = 0; 140 xhtab->current_index = 0; 141 xhtab->current_xlist_xp = XPTR_NULL; 142 132 remote_busylock_init( XPTR( cxy , &ptr->lock), LOCK_XHTAB_STATE ); 133 134 // initialise various fiels 135 hal_remote_s32( XPTR( cxy , &ptr->items ) , 0 ); 136 hal_remote_s32( XPTR( cxy , &ptr->current_index ) , 0 ); 137 hal_remote_s64( XPTR( cxy , &ptr->current_xlist_xp ) , XPTR_NULL ); 138 139 // initialize functions pointers 143 140 if( type == XHTAB_DENTRY_TYPE ) 144 141 { 145 xhtab->item_match_key = &xhtab_dentry_item_match_key;146 xhtab->index_from_key = &xhtab_dentry_index_from_key;147 xhtab->item_from_xlist = &xhtab_dentry_item_from_xlist;148 xhtab->item_print_key = &xhtab_dentry_item_print_key;142 hal_remote_spt( XPTR( cxy , &ptr->item_match_key ) , &xhtab_dentry_item_match_key ); 143 hal_remote_spt( XPTR( cxy , &ptr->index_from_key ) , &xhtab_dentry_index_from_key ); 144 hal_remote_spt( XPTR( cxy , &ptr->item_from_xlist ) , &xhtab_dentry_item_from_xlist ); 145 hal_remote_spt( XPTR( cxy , &ptr->item_print_key ) , &xhtab_dentry_item_print_key ); 149 146 } 150 147 else … … 153 150 } 154 151 152 // initialize all lists 153 for( i=0 ; i < XHASHTAB_SIZE ; i++ ) 154 { 155 
xlist_root_init( XPTR( cxy , &ptr->roots[i] ) ); 156 } 157 155 158 #if DEBUG_XHTAB 156 159 printk("\n[%s] for xhtab (%x,%x)\n" 157 " - index_from_key = %x (@ %x)\n" 158 " - item_match_key = %x (@ %x)\n" 159 " - item_from_xlist = %x (@ %x)\n", 160 __FUNCTION__, local_cxy, xhtab, 161 xhtab->index_from_key , &xhtab->index_from_key, 162 xhtab->item_match_key , &xhtab->item_match_key, 163 xhtab->item_from_xlist, &xhtab->item_from_xlist ); 164 #endif 165 166 for( i=0 ; i < XHASHTAB_SIZE ; i++ ) 167 { 168 xlist_root_init( XPTR( local_cxy , &xhtab->roots[i] ) ); 169 170 #if (DEBUG_XHTAB & 1) 171 printk("\n - initialize root[%d] / %x\n", i , &xhtab->roots[i] ); 172 #endif 173 174 } 160 " - index_from_key = %x\n" 161 " - item_match_key = %x\n" 162 " - item_from_xlist = %x\n", 163 __FUNCTION__, cxy, ptr, 164 hal_remote_lpt( XPTR( cxy , &ptr->index_from_key ) ), 165 hal_remote_lpt( XPTR( cxy , &ptr->item_match_key ) ), 166 hal_remote_lpt( XPTR( cxy , &ptr->item_from_xlist ) ) ); 167 #endif 175 168 176 169 } // end xhtab_init() … … 200 193 xhtab_ptr = GET_PTR( xhtab_xp ); 201 194 195 #if DEBUG_XHTAB 196 printk("\n[%s] enter : index = %d / key = %s\n", __FUNCTION__ , index , key ); 197 #endif 198 202 199 // get pointer on "item_from_xlist" function 203 200 item_from_xlist = (item_from_xlist_t *)hal_remote_lpt( XPTR( xhtab_cxy , … … 206 203 item_match_key = (item_match_key_t *)hal_remote_lpt( XPTR( xhtab_cxy , 207 204 &xhtab_ptr->item_match_key ) ); 208 209 205 // scan sub-list[index] 210 206 XLIST_FOREACH( XPTR( xhtab_cxy , &xhtab_ptr->roots[index] ) , xlist_xp ) … … 216 212 if( item_match_key( item_xp , key ) ) return item_xp; 217 213 } 214 215 #if DEBUG_XHTAB 216 printk("\n[%s] exit\n", __FUNCTION__ ); 217 #endif 218 218 219 219 220 // No matching item found … … 248 249 index_from_key = (index_from_key_t *)hal_remote_lpt( XPTR( xhtab_cxy , 249 250 &xhtab_ptr->index_from_key ) ); 250 #if DEBUG_XHTAB251 printk("\n[%s] remote = %x / direct = %x / @ = %x\n",252 __FUNCTION__, 
index_from_key, xhtab_ptr->index_from_key, &xhtab_ptr->index_from_key );253 #endif254 255 251 // compute index from key 256 252 index = index_from_key( key ); 257 258 #if DEBUG_XHTAB259 printk("\n[%s] index = %x\n", __FUNCTION__, index );260 #endif261 253 262 254 // take the lock protecting hash table … … 285 277 286 278 #if DEBUG_XHTAB 287 printk("\n[%s] success /<%s>\n", __FUNCTION__, key );279 printk("\n[%s] success for <%s>\n", __FUNCTION__, key ); 288 280 #endif 289 281 … … 362 354 363 355 #if DEBUG_XHTAB 364 printk("\n[%s] enter / %s\n", __FUNCTION__, key);356 printk("\n[%s] enter\n", __FUNCTION__ ); 365 357 #endif 366 358 … … 368 360 remote_busylock_acquire( XPTR( xhtab_cxy , &xhtab_ptr->lock ) ); 369 361 370 #if DEBUG_XHTAB371 printk("\n[%s] after lock acquire / %s\n", __FUNCTION__, key );372 #endif373 374 362 // scan sub-list 375 363 item_xp = xhtab_scan( xhtab_xp , index , key ); 376 364 377 #if DEBUG_XHTAB378 printk("\n[%s] after xhtab scan / %s\n", __FUNCTION__, key );379 #endif380 381 365 // release the lock protecting hash table 382 366 remote_busylock_release( XPTR( xhtab_cxy , &xhtab_ptr->lock ) ); 383 367 384 368 #if DEBUG_XHTAB 385 printk("\n[%s] after lock release / %s\n", __FUNCTION__, key);369 printk("\n[%s] exit\n", __FUNCTION__ ); 386 370 #endif 387 371 -
trunk/kernel/libk/xhtab.h
r635 r657 105 105 /****************************************************************************************** 106 106 * This function initializes an empty hash table (zero registered item). 107 * The initialisation must be done by a thread running in cluster containing the table.108 107 ****************************************************************************************** 109 * @ xhtab : local pointer on local xhtab to be initialized.110 * @ type : item type (see above).108 * @ xhtab_xp : extended pointer on xhtab. 109 * @ type : item type (see above). 111 110 *****************************************************************************************/ 112 void xhtab_init( x htab_t * xhtab,111 void xhtab_init( xptr_t xhtab_xp, 113 112 xhtab_item_type_t type ); 114 113 -
trunk/kernel/libk/xlist.h
r656 r657 2 2 * xlist.h - Trans-cluster double circular linked list, using extended pointers. 3 3 * 4 * Author : Alain Greiner (2016,2017,2018,2019 )4 * Author : Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 50 50 /*************************************************************************** 51 51 * This macro returns the offset (in bytes) of a field in a structure. 52 *************************************************************************** 52 53 * @ type : structure type 53 54 * @ member : name of the field … … 61 62 * This macro returns an extended pointer on the structure containing an 62 63 * embedded xlist_entry_t field. 64 *************************************************************************** 63 65 * @ xlist_xp : extended pointer on the xlist_entry_t field 64 66 * @ type : type of the structure containing the xlist_entry_t … … 74 76 * the root xlist_entry_t. 75 77 * WARNING : check list non empty before using this macro. 78 *************************************************************************** 76 79 * @ root_xp : extended pointer on the root xlist_entry_t 77 80 * @ type : type of the linked elements … … 88 91 * the root xlist_entry_t. 89 92 * WARNING : check list non empty before using this macro. 93 *************************************************************************** 90 94 * @ root_xp : extended pointer on the root xlist_entry_t 91 95 * @ type : type of the linked elements … … 100 104 * This macro traverses an extended double linked list in forward order. 101 105 * WARNING : the iter variable should NOT be deleted during traversal. 106 *************************************************************************** 102 107 * @ root_xp : extended pointer on the root xlist_entry_t 103 108 * @ iter_xp : current extended pointer on a xlist_entry_t … … 112 117 * This macro traverses an extended double linked list in backward order. 
113 118 * WARNING : the iter variable should NOT be deleted during traversal. 119 *************************************************************************** 114 120 * @ root_xp : extended pointer on the root xlist_entry_t 115 121 * @ iter_xp : current extended pointer on a xlist_entry_t … … 124 130 * This function returns an extended pointer on the next xlist_entry_t, 125 131 * from an extended pointer on a reference xlist_entry_t. 132 *************************************************************************** 126 133 * @ root : extended pointer on the root xlist_entry_t 127 134 * @ ref : extended pointer on the reference xlist_entry_t … … 144 151 /*************************************************************************** 145 152 * This function returns an extended pointer on the previous xlist_entry_t. 153 *************************************************************************** 146 154 * @ root : extended pointer on the root xlist_entry_t 147 155 * @ ref : extended pointer on the reference xlist_entry_t … … 165 173 * This function initialises the root of an extended double linked list. 166 174 * The root can be located in any cluster. 175 *************************************************************************** 167 176 * @ root_xp : extended pointer on the root xlist_entry_t 168 177 xixi **************************************************************************/ … … 176 185 * This function initialises an entry of an extended double linked list. 177 186 * The entry can be located in any cluster. 187 *************************************************************************** 178 188 * @ entry_xp : extended pointer on the xlist_entry_t 179 189 **************************************************************************/ … … 188 198 * double linked list. Four extended pointers must be modified. 189 199 * The lock protecting the list should have been previously taken. 
200 *************************************************************************** 190 201 * @ root_xp : extended pointer on the root xlist_entry_t 191 202 * @ entry_xp : extended pointer on the xlist_entry_t to be inserted … … 214 225 * double linked list. Four extended pointers must be modified. 215 226 * The lock protecting the list should have been previously taken. 227 *************************************************************************** 216 228 * @ root_xp : extended pointer on the root xlist_entry_t 217 229 * @ entry_xp : extended pointer on the xlist_entry_t to be inserted … … 239 251 /*************************************************************************** 240 252 * This function returns true if the list is empty. 253 *************************************************************************** 241 254 * @ root_xp : extended pointer on the root xlist_entry_t. 242 255 **************************************************************************/ … … 253 266 * Two extended pointers must be modified. 254 267 * The memory allocated to the removed entry is not released. 268 *************************************************************************** 255 269 * @ xp : extended pointer on the xlist_entry_t to be removed. 256 270 **************************************************************************/ … … 277 291 * Four extended pointers must be modified. 278 292 * The memory allocated to the removed entry is not released. 293 *************************************************************************** 279 294 * @ old : extended pointer on the xlist_entry_t to be removed. 280 295 * @ new : extended pointer on the xlist_entry_t to be inserted. … … 307 322 /*************************************************************************** 308 323 * This debug function displays all entries of an xlist. 324 *************************************************************************** 309 325 * @ root_xp : extended pointer on the root xlist_entry_t. 
310 326 * @ string : list identifier displayed in header. -
trunk/kernel/mm/kcm.c
r656 r657 43 43 ////////////////////////////////////////////////////////////////////////////////////// 44 44 // This static function must be called by a local thread. 45 // It returns a pointer on a block allocated from a non-fullkcm_page.46 // It makes a panic if no block is available in selected page.45 // It returns a pointer on a block allocated from an active kcm_page. 46 // It makes a panic if no block is available in the selected page. 47 47 // It changes the page status as required. 48 48 ////////////////////////////////////////////////////////////////////////////////////// 49 49 // @ kcm : pointer on KCM allocator. 50 // @ kcm_page : pointer on a non-fullkcm_page.50 // @ kcm_page : pointer on an active kcm_page. 51 51 // @ return pointer on allocated block. 52 52 ///////////////////////////////////////////////////////////////////////////////////// … … 64 64 uint32_t index = 1; 65 65 uint64_t mask = (uint64_t)0x2; 66 uint32_t found = 0;67 66 68 67 // allocate first free block in kcm_page, update status, … … 70 69 while( index <= max ) 71 70 { 72 if( (status & mask) == 0 ) // block non allocated71 if( (status & mask) == 0 ) // block found 73 72 { 73 // update page count and status 74 74 kcm_page->status = status | mask; 75 75 kcm_page->count = count + 1; 76 found = 1;77 78 76 break; 79 77 } … … 83 81 } 84 82 85 // change the page list if almost full83 // change the page list if found block is the last 86 84 if( count == max-1 ) 87 85 { … … 162 160 163 161 ///////////////////////////////////////////////////////////////////////////////////// 164 // This privatestatic function must be called by a local thread.165 // It returns one non-full kcm_page with t e following policy :162 // This static function must be called by a local thread. 163 // It returns one non-full kcm_page with the following policy : 166 164 // - if the "active_list" is non empty, it returns the first "active" page, 167 165 // without modifying the KCM state. 
168 // - if the "active_list" is empty, it allocates a new page from mPPM, inserts166 // - if the "active_list" is empty, it allocates a new page from PPM, inserts 169 167 // this page in the active_list, and returns it. 170 168 ///////////////////////////////////////////////////////////////////////////////////// … … 275 273 // release KCM lock 276 274 remote_busylock_release( lock_xp ); 277 } 275 276 } // end kcm_destroy() 278 277 279 278 ////////////////////////////////// … … 284 283 void * block_ptr; 285 284 286 285 // min block size is 64 bytes 287 286 if( order < 6 ) order = 6; 288 287 … … 301 300 kcm_page = kcm_get_page( kcm_ptr ); 302 301 302 #if DEBUG_KCM 303 thread_t * this = CURRENT_THREAD; 304 uint32_t cycle = (uint32_t)hal_get_cycles(); 305 if( (DEBUG_KCM < cycle) && (local_cxy == 1) ) 306 { 307 printk("\n[%s] thread[%x,%x] enters / order %d / page %x / kcm %x / page_status (%x|%x)\n", 308 __FUNCTION__, this->process->pid, this->trdid, order, kcm_page, kcm_ptr, 309 GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) ); 310 kcm_remote_display( local_cxy , kcm_ptr ); 311 } 312 #endif 313 303 314 if( kcm_page == NULL ) 304 315 { … … 314 325 315 326 #if DEBUG_KCM 316 thread_t * this = CURRENT_THREAD; 317 uint32_t cycle = (uint32_t)hal_get_cycles(); 318 if( DEBUG_KCM < cycle ) 319 printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm %x / status[%x,%x] / count %d\n", 320 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_ptr, 321 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count ); 327 if( (DEBUG_KCM < cycle) && (local_cxy == 1) ) 328 { 329 printk("\n[%s] thread[%x,%x] exit / order %d / block %x / kcm %x / page_status (%x|%x)\n", 330 __FUNCTION__, this->process->pid, this->trdid, order, block_ptr, kcm_ptr, 331 GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) ); 332 kcm_remote_display( local_cxy , kcm_ptr ); 333 } 322 334 #endif 323 335 … … 344 356 thread_t * this = CURRENT_THREAD; 345 357 uint32_t 
cycle = (uint32_t)hal_get_cycles(); 346 if( DEBUG_KCM < cycle ) 347 printk("\n[%s] thread[%x,%x] release block %x / order %d / kcm %x / status [%x,%x] / count %d\n", 348 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_ptr->order, kcm_ptr, 349 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count ); 358 if( (DEBUG_KCM < cycle) && (local_cxy == 1) ) 359 { 360 printk("\n[%s] thread[%x,%x] enters / order %d / block %x / page %x / kcm %x / status [%x,%x]\n", 361 __FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, block_ptr, kcm_page, kcm_ptr, 362 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) ); 363 kcm_remote_display( local_cxy , kcm_ptr ); 364 } 350 365 #endif 351 366 … … 361 376 // release lock 362 377 remote_busylock_release( lock_xp ); 378 379 #if DEBUG_KCM 380 if( (DEBUG_KCM < cycle) && (local_cxy == 1) ) 381 { 382 printk("\n[%s] thread[%x,%x] exit / order %d / page %x / status [%x,%x]\n", 383 __FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, kcm_ptr, 384 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) ); 385 kcm_remote_display( local_cxy , kcm_ptr ); 363 386 } 387 #endif 388 389 } // end kcm_free() 364 390 365 391 ///////////////////////////////////////////////////////////////////////////////////// … … 369 395 ///////////////////////////////////////////////////////////////////////////////////// 370 396 // This static function can be called by any thread running in any cluster. 371 // It returns a local pointer on a block allocated from an non-fullkcm_page.372 // It makes a panic if no block available in selectedpage.397 // It returns a local pointer on a block allocated from an active kcm_page. 398 // It makes a panic if no block available in the selected kcm_page. 373 399 // It changes the page status as required. 
374 400 ///////////////////////////////////////////////////////////////////////////////////// 375 // @ kcm_cxy : remote KCM cluster identi dfier.401 // @ kcm_cxy : remote KCM cluster identifier. 376 402 // @ kcm_ptr : local pointer on remote KCM allocator. 377 // @ kcm_page : pointer on active kcmpage to use.403 // @ kcm_page : local pointer on remote active kcm_page to use. 378 404 // @ return a local pointer on the allocated block. 379 405 ///////////////////////////////////////////////////////////////////////////////////// … … 392 418 uint32_t index = 1; 393 419 uint64_t mask = (uint64_t)0x2; 394 uint32_t found = 0;395 420 396 421 // allocate first free block in kcm_page, update status, … … 398 423 while( index <= max ) 399 424 { 400 if( (status & mask) == 0 ) // block non allocated425 if( (status & mask) == 0 ) // block found 401 426 { 402 427 hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status | mask ); 403 hal_remote_s64( XPTR( kcm_cxy , &kcm_page->count ) , count + 1 ); 404 found = 1; 428 hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count ) , count + 1 ); 405 429 break; 406 430 } … … 410 434 } 411 435 412 // change the page list if almost full436 // change the page list if found block is the last 413 437 if( count == max-1 ) 414 438 { … … 631 655 kcm_t * kcm_ptr ) 632 656 { 657 list_entry_t * iter; 658 kcm_page_t * kcm_page; 659 uint64_t status; 660 uint32_t count; 661 633 662 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order) ); 634 663 uint32_t full_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) ); 635 664 uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) ); 636 665 637 printk("*** KCM / cxy %x / order %d / full_pages %d / empty_pages %d / active_pages%d\n",666 printk("*** KCM : cxy %x / order %d / full_pages_nr %d / active_pages_nr %d\n", 638 667 kcm_cxy, order, full_pages_nr, active_pages_nr ); 639 } 668 669 if( active_pages_nr ) 670 { 671 LIST_REMOTE_FOREACH( kcm_cxy , 
&kcm_ptr->active_root , iter ) 672 { 673 kcm_page = LIST_ELEMENT( iter , kcm_page_t , list ); 674 status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); 675 count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); 676 677 printk("- active page %x / status (%x,%x) / count %d\n", 678 kcm_page, GET_CXY( status ), GET_PTR( status ), count ); 679 } 680 } 681 682 if( full_pages_nr ) 683 { 684 LIST_REMOTE_FOREACH( kcm_cxy , &kcm_ptr->full_root , iter ) 685 { 686 kcm_page = LIST_ELEMENT( iter , kcm_page_t , list ); 687 status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); 688 count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); 689 690 printk("- full page %x / status (%x,%x) / count %d\n", 691 kcm_page, GET_CXY( status ), GET_PTR( status ), count ); 692 } 693 } 694 } // end kcm remote_display() -
trunk/kernel/mm/kcm.h
r635 r657 92 92 * It initializes a Kernel Cache Manager, depending on block size. 93 93 **************************************************************************************** 94 * @ kcm : pointer on KCM manager to initialize.94 * @ kcm : pointer on KCM to be initialized. 95 95 * @ order : ln(block_size). 96 96 ***************************************************************************************/ … … 122 122 ***************************************************************************************/ 123 123 void kcm_free( void * block_ptr ); 124 125 126 124 127 125 128 /**************************************************************************************** -
trunk/kernel/mm/kmem.c
r656 r657 2 2 * kmem.c - kernel memory allocator implementation. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/mm/mapper.c
r656 r657 3 3 * 4 4 * Authors Mohamed Lamine Karaoui (2015) 5 * Alain Greiner (2016,2017,2018,2019 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 46 46 47 47 48 ////////////////////////////////////////////// 49 mapper_t * mapper_create( vfs_fs_type_t type ) 50 { 51 mapper_t * mapper; 48 ///////////////////////////////////// 49 xptr_t mapper_create( cxy_t cxy, 50 uint32_t type ) 51 { 52 mapper_t * mapper_ptr; 52 53 kmem_req_t req; 53 54 error_t error; 54 55 55 56 // allocate memory for mapper descriptor 56 req.type = KMEM_KCM;57 req.order = bits_log2( sizeof(mapper_t) );58 req.flags = AF_KERNEL | AF_ZERO;59 mapper = kmem_alloc(&req );60 61 if( mapper == NULL )57 req.type = KMEM_KCM; 58 req.order = bits_log2( sizeof(mapper_t) ); 59 req.flags = AF_KERNEL | AF_ZERO; 60 mapper_ptr = kmem_remote_alloc( cxy , &req ); 61 62 if( mapper_ptr == NULL ) 62 63 { 63 64 printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ ); 64 return NULL;65 } 66 67 // initialize refcount & inode68 mapper->refcount = 0;69 mapper->inode = NULL;65 return XPTR_NULL; 66 } 67 68 // initialize refcount and type 69 hal_remote_s32( XPTR( cxy , &mapper_ptr->refcount ) , 0 ); 70 hal_remote_s32( XPTR( cxy , &mapper_ptr->fs_type ) , type ); 70 71 71 72 // initialize radix tree 72 error = grdxt_ init( &mapper->rt,73 CONFIG_MAPPER_GRDXT_W1,74 CONFIG_MAPPER_GRDXT_W2,75 CONFIG_MAPPER_GRDXT_W3 );73 error = grdxt_remote_init( XPTR( cxy , &mapper_ptr->rt ), 74 CONFIG_MAPPER_GRDXT_W1, 75 CONFIG_MAPPER_GRDXT_W2, 76 CONFIG_MAPPER_GRDXT_W3 ); 76 77 if( error ) 77 78 { 78 79 printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ ); 79 80 req.type = KMEM_KCM; 80 req.ptr = mapper; 81 kmem_free( &req ); 82 return NULL; 83 } 84 85 // initialize mapper type 86 mapper->type = type; 81 req.ptr = mapper_ptr; 82 kmem_remote_free( cxy , &req ); 83 return XPTR_NULL; 84 } 87 85 88 86 // initialize mapper lock 89 remote_rwlock_init( XPTR( 
local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );87 remote_rwlock_init( XPTR( cxy , &mapper_ptr->lock ) , LOCK_MAPPER_STATE ); 90 88 91 89 // initialize waiting threads xlist (empty) 92 xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );90 xlist_root_init( XPTR( cxy , &mapper_ptr->wait_root ) ); 93 91 94 92 // initialize vsegs xlist (empty) 95 xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );96 97 return mapper;93 xlist_root_init( XPTR( cxy , &mapper_ptr->vsegs_root ) ); 94 95 return XPTR( cxy , mapper_ptr ); 98 96 99 97 } // end mapper_create() 100 98 101 99 //////////////////////////////////////// 102 void mapper_destroy( mapper_t * mapper ) 103 { 100 void mapper_destroy( xptr_t mapper_xp ) 101 { 102 xptr_t page_xp; 104 103 page_t * page; 105 104 uint32_t found_index = 0; … … 107 106 kmem_req_t req; 108 107 108 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 109 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 110 111 // build extended pointer on radix tree 112 xptr_t rt_xp = XPTR( mapper_cxy , &mapper_ptr->rt ); 113 109 114 // scan radix tree 110 115 do 111 116 { 112 117 // get page from radix tree 113 page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index ); 114 118 page_xp = grdxt_remote_get_first( rt_xp, 119 start_index , 120 &found_index ); 121 page = GET_PTR( page_xp ); 122 115 123 // release registered pages to PPM 116 124 if( page != NULL ) 117 125 { 118 126 // remove page from mapper and release to PPM 119 mapper_remote_release_page( XPTR( local_cxy , mapper ), page );127 mapper_remote_release_page( mapper_xp , page ); 120 128 121 129 // update start_key value for next page … … 126 134 127 135 // release the memory allocated to radix tree itself 128 grdxt_ destroy( &mapper->rt);136 grdxt_remote_destroy( rt_xp ); 129 137 130 138 // release memory for mapper descriptor 131 139 req.type = KMEM_KCM; 132 req.ptr = mapper ;133 kmem_ free(&req );140 req.ptr = mapper_ptr; 141 kmem_remote_free( mapper_cxy , &req ); 134 142 135 143 } // 
end mapper_destroy() 136 144 137 ///////////////////////////////////////////////// ///////138 error_t mapper_ remote_handle_miss( xptr_t mapper_xp,139 140 145 ///////////////////////////////////////////////// 146 error_t mapper_handle_miss( xptr_t mapper_xp, 147 uint32_t page_id, 148 xptr_t * page_xp_ptr ) 141 149 { 142 150 error_t error; 143 151 144 uint32_t inode_size ;145 uint32_t inode_type ;152 uint32_t inode_size = 0; 153 uint32_t inode_type = 0; 146 154 147 155 thread_t * this = CURRENT_THREAD; … … 159 167 inode_size = hal_remote_l32( XPTR( mapper_cxy , &inode->size ) ); 160 168 inode_type = hal_remote_l32( XPTR( mapper_cxy , &inode->type ) ); 161 }162 else163 {164 inode_size = 0;165 inode_type = 0;166 169 } 167 170 … … 267 270 return 0; 268 271 269 } // end mapper_ remote_handle_miss()270 271 ///////////////////////////////////////////// ///////272 xptr_t mapper_ remote_get_page( xptr_t mapper_xp,273 272 } // end mapper_handle_miss() 273 274 ///////////////////////////////////////////// 275 xptr_t mapper_get_page( xptr_t mapper_xp, 276 uint32_t page_id ) 274 277 { 275 278 error_t error; … … 281 284 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 282 285 286 assert( (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) != NULL ), 287 "should not be used for the FAT mapper"); 288 283 289 #if DEBUG_MAPPER_GET_PAGE 284 vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );285 290 uint32_t cycle = (uint32_t)hal_get_cycles(); 286 291 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 287 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) ) // FAT mapper 288 { 289 printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n", 290 __FUNCTION__, this->process->pid, this->trdid, page_id, cycle ); 291 } 292 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) ) // file mapper 293 { 292 if( DEBUG_MAPPER_GET_PAGE < cycle ) 293 { 294 vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ); 294 295 vfs_inode_get_name( 
XPTR( mapper_cxy , inode ) , name ); 295 296 printk("\n[%s] thread[%x,%x] enter for page %d of <%s> mapper / cycle %d\n", … … 330 331 if ( page_xp == XPTR_NULL ) // miss confirmed => handle it 331 332 { 332 error = mapper_ remote_handle_miss( mapper_xp,333 334 333 error = mapper_handle_miss( mapper_xp, 334 page_id, 335 &page_xp ); 335 336 if( error ) 336 337 { … … 343 344 344 345 #if (DEBUG_MAPPER_GET_PAGE & 1) 345 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) ) 346 { 347 printk("\n[%s] thread[%x,%x] introduced missing page in <%s> mapper / ppn %x\n", 348 __FUNCTION__, this->process->pid, this->trdid, name, ppm_page2ppn(page_xp) ); 349 } 350 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) ) 351 { 352 printk("\n[%s] thread[%x,%x] introduced missing page in FAT mapper / ppn %x\n", 353 __FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) ); 354 } 346 if( DEBUG_MAPPER_GET_PAGE < cycle ) 347 printk("\n[%s] thread[%x,%x] introduced missing page %d in <%s> mapper / ppn %x\n", 348 __FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) ); 355 349 #endif 356 350 … … 365 359 366 360 #if DEBUG_MAPPER_GET_PAGE 367 cycle = (uint32_t)hal_get_cycles(); 368 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) ) 369 { 370 printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x\n", 371 __FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) ); 372 } 373 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) ) 374 { 375 printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper / ppn %x\n", 376 __FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) ); 377 } 361 if( DEBUG_MAPPER_GET_PAGE < cycle ) 362 printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x\n", 363 __FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) ); 378 364 #endif 379 365 … … 385 371 return page_xp; 386 372 387 } // end mapper_remote_get_page() 
373 } // end mapper_get_page() 374 375 ///////////////////////////////////////////////// 376 xptr_t mapper_get_fat_page( xptr_t mapper_xp, 377 uint32_t page_id ) 378 { 379 error_t error; 380 381 thread_t * this = CURRENT_THREAD; 382 383 // get mapper cluster and local pointer 384 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 385 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 386 387 assert( (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) == NULL ), 388 "should be used for the FAT mapper"); 389 390 #if DEBUG_MAPPER_GET_FAT_PAGE 391 uint32_t cycle = (uint32_t)hal_get_cycles(); 392 if( DEBUG_MAPPER_GET_FAT_PAGE < cycle ) 393 printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n", 394 __FUNCTION__, this->process->pid, this->trdid, page_id, cycle ); 395 #endif 396 397 #if( DEBUG_MAPPER_GET_FAT_PAGE & 2 ) 398 if( DEBUG_MAPPER_GET_FAT_PAGE < cycle ) 399 ppm_remote_display( local_cxy ); 400 #endif 401 402 // check thread can yield 403 thread_assert_can_yield( this , __FUNCTION__ ); 404 405 // build extended pointer on mapper lock and mapper rt 406 xptr_t lock_xp = XPTR( mapper_cxy , &mapper_ptr->lock ); 407 xptr_t rt_xp = XPTR( mapper_cxy , &mapper_ptr->rt ); 408 409 // take mapper lock in READ_MODE 410 remote_rwlock_rd_acquire( lock_xp ); 411 412 // search page in radix tree 413 xptr_t page_xp = grdxt_remote_lookup( rt_xp , page_id ); 414 415 // test mapper miss 416 if( page_xp == XPTR_NULL ) // miss => handle it 417 { 418 // release the lock in READ_MODE and take it in WRITE_MODE 419 remote_rwlock_rd_release( lock_xp ); 420 remote_rwlock_wr_acquire( lock_xp ); 421 422 // second test on missing page because the page status can be modified 423 // by another thread, when passing from READ_MODE to WRITE_MODE. 424 // from this point there is no concurrent accesses to mapper. 
425 page_xp = grdxt_remote_lookup( rt_xp , page_id ); 426 427 if ( page_xp == XPTR_NULL ) // miss confirmed => handle it 428 { 429 error = mapper_handle_miss( mapper_xp, 430 page_id, 431 &page_xp ); 432 if( error ) 433 { 434 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n", 435 __FUNCTION__ , this->process->pid, this->trdid ); 436 remote_rwlock_wr_release( lock_xp ); 437 return XPTR_NULL; 438 } 439 } 440 441 #if (DEBUG_MAPPER_GET_FAT_PAGE & 1) 442 if( DEBUG_MAPPER_GET_FAT_PAGE < cycle ) 443 printk("\n[%s] thread[%x,%x] introduced missing page %d in FAT mapper / ppn %x\n", 444 __FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) ); 445 #endif 446 447 // release mapper lock from WRITE_MODE 448 remote_rwlock_wr_release( lock_xp ); 449 } 450 else // hit 451 { 452 // release mapper lock from READ_MODE 453 remote_rwlock_rd_release( lock_xp ); 454 } 455 456 #if DEBUG_MAPPER_GET_FAT_PAGE 457 if( DEBUG_MAPPER_GET_FAT_PAGE < cycle ) 458 printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper / ppn %x\n", 459 __FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) ); 460 #endif 461 462 #if( DEBUG_MAPPER_GET_FAT_PAGE & 2) 463 if( DEBUG_MAPPER_GET_FAT_PAGE < cycle ) 464 ppm_remote_display( local_cxy ); 465 #endif 466 467 return page_xp; 468 469 } // end mapper_get_fat_page() 388 470 389 471 //////////////////////////////////////////////////// … … 481 563 482 564 // get extended pointer on page descriptor in mapper 483 page_xp = mapper_ remote_get_page( mapper_xp , page_id );565 page_xp = mapper_get_page( mapper_xp , page_id ); 484 566 485 567 if ( page_xp == XPTR_NULL ) return -1; … … 519 601 __FUNCTION__, this->process->pid, this->trdid, page_bytes, 520 602 local_cxy, buf_ptr, name, GET_CXY(map_xp), GET_PTR(map_xp) ); 521 mapper_display_page( mapper_xp , page_ xp, 128 );603 mapper_display_page( mapper_xp , page_id , 128 ); 522 604 #endif 523 605 … … 617 699 618 700 // get extended pointer on page descriptor 
619 page_xp = mapper_ remote_get_page( mapper_xp , page_id );701 page_xp = mapper_get_page( mapper_xp , page_id ); 620 702 621 703 if ( page_xp == XPTR_NULL ) return -1; … … 678 760 679 761 // get page containing the searched word 680 page_xp = mapper_ remote_get_page( mapper_xp , page_id );762 page_xp = mapper_get_page( mapper_xp , page_id ); 681 763 682 764 if( page_xp == XPTR_NULL ) return -1; … … 702 784 703 785 // get page containing the searched word 704 page_xp = mapper_ remote_get_page( mapper_xp , page_id );786 page_xp = mapper_get_page( mapper_xp , page_id ); 705 787 706 788 if( page_xp == XPTR_NULL ) return -1; … … 719 801 } // end mapper_remote_set_32() 720 802 721 ///////////////////////////////////////// 722 error_t mapper_sync( mapper_t * mapper ) 723 { 724 page_t * page; // local pointer on current page descriptor 725 xptr_t page_xp; // extended pointer on current page descriptor 726 grdxt_t * rt; // pointer on radix_tree descriptor 727 uint32_t start_key; // start page index in mapper 728 uint32_t found_key; // current page index in mapper 803 //////////////////////////////////////// 804 error_t mapper_sync( xptr_t mapper_xp ) 805 { 806 uint32_t found_key; // unused, required by grdxt_remote_get_first() 729 807 error_t error; 808 809 // get mapper cluster and local pointer 810 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 811 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 730 812 731 813 #if DEBUG_MAPPER_SYNC … … 733 815 uint32_t cycle = (uint32_t)hal_get_cycles(); 734 816 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 735 vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );736 #endif 737 738 // getpointer on radix tree739 rt = &mapper->rt;817 vfs_inode_get_name( XPTR( mapper_cxy , &mapper_ptr->inode ) , name ); 818 #endif 819 820 // build extended pointer on radix tree 821 xptr_t rt_xp = XPTR( mapper_cxy , &mapper_ptr->rt ); 740 822 741 823 // initialise loop variable 742 start_key = 0;824 uint32_t start_key = 0; 743 825 744 826 // scan radix-tree 
until last page found … … 746 828 { 747 829 // get page descriptor from radix tree 748 page = (page_t *)grdxt_get_first( rt, start_key , &found_key );830 xptr_t page_xp = grdxt_remote_get_first( rt_xp , start_key , &found_key ); 749 831 750 if( page == NULL ) break; 751 752 assert( (page->index == found_key ), "page_index (%d) != key (%d)", page->index, found_key ); 753 assert( (page->order == 0), "page_order (%d] != 0", page->order ); 754 755 // build extended pointer on page descriptor 756 page_xp = XPTR( local_cxy , page ); 832 page_t * page_ptr = GET_PTR( page_xp ); 833 834 // exit loop when last page found 835 if( page_ptr == NULL ) break; 836 837 // get page flags & index fields 838 uint32_t flags = hal_remote_l32( XPTR( mapper_cxy , &page_ptr->flags ) ); 839 uint32_t index = hal_remote_l32( XPTR( mapper_cxy , &page_ptr->index ) ); 757 840 758 841 // synchronize page if dirty 759 if( (page->flags & PG_DIRTY) != 0)842 if( flags & PG_DIRTY ) 760 843 { 761 844 … … 763 846 if( cycle > DEBUG_MAPPER_SYNC ) 764 847 printk("\n[%s] thread[%x,%x] synchonise page %d of <%s> to IOC device\n", 765 __FUNCTION__, this->process->pid, this->trdid, page ->index, name );848 __FUNCTION__, this->process->pid, this->trdid, page_ptr->index, name ); 766 849 #endif 767 850 // copy page to file system … … 771 854 { 772 855 printk("\n[ERROR] in %s : cannot synchonize dirty page %d\n", 773 __FUNCTION__, page ->index );856 __FUNCTION__, page_ptr->index ); 774 857 return -1; 775 858 } … … 784 867 if( cycle > DEBUG_MAPPER_SYNC ) 785 868 printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n", 786 __FUNCTION__, this->process->pid, this->trdid, page ->index, name );869 __FUNCTION__, this->process->pid, this->trdid, page_ptr->index, name ); 787 870 #endif 788 871 } 789 872 790 873 // update loop variable 791 start_key = page->index + 1;874 start_key = index + 1; 792 875 } // end while 793 876 … … 798 881 /////////////////////////////////////////////// 799 882 void mapper_display_page( xptr_t 
mapper_xp, 800 xptr_t page_xp,883 uint32_t page_id, 801 884 uint32_t nbytes ) 802 885 { … … 809 892 assert( (nbytes <= 4096) , "nbytes cannot be larger than 4096"); 810 893 assert( (mapper_xp != XPTR_NULL) , "mapper_xp argument cannot be null"); 811 assert( (page_xp != XPTR_NULL) , "page_xp argument cannot be null");812 894 813 895 // get mapper cluster and local pointer … … 815 897 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 816 898 817 // get page cluster an local pointer 899 // get extended pointer on page descriptor 900 xptr_t page_xp = mapper_get_page( mapper_xp , page_id ); 901 902 // get page cluster and local pointer 818 903 cxy_t page_cxy = GET_CXY( page_xp ); 819 904 page_t * page_ptr = GET_PTR( page_xp ); 820 905 821 906 // get page_id and mapper from page descriptor 822 uint32_t page_id= hal_remote_l32( XPTR( page_cxy , &page_ptr->index ) );907 uint32_t index = hal_remote_l32( XPTR( page_cxy , &page_ptr->index ) ); 823 908 mapper_t * mapper = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) ); 824 909 825 910 assert( (mapper_cxy == page_cxy ) , "mapper and page must be in same cluster"); 826 assert( (mapper_ptr == mapper ) , "unconsistent mapper_xp & page_xp arguments"); 911 assert( (mapper_ptr == mapper ) , "unconsistent mapper field in page descriptor"); 912 assert( (page_id == index ) , "unconsistent index field in page descriptor"); 827 913 828 914 // get inode -
trunk/kernel/mm/mapper.h
r656 r657 61 61 * - In the present implementation the cache size for a given file increases on demand, 62 62 * and the allocated memory is only released when the mapper/inode is destroyed. 63 *64 * TODO the "type" field in mapper descriptor is redundant and probably unused.65 63 ******************************************************************************************/ 66 64 … … 73 71 { 74 72 struct vfs_inode_s * inode; /*! owner inode */ 75 uint32_t type;/*! file system type */73 uint32_t fs_type; /*! file system type */ 76 74 grdxt_t rt; /*! embedded pages cache descriptor (radix tree) */ 77 75 remote_rwlock_t lock; /*! several readers / only one writer */ … … 84 82 85 83 /******************************************************************************************* 86 * This function allocates physical memory for a mapper descriptor, and initializes it 87 * (refcount <= 0) / inode <= NULL). 88 * It must be executed by a thread running in the cluster containing the mapper. 89 ******************************************************************************************* 90 * @ type : type of the mapper to create. 91 * @ return : pointer on created mapper if success / return NULL if no memory 92 ******************************************************************************************/ 93 mapper_t * mapper_create( vfs_fs_type_t type ); 94 95 /******************************************************************************************* 96 * This function releases all physical memory allocated for a mapper. 97 * Both the mapper descriptor and the radix tree are released. 84 * This function allocates physical memory for a mapper descriptor, in cluster 85 * identified by the <cxy> argument. It initializes it (refcount <= 0) / inode <= NULL). 86 * It can be executed by any thread running in any cluster. 87 ******************************************************************************************* 88 * @ cxy : target cluster identifier. 89 * @ type : FS type. 
90 * @ return an extended pointer on created mapper if success / return NULL if no memory 91 ******************************************************************************************/ 92 xptr_t mapper_create( cxy_t cxy, 93 uint32_t type ); 94 95 /******************************************************************************************* 96 * This function releases all physical memory allocated for a mapper, identified 97 * by the <mapper_xp> argument. Both the mapper descriptor and the radix tree are released. 98 98 * It does NOT synchronize dirty pages. Use the vfs_sync_inode() function if required. 99 * It must be executed by a thread running in the cluster containing the mapper.100 ******************************************************************************************* 101 * @ mapper :target mapper.102 ******************************************************************************************/ 103 void mapper_destroy( mapper_t * mapper);99 * It can be executed by any thread running in any cluster. 100 ******************************************************************************************* 101 * @ mapper_xp : extended pointer on target mapper. 102 ******************************************************************************************/ 103 void mapper_destroy( xptr_t mapper_xp ); 104 104 105 105 /******************************************************************************************* … … 117 117 * @ return 0 if success / return -1 if IOC cannot be accessed. 
118 118 ******************************************************************************************/ 119 error_t mapper_ remote_handle_miss( xptr_t mapper_xp,120 121 119 error_t mapper_handle_miss( xptr_t mapper_xp, 120 uint32_t page_id, 121 xptr_t * page_xp ); 122 122 123 123 /******************************************************************************************* … … 180 180 181 181 /******************************************************************************************* 182 * This function returns an extended pointer on a page descriptor .183 * The - possibly remote - mapper isidentified by the <mapper_xp> argument.182 * This function returns an extended pointer on a page descriptor for a regular mapper 183 * (i.e. this mapper is NOT the FAT mapper), identified by the <mapper_xp> argument. 184 184 * The page is identified by <page_id> argument (page index in the file). 185 185 * It can be executed by a thread running in any cluster, as it uses remote … … 193 193 * @ returns extended pointer on page descriptor if success / return XPTR_NULL if error. 194 194 ******************************************************************************************/ 195 xptr_t mapper_remote_get_page( xptr_t mapper_xp, 196 uint32_t page_id ); 195 xptr_t mapper_get_page( xptr_t mapper_xp, 196 uint32_t page_id ); 197 198 /******************************************************************************************* 199 * This function returns an extended pointer on a page descriptor for the FAT mapper. 200 * The page is identified by <page_id> argument (page index in the FAT mapper). 201 * It can be executed by a thread running in any cluster, as it uses remote 202 * access primitives to scan the mapper. 203 * In case of miss, this function takes the mapper lock in WRITE_MODE, and call the 204 * mapper_handle_miss() to load the missing page from device to mapper, using an RPC 205 * when the mapper is remote. 
206 ******************************************************************************************* 207 * @ mapper_xp : extended pointer on the mapper. 208 * @ page_id : page index in file 209 * @ returns extended pointer on page descriptor if success / return XPTR_NULL if error. 210 ******************************************************************************************/ 211 xptr_t mapper_get_fat_page( xptr_t mapper_xp, 212 uint32_t page_id ); 197 213 198 214 /******************************************************************************************* … … 234 250 235 251 /******************************************************************************************* 236 * This function scan all pages present in the mapper identified by the <mapper > argument,237 * a nd synchronize all pages marked as "dirty" on disk.252 * This function scan all pages present in the mapper identified by the <mapper_xp> 253 * argument, and synchronize all pages marked as "dirty" on disk. 238 254 * These pages are unmarked and removed from the local PPM dirty_list. 239 * This function must be called by a local thread running in same cluster as the mapper. 240 * A remote thread must call the RPC_MAPPER_SYNC function. 241 ******************************************************************************************* 242 * @ mapper : [in] local pointer on local mapper. 243 * @ returns 0 if success / return -1 if error. 244 ******************************************************************************************/ 245 error_t mapper_sync( mapper_t * mapper ); 255 * It can be called by any thread running in any cluster. 256 ******************************************************************************************* 257 * @ mapper_xp : [in] extended pointer on local mapper. 258 * @ returns 0 if success / return -1 if error. 
259 ******************************************************************************************/ 260 error_t mapper_sync( xptr_t mapper_xp ); 246 261 247 262 /******************************************************************************************* 248 263 * This debug function displays the content of a given page of a given mapper, identified 249 * by the <mapper_xp> and <page_ xp> arguments.264 * by the <mapper_xp> and <page_id> arguments. 250 265 * The number of bytes to display in page is defined by the <nbytes> argument. 251 266 * The format is eigth (32 bits) words per line in hexadecimal. … … 253 268 ******************************************************************************************* 254 269 * @ mapper_xp : [in] extended pointer on the mapper. 255 * @ page_ xp : [in] extended pointer on page descriptor.270 * @ page_id : [in] page_index in mapper. 256 271 * @ nbytes : [in] number of bytes in page. 257 272 * @ returns 0 if success / return -1 if error. 258 273 ******************************************************************************************/ 259 274 void mapper_display_page( xptr_t mapper_xp, 260 xptr_t page_xp,275 uint32_t page_id, 261 276 uint32_t nbytes ); 262 277 -
trunk/kernel/mm/ppm.c
r656 r657 3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018,2019 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/mm/vmm.c
r656 r657 3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011, 2012) 5 * Alain Greiner (2016,2017,2018,2019 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 1988 1988 xptr_t gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); 1989 1989 1990 // loop on PTEs in GPT to unmap PTE if (old d_vpn_min <= vpn < new_vpn_min)1990 // loop on PTEs in GPT to unmap PTE if (old_vpn_min <= vpn < new_vpn_min) 1991 1991 for( vpn = old_vpn_min ; vpn < new_vpn_min ; vpn++ ) 1992 1992 { … … 2292 2292 2293 2293 // get extended pointer on page descriptor 2294 page_xp = mapper_ remote_get_page( mapper_xp , page_id );2294 page_xp = mapper_get_page( mapper_xp , page_id ); 2295 2295 2296 2296 if ( page_xp == XPTR_NULL ) return EINVAL; -
trunk/kernel/mm/vmm.h
r656 r657 3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011, 2012) 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018,2019) 5 * Alain Greiner (2016,2017,2018,2019,2020)) 7 6 * 8 7 * Copyright (c) UPMC Sorbonne Universites … … 31 30 #include <list.h> 32 31 #include <queuelock.h> 32 #include <remote_queuelock.h> 33 33 #include <hal_gpt.h> 34 34 #include <vseg.h> … … 208 208 209 209 /********************************************************************************************* 210 * This function modifies the size of the vseg identified by <process> and <base> arguments 211 * in all clusters containing a VSL copy, as defined by <new_base> and <new_size> arguments. 212 * This function is called by the sys_munmap() function, and can be called by a thread 213 * running in any cluster, as it uses remote accesses. 210 * This function modifies the vseg identified by <process> and <base> arguments in all 211 * clusters containing a VSL copy, as defined by <new_base> and <new_size> arguments. 212 * The new vseg, defined by the <new_base> and <new_size> arguments must be included 213 * in the existing vseg. The target VSL size and base fields are modified in the VSL. 214 * This is done in all clusters containing a VMM copy to maintain VMM coherence. 215 * It is called by the sys_munmap() and dev_fbf_resize_window() functions. 216 * It can be called by a thread running in any cluster, as it uses the vmm_resize_vseg() in 217 * the local cluster, and parallel RPC_VMM_RESIZE_VSEG for remote clusters. 214 218 * It cannot fail, as only vseg registered in VSL copies are updated. 215 219 ********************************************************************************************* … … 228 232 * the VSL and remove all associated PTE entries from the GPT. 229 233 * This is done in all clusters containing a VMM copy to maintain VMM coherence. 
230 * This function can be called by a thread running in any cluster, as it uses the 231 * vmm_remove_vseg() in the local cluster, and the RPC_VMM_REMOVE_VSEG for remote clusters. 234 * It is called by the sys_munmap() and dev_fbf_resize_window() functions. 235 * It can be called by a thread running in any cluster, as it uses the vmm_remove_vseg() in 236 * the local cluster, and parallel RPC_VMM_REMOVE_VSEG for remote clusters. 232 237 * It cannot fail, as only vseg registered in VSL copies are deleted. 233 238 ********************************************************************************************* … … 317 322 * It must be called by a local thread, running in the cluster containing the modified VMM. 318 323 * Use the RPC_VMM_REMOVE_VSEG if required. 319 * It makes a kernel panic if the process is not registered in the local cluster, 320 * or if the vseg is not registered in the process VSL. 324 * It makes a kernel panic if the process is not registered in the local cluster. 321 325 * For all vseg types, the vseg is detached from local VSL, and all associated PTEs are 322 326 * unmapped from local GPT. Other actions depend on the vseg type: … … 340 344 /********************************************************************************************* 341 345 * This function resize a local vseg identified by the <process> and <vseg> arguments. 342 * It is called by the vmm_global_resize() function. 343 * It must be called by a local thread, running in the cluster containing the modified VMM. 346 * Both the "size" and "base" fields are modified in the process VSL. When the new vseg 347 * contains less pages than the target vseg, the relevant pages are removed from the GPT. 348 * It is called by the vmm_global_resize() and dev_fbf_resize_window() functions. 349 * It must be called by a local thread, running in the cluster containing the modified VSL. 344 350 * Use the RPC_VMM_RESIZE_VSEG if required. 
345 * It makes a kernel panic if the process is not registered in the local cluster,346 * or if the vseg is not registered in the process VSL.347 * The new vseg, defined by the <new_base> and <new_size> arguments must be strictly348 * included in the target vseg. The target VSL size and base fields are modified in the VSL.349 * If the new vseg contains less pages than the target vseg, the relevant pages are350 * removed from the GPT.351 351 * The VSL lock protecting the VSL must be taken by the caller. 352 352 ********************************************************************************************* … … 454 454 ppn_t * ppn ); 455 455 456 457 456 #endif /* _VMM_H_ */ -
trunk/kernel/mm/vseg.c
r651 r657 2 2 * vseg.c - virtual segment (vseg) related operations 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/mm/vseg.h
r651 r657 2 2 * vseg.h - virtual segment (vseg) related operations 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/syscalls/shared_include/shared_fbf.h
r642 r657 1 1 /* 2 * shared_f ramebuffer.h - Shared mnemonics used by the frame buffer related syscalls.2 * shared_fbf.h - Shared mnemonics used by the frame buffer related syscalls. 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019 )4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 26 26 27 27 /******************************************************************************************* 28 * This enum defines the operation mnemonics for frame buffer access.28 * This enum defines the user operation mnemonics for frame buffer access. 29 29 ******************************************************************************************/ 30 30 31 31 typedef enum 32 32 { 33 FBF_GET_CONFIG = 0, 34 FBF_READ = 1, 35 FBF_WRITE = 2, 33 FBF_GET_CONFIG = 0, 34 FBF_DIRECT_READ = 1, 35 FBF_DIRECT_WRITE = 2, 36 FBF_CREATE_WINDOW = 3, 37 FBF_DELETE_WINDOW = 4, 38 FBF_REFRESH_WINDOW = 5, 39 FBF_MOVE_WINDOW = 6, 40 FBF_RESIZE_WINDOW = 7, 36 41 } 37 fbf_ operation_type_t;42 fbf_usr_operation_type_t; 38 43 39 44 #endif // _SHARED_FRAMEBUFFER_H_ -
trunk/kernel/syscalls/shared_include/syscalls_numbers.h
r642 r657 93 93 SYS_GET_THREAD_INFO = 55, 94 94 SYS_FBF = 56, 95 SYS_SOCKET = 57, 95 96 96 SYSCALLS_NR = 5 7,97 SYSCALLS_NR = 58, 97 98 98 99 } syscalls_t; -
trunk/kernel/syscalls/sys_display.c
r656 r657 2 2 * sys_display.c - display the current state of a kernel structure on TXT0 3 3 * 4 * Author Alain Greiner (2016,2017,2018, 2019)4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 380 380 381 381 // get extended pointer on target page 382 page_xp = mapper_ remote_get_page( mapper_xp , page_id );382 page_xp = mapper_get_page( mapper_xp , page_id ); 383 383 384 384 if( page_xp == XPTR_NULL ) … … 446 446 case DISPLAY_FAT: 447 447 { 448 uint32_t entries = (uint32_t)arg1;449 450 if( entries > 4096)448 uint32_t slots = (uint32_t)arg1; 449 450 if( slots > 1024 ) 451 451 { 452 452 453 453 #if DEBUG_SYSCALLS_ERROR 454 printk("\n[ERROR] in %s for FAT : nb_ entries larger than 4096\n",454 printk("\n[ERROR] in %s for FAT : nb_slots larger than 1024\n", 455 455 __FUNCTION__ ); 456 456 #endif … … 459 459 } 460 460 461 if( entries == 0 ) // display fat context in cluster cxy461 if( slots == 0 ) // display fat context in cluster cxy 462 462 { 463 463 uint32_t cxy = (uint32_t)arg0; … … 476 476 fatfs_display_ctx( cxy ); 477 477 } 478 else // display nb_ entries in page478 else // display nb_slots in page 479 479 { 480 uint32_t page= (uint32_t)arg0;481 482 fatfs_display_fat( page , 0 , entries );480 uint32_t min = (uint32_t)arg0; 481 482 fatfs_display_fat( min , slots ); 483 483 } 484 484 -
trunk/kernel/syscalls/sys_fbf.c
r647 r657 2 2 * sys_fbf.c - Acces the frame buffer peripheral. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 33 33 #include <syscalls.h> 34 34 35 //////////////////////// //////////36 int sys_fbf( uint32_t operation,37 void * arg0,38 void * arg1,39 void * arg2)35 //////////////////////// 36 int sys_fbf( reg_t arg0, 37 reg_t arg1, 38 reg_t arg2, 39 reg_t arg3 ) 40 40 { 41 41 vseg_t * vseg; // for vaddr check 42 42 error_t error; 43 43 44 #if (DEBUG_SYS_FBF || CONFIG_INSTRUMENTATION_SYSCALLS) 45 uint64_t tm_start = hal_get_cycles(); 46 #endif 47 44 48 thread_t * this = CURRENT_THREAD; 45 49 process_t * process = this->process; 46 50 47 #if (DEBUG_SYS_FBF || CONFIG_INSTRUMENTATION_SYSCALLS) 48 uint64_t tm_start = hal_get_cycles(); 49 #endif 51 // for some operations, the MSB of arg0 can contain the wid 52 uint32_t operation = arg0 & 0xFFFF; 50 53 51 54 #if DEBUG_SYS_FBF 52 55 if( DEBUG_SYS_FBF < tm_start ) 53 printk("\n[%s] thread[%x,%x] enter for %s / cycle %d\n", 54 __FUNCTION__, process->pid, this->trdid, dev_fbf_cmd_str( operation ), (uint32_t)tm_start ); 56 printk("\n[%s] thread[%x,%x] enter for %s / arg1 %x / arg2 %x / arg3 %x / cycle %d\n", 57 __FUNCTION__, process->pid, this->trdid, dev_fbf_cmd_str( operation ), 58 arg1, arg2, arg3, (uint32_t)tm_start ); 55 59 #endif 56 60 … … 65 69 uint32_t type; 66 70 67 // check arg0 (width) in user vspace 68 error = vmm_get_vseg( process , (intptr_t)arg0 , &vseg ); 69 if( error ) 70 { 71 72 #if DEBUG_SYSCALLS_ERROR 73 printk("\n[ERROR] in %s : unmapped arg0 %x for %s / thread[%x,%x]\n", 74 __FUNCTION__ , arg0, dev_fbf_cmd_str(operation), process->pid, this->trdid ); 75 #endif 76 this->errno = EINVAL; 77 return -1; 78 } 79 80 // check arg1 (height) in user vspace 71 // check "width" in user vspace 81 72 error = vmm_get_vseg( process , (intptr_t)arg1 , &vseg ); 82 73 if( error ) … … 88 79 #endif 89 80 
this->errno = EINVAL; 90 return -1; 91 } 92 93 // check arg2 (type) in user vspace 94 error = vmm_get_vseg( process , (intptr_t)arg2 , &vseg ); 81 } 82 83 // check "height" in user vspace 84 error |= vmm_get_vseg( process , (intptr_t)arg2 , &vseg ); 95 85 if( error ) 96 86 { … … 101 91 #endif 102 92 this->errno = EINVAL; 103 return -1; 93 } 94 95 // check "type" in user vspace 96 error |= vmm_get_vseg( process , (intptr_t)arg3 , &vseg ); 97 if( error ) 98 { 99 100 #if DEBUG_SYSCALLS_ERROR 101 printk("\n[ERROR] in %s : unmapped arg3 %x for %s / thread[%x,%x]\n", 102 __FUNCTION__ , arg3, dev_fbf_cmd_str(operation), process->pid, this->trdid ); 103 #endif 104 this->errno = EINVAL; 104 105 } 105 106 … … 107 108 dev_fbf_get_config( &width , &height , &type ); 108 109 109 // transfer to user space 110 hal_copy_to_uspace( arg0 , XPTR( local_cxy , &width ) , sizeof(uint32_t) ); 111 hal_copy_to_uspace( arg1 , XPTR( local_cxy , &height ) , sizeof(uint32_t) ); 112 hal_copy_to_uspace( arg2 , XPTR( local_cxy , &type ) , sizeof(uint32_t) ); 113 114 break; 115 } 116 ////////////// 117 case FBF_READ: 118 case FBF_WRITE: 119 { 120 // check arg0 (buffer) in user space 121 error = vmm_get_vseg( process , (intptr_t)arg0 , &vseg ); 122 if( error ) 123 { 124 125 #if DEBUG_SYSCALLS_ERROR 126 printk("\n[ERROR] in %s : unmapped arg0 %x for %s / thread[%x,%x]\n", 127 __FUNCTION__ , arg0, dev_fbf_cmd_str(operation), process->pid, this->trdid ); 128 #endif 129 this->errno = EINVAL; 130 return -1; 131 } 132 133 // get length and offset values 134 135 uint32_t length = (uint32_t)(intptr_t)arg1; 136 uint32_t offset = (uint32_t)(intptr_t)arg2; 110 // transfer to user space if no error 111 if( error == 0 ) 112 { 113 hal_copy_to_uspace( (void*)arg1, XPTR(local_cxy , &width ), sizeof(uint32_t) ); 114 hal_copy_to_uspace( (void*)arg2, XPTR(local_cxy , &height), sizeof(uint32_t) ); 115 hal_copy_to_uspace( (void*)arg3, XPTR(local_cxy , &type ), sizeof(uint32_t) ); 116 } 117 break; 118 } 119 
///////////////////// 120 case FBF_DIRECT_READ: 121 case FBF_DIRECT_WRITE: 122 { 123 void * buffer = (void *)arg1; 124 uint32_t npixels = arg2; 125 uint32_t offset = arg3; 126 bool_t is_write = (operation == FBF_DIRECT_WRITE); 127 128 // check buffer in user space 129 error = vmm_get_vseg( process , (intptr_t)buffer , &vseg ); 130 if( error ) 131 { 132 133 #if DEBUG_SYSCALLS_ERROR 134 printk("\n[ERROR] in %s : unmapped buffer %x for %s / thread[%x,%x]\n", 135 __FUNCTION__ , buffer, dev_fbf_cmd_str(operation), process->pid, this->trdid ); 136 #endif 137 this->errno = EINVAL; 138 } 139 else 140 { 141 // call relevant kernel function 142 error |= dev_fbf_move_data( is_write, buffer, npixels, offset ); 143 144 if( error ) 145 { 146 147 #if DEBUG_SYSCALLS_ERROR 148 printk("\n[ERROR] in %s : cannot move data for %s / buffer %x / thread[%x,%x]\n", 149 __FUNCTION__ , dev_fbf_cmd_str(operation), buffer, process->pid, this->trdid ); 150 #endif 151 this->errno = EINVAL; 152 } 153 } 154 break; 155 } 156 /////////////////////// 157 case FBF_CREATE_WINDOW: 158 { 159 uint32_t l_zero = arg1 >> 16; 160 uint32_t p_zero = arg1 & 0xFFFF; 161 uint32_t nlines = arg2 >> 16; 162 uint32_t npixels = arg2 & 0xFFFF; 163 164 // check buffer in user space 165 error = vmm_get_vseg( process , (intptr_t)arg3 , &vseg ); 166 if( error ) 167 { 168 169 #if DEBUG_SYSCALLS_ERROR 170 printk("\n[ERROR] in %s : unmapped user buffer %x for %s / thread[%x,%x]\n", 171 __FUNCTION__ , (intptr_t)arg3, dev_fbf_cmd_str(operation), process->pid, this->trdid ); 172 #endif 173 this->errno = EINVAL; 174 } 175 else 176 { 177 // allocated buffer base address 178 intptr_t user_buffer; 179 180 // call relevant kernel function 181 error = dev_fbf_create_window( nlines, 182 npixels, 183 l_zero, 184 p_zero, 185 &user_buffer ); 186 if( error == -1 ) 187 { 188 189 #if DEBUG_SYSCALLS_ERROR 190 printk("\n[ERROR] in %s : cannot create window for %s / thread[%x,%x]\n", 191 __FUNCTION__ , dev_fbf_cmd_str(operation), process->pid, 
this->trdid ); 192 #endif 193 this->errno = EINVAL; 194 } 195 196 // copy vseg base address to user space buffer 197 hal_copy_to_uspace( (void *)arg3, 198 XPTR( local_cxy , &user_buffer ), 199 sizeof(intptr_t) ); 200 hal_fence(); 201 } 202 break; 203 } 204 /////////////////////// 205 case FBF_DELETE_WINDOW: 206 { 207 uint32_t wid = arg1; 208 209 // call relevant kernel function 210 error = dev_fbf_delete_window( wid ); 211 212 if( error ) 213 { 214 215 #if DEBUG_SYSCALLS_ERROR 216 printk("\n[ERROR] in %s : cannot delete window for %s / thread[%x,%x]\n", 217 __FUNCTION__ , dev_fbf_cmd_str(operation), process->pid, this->trdid ); 218 #endif 219 this->errno = EINVAL; 220 } 221 break; 222 } 223 //////////////////////// 224 case FBF_REFRESH_WINDOW: 225 { 226 uint32_t wid = arg1; 227 uint32_t line_first = arg2; 228 uint32_t line_last = arg3; 229 230 // call relevant kernel function 231 error = dev_fbf_refresh_window( wid, 232 line_first, 233 line_last ); 234 235 if( error ) 236 { 237 238 #if DEBUG_SYSCALLS_ERROR 239 printk("\n[ERROR] in %s : cannot refresh window for %s / thread[%x,%x]\n", 240 __FUNCTION__ , dev_fbf_cmd_str(operation), process->pid, this->trdid ); 241 #endif 242 this->errno = EINVAL; 243 } 244 break; 245 } 246 ///////////////////// 247 case FBF_MOVE_WINDOW: 248 { 249 uint32_t wid = arg1; 250 uint32_t l_zero = arg2; 251 uint32_t p_zero = arg3; 137 252 138 253 // call relevant kernel function to move data 139 error = dev_fbf_move_ data( operation , arg0 , length , offset);254 error = dev_fbf_move_window( wid, l_zero, p_zero ); 140 255 141 256 if( error ) … … 143 258 144 259 #if DEBUG_SYSCALLS_ERROR 145 printk("\n[ERROR] in %s : cannot move data for %s / buffer %x / thread[%x,%x]\n", 146 __FUNCTION__ , dev_fbf_cmd_str(operation), arg0, process->pid, this->trdid ); 147 #endif 148 this->errno = EINVAL; 149 return -1; 150 } 151 260 printk("\n[ERROR] in %s : cannot move window / thread[%x,%x]\n", 261 __FUNCTION__ , dev_fbf_cmd_str(operation), process->pid, 
this->trdid ); 262 #endif 263 this->errno = EINVAL; 264 } 265 break; 266 } 267 /////////////////////// 268 case FBF_RESIZE_WINDOW: 269 { 270 uint32_t wid = arg1; 271 uint32_t width = arg2; 272 uint32_t height = arg3; 273 274 // call relevant kernel function to move data 275 error = dev_fbf_resize_window( wid , width , height ); 276 277 if( error ) 278 { 279 280 #if DEBUG_SYSCALLS_ERROR 281 printk("\n[ERROR] in %s : cannot move window / thread[%x,%x]\n", 282 __FUNCTION__ , dev_fbf_cmd_str(operation), process->pid, this->trdid ); 283 #endif 284 this->errno = EINVAL; 285 } 152 286 break; 153 287 } … … 161 295 #endif 162 296 this->errno = EINVAL; 163 return -1; 164 } 297 error = -1; 298 } 299 break; 165 300 } // end switch on operation 166 301 … … 173 308 #if DEBUG_SYS_FBF 174 309 if( DEBUG_SYS_FBF < tm_end ) 175 printk("\n[%s] thread[%x,%x] exit for %s / cost = %d / cycle %d\n", 176 __FUNCTION__, process->pid, this->trdid, dev_fbf_cmd_str( operation ), 177 (uint32_t)(tm_end - tm_start), (uint32_t)tm_end ); 310 printk("\n[%s] thread[%x,%x] exit for %s / cycle %d\n", 311 __FUNCTION__, process->pid, this->trdid, dev_fbf_cmd_str( operation ), (uint32_t)tm_end ); 178 312 #endif 179 313 … … 183 317 #endif 184 318 185 return 0;319 return error; 186 320 187 321 } // end sys_fbf() -
trunk/kernel/syscalls/sys_mmap.c
r651 r657 2 2 * sys_mmap.c - map files, memory or devices into process virtual address space 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019 )4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/syscalls/syscalls.h
r651 r657 2 2 * syscalls.h - Kernel side services for syscall handling. 3 3 * 4 * Author Alain Greiner (2016,2017,2018,2019 )4 * Author Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 748 748 749 749 /****************************************************************************************** 750 * [56] This function implements the non-standard "fbf_get_config", "fbf_read" and 751 * "fbf_write" syscalls, used to access the frame buffer peripheral. 752 ****************************************************************************************** 753 * @ operation : [in] operation type (defined in shared_fbf.h) 754 * @ arg0 : if config : pointer on width / else : pointer on user buffer 755 * @ arg1 : if config : pointer on height / else : number of bytes to move 756 * @ arg2 : if config : pointer on type / else : offset in frame buffer 750 * [56] This generic function implements the non-standard FBF related syscalls. 751 * The operation types mnemonics are defined in the <shared_fbf> file. 752 * The supported operations are defined in the <almosmkh.h> & <almosmkh.c> files. 753 * This function ckecks the syscall arguments, and call the relevant kernel function. 754 ****************************************************************************************** 755 * @ arg0 : operation type (mnemonics defined in shared_fbf.h) 756 * @ arg1 : depends on operation type 757 * @ arg2 : depends on operation type 758 * @ arg3 : depends on operation type 757 759 * @ return 0 if success / return -1 if illegal argument. 
758 760 *****************************************************************************************/ 759 int sys_fbf( uint32_t operation, 760 void * arg0, 761 void * arg1, 762 void * arg2 ); 761 int sys_fbf( reg_t arg0, 762 reg_t arg1, 763 reg_t arg2, 764 reg_t arg3 ); 765 766 /****************************************************************************************** 767 * [57] This generic function implements the socket related syscalls. 768 * The operation types mnemonics are defined in the <shared_socket> file. 769 * The supported operations are defined in the <socket.h> & <socket.c> files. 770 * This function ckecks the syscall arguments, and call the relevant kernel function. 771 ****************************************************************************************** 772 * @ arg0 : operation type (mnemonics defined in shared_socket.h) 773 * @ arg1 : depends on operation type 774 * @ arg2 : depends on operation type 775 * @ arg3 : depends on operation type 776 * @ return 0 if success / return -1 if illegal argument. 777 *****************************************************************************************/ 778 int sys_socket( reg_t arg0, 779 reg_t arg1, 780 reg_t arg2, 781 reg_t arg3 ); 763 782 764 783 #endif // _SYSCALLS_H_
Note: See TracChangeset
for help on using the changeset viewer.