Changeset 657 for trunk/kernel/devices/dev_ioc.c
- Timestamp: Mar 18, 2020, 11:16:59 PM
- File: trunk/kernel/devices/dev_ioc.c (1 edited)
Legend: lines prefixed with "-" were removed in r657, lines prefixed with "+" were added in r657, unprefixed lines are unchanged context, and "[...]" marks elided unchanged code.
trunk/kernel/devices/dev_ioc.c
r647 → r657

     * dev_ioc.c - IOC (Block Device Controler) generic device API implementation.
     *
-    * Author  Alain Greiner    (2016,2017,2018,2019)
+    * Author  Alain Greiner    (2016,2017,2018,2019,2020)
     *
     * Copyright (c) UPMC Sorbonne Universites
[...]
    extern chdev_directory_t  chdev_dir;   // allocated in kernel_init.c

-   ////////////////////////////////////////
-   char * dev_ioc_cmd_str( cmd_type_t cmd )
+   ////////////////////////////////////////////
+   char * dev_ioc_cmd_str( ioc_cmd_type_t cmd )
    {
        if( cmd == IOC_READ ) return "READ";
[...]
    } // end dev_ioc_init()

-   ///////////////////////////////////////////////
-   error_t dev_ioc_move_data( uint32_t  cmd_type,
+   ////////////////////////////////////////////////////////////////////////////////////
+   // This static function executes an asynchronous READ or WRITE request: it registers
+   // the client thread in the IOC device waiting queue, activates the server thread,
+   // blocks on the THREAD_BLOCKED_IO condition and deschedules.
+   // The client is re-activated by the server thread after IO operation completion.
+   ////////////////////////////////////////////////////////////////////////////////////
+   static error_t dev_ioc_move( uint32_t  cmd_type,
+                                xptr_t    buffer_xp,
+                                uint32_t  lba,
+                                uint32_t  count )
+   {
+       thread_t * this = CURRENT_THREAD;   // pointer on client thread
+
+       // get extended pointer on IOC chdev descriptor
+       xptr_t ioc_xp = chdev_dir.ioc[0];
+
+       // check dev_xp
+       assert( (ioc_xp != XPTR_NULL) , "undefined IOC chdev descriptor" );
+
+       // register command in client thread
+       this->ioc_cmd.dev_xp = ioc_xp;
+       this->ioc_cmd.type   = cmd_type;
+       this->ioc_cmd.buf_xp = buffer_xp;
+       this->ioc_cmd.lba    = lba;
+       this->ioc_cmd.count  = count;
+
+       // register client thread in IOC queue, blocks and deschedules
+       chdev_register_command( ioc_xp );
+
+       // return I/O operation status
+       return this->ioc_cmd.error;
+
+   } // end dev_ioc_move()
+
+   ////////////////////////////////////////////////////////////////////////////////////
+   // This static function executes a synchronous SYNC_READ or SYNC_WRITE request.
+   // It registers the command in the client thread descriptor, and calls directly
+   // the driver cmd function.
+   ////////////////////////////////////////////////////////////////////////////////////
+   error_t dev_ioc_sync_move( uint32_t  cmd_type,
                               xptr_t    buffer_xp,
                               uint32_t  lba,
                               uint32_t  count )
    {
        thread_t * this = CURRENT_THREAD;   // pointer on client thread
-
-   #if ( DEBUG_DEV_IOC_RX || DEBUG_DEV_IOC_TX )
-   uint32_t cycle = (uint32_t)hal_get_cycles();
-   #endif
-
-       // software L2/L3 cache coherence for memory buffer
-       if( chdev_dir.iob )
-       {
-           if( (cmd_type == IOC_SYNC_READ) || (cmd_type == IOC_READ) )
-           {
-               dev_mmc_inval( buffer_xp , count<<9 );
-           }
-           else    // (cmd_type == IOC_SYNC_WRITE) or (cmd_type == IOC_WRITE)
-           {
-               dev_mmc_sync ( buffer_xp , count<<9 );
-           }
-       }

        // get extended pointer on IOC chdev descriptor
[...]
        this->ioc_cmd.count  = count;

-       // for a synchronous acces, the driver is directly called by the client thread
-       if( (cmd_type == IOC_SYNC_READ) || (cmd_type == IOC_SYNC_WRITE) )
-       {
-
-   #if DEBUG_DEV_IOC_RX
-   uint32_t cycle = (uint32_t)hal_get_cycles();
-   if( (DEBUG_DEV_IOC_RX < cycle) && (cmd_type == IOC_SYNC_READ) )
-   printk("\n[%s] thread[%x,%x] enters for SYNC_READ / lba %x / buffer[%x,%x] / cycle %d\n",
-   __FUNCTION__ , this->process->pid, this->trdid, lba,
-   GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle );
-   #endif
-
-   #if DEBUG_DEV_IOC_TX
-   uint32_t cycle = (uint32_t)hal_get_cycles();
-   if( (DEBUG_DEV_IOC_TX < cycle) && (cmd_type == IOC_SYNC_WRITE) )
-   printk("\n[%s] thread[%x,%x] enters for SYNC_WRITE / lba %x / buffer[%x,%x] / cycle %d\n",
-   __FUNCTION__ , this->process->pid, this->trdid, lba,
-   GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle );
-   #endif
-           // get driver command function
-           cxy_t       ioc_cxy = GET_CXY( ioc_xp );
-           chdev_t   * ioc_ptr = GET_PTR( ioc_xp );
-           dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->cmd ) );
-
-           // call driver function
-           cmd( XPTR( local_cxy , this ) );
-
-   #if DEBUG_DEV_IOC_RX
-   if( (DEBUG_DEV_IOC_RX < cycle) && (cmd_type == IOC_SYNC_READ) )
-   printk("\n[%s] thread[%x,%x] resumes for IOC_SYNC_READ\n",
-   __FUNCTION__, this->process->pid , this->trdid )
-   #endif
-
-   #if DEBUG_DEV_IOC_TX
-   if( (DEBUG_DEV_IOC_RX < cycle) && (cmd_type == IOC_SYNC_WRITE) )
-   printk("\n[%s] thread[%x,%x] resumes for IOC_SYNC_WRITE\n",
-   __FUNCTION__, this->process->pid , this->trdid )
-   #endif
-
-       }
-       // for an asynchronous access, the client thread registers in the chdev waiting queue,
-       // activates server thread, blocks on THREAD_BLOCKED_IO and deschedules.
-       // It is re-activated by the server thread after IO operation completion.
-       else    // (cmd_type == IOC_READ) || (cmd_type == IOC_WRITE)
-       {
-
-   #if DEBUG_DEV_IOC_RX
-   uint32_t cycle = (uint32_t)hal_get_cycles();
-   if( (DEBUG_DEV_IOC_RX < cycle) && (cmd_type == IOC_READ) )
-   printk("\n[%s] thread[%x,%x] enters for READ / lba %x / buffer[%x,%x] / cycle %d\n",
-   __FUNCTION__ , this->process->pid, this->trdid, lba,
-   GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle );
-   #endif
-
-   #if DEBUG_DEV_IOC_TX
-   uint32_t cycle = (uint32_t)hal_get_cycles();
-   if( (DEBUG_DEV_IOC_TX < cycle) && (cmd_type == IOC_WRITE) )
-   printk("\n[%s] thread[%x,%x] enters for WRITE / lba %x / buffer[%x,%x] / cycle %d\n",
-   __FUNCTION__ , this->process->pid, this->trdid, lba,
-   GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle );
-   #endif
-           chdev_register_command( ioc_xp );
-
-   #if( DEBUG_DEV_IOC_RX )
-   if( (DEBUG_DEV_IOC_RX < cycle) && (cmd_type == IOC_READ) )
-   printk("\n[%s] thread[%x,%x] resumes for IOC_READ\n",
-   __FUNCTION__, this->process->pid , this->trdid )
-   #endif
-
-   #if( DEBUG_DEV_IOC_TX & 1 )
-   if( (DEBUG_DEV_IOC_RX < cycle) && (cmd_type == IOC_WRITE) )
-   printk("\n[%s] thread[%x,%x] resumes for IOC_WRITE\n",
-   __FUNCTION__, this->process->pid , this->trdid )
-   #endif
-
-       }
+       // get driver command function
+       cxy_t       ioc_cxy = GET_CXY( ioc_xp );
+       chdev_t   * ioc_ptr = GET_PTR( ioc_xp );
+       dev_cmd_t * cmd = (dev_cmd_t *)hal_remote_lpt( XPTR( ioc_cxy , &ioc_ptr->cmd ) );
+
+       // call driver function without blocking & descheduling
+       cmd( XPTR( local_cxy , this ) );

        // return I/O operation status
        return this->ioc_cmd.error;

-   } // end dev_ioc_move_data()
-
+   } // end dev_ioc_sync_move()
+
+   ///////////////////////////////////////////
+   error_t dev_ioc_read( xptr_t    buffer_xp,
+                         uint32_t  lba,
+                         uint32_t  count )
+   {
+
+   #if DEBUG_DEV_IOC_RX
+   thread_t * this  = CURRENT_THREAD;
+   uint32_t   cycle = (uint32_t)hal_get_cycles();
+   if( DEBUG_DEV_IOC_RX < cycle )
+   printk("\n[%s] thread[%x,%x] enters IOC_READ / lba %x / buffer[%x,%x] / cycle %d\n",
+   __FUNCTION__, this->process->pid, this->trdid, lba,
+   GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle );
+   #endif
+
+       // software L2/L3 cache coherence for memory buffer
+       if( chdev_dir.iob ) dev_mmc_inval( buffer_xp , count<<9 );
+
+       // request an asynchronous transfer
+       error_t error = dev_ioc_move( IOC_READ,
+                                     buffer_xp,
+                                     lba,
+                                     count );
+
+   #if( DEBUG_DEV_IOC_RX & 1 )
+   cycle = (uint32_t)hal_get_cycles();
+   if( DEBUG_DEV_IOC_RX < cycle )
+   printk("\n[%s] thread[%x,%x] exit IOC_READ / cycle %d\n",
+   __FUNCTION__, this->process->pid , this->trdid , cycle );
+   #endif
+
+       return error;
+
+   } // end dev_ioc_read()
+
+   ///////////////////////////////////////////
+   error_t dev_ioc_write( xptr_t    buffer_xp,
+                          uint32_t  lba,
+                          uint32_t  count )
+   {
+
+   #if DEBUG_DEV_IOC_TX
+   thread_t * this  = CURRENT_THREAD;
+   uint32_t   cycle = (uint32_t)hal_get_cycles();
+   if( DEBUG_DEV_IOC_TX < cycle )
+   printk("\n[%s] thread[%x,%x] enters IOC_WRITE / lba %x / buffer[%x,%x] / cycle %d\n",
+   __FUNCTION__, this->process->pid, this->trdid, lba,
+   GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle );
+   #endif
+
+       // software L2/L3 cache coherence for memory buffer
+       if( chdev_dir.iob ) dev_mmc_sync ( buffer_xp , count<<9 );
+
+       // request a blocking, but asynchronous, transfer
+       error_t error = dev_ioc_move( IOC_WRITE,
+                                     buffer_xp,
+                                     lba,
+                                     count );
+
+   #if( DEBUG_DEV_IOC_TX & 1 )
+   cycle = (uint32_t)hal_get_cycles();
+   if( DEBUG_DEV_IOC_TX < cycle )
+   printk("\n[%s] thread[%x,%x] exit IOC_WRITE / cycle %d\n",
+   __FUNCTION__, this->process->pid , this->trdid , cycle );
+   #endif
+
+       return error;
+
+   } // end dev_ioc_write()
+
+
+   ///////////////////////////////////////////
+   error_t dev_ioc_sync_read( xptr_t    buffer_xp,
+                              uint32_t  lba,
+                              uint32_t  count )
+   {
+
+   #if DEBUG_DEV_IOC_RX
+   thread_t * this  = CURRENT_THREAD;
+   uint32_t   cycle = (uint32_t)hal_get_cycles();
+   if( DEBUG_DEV_IOC_RX < cycle )
+   printk("\n[%s] thread[%x,%x] enters IOC_SYNC_READ / lba %x / buffer[%x,%x] / cycle %d\n",
+   __FUNCTION__, this->process->pid, this->trdid, lba,
+   GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle );
+   #endif
+
+       // software L2/L3 cache coherence for memory buffer
+       if( chdev_dir.iob ) dev_mmc_inval( buffer_xp , count<<9 );
+
+       // request a synchronous transfer
+       error_t error = dev_ioc_sync_move( IOC_SYNC_READ,
+                                          buffer_xp,
+                                          lba,
+                                          count );
+
+   #if( DEBUG_DEV_IOC_RX & 1 )
+   cycle = (uint32_t)hal_get_cycles();
+   if( DEBUG_DEV_IOC_RX < cycle )
+   printk("\n[%s] thread[%x,%x] exit IOC_SYNC_READ / cycle %d\n",
+   __FUNCTION__, this->process->pid , this->trdid , cycle );
+   #endif
+
+       return error;
+
+   } // end dev_ioc_sync_read()
+
+   /////////////////////////////////////////////////
+   error_t dev_ioc_sync_write( xptr_t    buffer_xp,
+                               uint32_t  lba,
+                               uint32_t  count )
+   {
+
+   #if DEBUG_DEV_IOC_TX
+   thread_t * this  = CURRENT_THREAD;
+   uint32_t   cycle = (uint32_t)hal_get_cycles();
+   if( DEBUG_DEV_IOC_TX < cycle )
+   printk("\n[%s] thread[%x,%x] enters IOC_SYNC_WRITE / lba %x / buffer[%x,%x] / cycle %d\n",
+   __FUNCTION__, this->process->pid, this->trdid, lba,
+   GET_CXY(buffer_xp), GET_PTR(buffer_xp), cycle );
+   #endif
+
+       // software L2/L3 cache coherence for memory buffer
+       if( chdev_dir.iob ) dev_mmc_sync ( buffer_xp , count<<9 );
+
+       // request a synchronous transfer
+       error_t error = dev_ioc_sync_move( IOC_SYNC_WRITE,
+                                          buffer_xp,
+                                          lba,
+                                          count );
+
+   #if( DEBUG_DEV_IOC_TX & 1 )
+   cycle = (uint32_t)hal_get_cycles();
+   if( DEBUG_DEV_IOC_TX < cycle )
+   printk("\n[%s] thread[%x,%x] exit IOC_SYNC_WRITE / cycle %d\n",
+   __FUNCTION__, this->process->pid , this->trdid , cycle );
+   #endif
+
+       return error;
+
+   } // end dev_ioc_sync_write()
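Editorial note (not part of the changeset): the asynchronous path above relies on a rendezvous between the client thread and the IOC server thread. The sketch below shows the server side of that rendezvous, inferred only from the comments in dev_ioc_move(); the function and helper names marked "hypothetical" are illustrative, and the thread_unblock() signature is assumed, not taken from this changeset.

    // Hypothetical sketch of the IOC server side of the rendezvous used by dev_ioc_move():
    // the server thread takes one client thread from the chdev waiting queue, executes
    // the command registered in client->ioc_cmd by calling the driver cmd function,
    // then re-activates the client blocked on THREAD_BLOCKED_IO.
    static void ioc_server_loop( chdev_t * ioc )                      // hypothetical name
    {
        while( 1 )
        {
            // take next client thread from the waiting queue (blocks when queue is empty)
            thread_t * client = ioc_server_get_client( ioc );         // hypothetical helper

            // call the driver command function on behalf of the client
            ioc->cmd( XPTR( local_cxy , client ) );

            // re-activate the client thread blocked on THREAD_BLOCKED_IO
            thread_unblock( XPTR( local_cxy , client ) , THREAD_BLOCKED_IO );
        }
    }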
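Editorial note (not part of the changeset): a minimal caller-side sketch of the new asynchronous API, using only functions visible in this file; the buffer size, lba and count values are arbitrary examples.

    // Hypothetical usage sketch: read 2 blocks starting at lba 64 into a kernel buffer.
    // The calling thread blocks in dev_ioc_move() until the IOC server thread has
    // completed the transfer and re-activated it.
    uint8_t  buffer[1024];                                   // 2 blocks of 512 bytes
    xptr_t   buffer_xp = XPTR( local_cxy , buffer );         // extended pointer on buffer
    error_t  error     = dev_ioc_read( buffer_xp , 64 , 2 );
    if( error ) printk("\n[ERROR] IOC read failed\n");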
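Editorial note (not part of the changeset): in all four access functions the size passed to dev_mmc_inval() / dev_mmc_sync() is count<<9, i.e. count * 512 bytes, so the API assumes 512-byte blocks. The synchronous variants call the driver cmd function directly in the context of the calling thread, without blocking on THREAD_BLOCKED_IO and without descheduling; the sketch below assumes they are intended for contexts where the chdev server thread cannot be used, which is an assumption rather than something stated in the changeset.

    // Hypothetical usage sketch: synchronously read block 0 (512 bytes) into a local buffer.
    // dev_ioc_sync_move() calls the driver cmd function without blocking or descheduling.
    uint8_t  block0[512];                                    // 1 block of 512 bytes
    xptr_t   block0_xp = XPTR( local_cxy , block0 );
    error_t  error     = dev_ioc_sync_read( block0_xp , 0 , 1 );
    if( error ) printk("\n[ERROR] IOC synchronous read failed\n");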