/////////////////////////////////////////////////////////////////////////////////// // File : sys_handler.c // Date : 01/04/2012 // Author : alain greiner and joel porquet // Copyright (c) UPMC-LIP6 /////////////////////////////////////////////////////////////////////////////////// #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if !defined(X_SIZE) # error: You must define X_SIZE in the hard_config.h file #endif #if !defined(Y_SIZE) # error: You must define Y_SIZE in the hard_config.h file #endif #if !defined(NB_PROCS_MAX) # error: You must define NB_PROCS_MAX in the hard_config.h file #endif #if !defined(SEG_BOOT_MAPPING_BASE) # error: You must define SEG_BOOT_MAPPING_BASE in the hard_config.h file #endif #if !defined(NB_TTY_CHANNELS) # error: You must define NB_TTY_CHANNELS in the hard_config.h file #endif #if (NB_TTY_CHANNELS < 1) # error: NB_TTY_CHANNELS cannot be smaller than 1! #endif #if !defined(NB_TIM_CHANNELS) # error: You must define NB_TIM_CHANNELS in the hard_config.h file #endif #if !defined(NB_NIC_CHANNELS) # error: You must define NB_NIC_CHANNELS in the hard_config.h file #endif #if !defined(NB_CMA_CHANNELS) # error: You must define NB_CMA_CHANNELS in the hard_config.h file #endif #if !defined(GIET_NO_HARD_CC) # error: You must define GIET_NO_HARD_CC in the giet_config.h file #endif #if !defined ( GIET_NIC_MAC4 ) # error: You must define GIET_NIC_MAC4 in the giet_config.h file #endif #if !defined ( GIET_NIC_MAC2 ) # error: You must define GIET_NIC_MAC2 in the giet_config.h file #endif //////////////////////////////////////////////////////////////////////////// // Extern variables //////////////////////////////////////////////////////////////////////////// // allocated in tty0.c file. extern sqt_lock_t _tty0_sqt_lock; // allocated in mwr_driver.c file. 
extern simple_lock_t _coproc_lock[X_SIZE*Y_SIZE]; extern unsigned int _coproc_type[X_SIZE*Y_SIZE]; extern unsigned int _coproc_info[X_SIZE*Y_SIZE]; extern unsigned int _coproc_mode[X_SIZE*Y_SIZE]; extern unsigned int _coproc_error[X_SIZE*Y_SIZE]; extern unsigned int _coproc_trdid[X_SIZE*Y_SIZE]; // allocated in tty_driver.c file. extern tty_fifo_t _tty_rx_fifo[NB_TTY_CHANNELS]; // allocated in kernel_init.c file extern static_scheduler_t* _schedulers[X_SIZE][Y_SIZE][NB_PROCS_MAX]; // allocated in bdv_driver.c file spin_lock_t _bdv_lock; //////////////////////////////////////////////////////////////////////////////// // Allocator protecting exclusive access to FBF by a single application. // - The number of users in a given application should be set by a single // thread using an _atomic_test_and_set(). // - The allocator is atomically decremented by each user thread when // the thread exit. //////////////////////////////////////////////////////////////////////////////// __attribute__((section(".kdata"))) unsigned int _fbf_alloc = 0; //////////////////////////////////////////////////////////////////////////////// // Channel allocators for multi-channels peripherals // - The array _***_channel_allocator[channel] defines the number of user // threads for a dynamically allocated channel of peripheral ***. // - The array _***_channel_wti[channel] defines the WTI index and the // processor coordinates for the processor receiving the channel WTI. 
////////////////////////////////////////////////////////////////////////////////

// Per-channel user counters and WTI routing info for each multi-channel
// peripheral. Arrays exist only when the platform declares channels.
#if NB_TTY_CHANNELS
__attribute__((section(".kdata")))
unsigned int _tty_channel_alloc[NB_TTY_CHANNELS] = {0};
__attribute__((section(".kdata")))
unsigned int _tty_channel_wti[NB_TTY_CHANNELS];
#endif

#if NB_TIM_CHANNELS
__attribute__((section(".kdata")))
unsigned int _tim_channel_alloc[NB_TIM_CHANNELS] = {0};
__attribute__((section(".kdata")))
unsigned int _tim_channel_wti[NB_TIM_CHANNELS];
#endif

#if NB_CMA_CHANNELS
__attribute__((section(".kdata")))
unsigned int _cma_channel_alloc[NB_CMA_CHANNELS] = {0};
__attribute__((section(".kdata")))
unsigned int _cma_channel_wti[NB_CMA_CHANNELS];
#endif

#if NB_NIC_CHANNELS
__attribute__((section(".kdata")))
unsigned int _nic_rx_channel_alloc[NB_NIC_CHANNELS] = {0};
__attribute__((section(".kdata")))
unsigned int _nic_rx_channel_wti[NB_NIC_CHANNELS];
__attribute__((section(".kdata")))
unsigned int _nic_tx_channel_alloc[NB_NIC_CHANNELS] = {0};
__attribute__((section(".kdata")))
unsigned int _nic_tx_channel_wti[NB_NIC_CHANNELS];
#endif

////////////////////////////////////////////////////////////////////////////
//     NIC_RX and NIC_TX kernel chbuf arrays
////////////////////////////////////////////////////////////////////////////

// 64-byte alignment: chbuf descriptors are accessed by DMA engines and must
// not straddle cache lines.
__attribute__((section(".kdata")))
ker_chbuf_t _nic_ker_rx_chbuf[NB_NIC_CHANNELS] __attribute__((aligned(64)));

__attribute__((section(".kdata")))
ker_chbuf_t _nic_ker_tx_chbuf[NB_NIC_CHANNELS] __attribute__((aligned(64)));

////////////////////////////////////////////////////////////////////////////
// FBF related chbuf descriptors array, indexed by the CMA channel index.
// Physical addresses of these chbuf descriptors required for L2 cache sync.
// FBF status
////////////////////////////////////////////////////////////////////////////

__attribute__((section(".kdata")))
fbf_chbuf_t _fbf_chbuf[NB_CMA_CHANNELS] __attribute__((aligned(64)));

__attribute__((section(".kdata")))
unsigned long long _fbf_chbuf_paddr[NB_CMA_CHANNELS];

__attribute__((section(".kdata")))
buffer_status_t _fbf_status[NB_CMA_CHANNELS] __attribute__((aligned(64)));

////////////////////////////////////////////////////////////////////////////
//    Initialize the syscall vector with syscall handlers
// Note: This array must be synchronised with the define in file stdio.h
////////////////////////////////////////////////////////////////////////////

__attribute__((section(".kdata")))
const void * _syscall_vector[64] =
{
    &_sys_proc_xyp,                  /* 0x00 */
    &_get_proctime,                  /* 0x01 */
    &_sys_procs_number,              /* 0x02 */
    &_sys_xy_from_ptr,               /* 0x03 */
    &_sys_ukn,                       /* 0x04 */
    &_sys_vseg_get_vbase,            /* 0x05 */
    &_sys_vseg_get_length,           /* 0x06 */
    &_sys_heap_info,                 /* 0x07 */
    &_sys_fbf_size,                  /* 0x08 */
    &_sys_fbf_alloc,                 /* 0x09 */
    &_sys_fbf_cma_alloc,             /* 0x0A */
    &_sys_fbf_cma_init_buf,          /* 0x0B */
    &_sys_fbf_cma_start,             /* 0x0C */
    &_sys_fbf_cma_display,           /* 0x0D */
    &_sys_fbf_cma_stop,              /* 0x0E */
    &_sys_ukn,                       /* 0x0F */
    &_sys_applications_status,       /* 0x10 */
    &_sys_fbf_sync_write,            /* 0x11 */
    &_sys_fbf_sync_read,             /* 0x12 */
    &_sys_ukn,                       /* 0x13 */
    &_sys_tim_alloc,                 /* 0x14 */
    &_sys_tim_start,                 /* 0x15 */
    &_sys_tim_stop,                  /* 0x16 */
    &_sys_kill_application,          /* 0x17 */
    &_sys_exec_application,          /* 0x18 */
    &_sys_ukn,                       /* 0x19 */
    &_sys_pthread_control,           /* 0x1A */
    &_sys_pthread_yield,             /* 0x1B */
    &_sys_pthread_kill,              /* 0x1C */
    &_sys_pthread_create,            /* 0x1D */
    &_sys_pthread_join,              /* 0x1E */
    &_sys_pthread_exit,              /* 0x1F */
    &_fat_open,                      /* 0x20 */
    &_fat_read,                      /* 0x21 */
    &_fat_write,                     /* 0x22 */
    &_fat_lseek,                     /* 0x23 */
    &_fat_file_info,                 /* 0x24 */
    &_fat_close,                     /* 0x25 */
    &_fat_remove,                    /* 0x26 */
    &_fat_rename,                    /* 0x27 */
    &_fat_mkdir,                     /* 0x28 */
    &_fat_opendir,                   /* 0x29 */
    &_fat_closedir,                  /* 0x2A */
    &_fat_readdir,                   /* 0x2B */
    &_sys_ukn,                       /* 0x2C */
    &_sys_ukn,                       /* 0x2D */
    &_sys_ukn,                       /* 0x2E */
    &_sys_ukn,                       /* 0x2F */
    &_sys_nic_alloc,                 /* 0x30 */
    &_sys_nic_start,                 /* 0x31 */
    &_sys_nic_move,                  /* 0x32 */
    &_sys_nic_stop,                  /* 0x33 */
    &_sys_nic_stats,                 /* 0x34 */
    &_sys_nic_clear,                 /* 0x35 */
    &_sys_tty_write,                 /* 0x36 */
    &_sys_tty_read,                  /* 0x37 */
    &_sys_tty_alloc,                 /* 0x38 */
    &_sys_ukn,                       /* 0x39 */
    &_sys_ukn,                       /* 0x3A */
    &_sys_coproc_completed,          /* 0x3B */
    &_sys_coproc_alloc,              /* 0x3C */
    &_sys_coproc_channel_init,       /* 0x3D */
    &_sys_coproc_run,                /* 0x3E */
    &_sys_coproc_release,            /* 0x3F */
};

//////////////////////////////////////////////////////////////////////////////
//        Applications related syscall handlers
//////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////
// This function is called by the _sys_exec_application function
// to reload all data segments contained in an application.elf file.
// File checking is minimal, because these segments have already
// been loaded by the boot code.
//////////////////////////////////////////////////////////////////////// static unsigned int _load_writable_segments( mapping_vspace_t* vspace ) { #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _load_writable_segments() at cycle %d\n" "P[%d,%d,%d] enters for %s\n", _get_proctime() , x , y , p , vspace->name ); #endif mapping_header_t* header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vseg_t* vseg = _get_vseg_base(header); unsigned int vseg_id; // vseg index in mapping char buf[4096]; // buffer to store one cluster unsigned int fd = 0; // file descriptor unsigned int found = 0; // first scan on vsegs in vspace to find the .elf pathname for (vseg_id = vspace->vseg_offset; vseg_id < (vspace->vseg_offset + vspace->vsegs); vseg_id++) { if( vseg[vseg_id].type == VSEG_TYPE_ELF ) { // open the .elf file associated to vspace fd = _fat_open( vseg[vseg_id].binpath , O_RDONLY ); if ( fd < 0 ) return 1; #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _load_writable_segments() at cycle %d\n" "P[%d,%d,%d] open %s / fd = %d\n", _get_proctime() , x , y , p , vseg[vseg_id].binpath , fd ); #endif found = 1; break; } } // check .elf file found if ( found == 0 ) { _printf("@@@ _load_writable_segments() : .elf not found\n"); return 1; } // load Elf-Header into buffer from .elf file if ( _fat_lseek( fd, 0, SEEK_SET ) < 0 ) { _printf("@@@ _load_writable_segments() : cannot seek\n"); _fat_close( fd ); return 1; } if ( _fat_read( fd, (unsigned int)buf, 4096, 0 ) < 0 ) { _printf("@@@ _load_writable_segments() : cannot read\n"); _fat_close( fd ); return 1; } #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _load_writable_segments() at cycle %d\n" "P[%d,%d,%d] loaded Elf-Header\n", _get_proctime() , x , y , p ); #endif // get nsegments and 
Program-Header-Table offset from Elf-Header Elf32_Ehdr* elf_header_ptr = (Elf32_Ehdr*)buf; unsigned int offset = elf_header_ptr->e_phoff; unsigned int nsegments = elf_header_ptr->e_phnum; // load Program-Header-Table from .elf file if ( _fat_lseek( fd, offset, SEEK_SET ) < 0 ) { _fat_close( fd ); return 1; } if ( _fat_read( fd, (unsigned int)buf, 4096, 0 ) < 0 ) { _fat_close( fd ); return 1; } #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _load_writable_segments() at cycle %d\n" "P[%d,%d,%d] loaded Program-Header-Table\n", _get_proctime() , x , y , p ); #endif // set Program-Header-Table pointer Elf32_Phdr* elf_pht_ptr = (Elf32_Phdr*)buf; // second scan on vsegs in vspace to load the seg_data segments : // - type == VSEG_TYPE_ELF // - non eXecutable for (vseg_id = vspace->vseg_offset; vseg_id < (vspace->vseg_offset + vspace->vsegs); vseg_id++) { if( (vseg[vseg_id].type == VSEG_TYPE_ELF) && // type ELF ((vseg[vseg_id].mode & 0x4) == 0) ) // non executable { // get vbase and pbase paddr_t pbase = vseg[vseg_id].pbase; unsigned int vbase = vseg[vseg_id].vbase; // scan segments in Progam-Header-Table to find match // No match checking as the segment was previously found unsigned int seg; for (seg = 0 ; seg < nsegments ; seg++) { if ( (elf_pht_ptr[seg].p_type == PT_LOAD) && // loadable (elf_pht_ptr[seg].p_flags & PF_W) && // writable (elf_pht_ptr[seg].p_vaddr == vbase) ) // matching { // Get segment offset and size in .elf file unsigned int seg_offset = elf_pht_ptr[seg].p_offset; unsigned int seg_size = elf_pht_ptr[seg].p_filesz; // compute destination address and extension for _fat_read() unsigned int dest = (unsigned int)pbase; unsigned int extend = (unsigned int)(pbase>>32) | 0xFFFF0000; // load the segment if ( _fat_lseek( fd, seg_offset, SEEK_SET ) < 0 ) { _fat_close( fd ); return 1; } if ( _fat_read( fd, dest, seg_size, extend ) < 0 ) { _fat_close( fd ); return 1; } } } #if GIET_DEBUG_EXEC if ( _get_proctime() > 
GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _load_writable_segments() at cycle %d\n" "P[%d,%d,%d] loaded segment %x\n", _get_proctime() , x , y , p , vbase ); #endif } } // end loop on writable & loadable segments // close .elf file _fat_close( fd ); return 0; } // end load_writable_segments() /////////////////////////////////////// int _sys_exec_application( char* name ) { mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t * vspace = _get_vspace_base(header); mapping_thread_t * thread = _get_thread_base(header); mapping_vseg_t * vseg = _get_vseg_base(header); unsigned int vspace_id; unsigned int thread_id; #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_exec_application() at cycle %d\n" "P[%d,%d,%d] enters for vspace %s\n", _get_proctime() , x, y, p, name ); #endif unsigned int y_size = header->y_size; // scan vspaces to find matching vspace name for (vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++) { if ( _strcmp( vspace[vspace_id].name, name ) == 0 ) // vspace found { #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_exec_application() at cycle %d\n" "P[%d,%d,%d] found vspace %s\n", _get_proctime() , x, y, p, name ); #endif // reload writable segments if ( _load_writable_segments( &vspace[vspace_id] ) ) { _printf("[GIET ERROR] _sys_exec_application() : " "can't load data segment for vspace %s\n", name ); return SYSCALL_CANNOT_LOAD_DATA_SEGMENT; } #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_exec_application() at cycle %d\n" "P[%d,%d,%d] loaded all writable segments for vspace %s\n", _get_proctime() , x, y, p, name ); #endif // scan threads in vspace with three goals : // - check all threads desactivated // - re-initialise all threads contexts // - find main thread 
unsigned int main_found = 0; unsigned int main_ltid = 0; static_scheduler_t* main_psched = NULL; unsigned int min = vspace[vspace_id].thread_offset; unsigned int max = min + vspace[vspace_id].threads; for ( thread_id = min ; thread_id < max ; thread_id++ ) { // get thread identifiers : [x,y,p,ltid] unsigned int cid = thread[thread_id].clusterid; unsigned int x = cid / y_size; unsigned int y = cid % y_size; unsigned int p = thread[thread_id].proclocid; unsigned int ltid = thread[thread_id].ltid; unsigned int vsid = thread[thread_id].stack_vseg_id; // get scheduler pointer static_scheduler_t* psched = _schedulers[x][y][p]; // check thread non active if ( psched->context[ltid].slot[CTX_NORUN_ID] == 0 ) // runnable !!! { _printf("\n[GIET ERROR] in _sys_exec_application() : " "thread %s already active in vspace %s\n", thread[thread_id].name, name ); return SYSCALL_THREAD_ALREADY_ACTIVE; } // initialise thread context unsigned int ctx_epc = psched->context[ltid].slot[CTX_ENTRY_ID]; unsigned int ctx_sp = vseg[vsid].vbase + vseg[vsid].length; unsigned int ctx_ra = (unsigned int)&_ctx_eret; unsigned int ctx_sr = GIET_SR_INIT_VALUE; psched->context[ltid].slot[CTX_EPC_ID] = ctx_epc; psched->context[ltid].slot[CTX_RA_ID] = ctx_ra; psched->context[ltid].slot[CTX_SR_ID] = ctx_sr; psched->context[ltid].slot[CTX_SP_ID] = ctx_sp; // register information required to activate main thread // actual activation done when threads initialisation is completed if ( thread[thread_id].is_main ) { main_psched = psched; main_ltid = ltid; main_found = 1; } #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_exec_application() at cycle %d\n" "P[%d,%d,%d] initialise thread %s in vspace %s\n", _get_proctime() , x, y, p, thread[thread_id].name , name ); #endif } // end loop on threads // activate main thread if ( main_found ) { main_psched->context[main_ltid].slot[CTX_NORUN_ID] = 0; } else { _printf("\n[GIET ERROR] in _sys_exec_application() : " "main not found 
in vspace %s\n", name ); return SYSCALL_MAIN_NOT_FOUND; } _printf("\n[GIET WARNING] application %s launched : %d threads\n", name , max-min ); return SYSCALL_OK; } } // end of loop on vspaces // vspace not found _printf("\n[GIET ERROR] in _sys_exec_application() : " "vspace %s not found\n", name ); return SYSCALL_VSPACE_NOT_FOUND; } // end _sys_exec_application() /////////////////////////////////////// int _sys_kill_application( char* name ) { mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t * vspace = _get_vspace_base(header); mapping_thread_t * thread = _get_thread_base(header); unsigned int vspace_id; unsigned int thread_id; #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_kill_application() at cycle %d\n" "P[%d,%d,%d] enters for vspace %s\n", _get_proctime() , x , y , p , name ); #endif // shell cannot be killed if ( _strcmp( name , "shell" ) == 0 ) { _printf("\n[GIET ERROR] in _sys_kill_application() : " "%s application cannot be killed\n", name ); return SYSCALL_APPLI_CANNOT_BE_KILLED; } // scan vspaces to find matching vspace name for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++) { if ( _strcmp( vspace[vspace_id].name, name ) == 0 ) { // scan threads to send KILL signal to all threads in vspace unsigned int y_size = header->y_size; unsigned int min = vspace[vspace_id].thread_offset; unsigned int max = min + vspace[vspace_id].threads; for ( thread_id = min ; thread_id < max ; thread_id++ ) { unsigned int cid = thread[thread_id].clusterid; unsigned int x = cid / y_size; unsigned int y = cid % y_size; unsigned int p = thread[thread_id].proclocid; unsigned int ltid = thread[thread_id].ltid; // get scheduler pointer for processor running the thread static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p]; // set KILL signal bit 
_atomic_or( &psched->context[ltid].slot[CTX_SIGS_ID] , SIGS_MASK_KILL ); } _printf("\n[GIET WARNING] application %s killed / %d threads\n", name , max-min ); return SYSCALL_OK; } } // en loop on vspaces _printf("\n[GIET ERROR] in _sys_kill_application() : " "application %s not found\n", name ); return SYSCALL_VSPACE_NOT_FOUND; } // end _sys_kill_application() ////////////////////////////////////////// int _sys_applications_status( char* name ) { mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_thread_t * thread = _get_thread_base(header); mapping_vspace_t * vspace = _get_vspace_base(header); mapping_cluster_t * cluster = _get_cluster_base(header); unsigned int thread_id; // thread index in mapping unsigned int vspace_id; // vspace index in mapping // scan vspaces for( vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++ ) { if ( (name == NULL) || (_strcmp(vspace[vspace_id].name , name ) == 0) ) { _user_printf("\n*** vspace %s\n", vspace[vspace_id].name ); // scan all threads in vspace unsigned int min = vspace[vspace_id].thread_offset ; unsigned int max = min + vspace[vspace_id].threads ; for ( thread_id = min ; thread_id < max ; thread_id++ ) { unsigned int clusterid = thread[thread_id].clusterid; unsigned int p = thread[thread_id].proclocid; unsigned int x = cluster[clusterid].x; unsigned int y = cluster[clusterid].y; unsigned int ltid = thread[thread_id].ltid; static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p]; unsigned int norun = psched->context[ltid].slot[CTX_NORUN_ID]; unsigned int tty = psched->context[ltid].slot[CTX_TTY_ID]; unsigned int current = psched->current; if ( current == ltid ) _user_printf(" - thread %s / P[%d,%d,%d] / ltid = %d / " "TTY = %d / norun = %x : running\n", thread[thread_id].name, x, y, p, ltid, tty, norun ); else if ( norun == 0 ) _user_printf(" - thread %s / P[%d,%d,%d] / ltid = %d / " "TTY = %d / norun = %x : runable\n", thread[thread_id].name, x, y, p, ltid, tty, norun); else 
_user_printf(" - thread %s / P[%d,%d,%d] / ltid = %d / " "TTY = %d / norun = %x : blocked\n", thread[thread_id].name, x, y, p, ltid, tty, norun); } } } _user_printf("\n"); return SYSCALL_OK; } // end _sys_applications_status() ///////////////////////////////////////////////////////////////////////////// // Threads related syscall handlers ///////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////// int _sys_pthread_create( unsigned int* buffer, void* attr, void* function, void* arg ) { // attr argument not supported if ( attr != NULL ) { _printf("\n[GIET ERROR] in _sys_pthread_create() : " "attr argument not supported\n" ); return SYSCALL_PTHREAD_ARGUMENT_NOT_SUPPORTED; } // get pointers in mapping mapping_header_t* header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_thread_t* thread = _get_thread_base(header); mapping_vspace_t* vspace = _get_vspace_base(header); mapping_cluster_t* cluster = _get_cluster_base(header); // get scheduler for processor running the calling thread static_scheduler_t* psched = (static_scheduler_t*)_get_sched(); // get calling thread local index in scheduler unsigned int current = psched->current; // get vspace index unsigned int vspace_id = psched->context[current].slot[CTX_VSID_ID]; #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_pthread_create() at cycle %d\n" "P[%d,%d,%d] enters for vspace %s / entry = %x\n", _get_proctime() , x , y , p , vspace[vspace_id].name , (unsigned int)function ); #endif unsigned int thread_id; // searched thread : local index in mapping unsigned int clusterid; // searched thread : cluster index unsigned int lpid; // searched thread : processor local index unsigned int ltid; // searched thread : scheduler thread index unsigned int cx; // searched thread : X coordinate 
for searched thread unsigned int cy; // searched thread : Y coordinate for searched thread unsigned int entry; // searched thread : entry point unsigned int norun; // searched thread : norun vector unsigned int trdid; // searched thread : thread identifier // scan threads in vspace to find an inactive thread matching function unsigned int min = vspace[vspace_id].thread_offset; unsigned int max = min + vspace[vspace_id].threads; unsigned int found = 0; for ( thread_id = min ; (thread_id < max) && (found == 0) ; thread_id++ ) { // get thread coordinates [cx,cy,lpid] and ltid from mapping ltid = thread[thread_id].ltid; clusterid = thread[thread_id].clusterid; lpid = thread[thread_id].proclocid; cx = cluster[clusterid].x; cy = cluster[clusterid].y; // get thread scheduler pointer psched = _schedulers[cx][cy][lpid]; // get thread entry-point, norun-vector, and trdid from context entry = psched->context[ltid].slot[CTX_ENTRY_ID]; norun = psched->context[ltid].slot[CTX_NORUN_ID]; trdid = psched->context[ltid].slot[CTX_TRDID_ID]; // check matching if ( ((unsigned int)function == entry ) && (norun & NORUN_MASK_THREAD) ) found = 1; } // end loop on threads if ( found ) // one matching inactive thread has been found { // set argument value in thread context if ( arg != NULL ) psched->context[ltid].slot[CTX_A0_ID] = (unsigned int)arg; // activate thread psched->context[ltid].slot[CTX_NORUN_ID] = 0; // return launched thead global identifier *buffer = trdid; #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_pthread_create() at cycle %d\n" "P[%d,%d,%d] exit : thread %x launched in vspace %s\n", _get_proctime() , x , y , p , trdid , vspace[vspace_id].name ); #endif return SYSCALL_OK; } else // no matching thread found { _printf("\n[GIET ERROR] in _sys_pthread_create() : " "no matching thread for entry = %x in vspace %s\n", (unsigned int)function , vspace[vspace_id].name ); return SYSCALL_THREAD_NOT_FOUND; } } // end _sys_pthread_create() 
/////////////////////////////////////////// int _sys_pthread_join( unsigned int trdid, void* ptr ) { // ptr argument not supported if ( ptr != NULL ) { _printf("\n[GIET ERROR] in _sys_pthread_join() : " "ptr argument not supported, must be NULL\n" ); return SYSCALL_PTHREAD_ARGUMENT_NOT_SUPPORTED; } // get calling thread vspace unsigned int caller_vspace = _get_context_slot( CTX_VSID_ID ); #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_pthread_join() at cycle %d\n" "P[%d,%d,%d] enters for thread %x in vspace %d\n", _get_proctime() , x , y , p , trdid , caller_vspace ); #endif // get target thread indexes from trdid unsigned int cx = (trdid>>24) & 0xFF; unsigned int cy = (trdid>>16) & 0xFF; unsigned int lpid = (trdid>>8 ) & 0xFF; unsigned int ltid = (trdid ) & 0xFF; // get target thread scheduler, vspace and registered trdid static_scheduler_t* psched = _schedulers[cx][cy][lpid]; unsigned int target_vspace = psched->context[ltid].slot[CTX_VSID_ID]; unsigned int registered_trdid = psched->context[ltid].slot[CTX_TRDID_ID]; // check trdid if ( trdid != registered_trdid ) { _printf("\nerror in _sys_pthread_join() : " "trdid = %x / registered_trdid = %x\n", trdid , registered_trdid ); return SYSCALL_UNCOHERENT_THREAD_CONTEXT; } // check calling thread and target thread in same vspace if ( caller_vspace != target_vspace ) { _printf("\n[GIET ERROR] in _sys_pthread_join() : " " calling thread and target thread not in same vspace\n"); return SYSCALL_NOT_IN_SAME_VSPACE; } // get target thread state unsigned int* pnorun = &psched->context[ltid].slot[CTX_NORUN_ID]; asm volatile ( "2000: \n" "move $11, %0 \n" /* $11 <= pnorun */ "lw $11, 0($11) \n" /* $11 <= norun */ "andi $11, $11, 1 \n" /* $11 <= norun & 0x1 */ "beqz $11, 2000b \n" : : "r" (pnorun) : "$11" ); #if GIET_DEBUG_EXEC if ( _get_proctime() > 
GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_pthread_join() at cycle %d\n" "P[%d,%d,%d] exit for thread %x in vspace %d\n", _get_proctime() , x , y , p , trdid , caller_vspace ); #endif return SYSCALL_OK; } // end _sys_pthread_join() //////////////////////////////////////// int _sys_pthread_kill( pthread_t trdid, int signal ) { // get calling thread vspace unsigned int caller_vspace = _get_context_slot( CTX_VSID_ID ); #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_pthread_kill() at cycle %d\n" "P[%d,%d,%d] enters for thread %x in vspace %d\n", _get_proctime() , x , y , p , trdid , caller_vspace ); #endif // get and check target thread indexes from trdid unsigned int cx = (trdid>>24) & 0xFF; unsigned int cy = (trdid>>16) & 0xFF; unsigned int lpid = (trdid>>8 ) & 0xFF; unsigned int ltid = (trdid ) & 0xFF; // get target thread scheduler, vspace and registered trdid static_scheduler_t* psched = _schedulers[cx][cy][lpid]; unsigned int target_vspace = psched->context[ltid].slot[CTX_VSID_ID]; unsigned int registered_trdid = psched->context[ltid].slot[CTX_TRDID_ID]; // check trdid if ( trdid != registered_trdid ) { _printf("\n[GIET ERROR] in _sys_pthread_kill() : trdid = %x" " / registered_trdid = %x\n", trdid , registered_trdid ); return SYSCALL_UNCOHERENT_THREAD_CONTEXT; } // check calling thread and target thread in same vspace if ( caller_vspace != target_vspace ) { _printf("\n[GIET ERROR] in _sys_pthread_kill() : not in same vspace\n"); return SYSCALL_NOT_IN_SAME_VSPACE; } // register KILL signal in target thread context if required if ( signal ) { _atomic_or( &psched->context[ltid].slot[CTX_SIGS_ID] , SIGS_MASK_KILL ); } #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_pthread_kill() at cycle %d\n" "P[%d,%d,%d] exit for thread %x in vspace %d\n", 
_get_proctime() , x , y , p , trdid , caller_vspace ); #endif return SYSCALL_OK; } // end _sys_pthread_kill() ///////////////////////////////////// int _sys_pthread_exit( void* string ) { mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t * vspace = _get_vspace_base(header); unsigned int ltid = _get_context_slot(CTX_LTID_ID); unsigned int trdid = _get_context_slot(CTX_TRDID_ID); unsigned int vsid = _get_context_slot(CTX_VSID_ID); // print exit message if ( string == NULL ) { _printf("\n[GIET WARNING] Exit thread %x in application %s\n", trdid , vspace[vsid].name ); } else { _printf("\n[GIET WARNING] Exit thread %x in vspace %s\n" " Cause : %s\n\n", trdid , vspace[vsid].name , (char*) string ); } // get scheduler pointer for calling thread static_scheduler_t* psched = (static_scheduler_t*)_get_sched(); // register KILL signal in calling thread context (suicid request) _atomic_or( &psched->context[ltid].slot[CTX_SIGS_ID] , SIGS_MASK_KILL ); // deschedule calling thread unsigned int save_sr; _it_disable( &save_sr ); _ctx_switch(); return SYSCALL_OK; } // end _sys_pthread_exit() //////////////////////// int _sys_pthread_yield() { unsigned int save_sr; _it_disable( &save_sr ); _ctx_switch(); _it_restore( &save_sr ); return SYSCALL_OK; } ////////////////////////////////////////////////// int _sys_pthread_control( unsigned int command, char* vspace_name, char* thread_name ) { #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_pthread_control() at cycle %d\n" "P[%d,%d,%d] enter for vspace %s / thread %s / command = %d\n", _get_proctime() , x , y , p , vspace_name, thread_name, command ); #endif // get pointers in mapping mapping_header_t* header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_thread_t* thread = _get_thread_base(header); mapping_vspace_t* vspace = 
_get_vspace_base(header); mapping_cluster_t* cluster = _get_cluster_base(header); unsigned int found; // search vspace name to get vspace index: vsid found = 0; unsigned int vsid; for( vsid = 0 ; vsid < header->vspaces ; vsid++ ) { if ( _strcmp( vspace[vsid].name, vspace_name ) == 0 ) { found = 1; break; } } if ( found == 0 ) return SYSCALL_VSPACE_NOT_FOUND; // search thread name in vspace to get thread index: tid found = 0; unsigned int tid; unsigned int min = vspace[vsid].thread_offset; unsigned int max = min + vspace[vsid].threads; for( tid = min ; tid < max ; tid++ ) { if ( _strcmp( thread[tid].name, thread_name ) == 0 ) { found = 1; break; } } if ( found == 0 ) return SYSCALL_THREAD_NOT_FOUND; // get target thread coordinates unsigned int cid = thread[tid].clusterid; unsigned int cx = cluster[cid].x; unsigned int cy = cluster[cid].y; unsigned int cp = thread[tid].proclocid; unsigned int ltid = thread[tid].ltid; // get target thread scheduler static_scheduler_t* psched = _schedulers[cx][cy][cp]; // check trdid and vsid unsigned int trdid = cx<<24 | cy<<16 | cp<<8 | ltid; if ( (psched->context[ltid].slot[CTX_TRDID_ID] != trdid) || (psched->context[ltid].slot[CTX_VSID_ID] != vsid) ) { return SYSCALL_UNCOHERENT_THREAD_CONTEXT; } // execute command if ( command == THREAD_CMD_PAUSE ) { _atomic_or ( &psched->context[ltid].slot[CTX_NORUN_ID], NORUN_MASK_THREAD ); return SYSCALL_OK; } else if ( command == THREAD_CMD_RESUME ) { _atomic_and( &psched->context[ltid].slot[CTX_NORUN_ID], ~NORUN_MASK_THREAD ); return SYSCALL_OK; } else if ( command == THREAD_CMD_CONTEXT ) { _user_printf( " - CTX_TRDID = %x\n" " - CTX_VSID = %x\n" " - CTX_EPC = %x\n" " - CTX_PTAB = %x\n" " - CTX_PTPR = %x\n" " - CTX_SR = %x\n" " - CTX_RA = %x\n" " - CTX_SP = %x\n" " - CTX_ENTRY = %x\n" " - CTX_NORUN = %x\n" " - CTX_SIGS = %x\n" " - CTX_LOCKS = %x\n" " - CTX_TTY = %x\n" " - CTX_NIC_RX = %x\n" " - CTX_NIC_TX = %x\n" " - CTX_CMA_RX = %x\n" " - CTX_CMA_TX = %x\n" " - CTX_CMA_FB = %x\n", 
                      psched->context[ltid].slot[CTX_TRDID_ID],
                      psched->context[ltid].slot[CTX_VSID_ID],
                      psched->context[ltid].slot[CTX_EPC_ID],
                      psched->context[ltid].slot[CTX_PTAB_ID],
                      psched->context[ltid].slot[CTX_PTPR_ID],
                      psched->context[ltid].slot[CTX_SR_ID],
                      psched->context[ltid].slot[CTX_RA_ID],
                      psched->context[ltid].slot[CTX_SP_ID],
                      psched->context[ltid].slot[CTX_ENTRY_ID],
                      psched->context[ltid].slot[CTX_NORUN_ID],
                      psched->context[ltid].slot[CTX_SIGS_ID],
                      psched->context[ltid].slot[CTX_LOCKS_ID],
                      psched->context[ltid].slot[CTX_TTY_ID],
                      psched->context[ltid].slot[CTX_NIC_RX_ID],
                      psched->context[ltid].slot[CTX_NIC_TX_ID],
                      psched->context[ltid].slot[CTX_CMA_RX_ID],
                      psched->context[ltid].slot[CTX_CMA_TX_ID],
                      psched->context[ltid].slot[CTX_CMA_FB_ID] );
        return SYSCALL_OK;
    }
    else
    {
        return SYSCALL_ILLEGAL_THREAD_COMMAND_TYPE;
    }
}  // end _sys_pthread_control()

//////////////////////////////////////////////////////////////////////////////
//           Coprocessors related syscall handlers
//////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////
// Allocate the (single) coprocessor of the calling thread's cluster.
// - coproc_type : expected coprocessor type (checked against the mapping)
// - coproc_info : output argument, receives the packed _coproc_info word
//                 (arg0..arg3 fields, one byte each — see below).
// The allocation policy is constrained: the coprocessor must be in the
// same cluster as the calling thread, and there is at most one
// coprocessor per cluster.
///////////////////////////////////////////////////////////////////////////
int _sys_coproc_alloc( unsigned int   coproc_type,
                       unsigned int*  coproc_info )
{
    mapping_header_t  * header  = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    mapping_cluster_t * cluster = _get_cluster_base(header);
    mapping_periph_t  * periph  = _get_periph_base(header);

    // get cluster coordinates and cluster global index
    unsigned int procid = _get_procid();
    unsigned int x      = procid >> (Y_WIDTH + P_WIDTH);
    // NOTE(review): the next statement was garbled by extraction — the code
    // between "((1<" and "arg0" (y mask, cluster_id computation, periph
    // search loop, lock taking, and the start of the _coproc_info[] packing)
    // was lost and must be reconstructed from upstream.
    unsigned int y      = (procid >> P_WIDTH) & ((1<arg0 & 0xFF) |
                               (found->arg1 & 0xFF)<<8 |
                               (found->arg2 & 0xFF)<<16 |
                               (found->arg3 & 0xFF)<<24 ;

    // returns coprocessor info
    *coproc_info = _coproc_info[cluster_id];

    // register coprocessor coordinates in thread context
    // NOTE(review): garbled by extraction — the tokens between "(x<" and
    // "0xFF ) {" swallowed the end of _sys_coproc_alloc() AND the head of
    // _sys_coproc_release() (its signature, trdid fetch, and the
    // CTX_COPROC_ID read); the "{" below opens release()'s error branch.
    unsigned int cluster_xy = (x< 0xFF ) {
        // continuation of _sys_coproc_release(): error branch when no
        // coprocessor is registered in the calling thread context
        _printf("\n[GIET_ERROR] in _sys_coproc_release(): "
                "no coprocessor allocated to thread %x\n", trdid );
        return SYSCALL_COPROCESSOR_NON_ALLOCATED;
    }

    unsigned int cx = cluster_xy >> Y_WIDTH;
    // NOTE(review): garbled by extraction — tokens between "((1<" and ">>8)"
    // swallowed the y mask, cluster_id, and the nb_from computation;
    // reconstruct from upstream before use.
    unsigned int cy = cluster_xy & ((1<>8) & 0xFF;
    unsigned int channel;

    // stops coprocessor and communication channels
    _mwr_set_coproc_register( cluster_xy , coproc_reg_index , 0 );
    for ( channel = 0 ; channel < (nb_from + nb_to) ; channel++ )
    {
        _mwr_set_channel_register( cluster_xy , channel , MWR_CHANNEL_RUNNING , 0 );
    }

    // deallocates coprocessor coordinates in thread context
    _set_context_slot( CTX_COPROC_ID , 0xFFFFFFFF );

    // release coprocessor lock
    _simple_lock_release( &_coproc_lock[cluster_id] );

#if GIET_DEBUG_COPROC
// NOTE(review): format string has 4 conversions (%d cycle, %x thread, %d, %d)
// but only cx and cy are passed — debug-only bug; confirm against upstream.
_printf("\n[DEBUG COPROC] _sys_coproc_release() at cycle %d\n"
"thread %x release coprocessor in cluster[%d,%d]\n", cx, cy );
#endif

    return SYSCALL_OK;
}  // end _sys_coproc_release()

///////////////////////////////////////////////////////////////////////////
// Initialize one communication channel of the coprocessor previously
// allocated to the calling thread (MWMR_DMA driver configuration).
// - channel : channel index in the coprocessor
// - desc    : user descriptor giving mode, buffer size and the virtual
//             addresses of the buffer, MWMR descriptor and lock.
// Returns SYSCALL_OK, or NON_ALLOCATED / ILLEGAL_MODE error codes.
///////////////////////////////////////////////////////////////////////////
int _sys_coproc_channel_init( unsigned int            channel,
                              giet_coproc_channel_t*  desc )
{
    // get thread trdid
    unsigned int trdid = _get_thread_trdid();

    // get coprocessor coordinates (0xFFFFFFFF when none allocated)
    unsigned int cluster_xy = _get_context_slot( CTX_COPROC_ID );
    if ( cluster_xy > 0xFF )
    {
        _printf("\n[GIET_ERROR] in _sys_coproc_channel_init(): "
                "no coprocessor allocated to thread %x\n", trdid );
        return SYSCALL_COPROCESSOR_NON_ALLOCATED;
    }

    // check channel mode
    unsigned mode = desc->channel_mode;
    if ( (mode != MODE_MWMR) &&
         (mode != MODE_DMA_IRQ) &&
         (mode != MODE_DMA_NO_IRQ) )
    {
        _printf("\n[GIET_ERROR] in _sys_coproc_channel_init(): "
                "illegal mode\n");
        return SYSCALL_COPROCESSOR_ILLEGAL_MODE;
    }

    // get memory buffer size
    unsigned int size = desc->buffer_size;

    // physical addresses
    unsigned long long buffer_paddr;
    unsigned int       buffer_lsb;
    unsigned int       buffer_msb;
    unsigned long long mwmr_paddr = 0;
    unsigned int       mwmr_lsb;
    unsigned int       mwmr_msb;
    unsigned long long lock_paddr = 0;
    unsigned int       lock_lsb;
    unsigned int       lock_msb;
    unsigned int       flags;     //
    unused (filled by _v2p_translate but never read)

    // compute memory buffer physical address
    buffer_paddr = _v2p_translate( desc->buffer_vaddr , &flags );
    buffer_lsb   = (unsigned int)buffer_paddr;
    buffer_msb   = (unsigned int)(buffer_paddr>>32);

    // call MWMR_DMA driver: common registers for all modes
    _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_MODE, mode );
    _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_SIZE, size );
    _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_BUFFER_LSB, buffer_lsb );
    _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_BUFFER_MSB, buffer_msb );

    if ( mode == MODE_MWMR )
    {
        // compute MWMR descriptor physical address
        mwmr_paddr = _v2p_translate( desc->mwmr_vaddr , &flags );
        mwmr_lsb = (unsigned int)mwmr_paddr;
        mwmr_msb = (unsigned int)(mwmr_paddr>>32);

        // call MWMR_DMA driver: MWMR descriptor registers
        _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_MWMR_LSB, mwmr_lsb );
        _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_MWMR_MSB, mwmr_msb );

        // compute lock physical address
        lock_paddr = _v2p_translate( desc->lock_vaddr , &flags );
        lock_lsb = (unsigned int)lock_paddr;
        lock_msb = (unsigned int)(lock_paddr>>32);

        // call MWMR_DMA driver: lock registers
        _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_LOCK_LSB, lock_lsb );
        _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_LOCK_MSB, lock_msb );
    }

#if GIET_DEBUG_COPROC
// NOTE(review): x and y are not declared in this function — this debug
// block only compiles when GIET_DEBUG_COPROC is 0; confirm against upstream.
_printf("\n[DEBUG COPROC] _sys_coproc_channel_init() at cycle %d\n"
"cluster[%d,%d] / channel = %d / mode = %d / buffer_size = %d\n"
"  buffer_paddr = %l / mwmr_paddr = %l / lock_paddr = %l\n",
_get_proctime() , x , y , channel , mode , size ,
buffer_paddr, mwmr_paddr, lock_paddr );
#endif

    return SYSCALL_OK;
}  // end _sys_coproc_channel_init()

///////////////////////////////////////////////////////////////////////////
// Start the coprocessor allocated to the calling thread: checks that all
// channels share the same mode, starts all channels, then starts the
// coprocessor itself. In MODE_DMA_IRQ the calling thread is descheduled
// until the mwr_isr() wakes it up, and the ISR-computed error is returned.
// - coproc_reg_index : coprocessor "run" register index.
///////////////////////////////////////////////////////////////////////////
int _sys_coproc_run( unsigned int coproc_reg_index )
{
    // get thread trdid
    unsigned int trdid = _get_thread_trdid();

    // get coprocessor coordinates (0xFFFFFFFF when none allocated)
    unsigned int cluster_xy = _get_context_slot( CTX_COPROC_ID );
    if ( cluster_xy > 0xFF )
    {
        _printf("\n[GIET_ERROR] in
_sys_coproc_run(): "
                "no coprocessor allocated to thread %d\n", trdid );
        return SYSCALL_COPROCESSOR_NON_ALLOCATED;
    }

    unsigned int cx = cluster_xy >> Y_WIDTH;
    // NOTE(review): garbled by extraction — tokens between "((1<" and ">>8)"
    // swallowed the y mask, cluster_id, and nb_from; reconstruct from upstream.
    unsigned int cy = cluster_xy & ((1<>8) & 0xFF;
    unsigned int mode = 0xFFFFFFFF;
    unsigned int channel;

    // register coprocessor running mode: all channels must agree
    for ( channel = 0 ; channel < (nb_from + nb_to) ; channel++ )
    {
        unsigned int temp;
        temp = _mwr_get_channel_register( cluster_xy , channel , MWR_CHANNEL_MODE );

        if ( mode == 0xFFFFFFFF )
        {
            mode = temp;     // first channel defines the reference mode
        }
        else if ( temp != mode )
        {
            _printf("\n[GIET_ERROR] in _sys_coproc_run(): "
                    "channels don't have same mode in coprocessor[%d,%d]\n", cx , cy );
            return SYSCALL_COPROCESSOR_ILLEGAL_MODE;
        }
    }
    _coproc_mode[cluster_id] = mode;

    // start all communication channels
    for ( channel = 0 ; channel < (nb_from + nb_to) ; channel++ )
    {
        _mwr_set_channel_register( cluster_xy , channel , MWR_CHANNEL_RUNNING , 1 );
    }

    //////////////////////////////////////////////////////////////////////////
    if ( (mode == MODE_MWMR) || (mode == MODE_DMA_NO_IRQ) )    // no descheduling
    {
        // start coprocessor
        _mwr_set_coproc_register( cluster_xy , coproc_reg_index , 1 );

#if GIET_DEBUG_COPROC
if ( mode == MODE_MWMR )
_printf("\n[DEBUG COPROC] _sys_coproc_run() at cycle %d\n"
"thread %x starts coprocessor[%d,%d] in MODE_MWMR\n",
_get_proctime() , trdid , cx , cy );
else
_printf("\n[DEBUG COPROC] _sys_coproc_run() at cycle %d\n"
"thread %x starts coprocessor[%d,%d] in MODE_DMA_NO_IRQ\n",
_get_proctime() , trdid , cx , cy );
#endif

        return SYSCALL_OK;
    }
    ///////////////////////////////////////////////////////////////////////////
    else    // mode == MODE_DMA_IRQ => descheduling
    {
        // set _coproc_trdid
        unsigned int gpid = _get_procid();
        unsigned int x    = gpid >> (Y_WIDTH+P_WIDTH);
        // NOTE(review): garbled by extraction — tokens between "((1<" and
        // ">context" swallowed the y mask, _coproc_trdid[] registration, the
        // critical-section entry (save_sr / _it_disable), and the scheduler /
        // ltid lookups feeding the NORUN pointer below; reconstruct upstream.
        unsigned int y    = (gpid >> P_WIDTH) & ((1<context[ltid].slot[CTX_NORUN_ID];
        _atomic_or( ptr , NORUN_MASK_COPROC );

        // start coprocessor
        _mwr_set_coproc_register( cluster_xy , coproc_reg_index , 1 );

#if GIET_DEBUG_COPROC
_printf("\n[DEBUG COPROC]
_sys_coproc_run() at cycle %d\n"
"thread %x starts coprocessor[%d,%d] in MODE_DMA_IRQ\n",
_get_proctime() , trdid , cx , cy );
#endif

        // deschedule thread until mwr_isr() wakes it up
        _ctx_switch();

#if GIET_DEBUG_COPROC
_printf("\n[DEBUG COPROC] _sys_coproc_run() at cycle %d\n"
"thread %x resume after coprocessor[%d,%d] completion\n",
_get_proctime() , trdid , cx , cy );
#endif

        // restore SR (exit critical section opened before descheduling)
        _it_restore( &save_sr );

        // return error computed by mwr_isr()
        return _coproc_error[cluster_id];
    }
}  // end _sys_coproc_run()

///////////////////////////////////////////////////////////////////////////
// Wait for completion of the coprocessor allocated to the calling thread
// (polling the MWR channel status registers), then reset all channels.
// Only legal in MODE_DMA_NO_IRQ. Returns SYSCALL_OK on success, or
// SYSCALL_COPROCESSOR_ILLEGAL_MODE on channel error / wrong mode.
///////////////////////////////////////////////////////////////////////////
int _sys_coproc_completed()
{
    // get thread trdid
    unsigned int trdid = _get_thread_trdid();

    // get coprocessor coordinates (0xFFFFFFFF when none allocated)
    unsigned int cluster_xy = _get_context_slot( CTX_COPROC_ID );
    if ( cluster_xy > 0xFF )
    {
        _printf("\n[GIET_ERROR] in _sys_coproc_completed(): "
                "no coprocessor allocated to thread %x\n", trdid );
        return SYSCALL_COPROCESSOR_NON_ALLOCATED;
    }

    unsigned int cx = cluster_xy >> Y_WIDTH;
    // NOTE(review): garbled by extraction — tokens between "((1<" and ">>8)"
    // swallowed the y mask, cluster_id, mode fetch, the
    // "if (mode == MODE_DMA_NO_IRQ) {" opener (whose matching else appears
    // further down), and nb_from; reconstruct from upstream.
    unsigned int cy = cluster_xy & ((1<>8) & 0xFF;
    unsigned int error = 0;
    unsigned int channel;
    unsigned int status;

    // get status for all channels, and signal reported errors
    for ( channel = 0 ; channel < (nb_to +nb_from) ; channel++ )
    {
        do
        {
            status = _mwr_get_channel_register( cluster_xy, channel, MWR_CHANNEL_STATUS );
            if ( status == MWR_CHANNEL_ERROR_DATA )
            {
                _printf("\n[GIET_ERROR] in _sys_coproc_completed(): "
                        "channel %d / DATA_ERROR\n", channel );
                error = 1;
            }
            else if ( status == MWR_CHANNEL_ERROR_LOCK )
            {
                _printf("\n[GIET_ERROR] in _sys_coproc_completed()"
                        " / channel %d / LOCK_ERROR\n", channel );
                error = 1;
            }
            else if ( status == MWR_CHANNEL_ERROR_DESC )
            {
                _printf("\n[GIET_ERROR] in _sys_coproc_completed()"
                        " / channel %d / DESC_ERROR\n", channel );
                error = 1;
            }
        } while ( status == MWR_CHANNEL_BUSY );    // busy-wait until not BUSY

        // reset channel
        _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_RUNNING , 0 );
    }  // end for channels

    if ( error )
    {
        return SYSCALL_COPROCESSOR_ILLEGAL_MODE;
    }
    else
    {

#if GIET_DEBUG_COPROC
// NOTE(review): 4 conversions but only 3 arguments (cx, cy, trdid) — the
// _get_proctime() argument is missing; debug-only, confirm upstream.
_printf("\n[DEBUG COPROC] _sys_coproc_completed() at
cycle %d\n"
"coprocessor[%d,%d] successfully completes operation for thread %d\n",
cx , cy , trdid );
#endif

        return SYSCALL_OK;
    }
    }
    else    // mode == MODE_MWMR or MODE_DMA_IRQ
    {
        _printf("\n[GIET ERROR] in sys_coproc_completed(): "
                "coprocessor[%d,%d] is not running in MODE_DMA_NO_IRQ\n",
                cx , cy );
        return SYSCALL_COPROCESSOR_ILLEGAL_MODE;
    }
}  // end _sys_coproc_completed()

//////////////////////////////////////////////////////////////////////////////
//             TTY related syscall handlers
//////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////
// Allocate a TTY channel to the calling thread (and, when shared != 0, to
// every thread of the calling vspace). The _tty_channel_alloc[] counter is
// atomically set to the number of users; a WTI mailbox is allocated when
// the platform uses an external PIC.
// - shared : 0 => private channel; != 0 => channel shared by the vspace.
// Returns SYSCALL_OK, or ALREADY_ALLOCATED / NO_CHANNEL_AVAILABLE codes.
///////////////////////////////////////////////////////////////////////////
int _sys_tty_alloc( unsigned int shared )
{
    unsigned int channel;    // allocated TTY channel

    // get trdid and vsid for the calling thread
    unsigned int vsid  = _get_context_slot( CTX_VSID_ID );
    unsigned int trdid = _get_thread_trdid();

    // check no TTY already allocated to calling thread
    if ( _get_context_slot( CTX_TTY_ID ) < NB_TTY_CHANNELS )
    {
        _printf("\n[GIET_ERROR] in _sys_tty_alloc() : "
                "TTY channel already allocated to thread %x\n", trdid );
        return SYSCALL_CHANNEL_ALREADY_ALLOCATED;
    }

    mapping_header_t *header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    mapping_vspace_t *vspace = _get_vspace_base(header);
    mapping_thread_t *thread = _get_thread_base(header);

    // compute number of users: whole vspace when shared, else just caller
    unsigned int users;
    if ( shared )  users = vspace[vsid].threads;
    else           users = 1;

    // get a TTY channel (atomic test-and-set of the per-channel counter)
    for ( channel = 0 ; channel < NB_TTY_CHANNELS ; channel++ )
    {
        unsigned int* palloc  = &_tty_channel_alloc[channel];
        if ( _atomic_test_and_set( palloc , users ) == 0 ) break;
    }
    if ( channel >= NB_TTY_CHANNELS )
    {
        _printf("\n[GIET_ERROR] in _sys_tty_alloc() : "
                "no TTY channel available for thread %x\n", trdid );
        return SYSCALL_NO_CHANNEL_AVAILABLE;
    }

    // initialise allocated TTY channel
    _tty_init( channel );

    // allocate a WTI mailbox to the calling proc if external IRQ
    unsigned int wti_id;
    if ( USE_PIC ) _ext_irq_alloc( ISR_TTY_RX , channel , &wti_id );

    // register wti_id and coordinates for
    processor receiving WTI
    unsigned int procid = _get_procid();
    unsigned int x      = procid >> (Y_WIDTH + P_WIDTH);
    // NOTE(review): garbled by extraction — tokens between "((1<" and
    // ">y_size" swallowed the y mask, the _tty_channel_wti[] registration,
    // and the "if ( shared ) { for ( tid ... ) {" opener whose matching
    // closing braces and else-branch appear below; reconstruct upstream.
    unsigned int y      = (procid >> P_WIDTH) & ((1<y_size;
            unsigned int cid    = thread[tid].clusterid;
            unsigned int x      = cid / y_size;
            unsigned int y      = cid % y_size;
            unsigned int p      = thread[tid].proclocid;
            unsigned int ltid   = thread[tid].ltid;
            static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p];
            psched->context[ltid].slot[CTX_TTY_ID] = channel;
        }
    }
    else    // for calling thread only
    {
        _set_context_slot( CTX_TTY_ID, channel );
    }

    return SYSCALL_OK;
}  // end _sys_tty_alloc()

///////////////////////////////////////////////////////////////////////////
// Release the TTY channel of the calling thread: resets CTX_TTY_ID,
// atomically decrements the channel user counter, and frees the WTI
// mailbox when the channel has no more users.
// NOTE(review): on the "already released" path the code returns
// SYSCALL_CHANNEL_ALREADY_ALLOCATED, which looks like the wrong error
// code (NON_ALLOCATED expected) — confirm against upstream.
///////////////////////////////////////////////////////////////////////////
//////////////////////
int _sys_tty_release()     // NOTE: not a syscall: used by _ctx_kill_thread()
{
    unsigned int channel = _get_context_slot( CTX_TTY_ID );

    // channel is unsigned: 0xFFFFFFFF (the "released" marker) compares
    // equal to -1 through the usual conversion
    if ( channel == -1 )
    {
        unsigned int trdid = _get_thread_trdid();
        _printf("\n[GIET_ERROR] in _sys_tty_release() : "
                "TTY channel already released for thread %x\n", trdid );
        return SYSCALL_CHANNEL_ALREADY_ALLOCATED;
    }

    // reset CTX_TTY_ID for the calling thread
    _set_context_slot( CTX_TTY_ID , 0xFFFFFFFF );

    // atomically decrement the _tty_channel_allocator[] array
    _atomic_increment( &_tty_channel_alloc[channel] , -1 );

    // release WTI mailbox if TTY channel no more used
    if ( USE_PIC && (_tty_channel_alloc[channel] == 0) )
    {
        _ext_irq_release( ISR_TTY_RX , channel );
    }

    return SYSCALL_OK;
}  // end sys_tty_release()

///////////////////////////////////////////////////////////////////////////
// Write up to <length> characters from <buffer> to a TTY channel.
// '\n' is expanded to "\r\n"; the loop stops early when the TTY status
// register reports the transmitter busy, so fewer characters than
// requested may be written.
// - channel == 0xFFFFFFFF selects the channel registered in the context.
// Returns the number of characters actually written, or
// SYSCALL_CHANNEL_NON_ALLOCATED.
///////////////////////////////////////////////////////////////////////////
int _sys_tty_write( const char*  buffer,
                    unsigned int length,    // number of characters
                    unsigned int channel)   // channel index
{
    unsigned int nwritten;

    // compute and check tty channel
    if( channel == 0xFFFFFFFF ) channel = _get_context_slot(CTX_TTY_ID);

    if( channel >= NB_TTY_CHANNELS )
    {
        _printf("\n[GIET_ERROR] in _sys_tty_write() : "
                "no TTY channel allocated for thread %x\n", _get_thread_trdid() );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    // write string to TTY channel
    for (nwritten = 0; nwritten < length; nwritten++)
    {
        // check tty's status
        if ( _tty_get_register( channel, TTY_STATUS ) &
             0x2 ) break;    // transmitter busy => stop (partial write)

        // write one byte, expanding '\n' to "\r\n"
        if (buffer[nwritten] == '\n')
        {
            _tty_set_register( channel, TTY_WRITE, (unsigned int)'\r' );
        }
        _tty_set_register( channel, TTY_WRITE, (unsigned int)buffer[nwritten] );
    }

    return nwritten;
}

///////////////////////////////////////////////////////////////////////////
// Read exactly one character from the TTY_RX software FIFO into *buffer.
// Blocking: when the FIFO is empty the calling thread sets its
// NORUN_MASK_TTY bit and deschedules until the TTY RX ISR refills the
// FIFO and wakes it up.
// - length is unused; channel == 0xFFFFFFFF selects the context channel.
// Returns 1 (one character read) or SYSCALL_CHANNEL_NON_ALLOCATED.
///////////////////////////////////////////////////////////////////////////
int _sys_tty_read( char*        buffer,
                   unsigned int length,    // unused
                   unsigned int channel)   // channel index
{
    // compute and check tty channel
    if( channel == 0xFFFFFFFF ) channel = _get_context_slot(CTX_TTY_ID);

    if( channel >= NB_TTY_CHANNELS )
    {
        _printf("\n[GIET_ERROR] in _sys_tty_read() : "
                "no TTY channel allocated for thread %x\n", _get_thread_trdid() );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    unsigned int save_sr;
    unsigned int found = 0;

    // get pointer on TTY_RX FIFO
    tty_fifo_t* fifo = &_tty_rx_fifo[channel];

    // try to read one character from FIFO
    // blocked in while loop until success
    while ( found == 0 )
    {
        if ( fifo->sts == 0)         // FIFO empty => deschedule
        {
            // enters critical section
            _it_disable( &save_sr );

            // set NORUN_MASK_TTY bit for calling thread
            static_scheduler_t* psched = (static_scheduler_t*)_get_sched();
            unsigned int ltid = psched->current;
            _atomic_or( &psched->context[ltid].slot[CTX_NORUN_ID] , NORUN_MASK_TTY );

            // register descheduling thread trdid (so the ISR can wake it)
            fifo->trdid = _get_thread_trdid();

            // deschedule calling thread
            _ctx_switch();

            // exit critical section
            _it_restore( &save_sr );
        }
        else                         // FIFO not empty => get one character
        {
            *buffer   = fifo->data[fifo->ptr];
            fifo->sts = fifo->sts - 1;
            fifo->ptr = (fifo->ptr + 1) % TTY_FIFO_DEPTH;
            found     = 1;
        }
    }

    return 1;
}

//////////////////////////////////////////////////////////////////////////////
//             TIMER related syscall handlers
//////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////
// Allocate a private TIMER channel to the calling thread and (with an
// external PIC) a WTI mailbox for its interrupt.
// Returns SYSCALL_OK, or ALREADY_ALLOCATED / NO_CHANNEL_AVAILABLE codes.
///////////////////////////////////////////////////////////////////////////
int _sys_tim_alloc()
{

#if NB_TIM_CHANNELS

    unsigned int channel;    // allocated TIMER channel
    unsigned int trdid = _get_thread_trdid();

    // check no TIMER already allocated to calling thread
    if ( _get_context_slot( CTX_TIM_ID ) <
         NB_TIM_CHANNELS )
    {
        _printf("\n[GIET_ERROR] in _sys_tim_alloc() : "
                "TIMER channel already allocated to thread %x\n", trdid );
        return SYSCALL_CHANNEL_ALREADY_ALLOCATED;
    }

    // get a TIMER channel (atomic test-and-set, single user)
    for ( channel = 0 ; channel < NB_TIM_CHANNELS ; channel++ )
    {
        unsigned int* palloc  = &_tim_channel_alloc[channel];
        if ( _atomic_test_and_set( palloc , 1 ) == 0 ) break;
    }
    if ( channel >= NB_TIM_CHANNELS )
    {
        _printf("\n[GIET_ERROR] in _sys_tim_alloc() : "
                "no TIMER channel available for thread %x\n", trdid );
        return SYSCALL_NO_CHANNEL_AVAILABLE;
    }

    // allocate a WTI mailbox to the calling proc if external IRQ
    unsigned int wti_id;
    if ( USE_PIC ) _ext_irq_alloc( ISR_TIMER , channel , &wti_id );

    // register wti_id and coordinates for processor receiving WTI
    unsigned int procid = _get_procid();
    unsigned int x      = procid >> (Y_WIDTH + P_WIDTH);
    // NOTE(review): garbled by extraction — tokens between "((1<" and
    // "= NB_TIM_CHANNELS )" swallowed the y mask, the _tim_channel_wti[]
    // registration, the CTX_TIM_ID registration, the end of _sys_tim_alloc()
    // (with its #else/#endif), and the head of _sys_tim_start( period )
    // including its own channel fetch; reconstruct from upstream.
    unsigned int y      = (procid >> P_WIDTH) & ((1<= NB_TIM_CHANNELS )
    {
        _printf("\n[GIET_ERROR] in _sys_tim_start(): not enough TIM channels\n");
        return SYSCALL_NO_CHANNEL_AVAILABLE;
    }

    // start timer with the requested period
    _timer_start( channel, period );

    return SYSCALL_OK;

#else

    _printf("\n[GIET ERROR] in _sys_tim_start() : NB_TIM_CHANNELS = 0\n");
    return SYSCALL_NO_CHANNEL_AVAILABLE;

#endif
}

///////////////////////////////////////////////////////////////////////////
// Stop the TIMER channel registered in the calling thread context.
// Returns SYSCALL_OK, or CHANNEL_NON_ALLOCATED / NO_CHANNEL_AVAILABLE.
///////////////////////////////////////////////////////////////////////////
int _sys_tim_stop()
{

#if NB_TIM_CHANNELS

    // get timer index
    unsigned int channel = _get_context_slot( CTX_TIM_ID );
    if ( channel >= NB_TIM_CHANNELS )
    {
        _printf("\n[GIET_ERROR] in _sys_tim_stop() : illegal timer index\n");
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    // stop timer
    _timer_stop( channel );

    return SYSCALL_OK;

#else

    _printf("\n[GIET ERROR] in _sys_tim_stop() : NB_TIM_CHANNELS = 0\n");
    return SYSCALL_NO_CHANNEL_AVAILABLE;

#endif
}

//////////////////////////////////////////////////////////////////////////////
//             NIC related syscall handlers
//////////////////////////////////////////////////////////////////////////////

// size of one kernel container (bytes) moved by the CMA between NIC and memory
#define NIC_CONTAINER_SIZE  4096

#if NB_NIC_CHANNELS

////////////////////////////////////////
int _sys_nic_alloc( unsigned int is_rx,
                    unsigned
                    int xmax,
                    unsigned int ymax )
// Allocate one NIC channel and one CMA channel to every thread of the
// calling vspace, build the distributed kernel chbuf (one container and
// one 64-byte status per cluster of the (xmax,ymax) sub-mesh), and program
// the CMA channel with the NIC-side and kernel-side chbuf descriptors.
// - is_rx : non-zero => RX direction (NIC -> kernel), else TX.
// - xmax / ymax : dimensions of the cluster sub-mesh hosting containers.
// Returns SYSCALL_OK or an explicit error code.
{
    mapping_header_t *header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    mapping_vspace_t *vspace = _get_vspace_base(header);
    mapping_thread_t *thread = _get_thread_base(header);

    // get calling thread trdid, vspace index, and number of threads
    unsigned int trdid = _get_thread_trdid();
    unsigned int vsid  = _get_context_slot( CTX_VSID_ID );
    unsigned int users = vspace[vsid].threads;

    // check xmax / ymax parameters
    if ( (xmax > X_SIZE) || (ymax > Y_SIZE) )
    {
        _printf("\n[GIET_ERROR] in _sys_nic_alloc() "
                "xmax or ymax argument too large for thread %x\n", trdid );
        return SYSCALL_ILLEGAL_XY_ARGUMENTS;
    }

    ////////////////////////////////////////////////////////
    // Step 1: get and register CMA and NIC channel index //
    ////////////////////////////////////////////////////////

    unsigned int   nic_channel;
    unsigned int   cma_channel;
    unsigned int*  palloc;

    // get a NIC_RX or NIC_TX channel (counter set to number of users)
    for ( nic_channel = 0 ; nic_channel < NB_NIC_CHANNELS ; nic_channel++ )
    {
        if ( is_rx ) palloc = &_nic_rx_channel_alloc[nic_channel];
        else         palloc = &_nic_tx_channel_alloc[nic_channel];
        if ( _atomic_test_and_set( palloc , users ) == 0 ) break;
    }
    if ( (nic_channel >= NB_NIC_CHANNELS) )
    {
        _printf("\n[GIET_ERROR] in _sys_nic_alloc() : "
                "no NIC channel available for thread %x\n", trdid );
        return SYSCALL_NO_CHANNEL_AVAILABLE;
    }

    // get a CMA channel
    for ( cma_channel = 0 ; cma_channel < NB_CMA_CHANNELS ; cma_channel++ )
    {
        palloc = &_cma_channel_alloc[cma_channel];
        if ( _atomic_test_and_set( palloc , users ) == 0 ) break;
    }
    if ( cma_channel >= NB_CMA_CHANNELS )
    {
        _printf("\n[GIET_ERROR] in _sys_nic_alloc() : "
                "no CMA channel available for thread %x\n", trdid );
        // roll back the NIC channel reservation
        if ( is_rx ) _nic_rx_channel_alloc[nic_channel] = 0;
        else         _nic_tx_channel_alloc[nic_channel] = 0;
        return SYSCALL_NO_CHANNEL_AVAILABLE;
    }

#if GIET_DEBUG_NIC
_printf("\n[DEBUG NIC] sys_nic_alloc() at cycle %d\n"
"thread %d get nic_channel = %d / cma_channel = %d\n",
_get_proctime() , trdid , nic_channel , cma_channel );
#endif

    // register nic_index and cma_index in all threads
    // contexts that are in the same vspace
    unsigned int tid;
    for (tid = vspace[vsid].thread_offset;
         tid < (vspace[vsid].thread_offset + vspace[vsid].threads);
         tid++)
    {
        unsigned int y_size = header->y_size;
        unsigned int cid    = thread[tid].clusterid;
        unsigned int x      = cid / y_size;
        unsigned int y      = cid % y_size;
        unsigned int p      = thread[tid].proclocid;
        unsigned int ltid   = thread[tid].ltid;
        static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p];
        if ( is_rx )
        {
            if ( (psched->context[ltid].slot[CTX_NIC_RX_ID] < NB_NIC_CHANNELS) ||
                 (psched->context[ltid].slot[CTX_CMA_RX_ID] < NB_CMA_CHANNELS) )
            {
                _printf("\n[GIET_ERROR] in _sys_nic_alloc() : "
                        "NIC_RX or CMA_RX channel already allocated for thread %x\n", trdid );
                // roll back both reservations
                _nic_rx_channel_alloc[nic_channel] = 0;
                _cma_channel_alloc[cma_channel]    = 0;
                return SYSCALL_CHANNEL_ALREADY_ALLOCATED;
            }
            else
            {
                psched->context[ltid].slot[CTX_NIC_RX_ID] = nic_channel;
                psched->context[ltid].slot[CTX_CMA_RX_ID] = cma_channel;
            }
        }
        else // is_tx
        {
            if ( (psched->context[ltid].slot[CTX_NIC_TX_ID] < NB_NIC_CHANNELS) ||
                 (psched->context[ltid].slot[CTX_CMA_TX_ID] < NB_CMA_CHANNELS) )
            {
                _printf("\n[GIET_ERROR] in _sys_nic_alloc() : "
                        "NIC_TX or CMA_TX channel already allocated for thread %x\n", trdid );
                _nic_tx_channel_alloc[nic_channel] = 0;
                _cma_channel_alloc[cma_channel]    = 0;
                return SYSCALL_CHANNEL_ALREADY_ALLOCATED;
            }
            else
            {
                psched->context[ltid].slot[CTX_NIC_TX_ID] = nic_channel;
                psched->context[ltid].slot[CTX_CMA_TX_ID] = cma_channel;
            }
        }
    } // end loop on threads

    /////////////////////////////////////////////////////////////////////////////////
    // Step 2: loop on all the clusters                                            //
    // Allocate the kernel containers and status, compute the container and the    //
    // status physical addresses, fill and synchronize the kernel CHBUF descriptor //
    /////////////////////////////////////////////////////////////////////////////////

    // physical addresses to be registered in the CMA registers
    unsigned long long nic_chbuf_pbase;     // NIC chbuf physical address
    unsigned long long ker_chbuf_pbase;     // kernel chbuf physical address

    // allocate one kernel container and one status variable per cluster in the
    // (xmax / ymax) mesh
    unsigned int        cx;                 // cluster X coordinate
    unsigned int        cy;                 // cluster Y coordinate
    unsigned int        index;              // container index in chbuf
    unsigned int        vaddr;              // virtual address
    unsigned long long  cont_paddr;         // container physical address
    unsigned long long  sts_paddr;          // container status physical address
    unsigned int        flags;              // for _v2p_translate()

    for ( cx = 0 ; cx < xmax ; cx++ )
    {
        for ( cy = 0 ; cy < ymax ; cy++ )
        {
            // compute index in chbuf
            index = (cx * ymax) + cy;

            // allocate the kernel container
            vaddr = (unsigned int)_remote_malloc( NIC_CONTAINER_SIZE, cx, cy );

            if ( vaddr == 0 )    // not enough kernel heap memory in cluster[cx,cy]
            {
                _printf("\n[GIET_ERROR] in _sys_nic_alloc() : "
                        "not enough kernel heap in cluster[%d,%d]\n", cx, cy );
                return SYSCALL_OUT_OF_KERNEL_HEAP_MEMORY;
            }

            // compute container physical address
            cont_paddr = _v2p_translate( vaddr , &flags );

            // checking container address alignment (64-byte cache line)
            if ( cont_paddr & 0x3F )
            {
                _printf("\n[GIET ERROR] in _sys_nic_alloc() : "
                        "container address in cluster[%d,%d] not aligned\n", cx, cy);
                return SYSCALL_ADDRESS_NON_ALIGNED;
            }

#if GIET_DEBUG_NIC
// NOTE(review): "-get_proctime()" below is a typo for "_get_proctime()" —
// debug-only bug, confirm against upstream.
_printf("\n[DEBUG NIC] _sys_nic_alloc() at cycle %d\n"
"thread %x allocates a container in cluster[%d,%d] / vaddr = %x / paddr = %l\n",
-get_proctime() , trdid , cx , cy , vaddr, cont_paddr );
#endif

            // allocate the kernel container status
            // it occupies 64 bytes but only last bit is useful (1 for full and 0 for empty)
            vaddr = (unsigned int)_remote_malloc( 64, cx, cy );

            if ( vaddr == 0 )    // not enough kernel heap memory in cluster[cx,cy]
            {
                _printf("\n[GIET_ERROR] in _sys_nic_alloc() : "
                        "not enough kernel heap in cluster[%d,%d]\n", cx, cy );
                return SYSCALL_OUT_OF_KERNEL_HEAP_MEMORY;
            }

            // compute status physical address
            sts_paddr = _v2p_translate( vaddr , &flags );

            // checking status address alignment (64-byte cache line)
            if ( sts_paddr & 0x3F )
            {
                _printf("\n[GIET ERROR] in _sys_nic_alloc() : "
                        "status address in cluster[%d,%d] not aligned\n", cx, cy);
                return SYSCALL_ADDRESS_NON_ALIGNED;
            }

            // initialize chbuf entry
            // The buffer descriptor has the following structure:
            // - the 26 LSB bits contain bits[6:31] of the buffer physical address
            // - the 26 following bits contain bits[6:31] of the physical address where the
            //   buffer status is located
            // - the 12 MSB bits contain the common address extension of the buffer and its
            //   status
            // NOTE(review): RX uses masks 0xFFFFFFFFULL / 0xFFFFFFFFFFFULL while
            // TX uses 0xFFFFFFC0ULL / 0xFFFFFFFFFC0ULL — the RX masks do not
            // clear the 6 alignment bits; both addresses are 64-byte aligned
            // here (checked above), so the results coincide, but confirm the
            // intended canonical form against upstream.
            if ( is_rx )
                _nic_ker_rx_chbuf[nic_channel].buf_desc[index] =
                    (unsigned long long)
                    ((sts_paddr & 0xFFFFFFFFULL) >> 6) +
                    (((cont_paddr & 0xFFFFFFFFFFFULL) >> 6) << 26);
            else
                _nic_ker_tx_chbuf[nic_channel].buf_desc[index] =
                    (unsigned long long)
                    ((sts_paddr & 0xFFFFFFC0ULL) >> 6) +
                    (((cont_paddr & 0xFFFFFFFFFC0ULL) >> 6) << 26);

#if GIET_DEBUG_NIC
_printf("\n[DEBUG NIC] _sys_nic_alloc() at cycle %d\n"
"thread %x allocates a status in cluster[%d,%d] / vaddr = %x / paddr = %l\n"
"   descriptor = %l\n",
_get_proctime() , trdid , cx , cy , vaddr, sts_paddr,
(unsigned long long)((sts_paddr & 0xFFFFFFFFULL) >> 6) +
(((cont_paddr & 0xFFFFFFFFFFFULL) >> 6) << 26) );
#endif
        }
    }

    // complete kernel chbuf initialisation
    if ( is_rx )
    {
        _nic_ker_rx_chbuf[nic_channel].xmax = xmax;
        _nic_ker_rx_chbuf[nic_channel].ymax = ymax;
    }
    else
    {
        _nic_ker_tx_chbuf[nic_channel].xmax = xmax;
        _nic_ker_tx_chbuf[nic_channel].ymax = ymax;
    }

    // compute the kernel chbuf descriptor physical address
    if ( is_rx ) vaddr = (unsigned int)( &_nic_ker_rx_chbuf[nic_channel] );
    else         vaddr = (unsigned int)( &_nic_ker_tx_chbuf[nic_channel] );

    ker_chbuf_pbase = _v2p_translate( vaddr , &flags );

#if GIET_DEBUG_NIC
_printf("\n[DEBUG NIC] _sys_nic_alloc() at cycle %d\n"
"thread %x initialise kernel chbuf / vaddr = %x / paddr = %l\n",
_get_proctime() , trdid , vaddr , ker_chbuf_pbase );
#endif

    // sync the kernel chbuf in L2 after write in L2
    _mmc_sync( ker_chbuf_pbase, sizeof( ker_chbuf_t ) );

    ///////////////////////////////////////////////////////////////
    // Step 3: compute the NIC chbuf descriptor physical address //
    ///////////////////////////////////////////////////////////////

    // offset selects the hardware RX or TX chbuf inside the NIC segment
    unsigned int offset;
    if ( is_rx ) offset = 0x4100;
    else         offset = 0x4110;

    // NIC is in the I/O cluster: build the 40-bit physical address
    nic_chbuf_pbase = (((unsigned long long)((X_IO << Y_WIDTH) + Y_IO))<<32) |
                      (SEG_NIC_BASE + (nic_channel<<15) + offset);

#if GIET_DEBUG_NIC
_printf("\n[DEBUG NIC] _sys_nic_alloc() at cycle %d\n"
"thread %x get NIC chbuf paddr = %l\n",
_get_proctime() , trdid , nic_chbuf_pbase );
#endif

    ////////////////////////////////////////////////////////////////////////////////
    // Step 4: initialize CMA registers defining the source & destination chbufs //
    ////////////////////////////////////////////////////////////////////////////////

    if ( is_rx )               // NIC to kernel
    {
        _cma_set_register( cma_channel, CHBUF_SRC_DESC , (unsigned int)(nic_chbuf_pbase) );
        _cma_set_register( cma_channel, CHBUF_SRC_EXT  , (unsigned int)(nic_chbuf_pbase>>32) );
        _cma_set_register( cma_channel, CHBUF_SRC_NBUFS, 2 );
        _cma_set_register( cma_channel, CHBUF_DST_DESC , (unsigned int)(ker_chbuf_pbase) );
        _cma_set_register( cma_channel, CHBUF_DST_EXT  , (unsigned int)(ker_chbuf_pbase>>32) );
        _cma_set_register( cma_channel, CHBUF_DST_NBUFS, xmax * ymax );
    }
    else                      // kernel to NIC
    {
        _cma_set_register( cma_channel, CHBUF_SRC_DESC , (unsigned int)(ker_chbuf_pbase) );
        _cma_set_register( cma_channel, CHBUF_SRC_EXT  , (unsigned int)(ker_chbuf_pbase>>32) );
        _cma_set_register( cma_channel, CHBUF_SRC_NBUFS, xmax * ymax );
        _cma_set_register( cma_channel, CHBUF_DST_DESC , (unsigned int)(nic_chbuf_pbase) );
        _cma_set_register( cma_channel, CHBUF_DST_EXT  , (unsigned int)(nic_chbuf_pbase>>32) );
        _cma_set_register( cma_channel, CHBUF_DST_NBUFS, 2 );
    }

#if GIET_DEBUG_NIC
_printf("\n[DEBUG NIC] _sys_nic_alloc() at cycle %d\n"
"thread %x exit\n",
_get_proctime() , trdid );
#endif

    return SYSCALL_OK;
}  // end _sys_nic_alloc()
////////////////////////////////////////// int _sys_nic_release( unsigned int is_rx ) // NOTE: not a syscall: used by _ctx_kill_thread() { unsigned int trdid = _get_thread_trdid(); unsigned int nic_channel; unsigned int cma_channel; // update the kernel tables if ( is_rx ) { nic_channel = _get_context_slot( CTX_NIC_RX_ID ); cma_channel = _get_context_slot( CTX_CMA_RX_ID ); if ( (nic_channel >= NB_NIC_CHANNELS) ) { _printf("\n[GIET ERROR] in _sys_nic_release() : " "NIC_RX channel already released for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } if ( (cma_channel >= NB_CMA_CHANNELS) ) { _printf("\n[GIET ERROR] in _sys_nic_release() : " "CMA_RX channel already released for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // atomically decrement the NIC and CMA channel allocators _atomic_increment( &_nic_rx_channel_alloc[nic_channel] , -1 ); _atomic_increment( &_cma_channel_alloc[cma_channel] , -1 ); // stop the NIC and CMA peripherals channels if no more users if ( (_nic_rx_channel_alloc[nic_channel] == 0) && (_cma_channel_alloc[cma_channel] == 0) ) _sys_nic_stop( 1 ); // reset the calling thread context slots _set_context_slot( CTX_NIC_RX_ID , 0xFFFFFFFF ); _set_context_slot( CTX_CMA_RX_ID , 0xFFFFFFFF ); } else { nic_channel = _get_context_slot( CTX_NIC_TX_ID ); cma_channel = _get_context_slot( CTX_CMA_TX_ID ); if ( (nic_channel >= NB_NIC_CHANNELS) ) { _printf("\n[GIET ERROR] in _sys_nic_release() : " "NIC_TX channel already released for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } if ( (cma_channel >= NB_CMA_CHANNELS) ) { _printf("\n[GIET ERROR] in _sys_nic_release() : " "CMA_TX channel already released for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // atomically decrement the NIC and CMA channel allocators _atomic_increment( &_nic_tx_channel_alloc[nic_channel] , -1 ); _atomic_increment( &_cma_channel_alloc[cma_channel] , -1 ); // stop the NIC and CMA peripherals channels if no more users if ( 
(_nic_tx_channel_alloc[nic_channel] == 0) && (_cma_channel_alloc[cma_channel] == 0) ) _sys_nic_stop( 0 ); // reset the calling thread context slots _set_context_slot( CTX_NIC_TX_ID , 0xFFFFFFFF ); _set_context_slot( CTX_CMA_TX_ID , 0xFFFFFFFF ); } return SYSCALL_OK; } // end sys_nic_release() //////////////////////////////////////// int _sys_nic_start( unsigned int is_rx ) { unsigned int trdid = _get_context_slot( CTX_TRDID_ID ); unsigned int nic_channel; unsigned int cma_channel; // get NIC channel index and CMA channel index from thread context if ( is_rx ) { nic_channel = _get_context_slot( CTX_NIC_RX_ID ); cma_channel = _get_context_slot( CTX_CMA_RX_ID ); } else { nic_channel = _get_context_slot( CTX_NIC_TX_ID ); cma_channel = _get_context_slot( CTX_CMA_TX_ID ); } #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_start() at cycle %d\n" "thread %x enter / NIC channel = %d / CMA channel = %d\n", _get_proctime() , trdid , nic_channel, cma_channel ); #endif // check NIC and CMA channels index if ( nic_channel >= NB_NIC_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_nic_start() : " "illegal NIC channel for thread %x\n", trdid ); return -1111; } if ( cma_channel >= NB_CMA_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_nic_start() : " "illegal CMA channel for thread %x\n", trdid ); return -1111; } // start CMA transfer _cma_set_register( cma_channel, CHBUF_BUF_SIZE , NIC_CONTAINER_SIZE ); _cma_set_register( cma_channel, CHBUF_PERIOD , 0 ); // OUT_OF_ORDER _cma_set_register( cma_channel, CHBUF_RUN , 1 ); // activates NIC channel _nic_channel_start( nic_channel, is_rx, GIET_NIC_MAC4, GIET_NIC_MAC2 ); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_start() at cycle %d\n" "thread %d exit\n", _get_proctime() , trdid ); #endif return SYSCALL_OK; } // end _sys_nic_start() ////////////////////////////////////// int _sys_nic_move( unsigned int is_rx, void* buffer ) { unsigned int trdid = _get_context_slot( CTX_TRDID_ID ); unsigned int channel; #if GIET_DEBUG_NIC 
_printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n", "thread %x enters\n", _get_proctime() , trdid ); #endif // get NIC channel index from thread context if ( is_rx ) channel = _get_context_slot( CTX_NIC_RX_ID ); else channel = _get_context_slot( CTX_NIC_TX_ID ); // check NIC channel index if ( channel >= NB_NIC_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_nic_move() : " "NIC channel non allocated for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // get kernel chbuf virtual address ker_chbuf_t* ker_chbuf; if ( is_rx ) ker_chbuf = &_nic_ker_rx_chbuf[channel]; else ker_chbuf = &_nic_ker_tx_chbuf[channel]; // get xmax / ymax parameters unsigned int xmax = ker_chbuf->xmax; unsigned int ymax = ker_chbuf->ymax; // get cluster coordinates for the processor running the calling thread unsigned int procid = _get_procid(); unsigned int cx = procid >> (Y_WIDTH + P_WIDTH); unsigned int cy = (procid >> P_WIDTH) & ((1<= xmax) || (cy >= ymax) ) { _printf("\n[GIET_ERROR] in _sys_nic_move(): " "processor coordinates [%d,%d] larger than (xmax,ymax) = [%d,%d]\n", cx , cy , xmax , ymax ); return SYSCALL_ILLEGAL_XY_ARGUMENTS; } unsigned long long usr_buf_paddr; // user buffer physical address unsigned long long ker_buf_paddr; // kernel buffer physical address unsigned long long ker_sts_paddr; // kernel buffer status physical address unsigned long long ker_buf_desc; // kernel buffer descriptor unsigned int ker_sts; // kernel buffer status (full or empty) unsigned int index; // kernel buffer index in chbuf unsigned int flags; // for _v2P_translate // Compute user buffer physical address and check access rights usr_buf_paddr = _v2p_translate( (unsigned int)buffer , &flags ); if ( (flags & PTE_U) == 0 ) { _printf("\n[GIET ERROR] in _sys_nic_tx_move() : " "buffer address non user accessible\n"); return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE; } #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n" "thread %x get user buffer : paddr = %l\n", _get_proctime() , 
trdid , usr_buf_paddr ); #endif // compute buffer index, buffer descriptor paddr and buffer status paddr index = (ymax * cx) + cy; ker_buf_desc = ker_chbuf->buf_desc[index]; ker_sts_paddr = ((ker_buf_desc & 0xFFF0000000000000ULL) >> 20) + ((ker_buf_desc & 0x3FFFFFFULL) << 6); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n" "thread %x get ker_buf_desc %d / paddr = %l\n", _get_proctime(), trdid , index , ker_buf_desc ); #endif // poll local kernel container status until success while ( 1 ) { // inval buffer descriptor in L2 before read in L2 _mmc_inval( ker_sts_paddr, 4 ); ker_sts = _physical_read( ker_sts_paddr ); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n" "thread %x get status %d / paddr = %l / status = %x\n", _get_proctime() , trdid , index , ker_sts_paddr, ker_sts ); #endif // test buffer status and break if found if ( ( is_rx != 0 ) && ( ker_sts == 0x1 ) ) break; if ( ( is_rx == 0 ) && ( ker_sts == 0 ) ) break; } // compute kernel buffer physical address ker_buf_paddr = (ker_buf_desc & 0xFFFFFFFFFC000000ULL) >> 20; // move one container if ( is_rx ) // RX transfer { // inval kernel buffer in L2 before read in L2 _mmc_inval( ker_buf_paddr, NIC_CONTAINER_SIZE ); // transfer data from kernel buffer to user buffer _physical_memcpy( usr_buf_paddr, ker_buf_paddr, NIC_CONTAINER_SIZE ); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n" "thread %x transfer kernel buffer %l to user buffer %l\n", _get_proctime() , trdid , ker_buf_paddr , usr_buf_paddr ); #endif } else // TX transfer { // transfer data from user buffer to kernel buffer _physical_memcpy( ker_buf_paddr, usr_buf_paddr, NIC_CONTAINER_SIZE ); // sync kernel buffer in L2 after write in L2 _mmc_sync( ker_buf_paddr, NIC_CONTAINER_SIZE ); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n" "thread %x transfer user buffer %l to kernel buffer %l\n", _get_proctime() , trdid , usr_buf_paddr , ker_buf_paddr ); #endif } // 
update kernel chbuf status if ( is_rx ) _physical_write ( ker_sts_paddr, 0 ); else _physical_write ( ker_sts_paddr, 0x1 ); // sync kernel chbuf in L2 after write in L2 _mmc_sync( ker_sts_paddr, 4 ); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n" "thread %x exit\n", _get_proctime() , trdid ); #endif return SYSCALL_OK; } // end _sys_nic_move() /////////////////////////////////////// int _sys_nic_stop( unsigned int is_rx ) { unsigned int trdid = _get_context_slot( CTX_TRDID_ID ); unsigned int nic_channel; unsigned int cma_channel; // get NIC channel index and CMA channel index if ( is_rx ) { nic_channel = _get_context_slot( CTX_NIC_RX_ID ); cma_channel = _get_context_slot( CTX_CMA_RX_ID ); } else { nic_channel = _get_context_slot( CTX_NIC_TX_ID ); cma_channel = _get_context_slot( CTX_CMA_TX_ID ); } // check NIC and CMA channels index if ( nic_channel >= NB_NIC_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_nic_stop() : " "NIC channel non allocated for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } if ( cma_channel >= NB_CMA_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_nic_stop() : " "CMA channel non allocated for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // desactivates the CMA channel _cma_set_register( cma_channel, CHBUF_RUN , 0 ); // wait until CMA channel IDLE unsigned int volatile status; do { status = _cma_get_register( cma_channel, CHBUF_STATUS ); } while ( status ); // desactivates the NIC channel _nic_channel_stop( nic_channel, is_rx ); return SYSCALL_OK; } // end _sys_nic_stop() //////////////////////////////////////// int _sys_nic_clear( unsigned int is_rx ) { unsigned int trdid = _get_context_slot( CTX_TRDID_ID ); unsigned int channel; // get NIC channel if ( is_rx ) channel = _get_context_slot( CTX_NIC_RX_ID ); else channel = _get_context_slot( CTX_NIC_TX_ID ); if ( channel >= NB_NIC_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_nic_clear() : " "NIC channel non allocated for thread %x\n", trdid 
); return SYSCALL_CHANNEL_NON_ALLOCATED; } if ( is_rx ) { _nic_set_global_register( NIC_G_NPKT_RX_G2S_RECEIVED , 0 ); _nic_set_global_register( NIC_G_NPKT_RX_DES_TOO_SMALL , 0 ); _nic_set_global_register( NIC_G_NPKT_RX_DES_TOO_BIG , 0 ); _nic_set_global_register( NIC_G_NPKT_RX_DES_MFIFO_FULL , 0 ); _nic_set_global_register( NIC_G_NPKT_RX_DES_CRC_FAIL , 0 ); _nic_set_global_register( NIC_G_NPKT_RX_DISPATCH_RECEIVED , 0 ); _nic_set_global_register( NIC_G_NPKT_RX_DISPATCH_BROADCAST , 0 ); _nic_set_global_register( NIC_G_NPKT_RX_DISPATCH_DST_FAIL , 0 ); _nic_set_global_register( NIC_G_NPKT_RX_DISPATCH_CH_FULL , 0 ); } else { _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_RECEIVED , 0 ); _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_TRANSMIT , 0 ); _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_TOO_BIG , 0 ); _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_TOO_SMALL , 0 ); _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_SRC_FAIL , 0 ); _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_BYPASS , 0 ); _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_BROADCAST , 0 ); } return SYSCALL_OK; } // en _sys_nic_clear() //////////////////////////////////////// int _sys_nic_stats( unsigned int is_rx ) { unsigned int trdid = _get_context_slot( CTX_TRDID_ID ); unsigned int nic_channel; // get NIC channel if ( is_rx ) nic_channel = _get_context_slot( CTX_NIC_RX_ID ); else nic_channel = _get_context_slot( CTX_NIC_TX_ID ); if ( nic_channel >= NB_NIC_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_nic_stats() : " "NIC channel non allocated for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } if ( is_rx ) { unsigned int received = _nic_get_global_register( NIC_G_NPKT_RX_G2S_RECEIVED ); unsigned int too_small = _nic_get_global_register( NIC_G_NPKT_RX_DES_TOO_SMALL ); unsigned int too_big = _nic_get_global_register( NIC_G_NPKT_RX_DES_TOO_BIG ); unsigned int fifo_full = _nic_get_global_register( NIC_G_NPKT_RX_DES_MFIFO_FULL ); unsigned int crc_fail = 
_nic_get_global_register( NIC_G_NPKT_RX_DES_CRC_FAIL ); unsigned int broadcast = _nic_get_global_register( NIC_G_NPKT_RX_DISPATCH_BROADCAST ); unsigned int dst_fail = _nic_get_global_register( NIC_G_NPKT_RX_DISPATCH_DST_FAIL ); unsigned int ch_full = _nic_get_global_register( NIC_G_NPKT_RX_DISPATCH_CH_FULL ); _printf("\n### Network Controller RX Statistics ###\n" "- packets received : %d\n" "- too small : %d\n" "- too big : %d\n" "- fifo full : %d\n" "- crc fail : %d\n" "- dst mac fail : %d\n" "- channel full : %d\n" "- broadcast : %d\n", received, too_small, too_big, fifo_full, crc_fail, dst_fail, ch_full, broadcast ); } else { unsigned int received = _nic_get_global_register( NIC_G_NPKT_TX_DISPATCH_RECEIVED ); unsigned int too_big = _nic_get_global_register( NIC_G_NPKT_TX_DISPATCH_TOO_BIG ); unsigned int too_small = _nic_get_global_register( NIC_G_NPKT_TX_DISPATCH_TOO_SMALL ); unsigned int src_fail = _nic_get_global_register( NIC_G_NPKT_TX_DISPATCH_SRC_FAIL ); unsigned int bypass = _nic_get_global_register( NIC_G_NPKT_TX_DISPATCH_BYPASS ); unsigned int broadcast = _nic_get_global_register( NIC_G_NPKT_TX_DISPATCH_BROADCAST ); _printf("\n### Network Controller TX Statistics ###\n" "- packets received : %d\n" "- too small : %d\n" "- too big : %d\n" "- src mac fail : %d\n" "- bypass : %d\n" "- broadcast : %d\n", received, too_big, too_small, src_fail, bypass, broadcast ); } return SYSCALL_OK; } // end _sys_nic_stats() #endif ///////////////////////////////////////////////////////////////////////////////////////// // FBF related syscall handlers ///////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////// int _sys_fbf_size( unsigned int* width, unsigned int* height ) { if ( USE_FBF == 0 ) { *width = 0; *height = 0; } else { *width = FBUF_X_SIZE; *height = FBUF_Y_SIZE; } return SYSCALL_OK; } //////////////////// int _sys_fbf_alloc() { mapping_header_t *header = (mapping_header_t 
*)SEG_BOOT_MAPPING_BASE; mapping_vspace_t *vspace = _get_vspace_base(header); mapping_thread_t *thread = _get_thread_base(header); // compute number of users unsigned int vsid = _get_context_slot(CTX_VSID_ID); unsigned int users = vspace[vsid].threads; // access FBF allocator // register it in all threads contexts if ( _atomic_test_and_set( &_fbf_alloc , users ) == 0 ) // FBF available { unsigned int min = vspace[vsid].thread_offset; unsigned int max = min + users; unsigned int tid; for ( tid = min ; tid < max ; tid++ ) { unsigned int y_size = header->y_size; unsigned int cid = thread[tid].clusterid; unsigned int x = cid / y_size; unsigned int y = cid % y_size; unsigned int p = thread[tid].proclocid; unsigned int ltid = thread[tid].ltid; static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p]; _atomic_or( &psched->context[ltid].slot[CTX_LOCKS_ID] , LOCKS_MASK_FBF ); } return SYSCALL_OK; } else // FBF already allocated { return SYSCALL_SHARED_PERIPHERAL_BUSY; } } ////////////////////// int _sys_fbf_release() // not a syscall: used by _ctx_kill_thread() { // get calling thread scheduler, ltid and trdid static_scheduler_t* psched = _get_sched(); unsigned int ltid = _get_thread_ltid(); unsigned int trdid = _get_thread_trdid(); if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 ) { _printf("\n[GIET ERROR] in _sys_fbf_release() : " "FBF not allocated to thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // decrement FBF allocator // reset the calling thread context _atomic_increment( &_fbf_alloc , 0xFFFFFFFF ); _atomic_and( &psched->context[ltid].slot[CTX_LOCKS_ID] , ~LOCKS_MASK_FBF ); return SYSCALL_OK; } ///////////////////////////////////////////// int _sys_fbf_sync_write( unsigned int offset, void* buffer, unsigned int length ) { // get calling thread scheduler, ltid and trdid static_scheduler_t* psched = _get_sched(); unsigned int ltid = _get_thread_ltid(); unsigned int trdid = _get_thread_trdid(); if ( 
(psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 ) { _printf("\n[GIET ERROR] in _sys_fbf_release() : " "FBF not allocated to thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } char* fbf_address = (char *)SEG_FBF_BASE + offset; memcpy( fbf_address, buffer, length); return SYSCALL_OK; } ///////////////////////////////////////////// int _sys_fbf_sync_read( unsigned int offset, void* buffer, unsigned int length ) { // get calling thread scheduler, ltid and trdid static_scheduler_t* psched = _get_sched(); unsigned int ltid = _get_thread_ltid(); unsigned int trdid = _get_thread_trdid(); if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 ) { _printf("\n[GIET ERROR] in _sys_fbf_release() : " "FBF not allocated to thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } char* fbf_address = (char *)SEG_FBF_BASE + offset; memcpy( buffer, fbf_address, length); return SYSCALL_OK; } //////////////////////// int _sys_fbf_cma_alloc() { unsigned int trdid = _get_thread_trdid(); if ( _get_context_slot( CTX_CMA_FB_ID ) < NB_CMA_CHANNELS ) { _printf("\n[GIET ERROR] in _sys_fbf_cma_alloc() : " "CMA channel already allocated for thread %x\n", trdid ); return SYSCALL_CHANNEL_ALREADY_ALLOCATED; } // get a CMA channel unsigned int channel; for ( channel = 0 ; channel < NB_CMA_CHANNELS ; channel++ ) { unsigned int* palloc = &_cma_channel_alloc[channel]; if ( _atomic_test_and_set( palloc , 1 ) == 0 ) break; } if ( channel >= NB_CMA_CHANNELS ) { _printf("\n[GIET ERROR] in _sys_fbf_cma_alloc() : no CMA channel available\n"); return SYSCALL_NO_CHANNEL_AVAILABLE; } else { _set_context_slot( CTX_CMA_FB_ID, channel ); return SYSCALL_OK; } } // end sys_fbf_cma_alloc() ////////////////////////// int _sys_fbf_cma_release() // Not a syscall { unsigned int channel = _get_context_slot( CTX_CMA_FB_ID ); unsigned int trdid = _get_thread_trdid(); if ( channel >= NB_CMA_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_fbf_cma_release() : " "CMA_FB channel 
already released for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // stop CMA transfer _sys_fbf_cma_stop(); // reset CTX_CMA_FB_ID for thread _set_context_slot( CTX_CMA_FB_ID, 0xFFFFFFFF ); // release CMA channel _cma_channel_alloc[channel] = 0; return SYSCALL_OK; } /////////////////////////////////////////////////// int _sys_fbf_cma_init_buf( void* buf0_vbase, void* buf1_vbase, void* sts0_vaddr, void* sts1_vaddr ) { unsigned int vaddr; // virtual address unsigned int flags; // for _v2p_translate() unsigned long long fbf_paddr; // fbf physical address unsigned long long fbf_sts_paddr; // fbf status physical address unsigned long long buf0_pbase; // buffer 0 base physical address unsigned long long sts0_paddr; // buffer 0 status physical address unsigned long long buf1_pbase; // buffer 1 base physical address unsigned long long sts1_paddr; // buffer 1 status physical address // get calling thread scheduler, ltid and trdid static_scheduler_t* psched = _get_sched(); unsigned int ltid = _get_thread_ltid(); unsigned int trdid = _get_thread_trdid(); // check FBF allocated if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 ) { _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : " "FBF not allocated to thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // get channel index unsigned int channel = _get_context_slot( CTX_CMA_FB_ID ); if ( channel >= NB_CMA_CHANNELS ) { _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : " "CMA channel non allocated to thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } #if GIET_DEBUG_FBF_CMA _printf("\n[FBF_CMA DEBUG] _sys_fbf_cma_init_buf()\n" " - channel = %d\n" " - buf0 vbase = %x\n" " - buf1 vbase = %x\n" " - sts0 vaddr = %x\n" " - sts1 vaddr = %x\n", channel, (unsigned int)buf0_vbase, (unsigned int)buf1_vbase, (unsigned int)sts0_vaddr, (unsigned int)sts1_vaddr ); #endif // checking user buffers virtual addresses alignment if ( ((unsigned int)buf0_vbase & 0x3F) || ((unsigned 
int)buf1_vbase & 0x3F) ) { _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : " "user buffer not aligned for thread %x\n", trdid ); return SYSCALL_ADDRESS_NON_ALIGNED; } // checking user buffers status virtual addresses alignment if ( ((unsigned int)sts0_vaddr & 0x3F) || ((unsigned int)sts1_vaddr & 0x3F) ) { _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : " "user status not aligned for thread %x\n", trdid ); return SYSCALL_ADDRESS_NON_ALIGNED; } // compute frame buffer physical address and initialize _fbf_chbuf[channel] vaddr = (unsigned int)SEG_FBF_BASE; fbf_paddr = _v2p_translate( vaddr , &flags ); vaddr = (unsigned int)&_fbf_status[channel]; fbf_sts_paddr = _v2p_translate( vaddr , &flags ); _fbf_chbuf[channel].fbf_desc = (unsigned long long) ((fbf_sts_paddr & 0xFFFFFFFFULL) >> 6) + (((fbf_paddr & 0xFFFFFFFFULL) >> 6 ) << 26); // Compute user buffer 0 physical addresses and intialize _fbf_chbuf[channel] vaddr = (unsigned int)buf0_vbase; buf0_pbase = _v2p_translate( vaddr , &flags ); if ((flags & PTE_U) == 0) { _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : " "buf0 not in user space for thread %x\n", trdid ); return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE; } vaddr = (unsigned int)sts0_vaddr; sts0_paddr = _v2p_translate( vaddr , &flags ); if ((flags & PTE_U) == 0) { _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : " "sts0 not in user space for thread %x\n", trdid); return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE; } _fbf_chbuf[channel].buf0_desc = (unsigned long long) ((sts0_paddr & 0xFFFFFFFFULL) >> 6) + (((buf0_pbase & 0xFFFFFFFFULL) >> 6 ) << 26); // Compute user buffer 1 physical addresses and intialize _fbf_chbuf[channel] vaddr = (unsigned int)buf1_vbase; buf1_pbase = _v2p_translate( vaddr , &flags ); if ((flags & PTE_U) == 0) { _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : " "buf1 not in user space for thread %x\n", trdid ); return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE; } vaddr = (unsigned int)sts1_vaddr; sts1_paddr = _v2p_translate( vaddr , 
&flags ); if ((flags & PTE_U) == 0) { _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : " "sts1 not in user space for thread %x\n", trdid); return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE; } _fbf_chbuf[channel].buf1_desc = (unsigned long long) ((sts1_paddr & 0xFFFFFFFFULL) >> 6) + (((buf1_pbase & 0xFFFFFFFFULL) >> 6 ) << 26); // Compute and register physical adress of the fbf_chbuf descriptor vaddr = (unsigned int)&_fbf_chbuf[channel]; _fbf_chbuf_paddr[channel] = _v2p_translate( vaddr , &flags ); #if GIET_DEBUG_FBF_CMA _printf(" - fbf pbase = %l\n" " - fbf status paddr = %l\n" " - buf0 pbase = %l\n" " - buf0 status paddr = %l\n" " - buf1 pbase = %l\n" " - buf0 status paddr = %l\n" " - chbuf pbase = %l\n", fbf_paddr, fbf_sts_paddr, buf0_pbase, sts0_paddr, buf1_pbase, sts1_paddr, _fbf_chbuf_paddr[channel] ); #endif return SYSCALL_OK; } // end sys_fbf_cma_init_buf() //////////////////////////////////////////// int _sys_fbf_cma_start( unsigned int length ) { // get calling thread scheduler, ltid and trdid static_scheduler_t* psched = _get_sched(); unsigned int ltid = _get_thread_ltid(); unsigned int trdid = _get_thread_trdid(); // check FBF allocated if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 ) { _printf("\n[GIET ERROR] in _sys_fbf_release() : " "FBF not allocated to thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // get channel index unsigned int channel = _get_context_slot( CTX_CMA_FB_ID ); if ( channel >= NB_CMA_CHANNELS ) { _printf("\n[GIET ERROR] in _fbf_cma_start() : " "CMA channel non allocated\n"); return SYSCALL_CHANNEL_NON_ALLOCATED; } // check buffers initialization if ( ( _fbf_chbuf[channel].buf0_desc == 0x0ULL ) && ( _fbf_chbuf[channel].buf1_desc == 0x0ULL) && ( _fbf_chbuf[channel].fbf_desc == 0x0ULL) ) { _printf("\n[GIET ERROR] in _sys_fbf_cma_start(): initialization not done\n"); return SYSCALL_MISSING_INITIALISATION; } // initializes buffer length _fbf_chbuf[channel].length = length; if ( USE_IOB ) { // SYNC 
request for fbf_chbuf descriptor _mmc_sync( _fbf_chbuf_paddr[channel] , sizeof( fbf_chbuf_t ) ); } // start CMA transfer unsigned long long paddr = _fbf_chbuf_paddr[channel]; unsigned int src_chbuf_paddr_lsb = (unsigned int)(paddr & 0xFFFFFFFF); unsigned int src_chbuf_paddr_ext = (unsigned int)(paddr >> 32); unsigned int dst_chbuf_paddr_lsb = src_chbuf_paddr_lsb + 16; unsigned int dst_chbuf_paddr_ext = src_chbuf_paddr_ext; _cma_set_register( channel, CHBUF_SRC_DESC , src_chbuf_paddr_lsb ); _cma_set_register( channel, CHBUF_SRC_EXT , src_chbuf_paddr_ext ); _cma_set_register( channel, CHBUF_SRC_NBUFS, 2 ); _cma_set_register( channel, CHBUF_DST_DESC , dst_chbuf_paddr_lsb ); _cma_set_register( channel, CHBUF_DST_EXT , dst_chbuf_paddr_ext ); _cma_set_register( channel, CHBUF_DST_NBUFS, 1 ); _cma_set_register( channel, CHBUF_BUF_SIZE , length ); _cma_set_register( channel, CHBUF_PERIOD , 300 ); _cma_set_register( channel, CHBUF_RUN , 1 ); return SYSCALL_OK; } // end _sys_fbf_cma_start() ///////////////////////////////////////////////////// int _sys_fbf_cma_display( unsigned int buffer_index ) { volatile unsigned int full = 1; // get calling thread scheduler, ltid and trdid static_scheduler_t* psched = _get_sched(); unsigned int ltid = _get_thread_ltid(); unsigned int trdid = _get_thread_trdid(); // check FBF allocated if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 ) { _printf("\n[GIET ERROR] in _sys_fbf_release() : " "FBF not allocated to thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // get channel index unsigned int channel = _get_context_slot( CTX_CMA_FB_ID ); if ( channel >= NB_CMA_CHANNELS ) { _printf("\n[GIET ERROR] in _sys_fbf_cma_display() : " "CMA channel non allocated\n"); return SYSCALL_CHANNEL_NON_ALLOCATED; } // get fbf_chbuf descriptor pointer fbf_chbuf_t* pdesc = &_fbf_chbuf[channel]; #if GIET_DEBUG_FBF_CMA _printf("\n[FBF_CMA DEBUG] enters _sys_fb_cma_display()\n" " - cma channel = %d\n" " - buffer index = %d\n" " - 
buf0_desc value = %l\n" " - buf1_desc value = %l\n" " - fbf_desc value = %l\n", channel , buffer_index, _fbf_chbuf[channel].buf0_desc, _fbf_chbuf[channel].buf1_desc, _fbf_chbuf[channel].fbf_desc ); #endif unsigned long long buf_sts_paddr; unsigned long long buf_paddr; unsigned long long fbf_sts_paddr; if ( buffer_index == 0 ) // user buffer 0 { buf_sts_paddr = ((pdesc->buf0_desc & 0xFFF0000000000000ULL) >> 20) + // compute address extension ((pdesc->buf0_desc & 0x3FFFFFFULL) << 6); // compute 32 LSB of the address buf_paddr = (pdesc->buf0_desc & 0xFFFFFFFFFC000000ULL) >> 20; // compute the entire address } else // user buffer 1 { buf_sts_paddr = ((pdesc->buf1_desc & 0xFFF0000000000000ULL) >> 20) + ((pdesc->buf1_desc & 0x3FFFFFFULL) << 6); buf_paddr = (pdesc->buf1_desc & 0xFFFFFFFFFC000000ULL) >> 20; } fbf_sts_paddr = ((pdesc->fbf_desc & 0xFFF0000000000000ULL) >> 20) + ((pdesc->fbf_desc & 0x3FFFFFFULL) << 6); #if GIET_DEBUG_FBF_CMA _printf(" - fbf status paddr = %l\n" " - buf pbase = %l\n" " - buf status paddr = %l\n", fbf_sts_paddr, buf_paddr, buf_sts_paddr ); #endif // waiting user buffer released by the CMA component) while ( full ) { // INVAL L2 cache copy of user buffer status // because it has been modified in RAM by the CMA component _mmc_inval( buf_sts_paddr , 4 ); full = _physical_read( buf_sts_paddr ); } // SYNC request for the user buffer, because // it will be read from XRAM by the CMA component _mmc_sync( buf_paddr , pdesc->length ); // set user buffer status _physical_write( buf_sts_paddr, 0x1 ); // reset fbf buffer status _physical_write( fbf_sts_paddr, 0x0 ); // SYNC request, because these buffer descriptors // will be read from XRAM by the CMA component _mmc_sync( buf_sts_paddr, 4 ); _mmc_sync( fbf_sts_paddr, 4 ); return SYSCALL_OK; } // end _sys_fbf_cma_display() /////////////////////// int _sys_fbf_cma_stop() { // get channel index unsigned int channel = _get_context_slot( CTX_CMA_FB_ID ); if ( channel >= NB_CMA_CHANNELS ) { _printf("\n[GIET 
ERROR] in _sys_fbf_cma_stop() : CMA channel non allocated\n"); return SYSCALL_CHANNEL_NON_ALLOCATED; } // Desactivate CMA channel _cma_set_register( channel, CHBUF_RUN, 0 ); return SYSCALL_OK; } // end _sys_fbf_cma_stop() ////////////////////////////////////////////////////////////////////////////// // Miscelaneous syscall handlers ////////////////////////////////////////////////////////////////////////////// /////////////// int _sys_ukn() { _printf("\n[GIET ERROR] Undefined System Call / EPC = %x\n", _get_epc() ); return SYSCALL_UNDEFINED_SYSTEM_CALL; } //////////////////////////////////// int _sys_proc_xyp( unsigned int* x, unsigned int* y, unsigned int* p ) { unsigned int gpid = _get_procid(); // global processor index from CPO register *x = (gpid >> (Y_WIDTH + P_WIDTH)) & ((1<> P_WIDTH) & ((1<x_size; unsigned int ymax = header->y_size; unsigned int procs = cluster[0].procs; // check the (ymax-1) lower rows for ( y = 0 ; y < ymax-1 ; y++ ) { for ( x = 0 ; x < xmax ; x++ ) { if (cluster[x*ymax+y].procs != procs ) okmin = 0; } } // check the upper row for ( x = 0 ; x < xmax ; x++ ) { if (cluster[x*ymax+ymax-1].procs != procs ) okmax = 0; } // return values if ( okmin && okmax ) { *x_size = xmax; *y_size = ymax; *nprocs = procs; } else if ( okmin ) { *x_size = xmax; *y_size = ymax-1; *nprocs = procs; } else { *x_size = 0; *y_size = 0; *nprocs = 0; } return SYSCALL_OK; } /////////////////////////////////////////////////////// int _sys_vseg_get_vbase( char* vspace_name, char* vseg_name, unsigned int* vbase ) { mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t * vspace = _get_vspace_base(header); mapping_vseg_t * vseg = _get_vseg_base(header); unsigned int vspace_id; unsigned int vseg_id; // scan vspaces for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++) { if (_strncmp( vspace[vspace_id].name, vspace_name, 31) == 0) { // scan vsegs for (vseg_id = vspace[vspace_id].vseg_offset; vseg_id < (vspace[vspace_id].vseg_offset + 
vspace[vspace_id].vsegs); vseg_id++) { if (_strncmp(vseg[vseg_id].name, vseg_name, 31) == 0) { *vbase = vseg[vseg_id].vbase; return SYSCALL_OK; } } } } return SYSCALL_VSEG_NOT_FOUND; } ///////////////////////////////////////////////////////// int _sys_vseg_get_length( char* vspace_name, char* vseg_name, unsigned int* length ) { mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t * vspace = _get_vspace_base(header); mapping_vseg_t * vseg = _get_vseg_base(header); unsigned int vspace_id; unsigned int vseg_id; // scan vspaces for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++) { if (_strncmp( vspace[vspace_id].name, vspace_name, 31) == 0) { // scan vsegs for (vseg_id = vspace[vspace_id].vseg_offset; vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs); vseg_id++) { if (_strncmp(vseg[vseg_id].name, vseg_name, 31) == 0) { *length = vseg[vseg_id].length; return SYSCALL_OK; } } } } return SYSCALL_VSEG_NOT_FOUND; } //////////////////////////////////////// int _sys_xy_from_ptr( void* ptr, unsigned int* x, unsigned int* y ) { unsigned int flags; unsigned long long paddr = _v2p_translate( (unsigned int)ptr , &flags ); *x = (paddr>>36) & 0xF; *y = (paddr>>32) & 0xF; return SYSCALL_OK; } ///////////////////////////////////////// int _sys_heap_info( unsigned int* vaddr, unsigned int* length, unsigned int x, unsigned int y ) { // checking parameters if ( (x >= X_SIZE) || (y >= Y_SIZE) ) { *vaddr = 0; *length = 0; _printf("\n[GIET ERROR] in _sys_heap_info() : " "illegal (%d,%d) coordinates\n", x , y ); return SYSCALL_ILLEGAL_CLUSTER_COORDINATES; } mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_thread_t * thread = _get_thread_base(header); mapping_vseg_t * vseg = _get_vseg_base(header); mapping_vspace_t * vspace = _get_vspace_base(header); unsigned int thread_id; unsigned int vspace_id; unsigned int vseg_id = 0xFFFFFFFF; // get calling thread vspace index vspace_id = 
_get_context_slot(CTX_VSID_ID); // scan all threads in vspace to find one in clyster[x,y] unsigned int min = vspace[vspace_id].thread_offset ; unsigned int max = min + vspace[vspace_id].threads ; for ( thread_id = min ; thread_id < max ; thread_id++ ) { if ( thread[thread_id].clusterid == (x * Y_SIZE + y) ) { vseg_id = thread[thread_id].heap_vseg_id; break; } } // analysing the vseg_id if ( vseg_id != 0xFFFFFFFF ) { *vaddr = vseg[vseg_id].vbase; *length = vseg[vseg_id].length; } else { *vaddr = 0; *length = 0; _printf("error in _sys_heap_info() : no heap in cluster (%d,%d)\n", x , y ); } return SYSCALL_OK; } // end _sys_heap_info() // Local Variables: // tab-width: 4 // c-basic-offset: 4 // c-file-offsets:((innamespace . 0)(inline-open . 0)) // indent-tabs-mode: nil // End: // vim: filetype=c:expandtab:shiftwidth=4:tabstop=4:softtabstop=4