/////////////////////////////////////////////////////////////////////////////////// // File : sys_handler.c // Date : 01/04/2012 // Author : alain greiner and joel porquet // Copyright (c) UPMC-LIP6 /////////////////////////////////////////////////////////////////////////////////// #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if !defined(X_SIZE) # error: You must define X_SIZE in the hard_config.h file #endif #if !defined(Y_SIZE) # error: You must define Y_SIZE in the hard_config.h file #endif #if !defined(NB_PROCS_MAX) # error: You must define NB_PROCS_MAX in the hard_config.h file #endif #if !defined(SEG_BOOT_MAPPING_BASE) # error: You must define SEG_BOOT_MAPPING_BASE in the hard_config.h file #endif #if !defined(NB_TTY_CHANNELS) # error: You must define NB_TTY_CHANNELS in the hard_config.h file #endif #if (NB_TTY_CHANNELS < 1) # error: NB_TTY_CHANNELS cannot be smaller than 1! #endif #if !defined(NB_TIM_CHANNELS) # error: You must define NB_TIM_CHANNELS in the hard_config.h file #endif #if !defined(NB_NIC_CHANNELS) # error: You must define NB_NIC_CHANNELS in the hard_config.h file #endif #if !defined(NB_CMA_CHANNELS) # error: You must define NB_CMA_CHANNELS in the hard_config.h file #endif #if !defined(GIET_NO_HARD_CC) # error: You must define GIET_NO_HARD_CC in the giet_config.h file #endif #if !defined ( GIET_NIC_MAC4 ) # error: You must define GIET_NIC_MAC4 in the giet_config.h file #endif #if !defined ( GIET_NIC_MAC2 ) # error: You must define GIET_NIC_MAC2 in the giet_config.h file #endif //////////////////////////////////////////////////////////////////////////// // Extern variables //////////////////////////////////////////////////////////////////////////// // allocated in tty0.c file. extern sqt_lock_t _tty0_sqt_lock; // allocated in mwr_driver.c file. 
// Per-cluster coprocessor state (one entry per cluster).
extern simple_lock_t  _coproc_lock[X_SIZE*Y_SIZE];
extern unsigned int   _coproc_type[X_SIZE*Y_SIZE];
extern unsigned int   _coproc_info[X_SIZE*Y_SIZE];
extern unsigned int   _coproc_mode[X_SIZE*Y_SIZE];
extern unsigned int   _coproc_error[X_SIZE*Y_SIZE];
extern unsigned int   _coproc_trdid[X_SIZE*Y_SIZE];

// allocated in tty_driver.c file.
extern tty_fifo_t _tty_rx_fifo[NB_TTY_CHANNELS];

// allocated in kernel_init.c file
// scheduler pointers, indexed by processor coordinates [x][y][local_id]
extern static_scheduler_t* _schedulers[X_SIZE][Y_SIZE][NB_PROCS_MAX];

// allocated in kernel_init.c file
// physical addresses of the page tables, per vspace and per cluster
extern unsigned long long _ptabs_paddr[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];

////////////////////////////////////////////////////////////////////////////////
// Allocator protecting exclusive access to FBF by a single application.
// - The number of users in a given application should be set by a single
//   thread using an _atomic_test_and_set().
// - The allocator is atomically decremented by each user thread when
//   the thread exit.
////////////////////////////////////////////////////////////////////////////////

__attribute__((section(".kdata")))
unsigned int _fbf_alloc = 0;

////////////////////////////////////////////////////////////////////////////////
// Channel allocators for multi-channels peripherals
// - The array _***_channel_allocator[channel] defines the number of user
//   threads for a dynamically allocated channel of peripheral ***.
// - The array _***_channel_wti[channel] defines the WTI index and the
//   processor coordinates for the processor receiving the channel WTI.
//////////////////////////////////////////////////////////////////////////////// #if NB_TTY_CHANNELS __attribute__((section(".kdata"))) unsigned int _tty_channel_alloc[NB_TTY_CHANNELS] = {0}; __attribute__((section(".kdata"))) unsigned int _tty_channel_wti[NB_TTY_CHANNELS]; #endif #if NB_TIM_CHANNELS __attribute__((section(".kdata"))) unsigned int _tim_channel_alloc[NB_TIM_CHANNELS] = {0}; __attribute__((section(".kdata"))) unsigned int _tim_channel_wti[NB_TIM_CHANNELS]; #endif #if NB_CMA_CHANNELS __attribute__((section(".kdata"))) unsigned int _cma_channel_alloc[NB_CMA_CHANNELS] = {0}; __attribute__((section(".kdata"))) unsigned int _cma_channel_wti[NB_CMA_CHANNELS]; #endif #if NB_NIC_CHANNELS __attribute__((section(".kdata"))) unsigned int _nic_rx_channel_alloc[NB_NIC_CHANNELS] = {0}; __attribute__((section(".kdata"))) unsigned int _nic_rx_channel_wti[NB_NIC_CHANNELS]; __attribute__((section(".kdata"))) unsigned int _nic_tx_channel_alloc[NB_NIC_CHANNELS] = {0}; __attribute__((section(".kdata"))) unsigned int _nic_tx_channel_wti[NB_NIC_CHANNELS]; #endif //////////////////////////////////////////////////////////////////////////// // NIC_RX and NIC_TX kernel chbuf arrays //////////////////////////////////////////////////////////////////////////// __attribute__((section(".kdata"))) nic_chbuf_t _nic_ker_rx_chbuf[NB_NIC_CHANNELS] __attribute__((aligned(64))); __attribute__((section(".kdata"))) nic_chbuf_t _nic_ker_tx_chbuf[NB_NIC_CHANNELS] __attribute__((aligned(64))); //////////////////////////////////////////////////////////////////////////// // FBF related chbuf // The physical address of this chbuf is required for L2 cache sync. 
////////////////////////////////////////////////////////////////////////////

__attribute__((section(".kdata")))
fbf_chbuf_t _fbf_ker_chbuf __attribute__((aligned(64)));

// physical address of _fbf_ker_chbuf (used for L2 cache sync)
__attribute__((section(".kdata")))
unsigned long long _fbf_chbuf_paddr;

////////////////////////////////////////////////////////////////////////////
//     Initialize the syscall vector with syscall handlers
// Note: This array must be synchronised with the define in file stdio.h
////////////////////////////////////////////////////////////////////////////

__attribute__((section(".kdata")))
const void * _syscall_vector[64] =
{
    &_sys_proc_xyp,                  /* 0x00 */
    &_get_proctime,                  /* 0x01 */
    &_sys_procs_number,              /* 0x02 */
    &_sys_xy_from_ptr,               /* 0x03 */
    &_sys_ukn,                       /* 0x04 */
    &_sys_vseg_get_vbase,            /* 0x05 */
    &_sys_vseg_get_length,           /* 0x06 */
    &_sys_heap_info,                 /* 0x07 */
    &_sys_fbf_size,                  /* 0x08 */
    &_sys_fbf_alloc,                 /* 0x09 */

#if NB_CMA_CHANNELS
    &_sys_fbf_cma_alloc,             /* 0x0A */
    &_sys_fbf_cma_init_buf,          /* 0x0B */
    &_sys_fbf_cma_start,             /* 0x0C */
    &_sys_fbf_cma_display,           /* 0x0D */
    &_sys_fbf_cma_stop,              /* 0x0E */
    &_sys_fbf_cma_check,             /* 0x0F */
#else
    &_sys_ukn,                       /* 0x0A */
    &_sys_ukn,                       /* 0x0B */
    &_sys_ukn,                       /* 0x0C */
    &_sys_ukn,                       /* 0x0D */
    &_sys_ukn,                       /* 0x0E */
    &_sys_ukn,                       /* 0x0F */
#endif

    &_sys_applications_status,       /* 0x10 */
    &_sys_fbf_sync_write,            /* 0x11 */
    &_sys_fbf_sync_read,             /* 0x12 */
    &_sys_ukn,                       /* 0x13 */
    &_sys_tim_alloc,                 /* 0x14 */
    &_sys_tim_start,                 /* 0x15 */
    &_sys_tim_stop,                  /* 0x16 */
    &_sys_kill_application,          /* 0x17 */
    &_sys_exec_application,          /* 0x18 */
    &_sys_ukn,                       /* 0x19 */
    &_sys_pthread_control,           /* 0x1A */
    &_sys_pthread_yield,             /* 0x1B */
    &_sys_pthread_kill,              /* 0x1C */
    &_sys_pthread_create,            /* 0x1D */
    &_sys_pthread_join,              /* 0x1E */
    &_sys_pthread_exit,              /* 0x1F */

    &_fat_open,                      /* 0x20 */
    &_sys_fat_read,                  /* 0x21 */
    &_sys_fat_write,                 /* 0x22 */
    &_fat_lseek,                     /* 0x23 */
    &_fat_file_info,                 /* 0x24 */
    &_fat_close,                     /* 0x25 */
    &_fat_remove,                    /* 0x26 */
    &_fat_rename,                    /* 0x27 */
    &_fat_mkdir,                     /* 0x28 */
    &_fat_opendir,                   /* 0x29 */
    &_fat_closedir,                  /* 0x2A */
    &_fat_readdir,                   /* 0x2B */
    &_sys_fat_pread,                 /* 0x2C */
    &_sys_fat_mmap,                  /* 0x2D */
    &_sys_fat_munmap,                /* 0x2E */
    &_sys_fat_dump,                  /* 0x2F */

#if NB_NIC_CHANNELS
    &_sys_nic_alloc,                 /* 0x30 */
    &_sys_nic_start,                 /* 0x31 */
    &_sys_nic_move,                  /* 0x32 */
    &_sys_nic_stop,                  /* 0x33 */
    &_sys_nic_stats,                 /* 0x34 */
    &_sys_nic_clear,                 /* 0x35 */
#else
    &_sys_ukn,                       /* 0x30 */
    &_sys_ukn,                       /* 0x31 */
    &_sys_ukn,                       /* 0x32 */
    &_sys_ukn,                       /* 0x33 */
    &_sys_ukn,                       /* 0x34 */
    &_sys_ukn,                       /* 0x35 */
#endif

    &_sys_tty_write,                 /* 0x36 */
    &_sys_tty_read,                  /* 0x37 */
    &_sys_tty_alloc,                 /* 0x38 */
    &_sys_ukn,                       /* 0x39 */
    &_sys_ukn,                       /* 0x3A */
    &_sys_coproc_completed,          /* 0x3B */
    &_sys_coproc_alloc,              /* 0x3C */
    &_sys_coproc_channel_init,       /* 0x3D */
    &_sys_coproc_run,                /* 0x3E */
    &_sys_coproc_release,            /* 0x3F */
};

///////////////////////////////////////////////////////////////////////////////////
//             File system related syscall handlers
///////////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////////////
// This function is called by the _sys_fat_mmap() function.
// It implements a simplistic pages allocator from the MMAP vseg of the vspace
// identified by the <vspace_id> argument. The number of pages is defined by
// the <count> argument.
// Allocated pages are never released, and the allocator is the "psegid" field
// in the vseg mapping, that is initialised to 0 by the boot code.
// In order to support concurrent system calls, the allocator must be
// atomically incremented.
///////////////////////////////////////////////////////////////////////////////////
// Returns the buffer vbase address in case of success.
// Returns 0 in case of error (no MMAP vseg or not enough space).
/////////////////////////////////////////////////////////////////////////////////// static unsigned int _mmap_pages_alloc( unsigned int vspace_id, unsigned int count ) { mapping_header_t* header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t* vspace = _get_vspace_base( header ); mapping_vseg_t* vseg = _get_vseg_base( header ); // loop on the vsegs to find the MMAP vseg vbase, length, and offset unsigned int vseg_id; unsigned int vbase; unsigned int offset; unsigned int length; unsigned int found = 0; for (vseg_id = vspace[vspace_id].vseg_offset; vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs); vseg_id++) { if ( vseg[vseg_id].type == VSEG_TYPE_MMAP ) { offset = _atomic_increment( &vseg[vseg_id].psegid , count ); vbase = vseg[vseg_id].vbase; length = vseg[vseg_id].length; found = 1; break; } } if ( (found == 0) || (((offset + count)<<12) > length) ) { return 0; } else { return vbase + (offset<<12); } } // end _mmap_pages_alloc() /////////////////////////////////////////////////////////////////////////////////// // This function implements the giet_fat_read() system call. /////////////////////////////////////////////////////////////////////////////////// int _sys_fat_read( unsigned int fd_id, unsigned int vaddr, unsigned int count ) { return _fat_read( fd_id, vaddr, count, 0, // no paddr extension 0, // no forced offset 0 ); // no special mode } //////////////////////////////////////////////////////////////// // This function implements the giet_fat_pread() system call. //////////////////////////////////////////////////////////////// int _sys_fat_pread( unsigned int fd_id, unsigned int vaddr, unsigned int count, unsigned int offset ) { return _fat_read( fd_id, vaddr, count, 0, // no paddr extension offset, FAT_FORCED_OFFSET ); } //////////////////////////////////////////////////////////////// // This function implements the giet_fat_write() system call. 
////////////////////////////////////////////////////////////////
int _sys_fat_write( unsigned int fd_id,
                    unsigned int vaddr,
                    unsigned int count )
{
    // plain sequential write : no paddr extension, no special mode
    return _fat_write( fd_id, vaddr, count, 0, 0 );
}

///////////////////////////////////////////////////////////////////////////////////
// This function implements the "giet_fat_mmap()" system call.
// It allocates contiguous pages from the MMAP vseg of the calling vspace,
// and maps all these pages directly to the file_cache of the file defined
// by the <fd_id> argument. The <count> and <offset> arguments are numbers
// of pages in the file.
// The <prot> argument defines the access modes MAP_PROT_WRITE and MAP_PROT_EXEC.
// In writable mode, the file size is extended to the (offset + count) pages.
// In non writable mode, the file size must be >= (offset + count) pages.
// It has the following limitations:
// - it does not support MAP_PRIVATE (no copy on write).
// - it does not support MAP_FIXED (no forced user buffer address).
// - it does not support MAP_ANONYMOUS (only file mapping).
// - the <offset> and <count> arguments must be multiple of 4 Kbytes.
///////////////////////////////////////////////////////////////////////////////////
// Returns memory buffer vbase in case of success.
// Returns 0 on error.
///////////////////////////////////////////////////////////////////////////////////
int _sys_fat_mmap( unsigned int fd_id,
                   unsigned int count,
                   unsigned int offset,
                   unsigned int prot )
{
    // takes the FAT lock and register it in thread context
    static_scheduler_t*  psched = _get_sched();
    unsigned int         ltid   = _get_thread_ltid();
    _spin_lock_acquire( &_fat.fat_lock );
    _atomic_or( &psched->context[ltid].slot[CTX_LOCKS_ID] , LOCKS_MASK_FAT );

    // check fd_id overflow
    if ( fd_id >= GIET_OPEN_FILES_MAX )
    {
        _spin_lock_release( &_fat.fat_lock );
        _atomic_and( &psched->context[ltid].slot[CTX_LOCKS_ID] , ~LOCKS_MASK_FAT );
        _printf("\n[GIET ERROR] _sys_fat_mmap(): illegal file descriptor\n");
        return 0;
    }

    // check file open
    if ( _fat.fd[fd_id].allocated == 0 )
    {
        _spin_lock_release( &_fat.fat_lock );
        _atomic_and( &psched->context[ltid].slot[CTX_LOCKS_ID] , ~LOCKS_MASK_FAT );
        _printf("\n[GIET ERROR] _sys_fat_mmap(): file not open\n" );
        return 0;
    }

    // get access modes
    unsigned int writable   = prot & MAP_PROT_WRITE;
    unsigned int executable = prot & MAP_PROT_EXEC;

    // get inode pointer
    fat_inode_t* inode = _fat.fd[fd_id].inode;

    // check file writable
    if ( _fat.fd[fd_id].read_only && writable )
    {
        _spin_lock_release( &_fat.fat_lock );
        _atomic_and( &psched->context[ltid].slot[CTX_LOCKS_ID] , ~LOCKS_MASK_FAT );
        _printf("\n[GIET ERROR] _sys_fat_mmap(): file <%s> is read-only\n",
                inode->name );
        return 0;
    }

    // get vspace index and calling proc coordinates
    unsigned int vsid = _get_context_slot( CTX_VSID_ID );

    // compute first and last cluster indexes
    unsigned int first_cluster = offset;
    unsigned int last_cluster  = offset + count - 1;

#if GIET_DEBUG_MMAP
// NOTE(review): this debug block is corrupted — the text between '<' and '>'
// was lost during extraction (the p_id declaration and the if() condition are
// incomplete). Restore it from the repository before enabling GIET_DEBUG_MMAP.
unsigned int procid = _get_procid();
unsigned int x_id = procid >> (Y_WIDTH + P_WIDTH);
unsigned int y_id = (procid >> P_WIDTH) & ((1< GIET_DEBUG_MMAP )
_printf("\n[DEBUG MMAP] _sys_fat_mmap() : P[%d,%d,%d] enters at cycle %d\n"
        " for file %s / size = %x / cluster = %x / cache = %x / desc[0] = %x\n"
        " first_cluster = %d / last_cluster = %d\n",
        x_id , y_id , p_id , _get_proctime() ,
        inode->name , inode->size , inode->cluster ,
        (unsigned int)inode->cache , (unsigned int)inode->cache->children[0] ,
        first_cluster , last_cluster );
#endif

    // get buffer vbase from MMAP vseg in calling vspace
    unsigned int buffer_vbase = _mmap_pages_alloc( vsid, count );
    if ( buffer_vbase == 0 )
    {
        _spin_lock_release( &_fat.fat_lock );
        _atomic_and( &psched->context[ltid].slot[CTX_LOCKS_ID] , ~LOCKS_MASK_FAT );
        _printf("\n[GIET ERROR] _sys_fat_mmap(): no space in MMAP vseg to map file %s\n",
                _fat.fd[fd_id].inode->name );
        return 0;
    }

    // set flags for all pages in buffer
    unsigned int flags = 0;
    flags |= PTE_C;                    // cachable
    flags |= PTE_U;                    // user access
    flags |= PTE_L;                    // local access (unused)
    flags |= PTE_R;                    // remote access (unused)
    if ( executable ) flags |= PTE_X;
    if ( writable   ) flags |= PTE_W;

    // loop on pages to be mapped
    unsigned int cid;
    unsigned int user_vaddr = buffer_vbase;
    for ( cid = first_cluster ; cid <= last_cluster ; cid++ )
    {
        // get file_cache buffer vaddr
        unsigned char*     cache_vaddr;
        fat_cache_desc_t*  pdesc;
        if ( _get_file_cache_buffer( inode, cid, writable, &pdesc ) )
        {
            _spin_lock_release( &_fat.fat_lock );
            _atomic_and( &psched->context[ltid].slot[CTX_LOCKS_ID] , ~LOCKS_MASK_FAT );
            _printf("\n[FAT ERROR] _sys_fat_mmap(): cannot access file <%s>\n",
                    _fat.fd[fd_id].inode->name );
            return 0;
        }
        cache_vaddr = pdesc->buffer;

        // compute buffer paddr
        unsigned long long  cache_paddr;
        unsigned int        unused;
        cache_paddr = _v2p_translate( (unsigned int)cache_vaddr, &unused );

        // map the user_vaddr to cache_paddr
        // in all pages tables defined for the vspace
        unsigned int x_cur;
        unsigned int y_cur;
        for ( x_cur = 0 ; x_cur < X_SIZE ; x_cur++ )
        {
            for ( y_cur = 0 ; y_cur < Y_SIZE ; y_cur++ )
            {
                if ( _ptabs_paddr[vsid][x_cur][y_cur] )   // Page table is defined
                {
                    _v2p_add_pte2( vsid, x_cur, y_cur,
                                   user_vaddr >> 12, flags, cache_paddr >> 12, 0 );
                }
            }
        }

#if GIET_DEBUG_MMAP
if ( _get_proctime() > GIET_DEBUG_MMAP )
_printf("\n[DEBUG MMAP] _sys_fat_mmap() : P[%d,%d,%d] map cluster_id = %d\n"
        " user_vaddr = %x / cache_paddr = %l\n",
        x_id , y_id , p_id , cid , user_vaddr , cache_paddr );
#endif

        // increment user buffer vaddr
        user_vaddr += 4096;
    }

    // releases the FAT lock
    _spin_lock_release( &_fat.fat_lock );
    _atomic_and( &psched->context[ltid].slot[CTX_LOCKS_ID] , ~LOCKS_MASK_FAT );

#if GIET_DEBUG_MMAP
if ( _get_proctime() > GIET_DEBUG_MMAP )
_printf("\n[DEBUG MMAP] _sys_fat_mmap() : P[%d,%d,%d] returns buffer %x for file %s\n",
        x_id , y_id , p_id , buffer_vbase , inode->name );
#endif

    // returns pointer on mapped buffer in user space
    return buffer_vbase;
}  // end _sys_fat_mmap()

//////////////////////////////////////////////////////////////////
// This function implements the giet_fat_munmap() system call.
//////////////////////////////////////////////////////////////////
int _sys_fat_munmap( unsigned int vaddr,
                     unsigned int count )
{
    // get vspace index
    unsigned int vsid = _get_context_slot( CTX_VSID_ID );

    // loop on pages : remove the PTE2 of each page from all the
    // page tables defined for the vspace
    unsigned int page;
    for ( page = 0 ; page < count ; page++ )
    {
        unsigned int vpn = (vaddr>>12) + page;

        // loop on all page tables defined for the vspace
        unsigned int x_cur;
        unsigned int y_cur;
        for ( x_cur = 0 ; x_cur < X_SIZE ; x_cur++ )
        {
            for ( y_cur = 0 ; y_cur < Y_SIZE ; y_cur++ )
            {
                if ( _ptabs_paddr[vsid][x_cur][y_cur] )   // Page table is defined
                {
                    _v2p_del_pte2( vsid , x_cur , y_cur , vpn );
                }
            }
        }
    }
    return 0;
}  // end _sys_fat_munmap()

///////////////////////////////////////////////////////////////////////////////////
// This service function is called by the _sys_fat_dump() kernel function.
// It displays on the calling thread terminal the content of a 512 bytes buffer
// defined by the <buf> pointer.
// The <string>, <cluster_id> and <block_id> arguments are only used
// to build a meaningful title: <string> is an identifier, <cluster_id> is
// the cluster index (in file or FAT), <block_id> is the block index (in cluster).
///////////////////////////////////////////////////////////////////////////////////
static void _dump_block( unsigned char* buf,
                         char*          string,
                         unsigned int   cluster_id,
                         unsigned int   block_id )
{
    unsigned int row;
    unsigned int col;

    // title line
    _user_printf("\n--- %s : cluster_id = %d / block_id = %d ---\n",
                 string , cluster_id , block_id );

    // 512 bytes displayed as 16 rows of 8 words (32 bytes per row)
    for ( row = 0 ; row < 16 ; row++ )
    {
        // row index
        _user_printf("%x : ", row );

        // 8 big-endian 32 bits words per row
        for ( col = 0 ; col < 8 ; col++ )
        {
            unsigned char* p    = buf + (row<<5) + (col<<2);
            unsigned int   word = (p[0]<<24) |
                                  (p[1]<<16) |
                                  (p[2]<< 8) |
                                  (p[3]     );
            _user_printf(" %X |", word );
        }
        _user_printf("\n");
    }
}  // end _dump_block()

///////////////////////////////////////////////////////////////////////////////////
// This function implements the giet_fat_dump() system call.
// It analyse the <type>, <pathname> and <sector_id> arguments to select
// a 512 bytes sector, and display the sector content on the calling thread TTY.
// The <type> argument can take the following values :
// - DUMP_BS   : boot sector
// - DUMP_FS   : fs_info sector
// - DUMP_FAT  : fat sector, identified by <sector_id>
// - DUMP_FILE : file sector, identified by <pathname> and <sector_id>
// - DUMP_DIR  : directory sector, identified by <pathname> and <sector_id>
// The <sector_id> argument defines the sector index in the file or in the FAT.
// For a file or a directory, or for the FAT itself, it uses the kernel File-Caches
// or Fat-Cache, and the Inode-Tree.
// For the boot sector, or fs_info sector, it access directly the block device,
// that is loaded in the FAT descriptor block-buffer.
///////////////////////////////////////////////////////////////////////////////////
// Returns 0 in case of success.
// Returns 1 in case of error.
/////////////////////////////////////////////////////////////////////////////////// int _sys_fat_dump( unsigned int type, char* pathname, unsigned int sector_id ) { unsigned char* buf; // pointer on 512 bytes buffer unsigned int cluster_id = sector_id >> 3; // cluster index in cache unsigned int block_id = sector_id & 0x7; // cluster index in cache unsigned int fd_id; // file/dir descriptor index if( type == DUMP_BS ) // dump the boot sector { // access block_device buf = _fat.block_buffer; if ( _fat_ioc_access( 1, // descheduling mode 1, // read 0, // boot sector lba (unsigned int)buf, 1 ) ) // one block { _printf("\n[FAT ERROR] _sys_fat_dump(): cannot access device for BS\n"); return 1; } _fat.block_buffer_lba = 0; // dump boot sector _dump_block( buf, "boot sector" , 0 , 0 ); } else if ( type == DUMP_FS ) // dump the fs_info sector { // access block_device buf = _fat.block_buffer; if ( _fat_ioc_access( 1, // descheduling mode 1, // read _fat.fs_info_lba, // fs_info sector lba (unsigned int)buf, 1 ) ) // one block { _printf("\n[FAT ERROR] _sys_fat_dump(): cannot access device for FS\n"); return 1; } _fat.block_buffer_lba = _fat.fs_info_lba; // dump fs-info sector _dump_block( buf, "fs_info sector" , 0 , 0 ); } else if ( type == DUMP_FAT ) // dump a fat sector { // get pointer on the relevant buffer descriptor in Fat-Cache fat_cache_desc_t* pdesc; if ( _get_fat_cache_buffer( cluster_id, &pdesc ) ) { _printf("\n[FAT ERROR] _sys_fat_dump(): cannot access Fat-Cache\n"); return 1; } buf = pdesc->buffer + (block_id<<9); // dump fat sector _dump_block( buf, "fat" , cluster_id , block_id ); } else if ( type == DUMP_FILE ) // dump a file sector { // open file fd_id = _fat_open( pathname , O_RDONLY ); if ( fd_id < 0 ) { _printf("\n[FAT ERROR] _sys_fat_dump(): cannot open file <%s>\n", pathname ); return 1; } fat_inode_t* inode = _fat.fd[fd_id].inode; // get pointer on the relevant buffer descriptor in File-Cache fat_cache_desc_t* pdesc; if ( _get_file_cache_buffer( inode, 
cluster_id, 0, &pdesc ) ) { _printf("\n[FAT ERROR] _sys_fat_dump(): cannot access file <%s>\n", pathname ); return 1; } buf = pdesc->buffer + (block_id<<9); // dump file sector _dump_block( buf, pathname , cluster_id , block_id ); // close file _fat_close( fd_id ); } else if ( type == DUMP_DIR ) // dump a directory sector { // open directory fd_id = _fat_opendir( pathname ); if ( fd_id < 0 ) { _printf("\n[FAT ERROR] _sys_fat_dump(): cannot open <%s>\n", pathname ); return 1; } fat_inode_t* inode = _fat.fd[fd_id].inode; // get pointer on the relevant buffer descriptor in File-Cache fat_cache_desc_t* pdesc; if ( _get_file_cache_buffer( inode, cluster_id, 0, &pdesc ) ) { _printf("\n[FAT ERROR] _sys_fat_dump(): cannot access <%s>\n", pathname ); return 1; } buf = pdesc->buffer + (block_id<<9); // dump directory sector _dump_block( buf, pathname , cluster_id , block_id ); // close directory _fat_closedir( fd_id ); } return 0; } // end sys_fat_dump ////////////////////////////////////////////////////////////////////////////// // Applications related syscall handlers ////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////// // This function is called by the _sys_exec_application() function // to reload all data segments contained in an application.elf file. // File checking is minimal, because these segments have already // been loaded by the boot code. 
//////////////////////////////////////////////////////////////////////// static unsigned int _load_writable_segments( mapping_vspace_t* vspace ) { #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _load_writable_segments() at cycle %d\n" "P[%d,%d,%d] enters for %s\n", _get_proctime() , x , y , p , vspace->name ); #endif mapping_header_t* header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vseg_t* vseg = _get_vseg_base(header); unsigned int vseg_id; // vseg index in mapping char buf[4096]; // buffer to store one cluster unsigned int fd = 0; // file descriptor unsigned int found = 0; // first scan on vsegs in vspace to find the .elf pathname for (vseg_id = vspace->vseg_offset; vseg_id < (vspace->vseg_offset + vspace->vsegs); vseg_id++) { if( vseg[vseg_id].type == VSEG_TYPE_ELF ) { // open the .elf file associated to vspace fd = _fat_open( vseg[vseg_id].binpath , O_RDONLY ); if ( fd < 0 ) return 1; #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _load_writable_segments() at cycle %d\n" "P[%d,%d,%d] open %s / fd = %d\n", _get_proctime() , x , y , p , vseg[vseg_id].binpath , fd ); #endif found = 1; break; } } // check .elf file found if ( found == 0 ) { _printf("\n[GIET ERROR] _load_writable_segments() : .elf not found\n"); return 1; } // load Elf-Header into buffer from .elf file if ( _fat_lseek( fd, 0, SEEK_SET ) < 0 ) { _printf("\n[GIET ERROR] _load_writable_segments() : cannot seek\n"); _fat_close( fd ); return 1; } if ( _fat_read( fd, (unsigned int)buf, 4096, 0, 0, 0 ) < 0 ) { _printf("\n[GIET ERROR] _load_writable_segments() : cannot read\n"); _fat_close( fd ); return 1; } #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _load_writable_segments() at cycle %d\n" "P[%d,%d,%d] loaded Elf-Header\n", _get_proctime() , x , y , p 
); #endif // get nsegments and Program-Header-Table offset from Elf-Header Elf32_Ehdr* elf_header_ptr = (Elf32_Ehdr*)buf; unsigned int offset = elf_header_ptr->e_phoff; unsigned int nsegments = elf_header_ptr->e_phnum; // load Program-Header-Table from .elf file if ( _fat_lseek( fd, offset, SEEK_SET ) < 0 ) { _fat_close( fd ); return 1; } if ( _fat_read( fd, (unsigned int)buf, 4096, 0, 0, 0 ) < 0 ) { _fat_close( fd ); return 1; } #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _load_writable_segments() at cycle %d\n" "P[%d,%d,%d] loaded Program-Header-Table\n", _get_proctime() , x , y , p ); #endif // set Program-Header-Table pointer Elf32_Phdr* elf_pht_ptr = (Elf32_Phdr*)buf; // second scan on vsegs in vspace to load the seg_data segments : // - type == VSEG_TYPE_ELF // - non eXecutable for (vseg_id = vspace->vseg_offset; vseg_id < (vspace->vseg_offset + vspace->vsegs); vseg_id++) { if( (vseg[vseg_id].type == VSEG_TYPE_ELF) && // type ELF ((vseg[vseg_id].mode & 0x4) == 0) ) // non executable { // get vbase and pbase paddr_t pbase = vseg[vseg_id].pbase; unsigned int vbase = vseg[vseg_id].vbase; // scan segments in Progam-Header-Table to find match // No match checking as the segment was previously found unsigned int seg; for (seg = 0 ; seg < nsegments ; seg++) { if ( (elf_pht_ptr[seg].p_type == PT_LOAD) && // loadable (elf_pht_ptr[seg].p_flags & PF_W) && // writable (elf_pht_ptr[seg].p_vaddr == vbase) ) // matching { // Get segment offset and size in .elf file unsigned int seg_offset = elf_pht_ptr[seg].p_offset; unsigned int seg_size = elf_pht_ptr[seg].p_filesz; // compute destination address and extension for _fat_read() unsigned int dest = (unsigned int)pbase; unsigned int extend = (unsigned int)(pbase>>32); // load the segment if ( _fat_lseek( fd, seg_offset, SEEK_SET ) < 0 ) { _fat_close( fd ); return 1; } if ( _fat_read( fd, dest, seg_size, extend, 0, FAT_PADDR_MODE ) < 0 ) { _fat_close( fd ); return 1; } } } #if 
GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _load_writable_segments() at cycle %d\n" "P[%d,%d,%d] loaded segment %x\n", _get_proctime() , x , y , p , vbase ); #endif } } // end loop on writable & loadable segments // close .elf file _fat_close( fd ); return 0; } // end load_writable_segments() /////////////////////////////////////// int _sys_exec_application( char* name ) { mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t * vspace = _get_vspace_base(header); mapping_thread_t * thread = _get_thread_base(header); mapping_vseg_t * vseg = _get_vseg_base(header); unsigned int vspace_id; unsigned int thread_id; #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_exec_application() at cycle %d\n" "P[%d,%d,%d] enters for vspace %s\n", _get_proctime() , x, y, p, name ); #endif unsigned int y_size = header->y_size; // scan vspaces to find matching vspace name for (vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++) { if ( _strcmp( vspace[vspace_id].name, name ) == 0 ) // vspace found { #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_exec_application() at cycle %d\n" "P[%d,%d,%d] found vspace %s\n", _get_proctime() , x, y, p, name ); #endif // reload writable segments if ( _load_writable_segments( &vspace[vspace_id] ) ) { _printf("[GIET ERROR] _sys_exec_application() : " "can't load data segment for vspace %s\n", name ); return SYSCALL_CANNOT_LOAD_DATA_SEGMENT; } #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_exec_application() at cycle %d\n" "P[%d,%d,%d] loaded all writable segments for vspace %s\n", _get_proctime() , x, y, p, name ); #endif // scan threads in vspace with three goals : // - check all threads desactivated // - re-initialise all 
threads contexts // - find main thread unsigned int main_found = 0; unsigned int main_ltid = 0; static_scheduler_t* main_psched = NULL; unsigned int min = vspace[vspace_id].thread_offset; unsigned int max = min + vspace[vspace_id].threads; for ( thread_id = min ; thread_id < max ; thread_id++ ) { // get thread identifiers : [x,y,p,ltid] unsigned int cid = thread[thread_id].clusterid; unsigned int x = cid / y_size; unsigned int y = cid % y_size; unsigned int p = thread[thread_id].proclocid; unsigned int ltid = thread[thread_id].ltid; unsigned int vsid = thread[thread_id].stack_vseg_id; // get scheduler pointer static_scheduler_t* psched = _schedulers[x][y][p]; // check thread non active if ( psched->context[ltid].slot[CTX_NORUN_ID] == 0 ) // runnable !!! { _printf("\n[GIET ERROR] in _sys_exec_application() : " "thread %s already active in vspace %s\n", thread[thread_id].name, name ); return SYSCALL_THREAD_ALREADY_ACTIVE; } // initialise thread context unsigned int ctx_epc = psched->context[ltid].slot[CTX_ENTRY_ID]; unsigned int ctx_sp = vseg[vsid].vbase + vseg[vsid].length; unsigned int ctx_ra = (unsigned int)&_ctx_eret; unsigned int ctx_sr = GIET_SR_INIT_VALUE; psched->context[ltid].slot[CTX_EPC_ID] = ctx_epc; psched->context[ltid].slot[CTX_RA_ID] = ctx_ra; psched->context[ltid].slot[CTX_SR_ID] = ctx_sr; psched->context[ltid].slot[CTX_SP_ID] = ctx_sp; // register information required to activate main thread // actual activation done when threads initialisation is completed if ( thread[thread_id].is_main ) { main_psched = psched; main_ltid = ltid; main_found = 1; } #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_exec_application() at cycle %d\n" "P[%d,%d,%d] initialise thread %s in vspace %s\n", _get_proctime() , x, y, p, thread[thread_id].name , name ); #endif } // end loop on threads // activate main thread if ( main_found ) { main_psched->context[main_ltid].slot[CTX_NORUN_ID] = 0; } else { _printf("\n[GIET ERROR] in 
_sys_exec_application() : " "main not found in vspace %s\n", name ); return SYSCALL_MAIN_NOT_FOUND; } _printf("\n[GIET WARNING] application %s launched : %d threads\n", name , max-min ); return SYSCALL_OK; } } // end of loop on vspaces // vspace not found _printf("\n[GIET ERROR] in _sys_exec_application() : " "vspace %s not found\n", name ); return SYSCALL_VSPACE_NOT_FOUND; } // end _sys_exec_application() /////////////////////////////////////// int _sys_kill_application( char* name ) { mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t * vspace = _get_vspace_base(header); mapping_thread_t * thread = _get_thread_base(header); unsigned int vspace_id; unsigned int thread_id; #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_kill_application() at cycle %d\n" "P[%d,%d,%d] enters for vspace %s\n", _get_proctime() , x , y , p , name ); #endif // shell cannot be killed if ( _strcmp( name , "shell" ) == 0 ) { _printf("\n[GIET ERROR] in _sys_kill_application() : " "%s application cannot be killed\n", name ); return SYSCALL_APPLI_CANNOT_BE_KILLED; } // scan vspaces to find matching vspace name for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++) { if ( _strcmp( vspace[vspace_id].name, name ) == 0 ) { // scan threads to send KILL signal to all threads in vspace unsigned int y_size = header->y_size; unsigned int min = vspace[vspace_id].thread_offset; unsigned int max = min + vspace[vspace_id].threads; for ( thread_id = min ; thread_id < max ; thread_id++ ) { unsigned int cid = thread[thread_id].clusterid; unsigned int x = cid / y_size; unsigned int y = cid % y_size; unsigned int p = thread[thread_id].proclocid; unsigned int ltid = thread[thread_id].ltid; // get scheduler pointer for processor running the thread static_scheduler_t* psched = 
(static_scheduler_t*)_schedulers[x][y][p]; // set KILL signal bit _atomic_or( &psched->context[ltid].slot[CTX_SIGS_ID] , SIGS_MASK_KILL ); } _printf("\n[GIET WARNING] application %s killed / %d threads\n", name , max-min ); return SYSCALL_OK; } } // en loop on vspaces _printf("\n[GIET ERROR] in _sys_kill_application() : " "application %s not found\n", name ); return SYSCALL_VSPACE_NOT_FOUND; } // end _sys_kill_application() ////////////////////////////////////////// int _sys_applications_status( char* name ) { mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_thread_t * thread = _get_thread_base(header); mapping_vspace_t * vspace = _get_vspace_base(header); mapping_cluster_t * cluster = _get_cluster_base(header); unsigned int thread_id; // thread index in mapping unsigned int vspace_id; // vspace index in mapping // scan vspaces for( vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++ ) { if ( (name == NULL) || (_strcmp(vspace[vspace_id].name , name ) == 0) ) { _user_printf("\n--- vspace %s ---\n", vspace[vspace_id].name ); // scan all threads in vspace unsigned int min = vspace[vspace_id].thread_offset ; unsigned int max = min + vspace[vspace_id].threads ; for ( thread_id = min ; thread_id < max ; thread_id++ ) { unsigned int clusterid = thread[thread_id].clusterid; unsigned int p = thread[thread_id].proclocid; unsigned int x = cluster[clusterid].x; unsigned int y = cluster[clusterid].y; unsigned int ltid = thread[thread_id].ltid; static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p]; unsigned int norun = psched->context[ltid].slot[CTX_NORUN_ID]; unsigned int tty = psched->context[ltid].slot[CTX_TTY_ID]; unsigned int current = psched->current; if ( current == ltid ) _user_printf(" - thread %s / P[%d,%d,%d] / ltid = %d / " "TTY = %d / norun = %x : running\n", thread[thread_id].name, x, y, p, ltid, tty, norun ); else if ( norun == 0 ) _user_printf(" - thread %s / P[%d,%d,%d] / ltid = %d / " "TTY = %d / norun = %x : 
runable\n", thread[thread_id].name, x, y, p, ltid, tty, norun); else _user_printf(" - thread %s / P[%d,%d,%d] / ltid = %d / " "TTY = %d / norun = %x : blocked\n", thread[thread_id].name, x, y, p, ltid, tty, norun); } } } _user_printf("\n"); return SYSCALL_OK; } // end _sys_applications_status() ///////////////////////////////////////////////////////////////////////////// // Threads related syscall handlers ///////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////// int _sys_pthread_create( unsigned int* buffer, void* attr, void* function, void* arg ) { // attr argument not supported if ( attr != NULL ) { _printf("\n[GIET ERROR] in _sys_pthread_create() : " "attr argument not supported\n" ); return SYSCALL_PTHREAD_ARGUMENT_NOT_SUPPORTED; } // get pointers in mapping mapping_header_t* header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_thread_t* thread = _get_thread_base(header); mapping_vspace_t* vspace = _get_vspace_base(header); mapping_cluster_t* cluster = _get_cluster_base(header); // get scheduler for processor running the calling thread static_scheduler_t* psched = (static_scheduler_t*)_get_sched(); // get calling thread local index in scheduler unsigned int current = psched->current; // get vspace index unsigned int vspace_id = psched->context[current].slot[CTX_VSID_ID]; #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_pthread_create() at cycle %d\n" "P[%d,%d,%d] enters for vspace %s / entry = %x\n", _get_proctime() , x , y , p , vspace[vspace_id].name , (unsigned int)function ); #endif unsigned int thread_id; // searched thread : local index in mapping unsigned int clusterid; // searched thread : cluster index unsigned int lpid; // searched thread : processor local index unsigned int ltid; // searched thread : 
scheduler thread index unsigned int cx; // searched thread : X coordinate for searched thread unsigned int cy; // searched thread : Y coordinate for searched thread unsigned int entry; // searched thread : entry point unsigned int norun; // searched thread : norun vector unsigned int trdid; // searched thread : thread identifier // scan threads in vspace to find an inactive thread matching function unsigned int min = vspace[vspace_id].thread_offset; unsigned int max = min + vspace[vspace_id].threads; unsigned int found = 0; for ( thread_id = min ; (thread_id < max) && (found == 0) ; thread_id++ ) { // get thread coordinates [cx,cy,lpid] and ltid from mapping ltid = thread[thread_id].ltid; clusterid = thread[thread_id].clusterid; lpid = thread[thread_id].proclocid; cx = cluster[clusterid].x; cy = cluster[clusterid].y; // get thread scheduler pointer psched = _schedulers[cx][cy][lpid]; // get thread entry-point, norun-vector, and trdid from context entry = psched->context[ltid].slot[CTX_ENTRY_ID]; norun = psched->context[ltid].slot[CTX_NORUN_ID]; trdid = psched->context[ltid].slot[CTX_TRDID_ID]; // check matching if ( ((unsigned int)function == entry ) && (norun & NORUN_MASK_THREAD) ) found = 1; } // end loop on threads if ( found ) // one matching inactive thread has been found { // set argument value in thread context if ( arg != NULL ) psched->context[ltid].slot[CTX_A0_ID] = (unsigned int)arg; // activate thread psched->context[ltid].slot[CTX_NORUN_ID] = 0; // return launched thead global identifier *buffer = trdid; #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_pthread_create() at cycle %d\n" "P[%d,%d,%d] exit : thread %x launched in vspace %s\n", _get_proctime() , x , y , p , trdid , vspace[vspace_id].name ); #endif return SYSCALL_OK; } else // no matching thread found { _printf("\n[GIET ERROR] in _sys_pthread_create() : " "no matching thread for entry = %x in vspace %s\n", (unsigned int)function , 
vspace[vspace_id].name ); return SYSCALL_THREAD_NOT_FOUND; } } // end _sys_pthread_create() /////////////////////////////////////////// int _sys_pthread_join( unsigned int trdid, void* ptr ) { // ptr argument not supported if ( ptr != NULL ) { _printf("\n[GIET ERROR] in _sys_pthread_join() : " "ptr argument not supported, must be NULL\n" ); return SYSCALL_PTHREAD_ARGUMENT_NOT_SUPPORTED; } // get calling thread vspace unsigned int caller_vspace = _get_context_slot( CTX_VSID_ID ); #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_pthread_join() at cycle %d\n" "P[%d,%d,%d] enters for thread %x in vspace %d\n", _get_proctime() , x , y , p , trdid , caller_vspace ); #endif // get target thread indexes from trdid unsigned int cx = (trdid>>24) & 0xFF; unsigned int cy = (trdid>>16) & 0xFF; unsigned int lpid = (trdid>>8 ) & 0xFF; unsigned int ltid = (trdid ) & 0xFF; // get target thread scheduler, vspace and registered trdid static_scheduler_t* psched = _schedulers[cx][cy][lpid]; unsigned int target_vspace = psched->context[ltid].slot[CTX_VSID_ID]; unsigned int registered_trdid = psched->context[ltid].slot[CTX_TRDID_ID]; // check trdid if ( trdid != registered_trdid ) { _printf("\nerror in _sys_pthread_join() : " "trdid = %x / registered_trdid = %x\n", trdid , registered_trdid ); return SYSCALL_UNCOHERENT_THREAD_CONTEXT; } // check calling thread and target thread in same vspace if ( caller_vspace != target_vspace ) { _printf("\n[GIET ERROR] in _sys_pthread_join() : " " calling thread and target thread not in same vspace\n"); return SYSCALL_NOT_IN_SAME_VSPACE; } // get target thread state unsigned int* pnorun = &psched->context[ltid].slot[CTX_NORUN_ID]; asm volatile ( "2000: \n" "move $11, %0 \n" /* $11 <= pnorun */ "lw $11, 0($11) \n" /* $11 <= norun */ "andi $11, $11, 1 \n" /* $11 <= norun & 0x1 */ "beqz 
$11, 2000b \n" : : "r" (pnorun) : "$11" ); #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_pthread_join() at cycle %d\n" "P[%d,%d,%d] exit for thread %x in vspace %d\n", _get_proctime() , x , y , p , trdid , caller_vspace ); #endif return SYSCALL_OK; } // end _sys_pthread_join() //////////////////////////////////////// int _sys_pthread_kill( pthread_t trdid, int signal ) { // get calling thread vspace unsigned int caller_vspace = _get_context_slot( CTX_VSID_ID ); #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_pthread_kill() at cycle %d\n" "P[%d,%d,%d] enters for thread %x in vspace %d\n", _get_proctime() , x , y , p , trdid , caller_vspace ); #endif // get and check target thread indexes from trdid unsigned int cx = (trdid>>24) & 0xFF; unsigned int cy = (trdid>>16) & 0xFF; unsigned int lpid = (trdid>>8 ) & 0xFF; unsigned int ltid = (trdid ) & 0xFF; // get target thread scheduler, vspace and registered trdid static_scheduler_t* psched = _schedulers[cx][cy][lpid]; unsigned int target_vspace = psched->context[ltid].slot[CTX_VSID_ID]; unsigned int registered_trdid = psched->context[ltid].slot[CTX_TRDID_ID]; // check trdid if ( trdid != registered_trdid ) { _printf("\n[GIET ERROR] in _sys_pthread_kill() : trdid = %x" " / registered_trdid = %x\n", trdid , registered_trdid ); return SYSCALL_UNCOHERENT_THREAD_CONTEXT; } // check calling thread and target thread in same vspace if ( caller_vspace != target_vspace ) { _printf("\n[GIET ERROR] in _sys_pthread_kill() : not in same vspace\n"); return SYSCALL_NOT_IN_SAME_VSPACE; } // register KILL signal in target thread context if required if ( signal ) { _atomic_or( &psched->context[ltid].slot[CTX_SIGS_ID] , SIGS_MASK_KILL ); } #if GIET_DEBUG_EXEC if ( _get_proctime() > GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] 
_sys_pthread_kill() at cycle %d\n" "P[%d,%d,%d] exit for thread %x in vspace %d\n", _get_proctime() , x , y , p , trdid , caller_vspace ); #endif return SYSCALL_OK; } // end _sys_pthread_kill() ///////////////////////////////////// int _sys_pthread_exit( void* string ) { mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t * vspace = _get_vspace_base(header); unsigned int ltid = _get_context_slot(CTX_LTID_ID); unsigned int trdid = _get_context_slot(CTX_TRDID_ID); unsigned int vsid = _get_context_slot(CTX_VSID_ID); // print exit message if ( string != NULL ) { _printf("\n[GIET WARNING] Exit thread %x in vspace %s\n" " Cause : %s\n\n", trdid , vspace[vsid].name , (char*) string ); } // get scheduler pointer for calling thread static_scheduler_t* psched = (static_scheduler_t*)_get_sched(); // register KILL signal in calling thread context (suicid request) _atomic_or( &psched->context[ltid].slot[CTX_SIGS_ID] , SIGS_MASK_KILL ); // deschedule calling thread unsigned int save_sr; _it_disable( &save_sr ); _ctx_switch(); return SYSCALL_OK; } // end _sys_pthread_exit() //////////////////////// int _sys_pthread_yield() { unsigned int save_sr; _it_disable( &save_sr ); _ctx_switch(); _it_restore( &save_sr ); return SYSCALL_OK; } ////////////////////////////////////////////////// int _sys_pthread_control( unsigned int command, char* vspace_name, char* thread_name ) { #if GIET_DEBUG_EXEC unsigned int gpid = _get_procid(); unsigned int cluster_xy = gpid >> P_WIDTH; unsigned int p = gpid & ((1<> Y_WIDTH; unsigned int y = cluster_xy & ((1< GIET_DEBUG_EXEC ) _printf("\n[DEBUG EXEC] _sys_pthread_control() at cycle %d\n" "P[%d,%d,%d] enter for vspace %s / thread %s / command = %d\n", _get_proctime() , x , y , p , vspace_name, thread_name, command ); #endif // get pointers in mapping mapping_header_t* header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_thread_t* thread = _get_thread_base(header); mapping_vspace_t* vspace = 
_get_vspace_base(header); mapping_cluster_t* cluster = _get_cluster_base(header); unsigned int found; // search vspace name to get vspace index: vsid found = 0; unsigned int vsid; for( vsid = 0 ; vsid < header->vspaces ; vsid++ ) { if ( _strcmp( vspace[vsid].name, vspace_name ) == 0 ) { found = 1; break; } } if ( found == 0 ) return SYSCALL_VSPACE_NOT_FOUND; // search thread name in vspace to get thread index: tid found = 0; unsigned int tid; unsigned int min = vspace[vsid].thread_offset; unsigned int max = min + vspace[vsid].threads; for( tid = min ; tid < max ; tid++ ) { if ( _strcmp( thread[tid].name, thread_name ) == 0 ) { found = 1; break; } } if ( found == 0 ) return SYSCALL_THREAD_NOT_FOUND; // get target thread coordinates unsigned int cid = thread[tid].clusterid; unsigned int cx = cluster[cid].x; unsigned int cy = cluster[cid].y; unsigned int cp = thread[tid].proclocid; unsigned int ltid = thread[tid].ltid; // get target thread scheduler static_scheduler_t* psched = _schedulers[cx][cy][cp]; // check trdid and vsid unsigned int trdid = cx<<24 | cy<<16 | cp<<8 | ltid; if ( (psched->context[ltid].slot[CTX_TRDID_ID] != trdid) || (psched->context[ltid].slot[CTX_VSID_ID] != vsid) ) { return SYSCALL_UNCOHERENT_THREAD_CONTEXT; } // execute command if ( command == THREAD_CMD_PAUSE ) { _atomic_or ( &psched->context[ltid].slot[CTX_NORUN_ID], NORUN_MASK_THREAD ); return SYSCALL_OK; } else if ( command == THREAD_CMD_RESUME ) { _atomic_and( &psched->context[ltid].slot[CTX_NORUN_ID], ~NORUN_MASK_THREAD ); return SYSCALL_OK; } else if ( command == THREAD_CMD_CONTEXT ) { _user_printf( " - CTX_TRDID = %x\n" " - CTX_VSID = %x\n" " - CTX_EPC = %x\n" " - CTX_PTAB = %x\n" " - CTX_PTPR = %x\n" " - CTX_SR = %x\n" " - CTX_RA = %x\n" " - CTX_SP = %x\n" " - CTX_ENTRY = %x\n" " - CTX_NORUN = %x\n" " - CTX_SIGS = %x\n" " - CTX_LOCKS = %x\n" " - CTX_TTY = %x\n" " - CTX_NIC_RX = %x\n" " - CTX_NIC_TX = %x\n" " - CTX_CMA_RX = %x\n" " - CTX_CMA_TX = %x\n" " - CTX_CMA_FB = %x\n", 
psched->context[ltid].slot[CTX_TRDID_ID], psched->context[ltid].slot[CTX_VSID_ID], psched->context[ltid].slot[CTX_EPC_ID], psched->context[ltid].slot[CTX_PTAB_ID], psched->context[ltid].slot[CTX_PTPR_ID], psched->context[ltid].slot[CTX_SR_ID], psched->context[ltid].slot[CTX_RA_ID], psched->context[ltid].slot[CTX_SP_ID], psched->context[ltid].slot[CTX_ENTRY_ID], psched->context[ltid].slot[CTX_NORUN_ID], psched->context[ltid].slot[CTX_SIGS_ID], psched->context[ltid].slot[CTX_LOCKS_ID], psched->context[ltid].slot[CTX_TTY_ID], psched->context[ltid].slot[CTX_NIC_RX_ID], psched->context[ltid].slot[CTX_NIC_TX_ID], psched->context[ltid].slot[CTX_CMA_RX_ID], psched->context[ltid].slot[CTX_CMA_TX_ID], psched->context[ltid].slot[CTX_CMA_FB_ID] ); return SYSCALL_OK; } else { return SYSCALL_ILLEGAL_THREAD_COMMAND_TYPE; } } // end _sys_pthread_control() ////////////////////////////////////////////////////////////////////////////// // Coprocessors related syscall handlers ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////// int _sys_coproc_alloc( unsigned int cluster_xy, unsigned int coproc_type, unsigned int* return_info ) { mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_cluster_t * cluster = _get_cluster_base(header); mapping_periph_t * periph = _get_periph_base(header); // compute cluster coordinates and cluster index in mapping unsigned int y = cluster_xy & ((1<>Y_WIDTH) & ((1<arg0 & 0xFF) | (found->arg1 & 0xFF)<<8 | (found->arg2 & 0xFF)<<16 | (found->arg3 & 0xFF)<<24 ; // returns coprocessor info *return_info = _coproc_info[cluster_id]; #if GIET_DEBUG_COPROC _printf("\n[DEBUG COPROC] _sys_coproc_alloc() at cycle %d\n" "type = %d in cluster[%d,%d] / coproc_info = %x\n", _get_proctime() , coproc_type, x , y , *return_info ); #endif return SYSCALL_OK; } else { _printf("\n[GIET_ERROR] in _sys_coproc_alloc(): " "no coprocessor with type %d in cluster[%d,%d]\n", coproc_type 
, x , y ); return SYSCALL_COPROCESSOR_NOT_FOUND; } } // end _sys_coproc_alloc() //////////////////////////////////////////////////////// int _sys_coproc_release( unsigned int cluster_xy, unsigned int coproc_type ) { // compute cluster coordinates and cluster index unsigned int y = cluster_xy & ((1<>Y_WIDTH) & ((1<>8) & 0xFF; unsigned int channel; // stops coprocessor and communication channels _mwr_set_coproc_register( cluster_xy , 0 , 0 ); for ( channel = 0 ; channel < (nb_from + nb_to) ; channel++ ) { _mwr_set_channel_register( cluster_xy , channel , MWR_CHANNEL_RUNNING , 0 ); } // release coprocessor lock _simple_lock_release( &_coproc_lock[cluster_id] ); #if GIET_DEBUG_COPROC _printf("\n[DEBUG COPROC] _sys_coproc_release() at cycle %d\n" "type = %d in cluster[%d,%d]\n", _get_proctime() , coproc_type , x , y ); #endif return SYSCALL_OK; } // end _sys_coproc_release() //////////////////////////////////////////////////////////////// int _sys_coproc_channel_init( unsigned int cluster_xy, unsigned int coproc_type, unsigned int channel, giet_coproc_channel_t* desc ) { // compute cluster coordinates and cluster index unsigned int y = cluster_xy & ((1<>Y_WIDTH) & ((1<channel_mode; if ( (mode != MODE_MWMR) && (mode != MODE_DMA_IRQ) && (mode != MODE_DMA_NO_IRQ) ) { _printf("\n[GIET_ERROR] in _sys_coproc_channel_init(): " "illegal mode\n"); return SYSCALL_COPROCESSOR_ILLEGAL_MODE; } // get memory buffer size unsigned int size = desc->buffer_size; // physical addresses unsigned long long buffer_paddr; unsigned int buffer_lsb; unsigned int buffer_msb; unsigned long long status_paddr = 0; unsigned int status_lsb; unsigned int status_msb; unsigned long long lock_paddr = 0; unsigned int lock_lsb; unsigned int lock_msb; unsigned int flags; // unused // compute memory buffer physical address buffer_paddr = _v2p_translate( desc->buffer_vaddr , &flags ); buffer_lsb = (unsigned int)buffer_paddr; buffer_msb = (unsigned int)(buffer_paddr>>32); // call MWMR_DMA driver 
_mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_MODE, mode ); _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_SIZE, size ); _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_BUFFER_LSB, buffer_lsb ); _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_BUFFER_MSB, buffer_msb ); if ( mode == MODE_MWMR ) { // compute MWMR descriptor physical address status_paddr = _v2p_translate( desc->status_vaddr , &flags ); status_lsb = (unsigned int)status_paddr; status_msb = (unsigned int)(status_paddr>>32); // call MWMR_DMA driver _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_MWMR_LSB, status_lsb ); _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_MWMR_MSB, status_msb ); // compute lock physical address lock_paddr = _v2p_translate( desc->lock_vaddr , &flags ); lock_lsb = (unsigned int)lock_paddr; lock_msb = (unsigned int)(lock_paddr>>32); // call MWMR_DMA driver _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_LOCK_LSB, lock_lsb ); _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_LOCK_MSB, lock_msb ); } #if GIET_DEBUG_COPROC _printf("\n[DEBUG COPROC] _sys_coproc_channel_init() at cycle %d\n" "cluster[%d,%d] / channel = %d / mode = %d / buffer_size = %d\n" "buffer_vaddr = %x / status_vaddr = %x / lock_vaddr = %x\n" "buffer_paddr = %l / status_paddr = %l / lock_paddr = %l\n", _get_proctime() , x , y , channel , mode , size , desc->buffer_vaddr, desc->status_vaddr, desc->lock_vaddr, buffer_paddr, status_paddr, lock_paddr ); #endif return SYSCALL_OK; } // end _sys_coproc_channel_init() ////////////////////////////////////////////// int _sys_coproc_run( unsigned int cluster_xy, unsigned int coproc_type ) { // compute cluster coordinates and cluster index unsigned int y = cluster_xy & ((1<>Y_WIDTH) & ((1<>8) & 0xFF; unsigned int mode = 0xFFFFFFFF; unsigned int channel; // check channels modes, and register coprocessor running mode for ( channel = 0 ; channel < (nb_from + nb_to) ; channel++ ) { 
unsigned int temp; temp = _mwr_get_channel_register( cluster_xy , channel , MWR_CHANNEL_MODE ); if ( mode == 0xFFFFFFFF ) { mode = temp; } else if ( temp != mode ) { _printf("\n[GIET_ERROR] in _sys_coproc_run(): " "channels don't have same mode in coprocessor[%d,%d]\n", x , y ); return SYSCALL_COPROCESSOR_ILLEGAL_MODE; } } _coproc_mode[cluster_id] = mode; // start all communication channels for ( channel = 0 ; channel < (nb_from + nb_to) ; channel++ ) { _mwr_set_channel_register( cluster_xy , channel , MWR_CHANNEL_RUNNING , 1 ); } ////////////////////////////////////////////////////////////////////////// if ( (mode == MODE_MWMR) || (mode == MODE_DMA_NO_IRQ) ) // no descheduling { // start coprocessor _mwr_set_coproc_register( cluster_xy , 0 , 1 ); #if GIET_DEBUG_COPROC if ( mode == MODE_MWMR ) _printf("\n[DEBUG COPROC] _sys_coproc_run() at cycle %d\n" "type = %d / cluster[%d,%d] / MODE_MWMR\n", _get_proctime() , coproc_type , x , y ); else _printf("\n[DEBUG COPROC] _sys_coproc_run() at cycle %d\n" "type = %d / cluster[%d,%d] / MODE_DMA_NO_IRQ\n", _get_proctime() , coproc_type , x , y ); #endif return SYSCALL_OK; } /////////////////////////////////////////////////////////////////////////// else // mode == MODE_DMA_IRQ => descheduling { // register calling thread trdid unsigned int trdid = _get_thread_trdid(); _coproc_trdid[cluster_id] = trdid; // enters critical section unsigned int save_sr; _it_disable( &save_sr ); // set NORUN_MASK_COPROC bit static_scheduler_t* psched = _get_sched(); unsigned int ltid = _get_thread_ltid(); unsigned int* ptr = &psched->context[ltid].slot[CTX_NORUN_ID]; _atomic_or( ptr , NORUN_MASK_COPROC ); // start coprocessor _mwr_set_coproc_register( cluster_xy , 0 , 1 ); #if GIET_DEBUG_COPROC _printf("\n[DEBUG COPROC] _sys_coproc_run() at cycle %d\n" "thread %x starts coprocessor / type = %d / cluster[%d,%d] / MODE_DMA_IRQ\n", _get_proctime() , trdid , coproc_type , x , y ); #endif // deschedule thread _ctx_switch(); #if GIET_DEBUG_COPROC 
_printf("\n[DEBUG COPROC] _sys_coproc_run() at cycle %d\n" "thread %x resume after coprocessor[%d,%d] completion\n", _get_proctime() , trdid , x , y ); #endif // restore SR _it_restore( &save_sr ); // return error computed by mwr_isr() return _coproc_error[cluster_id]; } } // end _sys_coproc_run() //////////////////////////////////////////////////// int _sys_coproc_completed( unsigned int cluster_xy, unsigned int coproc_type ) { // compute cluster coordinates and cluster index unsigned int y = cluster_xy & ((1<>Y_WIDTH) & ((1<>8) & 0xFF; unsigned int error = 0; unsigned int channel; unsigned int status; // get status for all channels, and signal reported errors for ( channel = 0 ; channel < (nb_to +nb_from) ; channel++ ) { do { status = _mwr_get_channel_register( cluster_xy, channel, MWR_CHANNEL_STATUS ); if ( status == MWR_CHANNEL_ERROR_DATA ) { _printf("\n[GIET_ERROR] in _sys_coproc_completed(): " "channel %d / DATA_ERROR\n", channel ); error = 1; } else if ( status == MWR_CHANNEL_ERROR_LOCK ) { _printf("\n[GIET_ERROR] in _sys_coproc_completed()" " / channel %d / LOCK_ERROR\n", channel ); error = 1; } else if ( status == MWR_CHANNEL_ERROR_DESC ) { _printf("\n[GIET_ERROR] in _sys_coproc_completed()" " / channel %d / DESC_ERROR\n", channel ); error = 1; } } while ( status == MWR_CHANNEL_BUSY ); // reset channel _mwr_set_channel_register( cluster_xy, channel, MWR_CHANNEL_RUNNING , 0 ); } // end for channels if ( error ) { return SYSCALL_COPROCESSOR_ILLEGAL_MODE; } else { #if GIET_DEBUG_COPROC _printf("\n[DEBUG COPROC] _sys_coproc_completed() at cycle %d\n" "coprocessor type = %d / cluster[%d,%d] completes operation for thread %d\n", _get_proctime() , coproc_type , x , y , _get_thread_trdid() ); #endif return SYSCALL_OK; } } else // mode == MODE_MWMR or MODE_DMA_IRQ { _printf("\n[GIET ERROR] in sys_coproc_completed(): " "coprocessor[%d,%d] is not running in MODE_DMA_NO_IRQ\n", x , y ); return SYSCALL_COPROCESSOR_ILLEGAL_MODE; } } // end _sys_coproc_completed() 
////////////////////////////////////////////////////////////////////////////// // TTY related syscall handlers ////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////// int _sys_tty_alloc( unsigned int shared ) { unsigned int channel = 0; // allocated TTY channel // get vsid for the calling thread unsigned int vsid = _get_context_slot( CTX_VSID_ID ); mapping_header_t *header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t *vspace = _get_vspace_base(header); mapping_thread_t *thread = _get_thread_base(header); // compute number of users unsigned int users; if ( shared ) users = vspace[vsid].threads; else users = 1; #if NB_TTY_CHANNELS > 1 // get trdid for the calling thread unsigned int trdid = _get_thread_trdid(); // get a TTY channel for ( channel = 0 ; channel < NB_TTY_CHANNELS ; channel++ ) { unsigned int* palloc = &_tty_channel_alloc[channel]; if ( _atomic_test_and_set( palloc , users ) == 0 ) break; } if ( ( channel >= NB_TTY_CHANNELS ) ) { _printf("\n[GIET_ERROR] in _sys_tty_alloc() : " "no TTY channel available for thread %x\n", trdid ); return SYSCALL_NO_CHANNEL_AVAILABLE; } // initialise allocated TTY channel _tty_init( channel ); // allocate a WTI mailbox to the calling proc if external IRQ unsigned int wti_id; if ( USE_PIC ) _ext_irq_alloc( ISR_TTY_RX , channel , &wti_id ); // register wti_id and coordinates for processor receiving WTI unsigned int procid = _get_procid(); unsigned int x = procid >> (Y_WIDTH + P_WIDTH); unsigned int y = (procid >> P_WIDTH) & ((1<y_size; unsigned int cid = thread[tid].clusterid; unsigned int x = cid / y_size; unsigned int y = cid % y_size; unsigned int p = thread[tid].proclocid; unsigned int ltid = thread[tid].ltid; static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p]; psched->context[ltid].slot[CTX_TTY_ID] = channel; } } else // for calling thread only { _set_context_slot( CTX_TTY_ID, channel ); } return SYSCALL_OK; } // end 
_sys_tty_alloc() ////////////////////// int _sys_tty_release() // NOTE: not a syscall: used by _ctx_kill_thread() { unsigned int channel = _get_context_slot( CTX_TTY_ID ); if ( channel == -1 ) { unsigned int trdid = _get_thread_trdid(); _printf("\n[GIET_ERROR] in _sys_tty_release() : " "TTY channel already released for thread %x\n", trdid ); return SYSCALL_CHANNEL_ALREADY_ALLOCATED; } // reset CTX_TTY_ID for the calling thread _set_context_slot( CTX_TTY_ID , 0xFFFFFFFF ); // atomically decrement the _tty_channel_allocator[] array _atomic_increment( &_tty_channel_alloc[channel] , -1 ); // release WTI mailbox if TTY channel no more used if ( USE_PIC && (_tty_channel_alloc[channel] == 0) ) { _ext_irq_release( ISR_TTY_RX , channel ); } return SYSCALL_OK; } // end sys_tty_release() //////////////////////////////////////// int _sys_tty_write( const char* buffer, unsigned int length, // number of characters unsigned int channel) // channel index { unsigned int nwritten; // compute and check tty channel if( channel == 0xFFFFFFFF ) channel = _get_context_slot(CTX_TTY_ID); if( channel >= NB_TTY_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_tty_write() : " "no TTY channel allocated for thread %x\n", _get_thread_trdid() ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // write string to TTY channel for (nwritten = 0; nwritten < length; nwritten++) { // check tty's status if ( _tty_get_register( channel, TTY_STATUS ) & 0x2 ) break; // write one byte if (buffer[nwritten] == '\n') { _tty_set_register( channel, TTY_WRITE, (unsigned int)'\r' ); } _tty_set_register( channel, TTY_WRITE, (unsigned int)buffer[nwritten] ); } return nwritten; } /////////////////////////////////////// int _sys_tty_read( char* buffer, unsigned int length, // unused unsigned int channel) // channel index { // compute and check tty channel if( channel == 0xFFFFFFFF ) channel = _get_context_slot(CTX_TTY_ID); if( channel >= NB_TTY_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_tty_read() : " "no TTY channel allocated 
for thread %x\n", _get_thread_trdid() ); return SYSCALL_CHANNEL_NON_ALLOCATED; } unsigned int save_sr; unsigned int found = 0; // get pointer on TTY_RX FIFO tty_fifo_t* fifo = &_tty_rx_fifo[channel]; // try to read one character from FIFO // blocked in while loop until success while ( found == 0 ) { if ( fifo->sts == 0) // FIFO empty => deschedule { // enters critical section _it_disable( &save_sr ); // set NORUN_MASK_TTY bit for calling thread static_scheduler_t* psched = (static_scheduler_t*)_get_sched(); unsigned int ltid = psched->current; _atomic_or( &psched->context[ltid].slot[CTX_NORUN_ID] , NORUN_MASK_TTY ); // register descheduling thread trdid fifo->trdid = _get_thread_trdid(); // deschedule calling thread _ctx_switch(); // exit critical section _it_restore( &save_sr ); } else // FIFO not empty => get one character { *buffer = fifo->data[fifo->ptr]; fifo->sts = fifo->sts - 1; fifo->ptr = (fifo->ptr + 1) % TTY_FIFO_DEPTH; found = 1; } } return 1; } ////////////////////////////////////////////////////////////////////////////// // TIMER related syscall handlers ////////////////////////////////////////////////////////////////////////////// //////////////////// int _sys_tim_alloc() { #if NB_TIM_CHANNELS unsigned int channel; // allocated TIMER channel unsigned int trdid = _get_thread_trdid(); // check no TIMER already allocated to calling thread if ( _get_context_slot( CTX_TIM_ID ) < NB_TIM_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_tim_alloc() : " "TIMER channel already allocated to thread %x\n", trdid ); return SYSCALL_CHANNEL_ALREADY_ALLOCATED; } // get a TIMER channel for ( channel = 0 ; channel < NB_TIM_CHANNELS ; channel++ ) { unsigned int* palloc = &_tim_channel_alloc[channel]; if ( _atomic_test_and_set( palloc , 1 ) == 0 ) break; } if ( channel >= NB_TIM_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_tim_alloc() : " "no TIMER channel available for thread %x\n", trdid ); return SYSCALL_NO_CHANNEL_AVAILABLE; } // allocate a WTI mailbox to the calling 
proc if external IRQ unsigned int wti_id; if ( USE_PIC ) _ext_irq_alloc( ISR_TIMER , channel , &wti_id ); // register wti_id and coordinates for processor receiving WTI unsigned int procid = _get_procid(); unsigned int x = procid >> (Y_WIDTH + P_WIDTH); unsigned int y = (procid >> P_WIDTH) & ((1<= NB_TIM_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_tim_start(): not enough TIM channels\n"); return SYSCALL_NO_CHANNEL_AVAILABLE; } // start timer _timer_start( channel, period ); return SYSCALL_OK; #else _printf("\n[GIET ERROR] in _sys_tim_start() : NB_TIM_CHANNELS = 0\n"); return SYSCALL_NO_CHANNEL_AVAILABLE; #endif } /////////////////// int _sys_tim_stop() { #if NB_TIM_CHANNELS // get timer index unsigned int channel = _get_context_slot( CTX_TIM_ID ); if ( channel >= NB_TIM_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_tim_stop() : illegal timer index\n"); return SYSCALL_CHANNEL_NON_ALLOCATED; } // stop timer _timer_stop( channel ); return SYSCALL_OK; #else _printf("\n[GIET ERROR] in _sys_tim_stop() : NB_TIM_CHANNELS = 0\n"); return SYSCALL_NO_CHANNEL_AVAILABLE; #endif } ////////////////////////////////////////////////////////////////////////////// // NIC related syscall handlers ////////////////////////////////////////////////////////////////////////////// #define NIC_CONTAINER_SIZE 4096 #if NB_NIC_CHANNELS && NB_CMA_CHANNELS //////////////////////////////////////// int _sys_nic_alloc( unsigned int is_rx, unsigned int xmax, unsigned int ymax ) { mapping_header_t *header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t *vspace = _get_vspace_base(header); mapping_thread_t *thread = _get_thread_base(header); // get calling thread trdid, vspace index, and number of threads unsigned int trdid = _get_thread_trdid(); unsigned int vsid = _get_context_slot( CTX_VSID_ID ); unsigned int users = vspace[vsid].threads; // check xmax / ymax parameters if ( (xmax > X_SIZE) || (ymax > Y_SIZE) ) { _printf("\n[GIET_ERROR] in _sys_nic_alloc() " "xmax or ymax argument too 
large for thread %x\n", trdid ); return SYSCALL_ILLEGAL_ARGUMENT; } //////////////////////////////////////////////////////// // Step 1: get and register CMA and NIC channel index // //////////////////////////////////////////////////////// unsigned int nic_channel; unsigned int cma_channel; unsigned int* palloc; // get a NIC_RX or NIC_TX channel for ( nic_channel = 0 ; nic_channel < NB_NIC_CHANNELS ; nic_channel++ ) { if ( is_rx ) palloc = &_nic_rx_channel_alloc[nic_channel]; else palloc = &_nic_tx_channel_alloc[nic_channel]; if ( _atomic_test_and_set( palloc , users ) == 0 ) break; } if ( (nic_channel >= NB_NIC_CHANNELS) ) { _printf("\n[GIET_ERROR] in _sys_nic_alloc() : " "no NIC channel available for thread %x\n", trdid ); return SYSCALL_NO_CHANNEL_AVAILABLE; } // get a CMA channel for ( cma_channel = 0 ; cma_channel < NB_CMA_CHANNELS ; cma_channel++ ) { palloc = &_cma_channel_alloc[cma_channel]; if ( _atomic_test_and_set( palloc , users ) == 0 ) break; } if ( cma_channel >= NB_CMA_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_nic_alloc() : " "no CMA channel available for thread %x\n", trdid ); if ( is_rx ) _nic_rx_channel_alloc[nic_channel] = 0; else _nic_tx_channel_alloc[nic_channel] = 0; return SYSCALL_NO_CHANNEL_AVAILABLE; } #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] sys_nic_alloc() at cycle %d\n" "thread %d get nic_channel = %d / cma_channel = %d\n", _get_proctime() , trdid , nic_channel , cma_channel ); #endif // register nic_index and cma_index in all threads // contexts that are in the same vspace unsigned int tid; for (tid = vspace[vsid].thread_offset; tid < (vspace[vsid].thread_offset + vspace[vsid].threads); tid++) { unsigned int y_size = header->y_size; unsigned int cid = thread[tid].clusterid; unsigned int x = cid / y_size; unsigned int y = cid % y_size; unsigned int p = thread[tid].proclocid; unsigned int ltid = thread[tid].ltid; static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p]; if ( is_rx ) { if ( 
(psched->context[ltid].slot[CTX_NIC_RX_ID] < NB_NIC_CHANNELS) || (psched->context[ltid].slot[CTX_CMA_RX_ID] < NB_CMA_CHANNELS) ) { _printf("\n[GIET_ERROR] in _sys_nic_alloc() : " "NIC_RX or CMA_RX channel already allocated for thread %x\n", trdid ); _nic_rx_channel_alloc[nic_channel] = 0; _cma_channel_alloc[cma_channel] = 0; return SYSCALL_CHANNEL_ALREADY_ALLOCATED; } else { psched->context[ltid].slot[CTX_NIC_RX_ID] = nic_channel; psched->context[ltid].slot[CTX_CMA_RX_ID] = cma_channel; } } else // is_tx { if ( (psched->context[ltid].slot[CTX_NIC_TX_ID] < NB_NIC_CHANNELS) || (psched->context[ltid].slot[CTX_CMA_TX_ID] < NB_CMA_CHANNELS) ) { _printf("\n[GIET_ERROR] in _sys_nic_alloc() : " "NIC_TX or CMA_TX channel already allocated for thread %x\n", trdid ); _nic_tx_channel_alloc[nic_channel] = 0; _cma_channel_alloc[cma_channel] = 0; return SYSCALL_CHANNEL_ALREADY_ALLOCATED; } else { psched->context[ltid].slot[CTX_NIC_TX_ID] = nic_channel; psched->context[ltid].slot[CTX_CMA_TX_ID] = cma_channel; } } } // end loop on threads ///////////////////////////////////////////////////////////////////////////////// // Step 2: loop on all the clusters // // Allocate the kernel containers and status, compute the container and the // // status physical addresses, fill and synchronize the kernel CHBUF descriptor // ///////////////////////////////////////////////////////////////////////////////// // physical addresses to be registered in the CMA registers unsigned long long nic_chbuf_pbase; // NIC chbuf physical address unsigned long long ker_chbuf_pbase; // kernel chbuf physical address // allocate one kernel container and one status variable per cluster in the // (xmax / ymax) mesh unsigned int cx; // cluster X coordinate unsigned int cy; // cluster Y coordinate unsigned int index; // container index in chbuf unsigned int vaddr; // virtual address unsigned long long cont_paddr; // container physical address unsigned long long sts_paddr; // container status physical address 
unsigned int flags; // for _v2p_translate() for ( cx = 0 ; cx < xmax ; cx++ ) { for ( cy = 0 ; cy < ymax ; cy++ ) { // compute index in chbuf index = (cx * ymax) + cy; // allocate the kernel container vaddr = (unsigned int)_remote_malloc( NIC_CONTAINER_SIZE, cx, cy ); if ( vaddr == 0 ) // not enough kernel heap memory in cluster[cx,cy] { _printf("\n[GIET_ERROR] in _sys_nic_alloc() : " "not enough kernel heap in cluster[%d,%d]\n", cx, cy ); return SYSCALL_OUT_OF_KERNEL_HEAP_MEMORY; } // compute container physical address cont_paddr = _v2p_translate( vaddr , &flags ); // checking container address alignment if ( cont_paddr & 0x3F ) { _printf("\n[GIET ERROR] in _sys_nic_alloc() : " "container address in cluster[%d,%d] not aligned\n", cx, cy); return SYSCALL_ADDRESS_NON_ALIGNED; } #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_alloc() at cycle %d\n" "thread %x allocates a container in cluster[%d,%d] / vaddr = %x / paddr = %l\n", -get_proctime() , trdid , cx , cy , vaddr, cont_paddr ); #endif // allocate the kernel container status // it occupies 64 bytes but only last bit is useful (1 for full and 0 for empty) vaddr = (unsigned int)_remote_malloc( 64, cx, cy ); if ( vaddr == 0 ) // not enough kernel heap memory in cluster[cx,cy] { _printf("\n[GIET_ERROR] in _sys_nic_alloc() : " "not enough kernel heap in cluster[%d,%d]\n", cx, cy ); return SYSCALL_OUT_OF_KERNEL_HEAP_MEMORY; } // compute status physical address sts_paddr = _v2p_translate( vaddr , &flags ); // checking status address alignment if ( sts_paddr & 0x3F ) { _printf("\n[GIET ERROR] in _sys_nic_alloc() : " "status address in cluster[%d,%d] not aligned\n", cx, cy); return SYSCALL_ADDRESS_NON_ALIGNED; } // initialize chbuf entry // The buffer descriptor has the following structure: // - the 26 LSB bits contain bits[6:31] of the buffer physical address // - the 26 following bits contain bits[6:31] of the physical address where the // buffer status is located // - the 12 MSB bits contain the common address 
extension of the buffer and its // status if ( is_rx ) _nic_ker_rx_chbuf[nic_channel].buf_desc[index] = (unsigned long long) ((sts_paddr & 0xFFFFFFFFULL) >> 6) + (((cont_paddr & 0xFFFFFFFFFFFULL) >> 6) << 26); else _nic_ker_tx_chbuf[nic_channel].buf_desc[index] = (unsigned long long) ((sts_paddr & 0xFFFFFFC0ULL) >> 6) + (((cont_paddr & 0xFFFFFFFFFC0ULL) >> 6) << 26); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_alloc() at cycle %d\n" "thread %x allocates a status in cluster[%d,%d] / vaddr = %x / paddr = %l\n" " descriptor = %l\n", _get_proctime() , trdid , cx , cy , vaddr, sts_paddr, (unsigned long long)((sts_paddr & 0xFFFFFFFFULL) >> 6) + (((cont_paddr & 0xFFFFFFFFFFFULL) >> 6) << 26) ); #endif } } // complete kernel chbuf initialisation if ( is_rx ) { _nic_ker_rx_chbuf[nic_channel].xmax = xmax; _nic_ker_rx_chbuf[nic_channel].ymax = ymax; } else { _nic_ker_tx_chbuf[nic_channel].xmax = xmax; _nic_ker_tx_chbuf[nic_channel].ymax = ymax; } // compute the kernel chbuf descriptor physical address if ( is_rx ) vaddr = (unsigned int)( &_nic_ker_rx_chbuf[nic_channel] ); else vaddr = (unsigned int)( &_nic_ker_tx_chbuf[nic_channel] ); ker_chbuf_pbase = _v2p_translate( vaddr , &flags ); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_alloc() at cycle %d\n" "thread %x initialise kernel chbuf / vaddr = %x / paddr = %l\n", _get_proctime() , trdid , vaddr , ker_chbuf_pbase ); #endif // sync the kernel chbuf in L2 after write in L2 _mmc_sync( ker_chbuf_pbase, sizeof( nic_chbuf_t ) ); /////////////////////////////////////////////////////////////// // Step 3: compute the NIC chbuf descriptor physical address // /////////////////////////////////////////////////////////////// unsigned int offset; if ( is_rx ) offset = 0x4100; else offset = 0x4110; nic_chbuf_pbase = (((unsigned long long)((X_IO << Y_WIDTH) + Y_IO))<<32) | (SEG_NIC_BASE + (nic_channel<<15) + offset); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_alloc() at cycle %d\n" "thread %x get NIC chbuf paddr = 
%l\n", _get_proctime() , trdid , nic_chbuf_pbase ); #endif //////////////////////////////////////////////////////////////////////////////// // Step 4: initialize CMA registers defining the source & destination chbufs // //////////////////////////////////////////////////////////////////////////////// if ( is_rx ) // NIC to kernel { _cma_set_register( cma_channel, CHBUF_SRC_DESC , (unsigned int)(nic_chbuf_pbase) ); _cma_set_register( cma_channel, CHBUF_SRC_EXT , (unsigned int)(nic_chbuf_pbase>>32) ); _cma_set_register( cma_channel, CHBUF_SRC_NBUFS, 2 ); _cma_set_register( cma_channel, CHBUF_DST_DESC , (unsigned int)(ker_chbuf_pbase) ); _cma_set_register( cma_channel, CHBUF_DST_EXT , (unsigned int)(ker_chbuf_pbase>>32) ); _cma_set_register( cma_channel, CHBUF_DST_NBUFS, xmax * ymax ); } else // kernel to NIC { _cma_set_register( cma_channel, CHBUF_SRC_DESC , (unsigned int)(ker_chbuf_pbase) ); _cma_set_register( cma_channel, CHBUF_SRC_EXT , (unsigned int)(ker_chbuf_pbase>>32) ); _cma_set_register( cma_channel, CHBUF_SRC_NBUFS, xmax * ymax ); _cma_set_register( cma_channel, CHBUF_DST_DESC , (unsigned int)(nic_chbuf_pbase) ); _cma_set_register( cma_channel, CHBUF_DST_EXT , (unsigned int)(nic_chbuf_pbase>>32) ); _cma_set_register( cma_channel, CHBUF_DST_NBUFS, 2 ); } #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_alloc() at cycle %d\n" "thread %x exit\n", _get_proctime() , trdid ); #endif return SYSCALL_OK; } // end _sys_nic_alloc() ////////////////////////////////////////// int _sys_nic_release( unsigned int is_rx ) // NOTE: not a syscall: used by _ctx_kill_thread() { unsigned int trdid = _get_thread_trdid(); unsigned int nic_channel; unsigned int cma_channel; // update the kernel tables if ( is_rx ) { nic_channel = _get_context_slot( CTX_NIC_RX_ID ); cma_channel = _get_context_slot( CTX_CMA_RX_ID ); if ( (nic_channel >= NB_NIC_CHANNELS) ) { _printf("\n[GIET ERROR] in _sys_nic_release() : " "NIC_RX channel already released for thread %x\n", trdid ); return 
SYSCALL_CHANNEL_NON_ALLOCATED; } if ( (cma_channel >= NB_CMA_CHANNELS) ) { _printf("\n[GIET ERROR] in _sys_nic_release() : " "CMA_RX channel already released for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // atomically decrement the NIC and CMA channel allocators _atomic_increment( &_nic_rx_channel_alloc[nic_channel] , -1 ); _atomic_increment( &_cma_channel_alloc[cma_channel] , -1 ); // stop the NIC and CMA peripherals channels if no more users if ( (_nic_rx_channel_alloc[nic_channel] == 0) && (_cma_channel_alloc[cma_channel] == 0) ) _sys_nic_stop( 1 ); // reset the calling thread context slots _set_context_slot( CTX_NIC_RX_ID , 0xFFFFFFFF ); _set_context_slot( CTX_CMA_RX_ID , 0xFFFFFFFF ); } else { nic_channel = _get_context_slot( CTX_NIC_TX_ID ); cma_channel = _get_context_slot( CTX_CMA_TX_ID ); if ( (nic_channel >= NB_NIC_CHANNELS) ) { _printf("\n[GIET ERROR] in _sys_nic_release() : " "NIC_TX channel already released for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } if ( (cma_channel >= NB_CMA_CHANNELS) ) { _printf("\n[GIET ERROR] in _sys_nic_release() : " "CMA_TX channel already released for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // atomically decrement the NIC and CMA channel allocators _atomic_increment( &_nic_tx_channel_alloc[nic_channel] , -1 ); _atomic_increment( &_cma_channel_alloc[cma_channel] , -1 ); // stop the NIC and CMA peripherals channels if no more users if ( (_nic_tx_channel_alloc[nic_channel] == 0) && (_cma_channel_alloc[cma_channel] == 0) ) _sys_nic_stop( 0 ); // reset the calling thread context slots _set_context_slot( CTX_NIC_TX_ID , 0xFFFFFFFF ); _set_context_slot( CTX_CMA_TX_ID , 0xFFFFFFFF ); } return SYSCALL_OK; } // end sys_nic_release() //////////////////////////////////////// int _sys_nic_start( unsigned int is_rx ) { unsigned int trdid = _get_context_slot( CTX_TRDID_ID ); unsigned int nic_channel; unsigned int cma_channel; // get NIC channel index and CMA channel index from 
thread context if ( is_rx ) { nic_channel = _get_context_slot( CTX_NIC_RX_ID ); cma_channel = _get_context_slot( CTX_CMA_RX_ID ); } else { nic_channel = _get_context_slot( CTX_NIC_TX_ID ); cma_channel = _get_context_slot( CTX_CMA_TX_ID ); } #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_start() at cycle %d\n" "thread %x enter / NIC channel = %d / CMA channel = %d\n", _get_proctime() , trdid , nic_channel, cma_channel ); #endif // check NIC and CMA channels index if ( nic_channel >= NB_NIC_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_nic_start() : " "illegal NIC channel for thread %x\n", trdid ); return -1111; } if ( cma_channel >= NB_CMA_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_nic_start() : " "illegal CMA channel for thread %x\n", trdid ); return -1111; } // start CMA transfer _cma_set_register( cma_channel, CHBUF_BUF_SIZE , NIC_CONTAINER_SIZE ); _cma_set_register( cma_channel, CHBUF_PERIOD , 0 ); // OUT_OF_ORDER _cma_set_register( cma_channel, CHBUF_RUN , MODE_NORMAL ); // activates NIC channel _nic_channel_start( nic_channel, is_rx, GIET_NIC_MAC4, GIET_NIC_MAC2 ); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_start() at cycle %d\n" "thread %d exit\n", _get_proctime() , trdid ); #endif return SYSCALL_OK; } // end _sys_nic_start() ////////////////////////////////////// int _sys_nic_move( unsigned int is_rx, void* buffer ) { unsigned int trdid = _get_context_slot( CTX_TRDID_ID ); unsigned int channel; #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n", "thread %x enters\n", _get_proctime() , trdid ); #endif // get NIC channel index from thread context if ( is_rx ) channel = _get_context_slot( CTX_NIC_RX_ID ); else channel = _get_context_slot( CTX_NIC_TX_ID ); // check NIC channel index if ( channel >= NB_NIC_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_nic_move() : " "NIC channel non allocated for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // get kernel chbuf virtual address nic_chbuf_t* ker_chbuf; if ( is_rx ) 
ker_chbuf = &_nic_ker_rx_chbuf[channel]; else ker_chbuf = &_nic_ker_tx_chbuf[channel]; // get xmax / ymax parameters unsigned int xmax = ker_chbuf->xmax; unsigned int ymax = ker_chbuf->ymax; // get cluster coordinates for the processor running the calling thread unsigned int procid = _get_procid(); unsigned int cx = procid >> (Y_WIDTH + P_WIDTH); unsigned int cy = (procid >> P_WIDTH) & ((1<= xmax) || (cy >= ymax) ) { _printf("\n[GIET_ERROR] in _sys_nic_move(): " "processor coordinates [%d,%d] larger than (xmax,ymax) = [%d,%d]\n", cx , cy , xmax , ymax ); return SYSCALL_ILLEGAL_ARGUMENT; } unsigned long long usr_buf_paddr; // user buffer physical address unsigned long long ker_buf_paddr; // kernel buffer physical address unsigned long long ker_sts_paddr; // kernel buffer status physical address unsigned long long ker_buf_desc; // kernel buffer descriptor unsigned int ker_sts; // kernel buffer status (full or empty) unsigned int index; // kernel buffer index in chbuf unsigned int flags; // for _v2P_translate // Compute user buffer physical address and check access rights usr_buf_paddr = _v2p_translate( (unsigned int)buffer , &flags ); if ( (flags & PTE_U) == 0 ) { _printf("\n[GIET ERROR] in _sys_nic_tx_move() : " "buffer address non user accessible\n"); return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE; } #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n" "thread %x get user buffer : paddr = %l\n", _get_proctime() , trdid , usr_buf_paddr ); #endif // compute buffer index, buffer descriptor paddr and buffer status paddr index = (ymax * cx) + cy; ker_buf_desc = ker_chbuf->buf_desc[index]; ker_sts_paddr = ((ker_buf_desc & 0xFFF0000000000000ULL) >> 20) + ((ker_buf_desc & 0x3FFFFFFULL) << 6); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n" "thread %x get ker_buf_desc %d / paddr = %l\n", _get_proctime(), trdid , index , ker_buf_desc ); #endif // poll local kernel container status until success while ( 1 ) { // inval buffer descriptor 
in L2 before read in L2 _mmc_inval( ker_sts_paddr, 4 ); ker_sts = _physical_read( ker_sts_paddr ); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n" "thread %x get status %d / paddr = %l / status = %x\n", _get_proctime() , trdid , index , ker_sts_paddr, ker_sts ); #endif // test buffer status and break if found if ( ( is_rx != 0 ) && ( ker_sts == 0x1 ) ) break; if ( ( is_rx == 0 ) && ( ker_sts == 0 ) ) break; } // compute kernel buffer physical address ker_buf_paddr = (ker_buf_desc & 0xFFFFFFFFFC000000ULL) >> 20; // move one container if ( is_rx ) // RX transfer { // inval kernel buffer in L2 before read in L2 _mmc_inval( ker_buf_paddr, NIC_CONTAINER_SIZE ); // transfer data from kernel buffer to user buffer _physical_memcpy( usr_buf_paddr, ker_buf_paddr, NIC_CONTAINER_SIZE ); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n" "thread %x transfer kernel buffer %l to user buffer %l\n", _get_proctime() , trdid , ker_buf_paddr , usr_buf_paddr ); #endif } else // TX transfer { // transfer data from user buffer to kernel buffer _physical_memcpy( ker_buf_paddr, usr_buf_paddr, NIC_CONTAINER_SIZE ); // sync kernel buffer in L2 after write in L2 _mmc_sync( ker_buf_paddr, NIC_CONTAINER_SIZE ); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n" "thread %x transfer user buffer %l to kernel buffer %l\n", _get_proctime() , trdid , usr_buf_paddr , ker_buf_paddr ); #endif } // update kernel chbuf status if ( is_rx ) _physical_write ( ker_sts_paddr, 0 ); else _physical_write ( ker_sts_paddr, 0x1 ); // sync kernel chbuf in L2 after write in L2 _mmc_sync( ker_sts_paddr, 4 ); #if GIET_DEBUG_NIC _printf("\n[DEBUG NIC] _sys_nic_move() at cycle %d\n" "thread %x exit\n", _get_proctime() , trdid ); #endif return SYSCALL_OK; } // end _sys_nic_move() /////////////////////////////////////// int _sys_nic_stop( unsigned int is_rx ) { unsigned int trdid = _get_context_slot( CTX_TRDID_ID ); unsigned int nic_channel; unsigned int 
cma_channel;

    // get NIC channel index and CMA channel index
    if ( is_rx )
    {
        nic_channel = _get_context_slot( CTX_NIC_RX_ID );
        cma_channel = _get_context_slot( CTX_CMA_RX_ID );
    }
    else
    {
        nic_channel = _get_context_slot( CTX_NIC_TX_ID );
        cma_channel = _get_context_slot( CTX_CMA_TX_ID );
    }

    // check NIC and CMA channels index
    if ( nic_channel >= NB_NIC_CHANNELS )
    {
        _printf("\n[GIET_ERROR] in _sys_nic_stop() : "
                "NIC channel non allocated for thread %x\n", trdid );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }
    if ( cma_channel >= NB_CMA_CHANNELS )
    {
        _printf("\n[GIET_ERROR] in _sys_nic_stop() : "
                "CMA channel non allocated for thread %x\n", trdid );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    // deactivates the CMA channel
    _cma_set_register( cma_channel, CHBUF_RUN , MODE_IDLE );

    // wait until CMA channel IDLE
    // (busy-wait: the channel may still be moving the current container)
    unsigned int volatile status;
    do { status = _cma_get_register( cma_channel, CHBUF_STATUS ); }
    while ( status );

    // deactivates the NIC channel
    _nic_channel_stop( nic_channel, is_rx );

    return SYSCALL_OK;
}  // end _sys_nic_stop()

////////////////////////////////////////
// _sys_nic_clear()
// Resets the NIC global instrumentation counters for the RX pipeline
// (is_rx != 0) or the TX pipeline (is_rx == 0).
// Returns SYSCALL_OK, or SYSCALL_CHANNEL_NON_ALLOCATED if the calling
// thread does not own a NIC channel for the requested direction.
int _sys_nic_clear( unsigned int is_rx )
{
    unsigned int trdid = _get_context_slot( CTX_TRDID_ID );

    unsigned int channel;

    // get NIC channel
    if ( is_rx )  channel = _get_context_slot( CTX_NIC_RX_ID );
    else          channel = _get_context_slot( CTX_NIC_TX_ID );

    if ( channel >= NB_NIC_CHANNELS )
    {
        _printf("\n[GIET_ERROR] in _sys_nic_clear() : "
                "NIC channel non allocated for thread %x\n", trdid );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    if ( is_rx )
    {
        _nic_set_global_register( NIC_G_NPKT_RX_G2S_RECEIVED , 0 );
        _nic_set_global_register( NIC_G_NPKT_RX_DES_TOO_SMALL , 0 );
        _nic_set_global_register( NIC_G_NPKT_RX_DES_TOO_BIG , 0 );
        _nic_set_global_register( NIC_G_NPKT_RX_DES_MFIFO_FULL , 0 );
        _nic_set_global_register( NIC_G_NPKT_RX_DES_CRC_FAIL , 0 );
        _nic_set_global_register( NIC_G_NPKT_RX_DISPATCH_RECEIVED , 0 );
        _nic_set_global_register( NIC_G_NPKT_RX_DISPATCH_BROADCAST , 0 );
        _nic_set_global_register( NIC_G_NPKT_RX_DISPATCH_DST_FAIL , 0 );
        _nic_set_global_register( NIC_G_NPKT_RX_DISPATCH_CH_FULL , 0 );
    }
    else
    {
        _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_RECEIVED , 0 );
        _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_TRANSMIT , 0 );
        _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_TOO_BIG , 0 );
        _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_TOO_SMALL , 0 );
        _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_SRC_FAIL , 0 );
        _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_BYPASS , 0 );
        _nic_set_global_register( NIC_G_NPKT_TX_DISPATCH_BROADCAST , 0 );
    }
    return SYSCALL_OK;
}  // end _sys_nic_clear()

////////////////////////////////////////
// _sys_nic_stats()
// Displays on TTY0 the NIC global instrumentation counters for the RX
// or TX packet pipeline of the channel owned by the calling thread.
int _sys_nic_stats( unsigned int is_rx )
{
    unsigned int trdid = _get_context_slot( CTX_TRDID_ID );

    unsigned int nic_channel;

    // get NIC channel
    if ( is_rx )  nic_channel = _get_context_slot( CTX_NIC_RX_ID );
    else          nic_channel = _get_context_slot( CTX_NIC_TX_ID );

    if ( nic_channel >= NB_NIC_CHANNELS )
    {
        _printf("\n[GIET_ERROR] in _sys_nic_stats() : "
                "NIC channel non allocated for thread %x\n", trdid );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    if ( is_rx )
    {
        unsigned int received  = _nic_get_global_register( NIC_G_NPKT_RX_G2S_RECEIVED );
        unsigned int too_small = _nic_get_global_register( NIC_G_NPKT_RX_DES_TOO_SMALL );
        unsigned int too_big   = _nic_get_global_register( NIC_G_NPKT_RX_DES_TOO_BIG );
        unsigned int fifo_full = _nic_get_global_register( NIC_G_NPKT_RX_DES_MFIFO_FULL );
        unsigned int crc_fail  = _nic_get_global_register( NIC_G_NPKT_RX_DES_CRC_FAIL );
        unsigned int broadcast = _nic_get_global_register( NIC_G_NPKT_RX_DISPATCH_BROADCAST );
        unsigned int dst_fail  = _nic_get_global_register( NIC_G_NPKT_RX_DISPATCH_DST_FAIL );
        unsigned int ch_full   = _nic_get_global_register( NIC_G_NPKT_RX_DISPATCH_CH_FULL );

        _printf("\n### Network Controller RX Statistics ###\n"
                "- packets received : %d\n"
                "- too small : %d\n"
                "- too big : %d\n"
                "- fifo full : %d\n"
                "- crc fail : %d\n"
                "- dst mac fail : %d\n"
                "- channel full : %d\n"
                "- broadcast :
%d\n", received, too_small, too_big, fifo_full, crc_fail, dst_fail, ch_full, broadcast ); } else { unsigned int received = _nic_get_global_register( NIC_G_NPKT_TX_DISPATCH_RECEIVED ); unsigned int too_big = _nic_get_global_register( NIC_G_NPKT_TX_DISPATCH_TOO_BIG ); unsigned int too_small = _nic_get_global_register( NIC_G_NPKT_TX_DISPATCH_TOO_SMALL ); unsigned int src_fail = _nic_get_global_register( NIC_G_NPKT_TX_DISPATCH_SRC_FAIL ); unsigned int bypass = _nic_get_global_register( NIC_G_NPKT_TX_DISPATCH_BYPASS ); unsigned int broadcast = _nic_get_global_register( NIC_G_NPKT_TX_DISPATCH_BROADCAST ); _printf("\n### Network Controller TX Statistics ###\n" "- packets received : %d\n" "- too small : %d\n" "- too big : %d\n" "- src mac fail : %d\n" "- bypass : %d\n" "- broadcast : %d\n", received, too_big, too_small, src_fail, bypass, broadcast ); } return SYSCALL_OK; } // end _sys_nic_stats() #endif // if NB_NIC_CHANNELS && NB_CMA_CHANNELS ///////////////////////////////////////////////////////////////////////////////////////// // FBF related syscall handlers ///////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////// int _sys_fbf_size( unsigned int* width, unsigned int* height ) { if ( USE_FBF == 0 ) { *width = 0; *height = 0; } else { *width = FBUF_X_SIZE; *height = FBUF_Y_SIZE; } return SYSCALL_OK; } //////////////////// int _sys_fbf_alloc() { mapping_header_t *header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t *vspace = _get_vspace_base(header); mapping_thread_t *thread = _get_thread_base(header); // compute number of users unsigned int vsid = _get_context_slot(CTX_VSID_ID); unsigned int users = vspace[vsid].threads; // access FBF allocator // register it in all threads contexts if ( _atomic_test_and_set( &_fbf_alloc , users ) == 0 ) // FBF available { unsigned int min = vspace[vsid].thread_offset; unsigned int max = min + users; unsigned int tid; for ( tid = min ; tid < max ; 
tid++ ) { unsigned int y_size = header->y_size; unsigned int cid = thread[tid].clusterid; unsigned int x = cid / y_size; unsigned int y = cid % y_size; unsigned int p = thread[tid].proclocid; unsigned int ltid = thread[tid].ltid; static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p]; _atomic_or( &psched->context[ltid].slot[CTX_LOCKS_ID] , LOCKS_MASK_FBF ); } return SYSCALL_OK; } else // FBF already allocated { return SYSCALL_SHARED_PERIPHERAL_BUSY; } } ////////////////////// int _sys_fbf_release() // not a syscall: used by _ctx_kill_thread() { // get calling thread scheduler, ltid and trdid static_scheduler_t* psched = _get_sched(); unsigned int ltid = _get_thread_ltid(); unsigned int trdid = _get_thread_trdid(); if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 ) { _printf("\n[GIET ERROR] in _sys_fbf_release() : " "FBF not allocated to thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // decrement FBF allocator // reset the calling thread context _atomic_increment( &_fbf_alloc , 0xFFFFFFFF ); _atomic_and( &psched->context[ltid].slot[CTX_LOCKS_ID] , ~LOCKS_MASK_FBF ); return SYSCALL_OK; } ///////////////////////////////////////////// int _sys_fbf_sync_write( unsigned int offset, void* buffer, unsigned int length ) { // get calling thread scheduler, ltid and trdid static_scheduler_t* psched = _get_sched(); unsigned int ltid = _get_thread_ltid(); unsigned int trdid = _get_thread_trdid(); if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 ) { _printf("\n[GIET ERROR] in _sys_fbf_release() : " "FBF not allocated to thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } char* fbf_address = (char *)SEG_FBF_BASE + offset; memcpy( fbf_address, buffer, length); return SYSCALL_OK; } ///////////////////////////////////////////// int _sys_fbf_sync_read( unsigned int offset, void* buffer, unsigned int length ) { // get calling thread scheduler, ltid and trdid static_scheduler_t* psched = _get_sched(); 
unsigned int ltid = _get_thread_ltid(); unsigned int trdid = _get_thread_trdid(); if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 ) { _printf("\n[GIET ERROR] in _sys_fbf_release() : " "FBF not allocated to thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } char* fbf_address = (char *)SEG_FBF_BASE + offset; memcpy( buffer, fbf_address, length); return SYSCALL_OK; } #if NB_CMA_CHANNELS //////////////////////////////////////////// int _sys_fbf_cma_alloc( unsigned int nbufs ) { // compute trdid and vsid for the calling thread unsigned int vsid = _get_context_slot( CTX_VSID_ID ); unsigned int trdid = _get_thread_trdid(); if ( _get_context_slot( CTX_CMA_FB_ID ) < NB_CMA_CHANNELS ) { _printf("\n[GIET ERROR] in _sys_fbf_cma_alloc() : " "CMA channel already allocated for thread %x\n", trdid ); return SYSCALL_CHANNEL_ALREADY_ALLOCATED; } // compute number of threads in vspace from mapping mapping_header_t *header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t *vspace = _get_vspace_base(header); mapping_thread_t *thread = _get_thread_base(header); unsigned int first = vspace[vsid].thread_offset; unsigned int threads = vspace[vsid].threads; // get a CMA channel unsigned int channel; for ( channel = 0 ; channel < NB_CMA_CHANNELS ; channel++ ) { unsigned int* palloc = &_cma_channel_alloc[channel]; if ( _atomic_test_and_set( palloc , threads ) == 0 ) break; } if ( channel >= NB_CMA_CHANNELS ) { _printf("\n[GIET ERROR] in _sys_fbf_cma_alloc() : no CMA channel available\n"); return SYSCALL_NO_CHANNEL_AVAILABLE; } // check nbufs argument if ( nbufs > 256 ) { _printf("\n[GIET ERROR] in _sys_fbf_cma_alloc() : nbufs larger than 256\n"); return SYSCALL_ILLEGAL_ARGUMENT; } // loop on all threads to register channel in thread contexts unsigned int tid; for ( tid = first ; tid < (first + threads) ; tid++ ) { unsigned int y_size = header->y_size; unsigned int cid = thread[tid].clusterid; unsigned int x = cid / y_size; unsigned int y = cid % 
y_size; unsigned int p = thread[tid].proclocid; unsigned int ltid = thread[tid].ltid; static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p]; psched->context[ltid].slot[CTX_CMA_FB_ID] = channel; } unsigned int vaddr; unsigned int flags; // compute frame buffer physical addresses vaddr = (unsigned int)SEG_FBF_BASE; unsigned long long fbf_buf_paddr = _v2p_translate( vaddr , &flags ); // initialize the FBF chbuf // we don't register a status address in the fbf_desc, because // the CMA does not test the status for the frame buffer (no synchro) _fbf_ker_chbuf.nbufs = nbufs; _fbf_ker_chbuf.fbf_desc = (((fbf_buf_paddr & 0xFFFFFFFFFFFULL) >> 6 ) << 26); // register FBF chbuf physical address vaddr = (unsigned int)(&_fbf_ker_chbuf); _fbf_chbuf_paddr = _v2p_translate( vaddr , &flags ); #if GIET_DEBUG_FBF_CMA _printf("\n[FBF_CMA DEBUG] _sys_fbf_cma_alloc()\n" " - channel = %d\n" " - vaddr(_ker_fbf_chbuf) = %x\n" " - paddr(_ker_fbf_chbuf) = %l\n" " - nbufs = %d\n" " - fbf_desc = %l\n", channel , vaddr , _fbf_chbuf_paddr , nbufs , _fbf_ker_chbuf.fbf_desc ); #endif return SYSCALL_OK; } // end sys_fbf_cma_alloc() ////////////////////////// int _sys_fbf_cma_release() // Not a syscall : used by _ctx_kill_thread() { unsigned int channel = _get_context_slot( CTX_CMA_FB_ID ); unsigned int trdid = _get_thread_trdid(); if ( channel >= NB_CMA_CHANNELS ) { _printf("\n[GIET_ERROR] in _sys_fbf_cma_release() : " "CMA_FB channel already released for thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } if ( _cma_channel_alloc[channel] == 1 ) // the calling thread is the last user { // stop the CMA transfer _sys_fbf_cma_stop(); // reset the CMA channel allocator _cma_channel_alloc[channel] = 0; } else // not the last user { // atomically decrement the CMA channel allocator _atomic_increment( &_cma_channel_alloc[channel] , -1 ); } // reset CTX_CMA_FB_ID slot in calling thread context _set_context_slot( CTX_CMA_FB_ID, 0xFFFFFFFF ); return SYSCALL_OK; } // end 
_sys_fbf_cma_release()

///////////////////////////////////////////////////
// _sys_fbf_cma_init_buf()
// Registers one user buffer and its status word (both 64-bytes aligned,
// located in the same cluster) in entry "index" of the kernel FBF chbuf.
// The caller must own both the FBF and a CMA channel.
int _sys_fbf_cma_init_buf( unsigned int index,
                           void*        buf_vaddr,
                           void*        sts_vaddr )
{
    unsigned int       vaddr;       // virtual address
    unsigned int       flags;       // for _v2p_translate()
    unsigned long long buf_paddr;   // user buffer physical address
    unsigned long long sts_paddr;   // user status physical address

    // get calling thread scheduler, ltid and trdid
    static_scheduler_t*  psched = _get_sched();
    unsigned int         ltid   = _get_thread_ltid();
    unsigned int         trdid  = _get_thread_trdid();

    // check FBF allocated
    if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 )
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
                "FBF not allocated to thread %x\n", trdid );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    // get channel index
    unsigned int channel = _get_context_slot( CTX_CMA_FB_ID );

    if ( channel >= NB_CMA_CHANNELS )
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
                "CMA channel non allocated to thread %x\n", trdid );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

#if GIET_DEBUG_FBF_CMA
_printf("\n[FBF_CMA DEBUG] _sys_fbf_cma_init_buf()\n"
        " - channel = %d / index = %d\n"
        " - buf vaddr = %x\n"
        " - sts vaddr = %x\n",
        channel, index,
        (unsigned int)buf_vaddr,
        (unsigned int)sts_vaddr );
#endif

    // checking index argument
    // NOTE(review): the message prints trdid, not the faulty index, and the
    // returned code is CHANNEL_NON_ALLOCATED rather than ILLEGAL_ARGUMENT —
    // TODO confirm intended
    if ( index >= _fbf_ker_chbuf.nbufs )
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
                "user buffer index too large %x\n", trdid );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    // checking user buffer and status addresses alignment
    // (64 bytes, required by the shifted descriptor encoding below)
    if ( ((unsigned int)buf_vaddr & 0x3F) || ((unsigned int)sts_vaddr & 0x3F) )
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
                "user buffer or status not aligned for thread %x\n", trdid );
        return SYSCALL_ADDRESS_NON_ALIGNED;
    }

    // Compute user buffer and status physical addresses
    vaddr = (unsigned int)buf_vaddr;
    buf_paddr = _v2p_translate( vaddr , &flags );
    if ((flags & PTE_U) == 0)
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
                "buffer not in user space for thread %x\n", trdid );
        return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE;
    }

    vaddr = (unsigned int)sts_vaddr;
    sts_paddr = _v2p_translate( vaddr , &flags );
    if ((flags & PTE_U) == 0)
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
                "status not in user space for thread %x\n", trdid);
        return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE;
    }

    // check user buffer and user status in same cluster
    // (they must share the same 40-bit address extension)
    if ( (buf_paddr & 0xFF00000000ULL) != (sts_paddr & 0xFF00000000ULL) )
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
                "user status and buffer not in same cluster for thread %x\n", trdid);
        return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE;
    }

    // initialize _fbf_ker_chbuf.usr_desc[index]
    // same packed format as the NIC chbuf descriptors: status bits[31:6] in
    // the 26 LSB, buffer bits[:6] in the next field, shifted by 26
    _fbf_ker_chbuf.usr_desc[index] = ((sts_paddr & 0xFFFFFFFFULL) >> 6) +
                                     (((buf_paddr & 0xFFFFFFFFFFULL) >> 6 ) << 26);

#if GIET_DEBUG_FBF_CMA
_printf(" - buf paddr = %l\n"
        " - sts paddr = %l\n"
        " - usr_desc[%d] = %l\n",
        buf_paddr,
        sts_paddr,
        index ,
        _fbf_ker_chbuf.usr_desc[index] );
#endif

    return SYSCALL_OK;
}  // end sys_fbf_cma_init_buf()

////////////////////////
// _sys_fbf_cma_start()
// Starts the CMA transfer streaming the registered user buffers to the
// frame buffer. The caller must own the FBF and a CMA channel, and at
// least one buffer must have been registered.
int _sys_fbf_cma_start()
{
    // get calling thread scheduler, ltid and trdid
    static_scheduler_t*  psched = _get_sched();
    unsigned int         ltid   = _get_thread_ltid();
    unsigned int         trdid  = _get_thread_trdid();

    // check FBF allocated
    if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 )
    {
        _printf("\n[GIET ERROR] in _sys_fbf_release() : "
                "FBF not allocated to thread %x\n", trdid );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    // get CMA channel index
    unsigned int channel = _get_context_slot( CTX_CMA_FB_ID );

    if ( channel >= NB_CMA_CHANNELS )
    {
        _printf("\n[GIET ERROR] in _fbf_cma_start() : "
                "CMA channel non allocated\n");
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    // check buffers initialization
    if ( _fbf_ker_chbuf.nbufs == 0 )
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_start(): "
                "FBF chbuf not initialized for thread %x\n", trdid );
        return SYSCALL_MISSING_INITIALISATION;
    }

    // synchronize FBF chbuf that will be read by CMA peripheral
    if (
USE_IOB ) { // SYNC request for fbf_chbuf descriptor _mmc_sync( _fbf_chbuf_paddr , sizeof( fbf_chbuf_t ) ); } // start CMA transfer unsigned long long paddr = _fbf_chbuf_paddr; unsigned int dst_chbuf_paddr_lsb = (unsigned int)(paddr & 0xFFFFFFFF); unsigned int dst_chbuf_paddr_ext = (unsigned int)(paddr >> 32); unsigned int src_chbuf_paddr_lsb = dst_chbuf_paddr_lsb + 8; unsigned int src_chbuf_paddr_ext = dst_chbuf_paddr_ext; #if GIET_DEBUG_FBF_CMA _printf("\n[FBF_CMA DEBUG] _sys_fbf_cma_start()\n" " - src_chbuf_paddr_lsb = %x\n" " - src_chbuf_paddr_ext = %x\n" " - src_chbuf_nbufs = %d\n" " - dst_chbuf_paddr_lsb = %x\n" " - dst_chbuf_paddr_ext = %x\n" " - dst_chbuf_nbufs = 1 \n" " - buffer_size = %d\n", src_chbuf_paddr_lsb, src_chbuf_paddr_ext, _fbf_ker_chbuf.nbufs, dst_chbuf_paddr_lsb, dst_chbuf_paddr_ext, FBUF_X_SIZE * FBUF_Y_SIZE ); #endif _cma_set_register( channel, CHBUF_SRC_DESC , src_chbuf_paddr_lsb ); _cma_set_register( channel, CHBUF_SRC_EXT , src_chbuf_paddr_ext ); _cma_set_register( channel, CHBUF_SRC_NBUFS, _fbf_ker_chbuf.nbufs ); _cma_set_register( channel, CHBUF_DST_DESC , dst_chbuf_paddr_lsb ); _cma_set_register( channel, CHBUF_DST_EXT , dst_chbuf_paddr_ext ); _cma_set_register( channel, CHBUF_DST_NBUFS, 1 ); _cma_set_register( channel, CHBUF_BUF_SIZE , FBUF_X_SIZE*FBUF_Y_SIZE ); _cma_set_register( channel, CHBUF_PERIOD , 1000 ); _cma_set_register( channel, CHBUF_RUN , MODE_NO_DST_SYNC ); return SYSCALL_OK; } // end _sys_fbf_cma_start() //////////////////////////////////////////// int _sys_fbf_cma_check( unsigned int index ) { // get calling thread scheduler, ltid and trdid static_scheduler_t* psched = _get_sched(); unsigned int ltid = _get_thread_ltid(); unsigned int trdid = _get_thread_trdid(); // check FBF allocated if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 ) { _printf("\n[GIET ERROR] in _sys_fbf_cma_check() : " "FBF not allocated to thread %x\n", trdid ); return SYSCALL_CHANNEL_NON_ALLOCATED; } // get channel index 
unsigned int channel = _get_context_slot( CTX_CMA_FB_ID );
    if ( channel >= NB_CMA_CHANNELS )
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_check() : "
                "CMA channel non allocated to thread %x\n", trdid );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    // check buffer index
    if ( index >= _fbf_ker_chbuf.nbufs )
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_check() : "
                "buffer index too large for thread %x\n", trdid );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    // compute user buffer status physical address from the packed
    // usr_desc descriptor (masks/shifts match the packing done in
    // _sys_fbf_cma_init_buf())
    unsigned long long usr_sts_paddr;
    fbf_chbuf_t* pdesc = &_fbf_ker_chbuf;

    usr_sts_paddr = ((pdesc->usr_desc[index] & 0xFFF0000000000000ULL) >> 20) +
                    ((pdesc->usr_desc[index] & 0x3FFFFFFULL) << 6);

#if GIET_DEBUG_FBF_CMA
_printf("\n[FBF_CMA DEBUG] enters _sys_fbf_cma_check()\n"
        " - cma channel = %d\n"
        " - buffer index = %d\n"
        " - usr_desc value = %l\n"
        " - fbf_desc value = %l\n"
        " - usr status paddr = %l\n",
        channel,
        index,
        _fbf_ker_chbuf.usr_desc[index],
        _fbf_ker_chbuf.fbf_desc,
        usr_sts_paddr );
#endif

    // waiting user buffer released by the CMA component
    unsigned int full;
    do
    {
        // INVAL L2 cache copy of user buffer status
        // because it is modified in RAM by the CMA component
        _mmc_inval( usr_sts_paddr , 4 );
        full = _physical_read( usr_sts_paddr );
    }
    while ( full );

    return SYSCALL_OK;
} // end _sys_fbf_cma_check()

////////////////////////////////////////////////////////////////////////////
// Marks the user buffer <index> as full for the CMA peripheral, after
// synchronizing (SYNC) its content to external RAM. The calling thread
// must own the FBF and have a CMA channel allocated.
////////////////////////////////////////////////////////////////////////////
int _sys_fbf_cma_display( unsigned int index )
{
    // get calling thread scheduler, ltid and trdid
    static_scheduler_t* psched = _get_sched();
    unsigned int        ltid   = _get_thread_ltid();
    unsigned int        trdid  = _get_thread_trdid();

    // check FBF allocated
    if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 )
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_display() : "
                "FBF not allocated to thread %x\n", trdid );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    // get channel index
    unsigned int channel = _get_context_slot( CTX_CMA_FB_ID );
    if ( channel >= NB_CMA_CHANNELS )
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_display() : " "CMA
channel non allocated to thread %x\n", trdid );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    // check buffer index
    if ( index >= _fbf_ker_chbuf.nbufs )
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_display() : "
                "buffer index too large for thread %x\n", trdid );
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    // compute user buffer and status physical addresses from the packed
    // usr_desc descriptor (see _sys_fbf_cma_init_buf() for the format)
    unsigned long long usr_sts_paddr;
    unsigned long long usr_buf_paddr;
    fbf_chbuf_t* pdesc = &_fbf_ker_chbuf;

    usr_sts_paddr = ((pdesc->usr_desc[index] & 0xFFF0000000000000ULL) >> 20) +
                    ((pdesc->usr_desc[index] & 0x3FFFFFFULL) << 6);

    usr_buf_paddr = ((pdesc->usr_desc[index] & 0xFFFFFFFFFC000000ULL) >> 20);

#if GIET_DEBUG_FBF_CMA
_printf("\n[FBF_CMA DEBUG] enters _sys_fbf_cma_display()\n"
        " - cma channel = %d\n"
        " - buffer index = %d\n"
        " - usr buffer paddr = %l\n"
        " - usr status paddr = %l\n",
        channel,
        index,
        usr_buf_paddr,
        usr_sts_paddr );
#endif

    // SYNC request, because this buffer will be read from XRAM by the CMA component
    _mmc_sync( usr_buf_paddr , FBUF_X_SIZE * FBUF_Y_SIZE );

    // set user buffer status
    _physical_write( usr_sts_paddr, 0x1 );

    // SYNC request, because this status will be read from XRAM by the CMA component
    _mmc_sync( usr_sts_paddr, 4 );

    return SYSCALL_OK;
} // end _sys_fbf_cma_display()

////////////////////////////////////////////////////////////////////////////
// Deactivates the CMA channel allocated to the frame buffer.
////////////////////////////////////////////////////////////////////////////
int _sys_fbf_cma_stop()
{
    // get channel index
    unsigned int channel = _get_context_slot( CTX_CMA_FB_ID );
    if ( channel >= NB_CMA_CHANNELS )
    {
        _printf("\n[GIET ERROR] in _sys_fbf_cma_stop() : CMA channel non allocated\n");
        return SYSCALL_CHANNEL_NON_ALLOCATED;
    }

    // deactivate CMA channel
    _cma_set_register( channel, CHBUF_RUN, MODE_IDLE );

    return SYSCALL_OK;
} // end _sys_fbf_cma_stop()

#endif // if NB_CMA_CHANNELS

//////////////////////////////////////////////////////////////////////////////
//           Miscellaneous syscall handlers
//////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////////
// Handler for an undefined syscall number: reports the faulty EPC value.
//////////////////////////////////////////////////////////////////////////////
int _sys_ukn()
{
    _printf("\n[GIET ERROR] Undefined System Call / EPC = %x\n", _get_epc()
); return SYSCALL_UNDEFINED_SYSTEM_CALL; } //////////////////////////////////// int _sys_proc_xyp( unsigned int* x, unsigned int* y, unsigned int* p ) { unsigned int gpid = _get_procid(); // global processor index from CPO register *x = (gpid >> (Y_WIDTH + P_WIDTH)) & ((1<> P_WIDTH) & ((1<x_size; unsigned int ymax = header->y_size; unsigned int procs = cluster[0].procs; // check the (ymax-1) lower rows for ( y = 0 ; y < ymax-1 ; y++ ) { for ( x = 0 ; x < xmax ; x++ ) { if (cluster[x*ymax+y].procs != procs ) okmin = 0; } } // check the upper row for ( x = 0 ; x < xmax ; x++ ) { if (cluster[x*ymax+ymax-1].procs != procs ) okmax = 0; } // return values if ( okmin && okmax ) { *x_size = xmax; *y_size = ymax; *nprocs = procs; } else if ( okmin ) { *x_size = xmax; *y_size = ymax-1; *nprocs = procs; } else { *x_size = 0; *y_size = 0; *nprocs = 0; } return SYSCALL_OK; } /////////////////////////////////////////////////////// int _sys_vseg_get_vbase( char* vspace_name, char* vseg_name, unsigned int* vbase ) { mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t * vspace = _get_vspace_base(header); mapping_vseg_t * vseg = _get_vseg_base(header); unsigned int vspace_id; unsigned int vseg_id; // scan vspaces for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++) { if (_strncmp( vspace[vspace_id].name, vspace_name, 31) == 0) { // scan vsegs for (vseg_id = vspace[vspace_id].vseg_offset; vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs); vseg_id++) { if (_strncmp(vseg[vseg_id].name, vseg_name, 31) == 0) { *vbase = vseg[vseg_id].vbase; return SYSCALL_OK; } } } } return SYSCALL_VSEG_NOT_FOUND; } ///////////////////////////////////////////////////////// int _sys_vseg_get_length( char* vspace_name, char* vseg_name, unsigned int* length ) { mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_vspace_t * vspace = _get_vspace_base(header); mapping_vseg_t * vseg = _get_vseg_base(header); unsigned int 
vspace_id; unsigned int vseg_id; // scan vspaces for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++) { if (_strncmp( vspace[vspace_id].name, vspace_name, 31) == 0) { // scan vsegs for (vseg_id = vspace[vspace_id].vseg_offset; vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs); vseg_id++) { if (_strncmp(vseg[vseg_id].name, vseg_name, 31) == 0) { *length = vseg[vseg_id].length; return SYSCALL_OK; } } } } return SYSCALL_VSEG_NOT_FOUND; } //////////////////////////////////////// int _sys_xy_from_ptr( void* ptr, unsigned int* x, unsigned int* y ) { unsigned int flags; unsigned long long paddr = _v2p_translate( (unsigned int)ptr , &flags ); *x = (paddr>> (32 + Y_WIDTH)) & ((1 << X_WIDTH) - 1); *y = (paddr>>32) & ((1 << Y_WIDTH) - 1); return SYSCALL_OK; } ///////////////////////////////////////// int _sys_heap_info( unsigned int* vaddr, unsigned int* length, unsigned int x, unsigned int y ) { // checking parameters if ( (x >= X_SIZE) || (y >= Y_SIZE) ) { *vaddr = 0; *length = 0; _printf("\n[GIET ERROR] in _sys_heap_info() : " "illegal (%d,%d) coordinates\n", x , y ); return SYSCALL_ILLEGAL_CLUSTER_COORDINATES; } mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE; mapping_thread_t * thread = _get_thread_base(header); mapping_vseg_t * vseg = _get_vseg_base(header); mapping_vspace_t * vspace = _get_vspace_base(header); unsigned int thread_id; unsigned int vspace_id; unsigned int vseg_id = 0xFFFFFFFF; // get calling thread vspace index vspace_id = _get_context_slot(CTX_VSID_ID); // scan all threads in vspace to find one in clyster[x,y] unsigned int min = vspace[vspace_id].thread_offset ; unsigned int max = min + vspace[vspace_id].threads ; for ( thread_id = min ; thread_id < max ; thread_id++ ) { if ( thread[thread_id].clusterid == (x * Y_SIZE + y) ) { vseg_id = thread[thread_id].heap_vseg_id; break; } } // analysing the vseg_id if ( vseg_id != 0xFFFFFFFF ) { *vaddr = vseg[vseg_id].vbase; *length = vseg[vseg_id].length; } else { 
*vaddr = 0;
        *length = 0;
        // no heap found in the target cluster: report and return (0,0)
        _printf("error in _sys_heap_info() : no heap in cluster (%d,%d)\n", x , y );
    }
    // NOTE(review): SYSCALL_OK is returned even when no heap was found;
    // callers must check *length != 0.
    return SYSCALL_OK;
} // end _sys_heap_info()

// Local Variables:
// tab-width: 4
// c-basic-offset: 4
// c-file-offsets:((innamespace . 0)(inline-open . 0))
// indent-tabs-mode: nil
// End:

// vim: filetype=c:expandtab:shiftwidth=4:tabstop=4:softtabstop=4