- Timestamp: Nov 21, 2015, 2:28:31 PM
- Location: soft/giet_vm/giet_kernel
- Files: 4 edited
Legend: each file section below shows the change from the previous revision to r725; lines prefixed with "-" were removed, lines prefixed with "+" were added, and unprefixed lines are unchanged context.
soft/giet_vm/giet_kernel/ctx_handler.c
(r714 -> r725)

@@ -29 +29 @@
 extern fat_desc_t _fat;

-//////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////
 // This function is called by the _ctx_switch() function.
 // It desactivates a thread that received a KILL signal.
 // We must release all ressources allocated to the thread
-// before the actual desactivation, that uses NORUN_MASK_THREAD.
-//////////////////////////////////////////////////////////////////
+// before the actual desactivation, that set the NORUN_MASK_THREAD
+// bit in the thread context.
+//////////////////////////////////////////////////////////////////////////////////
 static void _ctx_kill_thread( unsigned int x,
                               unsigned int y,
soft/giet_vm/giet_kernel/kernel_init.c
(r709 -> r725)

@@ -379 +379 @@
 ////////////////////////////////////////////////////////////////////////////

-    if (threads == 0) _printf("\n[GIET WARNING] No thread allocated to P[%d,%d,%d]\n",
-                              x, y, p );
-
     // default value for ltid
     ltid = IDLE_THREAD_INDEX;
soft/giet_vm/giet_kernel/sys_handler.c
(r719 -> r725)

@@ -160 +160 @@
 __attribute__((section(".kdata")))
-ker_chbuf_t  _nic_ker_rx_chbuf[NB_NIC_CHANNELS] __attribute__((aligned(64)));
+nic_chbuf_t  _nic_ker_rx_chbuf[NB_NIC_CHANNELS] __attribute__((aligned(64)));

 __attribute__((section(".kdata")))
-ker_chbuf_t  _nic_ker_tx_chbuf[NB_NIC_CHANNELS] __attribute__((aligned(64)));
+nic_chbuf_t  _nic_ker_tx_chbuf[NB_NIC_CHANNELS] __attribute__((aligned(64)));

 ////////////////////////////////////////////////////////////////////////////
-// FBF related chbuf descriptors array, indexed by the CMA channel index.
-// Physical addresses of these chbuf descriptors required for L2 cache sync.
-// FBF status
+// FBF related chbuf
+// The physical address of this chbuf is required for L2 cache sync.
 ////////////////////////////////////////////////////////////////////////////

 __attribute__((section(".kdata")))
-fbf_chbuf_t _fbf_chbuf[NB_CMA_CHANNELS] __attribute__((aligned(64)));
+fbf_chbuf_t _fbf_ker_chbuf __attribute__((aligned(64)));

 __attribute__((section(".kdata")))
-unsigned long long _fbf_chbuf_paddr[NB_CMA_CHANNELS];
-
-__attribute__((section(".kdata")))
-buffer_status_t _fbf_status[NB_CMA_CHANNELS] __attribute__((aligned(64)));
+unsigned long long _fbf_chbuf_paddr;

 ////////////////////////////////////////////////////////////////////////////

@@ -203 +199 @@
     &_sys_fbf_cma_display,          /* 0x0D */
     &_sys_fbf_cma_stop,             /* 0x0E */
-    &_sys_ukn,                      /* 0x0F */
+    &_sys_fbf_cma_check,            /* 0x0F */

     &_sys_applications_status,      /* 0x10 */

@@ -1574 +1570 @@
     unsigned int trdid = _get_thread_trdid();

-    // check no TTY already allocated to calling thread
-    if ( _get_context_slot( CTX_TTY_ID ) < NB_TTY_CHANNELS )
-    {
-        _printf("\n[GIET_ERROR] in _sys_tty_alloc() : "
-                "TTY channel already allocated to thread %x\n", trdid );
-        return SYSCALL_CHANNEL_ALREADY_ALLOCATED;
-    }
-
     mapping_header_t  *header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
     mapping_vspace_t  *vspace = _get_vspace_base(header);

@@ -1963 +1951 @@
                 "xmax or ymax argument too large for thread %x\n", trdid );

-        return SYSCALL_ILLEGAL_XY_ARGUMENTS;
+        return SYSCALL_ILLEGAL_ARGUMENT;
     }

@@ -2201 +2189 @@
     // sync the kernel chbuf in L2 after write in L2
-    _mmc_sync( ker_chbuf_pbase, sizeof( ker_chbuf_t ) );
+    _mmc_sync( ker_chbuf_pbase, sizeof( nic_chbuf_t ) );

@@ -2372 +2360 @@
     // start CMA transfer
     _cma_set_register( cma_channel, CHBUF_BUF_SIZE , NIC_CONTAINER_SIZE );
     _cma_set_register( cma_channel, CHBUF_PERIOD   , 0 );            // OUT_OF_ORDER
-    _cma_set_register( cma_channel, CHBUF_RUN      , 1 );
+    _cma_set_register( cma_channel, CHBUF_RUN      , MODE_NORMAL );

     // activates NIC channel

@@ -2416 +2404 @@
     // get kernel chbuf virtual address
-    ker_chbuf_t* ker_chbuf;
+    nic_chbuf_t* ker_chbuf;
     if ( is_rx ) ker_chbuf = &_nic_ker_rx_chbuf[channel];
     else         ker_chbuf = &_nic_ker_tx_chbuf[channel];

@@ -2436 +2424 @@
             cx , cy , xmax , ymax );

-        return SYSCALL_ILLEGAL_XY_ARGUMENTS;
+        return SYSCALL_ILLEGAL_ARGUMENT;
     }

@@ -2586 +2574 @@
     // desactivates the CMA channel
-    _cma_set_register( cma_channel, CHBUF_RUN , 0 );
+    _cma_set_register( cma_channel, CHBUF_RUN , MODE_IDLE );

     // wait until CMA channel IDLE

@@ -2853 +2841 @@
 }

-////////////////////////
-int _sys_fbf_cma_alloc()
-{
+////////////////////////////////////////////
+int _sys_fbf_cma_alloc( unsigned int nbufs )
+{
+    // compute trdid and vsid for the calling thread
+    unsigned int vsid  = _get_context_slot( CTX_VSID_ID );
     unsigned int trdid = _get_thread_trdid();

@@ -2862 +2852 @@
         _printf("\n[GIET ERROR] in _sys_fbf_cma_alloc() : "
                 "CMA channel already allocated for thread %x\n", trdid );
-
         return SYSCALL_CHANNEL_ALREADY_ALLOCATED;
     }
+
+    // compute number of threads in vspace from mapping
+    mapping_header_t *header  = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
+    mapping_vspace_t *vspace  = _get_vspace_base(header);
+    mapping_thread_t *thread  = _get_thread_base(header);
+    unsigned int      first   = vspace[vsid].thread_offset;
+    unsigned int      threads = vspace[vsid].threads;

     // get a CMA channel

@@ -2871 +2867 @@
     {
         unsigned int* palloc = &_cma_channel_alloc[channel];
-        if ( _atomic_test_and_set( palloc , 1 ) == 0 ) break;
-    }
+        if ( _atomic_test_and_set( palloc , threads ) == 0 ) break;
+    }
+
     if ( channel >= NB_CMA_CHANNELS )
     {
         _printf("\n[GIET ERROR] in _sys_fbf_cma_alloc() : no CMA channel available\n");
-
         return SYSCALL_NO_CHANNEL_AVAILABLE;
     }
-    else
-    {
-        _set_context_slot( CTX_CMA_FB_ID, channel );
-
-        return SYSCALL_OK;
-    }
+
+    // check nbufs argument
+    if ( nbufs > 256 )
+    {
+        _printf("\n[GIET ERROR] in _sys_fbf_cma_alloc() : nbufs larger than 256\n");
+        return SYSCALL_ILLEGAL_ARGUMENT;
+    }
+
+    // loop on all threads to register channel in thread contexts
+    unsigned int tid;
+    for ( tid = first ; tid < (first + threads) ; tid++ )
+    {
+        unsigned int y_size = header->y_size;
+        unsigned int cid    = thread[tid].clusterid;
+        unsigned int x      = cid / y_size;
+        unsigned int y      = cid % y_size;
+        unsigned int p      = thread[tid].proclocid;
+        unsigned int ltid   = thread[tid].ltid;
+        static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p];
+        psched->context[ltid].slot[CTX_CMA_FB_ID] = channel;
+    }
+
+    unsigned int vaddr;
+    unsigned int flags;
+
+    // compute frame buffer physical addresses
+    vaddr = (unsigned int)SEG_FBF_BASE;
+    unsigned long long fbf_buf_paddr = _v2p_translate( vaddr , &flags );
+
+    // initialize the FBF chbuf
+    // we don't register a status address in the fbf_desc, because
+    // the CMA does not test the status for the frame buffer (no synchro)
+    _fbf_ker_chbuf.nbufs    = nbufs;
+    _fbf_ker_chbuf.fbf_desc = (((fbf_buf_paddr & 0xFFFFFFFFFFFULL) >> 6 ) << 26);
+
+    // register FBF chbuf physical address
+    vaddr = (unsigned int)(&_fbf_ker_chbuf);
+    _fbf_chbuf_paddr = _v2p_translate( vaddr , &flags );
+
+#if GIET_DEBUG_FBF_CMA
+_printf("\n[FBF_CMA DEBUG] _sys_fbf_cma_alloc()\n"
+        " - channel                = %d\n"
+        " - vaddr(_ker_fbf_chbuf)  = %x\n"
+        " - paddr(_ker_fbf_chbuf)  = %l\n"
+        " - nbufs                  = %d\n"
+        " - fbf_desc               = %l\n",
+        channel , vaddr , _fbf_chbuf_paddr , nbufs , _fbf_ker_chbuf.fbf_desc );
+#endif
+
+    return SYSCALL_OK;
 } // end sys_fbf_cma_alloc()

@@ -2889 +2929 @@
 //////////////////////////
-int _sys_fbf_cma_release()  // Not a syscall
+int _sys_fbf_cma_release()  // Not a syscall : used by _ctx_kill_thread()
 {
     unsigned int channel = _get_context_slot( CTX_CMA_FB_ID );

@@ -2901 +2941 @@
     }

-    // stop CMA transfer
-    _sys_fbf_cma_stop();
-
-    // reset CTX_CMA_FB_ID for thread
+    if ( _cma_channel_alloc[channel] == 1 )   // the calling thread is the last user
+    {
+        // stop the CMA transfer
+        _sys_fbf_cma_stop();
+
+        // reset the CMA channel allocator
+        _cma_channel_alloc[channel] = 0;
+    }
+    else                                      // not the last user
+    {
+        // atomically decrement the CMA channel allocator
+        _atomic_increment( &_cma_channel_alloc[channel] , -1 );
+    }
+
+    // reset CTX_CMA_FB_ID slot in calling thread context
     _set_context_slot( CTX_CMA_FB_ID, 0xFFFFFFFF );

-    // release CMA channel
-    _cma_channel_alloc[channel] = 0;
-
     return SYSCALL_OK;
-}
+} // end _sys_fbf_cma_release()

@@ -2915 +2963 @@
 ///////////////////////////////////////////////////
-int _sys_fbf_cma_init_buf( void* buf0_vbase,
-                           void* buf1_vbase,
-                           void* sts0_vaddr,
-                           void* sts1_vaddr )
+int _sys_fbf_cma_init_buf( unsigned int index,
+                           void*        buf_vaddr,
+                           void*        sts_vaddr )
 {
     unsigned int       vaddr;          // virtual address
     unsigned int       flags;          // for _v2p_translate()
-    unsigned long long fbf_paddr;      // fbf physical address
-    unsigned long long fbf_sts_paddr;  // fbf status physical address
-    unsigned long long buf0_pbase;     // buffer 0 base physical address
-    unsigned long long sts0_paddr;     // buffer 0 status physical address
-    unsigned long long buf1_pbase;     // buffer 1 base physical address
-    unsigned long long sts1_paddr;     // buffer 1 status physical address
+    unsigned long long buf_paddr;      // user buffer physical address
+    unsigned long long sts_paddr;      // user status physical address

     // get calling thread scheduler, ltid and trdid

@@ -2955 +2998 @@
 #if GIET_DEBUG_FBF_CMA
 _printf("\n[FBF_CMA DEBUG] _sys_fbf_cma_init_buf()\n"
-        " - channel     = %d\n"
-        " - buf0 vbase  = %x\n"
-        " - buf1 vbase  = %x\n"
-        " - sts0 vaddr  = %x\n"
-        " - sts1 vaddr  = %x\n",
-        channel,
-        (unsigned int)buf0_vbase,
-        (unsigned int)buf1_vbase,
-        (unsigned int)sts0_vaddr,
-        (unsigned int)sts1_vaddr );
-#endif
-
-    // checking user buffers virtual addresses alignment
-    if ( ((unsigned int)buf0_vbase & 0x3F) ||
-         ((unsigned int)buf1_vbase & 0x3F) )
+        " - channel     = %d / index = %d\n"
+        " - buf vaddr   = %x\n"
+        " - sts vaddr   = %x\n",
+        channel, index,
+        (unsigned int)buf_vaddr,
+        (unsigned int)sts_vaddr );
+#endif
+
+    // checking index argument
+    if ( index >= _fbf_ker_chbuf.nbufs )
     {
         _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
-                "user buffer not aligned for thread %x\n", trdid );
+                "user buffer index too large %x\n", trdid );
+
+        return SYSCALL_CHANNEL_NON_ALLOCATED;
+    }
+
+    // checking user buffer and status addresses alignment
+    if ( ((unsigned int)buf_vaddr & 0x3F) || ((unsigned int)sts_vaddr & 0x3F) )
+    {
+        _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
+                "user buffer or status not aligned for thread %x\n", trdid );

         return SYSCALL_ADDRESS_NON_ALIGNED;
     }

-    // checking user buffers status virtual addresses alignment
-    if ( ((unsigned int)sts0_vaddr & 0x3F) ||
-         ((unsigned int)sts1_vaddr & 0x3F) )
+    // Compute user buffer and status physical addresses
+    vaddr = (unsigned int)buf_vaddr;
+    buf_paddr = _v2p_translate( vaddr , &flags );
+    if ((flags & PTE_U) == 0)
     {
         _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
-                "user status not aligned for thread %x\n", trdid );
-
-        return SYSCALL_ADDRESS_NON_ALIGNED;
-    }
-
-    // compute frame buffer physical address and initialize _fbf_chbuf[channel]
-    vaddr = (unsigned int)SEG_FBF_BASE;
-    fbf_paddr = _v2p_translate( vaddr , &flags );
-    vaddr = (unsigned int)&_fbf_status[channel];
-    fbf_sts_paddr = _v2p_translate( vaddr , &flags );
-
-    _fbf_chbuf[channel].fbf_desc =
-        (unsigned long long) ((fbf_sts_paddr & 0xFFFFFFFFULL) >> 6) +
-        (((fbf_paddr & 0xFFFFFFFFULL) >> 6 ) << 26);
-
-    // Compute user buffer 0 physical addresses and intialize _fbf_chbuf[channel]
-    vaddr = (unsigned int)buf0_vbase;
-    buf0_pbase = _v2p_translate( vaddr , &flags );
+                "buffer not in user space for thread %x\n", trdid );
+
+        return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE;
+    }
+
+    vaddr = (unsigned int)sts_vaddr;
+    sts_paddr = _v2p_translate( vaddr , &flags );
     if ((flags & PTE_U) == 0)
     {
         _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
-                "buf0 not in user space for thread %x\n", trdid);
+                "status not in user space for thread %x\n", trdid);

         return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE;
     }

-    vaddr = (unsigned int)sts0_vaddr;
-    sts0_paddr = _v2p_translate( vaddr , &flags );
-    if ((flags & PTE_U) == 0)
+    // check user buffer and user status in same cluster
+    if ( (buf_paddr & 0xFF00000000ULL) != (sts_paddr & 0xFF00000000ULL) )
     {
         _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
-                "sts0 not in user space for thread %x\n", trdid);
+                "user status and buffer not in same cluster for thread %x\n", trdid);

         return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE;
     }

-    _fbf_chbuf[channel].buf0_desc =
-        (unsigned long long) ((sts0_paddr & 0xFFFFFFFFULL) >> 6) +
-        (((buf0_pbase & 0xFFFFFFFFULL) >> 6 ) << 26);
-
-    // Compute user buffer 1 physical addresses and intialize _fbf_chbuf[channel]
-    vaddr = (unsigned int)buf1_vbase;
-    buf1_pbase = _v2p_translate( vaddr , &flags );
-    if ((flags & PTE_U) == 0)
-    {
-        _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
-                "buf1 not in user space for thread %x\n", trdid );
-
-        return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE;
-    }
-
-    vaddr = (unsigned int)sts1_vaddr;
-    sts1_paddr = _v2p_translate( vaddr , &flags );
-    if ((flags & PTE_U) == 0)
-    {
-        _printf("\n[GIET ERROR] in _sys_fbf_cma_init_buf() : "
-                "sts1 not in user space for thread %x\n", trdid);
-
-        return SYSCALL_ADDRESS_NON_USER_ACCESSIBLE;
-    }
-
-    _fbf_chbuf[channel].buf1_desc =
-        (unsigned long long) ((sts1_paddr & 0xFFFFFFFFULL) >> 6) +
-        (((buf1_pbase & 0xFFFFFFFFULL) >> 6 ) << 26);
-
-    // Compute and register physical adress of the fbf_chbuf descriptor
-    vaddr = (unsigned int)&_fbf_chbuf[channel];
-    _fbf_chbuf_paddr[channel] = _v2p_translate( vaddr , &flags );
-
+    // initialize _fbf_ker_chbuf.usr_desc[index]
+    _fbf_ker_chbuf.usr_desc[index] = ((sts_paddr & 0xFFFFFFFFULL) >> 6) +
+                                     (((buf_paddr & 0xFFFFFFFFFFULL) >> 6 ) << 26);
+
 #if GIET_DEBUG_FBF_CMA
-_printf(" - fbf pbase         = %l\n"
-        " - fbf status paddr  = %l\n"
-        " - buf0 pbase        = %l\n"
-        " - buf0 status paddr = %l\n"
-        " - buf1 pbase        = %l\n"
-        " - buf0 status paddr = %l\n"
-        " - chbuf pbase       = %l\n",
-        fbf_paddr,
-        fbf_sts_paddr,
-        buf0_pbase,
-        sts0_paddr,
-        buf1_pbase,
-        sts1_paddr,
-        _fbf_chbuf_paddr[channel] );
+_printf(" - buf paddr    = %l\n"
+        " - sts paddr    = %l\n"
+        " - usr_desc[%d] = %l\n",
+        buf_paddr,
+        sts_paddr,
+        index , _fbf_ker_chbuf.usr_desc[index] );
 #endif

@@ -3072 +3071 @@
 } // end sys_fbf_cma_init_buf()

-////////////////////////////////////////////
-int _sys_fbf_cma_start( unsigned int length )
+////////////////////////
+int _sys_fbf_cma_start()
 {
     // get calling thread scheduler, ltid and trdid

@@ -3089 +3088 @@
     }

-    // get channel index
+    // get CMA channel index
     unsigned int channel = _get_context_slot( CTX_CMA_FB_ID );

@@ -3101 +3100 @@
     // check buffers initialization
-    if ( ( _fbf_chbuf[channel].buf0_desc == 0x0ULL ) &&
-         ( _fbf_chbuf[channel].buf1_desc == 0x0ULL ) &&
-         ( _fbf_chbuf[channel].fbf_desc  == 0x0ULL ) )
-    {
-        _printf("\n[GIET ERROR] in _sys_fbf_cma_start(): initialization not done\n");
+    if ( _fbf_ker_chbuf.nbufs == 0 )
+    {
+        _printf("\n[GIET ERROR] in _sys_fbf_cma_start(): "
+                "FBF chbuf not initialized for thread %x\n", trdid );

         return SYSCALL_MISSING_INITIALISATION;
     }

-    // initializes buffer length
-    _fbf_chbuf[channel].length = length;
-
+    // synchronize FBF chbuf that will be read by CMA peripheral
     if ( USE_IOB )
     {
         // SYNC request for fbf_chbuf descriptor
-        _mmc_sync( _fbf_chbuf_paddr[channel], sizeof( fbf_chbuf_t ) );
+        _mmc_sync( _fbf_chbuf_paddr , sizeof( fbf_chbuf_t ) );
     }

     // start CMA transfer
-    unsigned long long paddr = _fbf_chbuf_paddr[channel];
-    unsigned int src_chbuf_paddr_lsb = (unsigned int)(paddr & 0xFFFFFFFF);
-    unsigned int src_chbuf_paddr_ext = (unsigned int)(paddr >> 32);
-    unsigned int dst_chbuf_paddr_lsb = src_chbuf_paddr_lsb + 16;
-    unsigned int dst_chbuf_paddr_ext = src_chbuf_paddr_ext;
+    unsigned long long paddr = _fbf_chbuf_paddr;
+    unsigned int dst_chbuf_paddr_lsb = (unsigned int)(paddr & 0xFFFFFFFF);
+    unsigned int dst_chbuf_paddr_ext = (unsigned int)(paddr >> 32);
+    unsigned int src_chbuf_paddr_lsb = dst_chbuf_paddr_lsb + 8;
+    unsigned int src_chbuf_paddr_ext = dst_chbuf_paddr_ext;
+
+#if GIET_DEBUG_FBF_CMA
+_printf("\n[FBF_CMA DEBUG] _sys_fbf_cma_start()\n"
+        " - src_chbuf_paddr_lsb = %x\n"
+        " - src_chbuf_paddr_ext = %x\n"
+        " - src_chbuf_nbufs     = %d\n"
+        " - dst_chbuf_paddr_lsb = %x\n"
+        " - dst_chbuf_paddr_ext = %x\n"
+        " - dst_chbuf_nbufs     = 1 \n"
+        " - buffer_size         = %d\n",
+        src_chbuf_paddr_lsb,
+        src_chbuf_paddr_ext,
+        _fbf_ker_chbuf.nbufs,
+        dst_chbuf_paddr_lsb,
+        dst_chbuf_paddr_ext,
+        FBUF_X_SIZE * FBUF_Y_SIZE );
+#endif

     _cma_set_register( channel, CHBUF_SRC_DESC , src_chbuf_paddr_lsb );
     _cma_set_register( channel, CHBUF_SRC_EXT  , src_chbuf_paddr_ext );
-    _cma_set_register( channel, CHBUF_SRC_NBUFS, 2 );
+    _cma_set_register( channel, CHBUF_SRC_NBUFS, _fbf_ker_chbuf.nbufs );
     _cma_set_register( channel, CHBUF_DST_DESC , dst_chbuf_paddr_lsb );
     _cma_set_register( channel, CHBUF_DST_EXT  , dst_chbuf_paddr_ext );
     _cma_set_register( channel, CHBUF_DST_NBUFS, 1 );
-    _cma_set_register( channel, CHBUF_BUF_SIZE , length );
+    _cma_set_register( channel, CHBUF_BUF_SIZE , FBUF_X_SIZE*FBUF_Y_SIZE );
     _cma_set_register( channel, CHBUF_PERIOD   , 300 );
-    _cma_set_register( channel, CHBUF_RUN      , 1 );
+    _cma_set_register( channel, CHBUF_RUN      , MODE_NO_DST_SYNC );

     return SYSCALL_OK;

 } // end _sys_fbf_cma_start()

@@ -3141 +3154 @@
-/////////////////////////////////////////////////////
-int _sys_fbf_cma_display( unsigned int buffer_index )
-{
-    volatile unsigned int full = 1;
-
+////////////////////////////////////////////
+int _sys_fbf_cma_check( unsigned int index )
+{
     // get calling thread scheduler, ltid and trdid
     static_scheduler_t* psched = _get_sched();

@@ -3153 +3164 @@
     if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 )
     {
-        _printf("\n[GIET ERROR] in _sys_fbf_release() : "
+        _printf("\n[GIET ERROR] in _sys_fbf_cma_check() : "
                 "FBF not allocated to thread %x\n", trdid );

@@ -3164 +3175 @@
     if ( channel >= NB_CMA_CHANNELS )
     {
-        _printf("\n[GIET ERROR] in _sys_fbf_cma_display() : "
-                "CMA channel non allocated \n");
+        _printf("\n[GIET ERROR] in _sys_fbf_cma_check() : "
+                "CMA channel non allocated to thread %x\n", trdid );

         return SYSCALL_CHANNEL_NON_ALLOCATED;
     }

-    // get fbf_chbuf descriptor pointer
-    fbf_chbuf_t* pdesc = &_fbf_chbuf[channel];
+    // check buffer index
+    if ( index >= _fbf_ker_chbuf.nbufs )
+    {
+        _printf("\n[GIET ERROR] in _sys_fbf_cma_check() : "
+                "buffer index too large for thread %x\n", trdid );
+
+        return SYSCALL_CHANNEL_NON_ALLOCATED;
+    }
+
+    // compute user buffer status physical addresses
+    unsigned long long usr_sts_paddr;
+    fbf_chbuf_t* pdesc = &_fbf_ker_chbuf;
+    usr_sts_paddr = ((pdesc->usr_desc[index] & 0xFFF0000000000000ULL) >> 20) +
+                    ((pdesc->usr_desc[index] & 0x3FFFFFFULL) << 6);

 #if GIET_DEBUG_FBF_CMA
-_printf("\n[FBF_CMA DEBUG] enters _sys_fb_cma_display()\n"
+_printf("\n[FBF_CMA DEBUG] enters _sys_fbf_cma_check()\n"
         " - cma channel      = %d\n"
         " - buffer index     = %d\n"
-        " - buf0_desc value  = %l\n"
-        " - buf1_desc value  = %l\n"
-        " - fbf_desc value   = %l\n",
-        channel , buffer_index,
-        _fbf_chbuf[channel].buf0_desc,
-        _fbf_chbuf[channel].buf1_desc,
-        _fbf_chbuf[channel].fbf_desc );
-#endif
-
-    unsigned long long buf_sts_paddr;
-    unsigned long long buf_paddr;
-    unsigned long long fbf_sts_paddr;
-
-    if ( buffer_index == 0 )    // user buffer 0
-    {
-        buf_sts_paddr =
-            ((pdesc->buf0_desc & 0xFFF0000000000000ULL) >> 20) +  // compute address extension
-            ((pdesc->buf0_desc & 0x3FFFFFFULL) << 6);             // compute 32 LSB of the address
-
-        buf_paddr =
-            (pdesc->buf0_desc & 0xFFFFFFFFFC000000ULL) >> 20;     // compute the entire address
-    }
-    else                        // user buffer 1
-    {
-        buf_sts_paddr =
-            ((pdesc->buf1_desc & 0xFFF0000000000000ULL) >> 20) +
-            ((pdesc->buf1_desc & 0x3FFFFFFULL) << 6);
-
-        buf_paddr =
-            (pdesc->buf1_desc & 0xFFFFFFFFFC000000ULL) >> 20;
-    }
-
-    fbf_sts_paddr =
-        ((pdesc->fbf_desc & 0xFFF0000000000000ULL) >> 20) +
-        ((pdesc->fbf_desc & 0x3FFFFFFULL) << 6);
-
-#if GIET_DEBUG_FBF_CMA
-_printf(" - fbf status paddr = %l\n"
-        " - buf pbase        = %l\n"
-        " - buf status paddr = %l\n",
-        fbf_sts_paddr,
-        buf_paddr,
-        buf_sts_paddr );
+        " - usr_desc value   = %l\n"
+        " - fbf_desc value   = %l\n"
+        " - usr status paddr = %l\n",
+        channel,
+        index,
+        _fbf_ker_chbuf.usr_desc[index],
+        _fbf_ker_chbuf.fbf_desc,
+        usr_sts_paddr );
 #endif

     // waiting user buffer released by the CMA component
-    while ( full )
+    unsigned int full;
+    do
     {
         // INVAL L2 cache copy of user buffer status
-        // because it has been modified in RAM by the CMA component
-        _mmc_inval( buf_sts_paddr , 4 );
-
-        full = _physical_read( buf_sts_paddr );
-    }
-
-    // SYNC request for the user buffer, because
-    // it will be read from XRAM by the CMA component
-    _mmc_sync( buf_paddr , pdesc->length );
+        // because it is modified in RAM by the CMA component
+        _mmc_inval( usr_sts_paddr , 4 );
+
+        full = _physical_read( usr_sts_paddr );
+    }
+    while ( full );
+
+    return SYSCALL_OK;
+
+} // end _sys_fbf_cma_check()
+
+//////////////////////////////////////////////
+int _sys_fbf_cma_display( unsigned int index )
+{
+    // get calling thread scheduler, ltid and trdid
+    static_scheduler_t* psched = _get_sched();
+    unsigned int        ltid   = _get_thread_ltid();
+    unsigned int        trdid  = _get_thread_trdid();
+
+    // check FBF allocated
+    if ( (psched->context[ltid].slot[CTX_LOCKS_ID] & LOCKS_MASK_FBF) == 0 )
+    {
+        _printf("\n[GIET ERROR] in _sys_fbf_cma_display() : "
+                "FBF not allocated to thread %x\n", trdid );
+
+        return SYSCALL_CHANNEL_NON_ALLOCATED;
+    }
+
+    // get channel index
+    unsigned int channel = _get_context_slot( CTX_CMA_FB_ID );
+
+    if ( channel >= NB_CMA_CHANNELS )
+    {
+        _printf("\n[GIET ERROR] in _sys_fbf_cma_display() : "
+                "CMA channel non allocated to thread %x\n", trdid );
+
+        return SYSCALL_CHANNEL_NON_ALLOCATED;
+    }
+
+    // check buffer index
+    if ( index >= _fbf_ker_chbuf.nbufs )
+    {
+        _printf("\n[GIET ERROR] in _sys_fbf_cma_display() : "
+                "buffer index too large for thread %x\n", trdid );
+
+        return SYSCALL_CHANNEL_NON_ALLOCATED;
+    }
+
+    // compute user buffer and status physical addresses
+    unsigned long long usr_sts_paddr;
+    unsigned long long usr_buf_paddr;
+
+    fbf_chbuf_t* pdesc = &_fbf_ker_chbuf;
+
+    usr_sts_paddr = ((pdesc->usr_desc[index] & 0xFFF0000000000000ULL) >> 20) +
+                    ((pdesc->usr_desc[index] & 0x3FFFFFFULL) << 6);
+
+    usr_buf_paddr = ((pdesc->usr_desc[index] & 0xFFFFFFFFFC000000ULL) >> 20);
+
+#if GIET_DEBUG_FBF_CMA
+_printf("\n[FBF_CMA DEBUG] enters _sys_fbf_cma_display()\n"
+        " - cma channel      = %d\n"
+        " - buffer index     = %d\n"
+        " - usr buffer paddr = %l\n"
+        " - usr status paddr = %l\n",
+        channel,
+        index,
+        usr_buf_paddr,
+        usr_sts_paddr );
+#endif
+
+    // SYNC request, because this buffer will be read from XRAM by the CMA component
+    _mmc_sync( usr_buf_paddr , FBUF_X_SIZE * FBUF_Y_SIZE );

     // set user buffer status
-    _physical_write( buf_sts_paddr, 0x1 );
-
-    // reset fbf buffer status
-    _physical_write( fbf_sts_paddr, 0x0 );
-
-    // SYNC request, because these buffer descriptors
-    // will be read from XRAM by the CMA component
-    _mmc_sync( buf_sts_paddr, 4 );
-    _mmc_sync( fbf_sts_paddr, 4 );
+    _physical_write( usr_sts_paddr, 0x1 );
+
+    // SYNC request, because this status will be read from XRAM by the CMA component
+    _mmc_sync( usr_sts_paddr, 4 );

     return SYSCALL_OK;

@@ -3265 +3313 @@
     // Desactivate CMA channel
-    _cma_set_register( channel, CHBUF_RUN, 0 );
+    _cma_set_register( channel, CHBUF_RUN, MODE_IDLE );

     return SYSCALL_OK;
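The new FBF/CMA interface above implies a different usage protocol than the old two-buffer version: allocate a channel for nbufs user buffers, register each buffer/status pair, start the transfer once, then alternate check/display on each buffer index. The sketch below illustrates that call sequence directly against the _sys_* entry points shown in this changeset; it is not part of the changeset. A real application would go through the user-level wrappers (not shown here), and the function name, NBUFS value and buffer declarations are assumptions for the example.

// Minimal sketch (assumptions: FBUF_X_SIZE / FBUF_Y_SIZE visible, one byte per
// pixel, double buffering, FBF already allocated to the thread as required by
// the LOCKS_MASK_FBF checks above, error codes ignored).

#define NBUFS 2

// user buffers and status words, 64-byte aligned as required by the kernel checks
static unsigned char buf[NBUFS][FBUF_X_SIZE * FBUF_Y_SIZE] __attribute__((aligned(64)));
static unsigned int  sts[NBUFS][16]                        __attribute__((aligned(64)));

void frame_loop( void )
{
    unsigned int i;

    _sys_fbf_cma_alloc( NBUFS );                   // allocate a CMA channel (nbufs <= 256)

    for ( i = 0 ; i < NBUFS ; i++ )                // register each buffer / status pair
        _sys_fbf_cma_init_buf( i, buf[i], sts[i] );

    _sys_fbf_cma_start();                          // start the CMA transfer

    for ( i = 0 ; ; i = (i + 1) % NBUFS )
    {
        _sys_fbf_cma_check( i );                   // wait until buffer i is released by the CMA
        /* ... draw the next frame into buf[i] ... */
        _sys_fbf_cma_display( i );                 // mark buffer i full so the CMA moves it to the FBF
    }
}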
soft/giet_vm/giet_kernel/sys_handler.h
(r714 -> r725)

@@ -51 +51 @@
 #define SYSCALL_NO_CHANNEL_AVAILABLE      (-18)
 #define SYSCALL_CHANNEL_NON_ALLOCATED     (-19)
-#define SYSCALL_ILLEGAL_XY_ARGUMENTS      (-20)
+#define SYSCALL_ILLEGAL_ARGUMENT          (-20)
 #define SYSCALL_OUT_OF_KERNEL_HEAP_MEMORY (-21)
 #define SYSCALL_ADDRESS_NON_ALIGNED       (-22)

@@ -65 +65 @@
 ///////////////////////////////////////////////////////////////////////////////
-// This structure is used by the CMA component to store the status of the
-// frame buffer (full or empty). The useful information is contained in the
-// "status" integer (1 for full and 0 for empty).
-// This structure must be aligned on a cache line (64 bytes) to simplify
-// the software L2/L3 cache coherence when the IO bridge is used.
-///////////////////////////////////////////////////////////////////////////////
-
-typedef struct buffer_status_s
-{
-    unsigned int status;
-    unsigned int padding[15];
-} buffer_status_t;
-
-///////////////////////////////////////////////////////////////////////////////
-// This structure is used by the CMA component to move a stream
-// of images from two user buffers to the frame buffer in kernel space.
+// This structure is used by the CMA component to move a stream of images
+// from a set of user buffers to the frame buffer in kernel space.
 // It contains two chbuf arrays:
-// - The SRC chbuf contains two buffers (buf0 & buf1), in user space.
-// - The DST cbuf contains one single buffer (fbf), that is the frame buffer.
+// - The SRC chbuf contains <nbufs> buffer descriptors, in user space,
+//   that can be distributed (one buffer per cluster) or not.
+// - The DST cbuf contains one single buffer, that is the frame buffer.
 // Each buffer is described with a 64 bits buffer descriptor:
-// - the 26 LSB bits contain bits[6:31] of the buffer physical address
-// - the 26 following bits contain bits[6:31] of the physical address where the
-//   buffer status is located
-// - the 12 MSB bits contain the common address extension of the buffer and its
-//   status
-// The length field define the buffer size (bytes)
+// - the 26 LSB bits contain bits[31:6] of the status physical address.
+// - the 26 following bits contain bits[31:6] of the buffer physical address.
+// - the 12 MSB bits contain the common address extension.
+// The actual number of user buffers cannot be larger than 256 (at most
+// one user buffer per cluster for a 16*16 mesh).
+// NB: The user buffers are mapped in user space, but the chbuf descriptor
+//     contained in this structure is a protected kernel variable.
 // This structure must be 64 bytes aligned.
 ///////////////////////////////////////////////////////////////////////////////

 typedef struct fbf_chbuf_s
 {
-    unsigned long long buf0_desc;     // first user buffer descriptor
-    unsigned long long buf1_desc;     // second user buffer descriptor
-    unsigned long long fbf_desc;      // frame buffer descriptor
-    unsigned int       length;        // buffer length (bytes)
-    unsigned int       padding[9];    // padding for 64 bytes alignment
+    unsigned long long fbf_desc;      // frame buffer descriptor
+    unsigned long long usr_desc[256]; // user chbuf descriptor
+    unsigned int       nbufs;         // number of user buffers
 } fbf_chbuf_t;

@@ -110 +96 @@
 // The actual number of buffers used in the chbuf is defined by (xmax * ymax).
 // Each buffer is described with a 64 bits buffer descriptor:
-// - the 26 LSB bits contain bits[6:31] of the buffer physical address
-// - the 26 following bits contain bits[6:31] of the physical address where the
-//   buffer status is located
-// - the 12 MSB bits contain the common address extension of the buffer and its
-//   status
+// - the 26 LSB bits contain bits[31:6] of the status physical address.
+// - the 26 following bits contain bits[31:6] of the buffer physical address.
+// - the 12 MSB bits contain the common address extension.
+// The <xmax> and <ymax> fields define the actual mesh size.
 // This structure must be 64 bytes aligned.
 ///////////////////////////////////////////////////////////////////////////////

-typedef struct ker_chbuf_s
+typedef struct nic_chbuf_s
 {
     unsigned long long buf_desc[X_SIZE*Y_SIZE];  // kernel chbuf descriptor
     unsigned int       xmax;                     // nb clusters in a row
     unsigned int       ymax;                     // nb clusters in a column
-} ker_chbuf_t;
+} nic_chbuf_t;

@@ -245 +230 @@
-extern int _sys_fbf_cma_alloc();
+extern int _sys_fbf_cma_alloc( unsigned int nbufs );

 extern int _sys_fbf_cma_release();

-extern int _sys_fbf_cma_init_buf( void* buf0_vbase,
-                                  void* buf1_vbase,
-                                  void* sts0_vaddr,
-                                  void* sts1_vaddr );
-
-extern int _sys_fbf_cma_start( unsigned int length );
-
-extern int _sys_fbf_cma_display( unsigned int buffer_index );
+extern int _sys_fbf_cma_init_buf( unsigned int index,
+                                  void*        buf_vaddr,
+                                  void*        sts_vaddr );
+
+extern int _sys_fbf_cma_start();
+
+extern int _sys_fbf_cma_display( unsigned int index );
+
+extern int _sys_fbf_cma_check( unsigned int index );

 extern int _sys_fbf_cma_stop();

(The remaining hunks of this file only re-indent the continuation arguments of the _sys_fbf_sync_write, _sys_fbf_sync_read, _sys_proc_xyp, _sys_procs_number, _sys_vseg_get_vbase, _sys_vseg_get_length, _sys_xy_from_ptr and _sys_heap_info prototypes; there is no functional change.)
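For reference, the 64-bit buffer descriptor format documented above can be packed and unpacked as follows. This is a small illustrative sketch, not part of the changeset: the masks and shifts are copied from the sys_handler.c code at r725, the helper names are invented for this example, and the packing assumes the buffer and its status word share the same physical address extension, as enforced by the same-cluster check in _sys_fbf_cma_init_buf().

// Illustrative helpers for the 64-bit buffer descriptor layout:
//   bits[25:0]  : bits[31:6] of the status physical address
//   bits[51:26] : bits[31:6] of the buffer physical address
//   bits[63:52] : common physical address extension

static unsigned long long pack_desc( unsigned long long buf_paddr,
                                     unsigned long long sts_paddr )
{
    return ((sts_paddr & 0xFFFFFFFFULL) >> 6) +
           (((buf_paddr & 0xFFFFFFFFFFULL) >> 6) << 26);
}

static unsigned long long desc_to_sts_paddr( unsigned long long desc )
{
    return ((desc & 0xFFF0000000000000ULL) >> 20) +   // address extension
           ((desc & 0x3FFFFFFULL) << 6);               // 32 LSB of the status address
}

static unsigned long long desc_to_buf_paddr( unsigned long long desc )
{
    return (desc & 0xFFFFFFFFFC000000ULL) >> 20;       // buffer address, extension included
}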