/////////////////////////////////////////////////////////////////////////////////////////
// File   : mjpeg.c
// Date   : October 2015
// Author : Alain Greiner
/////////////////////////////////////////////////////////////////////////////////////////
// This multi-threaded application illustrates "pipe-line" parallelism and the message
// passing programming model, on top of the POSIX threads API.
// It performs the parallel decompression of an MJPEG bitstream contained in a file.
// The application is described as a TCG (Task and Communication Graph), and all
// communications between threads use MWMR channels.
// It uses the chained buffer DMA component to display the images on the graphic display.
// It contains 6 types of threads (plus the "main" thread, which performs the
// initialisation) and 7 types of MWMR communication channels:
// - the TG thread is only mapped in cluster[0,0], but all other threads
//   (DEMUX, VLD, IQZZ, IDCT, LIBU) are replicated in all clusters;
// - all MWMR channels are replicated in all clusters.
// An illustrative sketch of one pipeline stage is given at the end of this file.
// The number of clusters cannot be larger than 16*16.
// The number of processors per cluster cannot be larger than 6.
// The frame buffer size must match the decompressed image size.
// It uses one TTY terminal shared by all threads.
/////////////////////////////////////////////////////////////////////////////////////////

#include <stdio.h>
#include <malloc.h>
#include <user_lock.h>
#include <mwmr_channel.h>

#include "mjpeg.h"

// macro to use a shared TTY
#define PRINTF(...) lock_acquire( &tty_lock ); \
                    giet_tty_printf(__VA_ARGS__); \
                    lock_release( &tty_lock );

///////////////////////////////////////////////
//          Global variables
///////////////////////////////////////////////

uint32_t        fd;                      // file descriptor for the file containing the MJPEG stream

mwmr_channel_t* tg_2_demux[256];         // one per cluster
mwmr_channel_t* demux_2_vld_data[256];   // one per cluster
mwmr_channel_t* demux_2_vld_huff[256];   // one per cluster
mwmr_channel_t* demux_2_iqzz[256];       // one per cluster
mwmr_channel_t* vld_2_iqzz[256];         // one per cluster
mwmr_channel_t* iqzz_2_idct[256];        // one per cluster
mwmr_channel_t* idct_2_libu[256];        // one per cluster

user_lock_t     tty_lock;                // lock protecting shared TTY

uint8_t*        cma_buf[256];            // CMA buffers (one per cluster)
void*           cma_sts[256];            // CMA buffers status

uint32_t        fbf_width;               // Frame Buffer width
uint32_t        fbf_height;              // Frame Buffer height
uint32_t        nblocks_h;               // number of blocks in a column
uint32_t        nblocks_w;               // number of blocks in a row

uint32_t        date[MAX_IMAGES];        // date of libu completion

////////////////////////////////////////////////
//          declare thread functions
////////////////////////////////////////////////

extern void tg( );
extern void demux( uint32_t index );
extern void vld( uint32_t index );
extern void iqzz( uint32_t index );
extern void idct( uint32_t index );
extern void libu( uint32_t index );

/////////////////////////////////////////
__attribute__ ((constructor)) void main()
/////////////////////////////////////////
{
    // get platform parameters
    uint32_t x_size;
    uint32_t y_size;
    uint32_t nprocs;
    giet_procs_number( &x_size , &y_size , &nprocs );

    // shared TTY allocation
    giet_tty_alloc( 1 );
    lock_init( &tty_lock );

    // check platform parameters
    giet_pthread_assert( (nprocs <= 6),
                         "[MJPEG ERROR] number of processors per cluster cannot be larger than 6");
    giet_pthread_assert( (x_size <= 16),
                         "[MJPEG ERROR] x_size cannot be larger than 16");
    giet_pthread_assert( (y_size <= 16),
                         "[MJPEG ERROR] y_size cannot be larger than 16");
    giet_pthread_assert( (MAX_IMAGES >= (x_size*y_size)),
                         "[MJPEG ERROR] MAX_IMAGES must not be smaller than x_size * y_size");
    // check frame buffer size
    giet_fbf_size( &fbf_width , &fbf_height );
    giet_pthread_assert( ((fbf_width & 0x7) == 0) && ((fbf_height & 0x7) == 0) ,
                         "[MJPEG ERROR] image width and height must be multiple of 8");

    // request frame buffer and CMA channel allocation
    giet_fbf_alloc();
    giet_fbf_cma_alloc( x_size * y_size );

    // file name and image size acquisition
    char     file_pathname[256];
    uint32_t image_width;
    uint32_t image_height;

    PRINTF("\n[MJPEG] enter path for JPEG stream file\n> ");
    giet_tty_gets( file_pathname , 256 );

    if ( file_pathname[0] == 0 )
    {
        strcpy( file_pathname , "/misc/plan_48.mjpg" );
        image_width  = 48;
        image_height = 48;
        PRINTF("\n\n[MJPEG] use /misc/plan_48.mjpg\n" );
    }
    else
    {
        PRINTF("\n[MJPEG] enter image width\n> ");
        giet_tty_getw( &image_width );
        PRINTF("\n[MJPEG] enter image height\n> ");
        giet_tty_getw( &image_height );
        PRINTF("\n");
    }

    giet_pthread_assert( (image_width == fbf_width) && (image_height == fbf_height) ,
                         "[MJPEG ERROR] image size doesn't fit frame buffer size");

    // compute nblocks_h & nblocks_w
    nblocks_w = fbf_width / 8;
    nblocks_h = fbf_height / 8;

    // open file containing the MJPEG bit stream
    fd = giet_fat_open( file_pathname , 0 );
    giet_pthread_assert( ((int)fd >= 0),
                         "[MJPEG ERROR] cannot open MJPEG stream file");

    // indexes for loops
    uint32_t  x;
    uint32_t  y;
    uint32_t  n;
    uint32_t* buffer;

    // initialise distributed heap,
    // allocate MWMR channels,
    // and allocate buffers for CMA, in each cluster
    for ( x = 0 ; x < x_size ; x++ )
    {
        for ( y = 0 ; y < y_size ; y++ )
        {
            uint32_t index = x*y_size + y;

            // initialise heap[x][y]
            heap_init( x , y );

            // allocate MWMR channels in cluster[x][y]
            tg_2_demux[index] = remote_malloc( sizeof( mwmr_channel_t ) , x , y );
            buffer            = remote_malloc( 4 * TG_2_DEMUX_DEPTH , x , y );
            mwmr_init( tg_2_demux[index] , buffer , 1 , TG_2_DEMUX_DEPTH );

            demux_2_vld_data[index] = remote_malloc( sizeof( mwmr_channel_t ) , x , y );
            buffer                  = remote_malloc( 4 * DEMUX_2_VLD_DATA_DEPTH , x , y );
            mwmr_init( demux_2_vld_data[index] , buffer , 1 , DEMUX_2_VLD_DATA_DEPTH );

            demux_2_vld_huff[index] = remote_malloc( sizeof( mwmr_channel_t ) , x , y );
            buffer                  = remote_malloc( 4 * DEMUX_2_VLD_HUFF_DEPTH , x , y );
            mwmr_init( demux_2_vld_huff[index] , buffer , 1 , DEMUX_2_VLD_HUFF_DEPTH );

            demux_2_iqzz[index] = remote_malloc( sizeof( mwmr_channel_t ) , x , y );
            buffer              = remote_malloc( 4 * DEMUX_2_IQZZ_DEPTH , x , y );
            mwmr_init( demux_2_iqzz[index] , buffer , 1 , DEMUX_2_IQZZ_DEPTH );

            vld_2_iqzz[index] = remote_malloc( sizeof( mwmr_channel_t ) , x , y );
            buffer            = remote_malloc( 4 * VLD_2_IQZZ_DEPTH , x , y );
            mwmr_init( vld_2_iqzz[index] , buffer , 1 , VLD_2_IQZZ_DEPTH );

            iqzz_2_idct[index] = remote_malloc( sizeof( mwmr_channel_t ) , x , y );
            buffer             = remote_malloc( 4 * IQZZ_2_IDCT_DEPTH , x , y );
            mwmr_init( iqzz_2_idct[index] , buffer , 1 , IQZZ_2_IDCT_DEPTH );

            idct_2_libu[index] = remote_malloc( sizeof( mwmr_channel_t ) , x , y );
            buffer             = remote_malloc( 4 * IDCT_2_LIBU_DEPTH , x , y );
            mwmr_init( idct_2_libu[index] , buffer , 1 , IDCT_2_LIBU_DEPTH );

            // allocate and register CMA buffers in cluster[x][y]
            cma_buf[index] = remote_malloc( fbf_width * fbf_height , x , y );
            cma_sts[index] = remote_malloc( 64 , x , y );
            giet_fbf_cma_init_buf( index , cma_buf[index] , cma_sts[index] );
        }
    }

    // start CMA channel
    giet_fbf_cma_start( );

    PRINTF("\n[MJPEG] main thread completes initialisation for %d cores\n",
           x_size * y_size * nprocs )

    // thread identifiers for pthread_create() and pthread_join()
    pthread_t trdid_tg;
    pthread_t trdid_demux[256];
    pthread_t trdid_vld[256];
    pthread_t trdid_iqzz[256];
    pthread_t trdid_idct[256];
    pthread_t trdid_libu[256];

    uint32_t index;

    // launch all threads : the precise mapping is defined in the mjpeg.py file
    if ( giet_pthread_create( &trdid_tg, NULL, &tg , NULL ) )
        giet_pthread_exit( "error launching thread tg\n");

    for ( index = 0 ; index < (x_size * y_size) ; index++ )
    {
        if ( giet_pthread_create( &trdid_demux[index], NULL, &demux , (void*)index ) )
            giet_pthread_exit( "error launching thread demux\n");

        if ( giet_pthread_create( &trdid_vld[index], NULL, &vld , (void*)index ) )
            giet_pthread_exit( "error launching thread vld\n");

        if ( giet_pthread_create( &trdid_iqzz[index], NULL, &iqzz , (void*)index ) )
            giet_pthread_exit( "error launching thread iqzz\n");

        if ( giet_pthread_create( &trdid_idct[index], NULL, &idct , (void*)index ) )
            giet_pthread_exit( "error launching thread idct\n");

        if ( giet_pthread_create( &trdid_libu[index], NULL, &libu , (void*)index ) )
            giet_pthread_exit( "error launching thread libu\n");
    }

    // wait for completion of all threads
    if ( giet_pthread_join( trdid_tg , NULL ) )
    {
        PRINTF("\n[MJPEG ERROR] calling giet_pthread_join() for tg\n" )
    }

    for ( index = 0 ; index < (x_size * y_size) ; index++ )
    {
        if ( giet_pthread_join( trdid_demux[index] , NULL ) )
        {
            PRINTF("\n[MJPEG ERROR] calling giet_pthread_join() for demux[%d]\n", index )
        }
        if ( giet_pthread_join( trdid_vld[index] , NULL ) )
        {
            PRINTF("\n[MJPEG ERROR] calling giet_pthread_join() for vld[%d]\n", index )
        }
        if ( giet_pthread_join( trdid_iqzz[index] , NULL ) )
        {
            PRINTF("\n[MJPEG ERROR] calling giet_pthread_join() for iqzz[%d]\n", index )
        }
        if ( giet_pthread_join( trdid_idct[index] , NULL ) )
        {
            PRINTF("\n[MJPEG ERROR] calling giet_pthread_join() for idct[%d]\n", index )
        }
        if ( giet_pthread_join( trdid_libu[index] , NULL ) )
        {
            PRINTF("\n[MJPEG ERROR] calling giet_pthread_join() for libu[%d]\n", index )
        }
    }

    // instrumentation : display the completion date of each decoded image
    uint32_t image;
    PRINTF("\n[MJPEG] Instrumentation Results\n" )
    for ( image = 0 ; image < MAX_IMAGES ; image++ )
    {
        PRINTF(" - Image %d : completed at cycle %d\n", image , date[image])
    }

    giet_pthread_exit( "main completed" );

}  // end main()
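
/////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not built, referenced from the header comment above): shape of
// one pipeline stage of the TCG. The real DEMUX, VLD, IQZZ, IDCT and LIBU threads are
// defined in their own source files; the loop below only suggests how such a stage
// could consume items from its per-cluster input MWMR channel and produce items into
// its per-cluster output MWMR channel, assuming the blocking mwmr_read() / mwmr_write()
// primitives of the MWMR library (mwmr_channel.h). The function name, the one-item
// granularity, and the endless loop are hypothetical, not taken from the application.
/////////////////////////////////////////////////////////////////////////////////////////
#if 0
static void stage_sketch( uint32_t index )     // index identifies the cluster
{
    uint32_t item;                             // one 32-bit item (channel width is 1 word)

    while ( 1 )
    {
        // blocking read of one item from the input channel of this cluster
        mwmr_read( vld_2_iqzz[index] , &item , 1 );

        // ... per-item processing of the decompression stage would happen here ...

        // blocking write of one item to the output channel of this cluster
        mwmr_write( iqzz_2_idct[index] , &item , 1 );
    }
}
#endif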