Changeset 720
- Timestamp: Oct 11, 2015, 6:23:18 PM
- Location: soft/giet_vm/applications
- Files: 1 added, 1 deleted, 5 edited
soft/giet_vm/applications/classif/classif.c
(r708 → r720)

      giet_proc_xyp( &x_id , &y_id , &p_id );

-     // get & check plat-form parameters
+     // get plat-form parameters
      unsigned int x_size;      // number of clusters in a row
      unsigned int y_size;      // number of clusters in a column
…
      giet_procs_number( &x_size , &y_size , &nprocs );

+     // shared TTY allocation
+     giet_tty_alloc( 1 );
+     lock_init( &tty_lock);
+
+     // check plat-form parameters
      giet_pthread_assert( ((nprocs >= 3) && (nprocs <= 8)),
              "[CLASSIF ERROR] number of procs per cluster must in [3...8]");
…
      }

-     // shared TTY allocation
-     giet_tty_alloc( 1 );
-     lock_init( &tty_lock);
-
      printf("\n[CLASSIF] start at cycle %d on %d cores\n",
             giet_proctime(), (x_size * y_size * nprocs) );

      // thread index
-     // required bt pthread_create()
+     // required by pthread_create()
      // unused in this appli because no pthread_join()
      pthread_t trdid;
soft/giet_vm/applications/classif/classif.py
(r712 → r720)

      # create vspace
-     vspace = mapping.addVspace( name = 'classif', startname = 'classif_data', active = False )
+     vspace = mapping.addVspace( name = 'classif',
+                                 startname = 'classif_data',
+                                 active = False )

      # data vseg : shared / cluster[0][0]
…
                       local = True )

-     # stacks vsegs: local (one stack per processor => nprocs stacks per cluster)
+     # stacks vsegs: local (one stack per thread => nprocs stacks per cluster)
      # ... plus main_stack in cluster[0][0]
      mapping.addVseg( vspace, 'main_stack',
…
                          'classif_stack_%d_%d_%d' % (x,y,p),
                          'classif_heap_%d_%d' % (x,y),
-                         start_index )        # index in start_vector
+                         start_index )

      # extend mapping name
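The thread-creation loop of classif.py is elided in the hunk above; only its trailing arguments (the per-thread stack vseg, the per-cluster heap vseg, and start_index) are visible. For orientation, here is a minimal sketch of how those names typically feed addThread(); the addThread() signature is taken from this changeset's router.py, while the thread name, loop bounds and the mapping/vspace objects are assumed for illustration only:

    # Sketch only: one thread per processor, modeled on the router.py pattern
    # from this changeset. x_size, y_size, nprocs, start_index, mapping and
    # vspace are assumed to be defined earlier, as in the real classif.py.
    for x in xrange( x_size ):
        for y in xrange( y_size ):
            for p in xrange( nprocs ):
                mapping.addThread( vspace,
                                   'classif_%d_%d_%d' % (x,y,p),   # hypothetical thread name
                                   False,                          # not the main thread
                                   x, y, p,
                                   'classif_stack_%d_%d_%d' % (x,y,p),
                                   'classif_heap_%d_%d' % (x,y),
                                   start_index )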
soft/giet_vm/applications/display/display.c
(r712 → r720)

      #include <stdio.h>
-     #include <stdlib.h>
      #include <hard_config.h>      // To check Frame Buffer size
soft/giet_vm/applications/router/Makefile
(r589 → r720)

      APP_NAME = router

-     OBJS= main.o
+     OBJS= router.o

      LIBS= -L../../build/libs -luser
soft/giet_vm/applications/router/router.py
(r610 → r720)

      # This file describes the mapping of the multi-threaded "router"
      # application on a multi-clusters, multi-processors architecture.
-     # This application contains N+2 parallel tasks communicating through MWMR channels:
+     # This application contains N+2 parallel threads communicating through MWMR channels:
      # The mapping of virtual segments on the clusters is the following:
+     #   - the data vseg is mapped on cluster[0,0].
      #   - The code vsegs are replicated on all clusters.
-     #   - the data_0 vseg is mapped on cluster[0,0].
-     #   - the data_1 vseg is mapped on cluster[x_size-1,y_size-1].
-     #   - the stacks vsegs are distibuted on all clusters.
-     # The mapping of tasks on processors is the following:
-     #   - one "producer" task => on proc[0,0,0]
-     #   - one "consume" task  => on proc[x_size-1,y_size-1,nprocs-1]
-     #   - N "router" tasks    => on all others processors
+     #   - the stack vsegs are distibuted on all clusters.
+     #   - the heap vsegs are distributed on all clusters
+     # The mapping of threads on processors is the following:
+     #   - the "main" thread     => on proc[0,0,0]
+     #   - the "producer" thread => on proc[0,0,0]
+     #   - the "consume" thread  => on proc[0,0,1]
+     #   - N "compute" threads   => on all others processors
      # This mapping uses 5 platform parameters, (obtained from the "mapping" argument)
      #   - x_size : number of clusters in a row
…
      code_size   = 0x00010000     # 64 Kbytes (replicated in each cluster)

-     data_0_base = 0x20000000
-     data_0_size = 0x00010000     # 64 Kbytes (non replicated)
+     data_base   = 0x20000000
+     data_size   = 0x00010000     # 64 Kbytes (non replicated)

-     data_1_base = 0x30000000
-     data_1_size = 0x00010000     # 64 Kbytes (non replicated)
+     stack_base  = 0x30000000
+     stack_size  = 0x00010000     # 64 Kbytes (per thread)

-     stack_base  = 0x40000000
-     stack_size  = 0x00200000     # 2 Mbytes (per cluster)
+     heap_base   = 0x40000000
+     heap_size   = 0x00200000     # 2 Mbytes (per cluster)

      # create vspace
-     vspace = mapping.addVspace( name = 'router', startname = 'router_data_0' )
+     vspace = mapping.addVspace( name = 'router',
+                                 startname = 'router_data',
+                                 active = False )

-     # data_0 vseg : shared / in cluster [0,0]
-     mapping.addVseg( vspace, 'router_data_0', data_0_base , data_0_size,
+     # data vseg : shared / in cluster [0,0]
+     mapping.addVseg( vspace, 'router_data', data_base , data_size,
                       'C_WU', vtype = 'ELF', x = 0, y = 0, pseg = 'RAM',
-                      binpath = 'bin/router/appli.elf',
-                      local = False )
-
-     # data_1 vseg : shared / in cluster[x_size-1,y_size-1]
-     mapping.addVseg( vspace, 'router_data_1', data_1_base , data_1_size,
-                      'C_WU', vtype = 'ELF', x = x_size - 1, y = y_size - 1, pseg = 'RAM',
                       binpath = 'bin/router/appli.elf',
                       local = False )
…
                       local = True )

-     # stacks vsegs: local (one stack per processor => nprocs stacks per cluster)
-     for x in xrange (x_size):
-         for y in xrange (y_size):
-             for p in xrange( nprocs ):
-                 proc_id = (((x * y_size) + y) * nprocs) + p
-                 size    = stack_size / nprocs
-                 base    = stack_base + (proc_id * size)
-                 mapping.addVseg( vspace, 'router_stack_%d_%d_%d' % (x,y,p), base, size,
-                                  'C_WU', vtype = 'BUFFER', x = x , y = y , pseg = 'RAM',
-                                  local = True, big = True )
-
-     # distributed tasks / one task per processor
-     for x in xrange (x_size):
-         for y in xrange (y_size):
-             for p in xrange( nprocs ):
-                 trdid = (((x * y_size) + y) * nprocs) + p
-                 if (x==0) and (y==0) and (p==0):                          # task producer
-                     task_index = 2
-                     task_name  = 'producer'
-                 elif (x==x_size-1) and (y==y_size-1) and (p==nprocs-1):   # task consumer
-                     task_index = 1
-                     task_name  = 'consumer'
-                 else :                                                    # task router
-                     task_index = 0
-                     task_name  = 'router_%d_%d_%d' % (x,y,p)
-                 mapping.addTask( vspace, task_name, trdid, x, y, p,
-                                  'router_stack_%d_%d_%d' % (x,y,p), '' , task_index )
+     # heap vsegs : shared (one per cluster)
+     for x in xrange (x_size):
+         for y in xrange (y_size):
+             cluster_id = (x * y_size) + y
+             if ( mapping.clusters[cluster_id].procs ):
+                 size = heap_size
+                 base = heap_base + (cluster_id * size)
+
+                 mapping.addVseg( vspace, 'router_heap_%d_%d' %(x,y), base , size,
+                                  'C_WU', vtype = 'HEAP', x = x, y = y, pseg = 'RAM',
+                                  local = False, big = True )
+
+     # stacks vsegs: local (one stack per thread => nprocs stacks per cluster)
+     # ... plus main_stack in cluster[0][0]
+     mapping.addVseg( vspace, 'main_stack',
+                      stack_base, stack_size, 'C_WU', vtype = 'BUFFER',
+                      x = 0 , y = 0 , pseg = 'RAM',
+                      local = True )
+
+     for x in xrange (x_size):
+         for y in xrange (y_size):
+             cluster_id = (x * y_size) + y
+             if ( mapping.clusters[cluster_id].procs ):
+                 for p in xrange( nprocs ):
+                     proc_id = (((x * y_size) + y) * nprocs) + p
+                     base    = stack_base + (proc_id * stack_size) + stack_size
+
+                     mapping.addVseg( vspace, 'router_stack_%d_%d_%d' % (x,y,p),
+                                      base, stack_size, 'C_WU', vtype = 'BUFFER',
+                                      x = x , y = y , pseg = 'RAM',
+                                      local = True )
+
+     # distributed threads / one thread per processor
+     # ... plus main on P[0][0][0]
+     mapping.addThread( vspace, 'main', True, 0, 0, 1,
+                        'main_stack',
+                        'router_heap_0_0',
+                        0 )                      # index in start_vector
+
+     for x in xrange (x_size):
+         for y in xrange (y_size):
+             cluster_id = (x * y_size) + y
+             if ( mapping.clusters[cluster_id].procs ):
+                 for p in xrange( nprocs ):
+                     if (x==0) and (y==0) and (p==0):        # thread producer
+                         start_index = 3
+                         thread_name = 'producer_0_0_0'
+                     elif (x==0) and (y==0) and (p==1):      # thread consumer
+                         start_index = 2
+                         thread_name = 'consumer_0_0_1'
+                     else :                                  # thread compute
+                         start_index = 1
+                         thread_name = 'compute_%d_%d_%d' % (x,y,p)
+
+                     mapping.addThread( vspace, thread_name, False , x, y, p,
+                                        'router_stack_%d_%d_%d' % (x,y,p),
+                                        'router_heap_%d_%d' %(x,y),
+                                        start_index )

      # extend mapping name
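As a quick sanity check of the new address arithmetic in router.py, the sketch below recomputes the per-cluster heap base and per-thread stack base exactly as the added lines do. The x_size, y_size and nprocs values are assumed for illustration (the real script obtains them from the mapping argument):

    # Illustration only: vseg base arithmetic from the new router.py,
    # evaluated with assumed platform parameters.
    x_size, y_size, nprocs = 2, 2, 4           # assumed values

    stack_base = 0x30000000
    stack_size = 0x00010000                    # 64 Kbytes per thread
    heap_base  = 0x40000000
    heap_size  = 0x00200000                    # 2 Mbytes per cluster

    def heap_vseg_base( x, y ):
        cluster_id = (x * y_size) + y
        return heap_base + (cluster_id * heap_size)

    def stack_vseg_base( x, y, p ):
        proc_id = (((x * y_size) + y) * nprocs) + p
        return stack_base + (proc_id * stack_size) + stack_size

    print( hex( heap_vseg_base( 1, 1 ) ) )     # 0x40600000
    print( hex( stack_vseg_base( 1, 0, 2 ) ) ) # 0x300b0000

Note that the extra "+ stack_size" term places the first per-thread stack one stack_size above stack_base, which appears to leave the stack_size-sized main_stack vseg at stack_base itself, consistent with the main_stack placement in cluster[0][0] above.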