[589] | 1 | #!/usr/bin/env python |
---|
| 2 | |
---|
| 3 | from mapping import * |
---|
| 4 | |
---|
| 5 | ################################################################################## |
---|
# file : transpose.py -- NOTE(review): header looks stale, this file actually
#        maps the "dhrystone" application (all segments/tasks are 'dhry_*')
---|
| 7 | # date : may 2014 |
---|
| 8 | # author : Alain Greiner |
---|
| 9 | ################################################################################## |
---|
| 10 | # This file describes the mapping of the multi-threaded "dhrystone" |
---|
| 11 | # application on a multi-clusters, multi-processors architecture. |
---|
# This includes both the mapping of virtual segments on the clusters,
---|
| 13 | # and the mapping of tasks on processors. |
---|
| 14 | # There is one task per processor. |
---|
| 15 | # The mapping of virtual segments is the following: |
---|
| 16 | # - There is one shared data vseg in cluster[0][0] |
---|
| 17 | # - The code vsegs are replicated on all clusters containing processors. |
---|
| 18 | # - There is one heap vseg per cluster containing processors. |
---|
# - The stacks vsegs are distributed on all clusters containing processors.
---|
| 20 | # This mapping uses 5 platform parameters, (obtained from the "mapping" argument) |
---|
| 21 | # - x_size : number of clusters in a row |
---|
| 22 | # - y_size : number of clusters in a column |
---|
| 23 | # - x_width : number of bits coding x coordinate |
---|
| 24 | # - y_width : number of bits coding y coordinate |
---|
| 25 | # - nprocs : number of processors per cluster |
---|
| 26 | ################################################################################## |
---|
| 27 | |
---|
| 28 | ######################### |
---|
def extend( mapping ):

    """Extend *mapping* with the multi-threaded "dhrystone" application.

    Creates one vspace named 'dhrystone' containing:
      - one shared data vseg, placed in cluster[0][0] only,
      - one local code vseg replicated in each cluster containing processors,
      - nprocs local stack vsegs per cluster containing processors,
      - one non-local heap vseg per cluster containing processors,
    and registers one task per processor.

    Platform parameters are read from the mapping argument:
      - x_size : number of clusters in a row
      - y_size : number of clusters in a column
      - nprocs : number of processors per cluster

    Returns the created vspace (useful for test).
    """

    x_size = mapping.x_size
    y_size = mapping.y_size
    nprocs = mapping.nprocs

    # define vsegs base & size
    code_base  = 0x10000000
    code_size  = 0x00010000     # 64 Kbytes (replicated in each cluster)

    data_base  = 0x20000000
    data_size  = 0x00010000     # 64 Kbytes (non replicated)

    stack_base = 0x40000000
    stack_size = 0x00200000     # 2 Mbytes (per cluster)

    heap_base  = 0x60000000
    heap_size  = 0x00200000     # 2 Mbytes (per cluster)

    # per-processor stack size: the per-cluster stack_size is split evenly
    # between the nprocs processors, rounded down to a 4 Kbytes boundary.
    # (Python 2 '/' on ints is floor division; hoisted out of the loops
    # below since it does not depend on the processor index.)
    proc_stack_size = (stack_size / nprocs) & 0xFFFFF000

    # create vspace
    vspace = mapping.addVspace( name = 'dhrystone', startname = 'dhry_data' )

    # data vseg : shared (only in cluster[0,0])
    mapping.addVseg( vspace, 'dhry_data', data_base , data_size,
                     'C_WU', vtype = 'ELF', x = 0, y = 0, pseg = 'RAM',
                     binpath = 'bin/dhrystone/appli.elf',
                     local = False )

    # code vsegs : local (one copy in each cluster containing processors)
    for x in xrange (x_size):
        for y in xrange (y_size):
            cluster_id = (x * y_size) + y
            if ( mapping.clusters[cluster_id].procs ):

                mapping.addVseg( vspace, 'dhry_code_%d_%d' % (x,y),
                                 code_base , code_size,
                                 'CXWU', vtype = 'ELF', x = x, y = y, pseg = 'RAM',
                                 binpath = 'bin/dhrystone/appli.elf',
                                 local = True )

    # stacks vsegs: local (one stack per processor => nprocs stacks per cluster)
    for x in xrange (x_size):
        for y in xrange (y_size):
            cluster_id = (x * y_size) + y
            if ( mapping.clusters[cluster_id].procs ):
                for p in xrange( nprocs ):
                    # global processor index gives each stack a distinct
                    # address range in the [stack_base ...] region
                    proc_id = (cluster_id * nprocs) + p
                    base    = stack_base + (proc_id * proc_stack_size)

                    mapping.addVseg( vspace, 'dhry_stack_%d_%d_%d' % (x,y,p),
                                     base, proc_stack_size, 'C_WU', vtype = 'BUFFER',
                                     x = x , y = y , pseg = 'RAM',
                                     local = True, big = True )

    # heap vsegs: distributed non local (all heap vsegs can be accessed by all tasks)
    for x in xrange (x_size):
        for y in xrange (y_size):
            cluster_id = (x * y_size) + y
            if ( mapping.clusters[cluster_id].procs ):
                base = heap_base + (cluster_id * heap_size)

                mapping.addVseg( vspace, 'dhry_heap_%d_%d' % (x,y), base, heap_size,
                                 'C_WU', vtype = 'HEAP', x = x, y = y, pseg = 'RAM',
                                 local = False, big = True )

    # distributed tasks : one task per processor,
    # thread index == global processor index
    for x in xrange (x_size):
        for y in xrange (y_size):
            cluster_id = (x * y_size) + y
            if ( mapping.clusters[cluster_id].procs ):
                for p in xrange( nprocs ):
                    trdid = (cluster_id * nprocs) + p

                    mapping.addTask( vspace, 'dhry_%d_%d_%d' % (x,y,p),
                                     trdid, x, y, p,
                                     'dhry_stack_%d_%d_%d' % (x,y,p),
                                     'dhry_heap_%d_%d' % (x,y), 0 )

    # extend mapping name
    mapping.name += '_transpose'

    return vspace                # useful for test
---|
| 115 | |
---|
| 116 | ################################ test ################################################## |
---|
| 117 | |
---|
| 118 | if __name__ == '__main__': |
---|
| 119 | |
---|
| 120 | vspace = extend( Mapping( 'test', 2, 2, 4 ) ) |
---|
| 121 | print vspace.xml() |
---|
| 122 | |
---|
| 123 | |
---|
| 124 | # Local Variables: |
---|
| 125 | # tab-width: 4; |
---|
| 126 | # c-basic-offset: 4; |
---|
| 127 | # c-file-offsets:((innamespace . 0)(inline-open . 0)); |
---|
| 128 | # indent-tabs-mode: nil; |
---|
| 129 | # End: |
---|
| 130 | # |
---|
| 131 | # vim: filetype=python:expandtab:shiftwidth=4:tabstop=4:softtabstop=4 |
---|
| 132 | |
---|