#!/usr/bin/env python

from mapping import *

######################################################################################
#   file   : raycast.py
#   date   : July 2015
#   author : Alain Greiner
######################################################################################
# This file describes the mapping of the multi-threaded "raycast" application
# on a multi-cluster, multi-processor architecture.
# The mapping of tasks on processors is the following:
#   - one "main" task on processor (0,0,0)
#   - one "render" task on every processor except (0,0,0)
# The mapping of virtual segments is the following:
#   - There is one shared data vseg in cluster[0][0].
#   - The code vsegs are replicated on all clusters containing processors.
#   - There is one heap vseg per cluster containing processors.
#   - The stack vsegs are distributed on all clusters containing processors.
# This mapping uses 5 platform parameters (obtained from the "mapping" argument):
#   - x_size  : number of clusters in a row
#   - y_size  : number of clusters in a column
#   - x_width : number of bits for the x field
#   - y_width : number of bits for the y field
#   - nprocs  : number of processors per cluster
######################################################################################
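
# Indexing scheme used throughout extend() below, shown with illustrative values
# x_size = 2, y_size = 2, nprocs = 4 (any mesh size works the same way):
#   cluster_id = (x * y_size) + y              e.g. cluster (1,0)            -> cluster_id = 2
#   proc_id    = (cluster_id * nprocs) + p     e.g. proc 3 of cluster (1,0)  -> proc_id    = 11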

######################
def extend( mapping ):

    x_size = mapping.x_size
    y_size = mapping.y_size
    nprocs = mapping.nprocs

    # define vsegs base & size
    code_base  = 0x10000000
    code_size  = 0x00010000     # 64 Kbytes

    data_base  = 0x20000000
    data_size  = 0x00040000     # 256 Kbytes

    stack_base = 0x40000000
    stack_size = 0x00200000     # 2 Mbytes

    heap_base  = 0x60000000
    heap_size  = 0x00400000     # 4 Mbytes

    # create vspace
    vspace = mapping.addVspace( name = 'raycast', startname = 'raycast_data', active = False )

    # data vseg : shared / cluster[0][0]
    mapping.addVseg( vspace, 'raycast_data', data_base, data_size,
                     'C_WU', vtype = 'ELF', x = 0, y = 0, pseg = 'RAM',
                     binpath = 'bin/raycast/appli.elf',
                     local = False )
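
    # This shared data vseg is mapped in cluster (0,0) only (local = False), and it is
    # the vseg named by 'startname' when the vspace was created above.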

    # code vsegs : local (one copy in each cluster)
    for x in xrange (x_size):
        for y in xrange (y_size):
            cluster_id = (x * y_size) + y
            if ( mapping.clusters[cluster_id].procs ):
                mapping.addVseg( vspace, 'raycast_code_%d_%d' %(x,y),
                                 code_base, code_size,
                                 'CXWU', vtype = 'ELF', x = x, y = y, pseg = 'RAM',
                                 binpath = 'bin/raycast/appli.elf',
                                 local = True )
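
    # Every cluster containing processors receives its own copy of the code, all
    # copies being mapped at the same virtual base address (code_base), so that
    # instruction fetches stay local to the cluster.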

    # stack vsegs : local (one stack per processor => nprocs stacks per cluster)
    for x in xrange (x_size):
        for y in xrange (y_size):
            cluster_id = (x * y_size) + y
            if ( mapping.clusters[cluster_id].procs ):
                for p in xrange( nprocs ):
                    proc_id = (((x * y_size) + y) * nprocs) + p
                    size    = (stack_size / nprocs) & 0xFFFFF000
                    base    = stack_base + (proc_id * size)
                    mapping.addVseg( vspace, 'raycast_stack_%d_%d_%d' % (x,y,p),
                                     base, size, 'C_WU', vtype = 'BUFFER',
                                     x = x, y = y, pseg = 'RAM',
                                     local = True, big = True )
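
    # Sizing example (illustrative values only): with stack_size = 0x00200000 (2 Mbytes)
    # and nprocs = 4, each processor gets size = 0x00080000 (512 Kbytes); the
    # & 0xFFFFF000 mask rounds the per-processor size down to a 4 Kbytes boundary.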

    # heap vsegs : shared (one per cluster)
    for x in xrange (x_size):
        for y in xrange (y_size):
            cluster_id = (x * y_size) + y
            if ( mapping.clusters[cluster_id].procs ):
                size = heap_size
                base = heap_base + (cluster_id * size)
                mapping.addVseg( vspace, 'raycast_heap_%d_%d' %(x,y), base, size,
                                 'C_WU', vtype = 'HEAP', x = x, y = y, pseg = 'RAM',
                                 local = False )
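
    # Placement example (illustrative values only): with heap_size = 0x00400000 (4 Mbytes)
    # and y_size = 2, cluster (1,1) has cluster_id = 3, so its heap is mapped at
    # base = 0x60000000 + 3 * 0x00400000 = 0x60C00000, in the RAM pseg of that cluster.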

    # distributed tasks / one task per processor
    for x in xrange (x_size):
        for y in xrange (y_size):
            cluster_id = (x * y_size) + y
            if ( mapping.clusters[cluster_id].procs ):
                for p in xrange( nprocs ):
                    trdid = (((x * y_size) + y) * nprocs) + p
                    if ( x == 0 and y == 0 and p == 0 ):        # main task
                        task_index = 1
                        task_name  = 'main_%d_%d_%d' %(x,y,p)
                    else:                                       # render task
                        task_index = 0
                        task_name  = 'render_%d_%d_%d' % (x,y,p)

                    mapping.addTask( vspace, task_name, trdid, x, y, p,
                                     'raycast_stack_%d_%d_%d' % (x,y,p),
                                     'raycast_heap_%d_%d' % (x,y),
                                     task_index )
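
    # Each task is bound to one processor (x,y,p), to the private stack vseg of that
    # processor, and to the heap vseg of its cluster; trdid is the global thread index,
    # and task_index is assumed to select the entry point (1 for main, 0 for render)
    # as interpreted by the mapping tools defined in mapping.py (not shown here).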

    # extend mapping name
    mapping.name += '_raycast'

    return vspace  # useful for test

################################ test ######################################################

if __name__ == '__main__':

    vspace = extend( Mapping( 'test', 2, 2, 4 ) )
    print vspace.xml()
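
    # The test above assumes the Mapping constructor from mapping.py accepts a name
    # followed by x_size, y_size and nprocs (here a 2x2 mesh with 4 processors per
    # cluster), and prints the generated vspace as XML for inspection.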


# Local Variables:
# tab-width: 4;
# c-basic-offset: 4;
# c-file-offsets:((innamespace . 0)(inline-open . 0));
# indent-tabs-mode: nil;
# End:
#
# vim: filetype=python:expandtab:shiftwidth=4:tabstop=4:softtabstop=4
---|