1 | #!/usr/bin/env python |
---|
2 | |
---|
3 | from mapping import * |
---|
4 | |
---|
5 | ###################################################################################### |
---|
6 | # file : raycast.py |
---|
7 | # date : july 2015 |
---|
8 | # author : Alain Greiner |
---|
9 | ####################################################################################### |
---|
10 | # This file describes the mapping of the multi-threaded "raycast" application |
---|
11 | # on a multi-clusters, multi-processors architecture. |
---|
12 | # The mapping of threads on processors is the following: |
---|
13 | # - one "main" thread on (0,0,0) |
---|
14 | # - one "render" thread per processor but (0,0,0) |
---|
15 | # The mapping of virtual segments is the following: |
---|
16 | # - There is one shared data vseg in cluster[0][0] |
---|
17 | # - The code vsegs are replicated on all clusters containing processors. |
---|
18 | # - There is one heap vseg per cluster containing processors. |
---|
19 | # - The stacks vsegs are distributed on all clusters containing processors. |
---|
20 | # This mapping uses 5 platform parameters, (obtained from the "mapping" argument) |
---|
21 | # - x_size : number of clusters in a row |
---|
22 | # - y_size : number of clusters in a column |
---|
23 | # - x_width : number of bits for x field |
---|
24 | # - y_width : number of bits for y field |
---|
25 | # - nprocs : number of processors per cluster |
---|
26 | #################################################################################### |
---|
27 | |
---|
28 | ###################### |
---|
29 | def extend( mapping ): |
---|
30 | |
---|
31 | x_size = mapping.x_size |
---|
32 | y_size = mapping.y_size |
---|
33 | nprocs = mapping.nprocs |
---|
34 | |
---|
35 | # define vsegs base & size |
---|
36 | code_base = 0x10000000 # replicated in all clusters |
---|
37 | code_size = 0x00010000 # 64 Kbytes per cluster |
---|
38 | |
---|
39 | data_base = 0x20000000 # non replicated |
---|
40 | data_size = 0x00040000 # 256 Kbytes in cluster 0 |
---|
41 | |
---|
42 | stack_base = 0x40000000 # distributed in all clusters |
---|
43 | stack_size = 0x00010000 # 64 Kbytes per proc |
---|
44 | |
---|
45 | heap_base = 0x60000000 # distributed in all clusters |
---|
46 | heap_size = 0x00400000 # 4 Mbytes per cluster |
---|
47 | |
---|
48 | # create vspace |
---|
49 | vspace = mapping.addVspace( name = 'raycast', startname = 'raycast_data', active = False ) |
---|
50 | |
---|
51 | # data vseg : shared / cluster[0][0] |
---|
52 | mapping.addVseg( vspace, 'raycast_data', data_base , data_size, |
---|
53 | 'C_WU', vtype = 'ELF', x = 0, y = 0, pseg = 'RAM', |
---|
54 | binpath = 'bin/raycast/appli.elf', |
---|
55 | local = False ) |
---|
56 | |
---|
57 | # code vsegs : local (one copy in each cluster) |
---|
58 | for x in xrange (x_size): |
---|
59 | for y in xrange (y_size): |
---|
60 | cluster_id = (x * y_size) + y |
---|
61 | if ( mapping.clusters[cluster_id].procs ): |
---|
62 | mapping.addVseg( vspace, 'raycast_code_%d_%d' %(x,y), |
---|
63 | code_base , code_size, |
---|
64 | 'CXWU', vtype = 'ELF', x = x, y = y, pseg = 'RAM', |
---|
65 | binpath = 'bin/raycast/appli.elf', |
---|
66 | local = True ) |
---|
67 | |
---|
68 | # stacks vsegs: local (one stack per processor => nprocs stacks per cluster) |
---|
69 | for x in xrange (x_size): |
---|
70 | for y in xrange (y_size): |
---|
71 | cluster_id = (x * y_size) + y |
---|
72 | if ( mapping.clusters[cluster_id].procs ): |
---|
73 | for p in xrange( nprocs ): |
---|
74 | proc_id = (((x * y_size) + y) * nprocs) + p |
---|
75 | size = stack_size |
---|
76 | base = stack_base + (proc_id * size) |
---|
77 | mapping.addVseg( vspace, 'raycast_stack_%d_%d_%d' % (x,y,p), |
---|
78 | base, size, 'C_WU', vtype = 'BUFFER', |
---|
79 | x = x , y = y , pseg = 'RAM', |
---|
80 | local = True, big = False ) |
---|
81 | |
---|
82 | # heap vsegs : shared (one per cluster) |
---|
83 | for x in xrange (x_size): |
---|
84 | for y in xrange (y_size): |
---|
85 | cluster_id = (x * y_size) + y |
---|
86 | if ( mapping.clusters[cluster_id].procs ): |
---|
87 | size = heap_size |
---|
88 | base = heap_base + (cluster_id * size) |
---|
89 | mapping.addVseg( vspace, 'raycast_heap_%d_%d' %(x,y), base , size, |
---|
90 | 'C_WU', vtype = 'HEAP', x = x, y = y, pseg = 'RAM', |
---|
91 | local = False, big = True ) |
---|
92 | |
---|
93 | # distributed threads / one thread per processor |
---|
94 | for x in xrange (x_size): |
---|
95 | for y in xrange (y_size): |
---|
96 | cluster_id = (x * y_size) + y |
---|
97 | if ( mapping.clusters[cluster_id].procs ): |
---|
98 | for p in xrange( nprocs ): |
---|
99 | trdid = (((x * y_size) + y) * nprocs) + p |
---|
100 | if ( x == 0 and y == 0 and p == 0 ): # main thread |
---|
101 | start_id = 1 |
---|
102 | is_main = True |
---|
103 | else: # render thread |
---|
104 | start_id = 0 |
---|
105 | is_main = False |
---|
106 | |
---|
107 | mapping.addThread( vspace, |
---|
108 | 'raycast_%d_%d_%d' % (x,y,p), |
---|
109 | is_main, |
---|
110 | x, y, p, |
---|
111 | 'raycast_stack_%d_%d_%d' % (x,y,p), |
---|
112 | 'raycast_heap_%d_%d' % (x,y), |
---|
113 | start_id ) |
---|
114 | |
---|
115 | # extend mapping name |
---|
116 | mapping.name += '_raycast' |
---|
117 | |
---|
118 | return vspace # useful for test |
---|
119 | |
---|
################################ test ######################################################

if __name__ == '__main__':

    # Build a small test mapping (2x2 clusters, 4 procs per cluster) and
    # display the resulting vspace as XML.
    # NOTE: print is called in its parenthesized form so this self-test
    # runs unchanged under both Python 2 and Python 3.
    vspace = extend( Mapping( 'test', 2, 2, 4 ) )
    print( vspace.xml() )
---|
126 | |
---|
127 | |
---|
128 | # Local Variables: |
---|
129 | # tab-width: 4; |
---|
130 | # c-basic-offset: 4; |
---|
131 | # c-file-offsets:((innamespace . 0)(inline-open . 0)); |
---|
132 | # indent-tabs-mode: nil; |
---|
133 | # End: |
---|
134 | # |
---|
135 | # vim: filetype=python:expandtab:shiftwidth=4:tabstop=4:softtabstop=4 |
---|
136 | |
---|