Changeset 720 for soft


Timestamp: Oct 11, 2015, 6:23:18 PM (9 years ago)
Author: alain
Message:

Adapt the router application to the POSIX API.

Location: soft/giet_vm/applications
Files: 1 added, 1 deleted, 5 edited

  • soft/giet_vm/applications/classif/classif.c

    r708 → r720

          giet_proc_xyp( &x_id , &y_id , &p_id );

    -     // get & check plat-form parameters
    +     // get plat-form parameters
          unsigned int x_size;                       // number of clusters in a row
          unsigned int y_size;                       // number of clusters in a column
    ...
          giet_procs_number( &x_size , &y_size , &nprocs );

    +     // shared TTY allocation
    +     giet_tty_alloc( 1 );
    +     lock_init( &tty_lock);
    +
    +     // check plat-form parameters
          giet_pthread_assert( ((nprocs >= 3) && (nprocs <= 8)),
                               "[CLASSIF ERROR] number of procs per cluster must in [3...8]");
    ...
          }

    -     // shared TTY allocation
    -     giet_tty_alloc( 1 );
    -     lock_init( &tty_lock);
    -
          printf("\n[CLASSIF] start at cycle %d on %d cores\n",
                 giet_proctime(), (x_size * y_size * nprocs) );

          // thread index
    -     // required bt pthread_create()
    +     // required by pthread_create()
          // unused in this appli because no pthread_join()
          pthread_t   trdid;
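
    The classif.c hunk above moves the shared TTY allocation and lock initialisation ahead of the
    platform-parameter check (presumably so that the giet_pthread_assert() message can be displayed
    on the allocated terminal) and fixes a comment typo ("required bt" → "required by"). For readers
    unfamiliar with the pattern the last comments refer to, here is a minimal, hypothetical sketch of
    a POSIX-style launch in which the pthread_t identifier is kept only because pthread_create()
    requires it and is never passed to pthread_join(); the entry function, thread count and error
    handling are assumptions, not code from this changeset.

        #include <stdio.h>
        #include <pthread.h>

        /* assumed application entry point (hypothetical name) */
        extern void * thread_entry( void * arg );

        static void launch_workers( unsigned int nthreads )
        {
            pthread_t    trdid;    /* required by pthread_create(), never joined */
            unsigned int n;

            for ( n = 0 ; n < nthreads ; n++ )
            {
                /* standard POSIX signature: identifier, attributes, entry, argument */
                if ( pthread_create( &trdid , NULL , &thread_entry , NULL ) )
                {
                    /* in the GIET applications such an error would be reported on the
                       shared TTY allocated by giet_tty_alloc() before this point */
                    printf( "cannot create thread %u\n" , n );
                }
            }
        }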
  • soft/giet_vm/applications/classif/classif.py

    r712 → r720

          # create vspace
    -     vspace = mapping.addVspace( name = 'classif', startname = 'classif_data', active = False )
    +     vspace = mapping.addVspace( name = 'classif',
    +                                 startname = 'classif_data',
    +                                 active = False )

          # data vseg : shared / cluster[0][0]
    ...
                                       local = True )

    -     # stacks vsegs: local (one stack per processor => nprocs stacks per cluster)
    +     # stacks vsegs: local (one stack per thread => nprocs stacks per cluster)
          # ... plus main_stack in cluster[0][0]
          mapping.addVseg( vspace, 'main_stack',
    ...
                                             'classif_stack_%d_%d_%d' % (x,y,p),
                                             'classif_heap_%d_%d' % (x,y),
    -                                        start_index )   # index in start_vector
    +                                        start_index )

          # extend mapping name
  • soft/giet_vm/applications/display/display.c

    r712 → r720

      #include <stdio.h>
    - #include <stdlib.h>
      #include <hard_config.h>     // To check Frame Buffer size
  • soft/giet_vm/applications/router/Makefile

    r589 → r720

      APP_NAME = router

    - OBJS= main.o
    + OBJS= router.o

      LIBS= -L../../build/libs -luser
  • soft/giet_vm/applications/router/router.py

    r610 → r720

      #  This file describes the mapping of the multi-threaded "router"
      #  application on a multi-clusters, multi-processors architecture.
    - #  This application contains N+2 parallel tasks communicating through MWMR channels:
    + #  This application contains N+2 parallel threads communicating through MWMR channels:
      #  The mapping of virtual segments on the clusters is the following:
    + #    - the data vseg is mapped on cluster[0,0].
      #    - The code vsegs are replicated on all clusters.
    - #    - the data_0 vseg is mapped on cluster[0,0].
    - #    - the data_1 vseg is mapped on cluster[x_size-1,y_size-1].
    - #    - the stacks vsegs are distibuted on all clusters.
    - #  The mapping of tasks on processors is the following:
    - #    - one "producer" task  => on proc[0,0,0]
    - #    - one "consume"  task  => on proc[x_size-1,y_size-1,nprocs-1]
    - #    - N   "router"   tasks => on all others processors
    + #    - the stack vsegs are distibuted on all clusters.
    + #    - the heap vsegs are distributed on all clusters
    + #  The mapping of threads on processors is the following:
    + #    - the "main" thread      => on proc[0,0,0]
    + #    - the "producer" thread  => on proc[0,0,0]
    + #    - the "consume"  thread  => on proc[0,0,1]
    + #    - N   "compute"  threads => on all others processors
      #  This mapping uses 5 platform parameters, (obtained from the "mapping" argument)
      #    - x_size    : number of clusters in a row
    ...
          code_size    = 0x00010000     # 64 Kbytes (replicated in each cluster)

    -     data_0_base  = 0x20000000
    -     data_0_size  = 0x00010000     # 64 Kbytes (non replicated)
    +     data_base    = 0x20000000
    +     data_size    = 0x00010000     # 64 Kbytes (non replicated)

    -     data_1_base  = 0x30000000
    -     data_1_size  = 0x00010000     # 64 Kbytes (non replicated)
    +     stack_base   = 0x30000000
    +     stack_size   = 0x00010000     # 64 Kbytes (per thread)

    -     stack_base   = 0x40000000
    -     stack_size   = 0x00200000     # 2 Mbytes (per cluster)
    +     heap_base    = 0x40000000
    +     heap_size    = 0x00200000     # 2 Mbytes (per cluster)

          # create vspace
    -     vspace = mapping.addVspace( name = 'router', startname = 'router_data_0' )
    +     vspace = mapping.addVspace( name = 'router',
    +                                 startname = 'router_data',
    +                                 active = False )

    -     # data_0 vseg : shared / in cluster [0,0]
    -     mapping.addVseg( vspace, 'router_data_0', data_0_base , data_0_size,
    +     # data vseg : shared / in cluster [0,0]
    +     mapping.addVseg( vspace, 'router_data', data_base , data_size,
                           'C_WU', vtype = 'ELF', x = 0, y = 0, pseg = 'RAM',
    -                      binpath = 'bin/router/appli.elf',
    -                      local = False )
    -
    -     # data_1 vseg : shared / in cluster[x_size-1,y_size-1]
    -     mapping.addVseg( vspace, 'router_data_1', data_1_base , data_1_size,
    -                      'C_WU', vtype = 'ELF', x = x_size - 1, y = y_size - 1, pseg = 'RAM',
                           binpath = 'bin/router/appli.elf',
                           local = False )
    ...
                                   local = True )

    -     # stacks vsegs: local (one stack per processor => nprocs stacks per cluster)
    +     # heap vsegs : shared (one per cluster)
          for x in xrange (x_size):
              for y in xrange (y_size):
    -             for p in xrange( nprocs ):
    -                 proc_id = (((x * y_size) + y) * nprocs) + p
    -                 size    = stack_size / nprocs
    -                 base    = stack_base + (proc_id * size)
    -                 mapping.addVseg( vspace, 'router_stack_%d_%d_%d' % (x,y,p), base, size,
    -                                  'C_WU', vtype = 'BUFFER', x = x , y = y , pseg = 'RAM',
    -                                  local = True, big = True )
    +             cluster_id = (x * y_size) + y
    +             if ( mapping.clusters[cluster_id].procs ):
    +                 size  = heap_size
    +                 base  = heap_base + (cluster_id * size)

    -     # distributed tasks / one task per processor
    +                 mapping.addVseg( vspace, 'router_heap_%d_%d' %(x,y), base , size,
    +                                  'C_WU', vtype = 'HEAP', x = x, y = y, pseg = 'RAM',
    +                                  local = False, big = True )
    +
    +     # stacks vsegs: local (one stack per thread => nprocs stacks per cluster)
    +     # ... plus main_stack in cluster[0][0]
    +     mapping.addVseg( vspace, 'main_stack',
    +                      stack_base, stack_size, 'C_WU', vtype = 'BUFFER',
    +                      x = 0 , y = 0 , pseg = 'RAM',
    +                      local = True )
    +
          for x in xrange (x_size):
              for y in xrange (y_size):
    -             for p in xrange( nprocs ):
    -                 trdid = (((x * y_size) + y) * nprocs) + p
    -                 if   (x==0) and (y==0) and (p== 0):                      # task producer
    -                     task_index = 2
    -                     task_name  = 'producer'
    -                 elif (x==x_size-1) and (y==y_size-1) and (p==nprocs-1):  # task consumer
    -                     task_index = 1
    -                     task_name  = 'consumer'
    -                 else :                                                   # task router
    -                     task_index = 0
    -                     task_name  = 'router_%d_%d_%d' % (x,y,p)
    -                 mapping.addTask( vspace, task_name, trdid, x, y, p,
    -                                  'router_stack_%d_%d_%d' % (x,y,p), '' , task_index )
    +             cluster_id = (x * y_size) + y
    +             if ( mapping.clusters[cluster_id].procs ):
    +                 for p in xrange( nprocs ):
    +                     proc_id = (((x * y_size) + y) * nprocs) + p
    +                     base    = stack_base + (proc_id * stack_size) + stack_size
    +
    +                     mapping.addVseg( vspace, 'router_stack_%d_%d_%d' % (x,y,p),
    +                                      base, stack_size, 'C_WU', vtype = 'BUFFER',
    +                                      x = x , y = y , pseg = 'RAM',
    +                                      local = True )
    +
    +     # distributed threads / one thread per processor
    +     # ... plus main on P[0][0][0]
    +     mapping.addThread( vspace, 'main', True, 0, 0, 1,
    +                        'main_stack',
    +                        'router_heap_0_0',
    +                        0 )                      # index in start_vector
    +
    +     for x in xrange (x_size):
    +         for y in xrange (y_size):
    +             cluster_id = (x * y_size) + y
    +             if ( mapping.clusters[cluster_id].procs ):
    +                 for p in xrange( nprocs ):
    +                     if   (x==0) and (y==0) and (p==0):  # thread producer
    +                         start_index = 3
    +                         thread_name  = 'producer_0_0_0'
    +                     elif (x==0) and (y==0) and (p==1):  # thread consumer
    +                         start_index = 2
    +                         thread_name  = 'consumer_0_0_1'
    +                     else :                              # thread compute
    +                         start_index = 1
    +                         thread_name  = 'compute_%d_%d_%d' % (x,y,p)
    +
    +                     mapping.addThread( vspace, thread_name, False , x, y, p,
    +                                       'router_stack_%d_%d_%d' % (x,y,p),
    +                                       'router_heap_%d_%d' %(x,y),
    +                                       start_index )

          # extend mapping name
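
    The new router.py mapping gives each cluster one heap vseg and each thread one stack vseg, with
    the slot at stack_base reserved for main_stack, which is why the per-thread stack base adds an
    extra stack_size. Each processor then receives one thread (producer on proc[0,0,0], consumer on
    proc[0,0,1], compute elsewhere), identified in the start_vector by the start_index argument of
    addThread(). The short standalone program below (not part of the changeset; the 2x2 mesh with 4
    processors per cluster is an assumed example configuration) reproduces that address arithmetic
    with the constants taken from the hunk above.

        #include <stdio.h>

        int main( void )
        {
            unsigned int x_size     = 2;            /* assumed number of cluster columns */
            unsigned int y_size     = 2;            /* assumed number of cluster rows    */
            unsigned int nprocs     = 4;            /* assumed processors per cluster    */

            unsigned int stack_base = 0x30000000;   /* 64 Kbytes per thread (from hunk)  */
            unsigned int stack_size = 0x00010000;
            unsigned int heap_base  = 0x40000000;   /* 2 Mbytes per cluster (from hunk)  */
            unsigned int heap_size  = 0x00200000;

            unsigned int x, y, p;

            printf( "main_stack           : 0x%x\n", stack_base );

            for ( x = 0 ; x < x_size ; x++ )
            {
                for ( y = 0 ; y < y_size ; y++ )
                {
                    unsigned int cluster_id = (x * y_size) + y;

                    /* one heap per cluster, as in the router_heap_%d_%d vsegs */
                    printf( "router_heap_%u_%u     : 0x%x\n",
                            x, y, heap_base + (cluster_id * heap_size) );

                    /* one stack per thread; the extra stack_size skips the
                       main_stack slot located at stack_base                 */
                    for ( p = 0 ; p < nprocs ; p++ )
                    {
                        unsigned int proc_id = (cluster_id * nprocs) + p;
                        printf( "router_stack_%u_%u_%u  : 0x%x\n", x, y, p,
                                stack_base + (proc_id * stack_size) + stack_size );
                    }
                }
            }
            return 0;
        }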