Changeset 347 for soft/giet_vm


Ignore:
Timestamp:
Jun 29, 2014, 12:27:21 PM (10 years ago)
Author:
alain
Message:

Introducing support for distributed page tables, kernel code and user code,
(if it is requested in the mapping).

File:
1 edited

Legend:

Unmodified
Added
Removed
  • soft/giet_vm/giet_boot/boot.c

    r345 r347  
    1414// and where there is one physical memory bank per cluster.
    1515//
    16 // This code is executed in the boot phase by proc[0] and performs the following tasks:
    17 // - load into memory the giet_vm binary files, contained in a FAT32 file system,
     16// This code, executed in the boot phase by proc[0,0,0], performs the following tasks:
     17// - load into memory the binary files, from a FAT32 file system,
    1818// - build the various page tables (one page table per vspace)
    1919// - initialize the schedulers (one scheduler per processor)
     
    7474#include <tty_driver.h>
    7575#include <xcu_driver.h>
    76 #include <ioc_driver.h>
     76#include <bdv_driver.h>
    7777#include <dma_driver.h>
    7878#include <cma_driver.h>
     
    140140extern fat32_fs_t fat;
    141141
    142 // Page table addresses arrays
     142// Page tables base addresses, sizes, and PT2 allocators:
     143// For each vspace, it can exist one page table per cluster,
     144// but only one virtual base address per vspace
     145
    143146__attribute__((section (".bootdata")))
    144 volatile paddr_t      _ptabs_paddr[GIET_NB_VSPACE_MAX];
     147unsigned int _ptabs_vaddr[GIET_NB_VSPACE_MAX];
    145148
    146149__attribute__((section (".bootdata")))
    147 volatile unsigned int _ptabs_vaddr[GIET_NB_VSPACE_MAX];
    148 
    149 // Next free PT2 index array
     150paddr_t _ptabs_paddr[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
     151
    150152__attribute__((section (".bootdata")))
    151 volatile unsigned int _next_free_pt2[GIET_NB_VSPACE_MAX] =
    152 { [0 ... GIET_NB_VSPACE_MAX - 1] = 0 };
    153 
    154 // Max PT2 index
     153unsigned int _ptabs_max_pt2[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
     154
    155155__attribute__((section (".bootdata")))
    156 volatile unsigned int _max_pt2[GIET_NB_VSPACE_MAX] =
    157 { [0 ... GIET_NB_VSPACE_MAX - 1] = 0 };
     156unsigned int _ptabs_next_pt2[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
    158157
    159158// Scheduler pointers array (virtual addresses)
    160 // indexed by (x,y,lpid) : ((x << Y_WIDTH) + y)*NB_PROCS_MAX + lpid
     159// indexed by (x,y,lpid) : (((x << Y_WIDTH) + y) * NB_PROCS_MAX) + lpid
    161160__attribute__((section (".bootdata")))
    162 static_scheduler_t* _schedulers[NB_PROCS_MAX<<(X_WIDTH+Y_WIDTH)];
     161static_scheduler_t* _schedulers[1<<X_WIDTH][1<<Y_WIDTH][NB_PROCS_MAX];
    163162
    164163
     
    291290// boot_add_pte()
    292291// This function registers a new PTE in the page table defined
    293 // by the vspace_id argument, and updates both PT1 and PT2.
    294 // A new PT2 is used when required.
     292// by the vspace_id argument, and the (x,y) coordinates.
     293// It updates both the PT1 and PT2, and a new PT2 is used if required.
    295294// As the set of PT2s is implemented as a fixed size array (no dynamic
    296295// allocation), this function checks a possible overflow of the PT2 array.
    297296//////////////////////////////////////////////////////////////////////////////
    298297void boot_add_pte(unsigned int vspace_id,
     298                  unsigned int x,
     299                  unsigned int y,
    299300                  unsigned int vpn,
    300301                  unsigned int flags,
     
    304305    unsigned int ix1;
    305306    unsigned int ix2;
    306     paddr_t      pt1_pbase;     // PT1 physical base address
    307     paddr_t      pt2_pbase = 0; // PT2 physical base address
    308     paddr_t      pte_paddr;     // PTE physucal address
     307    paddr_t      pt2_pbase;     // PT2 physical base address
     308    paddr_t      pte_paddr;     // PTE physical address
    309309    unsigned int pt2_id;        // PT2 index
    310310    unsigned int ptd;           // PTD : entry in PT1
    311     unsigned int max_pt2;       // max number of PT2s for a given vspace
    312311
    313312    ix1 = vpn >> 9;         // 11 bits
    314313    ix2 = vpn & 0x1FF;      //  9 bits
    315314
    316     // check that the _max_pt2[vspace_id] has been set
    317     max_pt2 = _max_pt2[vspace_id];
     315    // get page table physical base address and size
     316    paddr_t      pt1_pbase = _ptabs_paddr[vspace_id][x][y];
     317    unsigned int max_pt2   = _ptabs_max_pt2[vspace_id][x][y];
    318318
    319319    if (max_pt2 == 0)
     
    325325    }
    326326
    327 
    328     // get page table physical base address
    329     pt1_pbase = _ptabs_paddr[vspace_id];
    330 
    331327    // get ptd in PT1
    332328    ptd = _physical_read(pt1_pbase + 4 * ix1);
    333329
    334     if ((ptd & PTE_V) == 0)    // invalid PTD: compute PT2 base address,
     330    if ((ptd & PTE_V) == 0)    // undefined PTD: compute PT2 base address,
    335331                               // and set a new PTD in PT1
    336332    {
    337         pt2_id = _next_free_pt2[vspace_id];
     333        pt2_id = _ptabs_next_pt2[vspace_id][x][y];
    338334        if (pt2_id == max_pt2)
    339335        {
    340336            _puts("\n[BOOT ERROR] in boot_add_pte() function\n");
    341             _puts("the length of the ptab vobj is too small\n");
    342 
     337            _puts("the length of the PTAB vobj is too small\n");
    343338            _puts(" max_pt2 = ");
    344339            _putd( max_pt2 );
     
    347342            _putd( pt2_id );
    348343            _puts("\n");
    349            
    350344            _exit();
    351345        }
    352         else
    353         {
    354             pt2_pbase = pt1_pbase + PT1_SIZE + PT2_SIZE * pt2_id;
    355             ptd = PTE_V | PTE_T | (unsigned int) (pt2_pbase >> 12);
    356             _physical_write( pt1_pbase + 4 * ix1, ptd);
    357             _next_free_pt2[vspace_id] = pt2_id + 1;
    358         }
     346
     347        pt2_pbase = pt1_pbase + PT1_SIZE + PT2_SIZE * pt2_id;
     348        ptd = PTE_V | PTE_T | (unsigned int) (pt2_pbase >> 12);
     349        _physical_write( pt1_pbase + 4 * ix1, ptd);
     350        _ptabs_next_pt2[vspace_id][x][y] = pt2_id + 1;
    359351    }
    360352    else                       // valid PTD: compute PT2 base address
     
    392384
    393385
    394 /////////////////////////////////////////////////////////////////////
    395 // This function build the page table for a given vspace.
     386////////////////////////////////////////////////////////////////////////
     387// This function builds the page table(s) for a given vspace.
     388// It builds as many page tables as the number of vobjs having
     389// the PTAB type in the vspace, because page tables can be replicated.
    396390// The physical base addresses for all vsegs (global and private)
    397391// must have been previously computed and stored in the mapping.
    398 // It initializes the MWMR channels.
    399 /////////////////////////////////////////////////////////////////////
     392//
     393// General rule regarding local / shared vsegs:
     394// - shared vsegs are mapped in all page tables
     395// - local vsegs are mapped only in the "local" page table
     396////////////////////////////////////////////////////////////////////////
    400397void boot_vspace_pt_build(unsigned int vspace_id)
    401398{
    402     unsigned int vseg_id;
     399    unsigned int ptab_id;       // global index for a vseg containing a PTAB
     400    unsigned int priv_id;       // global index for a private vseg in a vspace
     401    unsigned int glob_id;       // global index for a global vseg
    403402    unsigned int npages;
    404403    unsigned int ppn;
     
    408407    unsigned int verbose = 0;   // can be used to activate trace in add_pte()
    409408
    410     mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    411     mapping_vspace_t * vspace = _get_vspace_base(header);
    412     mapping_vseg_t   * vseg   = _get_vseg_base(header);
    413 
    414     // private segments
    415     for (vseg_id = vspace[vspace_id].vseg_offset;
    416          vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs);
    417          vseg_id++)
    418     {
    419         vpn = vseg[vseg_id].vbase >> 12;
    420         ppn = (unsigned int) (vseg[vseg_id].pbase >> 12);
    421 
    422         npages = vseg[vseg_id].length >> 12;
    423         if ((vseg[vseg_id].length & 0xFFF) != 0) npages++;
    424 
    425         flags = PTE_V;
    426         if (vseg[vseg_id].mode & C_MODE_MASK) flags |= PTE_C;
    427         if (vseg[vseg_id].mode & X_MODE_MASK) flags |= PTE_X;
    428         if (vseg[vseg_id].mode & W_MODE_MASK) flags |= PTE_W;
    429         if (vseg[vseg_id].mode & U_MODE_MASK) flags |= PTE_U;
     409    mapping_header_t  * header  = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
     410    mapping_vspace_t  * vspace  = _get_vspace_base(header);
     411    mapping_vseg_t    * vseg    = _get_vseg_base(header);
     412    mapping_vobj_t    * vobj    = _get_vobj_base(header);
     413    mapping_pseg_t    * pseg    = _get_pseg_base(header);
     414    mapping_cluster_t * cluster = _get_cluster_base(header);
     415
     416    // external loop on private vsegs to find all PTAB vobjs in vspace
     417    for (ptab_id = vspace[vspace_id].vseg_offset;
     418         ptab_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs);
     419         ptab_id++)
     420    {
     421        // get global index of first vobj in vseg
     422        unsigned int vobj_id = vseg[ptab_id].vobj_offset;
     423
     424        if ( vobj[vobj_id].type == VOBJ_TYPE_PTAB )
     425        {
     426            // get cluster coordinates for the PTAB
     427            unsigned int ptab_pseg_id    = vseg[ptab_id].psegid;
     428            unsigned int ptab_cluster_id = pseg[ptab_pseg_id].clusterid;
     429            unsigned int x_ptab          = cluster[ptab_cluster_id].x;
     430            unsigned int y_ptab          = cluster[ptab_cluster_id].y;
     431
     432            // internal loop on private vsegs to build
     433            // the (vspace_id, x_ptab, y_ptab) page table
     434            for (priv_id = vspace[vspace_id].vseg_offset;
     435                 priv_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs);
     436                 priv_id++)
     437            {
     438                // get cluster coordinates for private vseg
     439                unsigned int priv_pseg_id    = vseg[priv_id].psegid;
     440                unsigned int priv_cluster_id = pseg[priv_pseg_id].clusterid;
     441                unsigned int x_priv          = cluster[priv_cluster_id].x;
     442                unsigned int y_priv          = cluster[priv_cluster_id].y;
     443
     444                // only non local or matching private vsegs must be mapped
     445                if ( (vseg[priv_id].local == 0 ) ||
     446                     ((x_ptab == x_priv) && (y_ptab == y_priv)) )
     447                {
     448                    vpn = vseg[priv_id].vbase >> 12;
     449                    ppn = (unsigned int) (vseg[priv_id].pbase >> 12);
     450                    npages = vseg[priv_id].length >> 12;
     451                    if ((vseg[priv_id].length & 0xFFF) != 0) npages++;
     452
     453                    flags = PTE_V;
     454                    if (vseg[priv_id].mode & C_MODE_MASK) flags |= PTE_C;
     455                    if (vseg[priv_id].mode & X_MODE_MASK) flags |= PTE_X;
     456                    if (vseg[priv_id].mode & W_MODE_MASK) flags |= PTE_W;
     457                    if (vseg[priv_id].mode & U_MODE_MASK) flags |= PTE_U;
    430458       
    431         // These three flags (Local, Remote and Dirty) are set to 1 to reduce
    432         // latency of TLB miss (L/R) and write (D): Avoid hardware update
    433         // mechanism for these flags. This optimization can be performed
    434         // because GIET_VM does nothing with these flags.
    435 
    436         flags |= PTE_L;
    437         flags |= PTE_R;
    438         flags |= PTE_D;
     459                    // The three flags (Local, Remote and Dirty) are set to 1 to reduce
     460                    // latency of TLB miss (L/R) and write (D): Avoid hardware update
     461                    // mechanism for these flags because GIET_VM does not use these flags.
     462
     463                    flags |= PTE_L;
     464                    flags |= PTE_R;
     465                    flags |= PTE_D;
    439466
    440467#if BOOT_DEBUG_PT
    441         _puts(vseg[vseg_id].name);
    442         _puts(" : flags = ");
    443         _putx(flags);
    444         _puts(" / npages = ");
    445         _putd(npages);
    446         _puts(" / pbase = ");
    447         _putl(vseg[vseg_id].pbase);
    448         _puts("\n");
    449 #endif
    450         // loop on 4K pages
    451         for (page_id = 0; page_id < npages; page_id++)
    452         {
    453             boot_add_pte(vspace_id, vpn, flags, ppn, verbose);
    454             vpn++;
    455             ppn++;
    456         }
    457     }
    458 
    459     // global segments
    460     for (vseg_id = 0; vseg_id < header->globals; vseg_id++)
    461     {
    462         vpn = vseg[vseg_id].vbase >> 12;
    463         ppn = (unsigned int)(vseg[vseg_id].pbase >> 12);
    464         npages = vseg[vseg_id].length >> 12;
    465         if ((vseg[vseg_id].length & 0xFFF) != 0) npages++;
    466 
    467         flags = PTE_V;
    468         if (vseg[vseg_id].mode & C_MODE_MASK) flags |= PTE_C;
    469         if (vseg[vseg_id].mode & X_MODE_MASK) flags |= PTE_X;
    470         if (vseg[vseg_id].mode & W_MODE_MASK) flags |= PTE_W;
    471         if (vseg[vseg_id].mode & U_MODE_MASK) flags |= PTE_U;
    472 
    473         // Flags set for optimization (as explained above)
    474 
    475         flags |= PTE_L;
    476         flags |= PTE_R;
    477         flags |= PTE_D;
     468_puts(vseg[priv_id].name);
     469_puts(" : flags = ");
     470_putx(flags);
     471_puts(" / npages = ");
     472_putd(npages);
     473_puts(" / pbase = ");
     474_putl(vseg[priv_id].pbase);
     475_puts("\n");
     476#endif
     477                    // loop on 4K pages
     478                    for (page_id = 0; page_id < npages; page_id++)
     479                    {
     480                        boot_add_pte(vspace_id, x_ptab, y_ptab, vpn, flags, ppn, verbose);
     481                        vpn++;
     482                        ppn++;
     483                    }
     484                }
     485            }  // end internal loop on private vsegs
     486
     487            // internal loop on global vsegs to build the (x_ptab,y_ptab) page table
     488            for (glob_id = 0; glob_id < header->globals; glob_id++)
     489            {
     490                // get cluster coordinates for global vseg
     491                unsigned int glob_pseg_id    = vseg[glob_id].psegid;
     492                unsigned int glob_cluster_id = pseg[glob_pseg_id].clusterid;
     493                unsigned int x_glob          = cluster[glob_cluster_id].x;
     494                unsigned int y_glob          = cluster[glob_cluster_id].y;
     495
     496                // only non local or matching global vsegs must be mapped
     497                if ( (vseg[glob_id].local == 0 ) ||
     498                     ((x_ptab == x_glob) && (y_ptab == y_glob)) )
     499                {
     500                    vpn = vseg[glob_id].vbase >> 12;
     501                    ppn = (unsigned int)(vseg[glob_id].pbase >> 12);
     502                    npages = vseg[glob_id].length >> 12;
     503                    if ((vseg[glob_id].length & 0xFFF) != 0) npages++;
     504
     505                    flags = PTE_V;
     506                    if (vseg[glob_id].mode & C_MODE_MASK) flags |= PTE_C;
     507                    if (vseg[glob_id].mode & X_MODE_MASK) flags |= PTE_X;
     508                    if (vseg[glob_id].mode & W_MODE_MASK) flags |= PTE_W;
     509                    if (vseg[glob_id].mode & U_MODE_MASK) flags |= PTE_U;
     510
     511                    // Flags set for optimization (as explained above)
     512
     513                    flags |= PTE_L;
     514                    flags |= PTE_R;
     515                    flags |= PTE_D;
    478516
    479517#if BOOT_DEBUG_PT
    480         _puts(vseg[vseg_id].name);
    481         _puts(" : flags = ");
    482         _putx(flags);
    483         _puts(" / npages = ");
    484         _putd(npages);
    485         _puts(" / pbase = ");
    486         _putl(vseg[vseg_id].pbase);
    487         _puts("\n");
    488 #endif
    489         // loop on 4K pages
    490         for (page_id = 0; page_id < npages; page_id++)
    491         {
    492             boot_add_pte(vspace_id, vpn, flags, ppn, verbose);
    493             vpn++;
    494             ppn++;
    495         }
    496     }
     518_puts(vseg[glob_id].name);
     519_puts(" : flags = ");
     520_putx(flags);
     521_puts(" / npages = ");
     522_putd(npages);
     523_puts(" / pbase = ");
     524_putl(vseg[glob_id].pbase);
     525_puts("\n");
     526#endif
     527                    // loop on 4K pages
     528                    for (page_id = 0; page_id < npages; page_id++)
     529                    {
     530                        boot_add_pte(vspace_id, x_ptab, y_ptab, vpn, flags, ppn, verbose);
     531                        vpn++;
     532                        ppn++;
     533                    }
     534                }
     535            }   // end internal loop on global vsegs
     536
     537            _puts("\n[BOOT] Page Table for vspace ");
     538            _puts( vspace[vspace_id].name );
     539            _puts(" in cluster[");
     540            _putd( x_ptab );
     541            _puts(",");
     542            _putd( y_ptab );
     543            _puts("] completed at cycle ");
     544            _putd( _get_proctime() );
     545            _puts("\n");
     546
     547#if BOOT_DEBUG_PT
     548_puts("vaddr = ");
     549_putx( _ptabs_vaddr[vspace_id] );
     550_puts(" / paddr = ");
     551_putl( _ptabs_paddr[vspace_id][x_ptab][y_ptab] );
     552_puts(" / PT2 number = ");
     553_putd( _ptabs_next_pt2[vspace_id][x_ptab][y_ptab] );
     554_puts("\n");
     555#endif
     556
     557        }  // end if PTAB
     558    }  // end first loop on private vsegs
    497559}   // end boot_vspace_pt_build()
    498560
     
    517579// Set pbase for a vseg when identity mapping is required.
    518580// The length of the vseg must be known.
    519 // The ordered linked list of vsegs mapped on pseg must be updated,
    520 // and overlap with previously mapped vsegs must be checked.
     581// The ordered linked list of vsegs mapped on pseg is updated,
     582// and overlap with previously mapped vsegs is checked.
    521583///////////////////////////////////////////////////////////////////////////
    522584void boot_vseg_set_paddr_ident(mapping_vseg_t * vseg)
     
    713775// It updates the pbase and the length fields of the vseg.
    714776// It updates the pbase and vbase fields of all vobjs in the vseg.
    715 // It updates the _ptabs_paddr[] and _ptabs_vaddr[] arrays.
     777// It updates the _ptabs_paddr[] and _ptabs_vaddr[], _ptabs_max_pt2[],
     778// and _ptabs_next_pt2[] arrays.
    716779// It is a global vseg if vspace_id = (-1).
    717780///////////////////////////////////////////////////////////////////////////
     
    727790    mapping_vobj_t   * vobj   = _get_vobj_base(header);
    728791
    729     // loop on the vobjs contained in vseg to compute
     792    // first loop on the vobjs contained in vseg to compute
    730793    // the vseg length, required for mapping.
    731794    cur_length = 0;
     
    771834        vobj[vobj_id].paddr = cur_paddr;
    772835       
    773         // initialize _ptabs_vaddr[] & boot_ptabs-paddr[] if PTAB
     836        // initialize _ptabs_vaddr[] , _ptabs-paddr[] , _ptabs_max_pt2[] if PTAB
    774837        if (vobj[vobj_id].type == VOBJ_TYPE_PTAB)
    775838        {
     
    788851                _exit();
    789852            }
    790             // register both physical and virtual page table address
    791             _ptabs_vaddr[vspace_id] = vobj[vobj_id].vaddr;
    792             _ptabs_paddr[vspace_id] = vobj[vobj_id].paddr;
     853            // get cluster coordinates for PTAB
     854            unsigned int cluster_xy = (unsigned int)(cur_paddr>>32);
     855            unsigned int x          = cluster_xy >> Y_WIDTH;
     856            unsigned int y          = cluster_xy & ((1<<Y_WIDTH)-1);
     857
     858            // register physical and virtual page table addresses, size, and next PT2
     859            _ptabs_vaddr[vspace_id]          = vobj[vobj_id].vaddr;
     860            _ptabs_paddr[vspace_id][x][y]    = vobj[vobj_id].paddr;
     861            _ptabs_max_pt2[vspace_id][x][y]  = (vobj[vobj_id].length - PT1_SIZE) / PT2_SIZE;
     862            _ptabs_next_pt2[vspace_id][x][y] = 0;
    793863           
    794864            // reset all valid bits in PT1
     
    797867                _physical_write(cur_paddr + offset, 0);
    798868            }
    799 
    800             // computing the number of second level pages
    801             _max_pt2[vspace_id] = (vobj[vobj_id].length - PT1_SIZE) / PT2_SIZE;
    802869        }
    803870
     
    809876}    // end boot_vseg_map()
    810877
    811 /////////////////////////////////////////////////////////////////////
     878///////////////////////////////////////////////////////////////////////////
    812879// This function builds the page tables for all virtual spaces
    813880// defined in the mapping_info data structure, in three steps:
     
    816883// - step 2 : It computes the physical base address for all private
    817884//            vsegs and all vobjs in each virtual space.
    818 // - step 3 : It actually fill the page table for each vspace.
     885// - step 3 : It actually fills the page table(s) for each vspace.
    819886//
    820 // Note: It must exist at least one vspace in the mapping_info...
    821 /////////////////////////////////////////////////////////////////////
     887// It must exist at least one vspace in the mapping.
     888// For each vspace, it can exist one page table per cluster.
     889///////////////////////////////////////////////////////////////////////////
    822890void boot_pt_init()
    823891{
     
    841909#endif
    842910
     911    //////////////////////////////////
    843912    // step 1 : loop on global vsegs
    844913
     
    857926    }
    858927
     928    ////////////////////////////////////////////////////////////
    859929    // step 2 : loop on virtual vspaces to map private vsegs
     930
    860931    for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++)
    861932    {
     
    867938#endif
    868939
    869         // vsegs with identity mapping constraint first
    870940        for (vseg_id = vspace[vspace_id].vseg_offset;
    871941             vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs);
    872942             vseg_id++)
    873943        {
    874             if (vseg[vseg_id].ident == 1)
    875                 boot_vseg_map(&vseg[vseg_id], vspace_id);
    876         }
    877         // unconstrained vsegs second
    878         for (vseg_id = vspace[vspace_id].vseg_offset;
    879              vseg_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs);
    880              vseg_id++)
    881         {
    882             if (vseg[vseg_id].ident == 0)
    883                 boot_vseg_map(&vseg[vseg_id], vspace_id);
     944            // private vsegs cannot be identity mapping
     945            if (vseg[vseg_id].ident != 0)
     946            {
     947                _puts("\n[BOOT ERROR] in boot_pt_init() : vspace ");
     948                _puts( vspace[vspace_id].name );
     949                _puts(" contains vseg with identity mapping\n");
     950                _exit();
     951            }
     952
     953            boot_vseg_map(&vseg[vseg_id], vspace_id);
    884954        }
    885955    }
     
    917987#endif
    918988
     989    /////////////////////////////////////////////////////////////
    919990    // step 3 : loop on the vspaces to build the page tables
    920991    for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++)
     
    9291000        boot_vspace_pt_build(vspace_id);
    9301001
    931         _puts("\n[BOOT] Page Table for vspace \"");
    932         _puts( vspace[vspace_id].name );
    933         _puts("\" completed at cycle ");
    934         _putd( _get_proctime() );
    935         _puts("\n");
    936 
    937 #if BOOT_DEBUG_PT
    938 _puts("  vaddr = ");
    939 _putx( _ptabs_vaddr[vspace_id] );
    940 _puts(" / paddr = ");
    941 _putl( _ptabs_paddr[vspace_id] );
    942 _puts(" / PT2 number = ");
    943 _putd( _max_pt2[vspace_id] );
    944 _puts("\n");
    945 #endif
    9461002    }
    9471003} // end boot_pt_init()
     
    9721028#endif
    9731029
    974         _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id] >> 13) );
     1030        _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id][0][0] >> 13) );
    9751031
    9761032        unsigned int ptab_found = 0;
     
    12241280// This function initialises all processors schedulers.
    12251281// This is done by processor 0, and the MMU must be activated.
    1226 // - In Step 1, it initialises the _schedulers[gpid] pointers array, and scan
     1282// - In Step 1, it initialises the _schedulers[] pointers array, and scan
    12271283//              the processors to initialise the schedulers, including the
    12281284//              idle_task context (ltid == 14) and HWI / SWI / PTI vectors.
     
    12771333    //          and the interrupt vectors.
    12781334    // Implementation note:
    1279     // We need to use both proc_id to scan the mapping info structure,
    1280     // and lpid to access the schedulers array.
    1281     // - the _schedulers[] array of pointers can contain "holes", because
    1282     //   it is indexed by the global pid = cluster_xy*NB_PROCS_MAX + lpid
    1283     // - the mapping info array of processors is contiguous, it is indexed
    1284     //   by proc_id, and use an offset specific in each cluster.
     1335    // We need to use both (proc_id) to scan the mapping info structure,
     1336    // and (x,y,lpid) to access the schedulers array.
    12851337
    12861338    for (cluster_id = 0 ; cluster_id < X_SIZE*Y_SIZE ; cluster_id++)
     
    12881340        unsigned int x          = cluster[cluster_id].x;
    12891341        unsigned int y          = cluster[cluster_id].y;
    1290         unsigned int cluster_xy = (x<<Y_WIDTH) + y;
    12911342
    12921343#if BOOT_DEBUG_SCHED
     
    13711422            {
    13721423                // set the schedulers pointers array
    1373                 _schedulers[cluster_xy * NB_PROCS_MAX + lpid] =
    1374                       (static_scheduler_t*)&psched[lpid];
     1424                _schedulers[x][y][lpid] = (static_scheduler_t*)&psched[lpid];
    13751425
    13761426#if BOOT_DEBUG_SCHED
     
    14111461                psched[lpid].context[IDLE_TASK_INDEX][CTX_CR_ID]    = 0;
    14121462                psched[lpid].context[IDLE_TASK_INDEX][CTX_SR_ID]    = 0xFF03;
    1413                 psched[lpid].context[IDLE_TASK_INDEX][CTX_PTPR_ID]  = _ptabs_paddr[0]>>13;
     1463                psched[lpid].context[IDLE_TASK_INDEX][CTX_PTPR_ID]  = _ptabs_paddr[0][x][y]>>13;
    14141464                psched[lpid].context[IDLE_TASK_INDEX][CTX_PTAB_ID]  = _ptabs_vaddr[0];
    14151465                psched[lpid].context[IDLE_TASK_INDEX][CTX_TTY_ID]   = 0;
     
    14951545            unsigned int y          = cluster[cluster_id].y;
    14961546            unsigned int cluster_xy = (x<<Y_WIDTH) + y;
    1497             psched                  = _schedulers[cluster_xy * NB_PROCS_MAX];
     1547            psched                  = _schedulers[x][y][0];
    14981548
    14991549            // update WTI vector for scheduler[cluster_id][lpid]
     
    15151565    unsigned int x          = cluster[cluster_id].x;
    15161566    unsigned int y          = cluster[cluster_id].y;
    1517     unsigned int cluster_xy = (x<<Y_WIDTH) + y;
    1518     psched                  = _schedulers[cluster_xy * NB_PROCS_MAX];
     1567    psched                  = _schedulers[x][y][0];
    15191568    unsigned int slot;
    15201569    unsigned int entry;
     
    15821631        // We must set the PTPR depending on the vspace, because the start_vector
    15831632        // and the stack address are defined in virtual space.
    1584         _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id] >> 13) );
     1633        _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id][0][0] >> 13) );
    15851634
    15861635        // loop on the tasks in vspace (task_id is the global index)
     
    16041653            // compute gpid (global processor index) and scheduler base address
    16051654            unsigned int gpid = cluster_xy * NB_PROCS_MAX + lpid;
    1606             psched            = _schedulers[gpid];
     1655            psched            = _schedulers[x][y][lpid];
    16071656
    16081657            // ctx_sr : value required before an eret instruction
     
    16101659
    16111660            // ctx_ptpr : page table physical base address (shifted by 13 bit)
    1612             unsigned int ctx_ptpr = (unsigned int)(_ptabs_paddr[vspace_id] >> 13);
     1661            unsigned int ctx_ptpr = (unsigned int)(_ptabs_paddr[vspace_id][x][y] >> 13);
    16131662
    16141663            // ctx_ptab : page_table virtual base address
     
    18111860void boot_mapping_init()
    18121861{
     1862    // deactivates IOC interrupt
    18131863    _ioc_init( 0 );
    18141864
     1865    // open file "map.bin"
    18151866    int fd_id = _fat_open( IOC_BOOT_MODE,
    18161867                           "map.bin",
    18171868                           0 );         // no creation
    1818 
    18191869    if ( fd_id == -1 )
    18201870    {
     
    18291879#endif
    18301880
     1881    // get "map.bin" file size (from fat32) and check it
    18311882    unsigned int size    = fat.fd[fd_id].file_size;
     1883
     1884    if ( size > SEG_BOOT_MAPPING_SIZE )
     1885    {
     1886        _puts("\n[BOOT ERROR] : allocated segment too small for map.bin file\n");
     1887        _exit();
     1888    }
     1889
     1890    // load "map.bin" file into buffer
    18321891    unsigned int nblocks = size >> 9;
    18331892    unsigned int offset  = size & 0x1FF;
     
    18461905    _fat_close( fd_id );
    18471906   
     1907    // check the mapping consistency
    18481908    boot_mapping_check();
     1909
    18491910} // end boot_mapping_init()
    18501911
    18511912
    1852 //////////////////////////////////////////////////////////////////////////////////
    1853 // This function open the .elf file identified by the "pathname" argument.
    1854 // It loads the complete file in a dedicated buffer, it copies all loadable
    1855 // segments  at the memory virtual address defined in the .elf file,
    1856 // and close the file.
    1857 // Notes:
    1858 // - The processor PTPR should contain the value corresponding to the
    1859 //   vspace containing the .elf file.
    1860 // - As this function requires a temporary memory buffer
    1861 //   to load the complete .elf file before to copy the various segments
    1862 //   to te proper location, it uses the seg_boot_buffer defined in map.xml.
    1863 //////////////////////////////////////////////////////////////////////////////////
    1864 void load_one_elf_file( unsigned int mode,
     1913/////////////////////////////////////////////////////////////////////////////////////
     1914// This function load all loadable segments for one .elf file, identified
     1915// by the "pathname" argument. Some loadable segments can be copied in several
     1916// clusters: same virtual address but different physical addresses. 
     1917// - It opens the file.
     1918// - It loads the complete file in a dedicated buffer (seg_boot_buffer).
     1919// - It copies each loadable segment at the virtual address defined in the .elf
     1920//   file, making several copies if the target vseg is not local.
     1921// - It closes the file.
     1922// Note:
     1923// - This function is supposed to be executed by processor[0,0,0].
     1924//   We must use physical addresses to reach the destination buffers that
     1925//   can be located in remote clusters. We use either a _physical_memcpy(),
     1926//   or a _dma_physical_copy() if DMA is available.
     1927//   The source seg_boot_buffer must be identity mapped.
     1928//////////////////////////////////////////////////////////////////////////////////////
     1929void load_one_elf_file( unsigned int is_kernel,     // kernel file if non zero
    18651930                        char*        pathname,
    1866                         unsigned int vspace_id )    // to use the proper page_table
     1931                        unsigned int vspace_id )    // to scan the proper vspace
    18671932{
     1933    mapping_header_t  * header  = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
     1934    mapping_vspace_t  * vspace  = _get_vspace_base(header);
     1935    mapping_vseg_t    * vseg    = _get_vseg_base(header);
     1936    mapping_vobj_t    * vobj    = _get_vobj_base(header);
     1937
    18681938    unsigned int seg_id;
    18691939
     
    18811951
    18821952    // open .elf file
    1883     int fd_id = _fat_open( mode,
     1953    int fd_id = _fat_open( IOC_BOOT_MODE,
    18841954                           pathname,
    18851955                           0 );      // no creation
     
    19071977
    19081978    // load file in boot_buffer
    1909     if( _fat_read( mode,
     1979    if( _fat_read( IOC_BOOT_MODE,
    19101980                   fd_id,
    19111981                   boot_buffer,
     
    19472017    unsigned int nsegments   = elf_header_ptr->e_phnum;
    19482018
    1949 #if BOOT_DEBUG_ELF
    1950 _puts("\n[BOOT DEBUG] File ");
    1951 _puts( pathname );
    1952 _puts(" loaded at cycle ");
    1953 _putd( _get_proctime() );
    1954 _puts(" / bytes = ");
    1955 _putd( nbytes );
    1956 _puts(" / sectors = ");
    1957 _putd( nsectors );
    1958 _puts("\n");
    1959 #endif
    1960 
    1961     // Loop on loadable segments in the ELF file
     2019    _puts("\n[BOOT] File ");
     2020    _puts( pathname );
     2021    _puts(" loaded at cycle ");
     2022    _putd( _get_proctime() );
     2023    _puts("\n");
     2024
     2025    // Loop on loadable segments in the .elf file
    19622026    for (seg_id = 0 ; seg_id < nsegments ; seg_id++)
    19632027    {
     
    19702034            unsigned int seg_memsz  = elf_pht_ptr[seg_id].p_memsz;
    19712035
     2036#if BOOT_DEBUG_ELF
     2037_puts(" - segment ");
     2038_putd( seg_id );
     2039_puts(" / vaddr = ");
     2040_putx( seg_vaddr );
     2041_puts(" / file_size = ");
     2042_putx( seg_filesz );
     2043_puts("\n");
     2044#endif
     2045
    19722046            if( seg_memsz < seg_filesz )
    19732047            {
     
    19762050                _puts(" in file ");
    19772051                _puts( pathname );
    1978                 _puts(" has a wrong size \n");   
     2052                _puts(" has a memsz < filesz \n");   
    19792053                _exit();
    19802054            }
     
    19892063            unsigned int src_vaddr = (unsigned int)boot_buffer + seg_offset;
    19902064
    1991 #if BOOT_DEBUG_ELF
    1992 _puts(" - segment ");
    1993 _putd( seg_id );
    1994 _puts(" / dst_vaddr = ");
    1995 _putx( seg_vaddr );
    1996 _puts(" / src_vaddr = ");
    1997 _putx( src_vaddr );
    1998 _puts(" / size = ");
    1999 _putx( seg_filesz );
    2000 _puts("\n");
    2001 #endif
    2002 
    2003             // copy the segment from boot buffer to destination buffer
    2004             if( NB_DMA_CHANNELS > 0 )
    2005             {
    2006                 _dma_copy( 0,              // DMA cluster index
    2007                            0,              // DMA channel index
    2008                            vspace_id,     
    2009                            seg_vaddr,
    2010                            src_vaddr,
    2011                            seg_filesz );
     2065            // search all vsegs matching the virtual address
     2066            unsigned int vseg_first;
     2067            unsigned int vseg_last;
     2068            unsigned int vseg_id;
     2069            unsigned int found = 0;
     2070            if ( is_kernel )
     2071            {
     2072                vseg_first = 0;
     2073                vseg_last  = header->globals;
    20122074            }
    20132075            else
    20142076            {
    2015                 memcpy( (char*)seg_vaddr,
    2016                         (char*)src_vaddr,
    2017                         seg_filesz );
     2077                vseg_first = vspace[vspace_id].vseg_offset;
     2078                vseg_last  = vseg_first + vspace[vspace_id].vsegs;
     2079            }
     2080
     2081            for ( vseg_id = vseg_first ; vseg_id < vseg_last ; vseg_id++ )
     2082            {
     2083                if ( seg_vaddr == vseg[vseg_id].vbase )  // matching
     2084                {
     2085                    found = 1;
     2086
     2087                    // get destination buffer physical address and size
     2088                    paddr_t      seg_paddr  = vseg[vseg_id].pbase;
     2089                    unsigned int vobj_id    = vseg[vseg_id].vobj_offset;
     2090                    unsigned int seg_size   = vobj[vobj_id].length;
     2091                   
     2092#if BOOT_DEBUG_ELF
     2093_puts("   loaded into vseg ");
     2094_puts( vseg[vseg_id].name );
     2095_puts(" at paddr = ");
     2096_putl( seg_paddr );
     2097_puts(" (buffer size = ");
     2098_putx( seg_size );
     2099_puts(")\n");
     2100#endif
     2101                    // check vseg size
     2102                    if ( seg_size < seg_filesz )
     2103                    {
     2104                        _puts("\n[BOOT ERROR] in load_one_elf_file()\n");
     2105                        _puts("vseg ");
     2106                        _puts( vseg[vseg_id].name );
     2107                        _puts(" is to small for loadable segment ");
     2108                        _putx( seg_vaddr );
     2109                        _puts(" in file ");
     2110                        _puts( pathname );
     2111                        _puts(" \n");   
     2112                        _exit();
     2113                    }
     2114
     2115                    // copy the segment from boot buffer to destination buffer
     2116                    // using DMA channel[0,0,0] if it is available.
     2117                    if( NB_DMA_CHANNELS > 0 )
     2118                    {
     2119                        _dma_physical_copy( 0,                  // DMA in cluster[0,0]
     2120                                            0,                  // DMA channel 0
     2121                                            (paddr_t)seg_paddr, // destination paddr
     2122                                            (paddr_t)src_vaddr, // source paddr
     2123                                            seg_filesz );       // size
     2124                    }
     2125                    else
     2126                    {
     2127                        _physical_memcpy( (paddr_t)seg_paddr,   // destination paddr
     2128                                          (paddr_t)src_vaddr,   // source paddr
     2129                                          seg_filesz );         // size
     2130                    }
     2131                }
     2132            }  // end for vsegs in vspace
     2133
     2134            // check at least one matching vseg
     2135            if ( found == 0 )
     2136            {
     2137                _puts("\n[BOOT ERROR] in load_one_elf_file()\n");
     2138                _puts("vseg for loadable segment ");
     2139                _putx( seg_vaddr );
     2140                _puts(" in file ");
     2141                _puts( pathname );
     2142                _puts(" not found \n");   
     2143                _exit();
    20182144            }
    20192145        }
    2020     } // end for segments
     2146    }  // end for loadable segments
    20212147
    20222148    // close .elf file
     
    20262152
    20272153
    2028 //////////////////////////////////////////////////////////////////////////////////
     2154//////////////////////////////////////////////////////////////////////////////////////
    20292155// This function uses the map.bin data structure to load the "kernel.elf" file
    2030 // as well as the various "application.elf" files.
    2031 // The "preloader.elf" file is not loaded, because it has been burned in the ROM.
    2032 // The "boot.elf" file is not loaded, because it has been loaded by the preloader.
     2156// as well as the various "application.elf" files into memory.
     2157// - The "preloader.elf" file is not loaded, because it has been burned in the ROM.
     2158// - The "boot.elf" file is not loaded, because it has been loaded by the preloader.
    20332159// This function scans all vobjs defined in the map.bin data structure to collect
    2034 // all .elf files pathnames, and calls the load_one_elf_file() function to
    2035 // load all loadable segments at the virtual address found in the .elf file.
    2036 //////////////////////////////////////////////////////////////////////////////////
     2160// all .elf file pathnames, and calls load_one_elf_file() for each .elf file.
     2161// As the code can be replicated in several vsegs, the same code can be copied
     2162// in one or several clusters by the load_one_elf_file() function.
     2163//////////////////////////////////////////////////////////////////////////////////////
    20372164void boot_elf_load()
    20382165{
     
    20632190    }
    20642191
    2065     load_one_elf_file( IOC_BOOT_MODE,
    2066                        vobj[vobj_id].binpath,
     2192    // Load the kernel
     2193    load_one_elf_file( 1,                           // kernel file
     2194                       vobj[vobj_id].binpath,       // file pathname
    20672195                       0 );                         // vspace 0
    20682196
    2069     _puts("\n[BOOT] File \"");
    2070     _puts( vobj[vobj_id].binpath );
    2071     _puts("\" loaded at cycle ");
    2072     _putd( _get_proctime() );
    2073     _puts("\n");
    2074 
    2075     // loop on the vspaces, scanning all vobjs in a vspace,
     2197    // loop on the vspaces, scanning all vobjs in the vspace,
    20762198    // to find the pathname of the .elf file associated to the vspace.
    20772199    for( vspace_id = 0 ; vspace_id < header->vspaces ; vspace_id++ )
    20782200    {
    2079         // Set PTPR depending on the vspace, as seg_data is defined in virtual space.
    2080         _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id] >> 13) );
    2081 
    20822201        // loop on the vobjs in vspace (vobj_id is the global index)
    20832202        unsigned int found = 0;
     
    21022221        }
    21032222
    2104         load_one_elf_file( IOC_BOOT_MODE,
    2105                            vobj[vobj_id].binpath,
    2106                            vspace_id );
    2107 
    2108         _puts("\n[BOOT] File \"");
    2109         _puts( vobj[vobj_id].binpath );
    2110         _puts("\" loaded at cycle ");
    2111         _putd( _get_proctime() );
    2112         _puts("\n");
     2223        load_one_elf_file( 0,                          // not a kernel file
     2224                           vobj[vobj_id].binpath,      // file pathname
     2225                           vspace_id );                // vspace index
    21132226
    21142227    }  // end for vspaces
    2115 
    2116     // restaure vspace 0 PTPR
    2117     _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[0] >> 13) );
    21182228
    21192229} // end boot_elf_load()
     
    21612271        {
    21622272            unsigned int type       = periph[periph_id].type;
     2273            unsigned int subtype    = periph[periph_id].subtype;
    21632274            unsigned int channels   = periph[periph_id].channels;
    21642275
     
    21672278                case PERIPH_TYPE_IOC:    // vci_block_device component
    21682279                {
    2169                     // initialize all channels except channel 0 because it has been
    2170                     // initialized by the preloader.
    2171                     for (channel_id = 1; channel_id < channels; channel_id++)
     2280                    if ( subtype == PERIPH_SUBTYPE_BDV )
    21722281                    {
    2173                         _ioc_init( channel_id );
     2282                        _bdv_lock.value = 0;
     2283#if BOOT_DEBUG_PERI
     2284_puts("- BDV : channels = ");
     2285_putd(channels);
     2286_puts("\n");
     2287#endif
    21742288                    }
    2175 #if BOOT_DEBUG_PERI
    2176 _puts("- IOC : channels = ");
    2177 _putd(channels);
    2178 _puts("\n");
    2179 #endif
     2289                    else if ( subtype == PERIPH_SUBTYPE_HBA )
     2290                    {
     2291                        // TODO
     2292                    }
     2293                    else if ( subtype == PERIPH_SUBTYPE_SPI )
     2294                    {
     2295                        // TODO
     2296                    }
    21802297                    break;
    21812298                }
     
    22082325                case PERIPH_TYPE_TTY:    // vci_multi_tty component
    22092326                {
    2210                     // nothing to do
     2327                    for (channel_id = 0; channel_id < channels; channel_id++)
     2328                    {
     2329                        _tty_lock[channel_id].value = 0;
     2330                        _tty_rx_full[channel_id]    = 0;
     2331                    }
    22112332#if BOOT_DEBUG_PERI
    22122333_puts("- TTY : channels = ");
     
    22182339                case PERIPH_TYPE_IOB:    // vci_io_bridge component
    22192340                {
    2220 #if BOOT_DEBUG_PERI
    2221 _puts("- IOB : channels = ");
    2222 _putd(channels);
    2223 _puts("\n");
    2224 #endif
    22252341                    if (GIET_USE_IOMMU)
    22262342                    {
     
    22362352                case PERIPH_TYPE_PIC:    // vci_iopic component
    22372353                {
    2238                          
    22392354#if BOOT_DEBUG_PERI
    22402355_puts("- PIC : channels = ");
     
    23292444// Most of this code is executed by Processor 0 only.
    23302445/////////////////////////////////////////////////////////////////////////
    2331 void boot_init()
     2446void boot_init() 
    23322447{
    23332448    mapping_header_t*  header     = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
     
    23412456        _puts("\n");
    23422457
    2343         // Loading the map.bin file into memory and checking it
     2458        // Load the map.bin file into memory and check it
    23442459        boot_mapping_init();
    23452460
     
    23502465        _puts("\n");
    23512466
    2352         // Building all page tables
     2467        // Build page tables
    23532468        boot_pt_init();
    23542469
    2355         // Activating proc 0 MMU
    2356         _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[0]>>13) );
     2470        // Activate MMU for proc [0,0,0]
     2471        _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[0][0][0]>>13) );
    23572472        _set_mmu_mode( 0xF );
    23582473
     
    23612476        _puts("\n");
    23622477
    2363         // Initialising private vobjs in vspaces
     2478        // Initialise private vobjs in vspaces
    23642479        boot_vobjs_init();
    23652480
     
    23682483        _puts("\n");
    23692484
    2370         // Initializing schedulers
     2485        // Initialise schedulers
    23712486        boot_schedulers_init();
    23722487
     
    23752490        _puts("\n");
    23762491       
    2377         // Setting CP0_SCHED register for proc 0
    2378         _set_sched( (unsigned int)_schedulers[0] );
    2379 
    2380         // Initializing non replicated peripherals
     2492        // Set CP0_SCHED register for proc [0,0,0]
     2493        _set_sched( (unsigned int)_schedulers[0][0][0] );
     2494
     2495        // Initialise non replicated peripherals
    23812496        boot_peripherals_init();
    23822497
     
    23942509        {
    23952510            unsigned int nprocs     = cluster[clusterid].procs;
    2396             unsigned int xdest      = cluster[clusterid].x;
    2397             unsigned int ydest      = cluster[clusterid].y;
    2398             unsigned int cluster_xy = (xdest<<Y_WIDTH) + ydest;
     2511            unsigned int x          = cluster[clusterid].x;
     2512            unsigned int y          = cluster[clusterid].y;
     2513            unsigned int cluster_xy = (x<<Y_WIDTH) + y;
    23992514
    24002515            for ( p = 0 ; p < nprocs; p++ )
     
    24142529
    24152530    // all processors initialise their SCHED register
    2416     _set_sched( (unsigned int)_schedulers[gpid] );
    2417 
    2418     // all processors (but Proc 0) activate MMU
     2531    unsigned int cluster_xy = gpid / NB_PROCS_MAX;
     2532    unsigned int lpid       = gpid % NB_PROCS_MAX;
     2533    unsigned int x          = cluster_xy >> Y_WIDTH;
     2534    unsigned int y          = cluster_xy & ((1<<Y_WIDTH)-1);
     2535    _set_sched( (unsigned int)_schedulers[x][y][lpid] );
     2536
     2537    // all processors (but Proc[0,0,0]) activate MMU
    24192538    if ( gpid != 0 )
    24202539    {
    2421         _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[0]>>13) );
     2540        _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[0][x][y]>>13) );
    24222541        _set_mmu_mode( 0xF );
    24232542    }
Note: See TracChangeset for help on using the changeset viewer.