Changeset 412 for soft/giet_vm/giet_boot


Timestamp:
Sep 29, 2014, 11:59:30 AM (10 years ago)
Author:
alain
Message:

Major evolution: the page table initialisation has been
completely redesigned to support both big pages (2 Mbytes)
and small pages (4 Kbytes) for both kernel and user vsegs.

File:
1 edited

  • soft/giet_vm/giet_boot/boot.c

    r392 r412  
    1 //////////////////////////////////////////////////////////////////////////////////////////
     1/////////////////////////////////////////////////////////////////////////////////////////
    22// File     : boot.c
    33// Date     : 01/11/2013
     
    1010// The virtual addresses are 32 bits and use the (unsigned int) type. The
    1111// physical addresses can have up to 40 bits, and use the (unsigned long long) type.
    12 // It natively supports clusterised shared mmemory multi-processors architectures,
     12// It natively supports clusterised shared memory multi-processors architectures,
    1313// where each processor is identified by a composite index (cluster_xy, local_id),
    1414// and where there is one physical memory bank per cluster.
     
    3838//    - classical memory protection, when several independent applications compiled
    3939//      in different virtual spaces are executing on the same hardware platform.
    40 //    - data placement in NUMA architectures, when we want to control the placement
    41 //      of the software objects (virtual segments) on the physical memory banks.
     40//    - data placement in NUMA architectures, to control the placement
     41//      of the software objects (vsegs) on the physical memory banks (psegs).
    4242//
     43//    The max number of vspaces (GIET_NB_VSPACE_MAX) is a configuration parameter.
    4344//    The page tables are statically built in the boot phase, and they do not
    44 //    change during execution. The GIET uses only 4 Kbytes pages.
    45 //    As most applications use only a limited number of segments, the number of PT2s
    46 //    actually used by a given virtual space is generally smaller than 2048, and is
    47 //    computed during the boot phase.
    48 //    The max number of virtual spaces (GIET_NB_VSPACE_MAX) is a configuration parameter.
     45//    change during execution.
     46//    The GIET_VM uses both small pages (4 Kbytes) and big pages (2 Mbytes).
    4947//
    5048//    Each page table (one page table per virtual space) is monolithic, and contains
    51 //    one PT1 and up to (GIET_NB_PT2_MAX) PT2s. The PT1 is addressed using the ix1 field
    52 //    (11 bits) of the VPN, and the selected PT2 is addressed using the ix2 field (9 bits).
    53 //    - PT1[2048] : a first 8K aligned array of unsigned int, indexed by (ix1) field of VPN.
    54 //    Each entry in the PT1 contains a 32 bits PTD. The MSB bit PTD[31] is
    55 //    the PTD valid bit, and LSB bits PTD[19:0] are the 20 MSB bits of the physical base
    56 //    address of the selected PT2.
    57 //    The PT1 contains 2048 PTD of 4 bytes => 8K bytes.
    58 //    - PT2[1024][GIET_NB_PT2_MAX] : an array of array of unsigned int.
    59 //    Each PT2[1024] must be 4K aligned, each entry in a PT2 contains two unsigned int:
    60 //    the first word contains the protection flags, and the second word contains the PPN.
    61 //    Each PT2 contains 512 PTE2 of 8bytes => 4K bytes.
    62 //    The total size of a page table is finally = 8K + (GIET_NB_PT2_MAX)*4K bytes.
     49//    one PT1 (8 Kbytes) and a variable number of PT2s (4 Kbytes each). For each vspace,
     50//    the number of PT2s is defined by the size of the PTAB vobj in the mapping.
     51//    The PT1 is indexed by the ix1 field (11 bits) of the VPN. Each entry is 32 bits.
     52//    A PT2 is indexed by the ix2 field (9 bits) of the VPN. Each entry is a double word.
     53//    The first word contains the flags, the second word contains the PPN.
     54//
     55//    The page tables can be distributed in all clusters.
    6356///////////////////////////////////////////////////////////////////////////////////////
    6457// Implementation Notes:
     
    6659// 1) The cluster_id variable is a linear index in the mapping_info array of clusters.
    6760//    We use the cluster_xy variable for the topological index = (x << Y_WIDTH) + y
     61//
    6862///////////////////////////////////////////////////////////////////////////////////////
    6963
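
To illustrate the two-level organisation described in the header comment above, the following minimal sketch (not part of the changeset) decomposes a 20-bit VPN into its ix1/ix2 fields and assembles PTE1 / PTE2 entries the same way boot_add_pte1() and boot_add_pte2() do further down in this diff. The PTE_V position, the flag mask and all example values are assumptions for illustration only.

    /* Minimal standalone sketch (assumed values, not the GIET_VM sources):
     * decompose a VPN into ix1/ix2 and build PTE1 / PTE2 entries following
     * the format used by boot_add_pte1() / boot_add_pte2() in this changeset. */
    #include <stdio.h>

    #define PTE_V 0x80000000     /* valid bit (assumed position) */

    int main(void)
    {
        unsigned int vpn   = 0x00A12;       /* 20-bit virtual page number   */
        unsigned int ppn   = 0x040A12;      /* 28-bit physical page number  */
        unsigned int flags = 0x14C00000;    /* 10 flag bits, left-justified */

        unsigned int ix1 = vpn >> 9;        /* 11 bits : index in PT1 (2048 PTDs) */
        unsigned int ix2 = vpn & 0x1FF;     /*  9 bits : index in PT2 (512 PTE2s) */

        /* PTE1 : V/T bits, 8 flag bits, 3 reserved bits, 19-bit big page index */
        unsigned int pte1 = PTE_V | (flags & 0x3FC00000) | ((ppn >> 9) & 0x0007FFFF);

        /* PTE2 : two 32-bit words, flags first, then the full 28-bit PPN */
        unsigned int pte2_flags = PTE_V | flags;
        unsigned int pte2_ppn   = ppn;

        printf("ix1 = %u / ix2 = %u\n", ix1, ix2);
        printf("pte1 = 0x%08x / pte2 = (0x%08x, 0x%08x)\n", pte1, pte2_flags, pte2_ppn);
        return 0;
    }
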
     
    8579#include <irq_handler.h>
    8680#include <vmem.h>
     81#include <pmem.h>
    8782#include <utils.h>
    8883#include <elf-types.h>
     
    128123////////////////////////////////////////////////////////////////////////////
    129124//      Global variables for boot code
    130 // Both the page tables for the various virtual spaces, and the schedulers
    131 // for the processors are physically distributed on the clusters.
    132125////////////////////////////////////////////////////////////////////////////
    133126
    134127extern void boot_entry();
    135128
    136 // This global variable is allocated in "fat32.c" file
    137 extern fat32_fs_t fat;
    138 
    139 // Page tables virtual base addresses (one per vspace)
     129// FAT internal representation for boot code 
    140130__attribute__((section (".bootdata")))
    141 unsigned int _ptabs_vaddr[GIET_NB_VSPACE_MAX];
    142 
    143 // Page tables physical base addresses (one per vspace / one per cluster)
    144 __attribute__((section (".bootdata")))
    145 paddr_t _ptabs_paddr[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
    146 
    147 // Page table max_pt2 (one per vspace / one per cluster )
    148 __attribute__((section (".bootdata")))
    149 unsigned int _ptabs_max_pt2[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
    150 
    151 // Page tables pt2 allocators (one per vspace / one per cluster)
    152 __attribute__((section (".bootdata")))
    153 unsigned int _ptabs_next_pt2[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
    154 
    155 // Scheduler pointers array (virtual addresses)
    156 // indexed by (x,y,lpid) : (((x << Y_WIDTH) + y) * NB_PROCS_MAX) + lpid
    157 __attribute__((section (".bootdata")))
    158 static_scheduler_t* _schedulers[1<<X_WIDTH][1<<Y_WIDTH][NB_PROCS_MAX];
     131fat32_fs_t             fat   __attribute__((aligned(512)));
    159132
    160133// Temporary buffer used to load one complete .elf file 
    161134__attribute__((section (".bootdata")))
    162 char boot_elf_buffer[GIET_ELF_BUFFER_SIZE] __attribute__((aligned(512)));
     135char                   boot_elf_buffer[GIET_ELF_BUFFER_SIZE] __attribute__((aligned(512)));
     136
     137// Physical memory allocators array (one per cluster)
     138__attribute__((section (".bootdata")))
     139pmem_alloc_t           boot_pmem_alloc[X_SIZE][Y_SIZE];
     140
     141// Schedulers virtual base addresses array (one per processor)
     142__attribute__((section (".bootdata")))
     143static_scheduler_t*    _schedulers[X_SIZE][Y_SIZE][NB_PROCS_MAX];
     144
     145// Page tables virtual base addresses array (one per vspace)
     146__attribute__((section (".bootdata")))
     147unsigned int           _ptabs_vaddr[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
     148
     149// Page tables physical base addresses (one per vspace and per cluster)
     150__attribute__((section (".bootdata")))
     151paddr_t                _ptabs_paddr[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
     152
     153// Page tables pt2 allocators (one per vspace and per cluster)
     154__attribute__((section (".bootdata")))
     155unsigned int           _ptabs_next_pt2[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
     156
     157// Page tables max_pt2  (same value for all page tables)
     158__attribute__((section (".bootdata")))
     159unsigned int           _ptabs_max_pt2;
    163160
    164161/////////////////////////////////////////////////////////////////////
     
    265262} // end boot_mapping_check()
    266263
    267 
    268264//////////////////////////////////////////////////////////////////////////////
    269 //     boot_pseg_get()
    270 // This function returns the pointer on a physical segment
    271 // identified  by the pseg index.
     265// This function registers a new PTE1 in the page table defined
     266// by the vspace_id argument, and the (x,y) coordinates.
     267// It updates only the first level PT1.
    272268//////////////////////////////////////////////////////////////////////////////
    273 mapping_pseg_t *boot_pseg_get(unsigned int seg_id)
     269void boot_add_pte1( unsigned int vspace_id,
     270                    unsigned int x,
     271                    unsigned int y,
     272                    unsigned int vpn,        // 20 bits right-justified
     273                    unsigned int flags,      // 10 bits left-justified
     274                    unsigned int ppn )       // 28 bits right-justified
    274275{
    275     mapping_header_t* header = (mapping_header_t*)SEG_BOOT_MAPPING_BASE;
    276     mapping_pseg_t * pseg    = _get_pseg_base(header);
    277 
    278     // checking argument
    279     if (seg_id >= header->psegs)
    280     {
    281         _puts("\n[BOOT ERROR] : seg_id argument too large\n");
    282         _puts("               in function boot_pseg_get()\n");
     276    // compute index in PT1
     277    unsigned int    ix1 = vpn >> 9;         // 11 bits for ix1
     278
     279    // get page table physical base address
     280    paddr_t         pt1_pbase = _ptabs_paddr[vspace_id][x][y];
     281
     282    // check pt1_base
     283    if ( pt1_pbase == 0 )
     284    {
     285        _puts("\n[BOOT ERROR] in boot_add_pte1() : illegal pbase address for PTAB[");
     286        _putd( vspace_id );
     287        _puts(",");
     288        _putd( x );
     289        _puts(",");
     290        _putd( y );
     291        _puts("]\n");
    283292        _exit();
    284293    }
    285294
    286     return &pseg[seg_id];
    287 
     295    // compute pte1 : 2 bits V T / 8 bits flags / 3 bits RSVD / 19 bits bppi
     296    unsigned int    pte1 = PTE_V |
     297                           (flags & 0x3FC00000) |
     298                           ((ppn>>9) & 0x0007FFFF);
     299
     300    // write pte1 in PT1
     301    _physical_write( pt1_pbase + 4*ix1, pte1 );
     302
     303#if (BOOT_DEBUG_PT > 1)
     304_puts(" - PTE1 in PTAB[");
     305_putd( vspace_id );
     306_puts(",");
     307_putd( x );
     308_puts(",");
     309_putd( y );
     310_puts("] : vpn = ");
     311_putx( vpn );
     312_puts(" / ppn = ");
     313_putx( ppn );
     314_puts(" / flags = ");
     315_putx( flags );
     316_puts("\n");
     317#endif
     318
     319}   // end boot_add_pte1()
    288320
    289321//////////////////////////////////////////////////////////////////////////////
    290 // boot_add_pte()
    291 // This function registers a new PTE in the page table defined
     322// This function registers a new PTE2 in the page table defined
    292323// by the vspace_id argument, and the (x,y) coordinates.
    293 // It updates both the PT1 and PT2, and a new PT2 is used if required.
     324// It updates both the first level PT1 and the second level PT2.
    294325// As the set of PT2s is implemented as a fixed size array (no dynamic
    295326// allocation), this function checks a possible overflow of the PT2 array.
    296327//////////////////////////////////////////////////////////////////////////////
    297 void boot_add_pte(unsigned int vspace_id,
    298                   unsigned int x,
    299                   unsigned int y,
    300                   unsigned int vpn,
    301                   unsigned int flags,
    302                   unsigned int ppn,
    303                   unsigned int verbose)
     328void boot_add_pte2( unsigned int vspace_id,
     329                    unsigned int x,
     330                    unsigned int y,
     331                    unsigned int vpn,        // 20 bits right-justified
     332                    unsigned int flags,      // 10 bits left-justified
     333                    unsigned int ppn )       // 28 bits right-justified
    304334{
    305335    unsigned int ix1;
    306336    unsigned int ix2;
    307337    paddr_t      pt2_pbase;     // PT2 physical base address
    308     paddr_t      pte_paddr;     // PTE physical address
     338    paddr_t      pte2_paddr;    // PTE2 physical address
    309339    unsigned int pt2_id;        // PT2 index
    310340    unsigned int ptd;           // PTD : entry in PT1
    311341
    312     ix1 = vpn >> 9;         // 11 bits
    313     ix2 = vpn & 0x1FF;      //  9 bits
     342    ix1 = vpn >> 9;             // 11 bits for ix1
     343    ix2 = vpn & 0x1FF;          //  9 bits for ix2
    314344
    315345    // get page table physical base address and size
    316346    paddr_t      pt1_pbase = _ptabs_paddr[vspace_id][x][y];
    317     unsigned int max_pt2   = _ptabs_max_pt2[vspace_id][x][y];
    318 
    319     if (max_pt2 == 0)
    320     {
    321         _puts("Undefined page table for vspace ");
    322         _putd(vspace_id);
    323         _puts("\n");
     347
     348    // check pt1_base
     349    if ( pt1_pbase == 0 )
     350    {
     351        _puts("\n[BOOT ERROR] in boot_add_pte2() : PTAB[");
     352        _putd( vspace_id );
     353        _puts(",");
     354        _putd( x );
     355        _puts(",");
     356        _putd( y );
     357        _puts("] undefined\n");
    324358        _exit();
    325359    }
     
    332366    {
    333367        pt2_id = _ptabs_next_pt2[vspace_id][x][y];
    334         if (pt2_id == max_pt2)
    335         {
    336             _puts("\n[BOOT ERROR] in boot_add_pte() function\n");
    337             _puts("the length of the PTAB vobj is too small\n");
    338             _puts(" max_pt2 = ");
    339             _putd( max_pt2 );
    340             _puts("\n");
    341             _puts(" pt2_id  = ");
    342             _putd( pt2_id );
    343             _puts("\n");
     368        if (pt2_id == _ptabs_max_pt2)
     369        {
     370            _puts("\n[BOOT ERROR] in boot_add_pte2() : PTAB[");
     371            _putd( vspace_id );
     372            _puts(",");
     373            _putd( x );
     374            _puts(",");
     375            _putd( y );
     376            _puts("] contains not enough PT2s\n");
    344377            _exit();
    345378        }
     
    347380        pt2_pbase = pt1_pbase + PT1_SIZE + PT2_SIZE * pt2_id;
    348381        ptd = PTE_V | PTE_T | (unsigned int) (pt2_pbase >> 12);
    349         _physical_write( pt1_pbase + 4 * ix1, ptd);
     382        _physical_write( pt1_pbase + 4*ix1, ptd);
    350383        _ptabs_next_pt2[vspace_id][x][y] = pt2_id + 1;
    351384    }
     
    356389
    357390    // set PTE in PT2 : flags & PPN in two 32 bits words
    358     pte_paddr = pt2_pbase + 8 * ix2;
    359     _physical_write(pte_paddr    , flags);
    360     _physical_write(pte_paddr + 4, ppn);
    361 
    362     if (verbose)
    363     {
    364         _puts(" / vpn = ");
    365         _putx( vpn );
    366         _puts(" / ix1 = ");
    367         _putx( ix1 );
    368         _puts(" / ix2 = ");
    369         _putx( ix2 );
    370         _puts(" / pt1_pbase = ");
    371         _putl( pt1_pbase );
    372         _puts(" / ptd = ");
    373         _putl( ptd );
    374         _puts(" / pt2_pbase = ");
    375         _putl( pt2_pbase );
    376         _puts(" / pte_paddr = ");
    377         _putl( pte_paddr );
    378         _puts(" / ppn = ");
    379         _putx( ppn );
    380         _puts("/\n");
    381     }
    382 
    383 }   // end boot_add_pte()
    384 
    385 
    386 ////////////////////////////////////////////////////////////////////////
    387 // This function build the page table(s) for a given vspace.
    388 // It build as many pages tables as the number of vobjs having
    389 // the PTAB type in the vspace, because page tables can be replicated.
    390 // The physical base addresses for all vsegs (global and private)
    391 // must have been previously computed and stored in the mapping.
    392 //
    393 // General rule regarding local / shared vsegs:
    394 // - shared vsegs are mapped in all page tables
    395 // - local vsegs are mapped only in the "local" page table
    396 ////////////////////////////////////////////////////////////////////////
    397 void boot_vspace_pt_build(unsigned int vspace_id)
    398 {
    399     unsigned int ptab_id;       // global index for a vseg containing a PTAB
    400     unsigned int priv_id;       // global index for a private vseg in a vspace
    401     unsigned int glob_id;       // global index for a global vseg
    402     unsigned int npages;
    403     unsigned int ppn;
    404     unsigned int vpn;
    405     unsigned int flags;
    406     unsigned int page_id;
    407     unsigned int verbose = 0;   // can be used to activate trace in add_pte()
    408 
    409     mapping_header_t  * header  = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    410     mapping_vspace_t  * vspace  = _get_vspace_base(header);
    411     mapping_vseg_t    * vseg    = _get_vseg_base(header);
    412     mapping_vobj_t    * vobj    = _get_vobj_base(header);
    413     mapping_pseg_t    * pseg    = _get_pseg_base(header);
    414     mapping_cluster_t * cluster = _get_cluster_base(header);
    415 
    416     // external loop on private vsegs to find all PTAB vobjs in vspace
    417     for (ptab_id = vspace[vspace_id].vseg_offset;
    418          ptab_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs);
    419          ptab_id++)
    420     {
    421         // get global index of first vobj in vseg
    422         unsigned int vobj_id = vseg[ptab_id].vobj_offset;
    423 
    424         if ( vobj[vobj_id].type == VOBJ_TYPE_PTAB )
    425         {
    426             // get cluster coordinates for the PTAB
    427             unsigned int ptab_pseg_id    = vseg[ptab_id].psegid;
    428             unsigned int ptab_cluster_id = pseg[ptab_pseg_id].clusterid;
    429             unsigned int x_ptab          = cluster[ptab_cluster_id].x;
    430             unsigned int y_ptab          = cluster[ptab_cluster_id].y;
    431 
    432             // internal loop on private vsegs to build
    433             // the (vspace_id, x_ptab, y_ptab) page table
    434             for (priv_id = vspace[vspace_id].vseg_offset;
    435                  priv_id < (vspace[vspace_id].vseg_offset + vspace[vspace_id].vsegs);
    436                  priv_id++)
    437             {
    438                 // get cluster coordinates for private vseg
    439                 unsigned int priv_pseg_id    = vseg[priv_id].psegid;
    440                 unsigned int priv_cluster_id = pseg[priv_pseg_id].clusterid;
    441                 unsigned int x_priv          = cluster[priv_cluster_id].x;
    442                 unsigned int y_priv          = cluster[priv_cluster_id].y;
    443 
    444                 // only non local or matching private vsegs must be mapped
    445                 if ( (vseg[priv_id].local == 0 ) ||
    446                      ((x_ptab == x_priv) && (y_ptab == y_priv)) )
    447                 {
    448                     vpn = vseg[priv_id].vbase >> 12;
    449                     ppn = (unsigned int) (vseg[priv_id].pbase >> 12);
    450                     npages = vseg[priv_id].length >> 12;
    451                     if ((vseg[priv_id].length & 0xFFF) != 0) npages++;
    452 
    453                     flags = PTE_V;
    454                     if (vseg[priv_id].mode & C_MODE_MASK) flags |= PTE_C;
    455                     if (vseg[priv_id].mode & X_MODE_MASK) flags |= PTE_X;
    456                     if (vseg[priv_id].mode & W_MODE_MASK) flags |= PTE_W;
    457                     if (vseg[priv_id].mode & U_MODE_MASK) flags |= PTE_U;
    458        
    459                     // The three flags (Local, Remote and Dirty) are set to 1 to reduce
    460                     // latency of TLB miss (L/R) and write (D): Avoid hardware update
    461                     // mechanism for these flags because GIET_VM does use these flags.
    462 
    463                     flags |= PTE_L;
    464                     flags |= PTE_R;
    465                     flags |= PTE_D;
    466 
    467 #if BOOT_DEBUG_PT
    468 _puts(vseg[priv_id].name);
    469 _puts(" : flags = ");
    470 _putx(flags);
    471 _puts(" / npages = ");
    472 _putd(npages);
    473 _puts(" / pbase = ");
    474 _putl(vseg[priv_id].pbase);
    475 _puts("\n");
    476 #endif
    477                     // loop on 4K pages
    478                     for (page_id = 0; page_id < npages; page_id++)
    479                     {
    480                         boot_add_pte(vspace_id, x_ptab, y_ptab, vpn, flags, ppn, verbose);
    481                         vpn++;
    482                         ppn++;
    483                     }
    484                 }
    485             }  // end internal loop on private vsegs
    486 
    487             // internal loop on global vsegs to build the (x_ptab,y_ptab) page table
    488             for (glob_id = 0; glob_id < header->globals; glob_id++)
    489             {
    490                 // get cluster coordinates for global vseg
    491                 unsigned int glob_pseg_id    = vseg[glob_id].psegid;
    492                 unsigned int glob_cluster_id = pseg[glob_pseg_id].clusterid;
    493                 unsigned int x_glob          = cluster[glob_cluster_id].x;
    494                 unsigned int y_glob          = cluster[glob_cluster_id].y;
    495 
    496                 // only non local or matching global vsegs must be mapped
    497                 if ( (vseg[glob_id].local == 0 ) ||
    498                      ((x_ptab == x_glob) && (y_ptab == y_glob)) )
    499                 {
    500                     vpn = vseg[glob_id].vbase >> 12;
    501                     ppn = (unsigned int)(vseg[glob_id].pbase >> 12);
    502                     npages = vseg[glob_id].length >> 12;
    503                     if ((vseg[glob_id].length & 0xFFF) != 0) npages++;
    504 
    505                     flags = PTE_V;
    506                     if (vseg[glob_id].mode & C_MODE_MASK) flags |= PTE_C;
    507                     if (vseg[glob_id].mode & X_MODE_MASK) flags |= PTE_X;
    508                     if (vseg[glob_id].mode & W_MODE_MASK) flags |= PTE_W;
    509                     if (vseg[glob_id].mode & U_MODE_MASK) flags |= PTE_U;
    510 
    511                     // Flags set for optimization (as explained above)
    512 
    513                     flags |= PTE_L;
    514                     flags |= PTE_R;
    515                     flags |= PTE_D;
    516 
    517 #if BOOT_DEBUG_PT
    518 _puts(vseg[glob_id].name);
    519 _puts(" : flags = ");
    520 _putx(flags);
    521 _puts(" / npages = ");
    522 _putd(npages);
    523 _puts(" / pbase = ");
    524 _putl(vseg[glob_id].pbase);
    525 _puts("\n");
    526 #endif
    527                     // loop on 4K pages
    528                     for (page_id = 0; page_id < npages; page_id++)
    529                     {
    530                         boot_add_pte(vspace_id, x_ptab, y_ptab, vpn, flags, ppn, verbose);
    531                         vpn++;
    532                         ppn++;
    533                     }
    534                 }
    535             }   // end internal loop on global vsegs
    536 
    537             _puts("\n[BOOT] Page Table for vspace ");
    538             _puts( vspace[vspace_id].name );
    539             _puts(" in cluster[");
    540             _putd( x_ptab );
    541             _puts(",");
    542             _putd( y_ptab );
    543             _puts("] completed at cycle ");
    544             _putd( _get_proctime() );
    545             _puts("\n");
    546 
    547 #if BOOT_DEBUG_PT
    548 _puts("vaddr = ");
    549 _putx( _ptabs_vaddr[vspace_id] );
    550 _puts(" / paddr = ");
    551 _putl( _ptabs_paddr[vspace_id][x_ptab][y_ptab] );
    552 _puts(" / PT2 number = ");
    553 _putd( _ptabs_next_pt2[vspace_id][x_ptab][y_ptab] );
    554 _puts("\n");
    555 #endif
    556 
    557         }  // end if PTAB
    558     }  // end first loop on private vsegs
    559 }   // end boot_vspace_pt_build()
    560 
    561 
    562 ///////////////////////////////////////////////////////////////////////////
     391    pte2_paddr  = pt2_pbase + 8 * ix2;
     392    _physical_write(pte2_paddr     , (PTE_V |flags) );
     393    _physical_write(pte2_paddr + 4 , ppn);
     394
     395#if (BOOT_DEBUG_PT > 1)
     396_puts(" - PTE2 in PTAB[");
     397_putd( vspace_id );
     398_puts(",");
     399_putd( x );
     400_puts(",");
     401_putd( y );
     402_puts("] : vpn = ");
     403_putx( vpn );
     404_puts(" / ppn = ");
     405_putx( ppn );
     406_puts(" / flags = ");
     407_putx( flags );
     408_puts("\n");
     409#endif
     410
     411}   // end boot_add_pte2()
     412
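
The PT2 allocation in boot_add_pte2() above relies on the monolithic PTAB layout: one PT1 followed by the PT2s in the same physical segment. The short sketch below, with PT1_SIZE and PT2_SIZE taken from the header comment (8 Kbytes and 4 Kbytes) and an invented PTAB base address, shows how the physical base of a given PT2 and the PTD written into PT1 are derived; it is illustrative, not the actual boot code.

    /* Illustrative only: derive the physical base of PT2 number 'pt2_id'
     * inside a monolithic PTAB, and the PTD that PT1[ix1] must contain.
     * Sizes are assumed from the header comment (PT1 = 8KB, PT2 = 4KB). */
    #include <stdio.h>

    typedef unsigned long long paddr_t;

    #define PT1_SIZE 0x2000     /* 2048 PTDs  * 4 bytes (assumed) */
    #define PT2_SIZE 0x1000     /*  512 PTE2s * 8 bytes (assumed) */
    #define PTE_V    0x80000000
    #define PTE_T    0x40000000

    int main(void)
    {
        paddr_t      pt1_pbase = 0x40000000ULL;  /* example PTAB physical base */
        unsigned int pt2_id    = 3;              /* fourth PT2 of this PTAB    */

        /* PT2s are packed just after the PT1 inside the same PTAB vseg */
        paddr_t      pt2_pbase = pt1_pbase + PT1_SIZE + PT2_SIZE * pt2_id;

        /* the PTD stored in PT1 keeps the 4K-aligned base of the PT2 */
        unsigned int ptd = PTE_V | PTE_T | (unsigned int)(pt2_pbase >> 12);

        printf("pt2_pbase = 0x%llx / ptd = 0x%08x\n", pt2_pbase, ptd);
        return 0;
    }
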
     413////////////////////////////////////////////////////////////////////////////////////
    563414// Align the value of paddr or vaddr to the required alignment,
    564415// defined by alignPow2 == L2(alignment).
    565 ///////////////////////////////////////////////////////////////////////////
     416////////////////////////////////////////////////////////////////////////////////////
    566417paddr_t paddr_align_to(paddr_t paddr, unsigned int alignPow2)
    567418{
     
    576427}
    577428
    578 ///////////////////////////////////////////////////////////////////////////
    579 // Set pbase for a vseg when identity mapping is required.
    580 // The length of the vseg must be known.
    581 // The ordered linked list of vsegs mapped on pseg is updated,
    582 // and overlap with previously mapped vsegs is checked.
    583 ///////////////////////////////////////////////////////////////////////////
    584 void boot_vseg_set_paddr_ident(mapping_vseg_t * vseg)
     429/////////////////////////////////////////////////////////////////////////////////////
      430// This function maps the vseg identified by the vseg pointer.
      431//
      432// A given vseg can be mapped in Big Physical Pages (BPP: 2 Mbytes) or in
      433// Small Physical Pages (SPP: 4 Kbytes), depending on the "big" attribute of the vseg,
      434// with the following rules:
      435// - SPP : There is only one vseg in a small physical page, but a single vseg
      436//   can cover several contiguous small physical pages.
      437// - BPP : Several vsegs can exist in a single big physical page, and a single
     438//   vseg can cover several contiguous big physical pages.
     439//
      440// 1) First step: it computes the vseg length, and registers it in the vseg->length field.
      441//    It computes - for each vobj - the actual vbase address, taking into
      442//    account the alignment constraints, and registers it in the vobj->vbase field.
      443//
      444// 2) Second step: it allocates the required number of physical pages,
      445//    computes the physical base address (unless the vseg is identity mapped),
      446//    and registers it in the vseg->pbase field.
      447//    Only the 4 vsegs used by the boot code and the peripheral vsegs
      448//    can be identity mapped: the first big physical page in cluster[0,0]
      449//    is reserved for the 4 boot vsegs.
      450//
      451// 3) Third step (only for vsegs that have the VOBJ_TYPE_PTAB type): all page tables
      452//    associated with the various vspaces must be packed in the same vseg.
      453//    We divide the vseg into M sub-segments, compute the vbase and pbase
      454//    addresses for each page table, and register them in the _ptabs_paddr
     455//    and _ptabs_vaddr arrays.
     456// 
     457/////////////////////////////////////////////////////////////////////////////////////
     458void boot_vseg_map( mapping_vseg_t* vseg )
    585459{
    586     // checking vseg not already mapped
    587     if (vseg->mapped != 0)
    588     {
    589         _puts("\n[BOOT ERROR] in boot_vseg_set_paddr_ident() : vseg ");
    590         _puts( vseg->name );
    591         _puts(" already mapped\n");
    592         _exit();
    593     }
    594 
    595     // computes selected pseg pointer
    596     mapping_pseg_t* pseg = boot_pseg_get( vseg->psegid );
    597 
    598     // computes vseg alignment constraint
    599     mapping_header_t* header    = (mapping_header_t*)SEG_BOOT_MAPPING_BASE;
    600     mapping_vobj_t*   vobj_base = _get_vobj_base( header );
    601     unsigned int      align     = vobj_base[vseg->vobj_offset].align;
    602     if ( vobj_base[vseg->vobj_offset].align < 12 ) align = 12;
    603 
    604     // computes required_pbase for identity mapping,
    605     paddr_t required_pbase = (paddr_t)vseg->vbase;
    606 
    607     // checks identity constraint against alignment constraint
    608     if ( paddr_align_to( required_pbase, align) != required_pbase )
    609     {
    610         _puts("\n[BOOT ERROR] in boot_vseg_set_paddr_ident() : vseg ");
    611         _puts( vseg->name );
    612         _puts(" has uncompatible identity and alignment constraints\n");
    613         _exit();
    614     }
    615 
    616     // We are looking for a contiguous space in target pseg.
    617     // If there is vsegs already mapped, we scan the vsegs list to:
    618     // - check overlap with already mapped vsegs,
    619     // - try mapping in holes between already mapped vsegs,
    620     // - update the ordered linked list if success
    621     // We don't enter the loop if no vsegs is already mapped.
    622     // implementation note: The next_vseg field is unsigned int,
    623     // but we use it to store a MIP32 pointer on a vseg...
    624 
    625     mapping_vseg_t*   curr      = 0;
    626     mapping_vseg_t*   prev      = 0;
    627     unsigned int      min_pbase = pseg->base;
    628 
    629     for ( curr = (mapping_vseg_t*)pseg->next_vseg ;
    630           (curr != 0) && (vseg->mapped == 0) ;
    631           curr = (mapping_vseg_t*)curr->next_vseg )
    632     {
    633         // looking before current vseg
    634         if( (required_pbase >= min_pbase) &&
    635             (curr->pbase >= (required_pbase + vseg->length)) ) // space found
    636         {
    637             vseg->pbase  = required_pbase;
    638             vseg->mapped = 1;
    639 
    640             // update linked list
    641             vseg->next_vseg = (unsigned int)curr;
    642             if( curr == (mapping_vseg_t*)pseg->next_vseg )
    643                 pseg->next_vseg = (unsigned int)vseg;
    644             else
    645                 prev->next_vseg = (unsigned int)vseg;
    646         }
    647         else                                         // looking in space after curr
    648         {
    649             prev = curr;
    650             min_pbase = curr->pbase + curr->length;
    651         }
    652     }
    653 
    654     // no success in the loop
    655     if( (vseg->mapped == 0) &&
    656         (required_pbase >= min_pbase) &&
    657         ((required_pbase + vseg->length) <= (pseg->base + pseg->length)) )
    658     {
    659         vseg->pbase  = required_pbase;
    660         vseg->mapped = 1;
    661 
    662         // update linked list
    663         vseg->next_vseg = 0;
    664         if ((curr == 0) && (prev == 0)) pseg->next_vseg = (unsigned int)vseg;
    665         else                            prev->next_vseg = (unsigned int)vseg;
    666     }
    667 
    668     if( vseg->mapped == 0 )
    669     {
    670         _puts("\n[BOOT ERROR] in boot_vseg_set_paddr_ident() : vseg ");
    671         _puts( vseg->name );
    672         _puts(" cannot be mapped on pseg ");
    673         _puts( pseg->name );
    674         _puts("\n");
    675         _exit();
    676     }
    677 }  // end boot_vseg_set_paddr_ident()
    678 
    679                
    680 ////////////////////////////////////////////////////////////////////////////
    681 // Set pbase for a vseg when there is no identity mapping constraint.
    682 // This is the physical memory allocator (written by Q.Meunier).
    683 // The length of the vseg must be known.
    684 // All identity mapping vsegs must be already mapped.
    685 // We use a linked list of already mapped vsegs, ordered by incresing pbase.
    686 // We try to place the vseg in the "first fit" hole in this list.
    687 ////////////////////////////////////////////////////////////////////////////
    688 void boot_vseg_set_paddr(mapping_vseg_t * vseg)
    689 {
    690     // checking vseg not already mapped
    691     if ( vseg->mapped != 0 )
    692     {
    693         _puts("\n[BOOT ERROR] in boot_vseg_set_paddr() : vseg ");
    694         _puts( vseg->name );
    695         _puts(" already mapped\n");
    696         _exit();
    697     }
    698 
    699     // computes selected pseg pointer
    700     mapping_pseg_t*   pseg      = boot_pseg_get( vseg->psegid );
    701 
    702     // computes vseg alignment constraint
    703     mapping_header_t* header    = (mapping_header_t*)SEG_BOOT_MAPPING_BASE;
    704     mapping_vobj_t*   vobj_base = _get_vobj_base( header );
    705     unsigned int      align     = vobj_base[vseg->vobj_offset].align;
    706     if ( vobj_base[vseg->vobj_offset].align < 12 ) align = 12;
    707 
    708     // initialise physical base address, with alignment constraint
    709     paddr_t possible_pbase = paddr_align_to( pseg->base, align );
    710 
    711     // We are looking for a contiguous space in target pseg
    712     // If there is vsegs already mapped, we scan the vsegs list to:
    713     // - try mapping in holes between already mapped vsegs,
    714     // - update the ordered linked list if success
    715     // We don't enter the loop if no vsegs is already mapped.
    716     // implementation note: The next_vseg field is unsigned int,
    717     // but we use it to store a MIP32 pointer on a vseg...
    718 
    719     mapping_vseg_t*   curr = 0;
    720     mapping_vseg_t*   prev = 0;
    721 
    722     for( curr = (mapping_vseg_t*)pseg->next_vseg ;
    723          (curr != 0) && (vseg->mapped == 0) ;
    724          curr = (mapping_vseg_t*)curr->next_vseg )
    725     {
    726         // looking for space before current vseg
    727         if ( (curr->pbase >= possible_pbase + vseg->length) ) // space before curr
    728         {
    729             vseg->pbase  = possible_pbase;
    730             vseg->mapped = 1;
    731 
    732             // update linked list
    733             vseg->next_vseg = (unsigned int)curr;
    734             if( curr == (mapping_vseg_t*)pseg->next_vseg )
    735                 pseg->next_vseg = (unsigned int)vseg;
    736             else
    737                 prev->next_vseg = (unsigned int)vseg;
    738         }
    739         else                                            // looking for space after curr
    740         {
    741             possible_pbase = paddr_align_to( curr->pbase + curr->length, align );
    742             prev           = curr;
    743         }
    744     }
    745        
    746     // when no space found, try to allocate space after already mapped vsegs
    747     if( (vseg->mapped == 0) &&
    748         ((possible_pbase + vseg->length) <= (pseg->base + pseg->length)) )
    749     {
    750         vseg->pbase  = possible_pbase;
    751         vseg->mapped = 1;
    752 
    753         // update linked list
    754         vseg->next_vseg = 0;
    755         if ((curr == 0 ) && (prev == 0)) pseg->next_vseg = (unsigned int)vseg;
    756         else                             prev->next_vseg = (unsigned int)vseg;
    757     }
    758 
    759     if( vseg->mapped == 0 )
    760     {
    761         _puts("\n[BOOT ERROR] in boot_vseg_set_paddr() : vseg ");
    762         _puts( vseg->name );
    763         _puts(" cannot be mapped on pseg ");
    764         _puts( pseg->name );
    765         _puts(" in cluster[");
    766         _putd( pseg->clusterid );
    767         _puts("]\n");
    768         _exit();
    769     }
    770 }  // end boot_vseg_set_paddr()
    771 
    772 ///////////////////////////////////////////////////////////////////////////
    773 // This function computes the physical base address for a vseg
    774 // as specified in the mapping info data structure.
    775 // It updates the pbase and the length fields of the vseg.
    776 // It updates the pbase and vbase fields of all vobjs in the vseg.
    777 // It updates the _ptabs_paddr[] and _ptabs_vaddr[], _ptabs_max_pt2[],
    778 // and _ptabs_next_pt2[] arrays.
    779 // It is a global vseg if vspace_id = (-1).
    780 ///////////////////////////////////////////////////////////////////////////
    781 void boot_vseg_map(mapping_vseg_t * vseg, unsigned int vspace_id)
    782 {
    783     unsigned int vobj_id;
    784     unsigned int cur_vaddr;
    785     paddr_t      cur_paddr;
    786     paddr_t      cur_length;
    787     unsigned int offset;
    788 
    789     mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    790     mapping_vobj_t   * vobj   = _get_vobj_base(header);
    791 
    792     // first loop on the vobjs contained in vseg to compute
    793     // the vseg length, required for mapping.
    794     cur_length = 0;
    795     for ( vobj_id = vseg->vobj_offset;
    796           vobj_id < (vseg->vobj_offset + vseg->vobjs);
     460    mapping_header_t*   header  = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
     461    mapping_vobj_t*     vobj    = _get_vobj_base(header);
     462    mapping_cluster_t*  cluster = _get_cluster_base(header);
     463    mapping_pseg_t*     pseg    = _get_pseg_base(header);
     464
     465    // compute destination cluster pointer & coordinates
     466    pseg    = pseg + vseg->psegid;
     467    cluster = cluster + pseg->clusterid;
     468    unsigned int        x_dest     = cluster->x;
     469    unsigned int        y_dest     = cluster->y;
     470
     471    // compute the first vobj global index
     472    unsigned int        vobj_id = vseg->vobj_offset;
     473   
     474    // compute the "big" vseg attribute
     475    unsigned int        big = vseg->big;
     476
     477    // compute the "is_ram" vseg attribute
     478    unsigned int        is_ram;
     479    if ( pseg->type == PSEG_TYPE_RAM )  is_ram = 1;
     480    else                                is_ram = 0;
     481
     482    // compute the "is_ptab" attribute
     483    unsigned int        is_ptab;
     484    if ( vobj[vobj_id].type == VOBJ_TYPE_PTAB ) is_ptab = 1;
     485    else                                        is_ptab = 0;
     486
     487    //////////// First step : compute vseg length and vobj(s) vbase
     488
     489    unsigned int vobj_vbase = vseg->vbase;   // min vbase for first vobj
     490
     491    for ( vobj_id = vseg->vobj_offset ;
     492          vobj_id < (vseg->vobj_offset + vseg->vobjs) ;
    797493          vobj_id++ )
    798494    {
    799         if (vobj[vobj_id].align)
    800         {
    801             cur_length = vaddr_align_to(cur_length, vobj[vobj_id].align);
    802         }
    803         cur_length += vobj[vobj_id].length;
    804     }
    805     vseg->length = paddr_align_to(cur_length, 12);
    806 
    807     // mapping: computes vseg pbase address
    808     if (vseg->ident != 0)                         // identity mapping
    809     {
    810         boot_vseg_set_paddr_ident( vseg );
    811     }
    812     else                                          // unconstrained mapping
    813     {
    814         boot_vseg_set_paddr( vseg );
    815     }
    816 
    817     // second loop on vobjs contained in vseg to :
    818     // initialize the vaddr and paddr fields of all vobjs,
    819     // and initialize the page table pointers arrays
    820 
    821     cur_vaddr = vseg->vbase;
    822     cur_paddr = vseg->pbase;
    823 
    824     for (vobj_id = vseg->vobj_offset;
    825          vobj_id < (vseg->vobj_offset + vseg->vobjs); vobj_id++)
    826     {
    827         if (vobj[vobj_id].align)
    828         {
    829             cur_paddr = paddr_align_to(cur_paddr, vobj[vobj_id].align);
    830             cur_vaddr = vaddr_align_to(cur_vaddr, vobj[vobj_id].align);
    831         }
    832         // set vaddr/paddr for current vobj
    833         vobj[vobj_id].vaddr = cur_vaddr;
    834         vobj[vobj_id].paddr = cur_paddr;
    835        
    836         // initialize _ptabs_vaddr[] , _ptabs-paddr[] , _ptabs_max_pt2[] if PTAB
    837         if (vobj[vobj_id].type == VOBJ_TYPE_PTAB)
    838         {
    839             if (vspace_id == ((unsigned int) -1))    // global vseg
    840             {
    841                 _puts("\n[BOOT ERROR] in boot_vseg_map() function: ");
    842                 _puts("a PTAB vobj cannot be global");
    843                 _exit();
     495        // compute and register vobj vbase
     496        vobj[vobj_id].vbase = vaddr_align_to( vobj_vbase, vobj[vobj_id].align );
     497   
     498        // compute min vbase for next vobj
     499        vobj_vbase = vobj[vobj_id].vbase + vobj[vobj_id].length;
     500    }
     501
     502    // compute and register vseg length (multiple of 4 Kbytes)
     503    vseg->length = vaddr_align_to( vobj_vbase - vseg->vbase, 12 );
     504   
     505    //////////// Second step : compute ppn and npages 
     506    //////////// - if identity mapping :  ppn <= vpn
     507    //////////// - if vseg is periph   :  ppn <= pseg.base >> 12
     508    //////////// - if vseg is ram      :  ppn <= physical memory allocator
     509
     510    unsigned int ppn;          // first physical page index ( 28 bits = |x|y|bppi|sppi| )
     511    unsigned int vpn;          // first virtual page index  ( 20 bits = |ix1|ix2| )
     512    unsigned int vpn_max;      // last  virtual page index  ( 20 bits = |ix1|ix2| )
     513
     514    vpn     = vseg->vbase >> 12;
     515    vpn_max = (vseg->vbase + vseg->length - 1) >> 12;
     516
     517    // compute npages
     518    unsigned int npages;       // number of required (big or small) pages
     519    if ( big == 0 ) npages  = vpn_max - vpn + 1;            // number of small pages
     520    else            npages  = (vpn_max>>9) - (vpn>>9) + 1;  // number of big pages
     521
     522    // compute ppn
     523    if ( vseg->ident )           // identity mapping
     524    {
     525        ppn = vpn;
     526    }
     527    else                         // not identity mapping
     528    {
     529        if ( is_ram )            // RAM : physical memory allocation required
     530        {
     531            // compute pointer on physical memory allocator in dest cluster
     532            pmem_alloc_t*     palloc = &boot_pmem_alloc[x_dest][y_dest];
     533
     534            if ( big == 0 )             // SPP : small physical pages
     535            {
     536                // allocate contiguous small physical pages
     537                ppn = _get_small_ppn( palloc, npages );
    844538            }
    845             // we need at least one PT2
    846             if (vobj[vobj_id].length < (PT1_SIZE + PT2_SIZE))
    847             {
    848                 _puts("\n[BOOT ERROR] in boot_vseg_map() function, ");
    849                 _puts("PTAB too small, minumum size is: ");
    850                 _putx(PT1_SIZE + PT2_SIZE);
    851                 _exit();
    852             }
    853             // get cluster coordinates for PTAB
    854             unsigned int cluster_xy = (unsigned int)(cur_paddr>>32);
    855             unsigned int x          = cluster_xy >> Y_WIDTH;
    856             unsigned int y          = cluster_xy & ((1<<Y_WIDTH)-1);
    857 
    858             // register physical and virtual page table addresses, size, and next PT2
    859             _ptabs_vaddr[vspace_id]          = vobj[vobj_id].vaddr;
    860             _ptabs_paddr[vspace_id][x][y]    = vobj[vobj_id].paddr;
    861             _ptabs_max_pt2[vspace_id][x][y]  = (vobj[vobj_id].length - PT1_SIZE) / PT2_SIZE;
    862             _ptabs_next_pt2[vspace_id][x][y] = 0;
    863            
    864             // reset all valid bits in PT1
    865             for ( offset = 0 ; offset < 8192 ; offset = offset + 4)
    866             {
    867                 _physical_write(cur_paddr + offset, 0);
     539            else                            // BPP : big physical pages
     540            {
     541 
     542                // one big page can be shared by several vsegs
      543                // we must check if the BPP is already allocated
     544                if ( is_ptab )   // It cannot be mapped
     545                {
     546                    ppn = _get_big_ppn( palloc, npages );
     547                }
     548                else             // It can be mapped
     549                {
     550                    unsigned int ix1   = vpn >> 9;   // 11 bits
     551                    paddr_t      paddr = _ptabs_paddr[0][x_dest][y_dest] + (ix1<<2);
     552                    unsigned int pte1  = _physical_read( paddr );
     553                    if ( (pte1 & PTE_V) == 0 )     // BPP not allocated yet
     554                    {
     555                        // allocate contiguous big physical pages
     556                        ppn = _get_big_ppn( palloc, npages );
     557                    }
     558                    else                           // BPP already allocated
     559                    {
     560                        ppn = ((pte1 << 9) & 0x0FFFFE00);
     561                    }
     562                }
     563                ppn = ppn | (vpn & 0x1FF);
    868564            }
    869565        }
    870 
    871         // set next vaddr/paddr
    872         cur_vaddr = cur_vaddr + vobj[vobj_id].length;
    873         cur_paddr = cur_paddr + vobj[vobj_id].length;
    874     } // end for vobjs
    875 
    876 }    // end boot_vseg_map()
    877 
    878 ///////////////////////////////////////////////////////////////////////////
    879 // This function builds the page tables for all virtual spaces
    880 // defined in the mapping_info data structure, in three steps:
    881 // - step 1 : It computes the physical base address for global vsegs
    882 //            and for all associated vobjs.
    883 // - step 2 : It computes the physical base address for all private
    884 //            vsegs and all vobjs in each virtual space.
    885 // - step 3 : It actually fill the page table(s) for each vspace.
     566        else                    // PERI : no memory allocation required
     567        {
     568            ppn = pseg->base >> 12;
     569        }
     570    }
     571
     572    // update vseg.pbase field and update vsegs chaining
     573    vseg->pbase     = ((paddr_t)ppn) << 12;
     574    vseg->next_vseg = pseg->next_vseg;
     575    pseg->next_vseg = (unsigned int)vseg;
     576
     577
     578    //////////// Third step : (only if the vseg is a page table)
     579    //////////// - compute the physical & virtual base address for each vspace
     580    ////////////   by dividing the vseg in several sub-segments.
     581    //////////// - register it in _ptabs_vaddr & _ptabs_paddr arrays,
     582    ////////////   and initialize the max_pt2 and next_pt2 allocators.
     583   
     584    if ( is_ptab )
     585    {
     586        unsigned int   vs;        // vspace index
     587        unsigned int   nspaces;   // number of vspaces
     588        unsigned int   nsp;       // number of small pages for one PTAB
     589        unsigned int   offset;    // address offset for current PTAB
     590
     591        nspaces = header->vspaces;
     592        offset  = 0;
     593
      594        // each PTAB must be aligned on an 8 Kbytes boundary
      595        nsp = ( vseg->length >> 12 ) / nspaces;
     596        if ( (nsp & 0x1) == 0x1 ) nsp = nsp - 1;
     597
     598        // compute max_pt2
     599        _ptabs_max_pt2 = ((nsp<<12) - PT1_SIZE) / PT2_SIZE;
     600       
     601        for ( vs = 0 ; vs < nspaces ; vs++ )
     602        {
     603            offset += nsp;
     604            _ptabs_vaddr   [vs][x_dest][y_dest] = (vpn + offset) << 12; 
     605            _ptabs_paddr   [vs][x_dest][y_dest] = ((paddr_t)(ppn + offset)) << 12;
     606            _ptabs_next_pt2[vs][x_dest][y_dest] = 0;
     607        }
     608    }
     609
     610#if BOOT_DEBUG_PT
     611_puts("[BOOT DEBUG] ");
     612_puts( vseg->name );
     613_puts(" in cluster[");
     614_putd( x_dest );
     615_puts(",");
     616_putd( y_dest );
     617_puts("] : vbase = ");
     618_putx( vseg->vbase );
     619_puts(" / length = ");
     620_putx( vseg->length );
     621if ( big ) _puts(" / BIG   / npages = ");
     622else       _puts(" / SMALL / npages = ");
     623_putd( npages );
     624_puts(" / pbase = ");
     625_putl( vseg->pbase );
     626_puts("\n");
     627#endif
     628
     629} // end boot_vseg_map()
     630
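
As a concrete illustration of the "second step" arithmetic in boot_vseg_map() above, the sketch below (with invented vbase/length values) recomputes npages the same way for the SPP and BPP cases.

    /* Worked example (values invented): number of pages required to map
     * a vseg, computed as in boot_vseg_map() for both SPP and BPP cases. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int vbase  = 0x00400000;              /* vseg virtual base */
        unsigned int length = 0x00005000;              /* 20 Kbytes         */

        unsigned int vpn     = vbase >> 12;
        unsigned int vpn_max = (vbase + length - 1) >> 12;

        unsigned int nspp = vpn_max - vpn + 1;               /* small pages (4KB) */
        unsigned int nbpp = (vpn_max >> 9) - (vpn >> 9) + 1; /* big pages (2MB)   */

        printf("small pages : %u\n", nspp);   /* 5 small pages              */
        printf("big pages   : %u\n", nbpp);   /* 1 big page covers the vseg */
        return 0;
    }
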
     631/////////////////////////////////////////////////////////////////////////////////////
      632// For the vseg defined by the vseg pointer, this function registers all PTEs
     633// in one or several page tables.
     634// It is a global vseg (system vseg) if (vspace_id == 0xFFFFFFFF).
     635// The number of involved PTABs depends on the "local" and "global" attributes:
     636//  - PTEs are replicated in all vspaces for a global vseg.
     637//  - PTEs are replicated in all clusters for a non local vseg.
     638/////////////////////////////////////////////////////////////////////////////////////
     639void boot_vseg_pte_init( mapping_vseg_t*  vseg,
     640                         unsigned int     vspace_id )
     641{
     642    // compute the "global" vseg attribute and actual vspace index
     643    unsigned int        global;
     644    unsigned int        vsid;   
     645    if ( vspace_id == 0xFFFFFFFF )
     646    {
     647        global = 1;
     648        vsid   = 0;
     649    }
     650    else
     651    {
     652        global = 0;
     653        vsid   = vspace_id;
     654    }
     655
     656    // compute the "local" and "big" attributes
     657    unsigned int        local  = vseg->local;
     658    unsigned int        big    = vseg->big;
     659
     660    // compute vseg flags
     661    // The three flags (Local, Remote and Dirty) are set to 1 to reduce
     662    // latency of TLB miss (L/R) and write (D): Avoid hardware update
      663    // mechanism for these flags, because the GIET_VM does not use these flags.
     664    unsigned int flags = 0;
     665    if (vseg->mode & C_MODE_MASK) flags |= PTE_C;
     666    if (vseg->mode & X_MODE_MASK) flags |= PTE_X;
     667    if (vseg->mode & W_MODE_MASK) flags |= PTE_W;
     668    if (vseg->mode & U_MODE_MASK) flags |= PTE_U;
     669    if ( global )                 flags |= PTE_G;
     670                                  flags |= PTE_L;
     671                                  flags |= PTE_R;
     672                                  flags |= PTE_D;
     673
     674    // compute VPN, PPN and number of pages (big or small)
     675    unsigned int vpn     = vseg->vbase >> 12;
     676    unsigned int vpn_max = (vseg->vbase + vseg->length - 1) >> 12;
     677    unsigned int ppn     = (unsigned int)(vseg->pbase >> 12);
     678    unsigned int npages;
     679    if ( big == 0 ) npages  = vpn_max - vpn + 1;           
     680    else            npages  = (vpn_max>>9) - (vpn>>9) + 1;
     681
     682    // compute destination cluster coordinates
     683    unsigned int        x_dest;
     684    unsigned int        y_dest;
     685    mapping_header_t*   header  = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
     686    mapping_cluster_t*  cluster = _get_cluster_base(header);
     687    mapping_pseg_t*     pseg    = _get_pseg_base(header);
     688    pseg     = pseg + vseg->psegid;
     689    cluster  = cluster + pseg->clusterid;
     690    x_dest   = cluster->x;
     691    y_dest   = cluster->y;
     692
     693    unsigned int p;     // iterator for physical page index
     694    unsigned int x;     // iterator for cluster x coordinate 
     695    unsigned int y;     // iterator for cluster y coordinate 
     696    unsigned int v;     // iterator for vspace index
     697
     698    // loop on PTEs
     699    for ( p = 0 ; p < npages ; p++ )
     700    {
     701        if  ( (local != 0) && (global == 0) )         // one cluster  / one vspace
     702        {
     703            if ( big )   // big pages => PTE1s
     704            {
     705                boot_add_pte1( vsid,
     706                               x_dest,
     707                               y_dest,
     708                               vpn + (p<<9),
     709                               flags,
     710                               ppn + (p<<9) );
     711            }
     712            else         // small pages => PTE2s
     713            {
     714                boot_add_pte2( vsid,
     715                               x_dest,
     716                               y_dest,
     717                               vpn + p,     
     718                               flags,
     719                               ppn + p );
     720            }
     721        }
     722        else if ( (local == 0) && (global == 0) )     // all clusters / one vspace
     723        {
     724            for ( x = 0 ; x < X_SIZE ; x++ )
     725            {
     726                for ( y = 0 ; y < Y_SIZE ; y++ )
     727                {
     728                    if ( big )   // big pages => PTE1s
     729                    {
     730                        boot_add_pte1( vsid,
     731                                       x,
     732                                       y,
     733                                       vpn + (p<<9),
     734                                       flags,
     735                                       ppn + (p<<9) );
     736                    }
     737                    else         // small pages => PTE2s
     738                    {
     739                        boot_add_pte2( vsid,
     740                                       x,
     741                                       y,
     742                                       vpn + p,
     743                                       flags,
     744                                       ppn + p );
     745                    }
     746                }
     747            }
     748        }
     749        else if ( (local != 0) && (global != 0) )     // one cluster  / all vspaces
     750        {
     751            for ( v = 0 ; v < header->vspaces ; v++ )
     752            {
     753                if ( big )   // big pages => PTE1s
     754                {
     755                    boot_add_pte1( v,
     756                                   x_dest,
     757                                   y_dest,
     758                                   vpn + (p<<9),
     759                                   flags,
     760                                   ppn + (p<<9) );
     761                }
     762                else         // small pages = PTE2s
     763                {
     764                    boot_add_pte2( v,
     765                                   x_dest,
     766                                   y_dest,
     767                                   vpn + p,
     768                                   flags,
     769                                   ppn + p );
     770                }
     771            }
     772        }
     773        else if ( (local == 0) && (global != 0) )     // all clusters / all vspaces
     774        {
     775            for ( x = 0 ; x < X_SIZE ; x++ )
     776            {
     777                for ( y = 0 ; y < Y_SIZE ; y++ )
     778                {
     779                    for ( v = 0 ; v < header->vspaces ; v++ )
     780                    {
     781                        if ( big )  // big pages => PTE1s
     782                        {
     783                            boot_add_pte1( v,
     784                                           x,
     785                                           y,
     786                                           vpn + (p<<9),
     787                                           flags,
     788                                           ppn + (p<<9) );
     789                        }
      790                        else        // small pages => PTE2s
     791                        {
     792                            boot_add_pte2( v,
     793                                           x,
     794                                           y,
     795                                           vpn + p,
     796                                           flags,
     797                                           ppn + p );
     798                        }
     799                    }
     800                }
     801            }
     802        }
     803    }  // end for pages
     804}  // end boot_vseg_pte_init()
     805
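// [Editor's note] Illustrative sketch, not part of boot.c: the (p<<9) stride used for
// the PTE1 calls above comes from the ratio between a big page (0x200000 bytes) and a
// small page (0x1000 bytes): one PTE1 covers 1<<9 = 512 small pages, so the VPN and PPN
// advance by 512 per big page, while the PTE2 calls advance by 1. The helper name below
// is editorial.
static unsigned int sketch_small_pages_per_big_page( void )
{
    unsigned int big_page_size   = 0x200000;    // 2 Mbytes
    unsigned int small_page_size = 0x1000;      // 4 Kbytes
    return big_page_size / small_page_size;     // 512 = (1<<9), hence the (p<<9) stride
}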
     806///////////////////////////////////////////////////////////////////////////////
     807// This function initialises the page tables for all vspaces defined
     808// in the mapping_info data structure.
     809// For each vspace, there is one page table per cluster.
      810// In each cluster, all page tables for the different vspaces must be
      811// packed in one vseg occupying a single BPP (Big Physical Page).
    886812//
    887 // It must exist at least one vspace in the mapping.
    888 // For each vspace, it can exist one page table per cluster.
    889 ///////////////////////////////////////////////////////////////////////////
    890 void boot_pt_init()
     813// For each vseg, the mapping is done in two steps:
     814//
     815// A) mapping : the boot_vseg_map() function allocates contiguous BPPs
      816//    or SPPs (if the vseg is not associated with a peripheral), and registers
     817//    the physical base address in the vseg pbase field. It initialises the
     818//    _ptabs_vaddr and _ptabs_paddr arrays if the vseg is a PTAB.
     819//
      820// B) page table initialisation : the boot_vseg_pte_init() function initialises
     821//    the PTEs (both PTE1 and PTE2) in one or several page tables:
     822//    - PTEs are replicated in all vspaces for a global vseg.
      823//    - PTEs are replicated in all clusters for a non-local vseg.
     824//
      825// We must handle vsegs in the following order:
     826//   1) all global vsegs containing a page table,
     827//   2) all global vsegs occupying more than one BPP,
      828//   3) all other global vsegs,
     829//   4) all private vsegs in user space.
     830///////////////////////////////////////////////////////////////////////////////
     831void _ptabs_init()
    891832{
    892     mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    893     mapping_vspace_t * vspace = _get_vspace_base(header);
    894     mapping_vseg_t   * vseg   = _get_vseg_base(header);
     833    mapping_header_t*   header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
     834    mapping_vspace_t*   vspace = _get_vspace_base(header);
     835    mapping_vseg_t*     vseg   = _get_vseg_base(header);
     836    mapping_vobj_t*     vobj   = _get_vobj_base(header);
    895837
    896838    unsigned int vspace_id;
     
    899841    if (header->vspaces == 0 )
    900842    {
    901         _puts("\n[BOOT ERROR] in boot_pt_init() : mapping ");
     843        _puts("\n[BOOT ERROR] in _ptabs_init() : mapping ");
    902844        _puts( header->name );
    903845        _puts(" contains no vspace\n");
     
    905847    }
    906848
     849    ///////// Phase 1 : global vsegs containing a PTAB (two loops required)
     850
    907851#if BOOT_DEBUG_PT
    908 _puts("\n[BOOT DEBUG] ****** mapping global vsegs ******\n");
    909 #endif
    910 
    911     //////////////////////////////////
    912     // step 1 : loop on global vsegs
    913 
    914     // vsegs with identity mapping constraint first
     852_puts("\n[BOOT DEBUG] map PTAB global vsegs\n");
     853#endif
     854
    915855    for (vseg_id = 0; vseg_id < header->globals; vseg_id++)
    916856    {
    917         if (vseg[vseg_id].ident == 1)
    918             boot_vseg_map(&vseg[vseg_id], ((unsigned int) (-1)));
    919     }
    920 
    921     // unconstrained vsegs second
     857        unsigned int vobj_id = vseg[vseg_id].vobj_offset;
     858        if ( (vobj[vobj_id].type == VOBJ_TYPE_PTAB) )
     859        {
     860            boot_vseg_map( &vseg[vseg_id] );
     861            vseg[vseg_id].mapped = 1;
     862        }
     863    }
     864
    922865    for (vseg_id = 0; vseg_id < header->globals; vseg_id++)
    923866    {
    924         if (vseg[vseg_id].ident == 0)
    925             boot_vseg_map(&vseg[vseg_id], ((unsigned int) (-1)));
    926     }
    927 
    928     ////////////////////////////////////////////////////////////
    929     // step 2 : loop on virtual vspaces to map private vsegs
     867        unsigned int vobj_id = vseg[vseg_id].vobj_offset;
     868        if ( (vobj[vobj_id].type == VOBJ_TYPE_PTAB) )
     869        {
     870            boot_vseg_pte_init( &vseg[vseg_id], 0xFFFFFFFF );
     871            vseg[vseg_id].mapped = 1;
     872        }
     873    }
     874
     875    ///////// Phase 2 : global vsegs occupying more than one BPP (one loop)
     876
     877#if BOOT_DEBUG_PT
     878_puts("\n[BOOT DEBUG] map all multi-BPP global vsegs\n");
     879#endif
     880
     881    for (vseg_id = 0; vseg_id < header->globals; vseg_id++)
     882    {
     883        unsigned int vobj_id = vseg[vseg_id].vobj_offset;
     884        if ( (vobj[vobj_id].length > 0x200000) &&
     885             (vseg[vseg_id].mapped == 0) )
     886        {
     887            boot_vseg_map( &vseg[vseg_id] );
     888            vseg[vseg_id].mapped = 1;
     889            boot_vseg_pte_init( &vseg[vseg_id], 0xFFFFFFFF );
     890        }
     891    }
     892
      893    ///////// Phase 3 : all other global vsegs (one loop)
     894
     895#if BOOT_DEBUG_PT
      896_puts("\n[BOOT DEBUG] map all other global vsegs\n");
     897#endif
     898
     899    for (vseg_id = 0; vseg_id < header->globals; vseg_id++)
     900    {
     901        if ( vseg[vseg_id].mapped == 0 )
     902        {
     903            boot_vseg_map( &vseg[vseg_id] );
     904            vseg[vseg_id].mapped = 1;
     905            boot_vseg_pte_init( &vseg[vseg_id], 0xFFFFFFFF );
     906        }
     907    }
     908
     909    ///////// Phase 4 : all private vsegs (two nested loops)
    930910
    931911    for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++)
     
    933913
    934914#if BOOT_DEBUG_PT
    935 _puts("\n[BOOT DEBUG] ****** mapping private vsegs in vspace ");
    936 _puts(vspace[vspace_id].name);
    937 _puts(" ******\n");
     915_puts("\n[BOOT DEBUG] map private vsegs for vspace ");
     916_puts( vspace[vspace_id].name );
     917_puts("\n");
    938918#endif
    939919
     
    942922             vseg_id++)
    943923        {
    944             // private vsegs cannot be identity mapping
    945             if (vseg[vseg_id].ident != 0)
    946             {
    947                 _puts("\n[BOOT ERROR] in boot_pt_init() : vspace ");
    948                 _puts( vspace[vspace_id].name );
    949                 _puts(" contains vseg with identity mapping\n");
    950                 _exit();
    951             }
    952 
    953             boot_vseg_map(&vseg[vseg_id], vspace_id);
     924            boot_vseg_map( &vseg[vseg_id] );
     925            vseg[vseg_id].mapped = 1;
     926            boot_vseg_pte_init( &vseg[vseg_id], vspace_id );
    954927        }
    955928    }
    956929
    957 #if BOOT_DEBUG_PT
     930#if (BOOT_DEBUG_PT > 1)
    958931mapping_vseg_t*    curr;
    959932mapping_pseg_t*    pseg    = _get_pseg_base(header);
     
    963936{
    964937    unsigned int cluster_id = pseg[pseg_id].clusterid;
    965     _puts("\n[BOOT DEBUG] ****** vsegs mapped on pseg ");
     938    _puts("\n[BOOT DEBUG] vsegs mapped on pseg ");
    966939    _puts( pseg[pseg_id].name );
    967940    _puts(" in cluster[");
     
    969942    _puts(",");
    970943    _putd( cluster[cluster_id].y );
    971     _puts("] ******\n");
     944    _puts("]\n");
    972945    for( curr = (mapping_vseg_t*)pseg[pseg_id].next_vseg ;
    973946         curr != 0 ;
     
    987960#endif
    988961
    989     /////////////////////////////////////////////////////////////
    990     // step 3 : loop on the vspaces to build the page tables
    991     for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++)
    992     {
    993 
    994 #if BOOT_DEBUG_PT
    995 _puts("\n[BOOT DEBUG] ****** building page table for vspace ");
    996 _puts(vspace[vspace_id].name);
    997 _puts(" ******\n");
    998 #endif
    999 
    1000         boot_vspace_pt_build(vspace_id);
    1001 
    1002     }
    1003 } // end boot_pt_init()
      962} // end _ptabs_init()
    1004963
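// [Editor's note] Illustrative sketch, not part of boot.c: as stated in the function
// header above, in each cluster the page tables of all vspaces are packed in one PTAB
// vseg occupying a single big page (0x200000 bytes). Assuming one 8 Kbytes PT1 per
// vspace (consistent with the ">> 13" used to build the PTPR value elsewhere in this
// file) plus 4 Kbytes PT2s, a simple fit check looks as follows; the function name and
// parameter names are editorial.
static unsigned int sketch_ptab_fits_in_one_big_page( unsigned int nb_vspaces,
                                                      unsigned int nb_pt2_per_vspace )
{
    unsigned int pt_size = 0x2000 + (nb_pt2_per_vspace * 0x1000);   // PT1 + PT2s
    return ( (nb_vspaces * pt_size) <= 0x200000 );                  // must fit in one BPP
}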
    1005964///////////////////////////////////////////////////////////////////////////////
     
    1030989        _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id][0][0] >> 13) );
    1031990
    1032         unsigned int ptab_found = 0;
    1033 
    1034991        // loop on the vobjs
    1035992        for (vobj_id = vspace[vspace_id].vobj_offset;
     
    10521009_puts("\n");
    10531010#endif
    1054                     mwmr_channel_t* mwmr = (mwmr_channel_t *) (vobj[vobj_id].vaddr);
     1011                    mwmr_channel_t* mwmr = (mwmr_channel_t *) (vobj[vobj_id].vbase);
    10551012                    mwmr->ptw = 0;
    10561013                    mwmr->ptr = 0;
     
    11111068_puts("\n");
    11121069#endif
    1113                     giet_barrier_t* barrier = (giet_barrier_t *) (vobj[vobj_id].vaddr);
     1070                    giet_barrier_t* barrier = (giet_barrier_t *) (vobj[vobj_id].vbase);
    11141071                    barrier->count  = vobj[vobj_id].init;
    11151072                    barrier->ntasks = vobj[vobj_id].init;
     
    11351092_puts("\n");
    11361093#endif
    1137                     unsigned int* lock = (unsigned int *) (vobj[vobj_id].vaddr);
     1094                    unsigned int* lock = (unsigned int *) (vobj[vobj_id].vbase);
    11381095                    *lock = 0;
    11391096                    break;
     
    11671124_puts("\n");
    11681125#endif
    1169                     giet_memspace_t* memspace = (giet_memspace_t *) vobj[vobj_id].vaddr;
    1170                     memspace->buffer = (void *) vobj[vobj_id].vaddr + 8;
     1126                    giet_memspace_t* memspace = (giet_memspace_t *) vobj[vobj_id].vbase;
     1127                    memspace->buffer = (void *) vobj[vobj_id].vbase + 8;
    11711128                    memspace->size = vobj[vobj_id].length - 8;
    11721129#if BOOT_DEBUG_VOBJS
     
    11771134_puts("\n");
    11781135#endif
    1179                     break;
    1180                 }
    1181                 case VOBJ_TYPE_PTAB:    // nothing to initialize
    1182                 {
    1183 #if BOOT_DEBUG_VOBJS
    1184 _puts("PTAB    : ");
    1185 _puts(vobj[vobj_id].name);
    1186 _puts(" / vaddr = ");
    1187 _putx(vobj[vobj_id].vaddr);
    1188 _puts(" / paddr = ");
    1189 _putl(vobj[vobj_id].paddr);
    1190 _puts(" / length = ");
    1191 _putx(vobj[vobj_id].length);
    1192 _puts("\n");
    1193 #endif
    1194                     ptab_found = 1;
    11951136                    break;
    11961137                }
     
    12101151_puts("\n");
    12111152#endif
    1212                     unsigned int* addr = (unsigned int *) vobj[vobj_id].vaddr;
     1153                    unsigned int* addr = (unsigned int *) vobj[vobj_id].vbase;
    12131154                    *addr = vobj[vobj_id].init;
    12141155
     
    12221163                default:
    12231164                {
    1224                     _puts("\n[BOOT ERROR] illegal vobj type: ");
    1225                     _putd(vobj[vobj_id].type);
     1165                    _puts("\n[BOOT ERROR] in boot_vobjs_init() : Illegal vobj type ");
     1166                    _putd( vobj[vobj_id].type );
     1167                    _puts(" in vspace ");
     1168                    _puts( vspace[vspace_id].name );
    12261169                    _puts("\n");
    12271170                    _exit();
    12281171                }
    12291172            }            // end switch type
    1230         }            // end loop on vobjs
    1231         if (ptab_found == 0)
    1232         {
    1233             _puts("\n[BOOT ERROR] Missing PTAB for vspace ");
    1234             _putd(vspace_id);
    1235             _exit();
    1236         }
    1237     } // end loop on vspaces
    1238 
     1173        }          // end loop on vobjs
     1174    }        // end loop on vspaces
    12391175} // end boot_vobjs_init()
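
// [Editor's note] Illustrative sketch, not part of boot.c: for a memspace vobj the
// code above sets buffer = vbase + 8 and size = length - 8, which suggests that the
// first 8 bytes of the vobj hold the descriptor itself and the rest is the usable
// buffer. The structure below is an editorial guess at the layout implied by those
// two assignments (on a 32 bits core: a 4 bytes pointer followed by a 4 bytes size);
// the real giet_memspace_t definition should be checked in the GIET_VM headers.
typedef struct sketch_memspace_s
{
    void*        buffer;    // set to (vbase + 8) by boot_vobjs_init()
    unsigned int size;      // set to (length - 8) by boot_vobjs_init()
} sketch_memspace_t;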
    12401176
     
    12811217// This function initialises all processors schedulers.
    12821218// This is done by processor 0, and the MMU must be activated.
    1283 // - In Step 1, it initialises the _schedulers[] pointers array, and scan
    1284 //              the processors to initialise the schedulers, including the
    1285 //              idle_task context, and the HWI / SWI / PTI vectors.
      1219// - In Step 1, it initialises the _schedulers[x][y][l] pointers array, and scans
     1220//              the processors for a first initialisation of the schedulers:
     1221//              idle_task context, and HWI / SWI / PTI vectors.
     12861222// - In Step 2, it scans all tasks in all vspaces to complete the task contexts
     12871223//              initialisation, as specified in the mapping_info data structure.
     
    13241260    mapping_periph_t*  pic = NULL;
    13251261
    1326     // schedulers array base address in a cluster
    1327     unsigned int          sched_vbase; 
    1328     unsigned int          sched_length; 
    1329     static_scheduler_t*   psched; 
     1262    unsigned int          sched_vbase;  // schedulers array vbase address in a cluster
     1263    unsigned int          sched_length; // schedulers array length
     1264    static_scheduler_t*   psched;       // pointer on processor scheduler
    13301265
    13311266    /////////////////////////////////////////////////////////////////////////
    13321267    // Step 1 : loop on the clusters and on the processors
    13331268    //          to initialize the schedulers[] array of pointers,
    1334     //          and the interrupt vectors.
     1269    //          idle task context and interrupt vectors.
    13351270    // Implementation note:
    13361271    // We need to use both (proc_id) to scan the mapping info structure,
     
    13661301        if ( cluster[cluster_id].procs > 0 )
    13671302        {
    1368             // get scheduler array virtual base address from mapping
     1303            // get scheduler array virtual base address in cluster[cluster_id]
    13691304            boot_get_sched_vaddr( cluster_id, &sched_vbase, &sched_length );
    13701305
    1371             if ( sched_length < (cluster[cluster_id].procs<<12) ) // 4 Kbytes per scheduler
     1306            if ( sched_length < (cluster[cluster_id].procs<<13) ) // 8 Kbytes per scheduler
    13721307            {
    13731308                _puts("\n[BOOT ERROR] Schedulers segment too small in cluster[");
     
    13781313                _exit();
    13791314            }
    1380 
    1381             psched = (static_scheduler_t*)sched_vbase;
    13821315
    13831316            // scan peripherals to find the ICU/XCU and the PIC component
     
    14181351            }
    14191352
    1420             // loop on processors for sechedulers default values
     1353            // loop on processors for schedulers default values
    14211354            // initialisation, including WTI and PTI vectors
    14221355            for ( lpid = 0 ; lpid < cluster[cluster_id].procs ; lpid++ )
    14231356            {
    1424                 // set the schedulers pointers array
    1425                 _schedulers[x][y][lpid] = (static_scheduler_t*)&psched[lpid];
     1357                // pointer on processor scheduler
     1358                psched = (static_scheduler_t*)(sched_vbase + (lpid<<13));
     1359
     1360                // initialise the schedulers pointers array
     1361                _schedulers[x][y][lpid] = psched;
    14261362
    14271363#if BOOT_DEBUG_SCHED
     1364unsigned int   sched_vbase = (unsigned int)_schedulers[x][y][lpid];
     1365unsigned int   sched_ppn;
     1366unsigned int   sched_flags;
     1367paddr_t        sched_pbase;
     1368
     1369page_table_t* ptab = (page_table_t*)(_ptabs_vaddr[0][x][y]);
     1370_v2p_translate( ptab, sched_vbase>>12, &sched_ppn, &sched_flags );
     1371sched_pbase = ((paddr_t)sched_ppn)<<12;
     1372
    14281373_puts("\nProc[");
    14291374_putd( x );
     
    14321377_puts(",");
    14331378_putd( lpid );
    1434 _puts("] : scheduler virtual base address = ");
    1435 _putx( (unsigned int)&psched[lpid] );
     1379_puts("] : scheduler vbase = ");
     1380_putx( sched_vbase );
     1381_puts(" : scheduler pbase = ");
     1382_putl( sched_pbase );
    14361383_puts("\n");
    14371384#endif
    14381385                // initialise the "tasks" and "current" variables default values
    1439                 psched[lpid].tasks   = 0;
    1440                 psched[lpid].current = IDLE_TASK_INDEX;
     1386                psched->tasks   = 0;
     1387                psched->current = IDLE_TASK_INDEX;
    14411388
    14421389                // default values for HWI / PTI / SWI vectors (valid bit = 0)
     
    14441391                for (slot = 0; slot < 32; slot++)
    14451392                {
    1446                     psched[lpid].hwi_vector[slot] = 0;
    1447                     psched[lpid].pti_vector[slot] = 0;
    1448                     psched[lpid].wti_vector[slot] = 0;
     1393                    psched->hwi_vector[slot] = 0;
     1394                    psched->pti_vector[slot] = 0;
     1395                    psched->wti_vector[slot] = 0;
    14491396                }
    14501397
    14511398                // WTI[lpid] <= ISR_WAKUP / PTI[lpid] <= ISR_TICK
    1452                 psched[lpid].wti_vector[lpid] = ISR_WAKUP | 0x80000000;
    1453                 psched[lpid].pti_vector[lpid] = ISR_TICK  | 0x80000000;
     1399                psched->wti_vector[lpid] = ISR_WAKUP | 0x80000000;
     1400                psched->pti_vector[lpid] = ISR_TICK  | 0x80000000;
    14541401
    14551402                // initializes the idle_task context in scheduler:
     
    14601407                //   must be initialised by kernel_init()
    14611408
    1462                 psched[lpid].context[IDLE_TASK_INDEX][CTX_CR_ID]    = 0;
    1463                 psched[lpid].context[IDLE_TASK_INDEX][CTX_SR_ID]    = 0xFF03;
    1464                 psched[lpid].context[IDLE_TASK_INDEX][CTX_PTPR_ID]  = _ptabs_paddr[0][x][y]>>13;
    1465                 psched[lpid].context[IDLE_TASK_INDEX][CTX_PTAB_ID]  = _ptabs_vaddr[0];
    1466                 psched[lpid].context[IDLE_TASK_INDEX][CTX_TTY_ID]   = 0;
    1467                 psched[lpid].context[IDLE_TASK_INDEX][CTX_LTID_ID]  = IDLE_TASK_INDEX;
    1468                 psched[lpid].context[IDLE_TASK_INDEX][CTX_VSID_ID]  = 0;
    1469                 psched[lpid].context[IDLE_TASK_INDEX][CTX_RUN_ID]   = 1;
     1409                psched->context[IDLE_TASK_INDEX][CTX_CR_ID]   = 0;
     1410                psched->context[IDLE_TASK_INDEX][CTX_SR_ID]   = 0xFF03;
     1411                psched->context[IDLE_TASK_INDEX][CTX_PTPR_ID] = _ptabs_paddr[0][x][y]>>13;
     1412                psched->context[IDLE_TASK_INDEX][CTX_PTAB_ID] = _ptabs_vaddr[0][x][y];
     1413                psched->context[IDLE_TASK_INDEX][CTX_TTY_ID]  = 0;
     1414                psched->context[IDLE_TASK_INDEX][CTX_LTID_ID] = IDLE_TASK_INDEX;
     1415                psched->context[IDLE_TASK_INDEX][CTX_VSID_ID] = 0;
     1416                psched->context[IDLE_TASK_INDEX][CTX_RUN_ID]  = 1;
     1417
    14701418            }  // end for processors
    14711419
     
    14921440                }
    14931441
    1494                 psched[lpid].hwi_vector[srcid] = isr | channel | 0x80000000;
     1442                _schedulers[x][y][lpid]->hwi_vector[srcid] = isr | channel | 0x80000000;
    14951443                lpid = (lpid + 1) % cluster[cluster_id].procs;
    14961444
     
    15461494            unsigned int y          = cluster[cluster_id].y;
    15471495            unsigned int cluster_xy = (x<<Y_WIDTH) + y;
    1548             psched                  = _schedulers[x][y][0];
     1496            psched                  = _schedulers[x][y][lpid];
    15491497
    15501498            // update WTI vector for scheduler[cluster_id][lpid]
    1551             unsigned int index = alloc_wti_channel[cluster_id];
    1552             psched[lpid].wti_vector[index] = isr | channel | 0x80000000;
     1499            unsigned int index            = alloc_wti_channel[cluster_id];
     1500            psched->wti_vector[index]    = isr | channel | 0x80000000;
    15531501            alloc_wti_channel[cluster_id] = index + 1;
    1554             lpid = lpid + 1;
     1502            lpid                          = lpid + 1;
    15551503
    15561504            // update IRQ fields in mapping for PIC initialisation
     
    15661514    unsigned int x          = cluster[cluster_id].x;
    15671515    unsigned int y          = cluster[cluster_id].y;
    1568     psched                  = _schedulers[x][y][0];
    15691516    unsigned int slot;
    15701517    unsigned int entry;
    15711518    for ( lpid = 0 ; lpid < cluster[cluster_id].procs ; lpid++ )
    15721519    {
     1520        psched = _schedulers[x][y][lpid];
     1521       
    15731522        _puts("\n*** IRQS for proc[");
    15741523        _putd( x );
     
    15801529        for ( slot = 0 ; slot < 32 ; slot++ )
    15811530        {
    1582             entry = psched[lpid].hwi_vector[slot];
     1531            entry = psched->hwi_vector[slot];
    15831532            if ( entry & 0x80000000 )
    15841533            {
     
    15941543        for ( slot = 0 ; slot < 32 ; slot++ )
    15951544        {
    1596             entry = psched[lpid].wti_vector[slot];
     1545            entry = psched->wti_vector[slot];
    15971546            if ( entry & 0x80000000 )
    15981547            {
     
    16081557        for ( slot = 0 ; slot < 32 ; slot++ )
    16091558        {
    1610             entry = psched[lpid].pti_vector[slot];
     1559            entry = psched->pti_vector[slot];
    16111560            if ( entry & 0x80000000 )
    16121561            {
     
    16341583        _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id][0][0] >> 13) );
    16351584
    1636         // loop on the tasks in vspace (task_id is the global index)
     1585        // loop on the tasks in vspace (task_id is the global index in mapping)
    16371586        for (task_id = vspace[vspace_id].task_offset;
    16381587             task_id < (vspace[vspace_id].task_offset + vspace[vspace_id].tasks);
     
    16631612
    16641613            // ctx_ptab : page_table virtual base address
    1665             unsigned int ctx_ptab = _ptabs_vaddr[vspace_id];
     1614            unsigned int ctx_ptab = _ptabs_vaddr[vspace_id][x][y];
    16661615
    16671616            // ctx_tty : TTY terminal global index provided by the global allocator
     
    17631712            // segment and we must wait the .elf loading to get the entry point value...
    17641713            vobj_id = vspace[vspace_id].start_vobj_id;     
    1765             unsigned int ctx_epc = vobj[vobj_id].vaddr + (task[task_id].startid)*4;
     1714            unsigned int ctx_epc = vobj[vobj_id].vbase + (task[task_id].startid)*4;
    17661715
    17671716            // ctx_sp :  Get the vobj containing the stack
    17681717            vobj_id = task[task_id].stack_vobj_id;
    1769             unsigned int ctx_sp = vobj[vobj_id].vaddr + vobj[vobj_id].length;
     1718            unsigned int ctx_sp = vobj[vobj_id].vbase + vobj[vobj_id].length;
    17701719
    17711720            // get local task index in scheduler
     
    18531802        } // end loop on tasks
    18541803    } // end loop on vspaces
    1855 } // end _schedulers_init()
     1804} // end boot_schedulers_init()
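
// [Editor's note] Illustrative sketch, not part of boot.c: the function above relies on
// two simple encodings. Each processor scheduler occupies 8 Kbytes, so the scheduler of
// local processor lpid sits at (sched_vbase + (lpid<<13)); and an interrupt vector entry
// is built by OR-ing the ISR index, the channel field and the valid bit 0x80000000. The
// exact bit position of the channel field inside the entry is not shown in this diff,
// so it is kept as an opaque value here; both helper names are editorial.
static unsigned int sketch_sched_vbase( unsigned int cluster_sched_vbase,
                                        unsigned int lpid )
{
    return cluster_sched_vbase + (lpid << 13);     // 8 Kbytes per scheduler
}
static unsigned int sketch_irq_entry( unsigned int isr, unsigned int channel )
{
    return isr | channel | 0x80000000;             // MSB = valid bit
}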
    18561805
    18571806//////////////////////////////////////////////////////////////////////////////////
    18581807// This function loads the map.bin file from block device.
    1859 // The fat global varible is defined in fat32.c file.
    18601808//////////////////////////////////////////////////////////////////////////////////
    18611809void boot_mapping_init()
     
    18801828#endif
    18811829
    1882     // get "map.bin" file size (from fat32) and check it
     1830    // get "map.bin" file size (from fat) and check it
    18831831    unsigned int size    = fat.fd[fd_id].file_size;
    18841832
     
    23652313
    23662314                        _xcu_get_wti_address( wti_id, &vaddr );
    2367 
    23682315                        _pic_init( hwi_id, vaddr, cluster_xy );
     2316
    23692317#if BOOT_DEBUG_PERI
     2318unsigned int address = _pic_get_register( channel_id, IOPIC_ADDRESS );
     2319unsigned int extend  = _pic_get_register( channel_id, IOPIC_EXTEND  );
    23702320_puts("    hwi_index = ");
    23712321_putd( hwi_id );
     
    23782328_puts(",");
    23792329_putd( cluster_xy & ((1<<Y_WIDTH)-1) );
    2380 _puts("]\n");
     2330_puts("] / checked_xcu_paddr = ");
     2331_putl( (paddr_t)address + (((paddr_t)extend)<<32) );
     2332_puts("\n");
    23812333#endif
    23822334                    }
     
    24122364                  cp_port_id++ )
    24132365            {
    2414                 // Get global index of associted vobj
      2366                // get global index of associated vobj
    24152367                unsigned int vobj_id   = cp_port[cp_port_id].mwmr_vobj_id;
    24162368
    2417                 // Get MWMR channel base address
    2418                 paddr_t mwmr_channel_pbase = vobj[vobj_id].paddr;
    2419 
     2369                // get MWMR channel base address
     2370                page_table_t* ptab  = (page_table_t*)_ptabs_vaddr[0][x][y];
     2371                unsigned int  vbase = vobj[vobj_id].vbase;
     2372                unsigned int  ppn;
     2373                unsigned int  flags;
     2374                paddr_t       pbase;
     2375
     2376                _v2p_translate( ptab,
     2377                                vbase>>12 ,
     2378                                &ppn,
     2379                                &flags );
     2380
     2381                pbase = ((paddr_t)ppn)<<12;
     2382
     2383                // initialise cp_port
    24202384                _mwr_hw_init( cluster_xy,
    24212385                              cp_port_id,
    24222386                              cp_port[cp_port_id].direction,
    2423                               mwmr_channel_pbase );
     2387                              pbase );
    24242388#if BOOT_DEBUG_PERI
    24252389_puts("     port direction: ");
    24262390_putd( (unsigned int)cp_port[cp_port_id].direction );
    24272391_puts(" / mwmr_channel_pbase = ");
    2428 _putl( mwmr_channel_pbase );
     2392_putl( pbase );
    24292393_puts(" / name = ");
    24302394_puts(vobj[vobj_id].name);
     
    24362400} // end boot_peripherals_init()
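
// [Editor's note] Illustrative sketch, not part of boot.c: the MWMR channel physical
// base address is obtained above by translating the virtual page number (vbase >> 12)
// with _v2p_translate(), then shifting the resulting PPN back by 12 bits. The helper
// below restates that arithmetic, adding the page offset back for the general case of
// a non page-aligned virtual address; the 64 bits return type stands in for the
// GIET_VM paddr_t, and the helper name is editorial.
static unsigned long long sketch_vaddr_to_paddr( unsigned int vaddr,
                                                 unsigned int ppn )  // PPN from _v2p_translate()
{
    unsigned long long page_base = ((unsigned long long)ppn) << 12;  // physical page base
    unsigned int       offset    = vaddr & 0xFFF;                    // offset in 4 Kbytes page
    return page_base + offset;
}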
    24372401
     2402/////////////////////////////////////////////////////////////////////////
     2403// This function initialises the physical memory allocators in each
     2404// cluster containing a RAM pseg.
     2405/////////////////////////////////////////////////////////////////////////
     2406void boot_pmem_init()
     2407{
     2408    mapping_header_t*  header     = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
     2409    mapping_cluster_t* cluster    = _get_cluster_base(header);
     2410    mapping_pseg_t*    pseg       = _get_pseg_base(header);
     2411
     2412    unsigned int cluster_id;
     2413    unsigned int pseg_id;
     2414
     2415    // scan all clusters
     2416    for ( cluster_id = 0 ; cluster_id < X_SIZE*Y_SIZE ; cluster_id++ )
     2417    {
      2418        // scan the psegs in the cluster to find the first pseg of type RAM
     2419        unsigned int pseg_min = cluster[cluster_id].pseg_offset;
     2420        unsigned int pseg_max = pseg_min + cluster[cluster_id].psegs;
     2421        for ( pseg_id = pseg_min ; pseg_id < pseg_max ; pseg_id++ )
     2422        {
     2423            if ( pseg[pseg_id].type == PSEG_TYPE_RAM )
     2424            {
     2425                unsigned int x    = cluster[cluster_id].x;
     2426                unsigned int y    = cluster[cluster_id].y;
     2427                unsigned int base = (unsigned int)pseg[pseg_id].base;
     2428                unsigned int size = (unsigned int)pseg[pseg_id].length;
     2429                _pmem_alloc_init( x, y, base, size );
     2430
     2431#if BOOT_DEBUG_PT
     2432_puts("\n[BOOT DEBUG] pmem allocator initialised in cluster[");
     2433_putd( x );
     2434_puts(",");
     2435_putd( y );
     2436_puts("] base = ");
     2437_putx( base );
     2438_puts(" / size = ");
     2439_putx( size );
     2440_puts("\n");
     2441#endif
      2442                break;
     2443            }
     2444        }
     2445    }
     2446} // end boot_pmem_init()
     2447 
    24382448/////////////////////////////////////////////////////////////////////////
    24392449// This function is the entry point of the boot code for all processors.
     
    24612471        _puts("\n");
    24622472
      2473        // Initialise the physical memory allocators
     2474        boot_pmem_init();
     2475
     2476        _puts("\n[BOOT] Physical memory allocators initialised at cycle ");
     2477        _putd(_get_proctime());
     2478        _puts("\n");
     2479
    24632480        // Build page tables
    2464         boot_pt_init();
     2481        _ptabs_init();
     2482
     2483        _puts("\n[BOOT] Page tables initialised at cycle ");
     2484        _putd(_get_proctime());
     2485        _puts("\n");
    24652486
    24662487        // Activate MMU for proc [0,0,0]