Changeset 632 for trunk/hal


Ignore:
Timestamp:
May 28, 2019, 2:56:04 PM (6 years ago)
Author:
alain
Message:

This version replaces the RPC by direct remote memory access
for physical pages allocation/release.
It is committed before being tested.

Location:
trunk/hal
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/hal/generic/hal_gpt.h

    r629 r632  
    9696 * of a remote GPT identified by the <gpt_xp> and <vpn> arguments, after checking
    9797 * (in a busy waiting loop) that this attribute has been reset.
    98  * Then, it returns in the <attr> and <ppn> buffers the current value of the PTE.
    99  * It allocates memory required by the GPT implementation to register this lock
    100  * in the PTE when required.
     98 * It returns in the <attr> and <ppn> buffers the value of the PTE before modification.
     99 * It atomically allocates memory  to register this attribute in the PTE when
     100 * required by a specific GPT implementation (example : allocate a PT2 in the TSAR GPT).
    101101 * WARNING : Only small pages can be locked.
    102102 ****************************************************************************************
     
    128128 *
    129129 * WARNING : For a small page, it checks that the GPT_LOCKED attribute has been
    130  *           previously set, to prevent concurrent accesses.
     130 *           previously set, to prevent concurrent mapping accesses.
    131131 ****************************************************************************************
    132132 * @ gpt_xp    : [in] extended pointer on the page table
     
    212212 * It modifies an existing entry identified by the <vpn> argument in a remote GPT
    213213 * identified by the <gpt_xp> argument, using remote accesses.
    214  * It does NOT require the GPT_LOCKED attribute to be set in the target PTE.
    215  * It cannot fail, because only MAPPED & SMALL entries are modified.
     214 * - The MAPPED and SMALL attributes must be set, and the LOCKED attibute must be reset
     215 *   in the <attr> argument.
     216 * - The MAPPED and SMALL attributes must be set in the target PTE.
    216217 ****************************************************************************************
    217218 * @ gpt_xp    : [in] extended pointer on the page table
  • trunk/hal/tsar_mips32/core/hal_exception.c

    r625 r632  
    165165// in case of illegal virtual address. Finally, it updates the local page table from the
    166166// reference cluster.
     167// WARNING : In order to prevent deadlocks, this function enable IRQs before calling the
     168// vmm_handle_page_fault() and the vmm_handle_cow() functions, because concurrent calls
     169// to these functions can create cross dependencies...
    167170//////////////////////////////////////////////////////////////////////////////////////////
    168171// @ this     : pointer on faulty thread descriptor.
     
    187190
    188191    // check thread type
    189     if( CURRENT_THREAD->type != THREAD_USER )
     192   if( CURRENT_THREAD->type != THREAD_USER )
    190193    {
    191194        printk("\n[PANIC] in %s : illegal thread type %s\n",
  • trunk/hal/tsar_mips32/core/hal_gpt.c

    r630 r632  
    7777#define TSAR_MMU_ATTR_FROM_PTE2( pte2 )    (pte2 & 0xFFC000FF)
    7878
    79 
    8079///////////////////////////////////////////////////////////////////////////////////////
    8180// This static function translates the GPT attributes to the TSAR attributes
     
    125124    return gpt_attr;
    126125}
     126
     127///////////////////////////////////////////////////////////////////////////////////////
     128// The blocking hal_gpt_lock_pte() function implements a busy-waiting policy to get
     129// exclusive access to a specific GPT entry.
     130// - when non zero, the following variable defines the max number of iterations
     131//   in the busy waiting loop.
     132// - when zero, the watchdog mechanism is deactivated.
     133///////////////////////////////////////////////////////////////////////////////////////
     134
     135#define GPT_LOCK_WATCHDOG   100000
    127136
    128137/////////////////////////////////////
     
    317326*/
    318327
    319 /////////////////////////////////////////////////////////////////////////////////////////
    320 // This static function returns in the <ptd1_value> buffer the current value of
    321 // the PT1 entry identified by the <pte1_xp> argument, that must contain a PTD1
    322 // (i.e. a pointer on a PT2). If this PT1 entry is not mapped yet, it allocates a
    323 // new PT2 and updates the PT1 entry, using the TSAR_MMU_LOCKED attribute in PT1
    324 // entry, to handle possible concurrent mappings of the missing PTD1:
    325 // 1) If the PT1 entry is unmapped, it tries to atomically lock this PTD1.
    326 //    - if the atomic lock is successful it allocates a new PT1, and updates the PTD1.
    327 //    - else, it simply waits, in a polling loop, the mapping done by another thread.
    328 //    In both cases, returns the PTD1 value, when the mapping is completed.
    329 // 2) If the PT1 entry is already mapped, it returns the PTD1 value, and does
    330 //    nothing else.
    331 /////////////////////////////////////////////////////////////////////////////////////////
    332 static error_t hal_gpt_allocate_pt2( xptr_t     ptd1_xp,
    333                                      uint32_t * ptd1_value )
    334 {
    335     cxy_t      gpt_cxy;     // target GPT cluster = GET_CXY( ptd1_xp );
    336     uint32_t   ptd1;        // PTD1 value
    337     ppn_t      pt2_ppn;     // PPN of page containing the new PT2
    338     bool_t     atomic;
    339     page_t   * page;
    340     xptr_t     page_xp;
    341 
    342     // get GPT cluster identifier
    343     gpt_cxy = GET_CXY( ptd1_xp );
    344 
    345     // get current ptd1 value
    346     ptd1 = hal_remote_l32( ptd1_xp );
    347 
    348     if( (ptd1 & TSAR_PTE_MAPPED) == 0)    // PTD1 unmapped and unlocked
    349         {
    350         // atomically lock the PTD1 to prevent concurrent PTD1 mappings
    351         atomic = hal_remote_atomic_cas( ptd1_xp,
    352                                         ptd1,
    353                                         ptd1 | TSAR_PTE_LOCKED );
    354 
    355         if( atomic )  // PTD1 successfully locked
    356                 {
    357             // allocate one physical page for PT2
    358             if( gpt_cxy == local_cxy )
    359             {
    360                     kmem_req_t req;
    361                     req.type  = KMEM_PAGE;
    362                     req.size  = 0;                     // 1 small page
    363                     req.flags = AF_KERNEL | AF_ZERO;
    364                     page = (page_t *)kmem_alloc( &req );
    365             }
    366             else
    367             {
    368                 rpc_pmem_get_pages_client( gpt_cxy , 0 , &page );
    369             }
    370 
    371             if( page == NULL ) return -1;
    372 
    373             // get the PT2 PPN
    374             page_xp = XPTR( gpt_cxy , page );       
    375             pt2_ppn = ppm_page2ppn( page_xp );
    376 
    377             // build  PTD1
    378             ptd1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | pt2_ppn;
    379 
    380             // set the PTD1 value in PT1
    381             hal_remote_s32( ptd1_xp , ptd1 );
    382             hal_fence();
    383 
    384 #if DEBUG_HAL_GPT_ALLOCATE_PT2
    385 thread_t * this  = CURRENT_THREAD;
    386 uint32_t   cycle = (uint32_t)hal_get_cycles();
    387 if( DEBUG_HAL_GPT_ALLOCATE_PT2 < cycle )
    388 printk("\n[%s] : thread[%x,%x] map PTD1 / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
    389 __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, ptd1 );
    390 #endif
    391         }
    392         else         // PTD1 modified by another thread
    393         {
    394             // poll PTD1 until mapped by another thread
    395             while( (ptd1 & TSAR_PTE_MAPPED) == 0 )  ptd1 = hal_remote_l32( ptd1_xp );
    396         }
    397     }
    398     else                                   // PTD1 mapped => just use it
    399     {
    400 
    401 #if DEBUG_HAL_GPT_ALLOCATE_PT2
    402 thread_t * this  = CURRENT_THREAD;
    403 uint32_t   cycle = (uint32_t)hal_get_cycles();
    404 if( DEBUG_HAL_GPT_ALLOCATE_PT2 < cycle )
    405 printk("\n[%s] : thread[%x,%x] PTD1 mapped / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
    406 __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, ptd1 );
    407 #endif
    408 
    409     }
    410 
    411     *ptd1_value = ptd1;
    412     return 0;
    413 
    414 }  // end hal_gpt_allocate_pt2
    415 
    416 
    417 
    418 
    419328////////////////////////////////////////////
    420329error_t hal_gpt_lock_pte( xptr_t     gpt_xp,
     
    423332                          ppn_t    * ppn )
    424333{
    425     error_t             error;
    426334    uint32_t          * pt1_ptr;         // local pointer on PT1 base
    427     xptr_t              pte1_xp;         // extended pointer on PT1[x1] entry
    428         uint32_t            pte1;            // value of PT1[x1] entry
     335    xptr_t              ptd1_xp;         // extended pointer on PT1[x1] entry
     336        uint32_t            ptd1;            // value of PT1[x1] entry
     337
     338    xptr_t              page_xp;
    429339
    430340        ppn_t               pt2_ppn;         // PPN of page containing PT2
    431341    uint32_t          * pt2_ptr;         // local pointer on PT2 base
    432         xptr_t              pte2_attr_xp;    // extended pointer on PT2[ix2].attr
     342        xptr_t              pte2_xp;         // extended pointer on PT2[ix2].attr
    433343    uint32_t            pte2_attr;       // PT2[ix2].attr current value   
    434         xptr_t              pte2_ppn_xp;     // extended pointer on PT2[ix2].ppn
    435344    uint32_t            pte2_ppn;        // PT2[ix2].ppn current value   
    436345        bool_t              atomic;
     346
     347#if GPT_LOCK_WATCHDOG
     348    uint32_t count;
     349#endif
    437350
    438351    // get cluster and local pointer on GPT
     
    440353    gpt_t * gpt_ptr = GET_PTR( gpt_xp );
    441354
    442     // get indexes in PTI & PT2
     355#if DEBUG_HAL_GPT_LOCK_PTE
     356thread_t * this  = CURRENT_THREAD;
     357uint32_t   cycle = (uint32_t)hal_get_cycles();
     358if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
     359printk("\n[%s] : thread[%x,%x] enters / vpn %x in cluster %x / cycle %d\n",
     360__FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
     361#endif
     362
     363    // get indexes in PTI & PT2 from vpn
    443364    uint32_t  ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );    // index in PT1
    444365    uint32_t  ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );    // index in PT2
     
    447368    pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
    448369
    449     // build extended pointer on PTE1 == PT1[ix1]
    450         pte1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
    451 
    452     // get PTE1 value from PT1
    453     // allocate a new PT2 for this PTE1 if required
    454     error = hal_gpt_allocate_pt2( pte1_xp , &pte1 );
    455 
    456     if( error )
     370    // build extended pointer on PTD1 == PT1[ix1]
     371        ptd1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
     372
     373    // get current PT1 entry value
     374    ptd1 = hal_remote_l32( ptd1_xp );
     375
     376    // If PTD1 is unmapped and unlocked, try to atomically lock this PT1 entry.
     377    // This PTD1 lock prevent multiple concurrent PT2 allocations
     378    // - only the thread that successfully locked the PTD1 allocates a new PT2
     379    //   and updates the PTD1
     380    // - all other threads simply wait until the missing PTD1 is mapped.
     381
     382    if( ptd1 == 0 ) 
     383        {
     384        // try to atomically lock the PTD1 to prevent concurrent PT2 allocations
     385        atomic = hal_remote_atomic_cas( ptd1_xp,
     386                                        ptd1,
     387                                        ptd1 | TSAR_PTE_LOCKED );
     388        if( atomic ) 
     389                {
     390            // allocate one 4 Kbytes physical page for PT2
     391            page_xp = ppm_remote_alloc_pages( gpt_cxy , 0 );
     392
     393            if( page_xp == NULL )
     394            {
     395                printk("\n[ERROR] in %s : cannot allocate memory for PT2\n", __FUNCTION__ );
     396                return -1;
     397            }
     398
     399            // get the PT2 PPN
     400            pt2_ppn = ppm_page2ppn( page_xp );
     401
     402            // build  PTD1
     403            ptd1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | pt2_ppn;
     404
     405            // set the PTD1 value in PT1
     406            // this unlocks the PTD1
     407            hal_remote_s32( ptd1_xp , ptd1 );
     408            hal_fence();
     409
     410#if (DEBUG_HAL_GPT_LOCK_PTE & 1)
     411if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
     412printk("\n[%s] : thread[%x,%x] allocates a new PT2 for vpn %x in cluster %x\n",
     413__FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy );
     414#endif
     415
     416        }  // end if atomic
     417    }  // end if (ptd1 == 0)
     418
     419    // wait until PTD1 is mapped by another thread
     420    while( (ptd1 & TSAR_PTE_MAPPED) == 0 )
    457421    {
    458         printk("\n[ERROR] in %s : cannot allocate memory for PT2\n", __FUNCTION__ );
    459         return -1;
     422        ptd1 = hal_remote_l32( ptd1_xp );
     423
     424#if GPT_LOCK_WATCHDOG
     425if( count > GPT_LOCK_WATCHDOG ) 
     426{
     427    thread_t * thread = CURRENT_THREAD;
     428    printk("\n[PANIC] in %s : thread[%x,%x] waiting PTD1 / vpn %x / cxy %x / %d iterations\n",
     429    __FUNCTION__, thread->process->pid, thread->trdid, vpn, gpt_cxy, count );
     430    hal_core_sleep();
     431}
     432count++;
     433#endif
     434
    460435    }
    461436
    462     if( (pte1 & TSAR_PTE_SMALL) == 0 )
    463     {
    464         printk("\n[ERROR] in %s : cannot lock a small page\n", __FUNCTION__ );
    465         return -1;
    466     }
    467 
    468     // get pointer on PT2 base from PTE1
    469         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
     437// check ptd1 because only small page can be locked
     438assert( (ptd1 & TSAR_PTE_SMALL), "cannot lock a big page\n");
     439
     440#if (DEBUG_HAL_GPT_LOCK_PTE & 1)
     441if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
     442printk("\n[%s] : thread[%x,%x] get ptd1 %x for vpn %x in cluster %x\n",
     443__FUNCTION__, this->process->pid, this->trdid, ptd1, vpn, gpt_cxy );
     444#endif
     445
     446    // get pointer on PT2 base from PTD1
     447        pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( ptd1 );
    470448        pt2_ptr = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    471449
    472     // build extended pointers on PT2[ix2].attr and PT2[ix2].ppn
    473     pte2_attr_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] );
    474     pte2_ppn_xp  = XPTR( gpt_cxy , &pt2_ptr[2 * ix2 + 1] );
    475  
    476     // wait until PTE2 unlocked, get PTE2.attr and set lock
     450    // build extended pointers on PT2[ix2].attr 
     451    pte2_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] );
     452
     453    // wait until PTE2 atomically set using a remote CAS
    477454    do
    478455    {
    479         // busy waiting until TSAR_MMU_LOCK == 0
     456
     457#if GPT_LOCK_WATCHDOG
     458count = 0;
     459#endif
     460
     461        // wait until PTE lock released by the current owner
    480462        do
    481463                {
    482                         pte2_attr = hal_remote_l32( pte2_attr_xp );
     464                        pte2_attr = hal_remote_l32( pte2_xp );
     465
     466#if GPT_LOCK_WATCHDOG
     467if( count > GPT_LOCK_WATCHDOG ) 
     468{
     469    thread_t * thread = CURRENT_THREAD;
     470    printk("\n[PANIC] in %s : thread[%x,%x] waiting PTE2 / vpn %x / cxy %x / %d iterations\n",
     471    __FUNCTION__, thread->process->pid, thread->trdid, vpn, gpt_cxy, count );
     472    hal_core_sleep();
     473}
     474count++;
     475#endif
     476     
    483477                }
    484478        while( (pte2_attr & TSAR_PTE_LOCKED) != 0 );
    485479
    486         // try to atomically set the TSAR_MMU_LOCK attribute   
    487                 atomic = hal_remote_atomic_cas( pte2_attr_xp,
     480        // try to atomically set the TSAR_PTE_LOCKED attribute   
     481                atomic = hal_remote_atomic_cas( pte2_xp,
    488482                                        pte2_attr,
    489483                                        (pte2_attr | TSAR_PTE_LOCKED) );
     
    492486
    493487    // get PTE2.ppn
    494     pte2_ppn = hal_remote_l32( pte2_ppn_xp );
     488    pte2_ppn = hal_remote_l32( pte2_xp + 4 );
     489
     490#if DEBUG_HAL_GPT_LOCK_PTE
     491cycle = (uint32_t)hal_get_cycles();
     492if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
     493printk("\n[%s] : thread[%x,%x] exit / vpn %x in cluster %x / attr %x / ppn %x / cycle %d\n",
     494__FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, pte2_attr, pte2_ppn, cycle );
     495#endif
     496   
     497    // return PPN and GPT attributes
     498    *ppn  = pte2_ppn & ((1<<TSAR_MMU_PPN_WIDTH)-1);
     499    *attr = tsar2gpt( pte2_attr );
     500        return 0;
     501
     502}  // end hal_gpt_lock_pte()
     503
     504////////////////////////////////////////
     505void hal_gpt_unlock_pte( xptr_t  gpt_xp,
     506                         vpn_t   vpn )
     507{
     508    uint32_t * pt1_ptr;         // local pointer on PT1 base
     509    xptr_t     ptd1_xp;         // extended pointer on PT1[ix1]
     510        uint32_t   ptd1;            // value of PT1[ix1] entry
     511
     512        ppn_t      pt2_ppn;         // PPN of page containing PT2
     513    uint32_t * pt2_ptr;         // PT2 base address
     514        xptr_t     pte2_xp;         // extended pointer on PT2[ix2].attr
     515        uint32_t   pte2_attr;       // PTE2 attribute
     516
     517    // get cluster and local pointer on GPT
     518    cxy_t   gpt_cxy = GET_CXY( gpt_xp );
     519    gpt_t * gpt_ptr = GET_PTR( gpt_xp );
    495520
    496521#if DEBUG_HAL_GPT_LOCK_PTE
     
    498523uint32_t   cycle = (uint32_t)hal_get_cycles();
    499524if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
    500 printk("\n[%s] : thread[%x,%x] locks vpn %x / attr %x / ppn %x / cluster %x / cycle %d\n",
    501 __FUNCTION__, this->process->pid, this->trdid, vpn, attr, ppn, gpt_cxy, cycle );
    502 #endif
    503    
    504     // return PPN and GPT attributes
    505     *ppn  = hal_remote_l32( pte2_ppn_xp ) & ((1<<TSAR_MMU_PPN_WIDTH)-1);
    506     *attr = tsar2gpt( pte2_attr );
    507         return 0;
    508 
    509 }  // end hal_gpt_lock_pte()
    510 
    511 ////////////////////////////////////////
    512 void hal_gpt_unlock_pte( xptr_t  gpt_xp,
    513                          vpn_t   vpn )
    514 {
    515     uint32_t * pt1_ptr;         // local pointer on PT1 base
    516     xptr_t     pte1_xp;         // extended pointer on PT1[ix1]
    517         uint32_t   pte1;            // value of PT1[ix1] entry
    518 
    519         ppn_t      pt2_ppn;         // PPN of page containing PT2
    520     uint32_t * pt2_ptr;         // PT2 base address
    521         uint32_t   pte2_attr_xp;    // extended pointer on PT2[ix2].attr
    522 
    523         uint32_t   attr;            // PTE2 attribute
    524 
    525     // get cluster and local pointer on GPT
    526     cxy_t   gpt_cxy = GET_CXY( gpt_xp );
    527     gpt_t * gpt_ptr = GET_PTR( gpt_xp );
     525printk("\n[%s] : thread[%x,%x] enters for vpn %x in cluster %x / cycle %d\n",
     526__FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
     527#endif
    528528
    529529    // compute indexes in P1 and PT2
     
    534534    pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
    535535
    536     // build extended pointer on PTE1 == PT1[ix1]
    537         pte1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
    538 
    539     // get current pte1 value
    540     pte1 = hal_remote_l32( pte1_xp );
    541 
    542 // check PTE1 attributes
    543 assert( (((pte1 & TSAR_PTE_MAPPED) != 0) && ((pte1 & TSAR_PTE_SMALL) != 0)),
    544 "try to unlock a big or unmapped PTE1\n");
    545 
    546     // get pointer on PT2 base from PTE1
    547         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
     536    // build extended pointer on PTD1 == PT1[ix1]
     537        ptd1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
     538
     539    // get current ptd1 value
     540    ptd1 = hal_remote_l32( ptd1_xp );
     541
     542// check PTD1 attributes
     543assert( ((ptd1 & TSAR_PTE_MAPPED) != 0), "unmapped PTE1\n");
     544assert( ((ptd1 & TSAR_PTE_SMALL ) != 0), "big page PTE1\n");
     545
     546    // get pointer on PT2 base from PTD1
     547        pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( ptd1 );
    548548        pt2_ptr = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    549549
    550550    // build extended pointers on PT2[ix2].attr 
    551     pte2_attr_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] );
     551    pte2_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] );
    552552
    553553    // get PT2[ix2].attr
    554     attr = hal_remote_l32( pte2_attr_xp );
    555 
    556     // reset TSAR_MMU_LOCK attribute
    557     hal_remote_s32( pte2_attr_xp , attr & ~TSAR_PTE_LOCKED );
     554    pte2_attr = hal_remote_l32( pte2_xp );
     555
     556// check PTE2 attributes
     557assert( ((pte2_attr & TSAR_PTE_MAPPED) != 0), "unmapped PTE2\n");
     558assert( ((pte2_attr & TSAR_PTE_LOCKED) != 0), "unlocked PTE2\n");
     559
     560    // reset TSAR_PTE_LOCKED attribute
     561    hal_remote_s32( pte2_xp , pte2_attr & ~TSAR_PTE_LOCKED );
    558562
    559563#if DEBUG_HAL_GPT_LOCK_PTE
    560 thread_t * this  = CURRENT_THREAD;
    561 uint32_t   cycle = (uint32_t)hal_get_cycles();
     564cycle = (uint32_t)hal_get_cycles();
    562565if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
    563 printk("\n[%s] : thread[%x,%x] unlocks vpn %x / attr %x / ppn %x / cluster %x / cycle %d\n",
    564 __FUNCTION__, this->process->pid, this->trdid, vpn, attr, ppn, gpt_cxy, cycle );
    565 #endif
    566  
     566printk("\n[%s] : thread[%x,%x] unlocks vpn %x in cluster %x / cycle %d\n",
     567__FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
     568#endif
     569
    567570}  // end hal_gpt_unlock_pte()
     571
    568572
    569573///////////////////////////////////////
     
    693697    xptr_t     pte2_attr_xp;   // extended pointer on PT2[ix2].attr
    694698    xptr_t     pte2_ppn_xp;    // extended pointer on PT2[ix2].ppn
    695     uint32_t   pte2_attr;      // current value of PT2[ix2].attr
    696699
    697700    // get cluster and local pointer on GPT
     
    10651068        ppn_t               pt2_ppn;             // PPN of PT2
    10661069        uint32_t          * pt2;                 // PT2 base address
     1070    xptr_t              pte2_xp;             // exended pointer on PTE2
    10671071
    10681072    uint32_t            ix1;                 // index in PT1
    10691073    uint32_t            ix2;                 // index in PT2
    10701074
     1075
    10711076    uint32_t            tsar_attr;           // PTE attributes for TSAR MMU
    10721077
    1073     // check attr argument MAPPED and SMALL
    1074     if( (attr & GPT_MAPPED) == 0 )  return;
    1075     if( (attr & GPT_SMALL ) == 0 )  return;
     1078// check MAPPED, SMALL, and not LOCKED in attr argument
     1079assert( ((attr & GPT_MAPPED) != 0), "attribute MAPPED must be set in new attributes\n" );
     1080assert( ((attr & GPT_SMALL ) != 0), "attribute SMALL  must be set in new attributes\n" );
     1081assert( ((attr & GPT_LOCKED) == 0), "attribute LOCKED must not be set in new attributes\n" );
    10761082
    10771083    // get cluster and local pointer on remote GPT
     
    10921098    pte1 = hal_remote_l32( XPTR( gpt_cxy , &pt1[ix1] ) );
    10931099
    1094     if( (pte1 & TSAR_PTE_MAPPED) == 0 ) return;
    1095     if( (pte1 & TSAR_PTE_SMALL ) == 0 ) return;
     1100// check MAPPED and SMALL in target PTE1
     1101assert( ((pte1 & GPT_MAPPED) != 0), "attribute MAPPED must be set in target PTE1\n" );
     1102assert( ((pte1 & GPT_SMALL ) != 0), "attribute SMALL  must be set in target PTE1\n" );
    10961103
    10971104    // get PT2 base from PTE1
     
    10991106    pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    11001107
     1108    // get extended pointer on PTE2
     1109    pte2_xp = XPTR( gpt_cxy , &pt2[2*ix2] );
     1110   
     1111// check MAPPED in target PTE2
     1112assert( ((hal_remote_l32(pte2_xp) & GPT_MAPPED) != 0),
     1113"attribute MAPPED must be set in target PTE2\n" );
     1114
    11011115    // set PTE2 in this order
    1102         hal_remote_s32( XPTR( gpt_cxy, &pt2[2 * ix2 + 1] ) , ppn );
     1116        hal_remote_s32( pte2_xp    , ppn );
    11031117        hal_fence();
    1104         hal_remote_s32( XPTR( gpt_cxy, &pt2[2 * ix2]     ) , tsar_attr );
     1118        hal_remote_s32( pte2_xp + 4 , tsar_attr );
    11051119        hal_fence();
    11061120
     
    11101124
    11111125
    1112 /* unused until now (march 2019) [AG]
    1113 
    1114 //////////////////////////////////////
    1115 void hal_gpt_reset_range( gpt   * gpt,
    1116                           vpn_t   vpn_min,
    1117                           vpn_t   vpn_max )
    1118 {
    1119     vpn_t      vpn;         // current vpn
    1120 
    1121     uint32_t * pt1;         // PT1 base address
    1122     uint32_t   pte1;        // PT1 entry value
    1123 
    1124     ppn_t      pt2_ppn;     // PPN of PT2
    1125     uint32_t * pt2;         // PT2 base address
    1126 
    1127     uint32_t   ix1;         // index in PT1
    1128     uint32_t   ix2;         // index in PT2
    1129 
    1130     // get PT1
    1131     pt1 = gpt->ptr;
    1132 
    1133     // initialize current index
    1134     vpn = vpn_min;
    1135 
    1136     // loop on pages
    1137     while( vpn <= vpn_max )
    1138     {
    1139         // get ix1 index from vpn
    1140         ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
    1141 
    1142         // get PTE1
    1143         pte1 = pt1[ix1]
    1144 
    1145             if( (pte1 & TSAR_PTE_MAPPED) == 0 )     // PT1[ix1] unmapped
    1146         {
    1147             // update vpn (next big page)
    1148             (vpn = ix1 + 1) << 9;
    1149         }
    1150             if( (pte1 & TSAR_PTE_SMALL) == 0 )      // it's a PTE1 (big page)
    1151             {
    1152             // unmap the big page
    1153             pt1[ix1] = 0;
    1154                 hal_fence();
    1155            
    1156             // update vpn (next big page)
    1157             (vpn = ix1 + 1) << 9;
    1158         }
    1159         else                                    // it's a PTD1 (small page)
    1160         {
    1161             // compute PT2 base address
    1162             pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
    1163             pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
    1164 
    1165             // get ix2 index from vpn
    1166             ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
    1167 
    1168             // unmap the small page
    1169             pt2[2*ix2]   = 0;         
    1170             hal_fence();       
    1171 
    1172             // update vpn (next small page)
    1173             vpn++;
    1174         }
    1175     }
    1176 }  // hal_gpt_reset_range()
    1177 */
    1178 
    1179 
     1126
  • trunk/hal/tsar_mips32/core/hal_irqmask.c

    r457 r632  
    3333        __asm__ volatile
    3434                (".set noat                          \n"
    35          "mfc0   $1,     $12                 \n"
    36                  "or     %0,     $0,     $1          \n"
     35         "mfc0   $1,     $12                 \n"   /* $1    <= c0_sr        */
     36                 "or     %0,     $0,     $1          \n"   /* old   <= $1           */
    3737                 "srl    $1,     $1,     1           \n"
    38                  "sll    $1,     $1,     1           \n"
    39                  "mtc0   $1,     $12                 \n"
     38                 "sll    $1,     $1,     1           \n"   /* clear IE bit in $1    */
     39                 "mtc0   $1,     $12                 \n"   /* c0_sr <= $1           */
    4040         ".set at                            \n"
    4141                 : "=&r" (sr) );
     
    5151        __asm__ volatile
    5252                (".set noat                          \n"
    53                  "mfc0   $1,     $12                 \n"
    54                  "or     %0,     $0,     $1          \n"
    55                  "ori    $1,     $1,     0xFF01      \n"
    56                  "mtc0   $1,     $12                 \n"
     53                 "mfc0   $1,     $12                 \n"   /* s1    <= c0_sr        */
     54                 "or     %0,     $0,     $1          \n"   /* old   <= $1           */
     55                 "ori    $1,     $1,     0x1         \n"   /* set IE bit in $1      */
     56                 "mtc0   $1,     $12                 \n"   /* c0_sr <= $1           */
    5757         ".set at                            \n"
    5858                 : "=&r" (sr) );
  • trunk/hal/tsar_mips32/core/hal_ppm.c

    r610 r632  
    22 * hal_ppm.c - Generic Physical Page Manager API implementation for TSAR
    33 *
    4  * Authors  Alain Greiner (2016,2017,2018)
     4 * Authors  Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    6060
    6161        // initialize lock protecting the free_pages[] lists
    62         busylock_init( &ppm->free_lock , LOCK_PPM_FREE );
     62        remote_busylock_init( XPTR( local_cxy , &ppm->free_lock ) , LOCK_PPM_FREE );
    6363
    6464        // initialize lock protecting the dirty_pages list
     
    117117
    118118        // check consistency
    119         return ppm_assert_order( ppm );
     119        return  ppm_assert_order();
    120120
    121121}  // end hal_ppm_init()
Note: See TracChangeset for help on using the changeset viewer.