Changeset 433 for trunk/kernel/mm/vmm.c


Ignore:
Timestamp:
Feb 14, 2018, 3:40:19 PM (6 years ago)
Author:
alain
Message:

blip

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/mm/vmm.c

    r429 r433  
    6363    intptr_t  size;
    6464
    65 vmm_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n",
    66 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
     65#if CONFIG_DEBUG_VMM_INIT
     66uint32_t cycle = (uint32_t)hal_get_cycles();
     67if( CONFIG_DEBUG_VMM_INIT )
     68printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
     69__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     70#endif
    6771
    6872    // get pointer on VMM
     
    179183    hal_fence();
    180184
    181 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x / entry_point = %x\n",
    182 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
    183 process->pid , process->vmm.entry_point );
     185#if CONFIG_DEBUG_VMM_INIT
     186cycle = (uint32_t)hal_get_cycles();
     187if( CONFIG_DEBUG_VMM_INIT )
     188printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n",
     189__FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle );
     190#endif
    184191
    185192    return 0;
     
    211218    {
    212219        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    213         vseg    = (vseg_t *)GET_PTR( vseg_xp );
     220        vseg    = GET_PTR( vseg_xp );
    214221
    215222        printk(" - %s : base = %X / size = %X / npages = %d\n",
     
    239246}  // vmm_display()
    240247
    241 /////////////////////i////////////////////
    242 void vmm_update_pte( process_t * process,
    243                      vpn_t       vpn,
    244                      uint32_t    attr,
    245                      ppn_t       ppn )
     248/////////////////////i//////////////////////////
     249void vmm_global_update_pte( process_t * process,
     250                            vpn_t       vpn,
     251                            uint32_t    attr,
     252                            ppn_t       ppn )
    246253{
    247254
     
    258265    cxy_t           owner_cxy;
    259266    lpid_t          owner_lpid;
     267
     268#if CONFIG_DEBUG_VMM_UPDATE_PTE
     269uint32_t cycle = (uint32_t)hal_get_cycles();
     270if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     271printk("\n[DBG] %s : thread %x enter for process %x / vpn %x / cycle %d\n",
     272__FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );
     273#endif
     274
     275    // check cluster is reference
     276    assert( (GET_CXY( process->ref_xp ) == local_cxy) , __FUNCTION__,
     277    "not called in reference cluster\n");
    260278
    261279    // get extended pointer on root of process copies xlist in owner cluster
     
    271289        // get cluster and local pointer on remote process
    272290        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
    273         remote_process_ptr = (process_t *)GET_PTR( remote_process_xp );
     291        remote_process_ptr = GET_PTR( remote_process_xp );
    274292        remote_process_cxy = GET_CXY( remote_process_xp );
     293
     294#if (CONFIG_DEBUG_VMM_UPDATE_PTE & 0x1)
     295if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     296printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
     297__FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );
     298#endif
    275299
    276300        // get extended pointer on remote gpt
    277301        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );
    278302
    279         hal_gpt_update_pte( remote_gpt_xp,
    280                             vpn,
    281                             attr,
    282                             ppn );
     303        // update remote GPT
     304        hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn );
    283305    } 
    284 }  // end vmm_update_pte()
     306
     307#if CONFIG_DEBUG_VMM_UPDATE_PTE
     308cycle = (uint32_t)hal_get_cycles();
     309if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     310printk("\n[DBG] %s : thread %x exit for process %x / vpn %x / cycle %d\n",
     311__FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );
     312#endif
     313
     314}  // end vmm_global_update_pte()
    285315
    286316///////////////////////////////////////
     
    308338    lpid_t          owner_lpid;
    309339
    310 vmm_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n",
    311 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
     340#if CONFIG_DEBUG_VMM_SET_COW
     341uint32_t cycle = (uint32_t)hal_get_cycles();
     342if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     343printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
     344__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     345#endif
    312346
    313347    // check cluster is reference
     
    333367        // get cluster and local pointer on remote process
    334368        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
    335         remote_process_ptr = (process_t *)GET_PTR( remote_process_xp );
     369        remote_process_ptr = GET_PTR( remote_process_xp );
    336370        remote_process_cxy = GET_CXY( remote_process_xp );
    337371
    338 vmm_dmsg("\n[DBG] %s : core[%x,%d] handling process %x in cluster %x\n",
    339 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid , remote_process_cxy );
     372#if (CONFIG_DEBUG_VMM_SET_COW &0x1)
     373if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     374printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
     375__FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );
     376#endif
    340377
    341378        // get extended pointer on remote gpt
     
    347384            // get pointer on vseg
    348385            vseg_xp  = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist );
    349             vseg     = (vseg_t *)GET_PTR( vseg_xp );
     386            vseg     = GET_PTR( vseg_xp );
    350387
    351388            assert( (GET_CXY( vseg_xp ) == local_cxy) , __FUNCTION__,
     
    357394            vpn_t    vpn_size = vseg->vpn_size;
    358395
    359 vmm_dmsg("\n[DBG] %s : core[%x,%d] handling vseg %s / vpn_base = %x / vpn_size = %x\n",
    360 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vseg_type_str(type), vpn_base, vpn_size );
    361 
    362             // set COW flag on the remote GPT depending on vseg type
     396#if (CONFIG_DEBUG_VMM_SET_COW & 0x1)
     397if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     398printk("\n[DBG] %s : thread %x handling vseg %s / vpn_base = %x / vpn_size = %x\n",
     399__FUNCTION__, CURRENT_THREAD , vseg_type_str(type), vpn_base, vpn_size );
     400#endif
     401            // only DATA, ANON and REMOTE vsegs
    363402            if( (type == VSEG_TYPE_DATA)  ||
    364403                (type == VSEG_TYPE_ANON)  ||
    365404                (type == VSEG_TYPE_REMOTE) )
    366405            {
    367                 hal_gpt_flip_cow( true,             // set_cow
    368                                   remote_gpt_xp,
    369                                   vpn_base,
    370                                   vpn_size );
    371             }
    372         }    // en loop on vsegs
     406                vpn_t      vpn;
     407                uint32_t   attr;
     408                ppn_t      ppn;
     409                xptr_t     page_xp;
     410                cxy_t      page_cxy;
     411                page_t   * page_ptr;
     412                xptr_t     forks_xp;
     413
     414                // update flags in remote GPT
     415                hal_gpt_set_cow( remote_gpt_xp,
     416                                 vpn_base,
     417                                 vpn_size );
     418
     419                // atomically increment pending forks counter in physical pages,
     420                // for all vseg pages that are mapped in reference cluster
     421                if( remote_process_cxy == local_cxy )
     422                {
     423                    // the reference GPT is the local GPT
     424                    gpt_t * gpt = GET_PTR( remote_gpt_xp );
     425
     426                    // scan all pages in vseg
     427                    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
     428                    {
     429                        // get page attributes and PPN from reference GPT
     430                        hal_gpt_get_pte( gpt , vpn , &attr , &ppn );
     431
     432                        // atomically update pending forks counter if page is mapped
     433                        if( attr & GPT_MAPPED )
     434                        {
     435                            page_xp  = ppm_ppn2page( ppn );
     436                            page_cxy = GET_CXY( page_xp );
     437                            page_ptr = GET_PTR( page_xp );
     438                            forks_xp = XPTR( page_cxy , &page_ptr->forks );
     439                            hal_remote_atomic_add( forks_xp , 1 );
     440                        }
     441                    }   // end loop on vpn
     442                }   // end if local
     443            }   // end if vseg type
     444        }   // end loop on vsegs
    373445    }   // end loop on process copies
    374446 
    375 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n",
    376 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
     447#if CONFIG_DEBUG_VMM_SET_COW
     448cycle = (uint32_t)hal_get_cycles();
     449if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     450printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
     451__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     452#endif
    377453
    378454}  // end vmm_set_cow()
     
    404480    ppn_t       ppn;
    405481
    406 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter\n",
    407 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
     482#if CONFIG_DEBUG_VMM_FORK_COPY
     483uint32_t cycle = (uint32_t)hal_get_cycles();
     484if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     485printk("\n[DBG] %s : thread %x enter / cycle %d\n",
     486__FUNCTION__ , CURRENT_THREAD, cycle );
     487#endif
    408488
    409489    // get parent process cluster and local pointer
    410490    parent_cxy     = GET_CXY( parent_process_xp );
    411     parent_process = (process_t *)GET_PTR( parent_process_xp );
     491    parent_process = GET_PTR( parent_process_xp );
    412492
    413493    // get local pointers on parent and child VMM
     
    445525        // get local and extended pointers on current parent vseg
    446526        parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    447         parent_vseg    = (vseg_t *)GET_PTR( parent_vseg_xp );
     527        parent_vseg    = GET_PTR( parent_vseg_xp );
    448528
    449529        // get vseg type
    450530        type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) );
    451531       
    452 
    453 vmm_dmsg("\n[DBG] %s : core[%x,%d] found parent vseg %s / vpn_base = %x\n",
    454 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vseg_type_str(type),
    455 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) );
     532#if CONFIG_DEBUG_VMM_FORK_COPY
     533cycle = (uint32_t)hal_get_cycles();
     534if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     535printk("\n[DBG] %s : thread %x found parent vseg %s / vpn_base = %x / cycle %d\n",
     536__FUNCTION__ , CURRENT_THREAD, vseg_type_str(type),
     537hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
     538#endif
    456539
    457540        // all parent vsegs - but STACK - must be copied in child VSL
     
    473556            vseg_attach( child_vmm , child_vseg );
    474557
    475 vmm_dmsg("\n[DBG] %s : core[%x,%d] copied to child VSL : vseg %s / vpn_base = %x\n",
    476 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vseg_type_str(type),
    477 hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) );
     558#if CONFIG_DEBUG_VMM_FORK_COPY
     559cycle = (uint32_t)hal_get_cycles();
     560if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     561printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
     562__FUNCTION__ , CURRENT_THREAD , vseg_type_str(type),
     563hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
     564#endif
    478565
    479566            // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT
     
    502589                    }
    503590
    504                     // increment page descriptor fork_nr for the referenced page if mapped
     591                    // increment pending forks counter in page if mapped
    505592                    if( mapped )
    506593                    {
    507594                        page_xp = ppm_ppn2page( ppn );
    508595                        page_cxy = GET_CXY( page_xp );
    509                         page_ptr = (page_t *)GET_PTR( page_xp );
    510                         hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , 1 );
    511 
    512 vmm_dmsg("\n[DBG] %s : core[%x,%d] copied to child GPT : vpn %x\n",
    513 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
     596                        page_ptr = GET_PTR( page_xp );
     597                        hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
     598
     599#if CONFIG_DEBUG_VMM_FORK_COPY
     600cycle = (uint32_t)hal_get_cycles();
     601if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     602printk("\n[DBG] %s : thread %x copied vpn %x to child GPT / cycle %d\n",
     603__FUNCTION__ , CURRENT_THREAD , vpn , cycle );
     604#endif
    514605
    515606                    }
     
    558649    hal_fence();
    559650
     651#if CONFIG_DEBUG_VMM_FORK_COPY
     652cycle = (uint32_t)hal_get_cycles();
     653if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     654printk("\n[DBG] %s : thread %x exit successfully / cycle %d\n",
     655__FUNCTION__ , CURRENT_THREAD , cycle );
     656#endif
     657
    560658    return 0;
    561659
     
    568666        vseg_t * vseg;
    569667
    570 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter\n",
    571 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
    572 
    573     // get pointer on VMM
     668#if CONFIG_DEBUG_VMM_DESTROY
     669uint32_t cycle = (uint32_t)hal_get_cycles();
     670if( CONFIG_DEBUG_VMM_DESTROY < cycle )
     671printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
     672__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     673#endif
     674
     675    // get pointer on local VMM
    574676    vmm_t  * vmm = &process->vmm;
    575677
     
    586688        // get pointer on first vseg in VSL
    587689                vseg_xp = XLIST_FIRST_ELEMENT( root_xp , vseg_t , xlist );
    588         vseg = (vseg_t *)GET_PTR( vseg_xp );
    589 
    590         // unmap and release all pages
     690        vseg    = GET_PTR( vseg_xp );
     691
     692        // unmap and release physical pages (if required)
    591693        vmm_unmap_vseg( process , vseg );
    592694
     
    598700        }
    599701
    600     // release lock
     702    // release lock protecting VSL
    601703        remote_rwlock_wr_unlock( lock_xp );
    602704
     
    616718    hal_gpt_destroy( &vmm->gpt );
    617719
    618 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit\n",
    619 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
     720#if CONFIG_DEBUG_VMM_DESTROY
     721cycle = (uint32_t)hal_get_cycles();
     722if( CONFIG_DEBUG_VMM_DESTROY < cycle )
     723printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     724__FUNCTION__ , CURRENT_THREAD , cycle );
     725#endif
    620726
    621727}  // end vmm_destroy()
     
    637743        {
    638744                vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    639         vseg    = (vseg_t *)GET_PTR( vseg_xp );
     745        vseg    = GET_PTR( vseg_xp );
    640746
    641747                if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
     
    766872        error_t      error;
    767873
    768 vmm_dmsg("\n[DBG] %s : core[%x,%d] enters / process %x / base %x / size %x / %s / cxy = %x\n",
    769 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
    770 process->pid , base , size , vseg_type_str(type) , cxy );
     874#if CONFIG_DEBUG_VMM_CREATE_VSEG
     875uint32_t cycle = (uint32_t)hal_get_cycles();
     876if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
     877printk("\n[DBG] %s : thread %x enter / process %x / base %x / size %x / %s / cxy %x / cycle %d\n",
     878__FUNCTION__, CURRENT_THREAD, process->pid, base, size, vseg_type_str(type), cxy, cycle );
     879#endif
    771880
    772881    // get pointer on VMM
     
    854963        remote_rwlock_wr_unlock( lock_xp );
    855964
    856 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit / process %x / base %x / size %x / type %s\n",
    857 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
    858 process->pid , base , size , vseg_type_str(type) );
     965#if CONFIG_DEBUG_VMM_CREATE_VSEG
     966cycle = (uint32_t)hal_get_cycles();
     967if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
     968printk("\n[DBG] %s : thread %x exit / process %x / %s / cxy %x / cycle %d\n",
     969__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str(type), cxy, cycle );
     970#endif
    859971
    860972        return vseg;
     
    9851097    cxy_t       page_cxy;   // page descriptor cluster
    9861098    page_t    * page_ptr;   // page descriptor pointer
    987 
    988 vmm_dmsg("\n[DBG] %s : core[%x, %d] enter / process %x / vseg %s / base %x / cycle %d\n",
    989 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid ,
    990 vseg_type_str( vseg->type ), vseg->vpn_base, (uint32_t)hal_get_cycles() );
    991 
    992     // get pointer on process GPT
     1099    xptr_t      forks_xp;   // extended pointer on pending forks counter
     1100    uint32_t    count;      // actual number of pending forks
     1101
     1102#if CONFIG_DEBUG_VMM_UNMAP_VSEG
     1103uint32_t cycle = (uint32_t)hal_get_cycles();
     1104if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
     1105printk("\n[DBG] %s : thread %x enter / process %x / vseg %s / base %x / cycle %d\n",
     1106__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
     1107#endif
     1108
     1109    // get pointer on local GPT
    9931110    gpt_t     * gpt = &process->vmm.gpt;
    9941111
     
    10071124            "an user vseg must use small pages" );
    10081125
    1009             // unmap GPT entry
     1126            // unmap GPT entry in all GPT copies
    10101127            hal_gpt_reset_pte( gpt , vpn );
    10111128
    1012             // release memory if not identity mapped
    1013             if( (vseg->flags & VSEG_IDENT)  == 0 )
     1129            // handle pending forks counter if
     1130            // 1) not identity mapped
     1131            // 2) running in reference cluster
     1132            if( ((vseg->flags & VSEG_IDENT)  == 0) &&
     1133                (GET_CXY( process->ref_xp ) == local_cxy) )
    10141134            {
    1015                 // get extended pointer on page descriptor
     1135                // get extended pointer on physical page descriptor
    10161136                page_xp  = ppm_ppn2page( ppn );
    10171137                page_cxy = GET_CXY( page_xp );
    1018                 page_ptr = (page_t *)GET_PTR( page_xp );
    1019 
    1020                 // release physical page to relevant cluster
    1021                 if( page_cxy == local_cxy )                   // local cluster
     1138                page_ptr = GET_PTR( page_xp );
     1139
     1140                // FIXME lock the physical page
     1141
     1142                // get extended pointer on pending forks counter
     1143                forks_xp = XPTR( page_cxy , &page_ptr->forks );
     1144
     1145                // get pending forks counter
     1146                count = hal_remote_lw( forks_xp );
     1147               
     1148                if( count )  // decrement pending forks counter
    10221149                {
    1023                     req.type = KMEM_PAGE;
    1024                     req.ptr  = page_ptr;
    1025                     kmem_free( &req );
     1150                    hal_remote_atomic_add( forks_xp , -1 );
     1151                } 
     1152                else         // release physical page to relevant cluster
     1153                {
     1154                    if( page_cxy == local_cxy )   // local cluster
     1155                    {
     1156                        req.type = KMEM_PAGE;
     1157                        req.ptr  = page_ptr;
     1158                        kmem_free( &req );
     1159                    }
     1160                    else                          // remote cluster
     1161                    {
     1162                        rpc_pmem_release_pages_client( page_cxy , page_ptr );
     1163                    }
    10261164                }
    1027                 else                                          // remote cluster
    1028                 {
    1029                     rpc_pmem_release_pages_client( page_cxy , page_ptr );
    1030                 }
     1165
     1166                // FIXME unlock the physical page
    10311167            }
    10321168        }
    10331169    }
     1170
     1171#if CONFIG_DEBUG_VMM_UNMAP_VSEG
     1172cycle = (uint32_t)hal_get_cycles();
     1173if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
     1174printk("\n[DBG] %s : thread %x exit / process %x / vseg %s / base %x / cycle %d\n",
     1175__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
     1176#endif
     1177
    10341178}  // end vmm_unmap_vseg()
    10351179
     
    10611205    {
    10621206        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    1063         vseg    = (vseg_t *)GET_PTR( vseg_xp );
     1207        vseg    = GET_PTR( vseg_xp );
    10641208        if( (vaddr >= vseg->min) && (vaddr < vseg->max) )
    10651209        {
     
    11851329        // get cluster and local pointer on reference process
    11861330        cxy_t       ref_cxy = GET_CXY( ref_xp );
    1187         process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
     1331        process_t * ref_ptr = GET_PTR( ref_xp );
    11881332
    11891333        if( local_cxy == ref_cxy )  return -1;   // local cluster is the reference
     
    12241368                                 vpn_t    vpn )
    12251369{
     1370
     1371#if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
     1372if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
     1373printk("\n[DBG] in %s : thread %x enter for vpn %x\n",
     1374__FUNCTION__ , CURRENT_THREAD, vpn );
     1375#endif
     1376
    12261377    // compute target cluster
    12271378    page_t     * page_ptr;
     
    12621413    }
    12631414
     1415#if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
     1416if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
     1417printk("\n[DBG] in %s : thread %x exit for vpn = %d / ppn = %x\n",
     1418__FUNCTION__ , CURRENT_THREAD, vpn, ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) );
     1419#endif
     1420
    12641421    if( page_ptr == NULL ) return XPTR_NULL;
    12651422    else                   return XPTR( page_cxy , page_ptr );
     
    12811438    index     = vpn - vseg->vpn_base;
    12821439
    1283 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn = %x / type = %s / index = %d\n",
    1284 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, vseg_type_str(type), index );
     1440#if CONFIG_DEBUG_VMM_GET_ONE_PPN
     1441if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1442printk("\n[DBG] %s : thread %x enter for vpn = %x / type = %s / index = %d\n",
     1443__FUNCTION__, CURRENT_THREAD, vpn, vseg_type_str(type), index );
     1444#endif
    12851445
    12861446    // FILE type : get the physical page from the file mapper
     
    12951455        // get mapper cluster and local pointer
    12961456        cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    1297         mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp );
     1457        mapper_t * mapper_ptr = GET_PTR( mapper_xp );
    12981458
    12991459        // get page descriptor from mapper
     
    13161476    else
    13171477    {
    1318         // allocate physical page
     1478        // allocate one physical page
    13191479        page_xp = vmm_page_allocate( vseg , vpn );
    13201480
     
    13221482
    13231483        // initialise missing page from .elf file mapper for DATA and CODE types
    1324         // => the mapper_xp field is an extended pointer on the .elf file mapper
     1484        // (the vseg->mapper_xp field is an extended pointer on the .elf file mapper)
    13251485        if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) )
    13261486        {
     
    13331493            // get mapper cluster and local pointer
    13341494            cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    1335             mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp );
     1495            mapper_t * mapper_ptr = GET_PTR( mapper_xp );
    13361496
    13371497            // compute missing page offset in vseg
     
    13411501            uint32_t elf_offset = vseg->file_offset + offset;
    13421502
    1343 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / elf_offset = %x\n",
    1344 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, elf_offset );
     1503#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
     1504if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1505printk("\n[DBG] %s : thread %x for vpn = %x / elf_offset = %x\n",
     1506__FUNCTION__, CURRENT_THREAD, vpn, elf_offset );
     1507#endif
    13451508
    13461509            // compute extended pointer on page base
     
    13521515            if( file_size < offset )                 // missing page fully in  BSS
    13531516            {
    1354 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / fully in BSS\n",
    1355 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn );
     1517
     1518#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
     1519if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1520printk("\n[DBG] %s : thread%x for vpn = %x / fully in BSS\n",
     1521__FUNCTION__, CURRENT_THREAD, vpn );
     1522#endif
    13561523
    13571524                if( GET_CXY( page_xp ) == local_cxy )
     
    13671534            {
    13681535
    1369 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / fully in mapper\n",
    1370 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn );
     1536#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
     1537if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1538printk("\n[DBG] %s : thread %x, for vpn = %x / fully in mapper\n",
     1539__FUNCTION__, CURRENT_THREAD, vpn );
     1540#endif
    13711541
    13721542                if( mapper_cxy == local_cxy )
     
    13961566            {
    13971567
    1398 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / both mapper & BSS\n"
    1399          "      %d bytes from mapper / %d bytes from BSS\n",
    1400 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn,
     1568#if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
     1569if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1570printk("\n[DBG] %s : thread %x for vpn = %x / both mapper & BSS\n"
     1571"      %d bytes from mapper / %d bytes from BSS\n",
     1572__FUNCTION__, CURRENT_THREAD, vpn,
    14011573file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size  );
    1402 
     1574#endif
    14031575                // initialize mapper part
    14041576                if( mapper_cxy == local_cxy )
     
    14411613    *ppn = ppm_page2ppn( page_xp );
    14421614
    1443 vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for vpn = %x / ppn = %x\n",
    1444 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , *ppn );
     1615#if CONFIG_DEBUG_VMM_GET_ONE_PPN
     1616if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1617printk("\n[DBG] %s : thread %x exit for vpn = %x / ppn = %x\n",
     1618__FUNCTION__ , CURRENT_THREAD , vpn , *ppn );
     1619#endif
    14451620
    14461621    return 0;
     
    14551630                     ppn_t     * ppn )
    14561631{
    1457     vseg_t  * vseg;       // pointer on vseg containing VPN
     1632    vseg_t  * vseg;       // vseg containing VPN
    14581633    ppn_t     old_ppn;    // current PTE_PPN
    14591634    uint32_t  old_attr;   // current PTE_ATTR
     
    14661641    "not called in the reference cluster\n" );
    14671642
    1468 vmm_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn = %x in process %x / cow = %d\n",
    1469 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , process->pid , cow );
     1643#if CONFIG_DEBUG_VMM_GET_PTE
     1644uint32_t cycle = (uint32_t)hal_get_cycles();
     1645if( CONFIG_DEBUG_VMM_GET_PTE > cycle )
     1646printk("\n[DBG] %s : thread %x enter for vpn = %x / process %x / cow = %d / cycle %d\n",
     1647__FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cow , cycle );
     1648#endif
    14701649
    14711650    // get VMM pointer
    14721651    vmm_t * vmm = &process->vmm;
    14731652
    1474     // get vseg pointer from ref VSL
     1653    // get vseg pointer from reference VSL
    14751654    error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg );
    14761655
     
    14821661    }
    14831662
    1484 vmm_dmsg("\n[DBG] %s : core[%x,%d] found vseg %s / vpn_base = %x / vpn_size = %x\n",
    1485 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
    1486 vseg_type_str(vseg->type) , vseg->vpn_base , vseg->vpn_size );
     1663#if CONFIG_DEBUG_VMM_GET_PTE
     1664cycle = (uint32_t)hal_get_cycles();
     1665if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1666printk("\n[DBG] %s : thread %x found vseg %s / vpn_base = %x / vpn_size = %x\n",
     1667__FUNCTION__, CURRENT_THREAD, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size );
     1668#endif
    14871669
    14881670    // access GPT to get current PTE attributes and PPN
     
    14931675    // clusters containing a copy, and return the new_ppn and new_attr
    14941676
    1495     if( cow )               ////////////// copy_on_write request ///////////
     1677    if( cow )  /////////////////////////// copy_on_write request //////////////////////
    14961678    {
    14971679        assert( (old_attr & GPT_MAPPED) , __FUNCTION__ ,
    14981680        "PTE must be mapped for a copy-on-write exception\n" );
    14991681
    1500 excp_dmsg("\n[DBG] %s : core[%x,%d] handling COW for vpn %x\n",
    1501 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
    1502 
    1503         // get extended pointer, cluster and local pointer on page descriptor
     1682#if CONFIG_DEBUG_VMM_GET_PTE
     1683cycle = (uint32_t)hal_get_cycles();
     1684if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1685printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n",
     1686__FUNCTION__, CURRENT_THREAD, vpn, process->pid );
     1687#endif
     1688
     1689        // get extended pointer, cluster and local pointer on physical page descriptor
    15041690        xptr_t   page_xp  = ppm_ppn2page( old_ppn );
    15051691        cxy_t    page_cxy = GET_CXY( page_xp );
    1506         page_t * page_ptr = (page_t *)GET_PTR( page_xp );
     1692        page_t * page_ptr = GET_PTR( page_xp );
    15071693
    15081694        // get number of pending forks in page descriptor
    1509         uint32_t count = hal_remote_lw( XPTR( page_cxy , &page_ptr->fork_nr ) );
    1510 
    1511         if( count )        // pending fork => allocate a new page, copy it, reset COW
     1695        uint32_t forks = hal_remote_lw( XPTR( page_cxy , &page_ptr->forks ) );
     1696
     1697        if( forks )        // pending fork => allocate a new page, copy old to new
    15121698        {
    15131699            // allocate a new physical page
     
    15391725
    15401726        // update GPT[vpn] for all GPT copies
    1541         // to maintain coherence of copies
    1542         vmm_update_pte( process,
    1543                         vpn,
    1544                         new_attr,
    1545                         new_ppn );
    1546 
    1547         // decrement fork_nr in page descriptor
    1548         hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , -1 );
    1549     }
    1550     else                         /////////////// page_fault request ///////////
     1727        vmm_global_update_pte( process, vpn, new_attr, new_ppn );
     1728
     1729        // decrement pending forks counter in page descriptor
     1730        hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 );
     1731    }
     1732    else  ////////////////////////////////// page_fault request ////////////////////////
    15511733    { 
    15521734        if( (old_attr & GPT_MAPPED) == 0 )   // true page_fault => map it
    15531735        {
    15541736
    1555 excp_dmsg("\n[DBG] %s : core[%x,%d] handling page fault for vpn %x\n",
    1556 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
     1737#if CONFIG_DEBUG_VMM_GET_PTE
     1738cycle = (uint32_t)hal_get_cycles();
     1739if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1740printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n",
     1741__FUNCTION__, CURRENT_THREAD, vpn, process->pid );
     1742#endif
    15571743
    15581744            // allocate new_ppn, depending on vseg type
     
    15921778    }
    15931779
    1594 excp_dmsg("\n[DBG] %s : core[%x,%d] update GPT for vpn %x / ppn = %x / attr = %x\n",
    1595 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , new_ppn , new_attr );
    1596 
    1597     // retur success
     1780#if CONFIG_DEBUG_VMM_GET_PTE
     1781cycle = (uint32_t)hal_get_cycles();
     1782if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1783printk("\n[DBG] %s : thread,%x exit for vpn %x in process %x / ppn = %x / attr = %x / cycle %d\n",
     1784__FUNCTION__, CURRENT_THREAD, vpn, process->pid, new_ppn, new_attr, cycle );
     1785#endif
     1786
     1787    // return success
    15981788    *ppn  = new_ppn;
    15991789    *attr = new_attr;
     
    16121802    // get reference process cluster and local pointer
    16131803    cxy_t       ref_cxy = GET_CXY( process->ref_xp );
    1614     process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );
     1804    process_t * ref_ptr = GET_PTR( process->ref_xp );
    16151805
    16161806    // get missing PTE attributes and PPN from reference cluster
     
    16511841                        vpn_t       vpn )
    16521842{
    1653     uint32_t         attr;          // missing page attributes
    1654     ppn_t            ppn;           // missing page PPN
     1843    uint32_t         attr;          // page attributes
     1844    ppn_t            ppn;           // page PPN
    16551845    error_t          error;
    16561846
     1847   
    16571848    // get reference process cluster and local pointer
    16581849    cxy_t       ref_cxy = GET_CXY( process->ref_xp );
    1659     process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );
     1850    process_t * ref_ptr = GET_PTR( process->ref_xp );
    16601851
    16611852    // get new PTE attributes and PPN from reference cluster
     
    17221913    {
    17231914        cxy_t       ref_cxy = GET_CXY( process->ref_xp );
    1724         process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );
     1915        process_t * ref_ptr = GET_PTR( process->ref_xp );
    17251916        rpc_vmm_get_pte_client( ref_cxy , ref_ptr , vpn , false , &attr , &ppn , &error );
    17261917    }
Note: See TracChangeset for help on using the changeset viewer.