Changeset 438 for trunk/kernel/mm


Ignore:
Timestamp:
Apr 4, 2018, 2:49:02 PM (7 years ago)
Author:
alain
Message:

Fix a bug in scheduler related to RPC blocking.

Location:
trunk/kernel/mm
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/mm/kcm.c

    r437 r438  
    4848{
    4949
    50 #if CONFIG_DEBUG_KCM
     50#if DEBUG_KCM
    5151uint32_t cycle = (uint32_t)hal_get_cycles();
    52 if( CONFIG_DEBUG_KCM < cycle )
     52if( DEBUG_KCM < cycle )
    5353printk("\n[DBG] %s : thread %x enters for %s / page %x / count %d / active %d\n",
    5454__FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) ,
     
    8585                     + (index * kcm->block_size) );
    8686
    87 #if CONFIG_DEBUG_KCM
     87#if DEBUG_KCM
    8888cycle = (uint32_t)hal_get_cycles();
    89 if( CONFIG_DEBUG_KCM < cycle )
     89if( DEBUG_KCM < cycle )
    9090printk("\n[DBG] %s : thread %x exit / type  %s / ptr %p / page %x / count %d\n",
    9191__FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) , ptr ,
  • trunk/kernel/mm/kmem.c

    r435 r438  
    145145        assert( ((type > 1) && (type < KMEM_TYPES_NR) ) , __FUNCTION__ , "illegal KCM type" );
    146146
    147 #if CONFIG_DEBUG_KMEM
     147#if DEBUG_KMEM
    148148uint32_t cycle = (uint32_t)hal_get_cycles();
    149 if( CONFIG_DEBUG_KMEM < cycle )
     149if( DEBUG_KMEM < cycle )
    150150printk("\n[DBG] %s : thread %x enter / KCM type %s missing in cluster %x / cycle %d\n",
    151151__FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );
     
    173173        hal_fence();
    174174
    175 #if CONFIG_DEBUG_KMEM
     175#if DEBUG_KMEM
    176176cycle = (uint32_t)hal_get_cycles();
    177 if( CONFIG_DEBUG_KMEM < cycle )
     177if( DEBUG_KMEM < cycle )
    178178printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    179179__FUNCTION__, CURRENT_THREAD, cycle );
     
    200200        assert( (type < KMEM_TYPES_NR) , __FUNCTION__ , "illegal KMEM request type" );
    201201
    202 #if CONFIG_DEBUG_KMEM
     202#if DEBUG_KMEM
    203203uint32_t cycle = (uint32_t)hal_get_cycles();
    204 if( CONFIG_DEBUG_KMEM < cycle )
     204if( DEBUG_KMEM < cycle )
    205205printk("\n[DBG] %s : thread %x enter / type %s / cluster %x / cycle %d\n",
    206206__FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );
     
    222222                if( flags & AF_ZERO ) page_zero( (page_t *)ptr );
    223223
    224 #if CONFIG_DEBUG_KMEM
     224#if DEBUG_KMEM
    225225cycle = (uint32_t)hal_get_cycles();
    226 if( CONFIG_DEBUG_KMEM < cycle )
     226if( DEBUG_KMEM < cycle )
    227227printk("\n[DBG] %s : thread %x exit / %d page(s) allocated / ppn %x / cycle %d\n",
    228228__FUNCTION__, CURRENT_THREAD, 1<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle );
     
    244244                if( flags & AF_ZERO ) memset( ptr , 0 , size );
    245245
    246 #if CONFIG_DEBUG_KMEM
     246#if DEBUG_KMEM
    247247cycle = (uint32_t)hal_get_cycles();
    248 if( CONFIG_DEBUG_KMEM < cycle )
     248if( DEBUG_KMEM < cycle )
    249249printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
    250250__FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), (intptr_t)ptr, size, cycle );
     
    275275                if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) );
    276276
    277 #if CONFIG_DEBUG_KMEM
     277#if DEBUG_KMEM
    278278cycle = (uint32_t)hal_get_cycles();
    279 if( CONFIG_DEBUG_KMEM < cycle )
     279if( DEBUG_KMEM < cycle )
    280280printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
    281281__FUNCTION__, CURRENT_THREAD, kmem_type_str(type), (intptr_t)ptr,
  • trunk/kernel/mm/mapper.c

    r435 r438  
    143143    error_t       error;
    144144
    145 #if CONFIG_DEBUG_MAPPER_GET_PAGE
     145#if DEBUG_MAPPER_GET_PAGE
    146146uint32_t cycle = (uint32_t)hal_get_cycles();
    147 if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
     147if( DEBUG_MAPPER_GET_PAGE < cycle )
    148148printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / cycle %d\n",
    149149__FUNCTION__ , CURRENT_THREAD , index , mapper , cycle );
     
    175175        {
    176176
    177 #if (CONFIG_DEBUG_MAPPER_GET_PAGE & 1)
    178 if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
     177#if (DEBUG_MAPPER_GET_PAGE & 1)
     178if( DEBUG_MAPPER_GET_PAGE < cycle )
    179179printk("\n[DBG] %s : missing page => load from device\n", __FUNCTION__ );
    180180#endif
     
    257257    }
    258258
    259 #if CONFIG_DEBUG_MAPPER_GET_PAGE
     259#if DEBUG_MAPPER_GET_PAGE
    260260cycle = (uint32_t)hal_get_cycles();
    261 if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
     261if( DEBUG_MAPPER_GET_PAGE < cycle )
    262262printk("\n[DBG] %s : thread %x exit for page %d / ppn %x / cycle %d\n",
    263263__FUNCTION__, CURRENT_THREAD, index, ppm_page2ppn(XPTR(local_cxy, page)), cycle );
     
    317317    uint8_t  * buf_ptr;        // current buffer  address
    318318
    319 #if CONFIG_DEBUG_MAPPER_MOVE_USER
     319#if DEBUG_MAPPER_MOVE_USER
    320320uint32_t cycle = (uint32_t)hal_get_cycles();
    321 if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
     321if( DEBUG_MAPPER_MOVE_USER < cycle )
    322322printk("\n[DBG] %s : thread %x enter / to_buf %d / buffer %x / cycle %d\n",
    323323__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
     
    347347        else                       page_count = CONFIG_PPM_PAGE_SIZE;
    348348
    349 #if (CONFIG_DEBUG_MAPPER_MOVE_USER & 1)
    350 if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
     349#if (DEBUG_MAPPER_MOVE_USER & 1)
     350if( DEBUG_MAPPER_MOVE_USER < cycle )
    351351printk("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
    352352__FUNCTION__ , index , page_offset , page_count );
     
    379379    }
    380380
    381 #if CONFIG_DEBUG_MAPPER_MOVE_USER
     381#if DEBUG_MAPPER_MOVE_USER
    382382cycle = (uint32_t)hal_get_cycles();
    383 if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
     383if( DEBUG_MAPPER_MOVE_USER < cycle )
    384384printk("\n[DBG] %s : thread %x exit / to_buf %d / buffer %x / cycle %d\n",
    385385__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
     
    412412    uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );
    413413
    414 #if CONFIG_DEBUG_MAPPER_MOVE_KERNEL
     414#if DEBUG_MAPPER_MOVE_KERNEL
    415415uint32_t cycle = (uint32_t)hal_get_cycles();
    416 if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
     416if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    417417printk("\n[DBG] %s : thread %x enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
    418418__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
     
    427427    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
    428428
    429 #if (CONFIG_DEBUG_MAPPER_MOVE_KERNEL & 1)
    430 if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
     429#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
     430if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    431431printk("\n[DBG] %s : first_page %d / last_page %d\n", __FUNCTION__, first, last );
    432432#endif
     
    459459        else                       page_count = CONFIG_PPM_PAGE_SIZE;
    460460
    461 #if (CONFIG_DEBUG_MAPPER_MOVE_KERNEL & 1)
    462 if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
     461#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
     462if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    463463printk("\n[DBG] %s : page_index = %d / offset = %d / bytes = %d\n",
    464464__FUNCTION__ , index , page_offset , page_count );
     
    494494    }
    495495
    496 #if CONFIG_DEBUG_MAPPER_MOVE_KERNEL
     496#if DEBUG_MAPPER_MOVE_KERNEL
    497497cycle = (uint32_t)hal_get_cycles();
    498 if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
     498if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    499499printk("\n[DBG] %s : thread %x exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
    500500__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
  • trunk/kernel/mm/ppm.c

    r437 r438  
    201201        uint32_t   current_size;
    202202 
    203 #if CONFIG_DEBUG_PPM_ALLOC_PAGES
     203#if DEBUG_PPM_ALLOC_PAGES
    204204uint32_t cycle = (uint32_t)hal_get_cycles();
    205 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     205if( DEBUG_PPM_ALLOC_PAGES < cycle )
    206206printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
    207207__FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );
    208208#endif
    209209
    210 #if(CONFIG_DEBUG_PPM_ALLOC_PAGES & 0x1)
    211 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     210#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
     211if( DEBUG_PPM_ALLOC_PAGES < cycle )
    212212ppm_print();
    213213#endif
     
    239239                spinlock_unlock( &ppm->free_lock );
    240240
    241 #if CONFIG_DEBUG_PPM_ALLOC_PAGES
     241#if DEBUG_PPM_ALLOC_PAGES
    242242cycle = (uint32_t)hal_get_cycles();
    243 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     243if( DEBUG_PPM_ALLOC_PAGES < cycle )
    244244printk("\n[DBG] in %s : thread %x cannot allocate %d page(s) at cycle %d\n",
    245245__FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );
     
    275275        spinlock_unlock( &ppm->free_lock );
    276276
    277 #if CONFIG_DEBUG_PPM_ALLOC_PAGES
     277#if DEBUG_PPM_ALLOC_PAGES
    278278cycle = (uint32_t)hal_get_cycles();
    279 if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
     279if( DEBUG_PPM_ALLOC_PAGES < cycle )
    280280printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x / cycle %d\n",
    281281__FUNCTION__, CURRENT_THREAD, 1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );
     
    292292        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    293293
    294 #if CONFIG_DEBUG_PPM_FREE_PAGES
     294#if DEBUG_PPM_FREE_PAGES
    295295uint32_t cycle = (uint32_t)hal_get_cycles();
    296 if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
     296if( DEBUG_PPM_FREE_PAGES < cycle )
    297297printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
    298298__FUNCTION__ , CURRENT_THREAD , 1<<page->order , cycle );
    299299#endif
    300300
    301 #if(CONFIG_DEBUG_PPM_FREE_PAGES & 0x1)
    302 if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
     301#if(DEBUG_PPM_FREE_PAGES & 0x1)
     302if( DEBUG_PPM_FREE_PAGES < cycle )
    303303ppm_print();
    304304#endif
     
    312312        spinlock_unlock( &ppm->free_lock );
    313313
    314 #if CONFIG_DEBUG_PPM_FREE_PAGES
     314#if DEBUG_PPM_FREE_PAGES
    315315cycle = (uint32_t)hal_get_cycles();
    316 if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
     316if( DEBUG_PPM_FREE_PAGES < cycle )
    317317printk("\n[DBG] in %s : thread %x exit / %d page(s) released / ppn = %x / cycle %d\n",
    318318__FUNCTION__, CURRENT_THREAD, 1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
  • trunk/kernel/mm/vmm.c

    r437 r438  
    6363    intptr_t  size;
    6464
    65 #if CONFIG_DEBUG_VMM_INIT
     65#if DEBUG_VMM_INIT
    6666uint32_t cycle = (uint32_t)hal_get_cycles();
    67 if( CONFIG_DEBUG_VMM_INIT )
     67if( DEBUG_VMM_INIT )
    6868printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    6969__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     
    183183    hal_fence();
    184184
    185 #if CONFIG_DEBUG_VMM_INIT
     185#if DEBUG_VMM_INIT
    186186cycle = (uint32_t)hal_get_cycles();
    187 if( CONFIG_DEBUG_VMM_INIT )
     187if( DEBUG_VMM_INIT )
    188188printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n",
    189189__FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle );
     
    266266    lpid_t          owner_lpid;
    267267
    268 #if CONFIG_DEBUG_VMM_UPDATE_PTE
     268#if DEBUG_VMM_UPDATE_PTE
    269269uint32_t cycle = (uint32_t)hal_get_cycles();
    270 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     270if( DEBUG_VMM_UPDATE_PTE < cycle )
    271271printk("\n[DBG] %s : thread %x enter for process %x / vpn %x / cycle %d\n",
    272272__FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );
     
    292292        remote_process_cxy = GET_CXY( remote_process_xp );
    293293
    294 #if (CONFIG_DEBUG_VMM_UPDATE_PTE & 0x1)
    295 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     294#if (DEBUG_VMM_UPDATE_PTE & 0x1)
     295if( DEBUG_VMM_UPDATE_PTE < cycle )
    296296printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
    297297__FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );
     
    305305    } 
    306306
    307 #if CONFIG_DEBUG_VMM_UPDATE_PTE
     307#if DEBUG_VMM_UPDATE_PTE
    308308cycle = (uint32_t)hal_get_cycles();
    309 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
     309if( DEBUG_VMM_UPDATE_PTE < cycle )
    310310printk("\n[DBG] %s : thread %x exit for process %x / vpn %x / cycle %d\n",
    311311__FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );
     
    338338    lpid_t          owner_lpid;
    339339
    340 #if CONFIG_DEBUG_VMM_SET_COW
     340#if DEBUG_VMM_SET_COW
    341341uint32_t cycle = (uint32_t)hal_get_cycles();
    342 if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     342if( DEBUG_VMM_SET_COW < cycle )
    343343printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    344344__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     
    370370        remote_process_cxy = GET_CXY( remote_process_xp );
    371371
    372 #if (CONFIG_DEBUG_VMM_SET_COW &0x1)
    373 if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     372#if (DEBUG_VMM_SET_COW &0x1)
     373if( DEBUG_VMM_SET_COW < cycle )
    374374printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
    375375__FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );
     
    394394            vpn_t    vpn_size = vseg->vpn_size;
    395395
    396 #if (CONFIG_DEBUG_VMM_SET_COW & 0x1)
    397 if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     396#if (DEBUG_VMM_SET_COW & 0x1)
     397if( DEBUG_VMM_SET_COW < cycle )
    398398printk("\n[DBG] %s : thread %x handling vseg %s / vpn_base = %x / vpn_size = %x\n",
    399399__FUNCTION__, CURRENT_THREAD , vseg_type_str(type), vpn_base, vpn_size );
     
    445445    }   // end loop on process copies
    446446 
    447 #if CONFIG_DEBUG_VMM_SET_COW
     447#if DEBUG_VMM_SET_COW
    448448cycle = (uint32_t)hal_get_cycles();
    449 if( CONFIG_DEBUG_VMM_SET_COW < cycle )
     449if( DEBUG_VMM_SET_COW < cycle )
    450450printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
    451451__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
     
    480480    ppn_t       ppn;
    481481
    482 #if CONFIG_DEBUG_VMM_FORK_COPY
     482#if DEBUG_VMM_FORK_COPY
    483483uint32_t cycle = (uint32_t)hal_get_cycles();
    484 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     484if( DEBUG_VMM_FORK_COPY < cycle )
    485485printk("\n[DBG] %s : thread %x enter / cycle %d\n",
    486486__FUNCTION__ , CURRENT_THREAD, cycle );
     
    530530        type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) );
    531531       
    532 #if CONFIG_DEBUG_VMM_FORK_COPY
     532#if DEBUG_VMM_FORK_COPY
    533533cycle = (uint32_t)hal_get_cycles();
    534 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     534if( DEBUG_VMM_FORK_COPY < cycle )
    535535printk("\n[DBG] %s : thread %x found parent vseg %s / vpn_base = %x / cycle %d\n",
    536536__FUNCTION__ , CURRENT_THREAD, vseg_type_str(type),
     
    556556            vseg_attach( child_vmm , child_vseg );
    557557
    558 #if CONFIG_DEBUG_VMM_FORK_COPY
     558#if DEBUG_VMM_FORK_COPY
    559559cycle = (uint32_t)hal_get_cycles();
    560 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     560if( DEBUG_VMM_FORK_COPY < cycle )
    561561printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
    562562__FUNCTION__ , CURRENT_THREAD , vseg_type_str(type),
     
    597597                        hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
    598598
    599 #if CONFIG_DEBUG_VMM_FORK_COPY
     599#if DEBUG_VMM_FORK_COPY
    600600cycle = (uint32_t)hal_get_cycles();
    601 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     601if( DEBUG_VMM_FORK_COPY < cycle )
    602602printk("\n[DBG] %s : thread %x copied vpn %x to child GPT / cycle %d\n",
    603603__FUNCTION__ , CURRENT_THREAD , vpn , cycle );
     
    649649    hal_fence();
    650650
    651 #if CONFIG_DEBUG_VMM_FORK_COPY
     651#if DEBUG_VMM_FORK_COPY
    652652cycle = (uint32_t)hal_get_cycles();
    653 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
     653if( DEBUG_VMM_FORK_COPY < cycle )
    654654printk("\n[DBG] %s : thread %x exit successfully / cycle %d\n",
    655655__FUNCTION__ , CURRENT_THREAD , cycle );
     
    666666        vseg_t * vseg;
    667667
    668 #if CONFIG_DEBUG_VMM_DESTROY
     668#if DEBUG_VMM_DESTROY
    669669uint32_t cycle = (uint32_t)hal_get_cycles();
    670 if( CONFIG_DEBUG_VMM_DESTROY < cycle )
     670if( DEBUG_VMM_DESTROY < cycle )
    671671printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
    672672__FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
    673673#endif
    674674
    675 #if (CONFIG_DEBUG_VMM_DESTROY & 1 )
     675#if (DEBUG_VMM_DESTROY & 1 )
    676676vmm_display( process , true );
    677677#endif
     
    694694        vseg    = GET_PTR( vseg_xp );
    695695
    696 #if( CONFIG_DEBUG_VMM_DESTROY & 1 )
    697 if( CONFIG_DEBUG_VMM_DESTROY < cycle )
     696#if( DEBUG_VMM_DESTROY & 1 )
     697if( DEBUG_VMM_DESTROY < cycle )
    698698printk("\n[DBG] %s : %s / vpn_base %x / vpn_size %d\n",
    699699__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
     
    728728    hal_gpt_destroy( &vmm->gpt );
    729729
    730 #if CONFIG_DEBUG_VMM_DESTROY
     730#if DEBUG_VMM_DESTROY
    731731cycle = (uint32_t)hal_get_cycles();
    732 if( CONFIG_DEBUG_VMM_DESTROY < cycle )
     732if( DEBUG_VMM_DESTROY < cycle )
    733733printk("\n[DBG] %s : thread %x exit / cycle %d\n",
    734734__FUNCTION__ , CURRENT_THREAD , cycle );
     
    882882        error_t      error;
    883883
    884 #if CONFIG_DEBUG_VMM_CREATE_VSEG
     884#if DEBUG_VMM_CREATE_VSEG
    885885uint32_t cycle = (uint32_t)hal_get_cycles();
    886 if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
     886if( DEBUG_VMM_CREATE_VSEG < cycle )
    887887printk("\n[DBG] %s : thread %x enter / process %x / base %x / size %x / %s / cxy %x / cycle %d\n",
    888888__FUNCTION__, CURRENT_THREAD, process->pid, base, size, vseg_type_str(type), cxy, cycle );
     
    973973        remote_rwlock_wr_unlock( lock_xp );
    974974
    975 #if CONFIG_DEBUG_VMM_CREATE_VSEG
     975#if DEBUG_VMM_CREATE_VSEG
    976976cycle = (uint32_t)hal_get_cycles();
    977 if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
     977if( DEBUG_VMM_CREATE_VSEG < cycle )
    978978printk("\n[DBG] %s : thread %x exit / process %x / %s / cxy %x / cycle %d\n",
    979979__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str(type), cxy, cycle );
     
    11101110    uint32_t    count;      // actual number of pending forks
    11111111
    1112 #if CONFIG_DEBUG_VMM_UNMAP_VSEG
     1112#if DEBUG_VMM_UNMAP_VSEG
    11131113uint32_t cycle = (uint32_t)hal_get_cycles();
    1114 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
     1114if( DEBUG_VMM_UNMAP_VSEG < cycle )
    11151115printk("\n[DBG] %s : thread %x enter / process %x / vseg %s / base %x / cycle %d\n",
    11161116__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
     
    11311131        {
    11321132
    1133 #if( CONFIG_DEBUG_VMM_UNMAP_VSEG & 1 )
    1134 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
     1133#if( DEBUG_VMM_UNMAP_VSEG & 1 )
     1134if( DEBUG_VMM_UNMAP_VSEG < cycle )
    11351135printk("- vpn %x / ppn %x\n" , vpn , ppn );
    11361136#endif
     
    11831183    }
    11841184
    1185 #if CONFIG_DEBUG_VMM_UNMAP_VSEG
     1185#if DEBUG_VMM_UNMAP_VSEG
    11861186cycle = (uint32_t)hal_get_cycles();
    1187 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
     1187if( DEBUG_VMM_UNMAP_VSEG < cycle )
    11881188printk("\n[DBG] %s : thread %x exit / process %x / vseg %s / base %x / cycle %d\n",
    11891189__FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
     
    13831383{
    13841384
    1385 #if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
    1386 if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
     1385#if DEBUG_VMM_ALLOCATE_PAGE
     1386if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
    13871387printk("\n[DBG] in %s : thread %x enter for vpn %x\n",
    13881388__FUNCTION__ , CURRENT_THREAD, vpn );
     
    14271427    }
    14281428
    1429 #if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
    1430 if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
     1429#if DEBUG_VMM_ALLOCATE_PAGE
     1430if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
    14311431printk("\n[DBG] in %s : thread %x exit for vpn = %d / ppn = %x\n",
    14321432__FUNCTION__ , CURRENT_THREAD, vpn, ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) );
     
    14521452    index     = vpn - vseg->vpn_base;
    14531453
    1454 #if CONFIG_DEBUG_VMM_GET_ONE_PPN
    1455 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1454#if DEBUG_VMM_GET_ONE_PPN
     1455if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    14561456printk("\n[DBG] %s : thread %x enter for vpn = %x / type = %s / index = %d\n",
    14571457__FUNCTION__, CURRENT_THREAD, vpn, vseg_type_str(type), index );
     
    15151515            uint32_t elf_offset = vseg->file_offset + offset;
    15161516
    1517 #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
    1518 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1517#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
     1518if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    15191519printk("\n[DBG] %s : thread %x for vpn = %x / elf_offset = %x\n",
    15201520__FUNCTION__, CURRENT_THREAD, vpn, elf_offset );
     
    15301530            {
    15311531
    1532 #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
    1533 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1532#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
     1533if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    15341534printk("\n[DBG] %s : thread%x for vpn = %x / fully in BSS\n",
    15351535__FUNCTION__, CURRENT_THREAD, vpn );
     
    15481548            {
    15491549
    1550 #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
    1551 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1550#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
     1551if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    15521552printk("\n[DBG] %s : thread %x, for vpn = %x / fully in mapper\n",
    15531553__FUNCTION__, CURRENT_THREAD, vpn );
     
    15801580            {
    15811581
    1582 #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
    1583 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1582#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
     1583if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    15841584printk("\n[DBG] %s : thread %x for vpn = %x / both mapper & BSS\n"
    15851585"      %d bytes from mapper / %d bytes from BSS\n",
     
    16271627    *ppn = ppm_page2ppn( page_xp );
    16281628
    1629 #if CONFIG_DEBUG_VMM_GET_ONE_PPN
    1630 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
     1629#if DEBUG_VMM_GET_ONE_PPN
     1630if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
    16311631printk("\n[DBG] %s : thread %x exit for vpn = %x / ppn = %x\n",
    16321632__FUNCTION__ , CURRENT_THREAD , vpn , *ppn );
     
    16551655    "not called in the reference cluster\n" );
    16561656
    1657 #if CONFIG_DEBUG_VMM_GET_PTE
     1657#if DEBUG_VMM_GET_PTE
    16581658uint32_t cycle = (uint32_t)hal_get_cycles();
    1659 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
    1660 printk("\n[DBG] %s : thread %x enter for vpn = %x / process %x / cow = %d / cycle %d\n",
     1659if( DEBUG_VMM_GET_PTE < cycle )
     1660printk("\n[DBG] %s : thread %x enter / vpn %x / process %x / cow %d / cycle %d\n",
    16611661__FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cow , cycle );
    16621662#endif
     
    16751675    }
    16761676
    1677 #if CONFIG_DEBUG_VMM_GET_PTE
     1677#if( DEBUG_VMM_GET_PTE & 1 )
    16781678cycle = (uint32_t)hal_get_cycles();
    1679 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1679if( DEBUG_VMM_GET_PTE < cycle )
    16801680printk("\n[DBG] %s : thread %x found vseg %s / vpn_base = %x / vpn_size = %x\n",
    16811681__FUNCTION__, CURRENT_THREAD, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size );
    16821682#endif
    16831683
    1684     // access GPT to get current PTE attributes and PPN
    1685     hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
    1686 
    1687     // for both "copy_on_write" and "page_fault" events, allocate a physical page,
    1688     // initialize it, register it in the reference GPT, update GPT copies in all
    1689     // clusters containing a copy, and return the new_ppn and new_attr
    1690 
    1691     if( cow )  /////////////////////////// copy_on_write request //////////////////////
    1692     {
     1684    if( cow )  //////////////// copy_on_write request //////////////////////
     1685               // get PTE from reference GPT
     1686               // allocate a new physical page if there is pending forks,
     1687               // initialize it from old physical page content,
     1688               // update PTE in all GPT copies,
     1689    {
     1690        // access GPT to get current PTE attributes and PPN
     1691        hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
     1692
    16931693        assert( (old_attr & GPT_MAPPED) , __FUNCTION__ ,
    16941694        "PTE must be mapped for a copy-on-write exception\n" );
    16951695
    1696 #if CONFIG_DEBUG_VMM_GET_PTE
     1696#if( DEBUG_VMM_GET_PTE & 1 )
    16971697cycle = (uint32_t)hal_get_cycles();
    1698 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1698if( DEBUG_VMM_GET_PTE < cycle )
    16991699printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n",
    17001700__FUNCTION__, CURRENT_THREAD, vpn, process->pid );
     
    17441744        hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 );
    17451745    }
    1746     else  ////////////////////////////////// page_fault request ////////////////////////
     1746    else        //////////// page_fault request ///////////////////////////
     1747                // get PTE from reference GPT
     1748                // allocate a physical page if it is a true page fault,
     1749                // register in reference GPT, but don't update GPT copies
    17471750    { 
     1751        // access GPT to get current PTE
     1752        hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
     1753
    17481754        if( (old_attr & GPT_MAPPED) == 0 )   // true page_fault => map it
    17491755        {
    17501756
    1751 #if CONFIG_DEBUG_VMM_GET_PTE
     1757#if( DEBUG_VMM_GET_PTE & 1 )
    17521758cycle = (uint32_t)hal_get_cycles();
    1753 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1759if( DEBUG_VMM_GET_PTE < cycle )
    17541760printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n",
    17551761__FUNCTION__, CURRENT_THREAD, vpn, process->pid );
     
    17921798    }
    17931799
    1794 #if CONFIG_DEBUG_VMM_GET_PTE
     1800#if DEBUG_VMM_GET_PTE
    17951801cycle = (uint32_t)hal_get_cycles();
    1796 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
    1797 printk("\n[DBG] %s : thread,%x exit for vpn %x in process %x / ppn = %x / attr = %x / cycle %d\n",
     1802if( DEBUG_VMM_GET_PTE < cycle )
     1803printk("\n[DBG] %s : thread,%x exit / vpn %x in process %x / ppn %x / attr %x / cycle %d\n",
    17981804__FUNCTION__, CURRENT_THREAD, vpn, process->pid, new_ppn, new_attr, cycle );
    17991805#endif
    18001806
    1801     // return success
     1807    // return PPN and flags
    18021808    *ppn  = new_ppn;
    18031809    *attr = new_attr;
     
    18141820    error_t          error;
    18151821
    1816 #if CONFIG_DEBUG_VMM_GET_PTE
     1822#if DEBUG_VMM_GET_PTE
    18171823uint32_t cycle = (uint32_t)hal_get_cycles();
    1818 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1824if( DEBUG_VMM_GET_PTE < cycle )
    18191825printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
    18201826__FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
     
    18541860    }
    18551861
    1856 #if CONFIG_DEBUG_VMM_GET_PTE
     1862#if DEBUG_VMM_GET_PTE
    18571863cycle = (uint32_t)hal_get_cycles();
    1858 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1864if( DEBUG_VMM_GET_PTE < cycle )
    18591865printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
    18601866__FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
     
    18731879    error_t          error;
    18741880
    1875 #if CONFIG_DEBUG_VMM_GET_PTE
     1881#if DEBUG_VMM_GET_PTE
    18761882uint32_t cycle = (uint32_t)hal_get_cycles();
    1877 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1883if( DEBUG_VMM_GET_PTE < cycle )
    18781884printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
    18791885__FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
     
    19131919    }
    19141920
    1915 #if CONFIG_DEBUG_VMM_GET_PTE
     1921#if DEBUG_VMM_GET_PTE
    19161922cycle = (uint32_t)hal_get_cycles();
    1917 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
     1923if( DEBUG_VMM_GET_PTE < cycle )
    19181924printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
    19191925__FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
Note: See TracChangeset for help on using the changeset viewer.