source: trunk/kernel/mm/kcm.c @ 646

Last change on this file since 646 was 635, checked in by alain, 5 years ago

This version is a major evolution: the physical memory allocators,
defined in the kmem.c, ppm.c, and kcm.c files, have been modified
to support remote accesses. The RPCs that were previously used
to allocate physical memory in a remote cluster have been removed.
This was done to cure a deadlock in case of concurrent page faults.

This version 2.2 has been tested on a (4 clusters / 2 cores per cluster)
TSAR architecture, for both the "sort" and the "fft" applications.

File size: 21.4 KB
/*
 * kcm.c -  Kernel Cache Manager implementation.
 *
 * Author  Alain Greiner    (2016,2017,2018,2019)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <busylock.h>
#include <list.h>
#include <printk.h>
#include <bits.h>
#include <ppm.h>
#include <thread.h>
#include <page.h>
#include <cluster.h>
#include <kmem.h>
#include <kcm.h>


/////////////////////////////////////////////////////////////////////////////////////
//        Local access functions
/////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////////////////////////////////////////////
// This static function must be called by a local thread.
// It returns a pointer on a block allocated from a non-full kcm_page.
// It panics if no block is available in the selected page.
// It changes the page status as required.
/////////////////////////////////////////////////////////////////////////////////////
// @ kcm      : pointer on KCM allocator.
// @ kcm_page : pointer on a non-full kcm_page.
// @ return pointer on allocated block.
/////////////////////////////////////////////////////////////////////////////////////
static void * __attribute__((noinline)) kcm_get_block( kcm_t      * kcm,
                                                       kcm_page_t * kcm_page )
{
    // initialise variables
    uint32_t size   = 1 << kcm->order;
    uint32_t max    = kcm->max_blocks;
    uint32_t count  = kcm_page->count;
    uint64_t status = kcm_page->status;

assert( (count < max) , "kcm_page should not be full" );

    uint32_t index  = 1;
    uint64_t mask   = (uint64_t)0x2;
    uint32_t found  = 0;

    // allocate the first free block in kcm_page, update status and count,
    // and compute the index of the allocated block in kcm_page
    while( index <= max )
    {
        if( (status & mask) == 0 )   // block not allocated
        {
            kcm_page->status = status | mask;
            kcm_page->count  = count + 1;
            found  = 1;

            break;
        }

        index++;
        mask <<= 1;
    }

    // change the page list if almost full
    if( count == max-1 )
    {
        list_unlink( &kcm_page->list );
        kcm->active_pages_nr--;

        list_add_first( &kcm->full_root , &kcm_page->list );
        kcm->full_pages_nr ++;
    }

    // compute return pointer
    void * ptr = (void *)((intptr_t)kcm_page + (index * size) );

#if (DEBUG_KCM & 1)
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_KCM < cycle )
printk("\n[%s] thread[%x,%x] allocated block %x in page %x / size %d / count %d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, ptr, kcm_page, size, count + 1, cycle );
#endif

    return ptr;

}  // end kcm_get_block()
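
/////////////////////////////////////////////////////////////////////////////////////
// The two helpers below are an illustrative sketch only (they are not called by
// the allocator) : they simply restate the block addressing arithmetic used by
// kcm_get_block() and kcm_put_block(), assuming a 4 Kbytes kcm_page.
// Block 0 holds the embedded kcm_page descriptor, so valid indexes are
// 1 .. max_blocks : block <index> starts at offset (index << order), and its
// allocation state is bit <index> of the 64 bits "status" field.
/////////////////////////////////////////////////////////////////////////////////////
static inline void * kcm_block_from_index( kcm_page_t * kcm_page,
                                           uint32_t     order,
                                           uint32_t     index )
{
    // block <index> starts (index * (1 << order)) bytes after the page base
    return (void *)((intptr_t)kcm_page + (index << order));
}

static inline uint32_t kcm_index_from_block( kcm_page_t * kcm_page,
                                             uint32_t     order,
                                             void       * block_ptr )
{
    // the status bit tested by the allocator for this block is (1ULL << index)
    return (uint32_t)(((intptr_t)block_ptr - (intptr_t)kcm_page) >> order);
}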

/////////////////////////////////////////////////////////////////////////////////////
// This private static function must be called by a local thread.
// It releases a previously allocated block to the relevant kcm_page.
// It panics if the released block was not allocated in this page.
// It changes the kcm_page status as required.
/////////////////////////////////////////////////////////////////////////////////////
// @ kcm        : pointer on kcm allocator.
// @ kcm_page   : pointer on kcm_page.
// @ block_ptr  : pointer on block to be released.
/////////////////////////////////////////////////////////////////////////////////////
static void __attribute__((noinline)) kcm_put_block ( kcm_t      * kcm,
                                                      kcm_page_t * kcm_page,
                                                      void       * block_ptr )
{
    // initialise variables
    uint32_t max    = kcm->max_blocks;
    uint32_t size   = 1 << kcm->order;
    uint32_t count  = kcm_page->count;
    uint64_t status = kcm_page->status;

    // compute block index from block pointer
    uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size;

    // compute mask in bit vector
    uint64_t mask = ((uint64_t)0x1) << index;

assert( (status & mask) , "released block not allocated : status (%x,%x) / mask(%x,%x)",
GET_CXY(status), GET_PTR(status), GET_CXY(mask  ), GET_PTR(mask  ) );

    // update status & count in kcm_page
    kcm_page->status = status & ~mask;
    kcm_page->count  = count - 1;

    // change the page list if page was full
    if( count == max )
    {
        list_unlink( &kcm_page->list );
        kcm->full_pages_nr --;

        list_add_last( &kcm->active_root, &kcm_page->list );
        kcm->active_pages_nr ++;
    }

#if (DEBUG_KCM & 1)
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_KCM < cycle )
printk("\n[%s] thread[%x,%x] released block %x in page %x / size %d / count %d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1, cycle );
#endif

}  // end kcm_put_block()

/////////////////////////////////////////////////////////////////////////////////////
// This private static function must be called by a local thread.
// It returns one non-full kcm_page with the following policy :
// - if the "active_list" is non-empty, it returns the first "active" page,
//   without modifying the KCM state.
// - if the "active_list" is empty, it allocates a new page from PPM, inserts
//   this page in the active_list, and returns it.
/////////////////////////////////////////////////////////////////////////////////////
// @ kcm      : local pointer on local KCM allocator.
// @ return pointer on a non-full kcm page if success / returns NULL if no memory.
/////////////////////////////////////////////////////////////////////////////////////
static kcm_page_t * __attribute__((noinline)) kcm_get_page( kcm_t * kcm )
{
    kcm_page_t * kcm_page;

    uint32_t active_pages_nr = kcm->active_pages_nr;

    if( active_pages_nr > 0 )       // return first active page
    {
        kcm_page = LIST_FIRST( &kcm->active_root , kcm_page_t , list );
    }
    else                            // allocate a new page from PPM
    {
        // get one 4 Kbytes page from local PPM
        page_t * page = ppm_alloc_pages( 0 );

        if( page == NULL )
        {
            printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
            __FUNCTION__ , local_cxy );

            return NULL;
        }

        // get page base address
        xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );

        // get local pointer on kcm_page
        kcm_page = GET_PTR( base_xp );

        // initialize kcm_page descriptor
        kcm_page->status = 0;
        kcm_page->count  = 0;
        kcm_page->kcm    = kcm;
        kcm_page->page   = page;

        // introduce new page in KCM active_list
        list_add_first( &kcm->active_root , &kcm_page->list );
        kcm->active_pages_nr ++;
    }

    return kcm_page;

}  // end kcm_get_page()

//////////////////////////////
void kcm_init( kcm_t    * kcm,
               uint32_t   order)
{

assert( ((order > 5) && (order < 12)) , "order must be in [6,11]" );

    // initialize lock
    remote_busylock_init( XPTR( local_cxy , &kcm->lock ) , LOCK_KCM_STATE );

    // initialize KCM page lists
    kcm->full_pages_nr   = 0;
    kcm->active_pages_nr = 0;
    list_root_init( &kcm->full_root );
    list_root_init( &kcm->active_root );

    // initialize order and max_blocks
    kcm->order      = order;
    kcm->max_blocks = ( CONFIG_PPM_PAGE_SIZE >> order ) - 1;

#if DEBUG_KCM
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_KCM < cycle )
printk("\n[%s] thread[%x,%x] initialised KCM / order %d / max_blocks %d\n",
__FUNCTION__, this->process->pid, this->trdid, order, kcm->max_blocks );
#endif

}  // end kcm_init()

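/////////////////////////////////////////////////////////////////////////////////////
// Worked example (illustrative numbers only, assuming CONFIG_PPM_PAGE_SIZE == 4096) :
// for order 6 the KCM manages 64 bytes blocks, so one kcm_page contains
// max_blocks = (4096 >> 6) - 1 = 63 allocatable blocks, the first 64 bytes slot
// being occupied by the embedded kcm_page descriptor. For order 11 (2048 bytes
// blocks), max_blocks = (4096 >> 11) - 1 = 1.
/////////////////////////////////////////////////////////////////////////////////////
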
///////////////////////////////
void kcm_destroy( kcm_t * kcm )
{
    kcm_page_t   * kcm_page;

    // build extended pointer on KCM lock
    xptr_t lock_xp = XPTR( local_cxy , &kcm->lock );

    // get KCM lock
    remote_busylock_acquire( lock_xp );

    // release all full pages
    while( list_is_empty( &kcm->full_root ) == false )
    {
        kcm_page = LIST_FIRST( &kcm->full_root , kcm_page_t , list );
        list_unlink( &kcm_page->list );
        ppm_free_pages( kcm_page->page );
    }

    // release all active pages
    while( list_is_empty( &kcm->active_root ) == false )
    {
        kcm_page = LIST_FIRST( &kcm->active_root , kcm_page_t , list );
        list_unlink( &kcm_page->list );
        ppm_free_pages( kcm_page->page );
    }

    // release KCM lock
    remote_busylock_release( lock_xp );
}

//////////////////////////////////
void * kcm_alloc( uint32_t order )
{
    kcm_t      * kcm_ptr;
    kcm_page_t * kcm_page;
    void       * block_ptr;

    // min block size is 64 bytes
    if( order < 6 ) order = 6;

assert( (order < 12) , "order = %d / must be less than 12" , order );

    // get local pointer on relevant KCM allocator
    kcm_ptr = &LOCAL_CLUSTER->kcm[order - 6];

    // build extended pointer on local KCM lock
    xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock );

    // get KCM lock
    remote_busylock_acquire( lock_xp );

    // get a non-full kcm_page
    kcm_page = kcm_get_page( kcm_ptr );

    if( kcm_page == NULL )
    {
        remote_busylock_release( lock_xp );
        return NULL;
    }

    // get a block from selected active page
    block_ptr = kcm_get_block( kcm_ptr , kcm_page );

    // release lock
    remote_busylock_release( lock_xp );

#if DEBUG_KCM
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_KCM < cycle )
printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm %x / status[%x,%x] / count %d\n",
__FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_ptr,
GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count );
#endif

    return block_ptr;

}  // end kcm_alloc()

/////////////////////////////////
void kcm_free( void * block_ptr )
{
    kcm_t      * kcm_ptr;
    kcm_page_t * kcm_page;

// check argument
assert( (block_ptr != NULL) , "block pointer cannot be NULL" );

    // get local pointer on KCM page
    kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK);

    // get local pointer on KCM descriptor
    kcm_ptr = kcm_page->kcm;

#if DEBUG_KCM
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_KCM < cycle )
printk("\n[%s] thread[%x,%x] release block %x / order %d / kcm %x / status [%x,%x] / count %d\n",
__FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_ptr->order, kcm_ptr,
GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count );
#endif

    // build extended pointer on local KCM lock
    xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock );

    // get lock
    remote_busylock_acquire( lock_xp );

    // release block
    kcm_put_block( kcm_ptr , kcm_page , block_ptr );

    // release lock
    remote_busylock_release( lock_xp );
}

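/////////////////////////////////////////////////////////////////////////////////////
// Minimal usage sketch (illustrative only, not called by the kernel) : a local
// thread allocating and releasing one 128 bytes kernel object with the two
// functions above.
/////////////////////////////////////////////////////////////////////////////////////
static void kcm_local_usage_example( void )
{
    // order 7 selects the KCM allocator managing 128 bytes blocks
    void * obj = kcm_alloc( 7 );

    if( obj != NULL )
    {
        // ... initialise and use the object ...

        // the owning kcm_page is recovered from the block address
        kcm_free( obj );
    }
}
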
/////////////////////////////////////////////////////////////////////////////////////
//        Remote access functions
/////////////////////////////////////////////////////////////////////////////////////

/////////////////////////////////////////////////////////////////////////////////////
// This static function can be called by any thread running in any cluster.
// It returns a local pointer on a block allocated from a non-full kcm_page.
// It panics if no block is available in the selected page.
// It changes the page status as required.
/////////////////////////////////////////////////////////////////////////////////////
// @ kcm_cxy  : remote KCM cluster identifier.
// @ kcm_ptr  : local pointer on remote KCM allocator.
// @ kcm_page : pointer on active kcm page to use.
// @ return a local pointer on the allocated block.
/////////////////////////////////////////////////////////////////////////////////////
static void * __attribute__((noinline)) kcm_remote_get_block( cxy_t        kcm_cxy,
                                                              kcm_t      * kcm_ptr,
                                                              kcm_page_t * kcm_page )
{
    uint32_t order  = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
    uint32_t max    = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) );
    uint32_t count  = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
    uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) );
    uint32_t size   = 1 << order;

assert( (count < max) , "kcm_page should not be full" );

    uint32_t index  = 1;
    uint64_t mask   = (uint64_t)0x2;
    uint32_t found  = 0;

    // allocate the first free block in kcm_page, update status and count,
    // and compute the index of the allocated block in kcm_page
    while( index <= max )
    {
        if( (status & mask) == 0 )   // block not allocated
        {
            hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status | mask );
            hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count  ) , count + 1 );
            found  = 1;
            break;
        }

        index++;
        mask <<= 1;
    }

    // change the page list if almost full
    if( count == max-1 )
    {
        list_remote_unlink( kcm_cxy , &kcm_page->list );
        hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , -1 );

        list_remote_add_first( kcm_cxy , &kcm_ptr->full_root , &kcm_page->list );
        hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) , 1 );
    }

    // compute return pointer
    void * ptr = (void *)((intptr_t)kcm_page + (index * size) );

#if DEBUG_KCM_REMOTE
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_KCM_REMOTE < cycle )
printk("\n[%s] thread[%x,%x] get block %x in page %x / cluster %x / size %x / count %d\n",
__FUNCTION__, this->process->pid, this->trdid,
ptr, kcm_page, kcm_cxy, size, count + 1 );
#endif

    return ptr;

}  // end kcm_remote_get_block()

/////////////////////////////////////////////////////////////////////////////////////
// This private static function can be called by any thread running in any cluster.
// It releases a previously allocated block to the relevant kcm_page.
// It changes the kcm_page status as required.
/////////////////////////////////////////////////////////////////////////////////////
// @ kcm_cxy   : remote KCM cluster identifier.
// @ kcm_ptr   : local pointer on remote KCM.
// @ kcm_page  : local pointer on kcm_page.
// @ block_ptr : pointer on block to be released.
/////////////////////////////////////////////////////////////////////////////////////
static void __attribute__((noinline)) kcm_remote_put_block ( cxy_t        kcm_cxy,
                                                             kcm_t      * kcm_ptr,
                                                             kcm_page_t * kcm_page,
                                                             void       * block_ptr )
{
    uint32_t max    = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) );
    uint32_t order  = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
    uint32_t count  = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
    uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) );
    uint32_t size   = 1 << order;

    // compute block index from block pointer
    uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size;

    // compute mask in bit vector
    uint64_t mask = ((uint64_t)0x1) << index;

assert( (status & mask) , "released block not allocated" );

    // update status & count in kcm_page
    hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status & ~mask );
    hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count  ) , count - 1 );

    // change the page list if page was full
    if( count == max )
    {
        list_remote_unlink( kcm_cxy , &kcm_page->list );
        hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) , -1 );

        list_remote_add_last( kcm_cxy , &kcm_ptr->active_root, &kcm_page->list );
        hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , 1 );
    }

#if (DEBUG_KCM_REMOTE & 1)
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_KCM_REMOTE < cycle )
printk("\n[%s] thread[%x,%x] released block %x in page %x / cluster %x / size %x / count %d\n",
__FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, kcm_cxy, size, count - 1 );
#endif

}  // end kcm_remote_put_block()

/////////////////////////////////////////////////////////////////////////////////////
// This private static function can be called by any thread running in any cluster.
// It gets one non-full kcm_page from the remote KCM :
// - if the "active_list" is non-empty, it returns the first "active" page,
//   without modifying the remote KCM state.
// - if the "active_list" is empty, it allocates a new page from the remote PPM,
//   initialises the kcm_page descriptor, and inserts it in the "active_list".
/////////////////////////////////////////////////////////////////////////////////////
static kcm_page_t * __attribute__((noinline)) kcm_remote_get_page( cxy_t    kcm_cxy,
                                                                   kcm_t  * kcm_ptr )
{
    kcm_page_t * kcm_page;    // local pointer on remote KCM page

    uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) );

    if( active_pages_nr > 0 )       // return first active page
    {
        kcm_page = LIST_REMOTE_FIRST( kcm_cxy , &kcm_ptr->active_root , kcm_page_t , list );
    }
    else                            // allocate a new page from PPM
    {
        // get one 4 Kbytes page from remote PPM
        page_t * page = ppm_remote_alloc_pages( kcm_cxy , 0 );

        if( page == NULL )
        {
            printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
            __FUNCTION__ , kcm_cxy );

            return NULL;
        }

        // get remote page base address
        xptr_t base_xp = ppm_page2base( XPTR( kcm_cxy , page ) );

        // get local pointer on kcm_page
        kcm_page = GET_PTR( base_xp );

        // initialize kcm_page descriptor
        hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count )  , 0 );
        hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , 0 );
        hal_remote_spt( XPTR( kcm_cxy , &kcm_page->kcm )    , kcm_ptr );
        hal_remote_spt( XPTR( kcm_cxy , &kcm_page->page )   , page );

        // introduce new page in remote KCM active_list
        list_remote_add_first( kcm_cxy , &kcm_ptr->active_root , &kcm_page->list );
        hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , 1 );
    }

    return kcm_page;

}  // end kcm_remote_get_page()

/////////////////////////////////////////
void * kcm_remote_alloc( cxy_t    kcm_cxy,
                         uint32_t order )
{
    kcm_t      * kcm_ptr;
    kcm_page_t * kcm_page;
    void       * block_ptr;

    if( order < 6 ) order = 6;

assert( (order < 12) , "order = %d / must be less than 12" , order );

    // get local pointer on relevant KCM allocator
    kcm_ptr = &LOCAL_CLUSTER->kcm[order - 6];

    // build extended pointer on remote KCM lock
    xptr_t lock_xp = XPTR( kcm_cxy , &kcm_ptr->lock );

    // get lock
    remote_busylock_acquire( lock_xp );

    // get a non-full kcm_page
    kcm_page = kcm_remote_get_page( kcm_cxy , kcm_ptr );

    if( kcm_page == NULL )
    {
        remote_busylock_release( lock_xp );
        return NULL;
    }

    // get a block from selected active page
    block_ptr = kcm_remote_get_block( kcm_cxy , kcm_ptr , kcm_page );

    // release lock
    remote_busylock_release( lock_xp );

#if DEBUG_KCM_REMOTE
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_KCM_REMOTE < cycle )
printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm[%x,%x]\n",
__FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr );
#endif

    return block_ptr;

}  // end kcm_remote_alloc()

/////////////////////////////////////
void kcm_remote_free( cxy_t  kcm_cxy,
                      void * block_ptr )
{
    kcm_t      * kcm_ptr;
    kcm_page_t * kcm_page;

// check argument
assert( (block_ptr != NULL) , "block pointer cannot be NULL" );

    // get local pointer on remote KCM page
    kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK);

    // get local pointer on remote KCM
    kcm_ptr = hal_remote_lpt( XPTR( kcm_cxy , &kcm_page->kcm ) );

    // build extended pointer on remote KCM lock
    xptr_t lock_xp = XPTR( kcm_cxy , &kcm_ptr->lock );

    // get lock
    remote_busylock_acquire( lock_xp );

    // release block
    kcm_remote_put_block( kcm_cxy , kcm_ptr , kcm_page , block_ptr );

    // release lock
    remote_busylock_release( lock_xp );

#if DEBUG_KCM_REMOTE
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
uint32_t   order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
if( DEBUG_KCM_REMOTE < cycle )
printk("\n[%s] thread[%x,%x] released block %x / order %d / kcm[%x,%x]\n",
__FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr );
#endif

}  // end kcm_remote_free()

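/////////////////////////////////////////////////////////////////////////////////////
// Minimal usage sketch (illustrative only, not called by the kernel) : a thread
// running in any cluster allocating one 64 bytes block in a remote cluster <cxy>,
// then releasing it. The returned pointer is local to cluster <cxy>, so the block
// must be accessed through an XPTR and the hal_remote_xxx() primitives.
/////////////////////////////////////////////////////////////////////////////////////
static void kcm_remote_usage_example( cxy_t cxy )
{
    // order 6 selects the remote KCM allocator managing 64 bytes blocks
    void * obj = kcm_remote_alloc( cxy , 6 );

    if( obj != NULL )
    {
        // ... access the object through XPTR( cxy , obj ) ...

        kcm_remote_free( cxy , obj );
    }
}
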
/////////////////////////////////////////
void kcm_remote_display( cxy_t   kcm_cxy,
                         kcm_t * kcm_ptr )
{
    uint32_t order           = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order) );
    uint32_t full_pages_nr   = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) );
    uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) );

    printk("*** KCM / cxy %x / order %d / full_pages %d / active_pages %d\n",
    kcm_cxy, order, full_pages_nr, active_pages_nr );
}