source: trunk/kernel/libk/remote_sem.c @ 642

Last change on this file since 642 was 635, checked in by alain, 5 years ago

This version is a major evolution: the physical memory allocators,
defined in the kmem.c, ppm.c, and kcm.c files, have been modified
to support remote accesses. The RPCs that were previously used
to allocate physical memory in a remote cluster have been removed.
This change cures a deadlock that could occur on concurrent page faults.

This version 2.2 has been tested on a TSAR architecture (4 clusters,
2 cores per cluster), with both the "sort" and the "fft" applications.

File size: 10.8 KB
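As a minimal sketch of the new allocation pattern used below in remote_sem_create() and
remote_sem_destroy() (assuming only the kmem_req_t fields, the KMEM_KCM type, the
AF_ZERO / AF_KERNEL flags, and the kmem_remote_alloc() / kmem_remote_free() calls that
appear in this file; the target cluster cxy and the object type my_obj_t are placeholders):

    // allocate a zeroed kernel object directly in remote cluster <cxy>
    kmem_req_t req;
    req.type  = KMEM_KCM;                        // KCM allocator for small kernel objects
    req.order = bits_log2( sizeof(my_obj_t) );   // requested size, as a power of 2
    req.flags = AF_ZERO | AF_KERNEL;
    my_obj_t * ptr = kmem_remote_alloc( cxy , &req );

    // release the object in the same cluster
    req.type = KMEM_KCM;
    req.ptr  = ptr;
    kmem_remote_free( cxy , &req );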
/*
 * remote_sem.c - POSIX unnamed semaphore implementation.
 *
 * Author   Alain Greiner  (2016,2017,2018,2019)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <hal_kernel_types.h>
#include <hal_remote.h>
#include <thread.h>
#include <kmem.h>
#include <printk.h>
#include <process.h>
#include <vmm.h>
#include <remote_sem.h>

///////////////////////////////////////////////
xptr_t remote_sem_from_ident( intptr_t  ident )
{
    // get pointer on local process descriptor
    process_t * process = CURRENT_THREAD->process;

    // get extended pointer on reference process
    xptr_t      ref_xp = process->ref_xp;

    // get cluster and local pointer on reference process
    cxy_t          ref_cxy = GET_CXY( ref_xp );
    process_t    * ref_ptr = GET_PTR( ref_xp );

    // get extended pointers on semaphores list root and on the lock protecting it
    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->sem_root );
    xptr_t lock_xp = XPTR( ref_cxy , &ref_ptr->sync_lock );

    // get lock protecting synchro lists
    remote_queuelock_acquire( lock_xp );

    // scan reference process semaphores list
    xptr_t         iter_xp;
    xptr_t         sem_xp;
    cxy_t          sem_cxy;
    remote_sem_t * sem_ptr;
    intptr_t       current;
    bool_t         found = false;

    XLIST_FOREACH( root_xp , iter_xp )
    {
        sem_xp  = XLIST_ELEMENT( iter_xp , remote_sem_t , list );
        sem_cxy = GET_CXY( sem_xp );
        sem_ptr = GET_PTR( sem_xp );
        current = (intptr_t)hal_remote_lpt( XPTR( sem_cxy , &sem_ptr->ident ) );

        if( current == ident )
        {
            found = true;
            break;
        }
    }

    // release lock protecting synchro lists
    remote_queuelock_release( lock_xp );

    if( found == false )  return XPTR_NULL;
    else                  return sem_xp;

}  // end remote_sem_from_ident()

///////////////////////////////////////////
error_t remote_sem_create( intptr_t   vaddr,
                           uint32_t   value )
{
    kmem_req_t     req;
    remote_sem_t * sem_ptr;

    // get pointer on local process descriptor
    process_t * process = CURRENT_THREAD->process;

    // get extended pointer on reference process
    xptr_t      ref_xp = process->ref_xp;

    // get reference process cluster and local pointer
    cxy_t       ref_cxy = GET_CXY( ref_xp );
    process_t * ref_ptr = GET_PTR( ref_xp );

    // allocate memory for new semaphore in reference cluster
    req.type  = KMEM_KCM;
    req.order = bits_log2( sizeof(remote_sem_t) );
    req.flags = AF_ZERO | AF_KERNEL;
    sem_ptr   = kmem_remote_alloc( ref_cxy, &req );

    if( sem_ptr == NULL )
    {
        printk("\n[ERROR] in %s : cannot create semaphore\n", __FUNCTION__ );
        return -1;
    }

    // initialise semaphore
    hal_remote_s32 ( XPTR( ref_cxy , &sem_ptr->count ) , value );
    hal_remote_spt( XPTR( ref_cxy , &sem_ptr->ident ) , (void *)vaddr );
    xlist_root_init( XPTR( ref_cxy , &sem_ptr->root ) );
    xlist_entry_init( XPTR( ref_cxy , &sem_ptr->list ) );
    remote_busylock_init( XPTR( ref_cxy , &sem_ptr->lock ), LOCK_SEM_STATE );

    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->sem_root );
    xptr_t list_xp = XPTR( ref_cxy , &sem_ptr->list );

    // get lock protecting user synchro lists
    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );

    // register semaphore in reference process list of semaphores
    xlist_add_first( root_xp , list_xp );

    // release lock protecting user synchro lists
    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );

#if DEBUG_SEM
thread_t * this = CURRENT_THREAD;
if( (uint32_t)hal_get_cycles() > DEBUG_SEM )
printk("\n[DBG] %s : thread %x in process %x INITIALIZE sem(%x,%x) / value %d\n",
__FUNCTION__, this->trdid, this->process->pid, local_cxy, sem_ptr, value );
#endif

    return 0;

}  // end remote_sem_create()

////////////////////////////////////////
void remote_sem_destroy( xptr_t sem_xp )
{
    kmem_req_t  req;

    // get pointer on local process descriptor
    process_t * process = CURRENT_THREAD->process;

    // get extended pointer on reference process
    xptr_t      ref_xp = process->ref_xp;

    // get reference process cluster and local pointer
    cxy_t       ref_cxy = GET_CXY( ref_xp );
    process_t * ref_ptr = GET_PTR( ref_xp );

    // get semaphore cluster and local pointer
    cxy_t          sem_cxy = GET_CXY( sem_xp );
    remote_sem_t * sem_ptr = GET_PTR( sem_xp );

    // get extended pointer on waiting queue root
    xptr_t root_xp = XPTR( sem_cxy , &sem_ptr->root );

    if( !xlist_is_empty( root_xp ) )   // user error
    {
        printk("WARNING in %s for thread %x in process %x : "
               "destroying semaphore, but waiting threads queue is not empty\n",
               __FUNCTION__ , CURRENT_THREAD->trdid , CURRENT_THREAD->process->pid );
    }

    // remove semaphore from reference process xlist
    remote_queuelock_acquire( XPTR( ref_cxy , &ref_ptr->sync_lock ) );
    xlist_unlink( XPTR( sem_cxy , &sem_ptr->list ) );
    remote_queuelock_release( XPTR( ref_cxy , &ref_ptr->sync_lock ) );

    // release memory allocated for semaphore descriptor
    req.type = KMEM_KCM;
    req.ptr  = sem_ptr;
    kmem_remote_free( sem_cxy , &req );

}  // end remote_sem_destroy()

/////////////////////////////////////
void remote_sem_wait( xptr_t sem_xp )
{
    thread_t * this = CURRENT_THREAD;

// check calling thread can yield
assert( (this->busylocks == 0),
"cannot yield : busylocks = %d\n", this->busylocks );

    // get semaphore cluster and local pointer
    cxy_t          sem_cxy = GET_CXY( sem_xp );
    remote_sem_t * sem_ptr = GET_PTR( sem_xp );

    // get extended pointers on sem fields
    xptr_t           count_xp = XPTR( sem_cxy , &sem_ptr->count );
    xptr_t           root_xp  = XPTR( sem_cxy , &sem_ptr->root );
    xptr_t           lock_xp  = XPTR( sem_cxy , &sem_ptr->lock );

    while( 1 )
    {
        // get busylock protecting semaphore
        remote_busylock_acquire( lock_xp );

        // get semaphore current value
        uint32_t count = hal_remote_l32( count_xp );

        if( count > 0 )                     // success
        {
            // decrement semaphore value
            hal_remote_s32( count_xp , count - 1 );

#if DEBUG_SEM
if( (uint32_t)hal_get_cycles() > DEBUG_SEM )
printk("\n[DBG] %s : thread %x in process %x DECREMENT sem(%x,%x) / value %d\n",
__FUNCTION__, this->trdid, this->process->pid, sem_cxy, sem_ptr, count-1 );
#endif
            // release busylock protecting semaphore
            remote_busylock_release( XPTR( sem_cxy , &sem_ptr->lock ) );

            return;
        }
        else                               // failure
        {
            // get cluster and pointers on calling thread
            cxy_t            caller_cxy = local_cxy;
            thread_t       * caller_ptr = CURRENT_THREAD;
            xptr_t           caller_xp  = XPTR( caller_cxy , caller_ptr );

            // block the calling thread
            thread_block( caller_xp , THREAD_BLOCKED_SEM );

            // register calling thread in waiting queue
            xptr_t entry_xp = XPTR( caller_cxy , &caller_ptr->wait_xlist );
            xlist_add_last( root_xp , entry_xp );

#if DEBUG_SEM
if( (uint32_t)hal_get_cycles() > DEBUG_SEM )
printk("\n[DBG] %s : thread %x in process %x BLOCK on sem(%x,%x) / value %d\n",
__FUNCTION__, this->trdid, this->process->pid, sem_cxy, sem_ptr, count );
#endif
            // release busylock protecting semaphore
            remote_busylock_release( XPTR( sem_cxy , &sem_ptr->lock ) );

            // deschedule calling thread
            sched_yield("blocked on semaphore");
        }
    }
}  // end remote_sem_wait()

/////////////////////////////////////
void remote_sem_post( xptr_t sem_xp )
{
    // memory barrier before sem release
    hal_fence();

    // get semaphore cluster and local pointer
    cxy_t          sem_cxy = GET_CXY( sem_xp );
    remote_sem_t * sem_ptr = GET_PTR( sem_xp );

    // get extended pointers on sem fields
    xptr_t           count_xp = XPTR( sem_cxy , &sem_ptr->count );
    xptr_t           root_xp  = XPTR( sem_cxy , &sem_ptr->root );
    xptr_t           lock_xp  = XPTR( sem_cxy , &sem_ptr->lock );

    // get busylock protecting semaphore
    remote_busylock_acquire( lock_xp );

    // increment semaphore value
    hal_remote_atomic_add( count_xp , 1 );

#if DEBUG_SEM
uint32_t count = hal_remote_l32( count_xp );
thread_t * this = CURRENT_THREAD;
if( (uint32_t)hal_get_cycles() > DEBUG_SEM )
printk("\n[DBG] %s : thread %x in process %x INCREMENT sem(%x,%x) / value %d\n",
__FUNCTION__, this->trdid, this->process->pid, sem_cxy, sem_ptr, count );
#endif

    // scan waiting queue to unblock all waiting threads
    while( xlist_is_empty( root_xp ) == false )   // waiting queue non empty
    {
        // get first waiting thread from queue
        xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_xlist );
        cxy_t      thread_cxy = GET_CXY( thread_xp );
        thread_t * thread_ptr = GET_PTR( thread_xp );

        // remove this thread from the waiting queue
        xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_xlist ) );

        // unblock this waiting thread
        thread_unblock( thread_xp , THREAD_BLOCKED_SEM );

#if DEBUG_SEM
if( (uint32_t)hal_get_cycles() > DEBUG_SEM )
{
trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
printk("\n[DBG] %s : thread %x in process %x UNBLOCK thread %x in process %x / sem(%x,%x)\n",
__FUNCTION__, this->trdid, this->process->pid, trdid, pid, sem_cxy, sem_ptr );
}
#endif

    }

    // release busylock protecting the semaphore
    remote_busylock_release( XPTR( sem_cxy , &sem_ptr->lock ) );

}  // end remote_sem_post()

//////////////////////////////////////////////
void remote_sem_get_value( xptr_t      sem_xp,
                           uint32_t  * data )
{
    // get semaphore cluster and local pointer
    cxy_t          sem_cxy = GET_CXY( sem_xp );
    remote_sem_t * sem_ptr = GET_PTR( sem_xp );

    *data = hal_remote_l32( XPTR( sem_cxy , &sem_ptr->count ) );

}  // end remote_sem_get_value()

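For context, here is a minimal user-level sketch of the POSIX unnamed semaphore API that this
kernel module implements. The <semaphore.h> calls are standard POSIX; the mapping of sem_init() /
sem_wait() / sem_post() / sem_destroy() onto remote_sem_create() / remote_sem_wait() /
remote_sem_post() / remote_sem_destroy() is inferred from the function names and is not stated
in this file.

    #include <semaphore.h>

    sem_t sem;                   // unnamed semaphore located in user memory

    sem_init( &sem , 0 , 1 );    // presumably served by remote_sem_create( vaddr , 1 )
    sem_wait( &sem );            // presumably served by remote_sem_wait()
    /* critical section */
    sem_post( &sem );            // presumably served by remote_sem_post()
    sem_destroy( &sem );         // presumably served by remote_sem_destroy()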