source: trunk/kernel/libk/remote_spinlock.c @ 421

Last change on this file since 421 was 409, checked in by alain, 7 years ago

Fix bugs in exec

File size: 6.5 KB
/*
 * remote_spinlock.c - kernel remote spinlock implementation.
 *
 * Authors  Mohamed Karaoui (2015)
 *          Alain   Greiner (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <hal_types.h>
#include <hal_remote.h>
#include <hal_irqmask.h>
#include <thread.h>
#include <cluster.h>
#include <scheduler.h>
#include <remote_spinlock.h>

///////////////////////////////////////////
void remote_spinlock_init( xptr_t lock_xp )
{
    remote_spinlock_t * ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               cxy = GET_CXY( lock_xp );

    hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 );

#if CONFIG_LOCKS_DEBUG
    hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL );
    xlist_entry_init( XPTR( cxy , &ptr->list ) );
#endif

}

/////////////////////////////////////////////////
error_t remote_spinlock_trylock( xptr_t lock_xp )
{
    reg_t               mode;
    bool_t              isAtomic = false;

    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get local pointer on calling thread
    thread_t          * thread_ptr = CURRENT_THREAD;

    // disable interrupts
    hal_disable_irq( &mode );

    // try to take the lock if not already taken
    if( hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) ) == 0 )
    {
        isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
    }

    if( isAtomic == false )    // failure
    {
        hal_restore_irq( mode );
        return 1;
    }
    else                       // success : register lock in thread
    {
        thread_ptr->remote_locks++;

#if CONFIG_LOCKS_DEBUG
        hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
                        XPTR( local_cxy , thread_ptr ) );
        xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
                         XPTR( lock_cxy  , &lock_ptr->list ) );
#endif

        hal_restore_irq( mode );
        return 0;
    }
}

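/* Illustrative usage sketch (editor's addition, not part of remote_spinlock.c):
 * a caller that uses the non-blocking remote_spinlock_trylock() and simply
 * reports failure when the lock is already taken. The shared_counter_t type
 * and the shared_counter_try_inc() function are hypothetical, introduced only
 * for this example; they assume a remote_spinlock_t embedded in a structure
 * that can be allocated in any cluster.
 */
typedef struct shared_counter_s
{
    remote_spinlock_t  lock;       // protects the value field
    uint32_t           value;      // shared counter
}
shared_counter_t;

error_t shared_counter_try_inc( xptr_t counter_xp )
{
    // get cluster and local pointer on the shared counter
    shared_counter_t * ptr = (shared_counter_t *)GET_PTR( counter_xp );
    cxy_t              cxy = GET_CXY( counter_xp );

    // return 1 immediately if the lock cannot be taken
    if( remote_spinlock_trylock( XPTR( cxy , &ptr->lock ) ) ) return 1;

    // critical section : increment the remote counter
    uint32_t val = hal_remote_lw( XPTR( cxy , &ptr->value ) );
    hal_remote_sw( XPTR( cxy , &ptr->value ) , val + 1 );

    remote_spinlock_unlock( XPTR( cxy , &ptr->lock ) );
    return 0;
}
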
///////////////////////////////////////////////////
void remote_spinlock_lock_busy( xptr_t     lock_xp,
                                uint32_t * irq_state )
{
    bool_t              isAtomic = false;
    reg_t               mode;
    volatile uint32_t   taken;

    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get local pointer on local thread
    thread_t          * thread_ptr = CURRENT_THREAD;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        taken = hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) );

        // try to take the lock if not already taken
        if( taken == 0 )
        {
            isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
        }
    }

    // register lock in thread
    thread_ptr->remote_locks++;

#if CONFIG_LOCKS_DEBUG
    hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
                    XPTR( local_cxy , thread_ptr ) );
    xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
                     XPTR( lock_cxy  , &lock_ptr->list ) );
#endif

    // irq_state must be restored when lock is released
    *irq_state = mode;

}  // end remote_spinlock_lock_busy()

////////////////////////////////////////////////////
void remote_spinlock_unlock_busy( xptr_t    lock_xp,
                                  uint32_t  irq_state )
{
    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get pointer on local thread
    thread_t          * thread_ptr = CURRENT_THREAD;

#if CONFIG_LOCKS_DEBUG
    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
    xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
#endif

    hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );
    thread_ptr->remote_locks--;

    // deschedule if pending request
    thread_check_sched();

    // restore IRQs
    hal_restore_irq( irq_state );
}

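/* Illustrative usage sketch (editor's addition, not part of remote_spinlock.c):
 * the busy-waiting variant keeps IRQs disabled for the whole critical section,
 * so the caller must keep the irq_state value written by
 * remote_spinlock_lock_busy() and pass it back to remote_spinlock_unlock_busy().
 * The shared_counter_t type is the hypothetical structure from the previous
 * sketch.
 */
void shared_counter_inc_busy( xptr_t counter_xp )
{
    uint32_t           irq_state;
    shared_counter_t * ptr = (shared_counter_t *)GET_PTR( counter_xp );
    cxy_t              cxy = GET_CXY( counter_xp );

    // take the lock, busy waiting with IRQs disabled
    remote_spinlock_lock_busy( XPTR( cxy , &ptr->lock ) , &irq_state );

    // short critical section : increment the remote counter
    uint32_t val = hal_remote_lw( XPTR( cxy , &ptr->value ) );
    hal_remote_sw( XPTR( cxy , &ptr->value ) , val + 1 );

    // release the lock and restore the saved IRQ state
    remote_spinlock_unlock_busy( XPTR( cxy , &ptr->lock ) , irq_state );
}
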
///////////////////////////////////////////
void remote_spinlock_lock( xptr_t lock_xp )
{
    bool_t              isAtomic = false;
    reg_t               mode;
    volatile uint32_t   taken;

    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get local pointer on calling thread
    thread_t          * thread_ptr = CURRENT_THREAD;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        taken = hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) );

        // deschedule if possible when lock already taken
        if( taken != 0 )
        {
            hal_restore_irq( mode );
            if( thread_can_yield() ) sched_yield("waiting spinlock");
            hal_disable_irq( &mode );
            continue;
        }

        // try to take the lock if not already taken
        isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
    }

    // register lock in thread
    thread_ptr->remote_locks++;

#if CONFIG_LOCKS_DEBUG
    hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
                    XPTR( local_cxy , thread_ptr ) );
    xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
                     XPTR( lock_cxy  , &lock_ptr->list ) );
#endif

    // enable interrupts
    hal_restore_irq( mode );
}

/////////////////////////////////////////////
void remote_spinlock_unlock( xptr_t lock_xp )
{
    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get pointer on local thread
    thread_t          * thread_ptr = CURRENT_THREAD;

#if CONFIG_LOCKS_DEBUG
    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
    xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
#endif

    hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );
    thread_ptr->remote_locks--;

    // deschedule if pending request
    thread_check_sched();
}

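/* Illustrative usage sketch (editor's addition, not part of remote_spinlock.c):
 * typical life cycle of a remote spinlock with the blocking functions, which
 * may deschedule the calling thread while the lock is taken. The
 * shared_counter_t type is the hypothetical structure from the first sketch,
 * assumed here to be allocated in the local cluster.
 */
void shared_counter_example( shared_counter_t * counter )
{
    // build extended pointers on the lock and the value in the local cluster
    xptr_t lock_xp  = XPTR( local_cxy , &counter->lock );
    xptr_t value_xp = XPTR( local_cxy , &counter->value );

    // initialise the lock once, before any lock/unlock
    remote_spinlock_init( lock_xp );

    // take the lock, possibly yielding while it is busy
    remote_spinlock_lock( lock_xp );

    // critical section : reset the counter
    hal_remote_sw( value_xp , 0 );

    // release the lock
    remote_spinlock_unlock( lock_xp );
}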