source: trunk/kernel/libk/remote_spinlock.c @ 35

Last change on this file since 35 was 11, checked in by alain, 8 years ago

Merge all FS related files in one single vfs directory.

File size: 6.6 KB
/*
 * remote_spinlock.c - kernel remote spinlock implementation.
 *
 * Authors  Mohamed Karaoui (2015)
 *          Alain   Greiner (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <hal_types.h>
#include <hal_remote.h>
#include <hal_irqmask.h>
#include <thread.h>
#include <cluster.h>
#include <scheduler.h>
#include <remote_spinlock.h>

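// The remote_spinlock_init() function initialises a remote spinlock located in any
// cluster: it resets the "taken" and "owner" fields and initialises the xlist entry
// used to register the lock in the owner thread.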
///////////////////////////////////////////
void remote_spinlock_init( xptr_t lock_xp )
{
    remote_spinlock_t * ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               cxy = GET_CXY( lock_xp );

    hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 );
    hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL );
    xlist_entry_init( XPTR( cxy , &ptr->list ) );
}

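// The remote_spinlock_trylock() function makes a single attempt to take a remote
// spinlock: it disables interrupts, tries one atomic CAS on the "taken" field, and
// returns 1 on failure or 0 on success (the lock is then registered in the calling
// thread). The previous IRQ state is restored before returning in both cases.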
/////////////////////////////////////////////////
error_t remote_spinlock_trylock( xptr_t lock_xp )
{
    uint32_t            mode;
    bool_t              isAtomic = false;

    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get cluster and local pointer on local thread
    cxy_t               thread_cxy = local_cxy;
    thread_t          * thread_ptr = CURRENT_THREAD;

    // disable interrupts
    hal_disable_irq( &mode );

    if( hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) ) == 0 )
    {
        isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
    }

    if( isAtomic == false )    // failure
    {
        hal_restore_irq( mode );
        return 1;
    }
    else                       // success : register lock in thread
    {
        thread_ptr->remote_locks++;

        hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
                        (uint64_t)XPTR( thread_cxy , thread_ptr ) );

        xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) ,
                         XPTR( lock_cxy , &lock_ptr->list ) );

        hal_restore_irq( mode );
        return 0;
    }
}

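// The remote_spinlock_lock_busy() function polls the "taken" field until the atomic
// CAS succeeds. Interrupts stay disabled when it returns: the saved IRQ state is
// passed back through the irq_state argument and must later be given to
// remote_spinlock_unlock_busy().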
///////////////////////////////////////////////////
void remote_spinlock_lock_busy( xptr_t     lock_xp,
                                uint32_t * irq_state )
{
    bool_t              isAtomic = false;
    uint32_t            mode;
    volatile uint32_t   taken;

    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get cluster and local pointer on local thread
    cxy_t               thread_cxy = local_cxy;
    thread_t          * thread_ptr = CURRENT_THREAD;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        taken = hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) );

        // try to take the lock if not already taken
        if( taken == 0 )
        {
            isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
        }
    }

    // register lock in thread
    thread_ptr->remote_locks++;

    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
                    (uint64_t)XPTR( thread_cxy , thread_ptr ) );

    xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) ,
                     XPTR( lock_cxy , &lock_ptr->list ) );

    // irq_state must be restored when the lock is released
    *irq_state = mode;

}  // end remote_spinlock_lock_busy()

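// The remote_spinlock_unlock_busy() function releases a lock taken with
// remote_spinlock_lock_busy(): it clears the "owner" and "taken" fields, unregisters
// the lock from the calling thread, and restores the IRQ state saved at lock time.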
////////////////////////////////////////////////////
void remote_spinlock_unlock_busy( xptr_t    lock_xp,
                                  uint32_t  irq_state )
{
    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get pointer on local thread
    thread_t          * thread_ptr = CURRENT_THREAD;

    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );

    hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );

    thread_ptr->remote_locks--;

    xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );

    hal_restore_irq( irq_state );
}

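// The remote_spinlock_lock() function takes a remote spinlock, but tries to
// deschedule the calling thread (when descheduling is allowed) each time it finds
// the lock already taken, instead of busy waiting with interrupts disabled.
// The saved IRQ state is restored before returning.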
///////////////////////////////////////////
void remote_spinlock_lock( xptr_t lock_xp )
{
    bool_t              isAtomic = false;
    uint32_t            mode;
    volatile uint32_t   taken;

    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get cluster and local pointer on local thread
    cxy_t               thread_cxy = local_cxy;
    thread_t          * thread_ptr = CURRENT_THREAD;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        taken = hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) );

        // deschedule if possible when lock already taken
        if( taken != 0 )
        {
            hal_restore_irq( mode );
            if( thread_can_yield() ) sched_yield();
            hal_disable_irq( &mode );
            continue;
        }

        // try to take the lock if not already taken
        isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
    }

    // register lock in thread
    thread_ptr->remote_locks++;

    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
                    (uint64_t)XPTR( thread_cxy , thread_ptr ) );

    xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) ,
                     XPTR( lock_cxy , &lock_ptr->list ) );

    // enable interrupts
    hal_restore_irq( mode );
}

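// The remote_spinlock_unlock() function releases a lock taken with
// remote_spinlock_lock() or remote_spinlock_trylock(): it clears the "owner" and
// "taken" fields and unregisters the lock from the calling thread. It does not
// modify the IRQ state.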
/////////////////////////////////////////////
void remote_spinlock_unlock( xptr_t lock_xp )
{
    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get pointer on local thread
    thread_t          * thread_ptr = CURRENT_THREAD;

    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );

    hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );

    thread_ptr->remote_locks--;

    xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
}

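The short sketch below is not part of the repository file; it only illustrates a typical use of this API from any cluster. The remote_counter_t structure and the remote_counter_inc() function are hypothetical names introduced for the example, while XPTR(), hal_remote_lw(), hal_remote_sw() and the remote_spinlock functions are the ones used in the listing above.

// hypothetical shared structure, assumed to be allocated in some cluster "cxy"
typedef struct remote_counter_s
{
    remote_spinlock_t lock;    // protects the count field
    uint32_t          count;   // value shared by all clusters
}
remote_counter_t;

// increment a remote_counter_t located in cluster "cxy" at local pointer "ptr"
void remote_counter_inc( cxy_t cxy , remote_counter_t * ptr )
{
    // build an extended pointer on the embedded spinlock
    xptr_t lock_xp = XPTR( cxy , &ptr->lock );

    // take the lock (may deschedule while waiting)
    remote_spinlock_lock( lock_xp );

    // read / modify / write the remote field under the lock
    uint32_t val = hal_remote_lw( XPTR( cxy , &ptr->count ) );
    hal_remote_sw ( XPTR( cxy , &ptr->count ) , val + 1 );

    // release the lock
    remote_spinlock_unlock( lock_xp );
}

The busy-waiting flavour would use remote_spinlock_lock_busy() / remote_spinlock_unlock_busy() instead, keeping interrupts disabled for the whole critical section.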