source: trunk/kernel/libk/spinlock.c @ 421

Last change on this file since 421 was 409, checked in by alain, 7 years ago

Fix bugs in exec

File size: 4.4 KB
Line 
1/*
2 * spinlock.c - kernel spinlock synchronization
3 *
4 * Authors   Ghassan Almaless  (2008,2009,2010,2011,2012)
 *           Alain Greiner     (2016)
6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25#include <kernel_config.h>
26#include <hal_types.h>
27#include <hal_atomic.h>
28#include <hal_special.h>
29#include <hal_irqmask.h>
30#include <thread.h>
31#include <scheduler.h>
32#include <printk.h>
33#include <spinlock.h>
34
35//////////////////////////////////////////////
36inline void spinlock_init( spinlock_t * lock )
37{
38    lock->taken = 0;
39
40#if CONFIG_LOCKS_DEBUG
41    lock->owner = NULL;
42    list_entry_init( &lock->list );
43#endif
44
45}
46
///////////////////////////////////////////
// Acquire <lock> by busy waiting, keeping IRQs disabled for the
// whole critical section. The previous IRQ state is returned in
// <irq_state> and must be passed back to spinlock_unlock_busy().
// @ lock      : pointer to the spinlock to take.
// @ irq_state : [out] saved IRQ state, restored at unlock time.
///////////////////////////////////////////
void spinlock_lock_busy( spinlock_t * lock,
                         uint32_t   * irq_state )
{
    reg_t               mode;
    volatile uint32_t   taken;
    thread_t          * this     = CURRENT_THREAD;
    bool_t              isAtomic = false;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        // plain read first: the CAS below is only attempted when
        // the lock looks free
        taken = lock->taken;

        // try to take the lock if not already taken
        if( taken == 0 )
        {
            isAtomic = hal_atomic_cas( &lock->taken , 0 , 1 );
        }
    }

    // register the acquired lock in the calling thread
    this->local_locks++;

#if CONFIG_LOCKS_DEBUG
    lock->owner = this;
    list_add_first( &this->locks_root , &lock->list );
#endif

    // irq_state must be restored when lock is released
    *irq_state = mode;
}
81
82//////////////////////////////////////////////
83void spinlock_unlock_busy( spinlock_t * lock,
84                           uint32_t     irq_state )
85{
86    thread_t * this = CURRENT_THREAD;;
87
88#if CONFIG_LOCKS_DEBUG
89    lock->owner = NULL;
90    list_unlink( &lock->list );
91#endif
92
93    lock->taken = 0;
94    this->local_locks--;
95
96    // deschedule if pending request
97    thread_check_sched();
98 
99    // restore IRQs
100        hal_restore_irq( irq_state );
101}
102
///////////////////////////////////////
// Acquire <lock>, yielding the processor (without blocking) when
// the lock is already taken, instead of pure busy waiting.
// IRQs are disabled only during each acquisition attempt and are
// restored before returning.
// @ lock : pointer to the spinlock to take.
///////////////////////////////////////
void spinlock_lock( spinlock_t * lock )
{
    reg_t             mode;
    thread_t        * this     = CURRENT_THREAD;
    bool_t            isAtomic = false;
    volatile uint32_t taken;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        taken = lock->taken;

        // deschedule without blocking when lock already taken
        if( taken != 0 )
        {
            // re-enable IRQs while waiting, so interrupts can be served
            hal_restore_irq( mode );
            if( thread_can_yield() ) sched_yield("waiting spinlock");
            hal_disable_irq( &mode );
            continue;
        }

        // try to atomically take the lock if not already taken
        isAtomic = hal_atomic_cas( &lock->taken , 0 , 1 );
    }

    // register the acquired lock in the calling thread
    this->local_locks++;

#if CONFIG_LOCKS_DEBUG
    lock->owner = this;
    list_add_first( &this->locks_root , &lock->list );
#endif

    // restore IRQs
    hal_restore_irq( mode );
}
142
143/////////////////////////////////////////////
144error_t spinlock_trylock( spinlock_t * lock )
145{
146    reg_t      mode;
147    bool_t     isAtomic = false;
148    thread_t * this     = CURRENT_THREAD;
149
150    hal_disable_irq( &mode );
151
152    if( lock->taken == 0)
153        isAtomic = hal_atomic_cas( &lock->taken , 0 , 1);
154
155    if(isAtomic == false)
156    {
157        hal_restore_irq(mode);
158        return 1;
159    }
160    else
161    {
162        this->local_locks++;
163
164#if CONFIG_LOCKS_DEBUG
165        lock->owner = this;
166        list_add_first( &this->locks_root , &lock->list );
167#endif
168
169        hal_restore_irq(mode);
170        return 0;
171    }
172}
173
/////////////////////////////////////////
// Release <lock> previously taken with spinlock_lock().
// IRQ state is NOT modified here (unlike spinlock_unlock_busy),
// since spinlock_lock() restored IRQs before returning.
// A deschedule is performed if a scheduling request is pending.
// @ lock : pointer to the spinlock to release.
/////////////////////////////////////////
void spinlock_unlock( spinlock_t * lock )
{
    thread_t * this = CURRENT_THREAD;

#if CONFIG_LOCKS_DEBUG
    // unregister ownership before the release store below
    lock->owner = NULL;
    list_unlink( &lock->list );
#endif

    // release the lock and unregister it from the calling thread
    lock->taken = 0;
    this->local_locks--;

    // deschedule if pending request
    thread_check_sched();
}
190
Note: See TracBrowser for help on using the repository browser.