source: trunk/kernel/libk/spinlock.c @ 348

Last change on this file since 348 was 337, checked in by alain, 7 years ago

Introduce the delayed context switch if current thread has a lock.

File size: 4.2 KB
RevLine 
[1]1/*
2 * spinlock.c - kernel spinlock synchronization
[331]3 *
[1]4 * Authors   Ghassan Almaless  (2008,2009,2010,2011,2012)
 5 *           Alain Greiner     (2016)
6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[1]26#include <hal_types.h>
27#include <hal_atomic.h>
28#include <hal_special.h>
29#include <hal_irqmask.h>
30#include <thread.h>
31#include <scheduler.h>
32#include <printk.h>
33#include <spinlock.h>
34
35//////////////////////////////////////////////
36inline void spinlock_init( spinlock_t * lock )
[331]37{
38    lock->taken = 0;
39    lock->owner = NULL;
[1]40    list_entry_init( &lock->list );
41}
42
43///////////////////////////////////////////
[331]44void spinlock_lock_busy( spinlock_t * lock,
[1]45                         uint32_t   * irq_state )
46{
[331]47    reg_t               mode;
48    volatile uint32_t   taken;
49    thread_t          * this     = CURRENT_THREAD;
50    bool_t              isAtomic = false;
[1]51
52    // disable interrupts
[331]53    hal_disable_irq( &mode );
54
[1]55    // loop until success
[331]56    while( isAtomic == false )
57    {
58        taken = lock->taken;
[1]59
[11]60        // try to take the lock if not already taken
[331]61        if( taken == 0 )
[1]62        {
[331]63            isAtomic = hal_atomic_cas( &lock->taken , 0 , 1 );
[1]64        }
[331]65    }
[1]66
[331]67    this->local_locks++;
[1]68    lock->owner = this;
69    list_add_first( &this->locks_root , &lock->list );
70
[331]71    // irq_state must be restored when lock is released
[11]72    *irq_state = mode;
[1]73}
74
75//////////////////////////////////////////////
76void spinlock_unlock_busy( spinlock_t * lock,
77                           uint32_t     irq_state )
78{
[331]79    thread_t * this = CURRENT_THREAD;;
80
[1]81    lock->owner = NULL;
82    lock->taken = 0;
83    this->local_locks--;
84    list_unlink( &lock->list );
[331]85
[337]86    // deschedule if pending request
87    thread_check_sched();
88 
89    // restore IRQs
90        hal_restore_irq( irq_state );
[1]91}
[331]92
[1]93///////////////////////////////////////
94void spinlock_lock( spinlock_t * lock )
95{
[331]96    reg_t             mode;
97    thread_t        * this     = CURRENT_THREAD;
98    bool_t            isAtomic = false;
99    volatile uint32_t taken;
100
[1]101    // disable interrupts
[331]102    hal_disable_irq( &mode );
103
[1]104    // loop until success
[331]105    while( isAtomic == false )
106    {
[1]107        taken = lock->taken;
108
109        // deschedule without blocking when lock already taken
[331]110        if( taken != 0 )
[1]111        {
112            hal_restore_irq( mode );
[296]113            if( thread_can_yield() ) sched_yield( NULL );
[1]114            hal_disable_irq( &mode );
115            continue;
116        }
117
118        // try to atomically take the lock if not already taken
[331]119        isAtomic = hal_atomic_cas( &lock->taken , 0 , 1 );
[1]120    }
121
[331]122    this->local_locks++;
[1]123    lock->owner = this;
124    list_add_first( &this->locks_root , &lock->list );
125
[337]126    // restore IRQs
[1]127    hal_restore_irq( mode );
128}
129
[11]130/////////////////////////////////////////////
131error_t spinlock_trylock( spinlock_t * lock )
[331]132{
133    reg_t      mode;
134    bool_t     isAtomic = false;
135    thread_t * this     = CURRENT_THREAD;
[1]136
[331]137    hal_disable_irq( &mode );
[1]138
[331]139    if( lock->taken == 0)
140        isAtomic = hal_atomic_cas( &lock->taken , 0 , 1);
141
142    if(isAtomic == false)
143    {
144        hal_restore_irq(mode);
145        return 1;
146    }
[1]147    else
148    {
[331]149        this->local_locks++;
150        lock->owner = this;
[1]151        list_add_first( &this->locks_root , &lock->list );
[331]152        hal_restore_irq(mode);
153        return 0;
[1]154    }
155}
156
157/////////////////////////////////////////
158void spinlock_unlock( spinlock_t * lock )
159{
[331]160    thread_t * this = CURRENT_THREAD;
161
[1]162    lock->owner = NULL;
163    lock->taken = 0;
164    this->local_locks--;
165    list_unlink( &lock->list );
[337]166
167    // deschedule if pending request
168    thread_check_sched();
[1]169}
170
Note: See TracBrowser for help on using the repository browser.