source: trunk/kernel/libk/spinlock.h @ 452

Last change on this file since 452 was 438, checked in by alain, 7 years ago

Fix a bug in scheduler related to RPC blocking.

/*
 * spinlock.h: kernel spinlock definition
 *
 * Authors  Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-kernel; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _SPINLOCK_H_
#define _SPINLOCK_H_

#include <kernel_config.h>
#include <hal_types.h>
#include <list.h>

/*******************************************************************************************
 * This structure defines a local spinlock, that serializes all read or write accesses
 * in a given cluster. A new access can only start after completion of the previous access,
 * when the owner thread releases the lock. The lock has only two states: taken or free.
 * When DEBUG_SPINLOCKS is set, the owner thread is registered in the spinlock_t structure,
 * and the lock is added to the list of local spinlocks held by the calling thread.
 *
 * A spinlock can be accessed in three modes (illustrative usage sketches are given
 * further down, after the corresponding declarations):
 *
 * - The spinlock_lock() function is BLOCKING and uses a descheduling policy when the lock
 *   is already taken. The lock must be released with the spinlock_unlock() function.
 * - The spinlock_lock_busy() function is BLOCKING, but uses a polling (busy-waiting)
 *   policy if the lock is already taken. IRQs are disabled while the lock is held, and
 *   the previous SR state is saved in the irq_state argument. The lock must be released
 *   with the spinlock_unlock_busy() function.
 * - The spinlock_trylock() function is NON-BLOCKING, and tries only once to take the lock.
 *   It must be released with the spinlock_unlock() function.
 ******************************************************************************************/

/****     Forward declarations    ****/

struct thread_s;

/*******************************************************************************************
 * This structure defines a local spinlock.
 * The "owner" and "list" fields are optional, and are only used for debug:
 * the "list" field registers the lock in the list of all spinlocks taken by a given thread.
 ******************************************************************************************/

typedef struct spinlock_s
{
    uint32_t            taken;             /*! state : free if zero / taken if non zero  */

#if DEBUG_SPINLOCKS
    struct thread_s   * owner;             /*! pointer on current owner thread           */
    list_entry_t        list;              /*! member of list of locks taken by owner    */
#endif

}
spinlock_t;

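/*******************************************************************************************
 * Illustration only (not part of the ALMOS-MKH API): a minimal sketch of how the "taken"
 * field is typically acquired, i.e. by atomically moving it from 0 (free) to 1 (taken).
 * The GCC __sync_bool_compare_and_swap() builtin is used here for the sake of the example;
 * the functions declared below rely on the kernel HAL primitives instead, and the helper
 * name is hypothetical.
 ******************************************************************************************/
static inline uint32_t spinlock_example_try_acquire( spinlock_t * lock )
{
    /* returns 1 if the lock was free and has been atomically taken / 0 otherwise */
    return (uint32_t)__sync_bool_compare_and_swap( &lock->taken , 0 , 1 );
}
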
/*******************************************************************************************
 * This function initializes a local spinlock in free state.
 *******************************************************************************************
 * @ lock    : pointer on spinlock.
 ******************************************************************************************/
inline void spinlock_init( spinlock_t * lock );

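/*******************************************************************************************
 * Illustration only (not the kernel's actual implementation): a minimal sketch of the
 * free state established by spinlock_init(), given the field semantics above. The helper
 * name is hypothetical, and initialization of the debug "list" field is omitted, as the
 * list API is not shown in this header.
 ******************************************************************************************/
static inline void spinlock_example_init( spinlock_t * lock )
{
    lock->taken = 0;                           /* zero means free                         */

#if DEBUG_SPINLOCKS
    lock->owner = (struct thread_s *)0;        /* no current owner                        */
#endif
}
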
/*******************************************************************************************
 * This blocking function uses a busy waiting strategy to lock a local spinlock.
 * It polls the lock and returns only when the lock has been taken.
 * All IRQs are disabled, and remain disabled until the lock is released.
 * It increments the calling thread local_locks count when the lock has been taken.
 *******************************************************************************************
 * @ lock       : pointer on spinlock
 * @ irq_state  : buffer to save the SR state (in the calling thread stack)
 ******************************************************************************************/
void spinlock_lock_busy( spinlock_t * lock,
                         uint32_t   * irq_state );

/*******************************************************************************************
 * This function releases a local busy-waiting spinlock.
 * It restores the CPU SR state.
 *******************************************************************************************
 * @ lock       : pointer on spinlock
 * @ irq_state  : value to be restored in the CPU SR
 ******************************************************************************************/
void spinlock_unlock_busy( spinlock_t * lock,
                           uint32_t     irq_state );

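/*******************************************************************************************
 * Illustration only: a possible use of the busy-waiting functions declared above,
 * protecting a short critical section with IRQs disabled. The function and its
 * arguments are hypothetical examples, not kernel objects.
 ******************************************************************************************/
static inline void spinlock_example_busy_increment( spinlock_t * lock,
                                                    uint32_t   * counter )
{
    uint32_t irq_state;                        /* SR state saved on the calling stack     */

    spinlock_lock_busy( lock , &irq_state );   /* polls the lock / IRQs disabled          */
    (*counter)++;                              /* short critical section                  */
    spinlock_unlock_busy( lock , irq_state );  /* releases the lock / SR state restored   */
}
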
/*******************************************************************************************
 * This blocking function locks a local spinlock.
 * If the lock is already taken, the calling thread deschedules without blocking,
 * and retries when it is rescheduled, until success.
 * It increments the calling thread local_locks count when the lock has been taken.
 *******************************************************************************************
 * @ lock       : pointer on spinlock
 ******************************************************************************************/
void spinlock_lock( spinlock_t * lock );

/*******************************************************************************************
 * This non-blocking function tries only once to lock a spinlock.
 * It increments the calling thread local_locks count in case of success.
 *******************************************************************************************
 * @ lock       : pointer on spinlock
 * @ returns 0 in case of success / returns non zero if the lock was already taken.
 ******************************************************************************************/
error_t spinlock_trylock( spinlock_t * lock );

/*******************************************************************************************
 * This function releases a spinlock, and calls the scheduler if necessary.
 *******************************************************************************************
 * @ lock       : pointer on spinlock
 ******************************************************************************************/
void spinlock_unlock( spinlock_t * lock );

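/*******************************************************************************************
 * Illustration only: a possible use of the descheduling and non-blocking functions
 * declared above. The function and argument names are hypothetical examples.
 ******************************************************************************************/
static inline void spinlock_example_update( spinlock_t * lock,
                                            uint32_t   * counter )
{
    spinlock_lock( lock );                     /* may deschedule and retry until success  */
    (*counter)++;                              /* critical section                        */
    spinlock_unlock( lock );                   /* releases the lock                       */
}

static inline error_t spinlock_example_try_update( spinlock_t * lock,
                                                   uint32_t   * counter )
{
    error_t error = spinlock_trylock( lock );  /* single attempt / never blocks           */

    if( error ) return error;                  /* lock already taken : give up            */

    (*counter)++;                              /* critical section                        */
    spinlock_unlock( lock );                   /* trylock is released with unlock()       */
    return 0;
}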

#endif  /* _SPINLOCK_H_ */