/*
 * spinlock.h: kernel spinlock definition
 *
 * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
 *           Alain Greiner    (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-kernel; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _SPINLOCK_H_
#define _SPINLOCK_H_

#include <kernel_config.h>
#include <hal_types.h>
#include <list.h>

/*******************************************************************************************
 * This structure defines a local spinlock, which serializes all read or write accesses
 * to a shared resource in a given cluster. A new access can only start after completion
 * of the previous access, when the owner thread releases the lock. The lock has only two
 * states: taken or free. The owner thread is registered in the spinlock_t structure, and
 * the calling thread's list of held local spinlocks is updated.
 *
 * A spinlock can be accessed in three modes (see the usage sketch below):
 *
 * - The spinlock_lock() function is BLOCKING and uses a descheduling policy when the
 *   lock is already taken. The lock must be released with the spinlock_unlock() function.
 * - The spinlock_lock_busy() function is BLOCKING, but uses a polling (busy-waiting)
 *   policy when the lock is already taken. IRQs are disabled while the lock is held, and
 *   the previous SR state is saved in the irq_state buffer. The lock must be released
 *   with the spinlock_unlock_busy() function, which restores the saved SR state.
 * - The spinlock_trylock() function is NON-BLOCKING: it tries only once to take the lock.
 *   In case of success, the lock must be released with the spinlock_unlock() function.
 ******************************************************************************************/
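
/*******************************************************************************************
 * The sketch below illustrates these three modes. It is only an illustrative example:
 * the protected counter and the calling context are hypothetical, and error handling
 * is omitted.
 *
 *    spinlock_t   lock;                           // lock protecting a shared counter
 *    uint32_t     count;                          // hypothetical shared resource
 *    uint32_t     irq_state;                      // buffer for the saved SR state
 *
 *    spinlock_init( &lock );                      // lock created in free state
 *
 *    spinlock_lock( &lock );                      // 1) descheduling mode
 *    count++;
 *    spinlock_unlock( &lock );
 *
 *    spinlock_lock_busy( &lock, &irq_state );     // 2) busy-waiting mode / IRQs masked
 *    count++;
 *    spinlock_unlock_busy( &lock, irq_state );    //    saved SR state restored here
 *
 *    if( spinlock_trylock( &lock ) == 0 )         // 3) non-blocking mode
 *    {
 *        count++;
 *        spinlock_unlock( &lock );
 *    }
 ******************************************************************************************/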

/**** Forward declarations ****/

struct thread_s;

/*******************************************************************************************
 * This structure defines a local spinlock.
 * The "owner" and "list" fields are optional, and are only used for debug.
 * They register the lock in the list of all spinlocks taken by the owner thread.
 ******************************************************************************************/

typedef struct spinlock_s
{
    uint32_t            taken;      /*! state : free if zero / taken if non zero         */

#if CONFIG_LOCKS_DEBUG
    struct thread_s   * owner;      /*! pointer on current owner thread                  */
    list_entry_t        list;       /*! member of list of locks taken by owner           */
#endif

}
spinlock_t;

/*******************************************************************************************
 * This function initializes a local spinlock in free state.
 *******************************************************************************************
 * @ lock        : pointer on spinlock.
 ******************************************************************************************/
inline void spinlock_init( spinlock_t * lock );

/*******************************************************************************************
 * This blocking function uses a busy-waiting strategy to take a local spinlock.
 * It polls the lock and returns only when the lock has been taken.
 * All IRQs are disabled and remain disabled until the lock is released.
 * It increments the calling thread's local_locks count when the lock has been taken.
 *******************************************************************************************
 * @ lock        : pointer on spinlock
 * @ irq_state   : buffer to save the SR state (in the calling thread stack)
 ******************************************************************************************/
void spinlock_lock_busy( spinlock_t * lock,
                         uint32_t   * irq_state );

/*******************************************************************************************
 * This function releases a local busy-waiting spinlock.
 * It restores the CPU SR state.
 *******************************************************************************************
 * @ lock        : pointer on spinlock
 * @ irq_state   : value to be restored in the CPU SR
 ******************************************************************************************/
void spinlock_unlock_busy( spinlock_t * lock,
                           uint32_t     irq_state );

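/*******************************************************************************************
 * The sketch below gives a minimal illustration of one possible implementation of the
 * busy-waiting pair declared above. The hal_disable_irq(), hal_restore_irq() and
 * hal_atomic_test_set() primitives, and their signatures, are assumptions made for this
 * illustration only; the actual implementation is provided elsewhere in the kernel.
 *
 *    void spinlock_lock_busy( spinlock_t * lock, uint32_t * irq_state )
 *    {
 *        hal_disable_irq( irq_state );                  // mask IRQs / save previous SR state
 *        while( hal_atomic_test_set( &lock->taken ) );  // spin while the previous value
 *                                                       // returned by test-and-set is non zero
 *        // the calling thread's local_locks count is then incremented (not shown)
 *    }
 *
 *    void spinlock_unlock_busy( spinlock_t * lock, uint32_t irq_state )
 *    {
 *        lock->taken = 0;                               // release the lock
 *        hal_restore_irq( irq_state );                  // restore the saved SR state
 *    }
 ******************************************************************************************/
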
/*******************************************************************************************
 * This blocking function takes a local spinlock.
 * If the lock is already taken, the calling thread deschedules without blocking,
 * and retries when it is rescheduled, until it succeeds.
 * It increments the calling thread's local_locks count when the lock has been taken.
 *******************************************************************************************
 * @ lock        : pointer on spinlock
 ******************************************************************************************/
void spinlock_lock( spinlock_t * lock );

/*******************************************************************************************
 * This non-blocking function tries only once to take a local spinlock.
 * It increments the calling thread's local_locks count in case of success.
 *******************************************************************************************
 * @ lock        : pointer on spinlock
 * @ returns 0 in case of success / returns non zero if the lock is already taken.
 ******************************************************************************************/
error_t spinlock_trylock( spinlock_t * lock );
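
/*******************************************************************************************
 * The sketch below shows a typical use of spinlock_trylock() : taking a second lock
 * without spinning or descheduling while a first lock is already held, in order to avoid
 * a deadlock. The first_lock and second_lock variables are hypothetical and shown for
 * illustration only.
 *
 *    spinlock_lock( &first_lock );
 *
 *    if( spinlock_trylock( &second_lock ) == 0 )    // success : both locks are held
 *    {
 *        // ... access the resources protected by both locks ...
 *        spinlock_unlock( &second_lock );
 *    }
 *    else                                           // failure : do not wait on second_lock
 *    {                                              // while first_lock is held
 *        // ... release first_lock and retry later ...
 *    }
 *
 *    spinlock_unlock( &first_lock );
 ******************************************************************************************/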

/*******************************************************************************************
 * This function releases a local spinlock, and calls the scheduler if necessary.
 *******************************************************************************************
 * @ lock        : pointer on spinlock
 ******************************************************************************************/
void spinlock_unlock( spinlock_t * lock );


#endif  /* _SPINLOCK_H_ */