/*
 * spinlock.h: kernel spinlock definition
 *
 * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
 *           Alain Greiner    (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-kernel; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
---|

#ifndef _SPINLOCK_H_
#define _SPINLOCK_H_

#include <almos_config.h>
#include <hal_types.h>
#include <list.h>

---|
/*******************************************************************************************
 * This file defines a local spinlock, which serializes all read or write accesses
 * in a given cluster. A new access can only start after completion of the previous access,
 * when the owner thread releases the lock. The lock has only two states: taken or free.
 * The owner thread is registered in the spinlock_t structure, and the list of local
 * spinlocks held by the calling thread is updated.
 *
 * A spinlock can be accessed in three modes :
 *
 * - The spinlock_lock() function is BLOCKING and uses a descheduling policy when the lock
 *   is already taken. The lock must be released with the spinlock_unlock() function.
 * - The spinlock_lock_busy() function is BLOCKING, but uses a polling (busy-waiting)
 *   policy if the lock is already taken. The IRQs can be disabled or not, depending
 *   on the irq_state argument. The lock must be released with the spinlock_unlock_busy()
 *   function.
 * - The spinlock_trylock() function is NON-BLOCKING, and tries only once to take the lock.
 *   It must be released with the spinlock_unlock() function.
 ******************************************************************************************/
---|

/**** Forward declarations ****/

struct thread_s;

/*******************************************************************************************
 * This structure defines a local spinlock.
 ******************************************************************************************/

---|
typedef struct spinlock_s
{
    uint32_t          taken;   /*! state : free if zero / taken if non zero    */
    struct thread_s * owner;   /*! pointer on current owner thread             */
    list_entry_t      list;    /*! list of all locks taken by owner            */
}
spinlock_t;

---|
/*******************************************************************************************
 * This function initializes a local spinlock in free state.
 *******************************************************************************************
 * @ lock       : pointer on spinlock.
 ******************************************************************************************/
inline void spinlock_init( spinlock_t * lock );
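
/*******************************************************************************************
 * Usage sketch (illustrative only, not part of this API): a spinlock is typically a field
 * of a shared kernel structure and is initialized once, before any thread can access it.
 * The shared_counter_t type and its fields below are hypothetical.
 *******************************************************************************************
 *
 *   typedef struct shared_counter_s
 *   {
 *       spinlock_t   lock;       // protects the count field
 *       uint32_t     count;      // shared value accessed by several threads
 *   }
 *   shared_counter_t;
 *
 *   static void shared_counter_init( shared_counter_t * sc )
 *   {
 *       spinlock_init( &sc->lock );   // lock starts in the free state
 *       sc->count = 0;
 *   }
 *
 ******************************************************************************************/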
---|

/*******************************************************************************************
 * This blocking function uses a busy-waiting strategy to lock a local spinlock.
 * It polls the lock and returns only when the lock has been taken.
 * If the irq_state argument is not NULL, all IRQs are disabled and remain disabled
 * until the lock is released. If irq_state is NULL, the IRQs are only disabled
 * during the lock acquisition polling loop.
 * It increments the calling thread local_locks count when the lock has been taken.
 *******************************************************************************************
 * @ lock       : pointer on spinlock
 * @ irq_state  : buffer to save the SR state (in the calling thread stack)
 ******************************************************************************************/
void spinlock_lock_busy( spinlock_t * lock,
                         uint32_t   * irq_state );

/*******************************************************************************************
 * This function releases a local busy-waiting spinlock.
 * It restores the CPU SR state, if required by the restore argument.
 *******************************************************************************************
 * @ lock       : pointer on spinlock
 * @ restore    : restore the CPU SR (from irq_state) if true
 * @ irq_state  : value to be restored in CPU SR
 ******************************************************************************************/
void spinlock_unlock_busy( spinlock_t * lock,
                           bool_t       restore,
                           uint32_t     irq_state );
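
/*******************************************************************************************
 * Usage sketch (illustrative only): the busy-waiting variant suits very short critical
 * sections, when the caller cannot afford to deschedule. The irq_state variable lives on
 * the calling thread stack and is passed back to the unlock function to restore the SR.
 * The shared_counter_t type is the hypothetical structure sketched above, and the true
 * constant is assumed to be provided by hal_types.h along with bool_t.
 *******************************************************************************************
 *
 *   static void shared_counter_add( shared_counter_t * sc, uint32_t val )
 *   {
 *       uint32_t   irq_state;
 *
 *       spinlock_lock_busy( &sc->lock, &irq_state );          // polls with IRQs disabled
 *       sc->count += val;                                     // short critical section
 *       spinlock_unlock_busy( &sc->lock, true, irq_state );   // restore SR from irq_state
 *   }
 *
 ******************************************************************************************/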
---|

/*******************************************************************************************
 * This blocking function locks a local spinlock.
 * If the lock is already taken, the calling thread deschedules and retries when
 * it is rescheduled, until success.
 * It increments the calling thread local_locks count when the lock has been taken.
 *******************************************************************************************
 * @ lock       : pointer on spinlock
 ******************************************************************************************/
void spinlock_lock( spinlock_t * lock );
---|

/*******************************************************************************************
 * This non-blocking function tries only once to lock a spinlock.
 * It increments the calling thread local_locks count in case of success.
 *******************************************************************************************
 * @ lock       : pointer on spinlock
 * @ returns 0 in case of success / returns non zero if the lock is already taken.
 ******************************************************************************************/
uint32_t spinlock_trylock( spinlock_t * lock );
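
/*******************************************************************************************
 * Usage sketch (illustrative only): spinlock_trylock() lets the caller give up instead of
 * waiting, for example when the protected update is optional. Names are hypothetical and
 * reuse the shared_counter_t structure sketched above.
 *******************************************************************************************
 *
 *   static bool_t shared_counter_try_reset( shared_counter_t * sc )
 *   {
 *       if( spinlock_trylock( &sc->lock ) )   // non zero : lock already taken
 *       {
 *           return false;                     // give up instead of spinning
 *       }
 *       sc->count = 0;
 *       spinlock_unlock( &sc->lock );         // trylock pairs with spinlock_unlock()
 *       return true;
 *   }
 *
 ******************************************************************************************/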
---|

/*******************************************************************************************
 * This function releases a spinlock, and calls the scheduler if necessary.
 *******************************************************************************************
 * @ lock       : pointer on spinlock
 ******************************************************************************************/
void spinlock_unlock( spinlock_t * lock );
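
/*******************************************************************************************
 * Usage sketch (illustrative only): the descheduling variant is preferred for longer
 * critical sections, since a contending thread yields the core instead of spinning.
 * Names are hypothetical and reuse the shared_counter_t structure sketched above.
 *******************************************************************************************
 *
 *   static uint32_t shared_counter_increment( shared_counter_t * sc )
 *   {
 *       uint32_t   new_value;
 *
 *       spinlock_lock( &sc->lock );     // deschedules until the lock is acquired
 *       new_value = ++sc->count;
 *       spinlock_unlock( &sc->lock );   // may trigger a scheduling point
 *       return new_value;
 *   }
 *
 ******************************************************************************************/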
---|


#endif  /* _SPINLOCK_H_ */