Changeset 624 for trunk/kernel/libk
Timestamp: Mar 12, 2019, 1:37:38 PM
Location:  trunk/kernel/libk
Files:     4 edited
trunk/kernel/libk/busylock.c
(r600 → r624)

  * busylock.c - local kernel-busy waiting lock implementation.
  *
- * Authors  Alain Greiner (2016,2017,2018)
+ * Authors  Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites

In the acquire() function:

 #if DEBUG_BUSYLOCK
-if( (lock->type != LOCK_CHDEV_TXT0) &&
-    ((uint32_t)hal_get_cycles() > DEBUG_BUSYLOCK) )
+if( lock->type != LOCK_CHDEV_TXT0 )
 {
+    // update thread list of busylocks
     xptr_t root_xp = XPTR( local_cxy , &this->busylocks_root );
-
-    // update thread list of busylocks
     xlist_add_last( root_xp , XPTR( local_cxy , &lock->xlist ) );
 }
 #endif

-#if( DEBUG_BUSYLOCK && DEBUG_BUSYLOCK_THREAD_XP )
+#if( DEBUG_BUSYLOCK & 1 )
 if( (lock->type != LOCK_CHDEV_TXT0) &&
-    (XPTR( local_cxy , this ) == DEBUG_BUSYLOCK_THREAD_XP) )
+    (this->process->pid == DEBUG_BUSYLOCK_PID) &&
+    (this->trdid == DEBUG_BUSYLOCK_TRDID) )
 {
-    // get cluster and local pointer of target thread
-    cxy_t      thread_cxy = GET_CXY( DEBUG_BUSYLOCK_THREAD_XP );
-    thread_t * thread_ptr = GET_PTR( DEBUG_BUSYLOCK_THREAD_XP );
-
-    // display message on kernel TXT0
     printk("\n[%s] thread[%x,%x] ACQUIRE lock %s\n",
            __FUNCTION__, this->process->pid, this->trdid, lock_type_str[lock->type] );

In the release() function:

 #if DEBUG_BUSYLOCK
-if( (lock->type != LOCK_CHDEV_TXT0) &&
-    ((uint32_t)hal_get_cycles() > DEBUG_BUSYLOCK) )
+if( lock->type != LOCK_CHDEV_TXT0 )
 {
     // remove lock from thread list of busylocks
     ...
 }
 #endif

-#if( DEBUG_BUSYLOCK && DEBUG_BUSYLOCK_THREAD_XP )
+#if( DEBUG_BUSYLOCK & 1 )
 if( (lock->type != LOCK_CHDEV_TXT0) &&
-    (XPTR( local_cxy , this ) == DEBUG_BUSYLOCK_THREAD_XP) )
+    (this->process->pid == DEBUG_BUSYLOCK_PID) &&
+    (this->trdid == DEBUG_BUSYLOCK_TRDID) )
 {
-    // get cluster and local pointer of target thread
-    cxy_t      thread_cxy = GET_CXY( DEBUG_BUSYLOCK_THREAD_XP );
-    thread_t * thread_ptr = GET_PTR( DEBUG_BUSYLOCK_THREAD_XP );
-
-    // display message on kernel TXT0
     printk("\n[%s] thread[%x,%x] RELEASE lock %s\n",
            __FUNCTION__, this->process->pid, this->trdid, lock_type_str[lock->type] );
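With this change the per-thread trace is no longer selected by an extended pointer but by a (pid, trdid) pair, and the verbose ACQUIRE/RELEASE messages are gated by bit 0 of DEBUG_BUSYLOCK. A minimal sketch of the corresponding configuration is shown below; the symbol names come from the hunks above, but the values are placeholders, not the defaults actually shipped in kernel_config.h.

// Hypothetical kernel_config.h fragment (example values only).
// A non-zero DEBUG_BUSYLOCK keeps the per-thread list of held busylocks;
// bit 0 additionally enables the ACQUIRE/RELEASE trace for one thread,
// identified by its process PID and its thread TRDID.
#define DEBUG_BUSYLOCK          1           // enable busylock debug + trace (bit 0 set)
#define DEBUG_BUSYLOCK_PID      0x10001     // example PID of the traced thread
#define DEBUG_BUSYLOCK_TRDID    0x10000     // example TRDID of the traced thread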
trunk/kernel/libk/busylock.h
(r623 → r624)

  * busylock.h: local kernel busy-waiting lock definition.
  *
- * Authors  Alain Greiner (2016,2017,2018)
+ * Authors  Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites

In the file header comment:

  * a shared object located in a given cluster, made by thread(s) running in same cluster.
  * It uses a busy waiting policy when the lock is taken by another thread, and should
- * be used to execute very short actions, such as accessing basic allocators ,or higher
+ * be used to execute very short actions, such as accessing basic allocators or higher
  * level synchronisation objects (barriers, queuelocks, or rwlocks).
- * WARNING: a thread cannot yield when it is owning a busylock.
  *
  * - To acquire the lock, we use a ticket policy to avoid starvation: the calling thread
  *   makes an atomic increment on a "ticket" allocator, and keep polling the "current"
  *   value until current == ticket.
  *
- * - To release the lock, the owner thread increments the "current" value,
- *   decrements its busylocks counter.
+ * - To release the lock, the owner thread increments the "current" value.
  *
- * - When a thread takes a busylock, it enters a critical section: the busylock_acquire()
+ * - When a thread takes a busylock, it enters a critical section: the acquire()
  *   function disables the IRQs, takes the lock, increments the thread busylocks counter,
- *   and save the SR in lock descriptor and returns.
+ *   save the SR in lock descriptor and returns.
  *
- * - The busylock_release() function releases the lock, decrements the thread busylock
- *   counter, restores the SR to exit the critical section, and returns
+ * - The release() function releases the lock, decrements the thread busylock
+ *   counter, restores the SR to exit the critical section, and returns.
  *
- * - If a thread owning a busylock (local or remote) tries to deschedule, the scheduler
- *   signals a kernel panic.
+ * WARNING: a thread cannot yield when it is holding a busylock (local or remote).
+ *
+ * This rule is checked by all functions containing a thread_yield() AND by the scheduler,
+ * thanks to the busylocks counter stored in the calling thread descriptor.
+ * 1) all functions call "thread_assert_can_yield()" before calling "thread_yield()".
+ * 2) The scheduler checks that the calling thread does not hold any busylock.
+ * In case of violation the core goes to sleep after a [PANIC] message on TXT0.
  ******************************************************************************************/

In the busylock structure comment:

  * This structure defines a busylock.
- * The <type> and <xlist> fields are used for debug.
- * The type defines the lock usage as detailed in the kernel_config.h file.
+ * The <xlist> field is only used when DEBUG_BUSYLOCK is set.
+ * The <type> field defines the lock usage as detailed in the kernel_config.h file.
  ******************************************************************************************/
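The header comment above describes a classic ticket lock. The stand-alone fragment below is only a sketch of that ticket policy, written with C11 atomics for illustration; it deliberately omits the IRQ masking, the SR saved in the lock descriptor, and the per-thread busylocks counter that the kernel's acquire()/release() also handle.

// Stand-alone sketch of the ticket policy described above (illustration only).
#include <stdatomic.h>

typedef struct
{
    atomic_uint ticket;     // next free ticket, atomically incremented by acquire()
    atomic_uint current;    // ticket value currently owning the lock
} ticket_lock_t;

static void ticket_lock_acquire( ticket_lock_t * lock )
{
    // take a ticket: fetch_add returns the value before the increment
    unsigned int my_ticket = atomic_fetch_add( &lock->ticket, 1 );

    // busy wait until "current" reaches our ticket (FIFO order, no starvation)
    while( atomic_load( &lock->current ) != my_ticket )
        ;   // spin
}

static void ticket_lock_release( ticket_lock_t * lock )
{
    // pass the lock to the next waiting ticket
    atomic_fetch_add( &lock->current, 1 );
}

Both counters start at zero, so the first caller gets ticket 0 and proceeds immediately; later callers are served strictly in ticket order, which is the starvation-free property the comment refers to.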
trunk/kernel/libk/remote_busylock.c
(r619 → r624)

  * remote_busylock.c - remote kernel busy-waiting lock implementation.
  *
- * Authors  Alain Greiner (2016,2017,2018)
+ * Authors  Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites

In the acquire() function:

 #if DEBUG_BUSYLOCK
 uint32_t type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->type ) );
-if( (type != LOCK_CHDEV_TXT0) &&
-    ((uint32_t)hal_get_cycles() > DEBUG_BUSYLOCK) )
+if( type != LOCK_CHDEV_TXT0 )
 {
+    // update thread list of busyslocks
     xptr_t root_xp = XPTR( local_cxy , &this->busylocks_root );
-
-    // update thread list of busyslocks
     xlist_add_last( root_xp , XPTR( lock_cxy , &lock_ptr->xlist ) );
 }
 #endif

-#if( DEBUG_BUSYLOCK && DEBUG_BUSYLOCK_THREAD_XP )
+#if( DEBUG_BUSYLOCK & 1 )
 if( (type != LOCK_CHDEV_TXT0) &&
-    (XPTR( local_cxy , this ) == DEBUG_BUSYLOCK_THREAD_XP) )
+    (this->process->pid == DEBUG_BUSYLOCK_PID) &&
+    (this->trdid == DEBUG_BUSYLOCK_TRDID) )
 {
     printk("\n[%s] thread[%x,%x] ACQUIRE lock %s\n",

In the release() function:

 #if DEBUG_BUSYLOCK
 uint32_t type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->type ) );
-if( (type != LOCK_CHDEV_TXT0) &&
-    (XPTR( local_cxy , this ) == DEBUG_BUSYLOCK_THREAD_XP) &&
-    ((uint32_t)hal_get_cycles() > DEBUG_BUSYLOCK) )
+if( type != LOCK_CHDEV_TXT0 )
 {
     // remove lock from thread list of busyslocks
     ...
 }
 #endif

-#if( DEBUG_BUSYLOCK && DEBUG_BUSYLOCK_THREAD_XP )
+#if( DEBUG_BUSYLOCK & 1 )
 if( (type != LOCK_CHDEV_TXT0) &&
-    (XPTR( local_cxy , this ) == DEBUG_BUSYLOCK_THREAD_XP) )
+    (this->process->pid == DEBUG_BUSYLOCK_PID) &&
+    (this->trdid == DEBUG_BUSYLOCK_TRDID) )
 {
     printk("\n[%s] thread[%x,%x] RELEASE lock %s\n",
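The remote variant manipulates a lock descriptor that may live in another cluster, so every field access in the hunks above goes through an extended pointer. The fragment below is a hedged sketch of that pattern applied to the ticket policy; the <ticket> and <current> field names come from the header comments, and hal_remote_atomic_add() is assumed to behave like its local counterpart and return the value observed before the addition.

// Hedged sketch (illustration only): taking a remote busylock ticket.
// lock_xp is the extended pointer identifying the remote lock descriptor.
cxy_t               lock_cxy = GET_CXY( lock_xp );   // cluster containing the lock
remote_busylock_t * lock_ptr = GET_PTR( lock_xp );   // local pointer inside that cluster

// atomically take a ticket in the remote cluster (assumed to return the old value)
uint32_t ticket = hal_remote_atomic_add( XPTR( lock_cxy , &lock_ptr->ticket ) , 1 );

// poll the remote "current" value until it reaches our ticket
while( hal_remote_l32( XPTR( lock_cxy , &lock_ptr->current ) ) != ticket )
    ;   // busy wait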
trunk/kernel/libk/remote_busylock.h
(r619 → r624)

  * remote_busylock.h: remote kernel busy-waiting lock definition.
  *
- * Authors  Alain Greiner (2016,2017,2018)
+ * Authors  Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites

In the file header comment:

  * higher level synchronisation objects, such as remote_queuelock and remote_rwlock.
  *
- * WARNING: a thread cannot yield when it is owning a busylock (local or remote).
- *
  * - To acquire the lock, we use a ticket policy to avoid starvation: the calling thread
  *   makes an atomic increment on a "ticket" allocator, and keep polling the "current"
  *   value until current == ticket.
  *
- * - To release the lock, the owner thread increments the "current" value,
- *   decrements its busylocks counter.
+ * - To release the lock, the owner thread increments the "current" value.
  *
- * - When a thread takes a busylock, it enters a critical section: the busylock_acquire()
+ * - When a thread takes a busylock, it enters a critical section: the acquire()
  *   function disables the IRQs, takes the lock, increments the thread busylocks counter,
- *
+ *   save the SR in the lock descriptor and returns.
  *
- * - The busylock_release() function decrements the thread busylock counter,
- *   restores the SR to exit the critical section, and returns
+ * - The release() function releases the lock, decrements the thread busylock
+ *   counter, restores the SR to exit the critical section, and returns.
  *
- * - If a thread owning a busylock (local or remote) tries to deschedule, the scheduler
- *   signals a kernel panic.
+ * WARNING: a thread cannot yield when it is holding a busylock (local or remote).
+ *
+ * This rule is checked by all functions containing a thread_yield() AND by the scheduler,
+ * thanks to the busylocks counter stored in the calling thread descriptor.
+ * 1) all functions call "thread_assert_can_yield()" before calling "thread_yield()".
+ * 2) The scheduler checks that the calling thread does not hold any busylock.
+ * In case of violation the core goes to sleep after a [PANIC] message on TXT0.
  ******************************************************************************************/
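The new WARNING paragraph explains that the no-yield rule is enforced through the busylocks counter kept in the thread descriptor. The fragment below is only a sketch of such a check; the thread_assert_can_yield() name comes from the text above, but its signature, the <busylocks> field name and the hal_core_sleep() helper are assumptions made for illustration.

// Hedged sketch of the yield-rule check described above (illustration only).
void thread_assert_can_yield( thread_t * thread,      // calling thread (assumed argument)
                              const char * func_str ) // caller name for the PANIC message
{
    if( thread->busylocks )   // counter incremented by acquire(), decremented by release()
    {
        // display a [PANIC] message on the TXT0 kernel terminal
        printk("\n[PANIC] in %s : thread[%x,%x] cannot yield / %d busylock(s) held\n",
               func_str , thread->process->pid , thread->trdid , thread->busylocks );

        // the core goes to sleep, as stated in the header comment (assumed helper)
        hal_core_sleep();
    }
}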