/* Copyright (c) 2007-2009, Stanford University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Stanford University nor the names of its
 *       contributors may be used to endorse or promote products derived from
 *       this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

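/* MCS list-based queue spinlock (Mellor-Crummey and Scott), used when the
 * runtime is built with MR_LOCK_MCS.  Each contending thread enqueues a
 * private node and spins on a flag inside that node, so waiters spin on
 * their own cache line instead of on a single shared lock word.
 */
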
#ifdef MR_LOCK_MCS

#include <stdlib.h>
#include <stdint.h>     /* uintptr_t */
#include <assert.h>
#include "synch.h"
#include "atomic.h"

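/* Lock layout:
 *   - mcs_lock_priv is a per-thread queue node: mcs_head points back at the
 *     shared lock, next links to the thread queued behind us, and locked is
 *     the flag this thread spins on while waiting.
 *   - mcs_lock is the shared lock word; head points at the most recently
 *     queued node (i.e. the tail of the waiter queue).
 */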
typedef struct mcs_lock_priv {
    struct mcs_lock         *mcs_head;
    struct mcs_lock_priv    *next;
    uintptr_t               locked;
} mcs_lock_priv;

typedef struct mcs_lock {
    mcs_lock_priv   *head;
} mcs_lock;

static mr_lock_t mcs_alloc(void)
{
    mcs_lock    *l;

    l = malloc(sizeof(mcs_lock));
    l->head = NULL;

    return l;
}

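/* Every thread that will contend for the lock needs its own queue node; that
 * node (not the shared mcs_lock) is the handle later passed to acquire and
 * release.
 */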
static mr_lock_t mcs_alloc_per_thread(mr_lock_t l)
{
    mcs_lock_priv   *priv;

    priv = malloc(sizeof(mcs_lock_priv));

    priv->mcs_head = l;
    priv->next = NULL;
    priv->locked = 0;

    return priv;
}

static void mcs_free (mr_lock_t l)
{
    free(l);
}

static void mcs_free_per_thread (mr_lock_t l)
{
    free(l);
}

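/* Acquire: atomically swap our node into the lock's head pointer.  If there
 * was no predecessor, we own the lock immediately.  Otherwise we link
 * ourselves behind the predecessor and spin on our own locked flag until the
 * predecessor's release clears it.
 */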
static void mcs_acquire(mr_lock_t l)
{
    mcs_lock        *mcs;
    mcs_lock_priv   *prev, *priv;

    priv = l;
    mcs = priv->mcs_head;

    assert (priv->locked == 0);

    set_and_flush(priv->next, NULL);

    prev = (void*)(atomic_xchg((uintptr_t)priv, (void*)(&mcs->head)));
    if (prev == NULL) {
        /* has exclusive access on lock */
        return;
    }

    /* someone else has lock */

    /* NOTE: this ordering is important: if locked were set after the next
     * assignment, the predecessor could release and clear our flag before
     * we set it, and we would then spin forever */
    set_and_flush(priv->locked, 1);
    set_and_flush(prev->next, priv);

    while (atomic_read(&priv->locked)) { asm("" ::: "memory"); }
}

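/* Release: if no successor is visible, try to swing the lock's head pointer
 * from our node back to NULL.  If that CAS fails, a successor is in the
 * middle of linking in, so wait for its next pointer to appear and then hand
 * the lock over by clearing the successor's locked flag.
 */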
static void mcs_release (mr_lock_t l)
{
    mcs_lock        *mcs;
    mcs_lock_priv   *priv;

    priv = l;
    mcs = priv->mcs_head;

    if (priv->next == NULL) {
        if (cmp_and_swp(
            (uintptr_t)NULL,
            (void*)(&mcs->head), (uintptr_t)priv)) {
            /* we were the only one on the lock, now it's empty */
            return;
        }

        /* wait for next to get thrown on */
        while (((void*)atomic_read(&(priv->next))) == NULL) {
            asm("" ::: "memory");
        }
    }

    set_and_flush(priv->next->locked, 0);
}

mr_lock_ops mr_mcs_ops = {
    .alloc = mcs_alloc,
    .acquire = mcs_acquire,
    .release = mcs_release,
    .free = mcs_free,
    .alloc_per_thread = mcs_alloc_per_thread,
    .free_per_thread = mcs_free_per_thread,
};

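/* Illustrative usage sketch (assumes the generic mr_lock_t/mr_lock_ops driver
 * code elsewhere in the runtime; thread creation and error handling omitted):
 *
 *     mr_lock_t lock = mr_mcs_ops.alloc();                  // shared, once
 *     mr_lock_t node = mr_mcs_ops.alloc_per_thread(lock);   // per thread
 *
 *     mr_mcs_ops.acquire(node);
 *     // ... critical section ...
 *     mr_mcs_ops.release(node);
 *
 *     mr_mcs_ops.free_per_thread(node);
 *     mr_mcs_ops.free(lock);
 */
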
#endif /* MR_LOCK_MCS */