source: trunk/sys/libgomp/ordered.c @ 360

/* Copyright (C) 2005, 2009 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the ORDERED construct.  */

#include <gomp/libgomp.h>

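/* Added commentary: a sketch of the queueing scheme used below, inferred
   from this file; the fields themselves are declared with the team and
   work-share structures in the libgomp header.

     ws->ordered_team_ids    circular buffer of team ids, one slot per thread
     ws->ordered_cur         head slot: the thread currently entitled to
                             (or inside) the ordered section
     ws->ordered_num_used    number of occupied slots
     ws->ordered_owner       team id of the current owner, or -1 for none
     team->ordered_release   one semaphore per thread; posting to
                             ordered_release[id] lets thread id proceed

   Threads enqueue in the order in which they allocate iteration blocks
   and are released head-first, giving first-come-first-served entry to
   the ordered section for dynamic schedules.  */
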
/* This function is called when first allocating an iteration block.  That
   is, the thread is not currently on the queue.  The work-share lock must
   be held on entry.  */

void
gomp_ordered_first (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned index;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  index = ws->ordered_cur + ws->ordered_num_used;
  if (index >= team->nthreads)
    index -= team->nthreads;
  ws->ordered_team_ids[index] = thr->ts.team_id;

  /* If this is the first and only thread in the queue, then there is
     no one to release us when we get to our ordered section.  Post to
     our own release queue now so that we won't block later.  */
  if (ws->ordered_num_used++ == 0)
    gomp_sem_post (team->ordered_release[thr->ts.team_id]);
}
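
/* Added worked example with illustrative numbers: if team->nthreads == 4,
   ws->ordered_cur == 2 and ws->ordered_num_used == 3, the occupied slots
   are 2, 3 and 0, and the tail index computed above is 2 + 3 = 5, which
   wraps to 5 - 4 = 1.  The arriving thread's id is therefore written at
   ordered_team_ids[1], directly behind the existing queue.  */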

/* This function is called when completing the last iteration block.  That
   is, there are no more iterations to perform and so the thread should be
   removed from the queue entirely.  Because of the way ORDERED blocks are
   managed, it follows that we currently own access to the ORDERED block,
   and should now pass it on to the next thread.  The work-share lock must
   be held on entry.  */

void
gomp_ordered_last (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned next_id;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* We're no longer the owner.  */
  ws->ordered_owner = -1;

  /* If we're not the last thread in the queue, then wake the next.  */
  if (--ws->ordered_num_used > 0)
    {
      unsigned next = ws->ordered_cur + 1;
      if (next == team->nthreads)
        next = 0;
      ws->ordered_cur = next;

      next_id = ws->ordered_team_ids[next];
      gomp_sem_post (team->ordered_release[next_id]);
    }
}


/* This function is called when allocating a subsequent iteration block.
   That is, we're done with the current iteration block and we're
   allocating another.  This is the logical combination of a call to
   gomp_ordered_last followed by a call to gomp_ordered_first.  The
   work-share lock must be held on entry.  */

void
gomp_ordered_next (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned index, next_id;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* We're no longer the owner.  */
  ws->ordered_owner = -1;

  /* If there's only one thread in the queue, that must be us.  */
  if (ws->ordered_num_used == 1)
    {
      /* We have a similar situation as in gomp_ordered_first
         where we need to post to our own release semaphore.  */
      gomp_sem_post (team->ordered_release[thr->ts.team_id]);
      return;
    }

  /* If the queue is entirely full, then we move ourselves to the end of
     the queue merely by incrementing ordered_cur: the tail slot then
     coincides with the head slot we are vacating, which already holds
     our id.  Only if the queue is not full do we have to write our id
     into a free slot.  */
  if (ws->ordered_num_used < team->nthreads)
    {
      index = ws->ordered_cur + ws->ordered_num_used;
      if (index >= team->nthreads)
        index -= team->nthreads;
      ws->ordered_team_ids[index] = thr->ts.team_id;
    }

  index = ws->ordered_cur + 1;
  if (index == team->nthreads)
    index = 0;
  ws->ordered_cur = index;

  next_id = ws->ordered_team_ids[index];
  gomp_sem_post (team->ordered_release[next_id]);
}
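
/* Added worked example with illustrative numbers: suppose
   team->nthreads == 4, the queue holds threads A, B, C in slots 2, 3, 0
   (ordered_cur == 2, ordered_num_used == 3), and A, the current owner,
   calls gomp_ordered_next.  A's id is written at the tail slot
   (2 + 3) % 4 == 1, ordered_cur advances to 3, and B's release
   semaphore is posted.  The queue is now B, C, A in slots 3, 0, 1.  */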


/* This function is called when a statically scheduled loop is first
   being created.  */

void
gomp_ordered_static_init (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  if (team == NULL || team->nthreads == 1)
    return;

  gomp_sem_post (team->ordered_release[0]);
}

/* This function is called when a statically scheduled loop is moving to
   the next allocation block.  Static schedules are not first-come-first-served
   like the others, so we move to the numerically next thread, not the
   next thread on a list.  The work-share lock should *not* be held
   on entry.  */

void
gomp_ordered_static_next (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned id = thr->ts.team_id;

  if (team == NULL || team->nthreads == 1)
    return;

  ws->ordered_owner = -1;

  /* This thread currently owns the lock.  Increment the owner.  */
  if (++id == team->nthreads)
    id = 0;
  ws->ordered_team_ids[0] = id;
  gomp_sem_post (team->ordered_release[id]);
}
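
/* Added note, illustrative: with team->nthreads == 3, ownership simply
   rotates 0 -> 1 -> 2 -> 0 -> ...  No real queue is needed here because
   the static order is known in advance; as the assignment above shows,
   ordered_team_ids[0] is used only as a single cell recording the id of
   the next owner.  */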

/* This function is called when we need to assert that the thread owns the
   ordered section.  Due to the problem of posted-but-not-waited semaphores,
   this needs to happen before completing a loop iteration.  */

void
gomp_ordered_sync (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;

  /* Work share constructs can be orphaned.  But this clearly means that
     we are the only thread, and so we automatically own the section.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* ??? I believe it to be safe to access this data without taking the
     ws->lock.  The only presumed race condition is with the previous
     thread on the queue incrementing ordered_cur such that it points
     to us, concurrently with our check below.  But our team_id is
     already present in the queue, and the other thread will always
     post to our release semaphore.  So the two cases are that we will
     either win the race and momentarily block on the semaphore, or lose
     the race and find the semaphore already unlocked and so not block.
     Either way we get correct results.  */

  if (ws->ordered_owner != thr->ts.team_id)
    {
      gomp_sem_wait (team->ordered_release[thr->ts.team_id]);
      ws->ordered_owner = thr->ts.team_id;
    }
}

/* This function is called by user code when encountering the start of an
   ORDERED block.  We must check to see if the current thread is at the
   head of the queue, and if not, block.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
extern void GOMP_ordered_start (void)
        __attribute__((alias ("gomp_ordered_sync")));
#else
void
GOMP_ordered_start (void)
{
  gomp_ordered_sync ();
}
#endif

/* This function is called by user code when encountering the end of an
   ORDERED block.  With the current ORDERED implementation there's nothing
   for us to do.

   However, the current implementation has a flaw in that it does not allow
   the next thread into the ORDERED section immediately after the current
   thread exits the ORDERED section in its last iteration.  The existence
   of this function allows the implementation to change.  */

void
GOMP_ordered_end (void)
{
}
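
/* Added illustrative sketch (not part of this file): for user code such as

     #pragma omp parallel for ordered schedule(dynamic)
     for (i = 0; i < n; i++)
       {
         v = compute (i);             (may execute out of order)
         #pragma omp ordered
         emit (i, v);                 (must execute in iteration order)
       }

   the compiler wraps the ordered body in calls to the entry points above,
   roughly

     GOMP_ordered_start ();
     emit (i, v);
     GOMP_ordered_end ();

   while the queue-management routines (gomp_ordered_first, gomp_ordered_next
   and gomp_ordered_last) are called from the loop iteration-allocation code,
   not directly from user code.  compute, emit, v and n are hypothetical
   placeholders.  */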