source: trunk/sys/libgomp/work.c @ 297

Last change on this file since 297 was 1, checked in by alain, 8 years ago

First import

/* Copyright (C) 2005, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains routines to manage the work-share queue for a team
   of threads.  */

#include <gomp/libgomp.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>


/* Allocate a new work share structure, preferably from the current
   team's free gomp_work_share cache.  */

static struct gomp_work_share *
alloc_work_share (struct gomp_team *team)
{
  struct gomp_work_share *ws;
  unsigned int i;

  /* This is called in a critical section.  */
  if (team->work_share_list_alloc != NULL)
    {
      ws = team->work_share_list_alloc;
      team->work_share_list_alloc = ws->next_free;
      return ws;
    }

#ifdef HAVE_SYNC_BUILTINS
  ws = team->work_share_list_free;
  /* We need an atomic read from work_share_list_free,
     as free_work_share can be called concurrently.  */
  __asm ("" : "+r" (ws));
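  /* The empty asm with "ws" as an in/out operand acts as a compiler
     barrier: it forces work_share_list_free to be read exactly once
     into a register and keeps the compiler from re-reading it below
     while a concurrent free_work_share may be rewriting it.  */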

  if (ws && ws->next_free)
    {
      struct gomp_work_share *next = ws->next_free;
      ws->next_free = NULL;
      team->work_share_list_alloc = next->next_free;
      return next;
    }
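  /* The list head is deliberately left in place and only the tail
     starting at ws->next_free is stolen.  free_work_share pushes new
     nodes onto the head with a compare-and-swap, so never writing the
     head pointer here keeps this single allocator from racing with
     concurrent frees.  */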
#else
  gomp_mutex_lock (&team->work_share_list_free_lock);
  ws = team->work_share_list_free;
  if (ws)
    {
      team->work_share_list_alloc = ws->next_free;
      team->work_share_list_free = NULL;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
      return ws;
    }
  gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif

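  /* Both caches were empty: allocate a fresh chunk, twice as large as
     the previous one.  ws[0] doubles as the chunk-chain node (via
     next_alloc, so the whole block can be freed when the team dies)
     and as the work share returned to the caller; ws[1] through
     ws[chunk-1] are threaded onto the private allocation list.  */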
  team->work_share_chunk *= 2;
  ws = gomp_malloc (team->work_share_chunk * sizeof (struct gomp_work_share));
  ws->next_alloc = team->work_shares[0].next_alloc;
  team->work_shares[0].next_alloc = ws;
  team->work_share_list_alloc = &ws[1];
  for (i = 1; i < team->work_share_chunk - 1; i++)
    ws[i].next_free = &ws[i + 1];
  ws[i].next_free = NULL;
  return ws;
}

/* Initialize an already allocated struct gomp_work_share.
   This shouldn't touch the next_alloc field.  */

void
gomp_init_work_share (struct gomp_work_share *ws, int ordered,
                      unsigned nthreads)
{
  gomp_mutex_init (&ws->lock);
  if (__builtin_expect (ordered, 0))
    {
#define INLINE_ORDERED_TEAM_IDS_CNT \
  ((sizeof (struct gomp_work_share) \
    - offsetof (struct gomp_work_share, inline_ordered_team_ids)) \
   / sizeof (((struct gomp_work_share *) 0)->inline_ordered_team_ids[0]))

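      /* INLINE_ORDERED_TEAM_IDS_CNT is how many team ids fit in the
         space between the inline_ordered_team_ids field and the end of
         struct gomp_work_share; only teams larger than that need a
         separate heap allocation for the ordered bookkeeping.  */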
      if (nthreads > INLINE_ORDERED_TEAM_IDS_CNT)
        ws->ordered_team_ids
          = gomp_malloc (nthreads * sizeof (*ws->ordered_team_ids));
      else
        ws->ordered_team_ids = ws->inline_ordered_team_ids;
      memset (ws->ordered_team_ids, '\0',
              nthreads * sizeof (*ws->ordered_team_ids));
      ws->ordered_num_used = 0;
      ws->ordered_owner = -1;
      ws->ordered_cur = 0;
    }
  else
    ws->ordered_team_ids = NULL;
  gomp_ptrlock_init (&ws->next_ws, NULL);
  ws->threads_completed = 0;
}

/* Do any needed destruction of gomp_work_share fields before it
   is put back into the free gomp_work_share cache or freed.  */

void
gomp_fini_work_share (struct gomp_work_share *ws)
{
  gomp_mutex_destroy (&ws->lock);
  if (ws->ordered_team_ids != ws->inline_ordered_team_ids)
    free (ws->ordered_team_ids);
  gomp_ptrlock_destroy (&ws->next_ws);
}

/* Free a work share struct; if not orphaned, put it into the current
   team's free gomp_work_share cache.  */

static inline void
free_work_share (struct gomp_team *team, struct gomp_work_share *ws)
{
  gomp_fini_work_share (ws);
  if (__builtin_expect (team == NULL, 0))
    free (ws);
  else
    {
      struct gomp_work_share *next_ws;
#ifdef HAVE_SYNC_BUILTINS
      do
        {
          next_ws = team->work_share_list_free;
          ws->next_free = next_ws;
        }
      while (!__sync_bool_compare_and_swap (&team->work_share_list_free,
                                            next_ws, ws));
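      /* Lock-free push onto the team's free list: retry the
         compare-and-swap until no other thread has slipped in a new
         head in between.  alloc_work_share never unlinks the head
         node, so the head pointer only ever changes through pushes
         like this one.  */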
#else
      gomp_mutex_lock (&team->work_share_list_free_lock);
      next_ws = team->work_share_list_free;
      ws->next_free = next_ws;
      team->work_share_list_free = ws;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif
    }
}

/* The current thread is ready to begin the next work sharing construct.
   In all cases, thr->ts.work_share is updated to point to the new
   structure.  In all cases the work_share lock is locked.  Return true
   if this was the first thread to reach this point.  */
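
/* A sketch of the intended calling sequence (hypothetical caller; the
   real entry points live in the loop and sections code):

     if (gomp_work_share_start (false))
       {
         ...first thread initializes the scheduling fields of
            thr->ts.work_share...
         ...then publishes it to the threads blocked in
            gomp_ptrlock_get, e.g. with gomp_work_share_init_done ()...
       }
     ...every thread claims work from thr->ts.work_share...
     gomp_work_share_end_nowait ();  */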

int
gomp_work_share_start (int ordered)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      ws = gomp_malloc (sizeof (*ws));
      gomp_init_work_share (ws, ordered, 1);
      thr->ts.work_share = ws;
      /* The orphaned thread is trivially the first here.  */
      return true;
    }

  ws = thr->ts.work_share;
  thr->ts.last_work_share = ws;
  ws = gomp_ptrlock_get (&ws->next_ws);
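  /* gomp_ptrlock_get hands NULL to exactly one thread, which becomes
     responsible for allocating the next work share and storing it into
     next_ws; every other thread blocks inside gomp_ptrlock_get until
     that store happens and then receives the new pointer.  */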
  if (ws == NULL)
    {
      /* This thread encountered a new ws first.  */
      struct gomp_work_share *ws = alloc_work_share (team);
      gomp_init_work_share (ws, ordered, team->nthreads);
      thr->ts.work_share = ws;
      return true;
    }
  else
    {
      thr->ts.work_share = ws;
      return false;
    }
}

/* The current thread is done with its current work sharing construct.
   This version does imply a barrier at the end of the work-share.  */

void
gomp_work_share_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  gomp_barrier_state_t bstate;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, thr->ts.work_share);
      thr->ts.work_share = NULL;
      return;
    }

  bstate = gomp_barrier_wait_start (&team->barrier);

  if (gomp_barrier_last_thread (bstate))
    {
      if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
        free_work_share (team, thr->ts.last_work_share);
    }
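  /* Only the last thread to arrive at the barrier frees the previous
     work share, so it is released exactly once and only after every
     thread has stopped touching it.  */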

  gomp_team_barrier_wait_end (&team->barrier, bstate);
  thr->ts.last_work_share = NULL;
}

/* The current thread is done with its current work sharing construct.
   This version does NOT imply a barrier at the end of the work-share.  */

void
gomp_work_share_end_nowait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned completed;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, ws);
      thr->ts.work_share = NULL;
      return;
    }

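  /* No previous work share on record means there is nothing to retire,
     so the completion count below can be skipped as well.  */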
  if (__builtin_expect (thr->ts.last_work_share == NULL, 0))
    return;

#ifdef HAVE_SYNC_BUILTINS
  completed = __sync_add_and_fetch (&ws->threads_completed, 1);
#else
  gomp_mutex_lock (&ws->lock);
  completed = ++ws->threads_completed;
  gomp_mutex_unlock (&ws->lock);
#endif
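  /* threads_completed lives on the *current* work share and counts how
     many threads have finished the previous one; once all nthreads
     have checked in, no thread can still reference the previous share
     and it is safe to recycle.  */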

  if (completed == team->nthreads)
    free_work_share (team, thr->ts.last_work_share);
  thr->ts.last_work_share = NULL;
}