source: trunk/sys/libgomp/team.c @ 54

/* Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the maintenance of threads in response to team
   creation and termination.  */

#include <gomp/libgomp.h>
#include <stdio.h>   /* for the fprintf diagnostic in gomp_team_start */
#include <stdlib.h>
#include <string.h>

/* This attribute contains PTHREAD_CREATE_DETACHED.  */
pthread_attr_t gomp_thread_attr;

/* This key is for the thread destructor.  */
pthread_key_t gomp_thread_destructor;


/* This is the libgomp per-thread data structure.  */
#ifdef HAVE_TLS
__thread struct gomp_thread gomp_tls_data;
#else
pthread_key_t gomp_tls_key;
#endif


/* This structure is used to communicate across pthread_create.  */

struct gomp_thread_start_data
{
  void (*fn) (void *);
  void *fn_data;
  struct gomp_team_state ts;
  struct gomp_task *task;
  struct gomp_thread_pool *thread_pool;
  bool nested;
};


/* This function is a pthread_create entry point.  This contains the idle
   loop in which a thread waits to be called up to become part of a team.  */

static void *
gomp_thread_start (void *xdata)
{
  struct gomp_thread_start_data *data = xdata;
  struct gomp_thread *thr;
  struct gomp_thread_pool *pool;
  void (*local_fn) (void *);
  void *local_data;

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  struct gomp_thread local_thr;
  thr = &local_thr;
  pthread_setspecific (gomp_tls_key, thr);
#endif

  gomp_sem_init (&thr->release, 0);

  /* Extract what we need from data.  */
  local_fn = data->fn;
  local_data = data->fn_data;
  thr->thread_pool = data->thread_pool;
  thr->ts = data->ts;
  thr->task = data->task;

  thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;

  /* Make thread pool local. */
  pool = thr->thread_pool;

  if (data->nested)
    {
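      /* Threads of a nested team are created fresh for this region:
         each runs the region exactly once and then exits, so there is
         no idle loop on this path.  */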
      struct gomp_team *team = thr->ts.team;
      struct gomp_task *task = thr->task;

      gomp_barrier_wait (&team->barrier);

      local_fn (local_data);
      gomp_team_barrier_wait (&team->barrier);
      gomp_finish_task (task);
      gomp_barrier_wait_last (&team->barrier);
    }
  else
    {
      pool->threads[thr->ts.team_id] = thr;

      gomp_barrier_wait (&pool->threads_dock);
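
      /* Idle loop: each pass runs one parallel region.  After the
         region the thread re-docks on threads_dock and picks up the
         fn/data pair installed by gomp_team_start; a NULL fn after
         release tells the thread to exit.  */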
      do
        {
          struct gomp_team *team = thr->ts.team;
          struct gomp_task *task = thr->task;

          local_fn (local_data);
          gomp_team_barrier_wait (&team->barrier);
          gomp_finish_task (task);

          gomp_barrier_wait (&pool->threads_dock);

          local_fn = thr->fn;
          local_data = thr->data;
          thr->fn = NULL;
        }
      while (local_fn);
    }

  gomp_sem_destroy (&thr->release);
  return NULL;
}


/* Create a new team data structure.  */

struct gomp_team *
gomp_new_team (unsigned nthreads)
{
  struct gomp_team *team;
  size_t size;
  int i;

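  /* A single allocation holds the team structure plus two trailing
     per-thread arrays: implicit_task[nthreads] and, after it, the
     ordered_release[] pointer array (set up below).  */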
  size = sizeof (*team) + nthreads * (sizeof (team->ordered_release[0])
                                      + sizeof (team->implicit_task[0]));
  team = gomp_malloc (size);

  team->work_share_chunk = 8;
#ifdef HAVE_SYNC_BUILTINS
  team->single_count = 0;
#else
  gomp_mutex_init (&team->work_share_list_free_lock);
#endif
  gomp_init_work_share (&team->work_shares[0], false, nthreads);
  team->work_shares[0].next_alloc = NULL;
  team->work_share_list_free = NULL;
  team->work_share_list_alloc = &team->work_shares[1];
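  /* Chain the remaining inline work shares (work_share_chunk is 8)
     into the allocation free list; the last one ends the chain.  */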
  for (i = 1; i < 7; i++)
    team->work_shares[i].next_free = &team->work_shares[i + 1];
  team->work_shares[i].next_free = NULL;

  team->nthreads = nthreads;
  gomp_barrier_init (&team->barrier, nthreads);

  gomp_sem_init (&team->master_release, 0);
  team->ordered_release = (void *) &team->implicit_task[nthreads];
  team->ordered_release[0] = &team->master_release;

  gomp_mutex_init (&team->task_lock);
  team->task_queue = NULL;
  team->task_count = 0;
  team->task_running_count = 0;

  return team;
}


/* Free a team data structure.  */

static void
free_team (struct gomp_team *team)
{
  gomp_barrier_destroy (&team->barrier);
  gomp_mutex_destroy (&team->task_lock);
  free (team);
}

/* Allocate and initialize a thread pool.  */

static struct gomp_thread_pool *
gomp_new_thread_pool (void)
{
  struct gomp_thread_pool *pool
    = gomp_malloc (sizeof (struct gomp_thread_pool));
  pool->threads = NULL;
  pool->threads_size = 0;
  pool->threads_used = 0;
  pool->last_team = NULL;
  return pool;
}

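/* Helper run by each pool thread during pool teardown: join the dock
   barrier one last time, release per-thread resources, and exit.  */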
static void
gomp_free_pool_helper (void *thread_pool)
{
  struct gomp_thread_pool *pool
    = (struct gomp_thread_pool *) thread_pool;

  gomp_barrier_wait_last (&pool->threads_dock);
  gomp_sem_destroy (&gomp_thread ()->release);
  pthread_exit (NULL);
}

/* Free a thread pool and release its threads.  */

static void
gomp_free_thread (void *arg __attribute__((unused)))
{
  struct gomp_thread *thr;
  struct gomp_thread_pool *pool;

#ifndef HAVE_TLS
  pthread_setspecific (gomp_tls_key, arg);
#endif

  thr = gomp_thread ();
  pool = thr->thread_pool;

  if (pool)
    {
      if (pool->threads_used > 0)
        {
          int i;
          for (i = 1; i < (int) pool->threads_used; i++)
            {
              struct gomp_thread *nthr = pool->threads[i];
              nthr->fn = gomp_free_pool_helper;
              nthr->data = pool;
            }
          /* This barrier undocks threads docked on pool->threads_dock.  */
          gomp_barrier_wait (&pool->threads_dock);
          /* And this waits till all threads have called gomp_barrier_wait_last
             in gomp_free_pool_helper.  */
          gomp_barrier_wait (&pool->threads_dock);
          /* Now it is safe to destroy the barrier and free the pool.  */
          gomp_barrier_destroy (&pool->threads_dock);
        }
      free (pool->threads);
      if (pool->last_team)
        free_team (pool->last_team);
      free (pool);
      thr->thread_pool = NULL;
    }
  if (thr->task != NULL)
    {
      struct gomp_task *task = thr->task;
      gomp_end_task ();
      free (task);
    }

#ifndef HAVE_TLS
  pthread_key_delete (gomp_tls_key);
#endif
}

/* Launch a team.  */

void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
                 struct gomp_team *team)
{
  struct gomp_thread_start_data *start_data;
  struct gomp_thread *thr, *nthr;
  struct gomp_task *task;
  struct gomp_task_icv *icv;
  bool nested;
  struct gomp_thread_pool *pool;
  unsigned i, n, old_threads_used = 0;
  pthread_attr_t thread_attr, *attr;

  thr = gomp_thread ();
  nested = thr->ts.team != NULL;
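
  /* Lazily create this thread's pool on first use, and register the
     thread with the destructor key so the pool is reclaimed when the
     thread exits.  */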
  if (__builtin_expect (thr->thread_pool == NULL, 0))
    {
      thr->thread_pool = gomp_new_thread_pool ();
      pthread_setspecific (gomp_thread_destructor, thr);
    }
  pool = thr->thread_pool;
  task = thr->task;
  icv = task ? &task->icv : &gomp_global_icv;

  /* Always save the previous state, even if this isn't a nested team.
     In particular, we should save any work share state from an outer
     orphaned work share construct.  */
  team->prev_ts = thr->ts;

  thr->ts.team = team;
  thr->ts.team_id = 0;
  ++thr->ts.level;
  if (nthreads > 1)
    ++thr->ts.active_level;
  thr->ts.work_share = &team->work_shares[0];
  thr->ts.last_work_share = NULL;
#ifdef HAVE_SYNC_BUILTINS
  thr->ts.single_count = 0;
#endif
  thr->ts.static_trip = 0;
  thr->task = &team->implicit_task[0];
  gomp_init_task (thr->task, task, icv);

  if (nthreads == 1)
    return;

  i = 1;

  /* We only allow the reuse of idle threads for non-nested PARALLEL
     regions.  This appears to be implied by the semantics of
     threadprivate variables, but perhaps that's reading too much into
     things.  Certainly it does prevent any locking problems, since
     only the initial program thread will modify gomp_threads.  */
  if (!nested)
    {
      old_threads_used = pool->threads_used;

      if (nthreads <= old_threads_used)
        n = nthreads;
      else if (old_threads_used == 0)
        {
          n = 0;
          gomp_barrier_init (&pool->threads_dock, nthreads);
        }
      else
        {
          n = old_threads_used;

          /* Increase the barrier threshold to make sure all new
             threads arrive before the team is released.  */
          gomp_barrier_reinit (&pool->threads_dock, nthreads);
        }

      /* Not true yet, but soon will be.  We're going to release all
         threads from the dock, and those that aren't part of the
         team will exit.  */
      pool->threads_used = nthreads;

      /* Release existing idle threads.  */
      for (; i < n; ++i)
        {
          nthr = pool->threads[i];
          nthr->ts.team = team;
          nthr->ts.work_share = &team->work_shares[0];
          nthr->ts.last_work_share = NULL;
          nthr->ts.team_id = i;
          nthr->ts.level = team->prev_ts.level + 1;
          nthr->ts.active_level = thr->ts.active_level;
#ifdef HAVE_SYNC_BUILTINS
          nthr->ts.single_count = 0;
#endif
          nthr->ts.static_trip = 0;
          nthr->task = &team->implicit_task[i];
          gomp_init_task (nthr->task, task, icv);
          nthr->fn = fn;
          nthr->data = data;
          team->ordered_release[i] = &nthr->release;
        }

      if (i == nthreads)
        goto do_release;

      /* If necessary, expand the size of the gomp_threads array.  It is
         expected that changes in the number of threads are rare, thus we
         make no effort to expand gomp_threads_size geometrically.  */
      if (nthreads >= pool->threads_size)
        {
          pool->threads_size = nthreads + 1;
          pool->threads
            = gomp_realloc (pool->threads,
                            pool->threads_size
                            * sizeof (struct gomp_thread *));
        }
    }

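  /* Account for the new threads in gomp_managed_threads.  Only
     nthreads - 1 threads are actually created when the pool starts
     out empty, since the master fills slot 0; hence the decrement.  */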
  if (__builtin_expect (nthreads > old_threads_used, 0))
    {
      long diff = (long) nthreads - (long) old_threads_used;

      if (old_threads_used == 0)
        --diff;

#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, diff);
#else
      gomp_mutex_lock (&gomp_remaining_threads_lock);
      gomp_managed_threads += diff;
      gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
    }

  attr = &gomp_thread_attr;
  if (__builtin_expect (gomp_cpu_affinity != NULL, 0))
    {
      size_t stacksize;
      pthread_attr_init (&thread_attr);
      pthread_attr_setdetachstate (&thread_attr, PTHREAD_CREATE_DETACHED);
      if (! pthread_attr_getstacksize (&gomp_thread_attr, &stacksize))
        pthread_attr_setstacksize (&thread_attr, stacksize);
      attr = &thread_attr;
    }

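  /* start_data can live on the master's stack: each new thread copies
     what it needs out of it before reaching the release barrier below,
     and the master does not return before that barrier completes.  */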
  start_data = gomp_alloca (sizeof (struct gomp_thread_start_data)
                            * (nthreads - i));

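  /* Ask which CPU the master thread is running on.
     pthread_attr_getcpuid_np is a non-portable (_np) extension of this
     platform's pthread library.  */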
  int n_master;
  int error = pthread_attr_getcpuid_np (&n_master);
  if (error != 0)
    {
      fprintf (stderr, "ERROR: %s: failed to get cpu id\n", __FUNCTION__);
      n_master = -1;
    }

  /* Launch new threads.  */
  for (; i < nthreads; ++i, ++start_data)
    {
      pthread_t pt;
      int err;

      start_data->fn = fn;
      start_data->fn_data = data;
      start_data->ts.team = team;
      start_data->ts.work_share = &team->work_shares[0];
      start_data->ts.last_work_share = NULL;
      start_data->ts.team_id = i;
      start_data->ts.level = team->prev_ts.level + 1;
      start_data->ts.active_level = thr->ts.active_level;
#ifdef HAVE_SYNC_BUILTINS
      start_data->ts.single_count = 0;
#endif
      start_data->ts.static_trip = 0;
      start_data->task = &team->implicit_task[i];
      gomp_init_task (start_data->task, task, icv);
      start_data->thread_pool = pool;
      start_data->nested = nested;

      if (gomp_cpu_affinity != NULL)
        gomp_init_thread_affinity (attr);

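      /* Pin thread i to CPU i.  The master already occupies CPU
         n_master, so the thread whose id equals n_master takes CPU 0
         instead; the two effectively swap places.  */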
      if (i == (unsigned) n_master)
        pthread_attr_setcpuid_np (attr, 0, NULL);
      else
        pthread_attr_setcpuid_np (attr, i, NULL);

      err = pthread_create (&pt, attr, gomp_thread_start, start_data);
      if (err != 0)
        gomp_fatal ("Thread creation failed: %s", strerror (err));
    }

  if (__builtin_expect (gomp_cpu_affinity != NULL, 0))
    pthread_attr_destroy (&thread_attr);

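  /* Master and all team threads (docked or newly created) meet at this
     barrier; past it, the team threads run fn while the master returns
     to execute its own portion of the region.  */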
 do_release:
  gomp_barrier_wait (nested ? &team->barrier : &pool->threads_dock);

  /* Decrease the barrier threshold to match the number of threads
     that should arrive back at the end of this team.  The extra
     threads should be exiting.  Note that we arrange for this test
     to never be true for nested teams.  */
  if (__builtin_expect (nthreads < old_threads_used, 0))
    {
      long diff = (long) nthreads - (long) old_threads_used;

      gomp_barrier_reinit (&pool->threads_dock, nthreads);

#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, diff);
#else
      gomp_mutex_lock (&gomp_remaining_threads_lock);
      gomp_managed_threads += diff;
      gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
    }
}


/* Terminate the current team.  This is only to be called by the master
   thread.  We assume that we must wait for the other threads.  */

void
gomp_team_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  /* This barrier handles all pending explicit threads.  */
  gomp_team_barrier_wait (&team->barrier);
  gomp_fini_work_share (thr->ts.work_share);

  gomp_end_task ();
  thr->ts = team->prev_ts;

  if (__builtin_expect (thr->ts.team != NULL, 0))
    {
#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, 1L - team->nthreads);
#else
      gomp_mutex_lock (&gomp_remaining_threads_lock);
      gomp_managed_threads -= team->nthreads - 1L;
      gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif
      /* This barrier has gomp_barrier_wait_last counterparts
         and ensures the team can be safely destroyed.  */
      gomp_barrier_wait (&team->barrier);
    }

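  /* Free any work share blocks that were chained beyond the team's
     inline work_shares[] array.  */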
  if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0))
    {
      struct gomp_work_share *ws = team->work_shares[0].next_alloc;
      do
        {
          struct gomp_work_share *next_ws = ws->next_alloc;
          free (ws);
          ws = next_ws;
        }
      while (ws != NULL);
    }
  gomp_sem_destroy (&team->master_release);
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_destroy (&team->work_share_list_free_lock);
#endif

  if (__builtin_expect (thr->ts.team != NULL, 0)
      || __builtin_expect (team->nthreads == 1, 0))
    free_team (team);
  else
    {
      struct gomp_thread_pool *pool = thr->thread_pool;
      if (pool->last_team)
        free_team (pool->last_team);
      pool->last_team = team;
    }
}

/* Constructors for this file.  */
void
initialize_team (void)
{
  struct gomp_thread *thr;

#ifndef HAVE_TLS
  static struct gomp_thread initial_thread_tls_data;
  int err;

  if ((err = pthread_key_create (&gomp_tls_key, NULL)))
    gomp_fatal ("could not create gomp_tls_key");

  if ((err = pthread_setspecific (gomp_tls_key, &initial_thread_tls_data)))
    gomp_fatal ("could not set initial thread TLS data");
#endif

  if (pthread_key_create (&gomp_thread_destructor, gomp_free_thread) != 0)
    gomp_fatal ("could not create thread pool destructor");

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  thr = &initial_thread_tls_data;
#endif
  gomp_sem_init (&thr->release, 0);
}

void
team_destructor (void)
{
  /* Without this, dlclose on libgomp could lead to subsequent
     crashes.  The call is currently disabled.  */
  /* pthread_key_delete (gomp_thread_destructor); */
}

struct gomp_task_icv *
gomp_new_icv (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = gomp_malloc (sizeof (struct gomp_task));
  gomp_init_task (task, NULL, &gomp_global_icv);
  thr->task = task;
  pthread_setspecific (gomp_thread_destructor, thr);
  return &task->icv;
}