===================================================================
@@ -310,6 +310,7 @@ struct ptx_event
int type;
void *addr;
int ord;
+ int val;
struct ptx_event *next;
};
@@ -786,6 +787,7 @@ static void
event_gc (bool memmap_lockable)
{
struct ptx_event *ptx_event = ptx_events;
+ struct ptx_event *async_cleanups = NULL;
struct nvptx_thread *nvthd = nvptx_thread ();
pthread_mutex_lock (&ptx_event_lock);
@@ -803,6 +805,7 @@ event_gc (bool memmap_lockable)
r = cuEventQuery (*e->evt);
if (r == CUDA_SUCCESS)
{
+ bool append_async = false;
CUevent *te;
te = e->evt;
@@ -827,7 +830,7 @@ event_gc (bool memmap_lockable)
if (!memmap_lockable)
continue;
- GOMP_PLUGIN_async_unmap_vars (e->addr);
+ append_async = true;
}
break;
}
@@ -835,6 +838,7 @@ event_gc (bool memmap_lockable)
cuEventDestroy (*te);
free ((void *)te);
+ /* Unlink 'e' from ptx_events list. */
if (ptx_events == e)
ptx_events = ptx_events->next;
else
@@ -845,15 +849,31 @@ event_gc (bool memmap_lockable)
e_->next = e_->next->next;
}
- free (e);
+ if (append_async)
+ {
+ e->next = async_cleanups;
+ async_cleanups = e;
+ }
+ else
+ free (e);
}
}
pthread_mutex_unlock (&ptx_event_lock);
+
+ /* We have to do these here, after ptx_event_lock is released. */
+ while (async_cleanups)
+ {
+ struct ptx_event *e = async_cleanups;
+ async_cleanups = async_cleanups->next;
+
+ GOMP_PLUGIN_async_unmap_vars (e->addr, e->val);
+ free (e);
+ }
}
static void
-event_add (enum ptx_event_type type, CUevent *e, void *h)
+event_add (enum ptx_event_type type, CUevent *e, void *h, int val)
{
struct ptx_event *ptx_event;
struct nvptx_thread *nvthd = nvptx_thread ();
@@ -866,6 +886,7 @@ static void
ptx_event->evt = e;
ptx_event->addr = h;
ptx_event->ord = nvthd->ptx_dev->ord;
+ ptx_event->val = val;
pthread_mutex_lock (&ptx_event_lock);
@@ -966,7 +987,7 @@ nvptx_exec (void (*fn), size_t mapnum, void **host
if (r != CUDA_SUCCESS)
GOMP_PLUGIN_fatal ("cuEventRecord error: %s", cuda_error (r));
- event_add (PTX_EVT_KNL, e, (void *)dev_str);
+ event_add (PTX_EVT_KNL, e, (void *)dev_str, 0);
}
#else
r = cuCtxSynchronize ();
@@ -1073,7 +1094,7 @@ nvptx_host2dev (void *d, const void *h, size_t s)
if (r != CUDA_SUCCESS)
GOMP_PLUGIN_fatal ("cuEventRecord error: %s", cuda_error (r));
- event_add (PTX_EVT_MEM, e, (void *)h);
+ event_add (PTX_EVT_MEM, e, (void *)h, 0);
}
else
#endif
@@ -1138,7 +1159,7 @@ nvptx_dev2host (void *h, const void *d, size_t s)
if (r != CUDA_SUCCESS)
GOMP_PLUGIN_fatal ("cuEventRecord error: %s", cuda_error (r));
- event_add (PTX_EVT_MEM, e, (void *)h);
+ event_add (PTX_EVT_MEM, e, (void *)h, 0);
}
else
#endif
@@ -1264,7 +1285,7 @@ nvptx_wait_async (int async1, int async2)
if (r != CUDA_SUCCESS)
GOMP_PLUGIN_fatal ("cuEventRecord error: %s", cuda_error (r));
- event_add (PTX_EVT_SYNC, e, NULL);
+ event_add (PTX_EVT_SYNC, e, NULL, 0);
r = cuStreamWaitEvent (s2->stream, *e, 0);
if (r != CUDA_SUCCESS)
@@ -1346,7 +1367,7 @@ nvptx_wait_all_async (int async)
if (r != CUDA_SUCCESS)
GOMP_PLUGIN_fatal ("cuEventRecord error: %s", cuda_error (r));
- event_add (PTX_EVT_SYNC, e, NULL);
+ event_add (PTX_EVT_SYNC, e, NULL, 0);
r = cuStreamWaitEvent (waiting_stream->stream, *e, 0);
if (r != CUDA_SUCCESS)
@@ -1658,7 +1679,7 @@ GOMP_OFFLOAD_openacc_parallel (void (*fn) (void *)
}
void
-GOMP_OFFLOAD_openacc_register_async_cleanup (void *targ_mem_desc)
+GOMP_OFFLOAD_openacc_register_async_cleanup (void *targ_mem_desc, int async)
{
CUevent *e;
CUresult r;
@@ -1674,7 +1695,7 @@ void
if (r != CUDA_SUCCESS)
GOMP_PLUGIN_fatal ("cuEventRecord error: %s", cuda_error (r));
- event_add (PTX_EVT_ASYNC_CLEANUP, e, targ_mem_desc);
+ event_add (PTX_EVT_ASYNC_CLEANUP, e, targ_mem_desc, async);
}
int
===================================================================
@@ -659,10 +659,7 @@ gomp_acc_remove_pointer (void *h, bool force_copyf
if (async < acc_async_noval)
gomp_unmap_vars (t, true);
else
- {
- gomp_copy_from_async (t);
- acc_dev->openacc.register_async_cleanup_func (t);
- }
+ t->device_descr->openacc.register_async_cleanup_func (t, async);
gomp_debug (0, " %s: mappings restored\n", __FUNCTION__);
}
===================================================================
@@ -829,8 +829,6 @@ struct splay_tree_key_s {
uintptr_t tgt_offset;
/* Reference count. */
uintptr_t refcount;
- /* Asynchronous reference count. */
- uintptr_t async_refcount;
};
/* The comparison function. */
@@ -864,7 +862,7 @@ typedef struct acc_dispatch_t
unsigned *, void *);
/* Async cleanup callback registration. */
- void (*register_async_cleanup_func) (void *);
+ void (*register_async_cleanup_func) (void *, int);
/* Asynchronous routines. */
int (*async_test_func) (int);
@@ -958,7 +956,6 @@ extern struct target_mem_desc *gomp_map_vars (stru
size_t, void **, void **,
size_t *, void *, bool,
enum gomp_map_vars_kind);
-extern void gomp_copy_from_async (struct target_mem_desc *);
extern void gomp_unmap_vars (struct target_mem_desc *, bool);
extern void gomp_init_device (struct gomp_device_descr *);
extern void gomp_free_memmap (struct splay_tree_s *);
===================================================================
@@ -31,11 +31,14 @@
#include "oacc-int.h"
void
-GOMP_PLUGIN_async_unmap_vars (void *ptr)
+GOMP_PLUGIN_async_unmap_vars (void *ptr, int async)
{
struct target_mem_desc *tgt = ptr;
+ struct gomp_device_descr *devicep = tgt->device_descr;
- gomp_unmap_vars (tgt, false);
+ devicep->openacc.async_set_async_func (async);
+ gomp_unmap_vars (tgt, true);
+ devicep->openacc.async_set_async_func (acc_async_sync);
}
/* Return the target-specific part of the TLS data for the current thread. */
===================================================================
@@ -27,7 +27,7 @@
#ifndef OACC_PLUGIN_H
#define OACC_PLUGIN_H 1
-extern void GOMP_PLUGIN_async_unmap_vars (void *);
+extern void GOMP_PLUGIN_async_unmap_vars (void *, int);
extern void *GOMP_PLUGIN_acc_thread (void);
#endif
===================================================================
@@ -143,7 +143,8 @@ host_openacc_exec (void (*fn) (void *),
}
static void
-host_openacc_register_async_cleanup (void *targ_mem_desc __attribute__ ((unused)))
+host_openacc_register_async_cleanup (void *targ_mem_desc __attribute__ ((unused)),
+ int async __attribute__ ((unused)))
{
}
===================================================================
@@ -644,7 +644,6 @@ gomp_map_vars (struct gomp_device_descr *devicep,
tgt->list[i].offset = 0;
tgt->list[i].length = k->host_end - k->host_start;
k->refcount = 1;
- k->async_refcount = 0;
tgt->refcount++;
array->left = NULL;
array->right = NULL;
@@ -784,40 +783,6 @@ gomp_unmap_tgt (struct target_mem_desc *tgt)
free (tgt);
}
-/* Decrease the refcount for a set of mapped variables, and queue asychronous
- copies from the device back to the host after any work that has been issued.
- Because the regions are still "live", increment an asynchronous reference
- count to indicate that they should not be unmapped from host-side data
- structures until the asynchronous copy has completed. */
-
-attribute_hidden void
-gomp_copy_from_async (struct target_mem_desc *tgt)
-{
- struct gomp_device_descr *devicep = tgt->device_descr;
- size_t i;
-
- gomp_mutex_lock (&devicep->lock);
-
- for (i = 0; i < tgt->list_count; i++)
- if (tgt->list[i].key == NULL)
- ;
- else if (tgt->list[i].key->refcount > 1)
- {
- tgt->list[i].key->refcount--;
- tgt->list[i].key->async_refcount++;
- }
- else
- {
- splay_tree_key k = tgt->list[i].key;
- if (tgt->list[i].copy_from)
- devicep->dev2host_func (devicep->target_id, (void *) k->host_start,
- (void *) (k->tgt->tgt_start + k->tgt_offset),
- k->host_end - k->host_start);
- }
-
- gomp_mutex_unlock (&devicep->lock);
-}
-
- /* Unmap variables described by TGT. If DO_COPYFROM is true, copy relevant
- variables back from device to host: if it is false, it is assumed that this
- has been done already, i.e. by gomp_copy_from_async above. */
+ /* Unmap variables described by TGT. If DO_COPYFROM is true, copy relevant
+ variables back from device to host: if it is false, it is assumed that this
+ has been done already, i.e. asynchronously via the plugin's registered
+ async cleanup callback. */
@@ -847,13 +812,8 @@ gomp_unmap_vars (struct target_mem_desc *tgt, bool
k->refcount--;
else if (k->refcount == 1)
{
- if (k->async_refcount > 0)
- k->async_refcount--;
- else
- {
- k->refcount--;
- do_unmap = true;
- }
+ k->refcount--;
+ do_unmap = true;
}
if ((do_unmap && do_copyfrom && tgt->list[i].copy_from)
@@ -995,7 +955,6 @@ gomp_load_image_to_device (struct gomp_device_desc
k->tgt = tgt;
k->tgt_offset = target_table[i].start;
k->refcount = REFCOUNT_INFINITY;
- k->async_refcount = 0;
array->left = NULL;
array->right = NULL;
splay_tree_insert (&devicep->mem_map, array);
@@ -1020,7 +979,6 @@ gomp_load_image_to_device (struct gomp_device_desc
k->tgt = tgt;
k->tgt_offset = target_var->start;
k->refcount = REFCOUNT_INFINITY;
- k->async_refcount = 0;
array->left = NULL;
array->right = NULL;
splay_tree_insert (&devicep->mem_map, array);
@@ -2120,7 +2078,6 @@ omp_target_associate_ptr (void *host_ptr, void *de
k->tgt = tgt;
k->tgt_offset = (uintptr_t) device_ptr + device_offset;
k->refcount = REFCOUNT_INFINITY;
- k->async_refcount = 0;
array->left = NULL;
array->right = NULL;
splay_tree_insert (&devicep->mem_map, array);
===================================================================
@@ -182,10 +182,7 @@ GOACC_parallel_keyed (int device, void (*fn) (void
if (async < acc_async_noval)
gomp_unmap_vars (tgt, true);
else
- {
- gomp_copy_from_async (tgt);
- acc_dev->openacc.register_async_cleanup_func (tgt);
- }
+ tgt->device_descr->openacc.register_async_cleanup_func (tgt, async);
acc_dev->openacc.async_set_async_func (acc_async_sync);
}