summary | refs | log | tree | commit | diff | stats
path: root/src/gallium/state_trackers/clover/api
diff options
context:
space:
mode:
authorFrancisco Jerez <[email protected]>2013-10-01 11:54:07 -0700
committerFrancisco Jerez <[email protected]>2013-10-21 10:47:02 -0700
commit257781f243476863591965f22787ff390edd8ba0 (patch)
treee234fa1f688fb17f4fdcbbb2174c56a2f1271e00 /src/gallium/state_trackers/clover/api
parent9d06fb8fa86a70d1c3a4924960dfebabaf1fa3bf (diff)
clover: Switch event objects to the new model.
Tested-by: Tom Stellard <[email protected]>
Diffstat (limited to 'src/gallium/state_trackers/clover/api')
-rw-r--r--src/gallium/state_trackers/clover/api/event.cpp179
-rw-r--r--src/gallium/state_trackers/clover/api/kernel.cpp16
-rw-r--r--src/gallium/state_trackers/clover/api/transfer.cpp107
3 files changed, 158 insertions, 144 deletions
diff --git a/src/gallium/state_trackers/clover/api/event.cpp b/src/gallium/state_trackers/clover/api/event.cpp
index db3a4062cc6..4f689419c99 100644
--- a/src/gallium/state_trackers/clover/api/event.cpp
+++ b/src/gallium/state_trackers/clover/api/event.cpp
@@ -26,93 +26,89 @@
using namespace clover;
PUBLIC cl_event
-clCreateUserEvent(cl_context d_ctx, cl_int *errcode_ret) try {
+clCreateUserEvent(cl_context d_ctx, cl_int *r_errcode) try {
auto &ctx = obj(d_ctx);
- ret_error(errcode_ret, CL_SUCCESS);
- return new soft_event(ctx, {}, false);
+ ret_error(r_errcode, CL_SUCCESS);
+ return desc(new soft_event(ctx, {}, false));
-} catch(error &e) {
- ret_error(errcode_ret, e);
+} catch (error &e) {
+ ret_error(r_errcode, e);
return NULL;
}
PUBLIC cl_int
-clSetUserEventStatus(cl_event ev, cl_int status) {
- if (!dynamic_cast<soft_event *>(ev))
- return CL_INVALID_EVENT;
+clSetUserEventStatus(cl_event d_ev, cl_int status) try {
+ auto &sev = obj<soft_event>(d_ev);
if (status > 0)
return CL_INVALID_VALUE;
- if (ev->status() <= 0)
+ if (sev.status() <= 0)
return CL_INVALID_OPERATION;
if (status)
- ev->abort(status);
+ sev.abort(status);
else
- ev->trigger();
+ sev.trigger();
return CL_SUCCESS;
+
+} catch (error &e) {
+ return e.get();
}
PUBLIC cl_int
-clWaitForEvents(cl_uint num_evs, const cl_event *evs) try {
- if (!num_evs || !evs)
- throw error(CL_INVALID_VALUE);
-
- std::for_each(evs, evs + num_evs, [&](const cl_event ev) {
- if (!ev)
- throw error(CL_INVALID_EVENT);
+clWaitForEvents(cl_uint num_evs, const cl_event *d_evs) try {
+ auto evs = objs(d_evs, num_evs);
- if (&ev->ctx != &evs[0]->ctx)
- throw error(CL_INVALID_CONTEXT);
+ for (auto &ev : evs) {
+ if (&ev.ctx != &evs.front().ctx)
+ throw error(CL_INVALID_CONTEXT);
- if (ev->status() < 0)
- throw error(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST);
- });
+ if (ev.status() < 0)
+ throw error(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST);
+ }
// Create a temporary soft event that depends on all the events in
// the wait list
- ref_ptr<soft_event> sev = transfer(
- new soft_event(evs[0]->ctx, { evs, evs + num_evs }, true));
+ ref_ptr<soft_event> sev =
+ transfer(new soft_event(evs.front().ctx, evs, true));
// ...and wait on it.
sev->wait();
return CL_SUCCESS;
-} catch(error &e) {
+} catch (error &e) {
return e.get();
}
PUBLIC cl_int
-clGetEventInfo(cl_event ev, cl_event_info param,
+clGetEventInfo(cl_event d_ev, cl_event_info param,
size_t size, void *r_buf, size_t *r_size) try {
property_buffer buf { r_buf, size, r_size };
-
- if (!ev)
- return CL_INVALID_EVENT;
+ auto &ev = obj(d_ev);
switch (param) {
case CL_EVENT_COMMAND_QUEUE:
- buf.as_scalar<cl_command_queue>() = ev->queue();
+ buf.as_scalar<cl_command_queue>() = ev.queue();
break;
case CL_EVENT_CONTEXT:
- buf.as_scalar<cl_context>() = &ev->ctx;
+ buf.as_scalar<cl_context>() = desc(ev.ctx);
break;
case CL_EVENT_COMMAND_TYPE:
- buf.as_scalar<cl_command_type>() = ev->command();
+ buf.as_scalar<cl_command_type>() = ev.command();
break;
case CL_EVENT_COMMAND_EXECUTION_STATUS:
- buf.as_scalar<cl_int>() = ev->status();
+ buf.as_scalar<cl_int>() = ev.status();
break;
case CL_EVENT_REFERENCE_COUNT:
- buf.as_scalar<cl_uint>() = ev->ref_count();
+ buf.as_scalar<cl_uint>() = ev.ref_count();
break;
default:
@@ -126,132 +122,124 @@ clGetEventInfo(cl_event ev, cl_event_info param,
}
PUBLIC cl_int
-clSetEventCallback(cl_event ev, cl_int type,
- void (CL_CALLBACK *pfn_event_notify)(cl_event, cl_int,
- void *),
+clSetEventCallback(cl_event d_ev, cl_int type,
+ void (CL_CALLBACK *pfn_notify)(cl_event, cl_int, void *),
void *user_data) try {
- if (!ev)
- throw error(CL_INVALID_EVENT);
+ auto &ev = obj(d_ev);
- if (!pfn_event_notify || type != CL_COMPLETE)
+ if (!pfn_notify || type != CL_COMPLETE)
throw error(CL_INVALID_VALUE);
// Create a temporary soft event that depends on ev, with
- // pfn_event_notify as completion action.
+ // pfn_notify as completion action.
ref_ptr<soft_event> sev = transfer(
- new soft_event(ev->ctx, { ev }, true,
- [=](event &) {
- ev->wait();
- pfn_event_notify(ev, ev->status(), user_data);
+ new soft_event(ev.ctx, { ev }, true,
+ [=, &ev](event &) {
+ ev.wait();
+ pfn_notify(desc(ev), ev.status(), user_data);
}));
return CL_SUCCESS;
-} catch(error &e) {
+} catch (error &e) {
return e.get();
}
PUBLIC cl_int
-clRetainEvent(cl_event ev) {
- if (!ev)
- return CL_INVALID_EVENT;
-
- ev->retain();
+clRetainEvent(cl_event d_ev) try {
+ obj(d_ev).retain();
return CL_SUCCESS;
+
+} catch (error &e) {
+ return e.get();
}
PUBLIC cl_int
-clReleaseEvent(cl_event ev) {
- if (!ev)
- return CL_INVALID_EVENT;
-
- if (ev->release())
- delete ev;
+clReleaseEvent(cl_event d_ev) try {
+ if (obj(d_ev).release())
+ delete pobj(d_ev);
return CL_SUCCESS;
+
+} catch (error &e) {
+ return e.get();
}
PUBLIC cl_int
-clEnqueueMarker(cl_command_queue q, cl_event *ev) try {
- if (!q)
+clEnqueueMarker(cl_command_queue d_q, cl_event *rd_ev) try {
+ if (!d_q)
throw error(CL_INVALID_COMMAND_QUEUE);
- if (!ev)
+ if (!rd_ev)
throw error(CL_INVALID_VALUE);
- *ev = new hard_event(*q, CL_COMMAND_MARKER, {});
+ *rd_ev = desc(new hard_event(*d_q, CL_COMMAND_MARKER, {}));
return CL_SUCCESS;
-} catch(error &e) {
+} catch (error &e) {
return e.get();
}
PUBLIC cl_int
-clEnqueueBarrier(cl_command_queue q) {
- if (!q)
+clEnqueueBarrier(cl_command_queue d_q) {
+ if (!d_q)
return CL_INVALID_COMMAND_QUEUE;
// No need to do anything, q preserves data ordering strictly.
+
return CL_SUCCESS;
}
PUBLIC cl_int
-clEnqueueWaitForEvents(cl_command_queue q, cl_uint num_evs,
- const cl_event *evs) try {
- if (!q)
+clEnqueueWaitForEvents(cl_command_queue d_q, cl_uint num_evs,
+ const cl_event *d_evs) try {
+ if (!d_q)
throw error(CL_INVALID_COMMAND_QUEUE);
- if (!num_evs || !evs)
- throw error(CL_INVALID_VALUE);
+ auto &q = *d_q;
+ auto evs = objs(d_evs, num_evs);
- std::for_each(evs, evs + num_evs, [&](const cl_event ev) {
- if (!ev)
- throw error(CL_INVALID_EVENT);
-
- if (&ev->ctx != &q->ctx)
+ for (auto &ev : evs) {
+ if (&ev.ctx != &q.ctx)
throw error(CL_INVALID_CONTEXT);
- });
+ }
// Create a hard event that depends on the events in the wait list:
// subsequent commands in the same queue will be implicitly
// serialized with respect to it -- hard events always are.
- ref_ptr<hard_event> hev = transfer(
- new hard_event(*q, 0, { evs, evs + num_evs }));
+ ref_ptr<hard_event> hev = transfer(new hard_event(q, 0, evs));
return CL_SUCCESS;
-} catch(error &e) {
+} catch (error &e) {
return e.get();
}
PUBLIC cl_int
-clGetEventProfilingInfo(cl_event ev, cl_profiling_info param,
+clGetEventProfilingInfo(cl_event d_ev, cl_profiling_info param,
size_t size, void *r_buf, size_t *r_size) try {
property_buffer buf { r_buf, size, r_size };
- hard_event *hev = dynamic_cast<hard_event *>(ev);
-
- if (!ev)
- return CL_INVALID_EVENT;
+ hard_event &hev = dynamic_cast<hard_event &>(obj(d_ev));
- if (!hev || hev->status() != CL_COMPLETE)
- return CL_PROFILING_INFO_NOT_AVAILABLE;
+ if (hev.status() != CL_COMPLETE)
+ throw error(CL_PROFILING_INFO_NOT_AVAILABLE);
switch (param) {
case CL_PROFILING_COMMAND_QUEUED:
- buf.as_scalar<cl_ulong>() = hev->time_queued();
+ buf.as_scalar<cl_ulong>() = hev.time_queued();
break;
case CL_PROFILING_COMMAND_SUBMIT:
- buf.as_scalar<cl_ulong>() = hev->time_submit();
+ buf.as_scalar<cl_ulong>() = hev.time_submit();
break;
case CL_PROFILING_COMMAND_START:
- buf.as_scalar<cl_ulong>() = hev->time_start();
+ buf.as_scalar<cl_ulong>() = hev.time_start();
break;
case CL_PROFILING_COMMAND_END:
- buf.as_scalar<cl_ulong>() = hev->time_end();
+ buf.as_scalar<cl_ulong>() = hev.time_end();
break;
default:
@@ -260,6 +248,9 @@ clGetEventProfilingInfo(cl_event ev, cl_profiling_info param,
return CL_SUCCESS;
+} catch (std::bad_cast &e) {
+ return CL_PROFILING_INFO_NOT_AVAILABLE;
+
} catch (lazy<cl_ulong>::undefined_error &e) {
return CL_PROFILING_INFO_NOT_AVAILABLE;
@@ -268,19 +259,19 @@ clGetEventProfilingInfo(cl_event ev, cl_profiling_info param,
}
PUBLIC cl_int
-clFinish(cl_command_queue q) try {
- if (!q)
+clFinish(cl_command_queue d_q) try {
+ if (!d_q)
throw error(CL_INVALID_COMMAND_QUEUE);
// Create a temporary hard event -- it implicitly depends on all
// the previously queued hard events.
- ref_ptr<hard_event> hev = transfer(new hard_event(*q, 0, { }));
+ ref_ptr<hard_event> hev = transfer(new hard_event(*d_q, 0, { }));
// And wait on it.
hev->wait();
return CL_SUCCESS;
-} catch(error &e) {
+} catch (error &e) {
return e.get();
}
diff --git a/src/gallium/state_trackers/clover/api/kernel.cpp b/src/gallium/state_trackers/clover/api/kernel.cpp
index a1152652a59..3335ee6a713 100644
--- a/src/gallium/state_trackers/clover/api/kernel.cpp
+++ b/src/gallium/state_trackers/clover/api/kernel.cpp
@@ -217,7 +217,7 @@ namespace {
if (&kern->prog.ctx != &q->ctx ||
any_of([&](const cl_event ev) {
- return &ev->ctx != &q->ctx;
+ return &obj(ev).ctx != &q->ctx;
}, range(deps, num_deps)))
throw error(CL_INVALID_CONTEXT);
@@ -285,17 +285,18 @@ PUBLIC cl_int
clEnqueueNDRangeKernel(cl_command_queue q, cl_kernel kern,
cl_uint dims, const size_t *pgrid_offset,
const size_t *pgrid_size, const size_t *pblock_size,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
auto grid_offset = opt_vector(pgrid_offset, dims, 0);
auto grid_size = opt_vector(pgrid_size, dims, 1);
auto block_size = opt_vector(pblock_size, dims, 1);
kernel_validate(q, kern, dims, pgrid_offset, pgrid_size, pblock_size,
- num_deps, deps, ev);
+ num_deps, d_deps, ev);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_NDRANGE_KERNEL, { deps, deps + num_deps },
+ *q, CL_COMMAND_NDRANGE_KERNEL, deps,
kernel_op(q, kern, grid_offset, grid_size, block_size));
ret_object(ev, hev);
@@ -307,17 +308,18 @@ clEnqueueNDRangeKernel(cl_command_queue q, cl_kernel kern,
PUBLIC cl_int
clEnqueueTask(cl_command_queue q, cl_kernel kern,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
const std::vector<size_t> grid_offset = { 0 };
const std::vector<size_t> grid_size = { 1 };
const std::vector<size_t> block_size = { 1 };
kernel_validate(q, kern, 1, grid_offset.data(), grid_size.data(),
- block_size.data(), num_deps, deps, ev);
+ block_size.data(), num_deps, d_deps, ev);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_TASK, { deps, deps + num_deps },
+ *q, CL_COMMAND_TASK, deps,
kernel_op(q, kern, grid_offset, grid_size, block_size));
ret_object(ev, hev);
diff --git a/src/gallium/state_trackers/clover/api/transfer.cpp b/src/gallium/state_trackers/clover/api/transfer.cpp
index d2aae67bebe..62f9d326ddf 100644
--- a/src/gallium/state_trackers/clover/api/transfer.cpp
+++ b/src/gallium/state_trackers/clover/api/transfer.cpp
@@ -49,7 +49,7 @@ namespace {
throw error(CL_INVALID_EVENT_WAIT_LIST);
if (any_of([&](const cl_event ev) {
- return &ev->ctx != &q->ctx;
+ return &obj(ev).ctx != &q->ctx;
}, range(deps, num_deps)))
throw error(CL_INVALID_CONTEXT);
}
@@ -146,16 +146,18 @@ namespace {
PUBLIC cl_int
clEnqueueReadBuffer(cl_command_queue q, cl_mem obj, cl_bool blocking,
size_t offset, size_t size, void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, obj);
if (!ptr || offset > obj->size() || offset + size > obj->size())
throw error(CL_INVALID_VALUE);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_READ_BUFFER, { deps, deps + num_deps },
+ *q, CL_COMMAND_READ_BUFFER, deps,
soft_copy_op(q,
ptr, {{ 0 }}, {{ 1 }},
obj, {{ offset }}, {{ 1 }},
@@ -171,16 +173,18 @@ clEnqueueReadBuffer(cl_command_queue q, cl_mem obj, cl_bool blocking,
PUBLIC cl_int
clEnqueueWriteBuffer(cl_command_queue q, cl_mem obj, cl_bool blocking,
size_t offset, size_t size, const void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, obj);
if (!ptr || offset > obj->size() || offset + size > obj->size())
throw error(CL_INVALID_VALUE);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_WRITE_BUFFER, { deps, deps + num_deps },
+ *q, CL_COMMAND_WRITE_BUFFER, deps,
soft_copy_op(q,
obj, {{ offset }}, {{ 1 }},
ptr, {{ 0 }}, {{ 1 }},
@@ -200,16 +204,18 @@ clEnqueueReadBufferRect(cl_command_queue q, cl_mem obj, cl_bool blocking,
size_t obj_row_pitch, size_t obj_slice_pitch,
size_t host_row_pitch, size_t host_slice_pitch,
void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, obj);
if (!ptr)
throw error(CL_INVALID_VALUE);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_READ_BUFFER_RECT, { deps, deps + num_deps },
+ *q, CL_COMMAND_READ_BUFFER_RECT, deps,
soft_copy_op(q,
ptr, vector(host_origin),
{{ 1, host_row_pitch, host_slice_pitch }},
@@ -231,16 +237,18 @@ clEnqueueWriteBufferRect(cl_command_queue q, cl_mem obj, cl_bool blocking,
size_t obj_row_pitch, size_t obj_slice_pitch,
size_t host_row_pitch, size_t host_slice_pitch,
const void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, obj);
if (!ptr)
throw error(CL_INVALID_VALUE);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_WRITE_BUFFER_RECT, { deps, deps + num_deps },
+ *q, CL_COMMAND_WRITE_BUFFER_RECT, deps,
soft_copy_op(q,
obj, vector(obj_origin),
{{ 1, obj_row_pitch, obj_slice_pitch }},
@@ -258,14 +266,16 @@ clEnqueueWriteBufferRect(cl_command_queue q, cl_mem obj, cl_bool blocking,
PUBLIC cl_int
clEnqueueCopyBuffer(cl_command_queue q, cl_mem src_obj, cl_mem dst_obj,
size_t src_offset, size_t dst_offset, size_t size,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, src_obj);
validate_obj(q, dst_obj);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_COPY_BUFFER, { deps, deps + num_deps },
+ *q, CL_COMMAND_COPY_BUFFER, deps,
hard_copy_op(q, dst_obj, {{ dst_offset }},
src_obj, {{ src_offset }},
{{ size, 1, 1 }}));
@@ -283,14 +293,16 @@ clEnqueueCopyBufferRect(cl_command_queue q, cl_mem src_obj, cl_mem dst_obj,
const size_t *region,
size_t src_row_pitch, size_t src_slice_pitch,
size_t dst_row_pitch, size_t dst_slice_pitch,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, src_obj);
validate_obj(q, dst_obj);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_COPY_BUFFER_RECT, { deps, deps + num_deps },
+ *q, CL_COMMAND_COPY_BUFFER_RECT, deps,
soft_copy_op(q,
dst_obj, vector(dst_origin),
{{ 1, dst_row_pitch, dst_slice_pitch }},
@@ -309,18 +321,19 @@ PUBLIC cl_int
clEnqueueReadImage(cl_command_queue q, cl_mem obj, cl_bool blocking,
const size_t *origin, const size_t *region,
size_t row_pitch, size_t slice_pitch, void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
image *img = dynamic_cast<image *>(obj);
- validate_base(q, num_deps, deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, img);
if (!ptr)
throw error(CL_INVALID_VALUE);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_READ_IMAGE, { deps, deps + num_deps },
+ *q, CL_COMMAND_READ_IMAGE, deps,
soft_copy_op(q,
ptr, {},
{{ 1, row_pitch, slice_pitch }},
@@ -339,18 +352,19 @@ PUBLIC cl_int
clEnqueueWriteImage(cl_command_queue q, cl_mem obj, cl_bool blocking,
const size_t *origin, const size_t *region,
size_t row_pitch, size_t slice_pitch, const void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
image *img = dynamic_cast<image *>(obj);
- validate_base(q, num_deps, deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, img);
if (!ptr)
throw error(CL_INVALID_VALUE);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_WRITE_IMAGE, { deps, deps + num_deps },
+ *q, CL_COMMAND_WRITE_IMAGE, deps,
soft_copy_op(q,
obj, vector(origin),
{{ 1, img->row_pitch(), img->slice_pitch() }},
@@ -369,17 +383,18 @@ PUBLIC cl_int
clEnqueueCopyImage(cl_command_queue q, cl_mem src_obj, cl_mem dst_obj,
const size_t *src_origin, const size_t *dst_origin,
const size_t *region,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
image *src_img = dynamic_cast<image *>(src_obj);
image *dst_img = dynamic_cast<image *>(dst_obj);
- validate_base(q, num_deps, deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, src_img);
validate_obj(q, dst_img);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_COPY_IMAGE, { deps, deps + num_deps },
+ *q, CL_COMMAND_COPY_IMAGE, deps,
hard_copy_op(q,
dst_obj, vector(dst_origin),
src_obj, vector(src_origin),
@@ -396,16 +411,17 @@ PUBLIC cl_int
clEnqueueCopyImageToBuffer(cl_command_queue q, cl_mem src_obj, cl_mem dst_obj,
const size_t *src_origin, const size_t *region,
size_t dst_offset,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
image *src_img = dynamic_cast<image *>(src_obj);
- validate_base(q, num_deps, deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, src_img);
validate_obj(q, dst_obj);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_COPY_IMAGE_TO_BUFFER, { deps, deps + num_deps },
+ *q, CL_COMMAND_COPY_IMAGE_TO_BUFFER, deps,
soft_copy_op(q,
dst_obj, {{ dst_offset }},
{{ 0, 0, 0 }},
@@ -424,16 +440,17 @@ PUBLIC cl_int
clEnqueueCopyBufferToImage(cl_command_queue q, cl_mem src_obj, cl_mem dst_obj,
size_t src_offset,
const size_t *dst_origin, const size_t *region,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
image *dst_img = dynamic_cast<image *>(dst_obj);
- validate_base(q, num_deps, deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, src_obj);
validate_obj(q, dst_img);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_COPY_BUFFER_TO_IMAGE, { deps, deps + num_deps },
+ *q, CL_COMMAND_COPY_BUFFER_TO_IMAGE, deps,
soft_copy_op(q,
dst_obj, vector(dst_origin),
{{ 1, dst_img->row_pitch(), dst_img->slice_pitch() }},
@@ -451,9 +468,10 @@ clEnqueueCopyBufferToImage(cl_command_queue q, cl_mem src_obj, cl_mem dst_obj,
PUBLIC void *
clEnqueueMapBuffer(cl_command_queue q, cl_mem obj, cl_bool blocking,
cl_map_flags flags, size_t offset, size_t size,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev, cl_int *errcode_ret) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, obj);
if (offset > obj->size() || offset + size > obj->size())
@@ -463,7 +481,7 @@ clEnqueueMapBuffer(cl_command_queue q, cl_mem obj, cl_bool blocking,
*q, flags, blocking, {{ offset }}, {{ size }});
ret_object(ev, new hard_event(*q, CL_COMMAND_MAP_BUFFER,
- { deps, deps + num_deps }));
+ deps));
ret_error(errcode_ret, CL_SUCCESS);
return map;
@@ -477,18 +495,19 @@ clEnqueueMapImage(cl_command_queue q, cl_mem obj, cl_bool blocking,
cl_map_flags flags,
const size_t *origin, const size_t *region,
size_t *row_pitch, size_t *slice_pitch,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev, cl_int *errcode_ret) try {
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
image *img = dynamic_cast<image *>(obj);
- validate_base(q, num_deps, deps);
+ validate_base(q, num_deps, d_deps);
validate_obj(q, img);
void *map = obj->resource(q).add_map(
*q, flags, blocking, vector(origin), vector(region));
ret_object(ev, new hard_event(*q, CL_COMMAND_MAP_IMAGE,
- { deps, deps + num_deps }));
+ deps));
ret_error(errcode_ret, CL_SUCCESS);
return map;
@@ -499,13 +518,15 @@ clEnqueueMapImage(cl_command_queue q, cl_mem obj, cl_bool blocking,
PUBLIC cl_int
clEnqueueUnmapMemObject(cl_command_queue q, cl_mem obj, void *ptr,
- cl_uint num_deps, const cl_event *deps,
+ cl_uint num_deps, const cl_event *d_deps,
cl_event *ev) try {
- validate_base(q, num_deps, deps);
+ auto deps = objs<wait_list_tag>(d_deps, num_deps);
+
+ validate_base(q, num_deps, d_deps);
validate_obj(q, obj);
hard_event *hev = new hard_event(
- *q, CL_COMMAND_UNMAP_MEM_OBJECT, { deps, deps + num_deps },
+ *q, CL_COMMAND_UNMAP_MEM_OBJECT, deps,
[=](event &) {
obj->resource(q).del_map(ptr);
});