From a0d99937a0b4e778cc0b2e93886c814dd8819f6d Mon Sep 17 00:00:00 2001
From: Francisco Jerez
Date: Tue, 18 Feb 2014 17:13:39 +0100
Subject: clover: Replace the transfer(new ...) idiom with a safer create(...)
 helper function.

Tested-by: Tom Stellard
---
 src/gallium/state_trackers/clover/api/event.cpp    | 22 ++++++++---------
 src/gallium/state_trackers/clover/api/kernel.cpp   |  4 ++--
 src/gallium/state_trackers/clover/api/transfer.cpp | 28 +++++++++++-----------
 src/gallium/state_trackers/clover/api/util.hpp     | 17 ++++++-------
 .../state_trackers/clover/core/platform.cpp        |  2 +-
 src/gallium/state_trackers/clover/util/pointer.hpp | 26 ++++++--------------
 6 files changed, 43 insertions(+), 56 deletions(-)

diff --git a/src/gallium/state_trackers/clover/api/event.cpp b/src/gallium/state_trackers/clover/api/event.cpp
index 7057b77db1e..6b1956c8f2f 100644
--- a/src/gallium/state_trackers/clover/api/event.cpp
+++ b/src/gallium/state_trackers/clover/api/event.cpp
@@ -72,11 +72,10 @@ clWaitForEvents(cl_uint num_evs, const cl_event *d_evs) try {
 
    // Create a temporary soft event that depends on all the events in
    // the wait list
-   intrusive_ptr<soft_event> sev =
-      transfer(new soft_event(evs.front().context(), evs, true));
+   auto sev = create<soft_event>(evs.front().context(), evs, true);
 
    // ...and wait on it.
-   sev->wait();
+   sev().wait();
 
    return CL_SUCCESS;
 
@@ -132,12 +131,11 @@ clSetEventCallback(cl_event d_ev, cl_int type,
 
    // Create a temporary soft event that depends on ev, with
    // pfn_notify as completion action.
-   intrusive_ptr<soft_event> sev = transfer(
-      new soft_event(ev.context(), { ev }, true,
-                     [=, &ev](event &) {
-                        ev.wait();
-                        pfn_notify(desc(ev), ev.status(), user_data);
-                     }));
+   create<soft_event>(ev.context(), ref_vector<event>{ ev }, true,
+                      [=, &ev](event &) {
+                         ev.wait();
+                         pfn_notify(desc(ev), ev.status(), user_data);
+                      });
 
    return CL_SUCCESS;
 
@@ -206,7 +204,7 @@ clEnqueueWaitForEvents(cl_command_queue d_q, cl_uint num_evs,
    // Create a hard event that depends on the events in the wait list:
    // subsequent commands in the same queue will be implicitly
    // serialized with respect to it -- hard events always are.
-   intrusive_ptr<hard_event> hev = transfer(new hard_event(q, 0, evs));
+   create<hard_event>(q, 0, evs);
 
    return CL_SUCCESS;
 
@@ -262,10 +260,10 @@ clFinish(cl_command_queue d_q) try {
 
    // Create a temporary hard event -- it implicitly depends on all
    // the previously queued hard events.
-   intrusive_ptr<hard_event> hev = transfer(new hard_event(q, 0, { }));
+   auto hev = create<hard_event>(q, 0, ref_vector<event>{});
 
    // And wait on it.
-   hev->wait();
+   hev().wait();
 
    return CL_SUCCESS;
 
diff --git a/src/gallium/state_trackers/clover/api/kernel.cpp b/src/gallium/state_trackers/clover/api/kernel.cpp
index f1a9bd84d34..96cf302b161 100644
--- a/src/gallium/state_trackers/clover/api/kernel.cpp
+++ b/src/gallium/state_trackers/clover/api/kernel.cpp
@@ -276,7 +276,7 @@ clEnqueueNDRangeKernel(cl_command_queue d_q, cl_kernel d_kern,
 
    validate_common(q, kern, deps);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
       q, CL_COMMAND_NDRANGE_KERNEL, deps,
       [=, &kern, &q](event &) {
         kern.launch(q, grid_offset, grid_size, block_size);
@@ -299,7 +299,7 @@ clEnqueueTask(cl_command_queue d_q, cl_kernel d_kern,
 
    validate_common(q, kern, deps);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
       q, CL_COMMAND_TASK, deps,
       [=, &kern, &q](event &) {
         kern.launch(q, { 0 }, { 1 }, { 1 });
diff --git a/src/gallium/state_trackers/clover/api/transfer.cpp b/src/gallium/state_trackers/clover/api/transfer.cpp
index 02d0dd0a71d..72f0b3041a9 100644
--- a/src/gallium/state_trackers/clover/api/transfer.cpp
+++ b/src/gallium/state_trackers/clover/api/transfer.cpp
@@ -246,7 +246,7 @@ clEnqueueReadBuffer(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
    validate_object(q, ptr, {}, obj_pitch, region);
    validate_object(q, mem, obj_origin, obj_pitch, region);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
       q, CL_COMMAND_READ_BUFFER, deps,
       soft_copy_op(q, ptr, {}, obj_pitch,
                    &mem, obj_origin, obj_pitch,
@@ -275,7 +275,7 @@ clEnqueueWriteBuffer(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
    validate_object(q, mem, obj_origin, obj_pitch, region);
    validate_object(q, ptr, {}, obj_pitch, region);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
       q, CL_COMMAND_WRITE_BUFFER, deps,
       soft_copy_op(q, &mem, obj_origin, obj_pitch,
                    ptr, {}, obj_pitch,
@@ -311,7 +311,7 @@ clEnqueueReadBufferRect(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
    validate_object(q, ptr, host_origin, host_pitch, region);
    validate_object(q, mem, obj_origin, obj_pitch, region);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
       q, CL_COMMAND_READ_BUFFER_RECT, deps,
       soft_copy_op(q, ptr, host_origin, host_pitch,
                    &mem, obj_origin, obj_pitch,
@@ -347,7 +347,7 @@ clEnqueueWriteBufferRect(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
    validate_object(q, mem, obj_origin, obj_pitch, region);
    validate_object(q, ptr, host_origin, host_pitch, region);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
       q, CL_COMMAND_WRITE_BUFFER_RECT, deps,
       soft_copy_op(q, &mem, obj_origin, obj_pitch,
                    ptr, host_origin, host_pitch,
@@ -381,7 +381,7 @@ clEnqueueCopyBuffer(cl_command_queue d_q, cl_mem d_src_mem, cl_mem d_dst_mem,
    validate_copy(q, dst_mem, dst_origin, dst_pitch,
                  src_mem, src_origin, src_pitch, region);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
       q, CL_COMMAND_COPY_BUFFER, deps,
       hard_copy_op(q, &dst_mem, dst_origin,
                    &src_mem, src_origin, region));
@@ -418,7 +418,7 @@ clEnqueueCopyBufferRect(cl_command_queue d_q, cl_mem d_src_mem,
    validate_copy(q, dst_mem, dst_origin, dst_pitch,
                  src_mem, src_origin, src_pitch, region);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
       q, CL_COMMAND_COPY_BUFFER_RECT, deps,
       soft_copy_op(q, &dst_mem, dst_origin, dst_pitch,
                    &src_mem, src_origin, src_pitch,
@@ -451,7 +451,7 @@ clEnqueueReadImage(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
    validate_object(q, ptr, {}, dst_pitch, region);
    validate_object(q, img, src_origin, region);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
      q, CL_COMMAND_READ_IMAGE, deps,
      soft_copy_op(q, ptr, {}, dst_pitch,
                   &img, src_origin, src_pitch,
@@ -484,7 +484,7 @@ clEnqueueWriteImage(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
    validate_object(q, img, dst_origin, region);
    validate_object(q, ptr, {}, src_pitch, region);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
      q, CL_COMMAND_WRITE_IMAGE, deps,
      soft_copy_op(q, &img, dst_origin, dst_pitch,
                   ptr, {}, src_pitch,
@@ -516,7 +516,7 @@ clEnqueueCopyImage(cl_command_queue d_q, cl_mem d_src_mem, cl_mem d_dst_mem,
    validate_object(q, src_img, src_origin, region);
    validate_copy(q, dst_img, dst_origin, src_img, src_origin, region);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
      q, CL_COMMAND_COPY_IMAGE, deps,
      hard_copy_op(q, &dst_img, dst_origin,
                   &src_img, src_origin,
@@ -552,7 +552,7 @@ clEnqueueCopyImageToBuffer(cl_command_queue d_q,
    validate_object(q, dst_mem, dst_origin, dst_pitch, region);
    validate_object(q, src_img, src_origin, region);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
      q, CL_COMMAND_COPY_IMAGE_TO_BUFFER, deps,
      soft_copy_op(q, &dst_mem, dst_origin, dst_pitch,
                   &src_img, src_origin, src_pitch,
@@ -588,7 +588,7 @@ clEnqueueCopyBufferToImage(cl_command_queue d_q,
    validate_object(q, dst_img, dst_origin, region);
    validate_object(q, src_mem, src_origin, src_pitch, region);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
      q, CL_COMMAND_COPY_BUFFER_TO_IMAGE, deps,
      soft_copy_op(q, &dst_img, dst_origin, dst_pitch,
                   &src_mem, src_origin, src_pitch,
@@ -618,7 +618,7 @@ clEnqueueMapBuffer(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
    void *map = mem.resource(q).add_map(q, flags, blocking, obj_origin, region);
 
-   ret_object(rd_ev, new hard_event(q, CL_COMMAND_MAP_BUFFER, deps));
+   ret_object(rd_ev, create<hard_event>(q, CL_COMMAND_MAP_BUFFER, deps));
    ret_error(r_errcode, CL_SUCCESS);
    return map;
@@ -645,7 +645,7 @@ clEnqueueMapImage(cl_command_queue d_q, cl_mem d_mem, cl_bool blocking,
    void *map = img.resource(q).add_map(q, flags, blocking, origin, region);
 
-   ret_object(rd_ev, new hard_event(q, CL_COMMAND_MAP_IMAGE, deps));
+   ret_object(rd_ev, create<hard_event>(q, CL_COMMAND_MAP_IMAGE, deps));
    ret_error(r_errcode, CL_SUCCESS);
    return map;
@@ -664,7 +664,7 @@ clEnqueueUnmapMemObject(cl_command_queue d_q, cl_mem d_mem, void *ptr,
 
    validate_common(q, deps);
 
-   hard_event *hev = new hard_event(
+   auto hev = create<hard_event>(
      q, CL_COMMAND_UNMAP_MEM_OBJECT, deps,
      [=, &q, &mem](event &) {
         mem.resource(q).del_map(ptr);
diff --git a/src/gallium/state_trackers/clover/api/util.hpp b/src/gallium/state_trackers/clover/api/util.hpp
index 60c8709cc05..c2b216ec236 100644
--- a/src/gallium/state_trackers/clover/api/util.hpp
+++ b/src/gallium/state_trackers/clover/api/util.hpp
@@ -48,16 +48,17 @@ namespace clover {
    }
 
    ///
-   /// Return a reference-counted object in \a p if non-zero.
-   /// Otherwise release object ownership.
+   /// Return a clover object in \a p if non-zero incrementing the
+   /// reference count of the object.
   ///
-   template<typename T, typename S>
+   template<typename T>
    void
-   ret_object(T p, S v) {
-      if (p)
-         *p = v;
-      else
-         v->release();
+   ret_object(typename T::descriptor_type **p,
+              const intrusive_ref<T> &v) {
+      if (p) {
+         v().retain();
+         *p = desc(v());
+      }
    }
 }
 
diff --git a/src/gallium/state_trackers/clover/core/platform.cpp b/src/gallium/state_trackers/clover/core/platform.cpp
index 0a12a61b398..328b71cdcb1 100644
--- a/src/gallium/state_trackers/clover/core/platform.cpp
+++ b/src/gallium/state_trackers/clover/core/platform.cpp
@@ -32,7 +32,7 @@ platform::platform() : adaptor_range(evals(), devs) {
 
    for (pipe_loader_device *ldev : ldevs) {
       try {
-         devs.push_back(transfer(*new device(*this, ldev)));
+         devs.push_back(create<device>(*this, ldev));
       } catch (error &) {
          pipe_loader_release(&ldev, 1);
       }
diff --git a/src/gallium/state_trackers/clover/util/pointer.hpp b/src/gallium/state_trackers/clover/util/pointer.hpp
index 98823aa7860..59c6e6e9f64 100644
--- a/src/gallium/state_trackers/clover/util/pointer.hpp
+++ b/src/gallium/state_trackers/clover/util/pointer.hpp
@@ -121,18 +121,6 @@ namespace clover {
       T *p;
    };
 
-   ///
-   /// Transfer the caller's ownership of a reference-counted object
-   /// to a clover::intrusive_ptr smart pointer.
-   ///
-   template<typename T>
-   inline intrusive_ptr<T>
-   transfer(T *p) {
-      intrusive_ptr<T> ref { p };
-      p->release();
-      return ref;
-   }
-
    ///
    /// Intrusive smart reference for objects that implement the
    /// clover::ref_counter interface.
@@ -188,14 +176,14 @@ namespace clover {
    };
 
    ///
-   /// Transfer the caller's ownership of a reference-counted object
-   /// to a clover::intrusive_ref smart reference.
+   /// Initialize a clover::intrusive_ref from a newly created object
+   /// using the specified constructor arguments.
    ///
-   template<typename T>
+   template<typename T, typename... As>
-   inline intrusive_ref<T>
+   intrusive_ref<T>
-   transfer(T &o) {
-      intrusive_ref<T> ref { o };
-      o.release();
+   create(As &&... as) {
+      intrusive_ref<T> ref { *new T(std::forward<As>(as)...) };
+      ref().release();
       return ref;
    }
-- 
cgit v1.2.3
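
Editor's note: the snippet below is an illustrative, self-contained sketch of the
idiom this patch introduces, not code from the Mesa tree. ref_counter, intrusive_ref
and event here are simplified stand-ins for the clover classes of the same names,
and create/transfer are reduced to their essentials. It only shows why
create<T>(args...) is safer than the old transfer(new T(...)) pattern: the raw
owning pointer and the balancing release() never escape the helper.

#include <cassert>
#include <utility>

struct ref_counter {                    // simplified stand-in for clover::ref_counter
   void retain() { ++refs; }
   bool release() { return --refs == 0; }
   int refs = 1;                        // a freshly constructed object holds one reference
};

template<typename T>
class intrusive_ref {                   // simplified stand-in for clover::intrusive_ref
public:
   intrusive_ref(T &o) : p(&o) { p->retain(); }
   intrusive_ref(const intrusive_ref &r) : p(r.p) { p->retain(); }
   ~intrusive_ref() { if (p->release()) delete p; }
   T &operator()() const { return *p; }
private:
   T *p;
};

// Old idiom: the caller does `new` itself and has to remember to drop the
// construction-time reference through transfer(), leaking on any slip-up.
template<typename T>
intrusive_ref<T>
transfer(T &o) {
   intrusive_ref<T> ref { o };
   o.release();                         // balance the reference taken at construction
   return ref;
}

// New idiom: create<T>() hides both the `new` and the balancing release(),
// so no raw owning pointer is ever visible at the call site.
template<typename T, typename... As>
intrusive_ref<T>
create(As &&... as) {
   intrusive_ref<T> ref { *new T(std::forward<As>(as)...) };
   ref().release();
   return ref;
}

struct event : ref_counter {            // hypothetical payload type for the example
   explicit event(int id) : id(id) {}
   int id;
};

int main() {
   auto ev = create<event>(42);         // was: transfer(*new event(42))
   assert(ev().id == 42 && ev().refs == 1);
}

With the helper, call sites reduce to a single create<T>(...) expression, which is
the shape of the new call sites in event.cpp, kernel.cpp and transfer.cpp above.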