[Bf-blender-cvs] [56e4d350fbd] cycles-x: Cycles X: Remove redundant field in path trace works
Sergey Sharybin
noreply at git.blender.org
Tue Jun 29 12:20:22 CEST 2021
Commit: 56e4d350fbda6a5d6c76417b92e500ef13348c95
Author: Sergey Sharybin
Date: Mon Jun 21 17:19:38 2021 +0200
Branches: cycles-x
https://developer.blender.org/rB56e4d350fbda6a5d6c76417b92e500ef13348c95
Cycles X: Remove redundant field in path trace works
Render buffers are available via the base class, no need to duplicate
them in the derived classes.
===================================================================
M intern/cycles/integrator/path_trace_work_cpu.cpp
M intern/cycles/integrator/path_trace_work_cpu.h
M intern/cycles/integrator/path_trace_work_gpu.cpp
M intern/cycles/integrator/path_trace_work_gpu.h
===================================================================
diff --git a/intern/cycles/integrator/path_trace_work_cpu.cpp b/intern/cycles/integrator/path_trace_work_cpu.cpp
index a7f7bb49f29..6da81903240 100644
--- a/intern/cycles/integrator/path_trace_work_cpu.cpp
+++ b/intern/cycles/integrator/path_trace_work_cpu.cpp
@@ -56,8 +56,7 @@ PathTraceWorkCPU::PathTraceWorkCPU(Device *device,
RenderBuffers *buffers,
bool *cancel_requested_flag)
: PathTraceWork(device, device_scene, buffers, cancel_requested_flag),
- kernels_(*(device->get_cpu_kernels())),
- render_buffers_(buffers)
+ kernels_(*(device->get_cpu_kernels()))
{
DCHECK_EQ(device->info.type, DEVICE_CPU);
}
@@ -114,7 +113,7 @@ void PathTraceWorkCPU::render_samples_full_pipeline(KernelGlobals *kernel_global
IntegratorState *shadow_catcher_state = &integrator_states[1];
KernelWorkTile sample_work_tile = work_tile;
- float *render_buffer = render_buffers_->buffer.data();
+ float *render_buffer = buffers_->buffer.data();
for (int sample = 0; sample < samples_num; ++sample) {
if (is_cancel_requested()) {
@@ -164,7 +163,7 @@ void PathTraceWorkCPU::copy_to_gpu_display(GPUDisplay *gpu_display,
tbb::task_arena local_arena = local_tbb_arena_create(device_);
local_arena.execute([&]() {
- pass_accessor.get_render_tile_pixels(render_buffers_, effective_buffer_params_, destination);
+ pass_accessor.get_render_tile_pixels(buffers_, effective_buffer_params_, destination);
});
gpu_display->unmap_texture_buffer();
@@ -179,7 +178,7 @@ int PathTraceWorkCPU::adaptive_sampling_converge_filter_count_active(float thres
const int offset = effective_buffer_params_.offset;
const int stride = effective_buffer_params_.stride;
- float *render_buffer = render_buffers_->buffer.data();
+ float *render_buffer = buffers_->buffer.data();
uint num_active_pixels = 0;
diff --git a/intern/cycles/integrator/path_trace_work_cpu.h b/intern/cycles/integrator/path_trace_work_cpu.h
index 9f42212e9fd..016bed0d25f 100644
--- a/intern/cycles/integrator/path_trace_work_cpu.h
+++ b/intern/cycles/integrator/path_trace_work_cpu.h
@@ -69,9 +69,6 @@ class PathTraceWorkCPU : public PathTraceWork {
* accessing it, but some "localization" is required to decouple from kernel globals stored
* on the device level. */
vector<CPUKernelThreadGlobals> kernel_thread_globals_;
-
- /* Render output buffers. */
- RenderBuffers *render_buffers_;
};
CCL_NAMESPACE_END
diff --git a/intern/cycles/integrator/path_trace_work_gpu.cpp b/intern/cycles/integrator/path_trace_work_gpu.cpp
index 3b8372d4da2..f543809e5f4 100644
--- a/intern/cycles/integrator/path_trace_work_gpu.cpp
+++ b/intern/cycles/integrator/path_trace_work_gpu.cpp
@@ -36,7 +36,6 @@ PathTraceWorkGPU::PathTraceWorkGPU(Device *device,
bool *cancel_requested_flag)
: PathTraceWork(device, device_scene, buffers, cancel_requested_flag),
queue_(device->gpu_queue_create()),
- render_buffers_(buffers),
integrator_queue_counter_(device, "integrator_queue_counter", MEM_READ_WRITE),
integrator_shader_sort_counter_(device, "integrator_shader_sort_counter", MEM_READ_WRITE),
integrator_shader_raytrace_sort_counter_(
@@ -332,7 +331,7 @@ void PathTraceWorkGPU::enqueue_path_iteration(DeviceKernel kernel)
case DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_RAYTRACE:
case DEVICE_KERNEL_INTEGRATOR_SHADE_VOLUME: {
/* Shading kernels with integrator state and render buffer. */
- void *d_render_buffer = (void *)render_buffers_->buffer.device_pointer;
+ void *d_render_buffer = (void *)buffers_->buffer.device_pointer;
void *args[] = {&d_path_index, &d_render_buffer, const_cast<int *>(&work_size)};
queue_->enqueue(kernel, work_size, args);
@@ -562,7 +561,7 @@ void PathTraceWorkGPU::enqueue_work_tiles(DeviceKernel kernel,
void *d_work_tiles = (void *)work_tiles_.device_pointer;
void *d_path_index = (void *)nullptr;
- void *d_render_buffer = (void *)render_buffers_->buffer.device_pointer;
+ void *d_render_buffer = (void *)buffers_->buffer.device_pointer;
if (max_active_path_index_ != 0) {
queue_->zero_to_device(num_queued_paths_);
@@ -668,8 +667,8 @@ void PathTraceWorkGPU::copy_to_gpu_display_naive(GPUDisplay *gpu_display,
PassMode pass_mode,
int num_samples)
{
- const int final_width = render_buffers_->params.width;
- const int final_height = render_buffers_->params.height;
+ const int final_width = buffers_->params.width;
+ const int final_height = buffers_->params.height;
/* Re-allocate display memory if needed, and make sure the device pointer is allocated.
*
@@ -727,7 +726,7 @@ void PathTraceWorkGPU::run_film_convert(device_ptr d_rgba_half,
PassAccessor::Destination destination(pass_access_info.type);
destination.d_pixels_half_rgba = d_rgba_half;
- pass_accessor.get_render_tile_pixels(render_buffers_, effective_buffer_params_, destination);
+ pass_accessor.get_render_tile_pixels(buffers_, effective_buffer_params_, destination);
}
int PathTraceWorkGPU::adaptive_sampling_converge_filter_count_active(float threshold, bool reset)
@@ -752,7 +751,7 @@ int PathTraceWorkGPU::adaptive_sampling_convergence_check_count_active(float thr
const int work_size = effective_buffer_params_.width * effective_buffer_params_.height;
- void *args[] = {&render_buffers_->buffer.device_pointer,
+ void *args[] = {&buffers_->buffer.device_pointer,
const_cast<int *>(&effective_buffer_params_.full_x),
const_cast<int *>(&effective_buffer_params_.full_y),
const_cast<int *>(&effective_buffer_params_.width),
@@ -775,7 +774,7 @@ void PathTraceWorkGPU::enqueue_adaptive_sampling_filter_x()
{
const int work_size = effective_buffer_params_.height;
- void *args[] = {&render_buffers_->buffer.device_pointer,
+ void *args[] = {&buffers_->buffer.device_pointer,
&effective_buffer_params_.full_x,
&effective_buffer_params_.full_y,
&effective_buffer_params_.width,
@@ -790,7 +789,7 @@ void PathTraceWorkGPU::enqueue_adaptive_sampling_filter_y()
{
const int work_size = effective_buffer_params_.width;
- void *args[] = {&render_buffers_->buffer.device_pointer,
+ void *args[] = {&buffers_->buffer.device_pointer,
&effective_buffer_params_.full_x,
&effective_buffer_params_.full_y,
&effective_buffer_params_.width,
diff --git a/intern/cycles/integrator/path_trace_work_gpu.h b/intern/cycles/integrator/path_trace_work_gpu.h
index 0a1bf6e1dc5..7309d948362 100644
--- a/intern/cycles/integrator/path_trace_work_gpu.h
+++ b/intern/cycles/integrator/path_trace_work_gpu.h
@@ -109,9 +109,6 @@ class PathTraceWorkGPU : public PathTraceWork {
/* Scheduler which gives work to path tracing threads. */
WorkTileScheduler work_tile_scheduler_;
- /* Output render buffer. */
- RenderBuffers *render_buffers_;
-
/* Integrate state for paths. */
IntegratorStateGPU integrator_state_gpu_;
/* SoA arrays for integrator state. */
More information about the Bf-blender-cvs
mailing list