[Bf-blender-cvs] [ea0f2bbab61] cycles-x: Cycles X: Experiment with progressively lowering noise in viewport

Sergey Sharybin noreply at git.blender.org
Wed May 12 16:10:46 CEST 2021


Commit: ea0f2bbab614b200d5040a1ca1895afcee3a71e6
Author: Sergey Sharybin
Date:   Thu Apr 22 18:06:24 2021 +0200
Branches: cycles-x
https://developer.blender.org/rBea0f2bbab614b200d5040a1ca1895afcee3a71e6

Cycles X: Experiment with progressively lowering noise in viewport

The idea is to make it so that in the viewport all pixels are uniformly
noisy, and the noise gradually becomes lower and lower. This is like
replacing viewport samples with smarter adaptive sampling.

Currently, rendering starts with noise threshold of 0.4, and once
majority of pixels did converge the noise threshold gets halved, until
the configured threshold is reached.

The final result of viewport rendering should be almost the same as
prior to the patch, but because of extra box filtering on a more sparse
pixel sets there could be some differences. Especially visible with the
active pixels overlay in the case when pixels did not converge to the
final noise floor.

From the benchmarks there seems to be no performance loss.

Differential Revision: https://developer.blender.org/D11088

===================================================================

M	intern/cycles/integrator/path_trace.cpp
M	intern/cycles/integrator/render_scheduler.cpp
M	intern/cycles/integrator/render_scheduler.h

===================================================================

diff --git a/intern/cycles/integrator/path_trace.cpp b/intern/cycles/integrator/path_trace.cpp
index e1a0b4c24e7..4b3c131c252 100644
--- a/intern/cycles/integrator/path_trace.cpp
+++ b/intern/cycles/integrator/path_trace.cpp
@@ -196,29 +196,56 @@ void PathTrace::adaptive_sample(RenderWork &render_work)
     return;
   }
 
-  const double start_time = time_dt();
+  bool did_reschedule_on_idle = false;
 
-  bool all_pixels_converged = true;
+  while (true) {
+    VLOG(3) << "Will filter adaptive stopping buffer, threshold "
+            << render_work.adaptive_sampling.threshold;
+    if (render_work.adaptive_sampling.reset) {
+      VLOG(3) << "Will re-calculate convergency flag for currently converged pixels.";
+    }
 
-  VLOG(3) << "Will filter adaptive stopping buffer, threshold "
-          << render_work.adaptive_sampling.threshold;
-  if (render_work.adaptive_sampling.reset) {
-    VLOG(3) << "Will re-calculate convergency flag for currently converged pixels.";
-  }
+    const double start_time = time_dt();
 
-  tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
-    if (path_trace_work->adaptive_sampling_converge_filter_count_active(
-            render_work.adaptive_sampling.threshold, render_work.adaptive_sampling.reset)) {
-      all_pixels_converged = false;
-    }
-  });
+    uint num_active_pixels = 0;
+    tbb::parallel_for_each(path_trace_works_, [&](unique_ptr<PathTraceWork> &path_trace_work) {
+      const uint num_active_pixels_in_work =
+          path_trace_work->adaptive_sampling_converge_filter_count_active(
+              render_work.adaptive_sampling.threshold, render_work.adaptive_sampling.reset);
+      if (num_active_pixels_in_work) {
+        atomic_add_and_fetch_u(&num_active_pixels, num_active_pixels_in_work);
+      }
+    });
 
-  render_scheduler_.report_adaptive_filter_time(
-      render_work, time_dt() - start_time, is_cancel_requested());
+    render_scheduler_.report_adaptive_filter_time(
+        render_work, time_dt() - start_time, is_cancel_requested());
 
-  if (all_pixels_converged) {
-    VLOG(3) << "All pixels converged.";
-    render_scheduler_.set_path_trace_finished(render_work);
+    if (num_active_pixels == 0) {
+      VLOG(3) << "All pixels converged.";
+      if (!render_scheduler_.render_work_reschedule_on_converge(render_work)) {
+        break;
+      }
+      VLOG(3) << "Continuing with lower threshold.";
+    }
+    else if (did_reschedule_on_idle) {
+      break;
+    }
+    else if (num_active_pixels < 128 * 128) {
+      /* NOTE: The hardcoded value of 128^2 is more of an empirical value to keep GPU busy so that
+       * there is no performance loss from the progressive noise floor feature.
+       *
+       * A better heuristic is possible here: for example, use maximum of 128^2 and percentage of
+       * the final resolution. */
+      if (!render_scheduler_.render_work_reschedule_on_idle(render_work)) {
+        VLOG(3) << "Rescheduling is not possible: final threshold is reached.";
+        break;
+      }
+      VLOG(3) << "Rescheduling lower threshold.";
+      did_reschedule_on_idle = true;
+    }
+    else {
+      break;
+    }
   }
 }
 
diff --git a/intern/cycles/integrator/render_scheduler.cpp b/intern/cycles/integrator/render_scheduler.cpp
index b6eee0ffb91..7d98bfe1298 100644
--- a/intern/cycles/integrator/render_scheduler.cpp
+++ b/intern/cycles/integrator/render_scheduler.cpp
@@ -32,6 +32,7 @@ RenderScheduler::RenderScheduler(bool headless, bool background, int pixel_size)
       pixel_size_(pixel_size),
       default_start_resolution_divider_(pixel_size * 8)
 {
+  use_progressive_noise_floor_ = !background_;
 }
 
 bool RenderScheduler::is_background() const
@@ -104,6 +105,10 @@ void RenderScheduler::reset(const BufferParams &buffer_params, int num_samples)
   state_.last_display_update_time = 0.0;
   state_.last_display_update_sample = -1;
 
+  /* TODO(sergey): Choose better initial value. */
+  /* NOTE: The adaptive sampling settings might not be available here yet. */
+  state_.adaptive_sampling_threshold = 0.4f;
+
   state_.path_trace_finished = false;
 
   first_render_time_.path_trace_per_sample = 0.0;
@@ -116,10 +121,16 @@ void RenderScheduler::reset(const BufferParams &buffer_params, int num_samples)
   display_update_time_.reset();
 }
 
-void RenderScheduler::set_path_trace_finished(RenderWork &render_work)
+bool RenderScheduler::render_work_reschedule_on_converge(RenderWork &render_work)
 {
+  /* Move to the next resolution divider. Assume adaptive filtering is not needed during
+   * navigation. */
   if (state_.resolution_divider != pixel_size_) {
-    return;
+    return false;
+  }
+
+  if (render_work_reschedule_on_idle(render_work)) {
+    return true;
   }
 
   state_.path_trace_finished = true;
@@ -128,6 +139,35 @@ void RenderScheduler::set_path_trace_finished(RenderWork &render_work)
   render_work.denoise = work_need_denoise(denoiser_delayed);
 
   render_work.update_display = work_need_update_display(denoiser_delayed);
+
+  return false;
+}
+
+bool RenderScheduler::render_work_reschedule_on_idle(RenderWork &render_work)
+{
+  if (!use_progressive_noise_floor_) {
+    return false;
+  }
+
+  /* Move to the next resolution divider. Assume adaptive filtering is not needed during
+   * navigation. */
+  if (state_.resolution_divider != pixel_size_) {
+    return false;
+  }
+
+  if (adaptive_sampling_.use) {
+    if (state_.adaptive_sampling_threshold > adaptive_sampling_.threshold) {
+      state_.adaptive_sampling_threshold = max(state_.adaptive_sampling_threshold / 2,
+                                               adaptive_sampling_.threshold);
+
+      render_work.adaptive_sampling.threshold = state_.adaptive_sampling_threshold;
+      render_work.adaptive_sampling.reset = true;
+
+      return true;
+    }
+  }
+
+  return false;
 }
 
 bool RenderScheduler::done() const
@@ -167,7 +207,7 @@ RenderWork RenderScheduler::get_render_work()
   state_.num_rendered_samples += render_work.path_trace.num_samples;
 
   render_work.adaptive_sampling.filter = work_need_adaptive_filter();
-  render_work.adaptive_sampling.threshold = adaptive_sampling_.threshold;
+  render_work.adaptive_sampling.threshold = work_adaptive_threshold();
   render_work.adaptive_sampling.reset = false;
 
   bool denoiser_delayed;
@@ -508,6 +548,15 @@ bool RenderScheduler::work_need_adaptive_filter() const
   return adaptive_sampling_.need_filter(get_rendered_sample());
 }
 
+float RenderScheduler::work_adaptive_threshold() const
+{
+  if (!use_progressive_noise_floor_) {
+    return adaptive_sampling_.threshold;
+  }
+
+  return max(state_.adaptive_sampling_threshold, adaptive_sampling_.threshold);
+}
+
 bool RenderScheduler::work_need_denoise(bool &delayed)
 {
   delayed = false;
@@ -582,6 +631,14 @@ bool RenderScheduler::work_need_update_display(const bool denoiser_delayed)
     return true;
   }
 
+  /* For the development purposes of adaptive sampling it might be very useful to see all updates
+   * of active pixels after convergence check. However, it would cause a slowdown for regular usage
+   * users. Possibly, make it a debug panel option to allow rapid update to ease development
+   * without the need to re-compile. */
+  // if (work_need_adaptive_filter()) {
+  //   return true;
+  // }
+
   /* When adaptive sampling is used, its possible that only handful of samples of a very simple
    * scene will be scheduled to a powerful device (in order to not "miss" any of filtering points).
    * We take care of skipping updates here based on when previous display update did happen. */
diff --git a/intern/cycles/integrator/render_scheduler.h b/intern/cycles/integrator/render_scheduler.h
index 066ce8c8510..50872c27ec2 100644
--- a/intern/cycles/integrator/render_scheduler.h
+++ b/intern/cycles/integrator/render_scheduler.h
@@ -98,9 +98,18 @@ class RenderScheduler {
    * Resets current rendered state, as well as scheduling information. */
   void reset(const BufferParams &buffer_params, int num_samples);
 
-  /* Indicate that path tracing has finished (due to adaptive sampling convergency). Only remaining
-   * tasks like denoising will be scheduled after this. */
-  void set_path_trace_finished(RenderWork &render_work);
+  /* Reschedule adaptive sampling work when all pixels did converge.
+   * If there is nothing else to be done for the adaptive sampling (pixels did converge to the
+   * final threshold) then false is returned and the render scheduler will stop scheduling path
+   * tracing works. Otherwise will modify the work's adaptive sampling settings to continue with
+   * a lower threshold. */
+  bool render_work_reschedule_on_converge(RenderWork &render_work);
+
+  /* Reschedule adaptive sampling work when the device is mostly on idle, but not all pixels yet
+   * converged.
+   * If re-scheduling is not possible (adaptive sampling is happening with the final threshold, and
+   * the path tracer is to finish the current pixels) then false is returned. */
+  bool render_work_reschedule_on_idle(RenderWork &render_work);
 
   /* Check whether all work has been scheduled. */
   bool done() const;
@@ -151,6 +160,9 @@ class RenderScheduler {
   /* Whether adaptive sampling convergence check and filter is to happen. */
   bool work_need_adaptive_filter() const;
 
+  /* Calculate threshold for adaptive sampling. */
+  float work_adaptive_threshold() const;
+
   /* Check whether current work needs denoising.
    * Denoising is not needed if the denoiser is not configured, or when denosiing is happening too
    * often.
@@ -227,6 +239,10 @@ class RenderScheduler {
     /* Value of -1 means display was never updated. */
     int last_display_update_sample = -1;
 
+    /* Threshold for adaptive sampling which will be scheduled to work when not using progressive
+     * noise floor. */
+    float adaptive_sampling_threshold = 0.0f;
+
     bool path_trace_finished = false;
   } state_;
 
@@ -261,8 +277,13 @@ class RenderScheduler {
 
   BufferParams buffer_params_;
   DenoiseParams denoiser_params_;
+
   AdaptiveSampling adaptive_sampling_;
 
+  /* Progressively lower adaptive sampling threshold level, keeping the image at a uniform noise
+   * 

@@ Diff output truncated at 10240 characters. @@



More information about the Bf-blender-cvs mailing list