[Bf-blender-cvs] [613a75a3f1b] cycles-x: Cycles X: Initial implementation of shadow catcher denoising
Sergey Sharybin
noreply at git.blender.org
Tue Jun 15 12:04:12 CEST 2021
Commit: 613a75a3f1b4ed08938fb335e725bd7738969092
Author: Sergey Sharybin
Date: Fri Jun 11 19:01:54 2021 +0200
Branches: cycles-x
https://developer.blender.org/rB613a75a3f1b4ed08938fb335e725bd7738969092
Cycles X: Initial implementation of shadow catcher denoising
Currently only implemented for OIDN denoiser.
Almost straightforward. The tricky part is the albedo pass: using the
real albedo causes unexpected discontinuity of the shadow catcher based
on the albedo of the surface the shadow is cast on. Worked around by
passing fake albedo values. The downside is that it requires a full
frame of constant values. Not sure if there is a way around it.
Differential Revision: https://developer.blender.org/D11585
===================================================================
M intern/cycles/blender/blender_sync.cpp
M intern/cycles/integrator/denoiser_oidn.cpp
M intern/cycles/integrator/pass_accessor.cpp
M intern/cycles/integrator/pass_accessor.h
M intern/cycles/integrator/path_trace_work.cpp
M intern/cycles/kernel/kernel_film.h
M intern/cycles/kernel/kernel_types.h
M intern/cycles/render/scene.cpp
===================================================================
diff --git a/intern/cycles/blender/blender_sync.cpp b/intern/cycles/blender/blender_sync.cpp
index 240f5c3b079..98d37878356 100644
--- a/intern/cycles/blender/blender_sync.cpp
+++ b/intern/cycles/blender/blender_sync.cpp
@@ -555,7 +555,8 @@ static BlenderPassInfo get_blender_pass_info(BL::RenderPass &b_pass)
MAP_PASS("Denoising Normal", PASS_DENOISING_NORMAL);
MAP_PASS("Denoising Albedo", PASS_DENOISING_ALBEDO);
- MAP_PASS("Shadow Catcher", PASS_SHADOW_CATCHER);
+ MAP_PASS("Shadow Catcher", PASS_SHADOW_CATCHER, PassMode::DENOISED);
+ MAP_PASS("Noisy Shadow Catcher", PASS_SHADOW_CATCHER);
MAP_PASS("Debug Render Time", PASS_RENDER_TIME);
@@ -575,6 +576,8 @@ void BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay, BL::ViewLayer &b_v
{
PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles");
+ bool add_denoised_passes = false;
+
vector<Pass> passes;
/* loop over passes */
@@ -601,6 +604,8 @@ void BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay, BL::ViewLayer &b_v
PointerRNA crl = RNA_pointer_get(&b_view_layer.ptr, "cycles");
if (get_boolean(crl, "denoising_store_passes")) {
+ add_denoised_passes = true;
+
b_engine.add_pass("Noisy Image", 4, "RGBA", b_view_layer.name().c_str());
Pass::add(passes, PASS_COMBINED, "Noisy Image");
@@ -611,6 +616,8 @@ void BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay, BL::ViewLayer &b_v
Pass::add(passes, PASS_DENOISING_ALBEDO, "Denoising Albedo");
}
else if (get_boolean(cscene, "use_denoising")) {
+ add_denoised_passes = true;
+
b_engine.add_pass("Noisy Image", 4, "RGBA", b_view_layer.name().c_str());
Pass::add(passes, PASS_COMBINED, "Noisy Image");
}
@@ -634,7 +641,12 @@ void BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay, BL::ViewLayer &b_v
if (get_boolean(crl, "use_pass_shadow_catcher")) {
b_engine.add_pass("Shadow Catcher", 4, "RGBA", b_view_layer.name().c_str());
- Pass::add(passes, PASS_SHADOW_CATCHER, "Shadow Catcher");
+ Pass::add_denoising_read(passes, PASS_SHADOW_CATCHER, "Shadow Catcher");
+
+ if (add_denoised_passes) {
+ b_engine.add_pass("Noisy Shadow Catcher", 4, "RGBA", b_view_layer.name().c_str());
+ Pass::add(passes, PASS_SHADOW_CATCHER, "Noisy Shadow Catcher");
+ }
}
/* Cryptomatte stores two ID/weight pairs per RGBA layer.
diff --git a/intern/cycles/integrator/denoiser_oidn.cpp b/intern/cycles/integrator/denoiser_oidn.cpp
index e94d570d9de..aca913c92c5 100644
--- a/intern/cycles/integrator/denoiser_oidn.cpp
+++ b/intern/cycles/integrator/denoiser_oidn.cpp
@@ -82,6 +82,9 @@ class OIDNPass {
{
offset = buffer_params.get_pass_offset(type, mode);
need_scale = (type == PASS_DENOISING_ALBEDO || type == PASS_DENOISING_NORMAL);
+
+ const PassInfo pass_info = Pass::get_info(type);
+ use_compositing = pass_info.use_compositing;
}
/* Name of an image which will be passed to the OIDN library.
@@ -104,6 +107,8 @@ class OIDNPass {
* outside of generic pass handling. */
bool need_scale = false;
+ bool use_compositing = false;
+
/* For the scaled passes, the data which holds values of scaled pixels. */
array<float> scaled_buffer;
};
@@ -122,33 +127,45 @@ class OIDNDeenoiseContext {
num_samples_(num_samples),
pass_sample_count_(buffer_params_.get_pass_offset(PASS_SAMPLE_COUNT))
{
+ if (denoise_params_.use_pass_albedo) {
+ oidn_albedo_pass_ = OIDNPass(buffer_params_, "albedo", PASS_DENOISING_ALBEDO);
+ /* NOTE: The albedo pass is always ensured to be set from the denoise() call, since it is
+ * possible that some passes will not use the real values. */
+ }
+
+ if (denoise_params_.use_pass_normal) {
+ oidn_normal_pass_ = OIDNPass(buffer_params_, "normal", PASS_DENOISING_NORMAL);
+ set_pass(oidn_normal_pass_);
+ }
}
- void denoise()
+ void denoise(const PassType pass_type)
{
- /* Add input images.
- *
- * NOTE: Store passes for the entire duration of denoising because OIDN denoiser might
- * reference pixels from the pass buffer. */
-
- OIDNPass oidn_color_pass(buffer_params_, "color", PASS_COMBINED);
- OIDNPass oidn_albedo_pass;
- OIDNPass oidn_normal_pass;
-
+ /* Add input color image. */
+ OIDNPass oidn_color_pass(buffer_params_, "color", pass_type);
+ if (oidn_color_pass.offset == PASS_UNUSED) {
+ return;
+ }
set_pass(oidn_color_pass);
if (denoise_params_.use_pass_albedo) {
- oidn_albedo_pass = OIDNPass(buffer_params_, "albedo", PASS_DENOISING_ALBEDO);
- set_pass(oidn_albedo_pass);
- }
-
- if (denoise_params_.use_pass_normal) {
- oidn_normal_pass = OIDNPass(buffer_params_, "normal", PASS_DENOISING_NORMAL);
- set_pass(oidn_normal_pass);
+ if (pass_type == PASS_SHADOW_CATCHER) {
+ /* Using albedo for the shadow catcher passes does not give desired results: there is
+ * an unexpected discontinuity in the shadow catcher pass based on the albedo of the object
+ * the shadow is cast on. */
+ set_fake_albedo_pass();
+ }
+ else {
+ set_pass(oidn_albedo_pass_);
+ }
}
/* Add output pass. */
- OIDNPass oidn_output_pass(buffer_params_, "output", PASS_COMBINED, PassMode::DENOISED);
+ OIDNPass oidn_output_pass(buffer_params_, "output", pass_type, PassMode::DENOISED);
+ if (oidn_color_pass.offset == PASS_UNUSED) {
+ LOG(DFATAL) << "Missing denoised pass " << pass_type_as_string(pass_type);
+ return;
+ }
set_pass_referenced(oidn_output_pass);
/* Execute filter. */
@@ -186,7 +203,7 @@ class OIDNDeenoiseContext {
stride * pass_stride * sizeof(float));
}
- void set_pass_scaled(OIDNPass &oidn_pass)
+ void read_pass_pixels(OIDNPass &oidn_pass)
{
const int64_t width = buffer_params_.width;
const int64_t height = buffer_params_.height;
@@ -196,6 +213,7 @@ class OIDNDeenoiseContext {
PassAccessor::PassAccessInfo pass_access_info;
pass_access_info.type = oidn_pass.type;
+ pass_access_info.mode = oidn_pass.mode;
pass_access_info.offset = oidn_pass.offset;
/* Denoiser operates on passes which are used to calculate the approximation, and is never used
@@ -211,13 +229,34 @@ class OIDNDeenoiseContext {
const PassAccessor::Destination destination(scaled_buffer.data(), 3);
pass_accessor.get_render_tile_pixels(render_buffers_, buffer_params_, destination);
+ }
- oidn_filter_->setImage(
- oidn_pass.name, scaled_buffer.data(), oidn::Format::Float3, width, height, 0, 0, 0);
+ void set_pass_scaled(OIDNPass &oidn_pass)
+ {
+ if (oidn_pass.scaled_buffer.empty()) {
+ read_pass_pixels(oidn_pass);
+ }
+
+ const int64_t width = buffer_params_.width;
+ const int64_t height = buffer_params_.height;
+
+ oidn_filter_->setImage(oidn_pass.name,
+ oidn_pass.scaled_buffer.data(),
+ oidn::Format::Float3,
+ width,
+ height,
+ 0,
+ 0,
+ 0);
}
void set_pass(OIDNPass &oidn_pass)
{
+ if (oidn_pass.use_compositing) {
+ set_pass_scaled(oidn_pass);
+ return;
+ }
+
if (!oidn_pass.need_scale || (num_samples_ == 1 && pass_sample_count_ == PASS_UNUSED)) {
set_pass_referenced(oidn_pass);
return;
@@ -226,6 +265,25 @@ class OIDNDeenoiseContext {
set_pass_scaled(oidn_pass);
}
+ void set_fake_albedo_pass()
+ {
+ const int64_t width = buffer_params_.width;
+ const int64_t height = buffer_params_.height;
+
+ /* TODO(sergey): Is there a way to avoid allocation of an entire frame of const values? */
+
+ if (fake_albedo_pixels_.empty()) {
+ const int64_t num_pixels = width * height * 3;
+ fake_albedo_pixels_.resize(num_pixels);
+ for (int i = 0; i < num_pixels; ++i) {
+ fake_albedo_pixels_[i] = 0.5f;
+ }
+ }
+
+ oidn_filter_->setImage(
+ "albedo", fake_albedo_pixels_.data(), oidn::Format::Float3, width, height, 0, 0, 0);
+ }
+
/* Scale output pass to match adaptive sampling per-pixel scale, as well as bring alpha channel
* back. */
void postprocess_output(const OIDNPass &oidn_input_pass, const OIDNPass &oidn_output_pass)
@@ -244,6 +302,9 @@ class OIDNDeenoiseContext {
float *buffer_data = render_buffers_->buffer.data();
+ const bool has_pass_sample_count = (pass_sample_count_ != PASS_UNUSED);
+ const bool need_scale = has_pass_sample_count || oidn_input_pass.use_compositing;
+
for (int y = 0; y < height; ++y) {
float *buffer_row = buffer_data + buffer_offset + y * row_stride;
for (int x = 0; x < width; ++x) {
@@ -251,8 +312,10 @@ class OIDNDeenoiseContext {
float *noisy_pixel = buffer_pixel + oidn_input_pass.offset;
float *denoised_pixel = buffer_pixel + oidn_output_pass.offset;
- if (pass_sample_count_ != PASS_UNUSED) {
- const float pixel_scale = __float_as_uint(buffer_pixel[pass_sample_count_]);
+ if (need_scale) {
+ float pixel_scale = has_pass_sample_count ?
+ __float_as_uint(buffer_pixel[pass_sample_count_]) :
+ num_samples_;
denoised_pixel[0] = denoised_pixel[0] * pixel_scale;
denoised_pixel[1] = denoised_pixel[1] * pixel_scale;
@@ -270,6 +333,12 @@ class OIDNDeenoiseContext {
oidn::FilterRef *oidn_filter_;
int num_samples_;
int pass_sample_count_;
+
+ /* Optional albedo and normal passes, reused by denoising of different pass types. */
+ OIDNPass oidn_albedo_pass_;
+ OIDNPass oidn_normal_pass_;
+
+ array<float> fake_albedo_pixels_;
};
#endif
@@ -288,7 +357,9 @@ void OIDNDenoiser::denoise_buffer(const BufferParams &buffer_params,
oidn::FilterRef *oidn_filter = &state_->oidn_filter;
OIDNDeenoiseContext context(params_, buffer_params, render_buffers, oidn_filter, num_samples);
- context.denoise();
+ context.denoise(PASS_COMBINED);
+ context.denoise(PASS_SHADOW_CATCHER);
+ context.denoise(PASS_SHADOW_CATCHER_MATTE);
#end
@@ Diff output truncated at 10240 characters. @@
More information about the Bf-blender-cvs
mailing list