[Bf-blender-cvs] [f210842a725] master: Sequencer: Improve Image Transform Quality When Exporting.

Jeroen Bakker noreply at git.blender.org
Thu Jan 26 14:28:44 CET 2023


Commit: f210842a725959d09e6dd87c9325c610aabbac00
Author: Jeroen Bakker
Date:   Thu Jan 26 13:38:59 2023 +0100
Branches: master
https://developer.blender.org/rBf210842a725959d09e6dd87c9325c610aabbac00

Sequencer: Improve Image Transform Quality When Exporting.

Image Transform uses linear or nearest sampling during editing and exporting.
This sampling is fine for images that aren't scaled. When sequencing,
however, you mostly want to use some sort of scaling, which leads to poorer
quality.

This change will use sub-sampling to improve the quality. This is only
enabled when rendering. During editing the subsampling is disabled to
keep the user interface reacting as expected.

Another improvement is that the image transform is stopped the moment
it hasn't collected at least 4 samples for a scan line. In that case we
expect that there are no further samples that would change the result.

In a future patch this could be replaced by a ray/bounding box intersection,
as that would remove some unneeded loops in both the single-sampled and
sub-sampled approaches.

===================================================================

M	source/blender/draw/engines/image/image_drawing_mode.hh
M	source/blender/imbuf/IMB_imbuf.h
M	source/blender/imbuf/intern/transform.cc
M	source/blender/sequencer/intern/render.c

===================================================================

diff --git a/source/blender/draw/engines/image/image_drawing_mode.hh b/source/blender/draw/engines/image/image_drawing_mode.hh
index 9345be800e8..d389ecd90af 100644
--- a/source/blender/draw/engines/image/image_drawing_mode.hh
+++ b/source/blender/draw/engines/image/image_drawing_mode.hh
@@ -516,6 +516,7 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
                   &texture_buffer,
                   transform_mode,
                   IMB_FILTER_NEAREST,
+                  1,
                   uv_to_texel.ptr(),
                   crop_rect_ptr);
   }
diff --git a/source/blender/imbuf/IMB_imbuf.h b/source/blender/imbuf/IMB_imbuf.h
index a5bb34392b1..79be739a205 100644
--- a/source/blender/imbuf/IMB_imbuf.h
+++ b/source/blender/imbuf/IMB_imbuf.h
@@ -846,6 +846,8 @@ typedef enum eIMBTransformMode {
  * - Only one data type buffer will be used (rect_float has priority over rect)
  * \param mode: Cropping/Wrap repeat effect to apply during transformation.
  * \param filter: Interpolation to use during sampling.
+ * \param num_subsamples: Number of subsamples to use. Increasing this would improve the quality,
+ * but reduces the performance.
  * \param transform_matrix: Transformation matrix to use.
  * The given matrix should transform between dst pixel space to src pixel space.
  * One unit is one pixel.
@@ -860,6 +862,7 @@ void IMB_transform(const struct ImBuf *src,
                    struct ImBuf *dst,
                    eIMBTransformMode mode,
                    eIMBInterpolationFilterMode filter,
+                   const int num_subsamples,
                    const float transform_matrix[4][4],
                    const struct rctf *src_crop);
 
diff --git a/source/blender/imbuf/intern/transform.cc b/source/blender/imbuf/intern/transform.cc
index 323460f4593..23aad5edf1d 100644
--- a/source/blender/imbuf/intern/transform.cc
+++ b/source/blender/imbuf/intern/transform.cc
@@ -9,6 +9,7 @@
 #include <type_traits>
 
 #include "BLI_math.h"
+#include "BLI_math_color_blend.h"
 #include "BLI_math_vector.hh"
 #include "BLI_rect.h"
 
@@ -37,6 +38,14 @@ struct TransformUserData {
    */
   double2 add_y;
 
+  struct {
+    int num;
+    double2 offset_x;
+    double2 offset_y;
+    double2 add_x;
+    double2 add_y;
+  } subsampling;
+
   /**
    * \brief Cropping region in source image pixel space.
    */
@@ -45,11 +54,12 @@ struct TransformUserData {
   /**
    * \brief Initialize the start_uv, add_x and add_y fields based on the given transform matrix.
    */
-  void init(const float transform_matrix[4][4])
+  void init(const float transform_matrix[4][4], const int num_subsamples)
   {
     init_start_uv(transform_matrix);
     init_add_x(transform_matrix);
     init_add_y(transform_matrix);
+    init_subsampling(num_subsamples);
   }
 
  private:
@@ -83,6 +93,15 @@ struct TransformUserData {
     mul_v3_m4v3_db(add_y_v3, transform_matrix_double, double3(0.0, height, 0.0));
     add_y = double2((add_y_v3 - double3(start_uv)) * (1.0 / height));
   }
+
+  void init_subsampling(const int num_subsamples)
+  {
+    subsampling.num = max_ii(num_subsamples, 1);
+    subsampling.add_x = add_x / (subsampling.num);
+    subsampling.add_y = add_y / (subsampling.num);
+    subsampling.offset_x = -add_x * 0.5 + subsampling.add_x * 0.5;
+    subsampling.offset_y = -add_y * 0.5 + subsampling.add_y * 0.5;
+  }
 };
 
 /**
@@ -257,6 +276,39 @@ class WrapRepeatUV : public BaseUVWrapping {
   }
 };
 
+// TODO: should we use math_vectors for this.
+template<typename StorageType, int NumChannels>
+class Pixel : public std::array<StorageType, NumChannels> {
+ public:
+  void clear()
+  {
+    for (int channel_index : IndexRange(NumChannels)) {
+      (*this)[channel_index] = 0;
+    }
+  }
+
+  void add_subsample(const Pixel<StorageType, NumChannels> other, int sample_number)
+  {
+    BLI_STATIC_ASSERT((std::is_same_v<StorageType, uchar>) || (std::is_same_v<StorageType, float>),
+                      "Only uchar and float channels supported.");
+
+    float factor = 1.0 / (sample_number + 1);
+    if constexpr (std::is_same_v<StorageType, uchar>) {
+      BLI_STATIC_ASSERT(NumChannels == 4, "Pixels using uchar requires to have 4 channels.");
+      blend_color_interpolate_byte(this->data(), this->data(), other.data(), factor);
+    }
+    else if constexpr (std::is_same_v<StorageType, float> && NumChannels == 4) {
+      blend_color_interpolate_float(this->data(), this->data(), other.data(), factor);
+    }
+    else if constexpr (std::is_same_v<StorageType, float>) {
+      for (int channel_index : IndexRange(NumChannels)) {
+        (*this)[channel_index] = (*this)[channel_index] * (1.0 - factor) +
+                                 other[channel_index] * factor;
+      }
+    }
+  }
+};
+
 /**
  * \brief Read a sample from an image buffer.
  *
@@ -286,7 +338,7 @@ class Sampler {
  public:
   using ChannelType = StorageType;
   static const int ChannelLen = NumChannels;
-  using SampleType = std::array<StorageType, NumChannels>;
+  using SampleType = Pixel<StorageType, NumChannels>;
 
   void sample(const ImBuf *source, const double2 uv, SampleType &r_sample)
   {
@@ -378,12 +430,12 @@ class Sampler {
 template<typename StorageType, int SourceNumChannels, int DestinationNumChannels>
 class ChannelConverter {
  public:
-  using SampleType = std::array<StorageType, SourceNumChannels>;
+  using SampleType = Pixel<StorageType, SourceNumChannels>;
   using PixelType = PixelPointer<StorageType, DestinationNumChannels>;
 
   /**
-   * \brief Convert the number of channels of the given sample to match the pixel pointer and store
-   * it at the location the pixel_pointer points at.
+   * \brief Convert the number of channels of the given sample to match the pixel pointer and
+   * store it at the location the pixel_pointer points at.
    */
   void convert_and_store(const SampleType &sample, PixelType &pixel_pointer)
   {
@@ -413,6 +465,19 @@ class ChannelConverter {
       BLI_assert_unreachable();
     }
   }
+
+  void mix_and_store(const SampleType &sample, PixelType &pixel_pointer, const float mix_factor)
+  {
+    if constexpr (std::is_same_v<StorageType, uchar>) {
+      BLI_STATIC_ASSERT(SourceNumChannels == 4, "Unsigned chars always have 4 channels.");
+      BLI_STATIC_ASSERT(DestinationNumChannels == 4, "Unsigned chars always have 4 channels.");
+      blend_color_interpolate_byte(
+          pixel_pointer.get_pointer(), pixel_pointer.get_pointer(), sample.data(), mix_factor);
+    }
+    else {
+      BLI_assert_unreachable();
+    }
+  }
 };
 
 /**
@@ -442,8 +507,8 @@ class ScanlineProcessor {
   Sampler sampler;
 
   /**
-   * \brief Channels sizzling logic to convert between the input image buffer and the output image
-   * buffer.
+   * \brief Channels sizzling logic to convert between the input image buffer and the output
+   * image buffer.
    */
   ChannelConverter<typename Sampler::ChannelType,
                    Sampler::ChannelLen,
@@ -455,18 +520,114 @@ class ScanlineProcessor {
    * \brief Inner loop of the transformations, processing a full scanline.
    */
   void process(const TransformUserData *user_data, int scanline)
+  {
+    // if (user_data->subsampling.num > 1) {
+    process_with_subsampling(user_data, scanline);
+    // }
+    // else {
+    //   process_one_sample_per_pixel(user_data, scanline);
+    // }
+  }
+
+ private:
+  void process_one_sample_per_pixel(const TransformUserData *user_data, int scanline)
   {
     const int width = user_data->dst->x;
     double2 uv = user_data->start_uv + user_data->add_y * scanline;
 
     output.init_pixel_pointer(user_data->dst, int2(0, scanline));
-    for (int xi = 0; xi < width; xi++) {
+    int xi = 0;
+    while (xi < width) {
+      const bool discard_pixel = discarder.should_discard(*user_data, uv);
+      if (!discard_pixel) {
+        break;
+      }
+      uv += user_data->add_x;
+      output.increase_pixel_pointer();
+      xi += 1;
+    }
+
+    /*
+     * Draw until we didn't draw for at least 4 pixels.
+     */
+    int num_output_pixels_skipped = 0;
+    const int num_missing_output_pixels_allowed = 4;
+    for (; xi < width && num_output_pixels_skipped < num_missing_output_pixels_allowed; xi++) {
       if (!discarder.should_discard(*user_data, uv)) {
         typename Sampler::SampleType sample;
         sampler.sample(user_data->src, uv, sample);
         channel_converter.convert_and_store(sample, output);
       }
+      else {
+        num_output_pixels_skipped += 1;
+      }
+
+      uv += user_data->add_x;
+      output.increase_pixel_pointer();
+    }
+  }
+
+  void process_with_subsampling(const TransformUserData *user_data, int scanline)
+  {
+    const int width = user_data->dst->x;
+    double2 uv = user_data->start_uv + user_data->add_y * scanline;
+
+    output.init_pixel_pointer(user_data->dst, int2(0, scanline));
+    int xi = 0;
+    /*
+     * Skip leading pixels that would be fully discarded.
+     *
+     * NOTE: This could be improved by intersection between an ray and the image bounds.
+     */
+    while (xi < width) {
+      const bool discard_pixel = discarder.should_discard(*user_data, uv) &&
+                                 discarder.should_discard(*user_data, uv + user_data->add_x) &&
+                                 discarder.should_discard(*user_data, uv + user_data->add_y) &&
+                                 discarder.should_discard(
+                                     *user_data, uv + user_data->add_x + user_data->add_y);
+      if (!discard_pixel) {
+        break;
+      }
+      uv += user_data->add_x;
+      output.increase_pixel_pointer();
+      xi += 1;
+    }
+
+    /*
+     * Draw until we didn't draw for at least 4 pixels.
+     */
+    int num_output_pixels_skipped = 0;
+    const int num_missing_output_pixels_allowed = 4;
+    for (; xi < width && num_output_pixels_skipped < num_missing_output_pixels_allowed; xi++) {
+      typename Sampler::SampleType sample;
+      sample.clear();
+      int num_subsamples_added = 0;
+
+      double2 subsample_uv_y = uv + user_data->subsampling.offset_y;
+      for (int subsample_yi : IndexRange(user_data->subsampling.num)) {
+        UNUSED_VARS(subsamp

@@ Diff output truncated at 10240 characters. @@



More information about the Bf-blender-cvs mailing list