[Bf-blender-cvs] [384a02a214c] master: BLI: add missing materialize methods for virtual arrays

Jacques Lucke noreply at git.blender.org
Thu Apr 7 10:03:03 CEST 2022


Commit: 384a02a214cad88f3180deee36b22529c213ddaf
Author: Jacques Lucke
Date:   Thu Apr 7 10:02:34 2022 +0200
Branches: master
https://developer.blender.org/rB384a02a214cad88f3180deee36b22529c213ddaf

BLI: add missing materialize methods for virtual arrays

This does two things:
* Introduce new `materialize_compressed` methods. These are used when
  the destination array should be filled without gaps, i.e. `dst` has
  exactly `mask.size()` elements instead of one slot per source index
  (see the sketch below).
* Add materialize methods in various classes where they were missing
  (their absence caused overhead, because slower generic fallbacks had
  to be used).
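
For illustration, a minimal sketch of the difference (hypothetical
values; this assumes the relevant blender:: BLI headers are included
and that the typed VArray API gains the same materialize_compressed
entry points as the generic one shown in the diff below):

  std::array<int, 5> data = {10, 20, 30, 40, 50};
  const VArray<int> varray = VArray<int>::ForSpan(data);

  /* A mask selecting indices 1, 3 and 4 of the virtual array. */
  const std::array<int64_t, 3> indices = {1, 3, 4};
  const IndexMask mask{Span<int64_t>(indices)};

  /* materialize() keeps source indices: the destination needs at least
   * mask.min_array_size() elements (here 5) and unmasked slots stay
   * untouched. */
  int with_gaps[5] = {0, 0, 0, 0, 0};
  varray.materialize(mask, MutableSpan<int>(with_gaps, 5));
  /* with_gaps == {0, 20, 0, 40, 50} */

  /* materialize_compressed() packs the selected values tightly: the
   * destination only needs mask.size() elements. */
  int packed[3];
  varray.materialize_compressed(mask, MutableSpan<int>(packed, 3));
  /* packed == {20, 40, 50} */

The `_to_uninitialized` variants behave the same but construct the
elements with placement new, for destination buffers whose elements
have not been constructed yet (which matters for non-trivial T).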

===================================================================

M	source/blender/blenlib/BLI_generic_virtual_array.hh
M	source/blender/blenlib/BLI_virtual_array.hh
M	source/blender/blenlib/intern/generic_virtual_array.cc
M	source/blender/blenlib/tests/BLI_virtual_array_test.cc

===================================================================

diff --git a/source/blender/blenlib/BLI_generic_virtual_array.hh b/source/blender/blenlib/BLI_generic_virtual_array.hh
index f4c9e745cf9..4aed1caf796 100644
--- a/source/blender/blenlib/BLI_generic_virtual_array.hh
+++ b/source/blender/blenlib/BLI_generic_virtual_array.hh
@@ -51,6 +51,9 @@ class GVArrayImpl {
   virtual void materialize(const IndexMask mask, void *dst) const;
   virtual void materialize_to_uninitialized(const IndexMask mask, void *dst) const;
 
+  virtual void materialize_compressed(IndexMask mask, void *dst) const;
+  virtual void materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const;
+
   virtual bool try_assign_VArray(void *varray) const;
   virtual bool may_have_ownership() const;
 };
@@ -133,6 +136,9 @@ class GVArrayCommon {
   void materialize_to_uninitialized(void *dst) const;
   void materialize_to_uninitialized(const IndexMask mask, void *dst) const;
 
+  void materialize_compressed(IndexMask mask, void *dst) const;
+  void materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const;
+
   /**
    * Returns true when the virtual array is stored as a span internally.
    */
@@ -336,6 +342,16 @@ template<typename T> class GVArrayImpl_For_VArray : public GVArrayImpl {
     varray_.materialize_to_uninitialized(mask, MutableSpan((T *)dst, mask.min_array_size()));
   }
 
+  void materialize_compressed(const IndexMask mask, void *dst) const override
+  {
+    varray_.materialize_compressed(mask, MutableSpan((T *)dst, mask.size()));
+  }
+
+  void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
+  {
+    varray_.materialize_compressed_to_uninitialized(mask, MutableSpan((T *)dst, mask.size()));
+  }
+
   bool try_assign_VArray(void *varray) const override
   {
     *(VArray<T> *)varray = varray_;
@@ -400,6 +416,27 @@ template<typename T> class VArrayImpl_For_GVArray : public VArrayImpl<T> {
   {
     return varray_.may_have_ownership();
   }
+
+  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    varray_.materialize(mask, r_span.data());
+  }
+
+  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    varray_.materialize_to_uninitialized(mask, r_span.data());
+  }
+
+  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    varray_.materialize_compressed(mask, r_span.data());
+  }
+
+  void materialize_compressed_to_uninitialized(IndexMask mask,
+                                               MutableSpan<T> r_span) const override
+  {
+    varray_.materialize_compressed_to_uninitialized(mask, r_span.data());
+  }
 };
 
 /* Used to convert any typed virtual mutable array into a generic one. */
@@ -479,6 +516,16 @@ template<typename T> class GVMutableArrayImpl_For_VMutableArray : public GVMutab
     varray_.materialize_to_uninitialized(mask, MutableSpan((T *)dst, mask.min_array_size()));
   }
 
+  void materialize_compressed(const IndexMask mask, void *dst) const override
+  {
+    varray_.materialize_compressed(mask, MutableSpan((T *)dst, mask.size()));
+  }
+
+  void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
+  {
+    varray_.materialize_compressed_to_uninitialized(mask, MutableSpan((T *)dst, mask.size()));
+  }
+
   bool try_assign_VArray(void *varray) const override
   {
     *(VArray<T> *)varray = varray_;
@@ -561,6 +608,27 @@ template<typename T> class VMutableArrayImpl_For_GVMutableArray : public VMutabl
   {
     return varray_.may_have_ownership();
   }
+
+  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    varray_.materialize(mask, r_span.data());
+  }
+
+  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    varray_.materialize_to_uninitialized(mask, r_span.data());
+  }
+
+  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    varray_.materialize_compressed(mask, r_span.data());
+  }
+
+  void materialize_compressed_to_uninitialized(IndexMask mask,
+                                               MutableSpan<T> r_span) const override
+  {
+    varray_.materialize_compressed_to_uninitialized(mask, r_span.data());
+  }
 };
 
 /** \} */
@@ -590,6 +658,13 @@ class GVArrayImpl_For_GSpan : public GVMutableArrayImpl {
 
   bool is_span() const override;
   GSpan get_internal_span() const override;
+
+  void materialize(const IndexMask mask, void *dst) const override;
+  void materialize_to_uninitialized(const IndexMask mask, void *dst) const override;
+
+  void materialize_compressed(const IndexMask mask, void *dst) const override;
+  void materialize_compressed_to_uninitialized(const IndexMask mask,
+                                               void *dst) const override;
 };
 
 /** \} */
diff --git a/source/blender/blenlib/BLI_virtual_array.hh b/source/blender/blenlib/BLI_virtual_array.hh
index 3aa25bf6819..206e0191a54 100644
--- a/source/blender/blenlib/BLI_virtual_array.hh
+++ b/source/blender/blenlib/BLI_virtual_array.hh
@@ -107,7 +107,7 @@ template<typename T> class VArrayImpl {
 
   /**
    * Copy values from the virtual array into the provided span. The index of the value in the
-   * virtual is the same as the index in the span.
+   * virtual array is the same as the index in the span.
    */
   virtual void materialize(IndexMask mask, MutableSpan<T> r_span) const
   {
@@ -146,6 +146,35 @@ template<typename T> class VArrayImpl {
     }
   }
 
+  /**
+   * Copy values from the virtual array into the provided span. Contrary to #materialize, the index
+   * in the virtual array is not the same as the index in the output span. Instead, the span is
+   * filled without gaps.
+   * without gaps.
+   */
+  virtual void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
+  {
+    BLI_assert(mask.size() == r_span.size());
+    mask.to_best_mask_type([&](auto best_mask) {
+      for (const int64_t i : IndexRange(best_mask.size())) {
+        r_span[i] = this->get(best_mask[i]);
+      }
+    });
+  }
+
+  /**
+   * Same as #materialize_compressed but #r_span is expected to be uninitialized.
+   */
+  virtual void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
+  {
+    BLI_assert(mask.size() == r_span.size());
+    T *dst = r_span.data();
+    mask.to_best_mask_type([&](auto best_mask) {
+      for (const int64_t i : IndexRange(best_mask.size())) {
+        new (dst + i) T(this->get(best_mask[i]));
+      }
+    });
+  }
+
   /**
   * If this virtual array wraps another #GVArray, this method should assign the wrapped array to the
    * provided reference. This allows losslessly converting between generic and typed virtual
@@ -265,6 +294,25 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
     const Span<T> other_span = other.get_internal_span();
     return data_ == other_span.data();
   }
+
+  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    mask.to_best_mask_type([&](auto best_mask) {
+      for (const int64_t i : IndexRange(best_mask.size())) {
+        r_span[i] = data_[best_mask[i]];
+      }
+    });
+  }
+
+  void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    T *dst = r_span.data();
+    mask.to_best_mask_type([&](auto best_mask) {
+      for (const int64_t i : IndexRange(best_mask.size())) {
+        new (dst + i) T(data_[best_mask[i]]);
+      }
+    });
+  }
 };
 
 /**
@@ -341,6 +389,20 @@ template<typename T> class VArrayImpl_For_Single final : public VArrayImpl<T> {
   {
     return value_;
   }
+
+  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    BLI_assert(mask.size() == r_span.size());
+    UNUSED_VARS_NDEBUG(mask);
+    r_span.fill(value_);
+  }
+
+  void materialize_compressed_to_uninitialized(IndexMask mask,
+                                               MutableSpan<T> r_span) const override
+  {
+    BLI_assert(mask.size() == r_span.size());
+    uninitialized_fill_n(r_span.data(), mask.size(), value_);
+  }
 };
 
 /**
@@ -374,6 +436,29 @@ template<typename T, typename GetFunc> class VArrayImpl_For_Func final : public
     T *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
   }
+
+  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    BLI_assert(mask.size() == r_span.size());
+    T *dst = r_span.data();
+    mask.to_best_mask_type([&](auto best_mask) {
+      for (const int64_t i : IndexRange(best_mask.size())) {
+        dst[i] = get_func_(best_mask[i]);
+      }
+    });
+  }
+
+  void materialize_compressed_to_uninitialized(IndexMask mask,
+                                               MutableSpan<T> r_span) const override
+  {
+    BLI_assert(mask.size() == r_span.size());
+    T *dst = r_span.data();
+    mask.to_best_mask_type([&](auto best_mask) {
+      for (const int64_t i : IndexRange(best_mask.size())) {
+        new (dst + i) T(get_func_(best_mask[i]));
+      }
+    });
+  }
 };
 
 /**
@@ -422,6 +507,29 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl<ElemT> {
     mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
   }
 
+  void materialize_compressed(IndexMask mask, MutableSpan<ElemT> r_span) const override
+  {
+    BLI_assert(mask.size() == r_span.size());
+    ElemT *dst = r_span.data();
+    mask.to_best_mask_type([&](auto best_mask) {
+      for (const int64_t i : IndexRange(best_mask.size())) {
+        dst[i] = GetFunc(data_[best_mask[i]]);
+      }
+    });
+  }
+
+  void materialize_compressed_to_uninitialized(IndexMask mask,
+                                               MutableSpan<ElemT> r_span) const override
+  {
+    BLI_assert(mask.size() == r_span.size());
+    ElemT *dst = r_span.data();
+    mask.to_best_mask_type([&](auto best_mask) {
+      for (const int64_t i : IndexRange(best_mask.size())) {
+        new (dst + i) ElemT(GetFunc(data_[best_mask[i]]));
+      }
+    });
+  }
+
   bool may_have_ownership() const override
   {
     return false;
@@ -740,6 +848,17 @@ 

@@ Diff output truncated at 10240 characters. @@
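
A note on the to_best_mask_type pattern used in most of the new
methods above: it devirtualizes the mask so the compiler can generate
a specialized inner loop for each of the two common mask shapes. A
rough sketch of the idea (an approximation of the IndexMask member in
BLI_index_mask.hh, not the exact implementation):

  /* The callback is instantiated twice, once per mask representation,
   * so each loop body can be optimized separately. */
  template<typename Fn> void to_best_mask_type(const Fn &fn) const
  {
    if (this->is_range()) {
      /* Contiguous indices: pass an IndexRange, indexing is cheap. */
      fn(this->as_range());
    }
    else {
      /* Arbitrary indices: pass the underlying Span<int64_t>. */
      fn(this->indices());
    }
  }

This is also where the overhead mentioned in the commit message came
from: without the specialized methods, the generic fallback performs a
virtual get() call per element, while the overrides above run a tight
loop directly over the underlying data.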


