[Bf-blender-cvs] [25266caa454] master: Cleanup: spelling

Campbell Barton noreply at git.blender.org
Fri Nov 20 02:16:25 CET 2020


Commit: 25266caa454324b3394c09920913fb419b5abf2b
Author: Campbell Barton
Date:   Fri Nov 20 11:39:03 2020 +1100
Branches: master
https://developer.blender.org/rB25266caa454324b3394c09920913fb419b5abf2b

Cleanup: spelling

===================================================================

M	intern/cycles/kernel/kernel_light.h
M	intern/cycles/kernel/kernel_light_background.h
M	intern/cycles/render/object.cpp
M	intern/guardedalloc/MEM_guardedalloc.h
M	intern/guardedalloc/intern/mallocn.c
M	source/blender/blenkernel/intern/mesh_mapping.c
M	source/blender/blenlib/intern/math_geom.c
M	source/blender/modifiers/intern/MOD_cast.c
M	source/blender/nodes/composite/nodes/node_composite_colorSpill.c
M	source/blender/sequencer/intern/strip_edit.c
M	source/blender/sequencer/intern/strip_transform.c

===================================================================

diff --git a/intern/cycles/kernel/kernel_light.h b/intern/cycles/kernel/kernel_light.h
index 138b90373a6..9650b85a5c2 100644
--- a/intern/cycles/kernel/kernel_light.h
+++ b/intern/cycles/kernel/kernel_light.h
@@ -582,7 +582,7 @@ ccl_device_forceinline void triangle_light_sample(KernelGlobals *kg,
 
 ccl_device int light_distribution_sample(KernelGlobals *kg, float *randu)
 {
-  /* This is basically std::upper_bound as used by pbrt, to find a point light or
+  /* This is basically std::upper_bound as used by PBRT, to find a point light or
    * triangle to emit from, proportional to area. a good improvement would be to
    * also sample proportional to power, though it's not so well defined with
    * arbitrary shaders. */
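
For context on the comment above: the kernel walks a prefix-sum table of light/triangle areas with an std::upper_bound-style binary search, picking the entry whose interval contains the random number, so entries with larger area are chosen more often. The following is an illustrative, self-contained sketch of that kind of search written for this note; the function name and the CDF layout are assumptions for the example, not Cycles code.

#include <stdio.h>

/* Sketch of an upper_bound-style CDF lookup (illustration only, not Cycles code).
 * `cdf` is a monotonically increasing prefix sum with count + 1 entries,
 * cdf[0] == 0.0f and cdf[count] == 1.0f; `randu` is uniform in [0, 1). */
static int cdf_sample_index(const float *cdf, int count, float randu)
{
  int first = 0;
  while (count > 0) {
    const int half = count >> 1;
    const int middle = first + half;
    if (randu < cdf[middle + 1]) {
      count = half; /* The answer is `middle` or an earlier entry. */
    }
    else {
      first = middle + 1; /* The answer lies strictly after `middle`. */
      count -= half + 1;
    }
  }
  return first; /* Index i with cdf[i] <= randu < cdf[i + 1]. */
}

int main(void)
{
  /* Three lights with areas 1, 2 and 5, normalized into a prefix sum. */
  const float cdf[] = {0.0f, 0.125f, 0.375f, 1.0f};
  printf("%d %d %d\n",
         cdf_sample_index(cdf, 3, 0.1f),  /* -> 0 */
         cdf_sample_index(cdf, 3, 0.2f),  /* -> 1 */
         cdf_sample_index(cdf, 3, 0.9f)); /* -> 2 */
  return 0;
}
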
diff --git a/intern/cycles/kernel/kernel_light_background.h b/intern/cycles/kernel/kernel_light_background.h
index 5fa25069fcd..2a685c0adfa 100644
--- a/intern/cycles/kernel/kernel_light_background.h
+++ b/intern/cycles/kernel/kernel_light_background.h
@@ -31,7 +31,7 @@ ccl_device float3 background_map_sample(KernelGlobals *kg, float randu, float ra
   int res_y = kernel_data.background.map_res_y;
   int cdf_width = res_x + 1;
 
-  /* this is basically std::lower_bound as used by pbrt */
+  /* This is basically std::lower_bound as used by PBRT. */
   int first = 0;
   int count = res_y;
 
@@ -58,7 +58,7 @@ ccl_device float3 background_map_sample(KernelGlobals *kg, float randu, float ra
   float dv = inverse_lerp(cdf_v.y, cdf_next_v.y, randv);
   float v = (index_v + dv) / res_y;
 
-  /* this is basically std::lower_bound as used by pbrt */
+  /* This is basically std::lower_bound as used by PBRT. */
   first = 0;
   count = res_x;
   while (count > 0) {
diff --git a/intern/cycles/render/object.cpp b/intern/cycles/render/object.cpp
index 70ce60252f0..a1a08f26229 100644
--- a/intern/cycles/render/object.cpp
+++ b/intern/cycles/render/object.cpp
@@ -171,7 +171,7 @@ void Object::compute_bounds(bool motion_blur)
 
     bounds = BoundBox::empty;
 
-    /* todo: this is really terrible. according to pbrt there is a better
+    /* TODO: this is really terrible. according to PBRT there is a better
      * way to find this iteratively, but did not find implementation yet
      * or try to implement myself */
     for (float t = 0.0f; t < 1.0f; t += (1.0f / 128.0f)) {
diff --git a/intern/guardedalloc/MEM_guardedalloc.h b/intern/guardedalloc/MEM_guardedalloc.h
index 0f30f7bd1a5..a0174e30aff 100644
--- a/intern/guardedalloc/MEM_guardedalloc.h
+++ b/intern/guardedalloc/MEM_guardedalloc.h
@@ -239,7 +239,7 @@ void MEM_use_lockfree_allocator(void);
  *
  * Use for debug purposes. This allocator contains lock section around every allocator call, which
  * makes it slow. What is gained with this is the ability to have list of allocated blocks (in an
- * addition to the trackign of number of allocations and amount of allocated bytes).
+ * addition to the tracking of number of allocations and amount of allocated bytes).
  *
  * NOTE: The switch between allocator types can only happen before any allocation did happen. */
 void MEM_use_guarded_allocator(void);
@@ -249,7 +249,7 @@ void MEM_use_guarded_allocator(void);
 #endif /* __cplusplus */
 
 #ifdef __cplusplus
-/* alloc funcs for C++ only */
+/* Allocation functions (for C++ only). */
 #  define MEM_CXX_CLASS_ALLOC_FUNCS(_id) \
    public: \
     void *operator new(size_t num_bytes) \
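
As a usage note for the allocator-switch comment above: the guarded allocator has to be selected before the very first allocation, after which the regular MEM_* calls are routed through it. Below is a minimal sketch of that order of operations; it assumes the usual MEM_mallocN/MEM_freeN entry points from this header and linking against intern/guardedalloc.

#include "MEM_guardedalloc.h"

int main(void)
{
  /* Opt into the slower, lock-guarded allocator that also keeps a list of all
   * allocated blocks. Debug builds assert if an allocation already happened. */
  MEM_use_guarded_allocator();

  /* From here on, MEM_* allocations are tracked by the guarded allocator. */
  float *values = MEM_mallocN(sizeof(float) * 128, "example values");
  /* ... use the buffer ... */
  MEM_freeN(values);
  return 0;
}
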
diff --git a/intern/guardedalloc/intern/mallocn.c b/intern/guardedalloc/intern/mallocn.c
index f0dd29a0b9e..673821546e8 100644
--- a/intern/guardedalloc/intern/mallocn.c
+++ b/intern/guardedalloc/intern/mallocn.c
@@ -99,7 +99,7 @@ void aligned_free(void *ptr)
 /* Perform assert checks on allocator type change.
  *
  * Helps catching issues (in debug build) caused by an unintended allocator type change when there
- * are allocation happenned. */
+ * are allocation happened. */
 static void assert_for_allocator_change(void)
 {
   /* NOTE: Assume that there is no "sticky" internal state which would make switching allocator
diff --git a/source/blender/blenkernel/intern/mesh_mapping.c b/source/blender/blenkernel/intern/mesh_mapping.c
index 3572939f78c..81642b156d5 100644
--- a/source/blender/blenkernel/intern/mesh_mapping.c
+++ b/source/blender/blenkernel/intern/mesh_mapping.c
@@ -777,7 +777,7 @@ static void poly_edge_loop_islands_calc(const MEdge *medge,
       if (UNLIKELY(gid_bit > 31)) {
         /* All bits used in contiguous smooth groups, we can't do much!
          * Note: this is *very* unlikely - theoretically, four groups are enough,
-         *       I don't think we can reach this goal with such a simple algo,
+         *       I don't think we can reach this goal with such a simple algorithm,
          *       but I don't think either we'll never need all 32 groups!
          */
         printf(
diff --git a/source/blender/blenlib/intern/math_geom.c b/source/blender/blenlib/intern/math_geom.c
index 2b0018e7662..3cc4d03d547 100644
--- a/source/blender/blenlib/intern/math_geom.c
+++ b/source/blender/blenlib/intern/math_geom.c
@@ -5537,7 +5537,7 @@ void vcloud_estimate_transform_v3(const int list_size,
       /* build 'projection' matrix */
       for (a = 0; a < list_size; a++) {
         sub_v3_v3v3(va, rpos[a], accu_rcom);
-        /* mul_v3_fl(va, bp->mass);  mass needs renormalzation here ?? */
+        /* mul_v3_fl(va, bp->mass);  mass needs re-normalization here ?? */
         sub_v3_v3v3(vb, pos[a], accu_com);
         /* mul_v3_fl(va, rp->mass); */
         m[0][0] += va[0] * vb[0];
@@ -5571,11 +5571,11 @@ void vcloud_estimate_transform_v3(const int list_size,
       stunt[0] = q[0][0];
       stunt[1] = q[1][1];
       stunt[2] = q[2][2];
-      /* renormalizing for numeric stability */
+      /* Re-normalizing for numeric stability. */
       mul_m3_fl(q, 1.0f / len_v3(stunt));
 
-      /* this is pretty much Polardecompose 'inline' the algo based on Higham's thesis */
-      /* without the far case ... but seems to work here pretty neat                   */
+      /* This is pretty much Polar-decompose 'inline' the algorithm based on Higham's thesis
+       * without the far case ... but seems to work here pretty neat. */
       odet = 0.0f;
       ndet = determinant_m3_array(q);
       while ((odet - ndet) * (odet - ndet) > eps && i < imax) {
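
For readers without Higham's thesis at hand: the loop this hunk ends on is the classic Newton iteration for the polar decomposition, Q <- 0.5 * (Q + inverse(Q)^T), which drives Q toward the nearest rotation (its orthogonal polar factor). The sketch below is an illustrative, self-contained C version of that iteration and of a determinant-based convergence test; the helper names are assumptions for this note, not the BLI math API.

#include <math.h>
#include <stdio.h>

static float det_m3(const float m[3][3])
{
  return m[0][0] * (m[1][1] * m[2][2] - m[1][2] * m[2][1]) -
         m[0][1] * (m[1][0] * m[2][2] - m[1][2] * m[2][0]) +
         m[0][2] * (m[1][0] * m[2][1] - m[1][1] * m[2][0]);
}

/* Writes transpose(inverse(m)) into r, assuming m is invertible.
 * The cofactor matrix divided by the determinant is exactly the inverse-transpose. */
static void inverse_transpose_m3(float r[3][3], const float m[3][3])
{
  const float inv_det = 1.0f / det_m3(m);
  r[0][0] = (m[1][1] * m[2][2] - m[1][2] * m[2][1]) * inv_det;
  r[0][1] = -(m[1][0] * m[2][2] - m[1][2] * m[2][0]) * inv_det;
  r[0][2] = (m[1][0] * m[2][1] - m[1][1] * m[2][0]) * inv_det;
  r[1][0] = -(m[0][1] * m[2][2] - m[0][2] * m[2][1]) * inv_det;
  r[1][1] = (m[0][0] * m[2][2] - m[0][2] * m[2][0]) * inv_det;
  r[1][2] = -(m[0][0] * m[2][1] - m[0][1] * m[2][0]) * inv_det;
  r[2][0] = (m[0][1] * m[1][2] - m[0][2] * m[1][1]) * inv_det;
  r[2][1] = -(m[0][0] * m[1][2] - m[0][2] * m[1][0]) * inv_det;
  r[2][2] = (m[0][0] * m[1][1] - m[0][1] * m[1][0]) * inv_det;
}

/* Newton iteration: q <- 0.5 * (q + inverse(q)^T), stopped once the determinant
 * settles (squared difference below eps) or after imax steps. */
static void polar_rotation_m3(float q[3][3], const float eps, const int imax)
{
  float odet = 0.0f;
  float ndet = det_m3(q);
  int i = 0;
  while ((odet - ndet) * (odet - ndet) > eps && i < imax) {
    float qit[3][3];
    inverse_transpose_m3(qit, q);
    for (int r = 0; r < 3; r++) {
      for (int c = 0; c < 3; c++) {
        q[r][c] = 0.5f * (q[r][c] + qit[r][c]);
      }
    }
    odet = ndet;
    ndet = det_m3(q);
    i++;
  }
}

int main(void)
{
  /* A rotation of 30 degrees about Z, uniformly scaled by 2: the iteration
   * should recover a matrix with determinant close to 1 (the pure rotation). */
  const float a = 0.5235988f; /* 30 degrees in radians. */
  float q[3][3] = {
      {2.0f * cosf(a), -2.0f * sinf(a), 0.0f},
      {2.0f * sinf(a), 2.0f * cosf(a), 0.0f},
      {0.0f, 0.0f, 2.0f},
  };
  polar_rotation_m3(q, 1e-10f, 100);
  printf("determinant after iteration: %f\n", det_m3(q));
  return 0;
}
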
diff --git a/source/blender/modifiers/intern/MOD_cast.c b/source/blender/modifiers/intern/MOD_cast.c
index 185c21af7ad..06bd9ada0fb 100644
--- a/source/blender/modifiers/intern/MOD_cast.c
+++ b/source/blender/modifiers/intern/MOD_cast.c
@@ -397,7 +397,7 @@ static void cuboid_do(CastModifierData *cmd,
       facm = 1.0f - fac;
     }
 
-    /* The algo used to project the vertices to their
+    /* The algorithm used to project the vertices to their
      * bounding box (bb) is pretty simple:
      * for each vertex v:
      * 1) find in which octant v is in;
diff --git a/source/blender/nodes/composite/nodes/node_composite_colorSpill.c b/source/blender/nodes/composite/nodes/node_composite_colorSpill.c
index 49a565d912a..8ff4bcdced3 100644
--- a/source/blender/nodes/composite/nodes/node_composite_colorSpill.c
+++ b/source/blender/nodes/composite/nodes/node_composite_colorSpill.c
@@ -40,7 +40,7 @@ static void node_composit_init_color_spill(bNodeTree *UNUSED(ntree), bNode *node
   NodeColorspill *ncs = MEM_callocN(sizeof(NodeColorspill), "node colorspill");
   node->storage = ncs;
   node->custom1 = 2;    /* green channel */
-  node->custom2 = 0;    /* simple limit algo*/
+  node->custom2 = 0;    /* simple limit algorithm */
   ncs->limchan = 0;     /* limit by red */
   ncs->limscale = 1.0f; /* limit scaling factor */
   ncs->unspill = 0;     /* do not use unspill */
diff --git a/source/blender/sequencer/intern/strip_edit.c b/source/blender/sequencer/intern/strip_edit.c
index b03c5142a60..3137a471470 100644
--- a/source/blender/sequencer/intern/strip_edit.c
+++ b/source/blender/sequencer/intern/strip_edit.c
@@ -278,7 +278,7 @@ static void seq_split_set_left_offset(Sequence *seq, int timeline_frame)
  * \param seq: Sequence to be split
  * \param timeline_frame: frame at which seq is split.
  * \param method: affects type of offset to be applied to resize Sequence
- * \return poitner to created Sequence. This is always Sequence on right side.
+ * \return The newly created sequence strip. This is always Sequence on right side.
  */
 Sequence *SEQ_edit_strip_split(Main *bmain,
                                Scene *scene,
diff --git a/source/blender/sequencer/intern/strip_transform.c b/source/blender/sequencer/intern/strip_transform.c
index dd6b7e6dba5..233f8e5b22e 100644
--- a/source/blender/sequencer/intern/strip_transform.c
+++ b/source/blender/sequencer/intern/strip_transform.c
@@ -140,8 +140,10 @@ bool BKE_sequence_base_isolated_sel_check(ListBase *seqbase)
   return true;
 }
 
-/* use to impose limits when dragging/extending - so impossible situations don't happen
- * Cant use the SEQ_LEFTSEL and SEQ_LEFTSEL directly because the strip may be in a metastrip */
+/**
+ * Use to impose limits when dragging/extending - so impossible situations don't happen.
+ * Cant use the #SEQ_LEFTSEL and #SEQ_LEFTSEL directly because the strip may be in a meta-strip.
+ */
 void BKE_sequence_tx_handle_xlimits(Sequence *seq, int leftflag, int rightflag)
 {
   if (leftflag) {
@@ -214,7 +216,7 @@ bool BKE_sequence_tx_test(Sequence *seq)
 /**
  * Return \a true if given \a seq needs a complete cleanup of its cache when it is transformed.
  *
- * Some (effect) strip types need a complete recache of themselves when they are transformed,
+ * Some (effect) strip types need a complete re-cache of themselves when they are transformed,
  * because they do not 'contain' anything and do not have any explicit relations to other strips.
  */
 bool BKE_sequence_tx_fullupdate_test(Sequence *seq)


