[Bf-blender-cvs] [baaa2d6] master: Change Request: use weight centre of location tracks as pivot

Ichthyostega noreply at git.blender.org
Tue Aug 23 11:53:45 CEST 2016


Commit: baaa2d6d396ef8003cf2241ae0a6304d6aebb496
Author: Ichthyostega
Date:   Mon Aug 22 07:02:22 2016 +0200
Branches: master
https://developer.blender.org/rBbaaa2d6d396ef8003cf2241ae0a6304d6aebb496

Change Request: use weight centre of location tracks as pivot

Previously, this extension used the translation-compensated image centre
as the reference point for rotation measurement and compensation. During
user tests, it turned out that this setup tends to give poor results
with very simple track configurations.

This can be improved by using the weighted average of the location
tracks for each frame as pivot point. But there is a technical problem:
the existing public API functions do not allow passing the pivot point
for each frame alongside the stabilisation data. Thus this change
implements a trick: a compensation shift is packaged into the
translation offset, so the rotation can be performed around a fixed
point (the centre of the frame). The compensation shift then moves
the image as if it had been rotated around the desired pivot point.

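For illustration, here is a minimal standalone sketch of that trick (the
helper name and the plain <math.h> calls are illustrative assumptions, not
code from this patch): rotate and scale the desired pivot around the fixed
frame centre, then fold the difference between the intended and the rotated
pivot into the translation offset.

#include <math.h>

/* Sketch only: `origin` is the fixed rotation centre supported by the public
 * API (the frame centre), `pivot` is the desired per-frame pivot point, and
 * `angle`/`scale` are the stabilization rotation and scale for this frame. */
static void compensation_shift_sketch(const float origin[2],
                                      const float pivot[2],
                                      float angle,
                                      float scale,
                                      float r_offset[2])
{
	/* Where the desired pivot ends up after rotating/scaling around `origin`. */
	const float dx = pivot[0] - origin[0];
	const float dy = pivot[1] - origin[1];
	const float rot_x = origin[0] + scale * (cosf(angle) * dx - sinf(angle) * dy);
	const float rot_y = origin[1] + scale * (sinf(angle) * dx + cosf(angle) * dy);

	/* Shift that maps the rotated pivot back onto the intended pivot. Adding
	 * this to the translation compensation makes the fixed-origin rotation
	 * behave like a rotation around the desired pivot point. */
	r_offset[0] = pivot[0] - rot_x;
	r_offset[1] = pivot[1] - rot_y;
}

In the patch itself this is what compensate_rotation_center() below computes
with Blender's BLI math helpers (rotate_m2(), mul_m2v2()), adding the result
to result_translation.
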
===================================================================

M	source/blender/blenkernel/intern/tracking_stabilize.c

===================================================================

diff --git a/source/blender/blenkernel/intern/tracking_stabilize.c b/source/blender/blenkernel/intern/tracking_stabilize.c
index 93addc4..758e1c4 100644
--- a/source/blender/blenkernel/intern/tracking_stabilize.c
+++ b/source/blender/blenkernel/intern/tracking_stabilize.c
@@ -452,6 +452,29 @@ static MovieTrackingMarker *get_tracking_data_point(
 	}
 }
 
+
+/* Define the reference point for rotation/scale measurement and compensation.
+ * The stabilizer works by assuming the image was distorted by an affine linear
+ * transform, i.e. it was rotated and stretched around this reference point
+ * (pivot point) and then shifted laterally. Any scale and orientation changes
+ * will be picked up relative to this point, and later the image will be
+ * stabilized by rotating around this point. The result can only be as
+ * accurate as this pivot point matches the real rotation center of the
+ * actual movements; thus any scheme to define a pivot point is always
+ * guesswork.
+ *
+ * As a simple default, we use the weighted average of the location markers
+ * of the current frame as pivot point. TODO: it is planned to add further
+ * options, e.g. anchoring the pivot point at the canvas. Moreover, it is
+ * planned to allow for a user-controllable offset.
+ */
+static void setup_pivot(const float ref_pos[2], float r_pivot[2])
+{
+	zero_v2(r_pivot);  /* TODO: add an animated offset position here. */
+	add_v2_v2(r_pivot, ref_pos);
+}
+
+
 /* Calculate the contribution of a single track at the time position (frame) of
  * the given marker. Each track has a local reference frame, which is as close
  * as possible to the global anchor_frame. Thus the translation contribution is
@@ -508,22 +531,21 @@ static void translation_contribution(TrackStabilizationBase *track_ref,
  *   in the same framework, we average the scales as logarithms.
  *
  * aspect is a total aspect ratio of the undistorted image (includes fame and
- * pixel aspect).
+ * pixel aspect). The function returns a quality factor, which can be used
+ * to damp the contributions of points in close proximity to the pivot point,
+ * since such contributions might be dominated by rounding errors and thus
+ * poison the calculated average. When the quality factor goes towards zero,
+ * the weight of this contribution should be reduced accordingly.
  */
-static void rotation_contribution(TrackStabilizationBase *track_ref,
-                                  MovieTrackingMarker *marker,
-                                  float aspect,
-                                  float target_pos[2],
-                                  float averaged_translation_contribution[2],
-                                  float *result_angle,
-                                  float *result_scale)
+static float rotation_contribution(TrackStabilizationBase *track_ref,
+                                   MovieTrackingMarker *marker,
+                                   const float aspect,
+                                   const float pivot[2],
+                                   float *result_angle,
+                                   float *result_scale)
 {
-	float len;
+	float len, quality;
 	float pos[2];
-	float pivot[2];
-	copy_v2_fl(pivot, 0.5f);  /* Use center of frame as hard wired pivot. */
-	add_v2_v2(pivot, averaged_translation_contribution);
-	sub_v2_v2(pivot, target_pos);
 	sub_v2_v2v2(pos, marker->pos, pivot);
 
 	pos[0] *= aspect;
@@ -531,9 +553,47 @@ static void rotation_contribution(TrackStabilizationBase *track_ref,
 
 	*result_angle = atan2f(pos[1],pos[0]);
 
-	len = len_v2(pos) + SCALE_ERROR_LIMIT_BIAS;
+	len = len_v2(pos);
+
+	/* Prevent points very close to the pivot point from poisoning the result. */
+	quality = 1.0f - expf(-(len * len) / (SCALE_ERROR_LIMIT_BIAS * SCALE_ERROR_LIMIT_BIAS));
+	len += SCALE_ERROR_LIMIT_BIAS;
+
 	*result_scale = len * track_ref->stabilization_scale_base;
 	BLI_assert(0.0 < *result_scale);
+
+	return quality;
+}
+
+
+/* Workaround to allow for rotation around an arbitrary pivot point.
+ * Currently, the public API functions do not support this flexibility.
+ * Rather, rotation will always be applied around a fixed origin.
+ * As a workaround, we shift the image after rotation to match the
+ * desired rotation centre. Since this offset needs to be applied
+ * after the rotation and scaling, we can collapse it with the
+ * translation compensation, which is also a lateral shift (offset).
+ * The offset to apply is intended_pivot - rotated_pivot.
+ */
+static void compensate_rotation_center(const int size, float aspect,
+                                       const float angle,
+                                       const float scale,
+                                       const float pivot[2],
+                                       float result_translation[2])
+{
+	const float origin[2]  = {0.5f*aspect*size, 0.5f*size};
+	float intended_pivot[2], rotated_pivot[2];
+	float rotation_mat[2][2];
+
+	copy_v2_v2(intended_pivot, pivot);
+	copy_v2_v2(rotated_pivot, pivot);
+	rotate_m2(rotation_mat, +angle);
+	sub_v2_v2(rotated_pivot, origin);
+	mul_m2v2(rotation_mat, rotated_pivot);
+	mul_v2_fl(rotated_pivot, scale);
+	add_v2_v2(rotated_pivot, origin);
+	add_v2_v2(result_translation, intended_pivot);
+	sub_v2_v2(result_translation, rotated_pivot);
 }
 
 
@@ -553,6 +613,7 @@ static bool average_track_contributions(StabContext *ctx,
                                         int framenr,
                                         float aspect,
                                         float r_translation[2],
+                                        float r_pivot[2],
                                         float *r_angle,
                                         float *r_scale_step)
 {
@@ -561,12 +622,15 @@ static bool average_track_contributions(StabContext *ctx,
 	MovieTrackingTrack *track;
 	MovieTracking *tracking = ctx->tracking;
 	MovieTrackingStabilization *stab = &tracking->stabilization;
+	float ref_pos[2];
 	BLI_assert(stab->flag & TRACKING_2D_STABILIZATION);
 
 	zero_v2(r_translation);
 	*r_scale_step = 0.0f;  /* logarithm */
 	*r_angle = 0.0f;
 
+	zero_v2(ref_pos);
+
 	ok = false;
 	weight_sum = 0.0f;
 	for (track = tracking->tracks.first; track; track = track->next) {
@@ -586,8 +650,10 @@ static bool average_track_contributions(StabContext *ctx,
 				float offset[2];
 				weight_sum += weight;
 				translation_contribution(stabilization_base, marker, offset);
-				mul_v2_fl(offset, weight);
-				add_v2_v2(r_translation, offset);
+				r_translation[0] += weight * offset[0];
+				r_translation[1] += weight * offset[1];
+				ref_pos[0] += weight * marker->pos[0];
+				ref_pos[1] += weight * marker->pos[1];
 				ok |= (weight_sum > EPSILON_WEIGHT);
 			}
 		}
@@ -596,8 +662,11 @@ static bool average_track_contributions(StabContext *ctx,
 		return false;
 	}
 
+	ref_pos[0] /= weight_sum;
+	ref_pos[1] /= weight_sum;
 	r_translation[0] /= weight_sum;
 	r_translation[1] /= weight_sum;
+	setup_pivot(ref_pos, r_pivot);
 
 	if (!(stab->flag & TRACKING_STABILIZE_ROTATION)) {
 		return ok;
@@ -619,17 +688,15 @@ static bool average_track_contributions(StabContext *ctx,
 				TrackStabilizationBase *stabilization_base =
 				        access_stabilization_baseline_data(ctx, track);
 				BLI_assert(stabilization_base != NULL);
-				float rotation, scale;
-				float target_pos[2];
+				float rotation, scale, quality;
+				quality = rotation_contribution(stabilization_base,
+				                                marker,
+				                                aspect,
+				                                r_pivot,
+				                                &rotation,
+				                                &scale);
+				weight *= quality;
 				weight_sum += weight;
-				get_animated_target_pos(ctx, framenr, target_pos);
-				rotation_contribution(stabilization_base,
-				                      marker,
-				                      aspect,
-				                      target_pos,
-				                      r_translation,
-				                      &rotation,
-				                      &scale);
 				*r_angle += rotation * weight;
 				if (stab->flag & TRACKING_STABILIZE_SCALE) {
 					*r_scale_step += logf(scale) * weight;
@@ -656,6 +723,75 @@ static bool average_track_contributions(StabContext *ctx,
 }
 
 
+/* Calculate the weighted center of the location tracks for the given frame.
+ * This function performs calculations similar to average_track_contributions(),
+ * but does not require the tracks to be initialized for stabilisation. Moreover,
+ * when there is no usable tracking data for the given frame number, data from
+ * a neighbouring frame is used. Thus this function can be used to calculate
+ * a starting point during initialization.
+ */
+static void average_marker_positions(StabContext *ctx, int framenr, float r_ref_pos[2])
+{
+	bool ok = false;
+	float weight_sum;
+	MovieTrackingTrack *track;
+	MovieTracking *tracking = ctx->tracking;
+
+	zero_v2(r_ref_pos);
+	weight_sum = 0.0f;
+	for (track = tracking->tracks.first; track; track = track->next) {
+		if (track->flag & TRACK_USE_2D_STAB) {
+			float weight = 0.0f;
+			MovieTrackingMarker *marker =
+				get_tracking_data_point(ctx, track, framenr, &weight);
+			if (marker) {
+				weight_sum += weight;
+				r_ref_pos[0] += weight * marker->pos[0];
+				r_ref_pos[1] += weight * marker->pos[1];
+				ok |= (weight_sum > EPSILON_WEIGHT);
+			}
+		}
+	}
+	if (ok) {
+		r_ref_pos[0] /= weight_sum;
+		r_ref_pos[1] /= weight_sum;
+	} else {
+		/* No usable tracking data on any track on this frame.
+		 * Use data from neighbouring frames to extrapolate...
+		 */
+		int next_lower = MINAFRAME;
+		int next_higher = MAXFRAME;
+		use_values_from_fcurves(ctx, true);
+		for (track = tracking->tracks.first; track; track = track->next) {
+			/* Note: we deliberately do not care if this track
+			 *       is already initialized for stabilisation */
+			if (track->flag & TRACK_USE_2D_STAB) {
+				int startpoint = search_closest_marker_index(track, framenr);
+				retrieve_next_higher_usable_frame(ctx,
+                                                  track,
+                                                  startpoint,
+                                                  framenr,
+             

@@ Diff output truncated at 10240 characters. @@



