[Bf-blender-cvs] SVN commit: /data/svn/bf-blender [60025] branches/soc-2013-motion_track/extern/libmv: Modify data structure for storing tracks

Joseph Mansfield sftrabbit at gmail.com
Tue Sep 10 23:07:01 CEST 2013


Revision: 60025
          http://projects.blender.org/scm/viewvc.php?view=rev&root=bf-blender&revision=60025
Author:   sftrabbit
Date:     2013-09-10 21:07:01 +0000 (Tue, 10 Sep 2013)
Log Message:
-----------
Modify data structure for storing tracks

The Tracks data structure stores Markers. In the current design, a marker's image identifier denotes the frame, within the marker's associated camera, that the marker belongs to; in other words, the image identifier is really being treated as a frame identifier. As a result, you could have markers in image 0 for camera 0 and in image 0 for camera 1, and they would not be considered to be in the same image.

However, this design doesn't make much sense, because the reconstruction algorithms don't care about frames or about which camera a frame came from, only that there is a set of images observing the same scene. A better design is for all frames from all cameras to have unique image identifiers, with the camera identifiers denoting subsets of those images. No image captured by camera 0 will share an image identifier with an image from camera 1. This way, the algorithms can continue to treat all frames as one big set of images, while the camera identifier associates each image with the camera it originated from. This makes it much easier to implement multicamera reconstruction without significant changes to the way libmv behaves.
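To illustrate the idea, here is a minimal, self-contained sketch. The ImageRegistry helper and its method names are hypothetical, not the actual libmv classes; it only shows how globally unique image identifiers could be assigned per (camera, frame) pair so that frame 0 of camera 0 and frame 0 of camera 1 never collide.

// Sketch only (hypothetical names, not the libmv API): every image from every
// camera gets a globally unique image id; the camera id merely groups images.
#include <iostream>
#include <map>
#include <utility>

struct Marker {
  int image;   // globally unique across all cameras
  int camera;  // which camera the image came from
  int track;
  double x, y;
};

class ImageRegistry {
 public:
  // Returns the unique image id for a (camera, frame) pair, creating one if
  // this pair has not been seen before.
  int ImageForFrame(int camera, int frame) {
    std::pair<int, int> key(camera, frame);
    std::map<std::pair<int, int>, int>::iterator it = image_for_frame_.find(key);
    if (it != image_for_frame_.end()) return it->second;
    int image = next_image_++;
    image_for_frame_[key] = image;
    camera_for_image_[image] = camera;
    return image;
  }

  // Recovers which camera an image originated from.
  int CameraForImage(int image) const { return camera_for_image_.at(image); }

 private:
  int next_image_ = 0;
  std::map<std::pair<int, int>, int> image_for_frame_;
  std::map<int, int> camera_for_image_;
};

int main() {
  ImageRegistry registry;
  // Frame 0 of two different cameras maps to two distinct image ids, so the
  // reconstruction code can keep treating all frames as one big set of images.
  int image_a = registry.ImageForFrame(/*camera=*/0, /*frame=*/0);
  int image_b = registry.ImageForFrame(/*camera=*/1, /*frame=*/0);
  std::cout << image_a << " vs " << image_b << "\n";      // prints "0 vs 1"
  std::cout << registry.CameraForImage(image_b) << "\n";  // prints "1"
  return 0;
}

With unique image identifiers, lookups such as ViewForImage() and MarkersInImage() only need the image id, which is exactly why the changes below drop the camera argument from those calls.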

Modified Paths:
--------------
    branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/bundle.cc
    branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc
    branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/intersect.cc
    branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/keyframe_selection.cc
    branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/modal_solver.cc
    branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/pipeline.cc
    branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/reconstruction.cc
    branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/reconstruction.h
    branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/reconstruction_scale.cc
    branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/tracks.cc
    branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/tracks.h
    branches/soc-2013-motion_track/extern/libmv/libmv-capi.cc
    branches/soc-2013-motion_track/extern/libmv/libmv-capi.h
    branches/soc-2013-motion_track/extern/libmv/libmv-capi_stub.cc

Modified: branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/bundle.cc
===================================================================
--- branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/bundle.cc	2013-09-10 20:45:47 UTC (rev 60024)
+++ branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/bundle.cc	2013-09-10 21:07:01 UTC (rev 60025)
@@ -255,7 +255,7 @@
   all_cameras_R_t.resize(max_image + 1);
 
   for (int i = 0; i <= max_image; i++) {
-    const EuclideanView *view = reconstruction.ViewForImage(0, i);
+    const EuclideanView *view = reconstruction.ViewForImage(i);
 
     if (!view) {
       continue;
@@ -276,7 +276,7 @@
   int max_image = tracks.MaxImage();
 
   for (int i = 0; i <= max_image; i++) {
-    EuclideanView *view = reconstruction->ViewForImage(0, i);
+    EuclideanView *view = reconstruction->ViewForImage(i);
 
     if (!view) {
       continue;
@@ -343,7 +343,7 @@
       int max_image = tracks.MaxImage();
       bool is_first_camera = true;
       for (int i = 0; i <= max_image; i++) {
-        const EuclideanView *view = reconstruction->ViewForImage(0, i);
+        const EuclideanView *view = reconstruction->ViewForImage(i);
         if (view) {
           double *current_camera_R_t = &(*all_cameras_R_t)[i](0);
 
@@ -455,8 +455,7 @@
   bool have_locked_camera = false;
   for (int i = 0; i < markers.size(); ++i) {
     const Marker &marker = markers[i];
-    int camera = marker.camera;
-    EuclideanView *view = reconstruction->ViewForImage(camera, marker.image);
+    EuclideanView *view = reconstruction->ViewForImage(marker.image);
     EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
     if (view == NULL || point == NULL) {
       continue;

Modified: branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc
===================================================================
--- branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc	2013-09-10 20:45:47 UTC (rev 60024)
+++ branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc	2013-09-10 21:07:01 UTC (rev 60025)
@@ -63,8 +63,8 @@
   GetImagesInMarkers(markers, &image1, &image2);
 
   Mat x1, x2;
-  CoordinatesForMarkersInImage(markers, camera, image1, &x1);
-  CoordinatesForMarkersInImage(markers, camera, image2, &x2);
+  CoordinatesForMarkersInImage(markers, image1, &x1);
+  CoordinatesForMarkersInImage(markers, image2, &x2);
 
   Mat3 F;
   NormalizedEightPointSolver(x1, x2, &F);
@@ -151,8 +151,8 @@
   GetImagesInMarkers(markers, &image1, &image2);
 
   Mat x1, x2;
-  CoordinatesForMarkersInImage(markers, 0, image1, &x1);
-  CoordinatesForMarkersInImage(markers, 0, image2, &x2);
+  CoordinatesForMarkersInImage(markers, image1, &x1);
+  CoordinatesForMarkersInImage(markers, image2, &x2);
 
   Mat3 F;
   NormalizedEightPointSolver(x1, x2, &F);

Modified: branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/intersect.cc
===================================================================
--- branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/intersect.cc	2013-09-10 20:45:47 UTC (rev 60024)
+++ branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/intersect.cc	2013-09-10 21:07:01 UTC (rev 60025)
@@ -78,7 +78,7 @@
   vector<Mat34> cameras;
   Mat34 P;
   for (int i = 0; i < markers.size(); ++i) {
-    EuclideanView *view = reconstruction->ViewForImage(0, markers[i].image);
+    EuclideanView *view = reconstruction->ViewForImage(markers[i].image);
     P_From_KRt(K, view->R, view->t, &P);
     cameras.push_back(P);
   }
@@ -103,7 +103,7 @@
   for (int i = 0; i < markers.size(); ++i) {
     const Marker &marker = markers[i];
     const EuclideanView &view =
-        *reconstruction->ViewForImage(0, marker.image);
+        *reconstruction->ViewForImage(marker.image);
 
     problem.AddResidualBlock(
         new ceres::AutoDiffCostFunction<
@@ -131,7 +131,7 @@
   // Try projecting the point; make sure it's in front of everyone.
   for (int i = 0; i < cameras.size(); ++i) {
     const EuclideanView &view =
-        *reconstruction->ViewForImage(0, markers[i].image);
+        *reconstruction->ViewForImage(markers[i].image);
     Vec3 x = view.R * X + view.t;
     if (x(2) < 0) {
       LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image
@@ -164,7 +164,7 @@
     residuals.setZero();
     for (int i = 0; i < markers.size(); ++i) {
       const ProjectiveView &view =
-          *reconstruction.ViewForImage(0, markers[i].image);
+          *reconstruction.ViewForImage(markers[i].image);
       Vec3 projected = view.P * X;
       projected /= projected(2);
       residuals[2*i + 0] = projected(0) - markers[i].x;
@@ -187,7 +187,7 @@
   // Get the cameras to use for the intersection.
   vector<Mat34> cameras;
   for (int i = 0; i < markers.size(); ++i) {
-    ProjectiveView *view = reconstruction->ViewForImage(0, markers[i].image);
+    ProjectiveView *view = reconstruction->ViewForImage(markers[i].image);
     cameras.push_back(view->P);
   }
 
@@ -215,7 +215,7 @@
   // Try projecting the point; make sure it's in front of everyone.
   for (int i = 0; i < cameras.size(); ++i) {
     const ProjectiveView &view =
-        *reconstruction->ViewForImage(0, markers[i].image);
+        *reconstruction->ViewForImage(markers[i].image);
     Vec3 x = view.P * X;
     if (x(2) < 0) {
       LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image

Modified: branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/keyframe_selection.cc
===================================================================
--- branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/keyframe_selection.cc	2013-09-10 20:45:47 UTC (rev 60024)
+++ branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/keyframe_selection.cc	2013-09-10 21:07:01 UTC (rev 60025)
@@ -332,16 +332,16 @@
          candidate_image++) {
       // Conjunction of all markers from both keyframes
       vector<Marker> all_markers =
-        tracks.MarkersInBothImages(kSelectionCamera, current_keyframe, candidate_image);
+        tracks.MarkersInBothImages(current_keyframe, candidate_image);
 
       // Match keypoints between frames current_keyframe and candidate_image
       vector<Marker> tracked_markers =
-        tracks.MarkersForTracksInBothImages(kSelectionCamera, current_keyframe, candidate_image);
+        tracks.MarkersForTracksInBothImages(current_keyframe, candidate_image);
 
       // Correspondences in normalized space
       Mat x1, x2;
-      CoordinatesForMarkersInImage(tracked_markers, kSelectionCamera, current_keyframe, &x1);
-      CoordinatesForMarkersInImage(tracked_markers, kSelectionCamera, candidate_image, &x2);
+      CoordinatesForMarkersInImage(tracked_markers, current_keyframe, &x1);
+      CoordinatesForMarkersInImage(tracked_markers, candidate_image, &x2);
 
       LG << "Found " << x1.cols()
          << " correspondences between " << current_keyframe

Modified: branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/modal_solver.cc
===================================================================
--- branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/modal_solver.cc	2013-09-10 20:45:47 UTC (rev 60024)
+++ branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/modal_solver.cc	2013-09-10 21:07:01 UTC (rev 60025)
@@ -112,7 +112,7 @@
   ceres::AngleAxisToQuaternion(&zero_rotation(0), &quaternion(0));
 
   for (int image = 0; image <= max_image; ++image) {
-    vector<Marker> all_markers = tracks.MarkersInImage(kModalCamera, image);
+    vector<Marker> all_markers = tracks.MarkersInImage(image);
 
     ModalSolverLogProress(update_callback, (float) image / max_image);
 
@@ -226,7 +226,7 @@
     // and reproject them on sphere to obtain 3D position/
     for (int track = 0; track <= max_track; ++track) {
       if (!reconstruction->PointForTrack(track)) {
-        Marker marker = tracks.MarkerInImageForTrack(kModalCamera, image, track);
+        Marker marker = tracks.MarkerInImageForTrack(image, track);
 
         if (marker.image == image) {
           // New track appeared on this image,

Modified: branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/pipeline.cc
===================================================================
--- branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/pipeline.cc	2013-09-10 20:45:47 UTC (rev 60024)
+++ branches/soc-2013-motion_track/extern/libmv/libmv/simple_pipeline/pipeline.cc	2013-09-10 21:07:01 UTC (rev 60025)
@@ -169,7 +169,7 @@
 
       vector<Marker> reconstructed_markers;
       for (int i = 0; i < all_markers.size(); ++i) {
-        if (reconstruction->ViewForImage(all_markers[i].camera, all_markers[i].image)) {
+        if (reconstruction->ViewForImage(all_markers[i].image)) {
           reconstructed_markers.push_back(all_markers[i]);
         }
       }
@@ -199,11 +199,11 @@
     // Do all possible resections.
     num_resects = 0;
     for (int image = 0; image <= max_image; ++image) {
-      if (reconstruction->ViewForImage(0, image)) {
+      if (reconstruction->ViewForImage(image)) {
         LG << "Skipping frame: " << image;
         continue;
       }
-      vector<Marker> all_markers = tracks.MarkersInImage(0, image);
+      vector<Marker> all_markers = tracks.MarkersInImage(image);
       LG << "Got " << all_markers.size() << " markers for image " << image;
 
       vector<Marker> reconstructed_markers;
@@ -239,11 +239,11 @@
   // One last pass...
   num_resects = 0;
   for (int image = 0; image <= max_image; ++image) {
-    if (reconstruction->ViewForImage(0, image)) {
+    if (reconstruction->ViewForImage(image)) {
       LG << "Skipping frame: " << image;
       continue;
     }
-    vector<Marker> all_markers = tracks.MarkersInImage(0, image);
+    vector<Marker> all_markers = tracks.MarkersInImage(image);
 
     vector<Marker> reconstructed_markers;
     for (int i = 0; i < all_markers.size(); ++i) {
@@ -282,7 +282,7 @@
   vector<Marker> markers = image_tracks.AllMarkers();
   for (int i = 0; i < markers.size(); ++i) {
     const typename PipelineRoutines::View *view =
-        reconstruction.ViewForImage(markers[i].camera, markers[i].image);
+        reconstruction.ViewForImage(markers[i].image);
     const typename PipelineRoutines::Point *point =

@@ Diff output truncated at 10240 characters. @@


