MRPT  2.0.0
chessboard_stereo_camera_calib.cpp
1 /* +------------------------------------------------------------------------+
2  | Mobile Robot Programming Toolkit (MRPT) |
3  | https://www.mrpt.org/ |
4  | |
5  | Copyright (c) 2005-2020, Individual contributors, see AUTHORS file |
6  | See: https://www.mrpt.org/Authors - All rights reserved. |
7  | Released under BSD License. See: https://www.mrpt.org/License |
8  +------------------------------------------------------------------------+ */
9 
10 #include "vision-precomp.h" // Precompiled headers
11 
15 #include <mrpt/math/wrap2pi.h>
16 #include <mrpt/poses/CPose3DQuat.h>
17 #include <mrpt/poses/Lie/SE.h>
18 #include <mrpt/system/filesystem.h>
19 #include <mrpt/vision/chessboard_find_corners.h>
20 #include <mrpt/vision/chessboard_stereo_camera_calib.h>
21 #include <mrpt/vision/pinhole.h>
22 #include <Eigen/Dense>
23 #include <algorithm> // reverse()
24 
25 #include "chessboard_stereo_camera_calib_internal.h"
26 
27 //#define USE_NUMERIC_JACOBIANS
28 //#define COMPARE_NUMERIC_JACOBIANS
29 
30 #ifdef USE_NUMERIC_JACOBIANS
31 #include <mrpt/math/num_jacobian.h>
32 #endif
33 
34 using namespace mrpt;
35 using namespace mrpt::vision;
36 using namespace mrpt::img;
37 using namespace mrpt::poses;
38 using namespace mrpt::math;
39 using namespace std;
40 
41 /* -------------------------------------------------------
42  checkerBoardStereoCalibration
43  ------------------------------------------------------- */
44 bool mrpt::vision::checkerBoardStereoCalibration(
45  TCalibrationStereoImageList& images, const TStereoCalibParams& p,
46  TStereoCalibResults& out)
47 {
48  try
49  {
50  ASSERT_(p.check_size_x > 2);
51  ASSERT_(p.check_size_y > 2);
54  const bool user_wants_use_robust = p.use_robust_kernel;
55 
56  if (images.size() < 1)
57  {
58  if (p.verbose) std::cout << "ERROR: No input images." << std::endl;
59  return false;
60  }
61 
62  const unsigned CORNERS_COUNT = p.check_size_x * p.check_size_y;
63  const TImageSize check_size =
64  TImageSize(p.check_size_x, p.check_size_y);
65  TImageStereoCallbackData cbPars;
66 
67  // For each image, find checkerboard corners:
68  // -----------------------------------------------
69  // Left/Right sizes (may be different)
70  TImageSize imgSize[2] = {TImageSize(0, 0), TImageSize(0, 0)};
71 
72  vector<size_t> valid_image_pair_indices; // Indices in images[] which
73  // are valid pairs to be used
74  // in the optimization
75 
76  for (size_t i = 0; i < images.size(); i++)
77  {
78  // Do loop for each left/right image:
79  TImageCalibData* dats[2] = {&images[i].left, &images[i].right};
80  bool corners_found[2] = {false, false};
81 
82  for (int lr = 0; lr < 2; lr++)
83  {
84  TImageCalibData& dat = *dats[lr];
85  dat.detected_corners.clear();
86 
87  // Make grayscale version:
88  const CImage img_gray(
89  dat.img_original, FAST_REF_OR_CONVERT_TO_GRAY);
90 
91  if (!i)
92  {
93  imgSize[lr] = img_gray.getSize();
94  if (lr == 0)
95  {
96  out.cam_params.leftCamera.ncols = imgSize[lr].x;
97  out.cam_params.leftCamera.nrows = imgSize[lr].y;
98  }
99  else
100  {
101  out.cam_params.rightCamera.ncols = imgSize[lr].x;
102  out.cam_params.rightCamera.nrows = imgSize[lr].y;
103  }
104  }
105  else
106  {
107  if (imgSize[lr].y != (int)img_gray.getHeight() ||
108  imgSize[lr].x != (int)img_gray.getWidth())
109  {
110  std::cout << "ERROR: All the images in each left/right "
111  "channel must have the same size."
112  << std::endl;
113  return false;
114  }
115  }
116 
117  // Do detection (this includes the "refine corners" with
118  // cvFindCornerSubPix):
119  corners_found[lr] = mrpt::vision::findChessboardCorners(
120  img_gray, dat.detected_corners, p.check_size_x,
121  p.check_size_y,
122  p.normalize_image // normalize_image
123  );
124 
125  if (corners_found[lr] &&
126  dat.detected_corners.size() != CORNERS_COUNT)
127  corners_found[lr] = false;
128 
129  if (p.verbose)
130  cout << format(
131  "%s img #%u: %s\n", lr == 0 ? "LEFT" : "RIGHT",
132  static_cast<unsigned int>(i),
133  corners_found[lr] ? "DETECTED" : "NOT DETECTED");
134  // User Callback?
135  if (p.callback)
136  {
137  cbPars.calibRound = -1; // Detecting corners
138  cbPars.current_iter = 0;
139  cbPars.current_rmse = 0;
140  cbPars.nImgsProcessed = i * 2 + lr + 1;
141  cbPars.nImgsToProcess = images.size() * 2;
142  (*p.callback)(cbPars, p.callback_user_param);
143  }
144 
145  if (corners_found[lr])
146  {
147  // Draw the checkerboard in the corresponding image:
148  if (!dat.img_original.isExternallyStored())
149  {
150  // Checkerboard as color image:
151  dat.img_original.colorImage(dat.img_checkboard);
152  dat.img_checkboard.drawChessboardCorners(
153  dat.detected_corners, check_size.x, check_size.y);
154  }
155  }
156 
157  } // end for lr
158 
159  // We just finished detecting corners in a left/right pair.
160  // Only if corners were detected perfectly in BOTH images, add this
161  // image pair as a good one for optimization:
162  if (corners_found[0] && corners_found[1])
163  {
164  valid_image_pair_indices.push_back(i);
165 
166  // Consistency between left/right pair: the corners MUST BE
167  // ORDERED so they match to each other,
168  // and the criterion must be the same in ALL pairs to avoid
169  // rubbish optimization:
170 
171  // Key idea: Generate a representative vector that goes along
172  // rows and columns.
173  // Check the angle of those director vectors between L/R images.
174  // That can be done
175  // via the dot product. Swap rows/columns order as needed.
176  bool has_to_redraw_corners = false;
177 
178  const TPixelCoordf pt_l0 = images[i].left.detected_corners[0],
179  pt_l1 = images[i].left.detected_corners[1],
180  pt_r0 = images[i].right.detected_corners[0],
181  pt_r1 = images[i].right.detected_corners[1];
182  const auto Al =
183  TPoint2D(pt_l1.x, pt_l1.y) - TPoint2D(pt_l0.x, pt_l0.y);
184  const auto Ar =
185  TPoint2D(pt_r1.x, pt_r1.y) - TPoint2D(pt_r0.x, pt_r0.y);
186 
187  // If the dot product is negative, we have INVERTED order of
188  // corners:
189  if (Al.x * Ar.x + Al.y * Ar.y < 0)
190  {
191  has_to_redraw_corners = true;
192  // Invert all corners:
193  std::reverse(
194  images[i].right.detected_corners.begin(),
195  images[i].right.detected_corners.end());
196  }
197 
198  if (has_to_redraw_corners)
199  {
200  // Checkerboard as color image:
201  images[i].right.img_original.colorImage(
202  images[i].right.img_checkboard);
203  images[i].right.img_checkboard.drawChessboardCorners(
204  images[i].right.detected_corners, check_size.x,
205  check_size.y);
206  }
207  }
208 
209  } // end find corners
210 
211  if (p.verbose)
212  std::cout << valid_image_pair_indices.size()
213  << " valid image pairs.\n";
214  if (valid_image_pair_indices.empty())
215  {
216  if (p.verbose)
217  std::cerr << "ERROR: No valid images. Perhaps the checkerboard "
218  "size is incorrect?\n";
219  return false;
220  }
221 
222  // Create reference coordinates of the detected corners, in "global"
223  // coordinates:
224  // -------------------------------------------------------------------------------
225  vector<TPoint3D> obj_points;
226  obj_points.reserve(CORNERS_COUNT);
227  for (unsigned int y = 0; y < p.check_size_y; y++)
228  for (unsigned int x = 0; x < p.check_size_x; x++)
229  obj_points.emplace_back(
230  p.check_squares_length_X_meters * x,
231  p.check_squares_length_Y_meters * y, 0);
232 
233  // ----------------------------------------------------------------------------------
234  // Levenberg-Marquardt algorithm
235  //
236  // State space (poses are actually on-manifold increments):
237  // N*left_cam_pose + right2left_pose + left_cam_params +
238  // right_cam_params
239  // N * 6 + 6 + 9 + 9
240  // = 6N+24
241  // ----------------------------------------------------------------------------------
242  const size_t N = valid_image_pair_indices.size();
243  const size_t nObs = 2 * N * CORNERS_COUNT; // total number of valid
244  // observations (px,py)
245  // taken into account in the
246  // optimization
247 
248  const double tau = 0.3;
249  const double t1 = 1e-8;
250  const double t2 = 1e-8;
251  const double MAX_LAMBDA = 1e20;
252 
253  // Initial state:
254  // Within lm_stat: CVectorFixedDouble<9> left_cam_params,
255  // right_cam_params; // [fx fy cx cy k1 k2 k3 t1 t2]
256  lm_stat_t lm_stat(images, valid_image_pair_indices, obj_points);
257 
258  lm_stat.left_cam_params[0] = imgSize[0].x * 0.9;
259  lm_stat.left_cam_params[1] = imgSize[0].y * 0.9;
260  lm_stat.left_cam_params[2] = imgSize[0].x * 0.5;
261  lm_stat.left_cam_params[3] = imgSize[0].y * 0.5;
262  lm_stat.left_cam_params[4] = lm_stat.left_cam_params[5] =
263  lm_stat.left_cam_params[6] = lm_stat.left_cam_params[7] =
264  lm_stat.left_cam_params[8] = 0;
265 
266  lm_stat.right_cam_params[0] = imgSize[1].x * 0.9;
267  lm_stat.right_cam_params[1] = imgSize[1].y * 0.9;
268  lm_stat.right_cam_params[2] = imgSize[1].x * 0.5;
269  lm_stat.right_cam_params[3] = imgSize[1].y * 0.5;
270  lm_stat.right_cam_params[4] = lm_stat.right_cam_params[5] =
271  lm_stat.right_cam_params[6] = lm_stat.right_cam_params[7] =
272  lm_stat.right_cam_params[8] = 0;
273 
274  // ===============================================================================
275  // Run stereo calibration in two stages:
276  // (0) Estimate all parameters except distortion
277  // (1) Estimate all parameters, using as starting point the guess from
278  // (0)
279  // ===============================================================================
280  size_t nUnknownsCamParams = 0;
281  size_t iter = 0;
282  double err = 0;
283  std::vector<size_t> vars_to_optimize;
284  // Hessian matrix (Declared here so it's accessible as the final
285  // uncertainty measure)
286  mrpt::math::CMatrixDouble H;
287 
288  for (int calibRound = 0; calibRound < 2; calibRound++)
289  {
290  cbPars.calibRound = calibRound;
291  if (p.verbose)
292  cout
293  << ((calibRound == 0) ? "LM calibration round #0: WITHOUT "
294  "distortion ----------\n"
295  : "LM calibration round #1: ALL "
296  "parameters --------------\n");
297 
298  // Select which cam. parameters to optimize (for each camera!) and
299  // which to leave fixed:
300  // Indices: [0 1 2 3 4 5 6 7 8]
301  // Variables:[fx fy cx cy k1 k2 k3 t1 t2]
302  vars_to_optimize.clear();
303  vars_to_optimize.push_back(0);
304  vars_to_optimize.push_back(1);
305  vars_to_optimize.push_back(2);
306  vars_to_optimize.push_back(3);
307  if (calibRound == 1)
308  {
309  if (p.optimize_k1) vars_to_optimize.push_back(4);
310  if (p.optimize_k2) vars_to_optimize.push_back(5);
311  if (p.optimize_t1) vars_to_optimize.push_back(6);
312  if (p.optimize_t2) vars_to_optimize.push_back(7);
313  if (p.optimize_k3) vars_to_optimize.push_back(8);
314  }
315 
316  nUnknownsCamParams = vars_to_optimize.size();
317 
318  // * N x Manifold Epsilon of left camera pose (6)
319  // * Manifold Epsilon of right2left pose (6)
320  // * Left-cam-params (<=9)
321  // * Right-cam-params (<=9)
322  const size_t nUnknowns = N * 6 + 6 + 2 * nUnknownsCamParams;
323 
324  // Residuals & Jacobians:
325  TResidualJacobianList res_jacob;
326  err = recompute_errors_and_Jacobians(
327  lm_stat, res_jacob, false /* no robust */,
328  p.robust_kernel_param);
329 
330  // Build linear system:
331  mrpt::math::CVectorDynamic<double> minus_g; // minus gradient
332  build_linear_system(res_jacob, vars_to_optimize, minus_g, H);
333 
334  ASSERT_EQUAL_(nUnknowns, (size_t)H.cols());
335  // Lev-Marq. parameters:
336  double nu = 2;
337  double lambda = tau * H.asEigen().diagonal().array().maxCoeff();
338  bool done = (minus_g.array().abs().maxCoeff() < t1);
339  int numItersImproving = 0;
340  bool use_robust = false;
341 
342  // Lev-Marq. main loop:
343  iter = 0;
344  while (iter < p.maxIters && !done)
345  {
346  // User Callback?
347  if (p.callback)
348  {
349  cbPars.current_iter = iter;
350  cbPars.current_rmse = std::sqrt(err / nObs);
351  (*p.callback)(cbPars, p.callback_user_param);
352  }
353 
354  // Solve for increment: (H + \lambda I) eps = -gradient
355  auto HH = H;
356  for (size_t i = 0; i < nUnknowns; i++) HH(i, i) += lambda;
357  // HH(i,i)*= (1.0 + lambda);
358 
359  Eigen::LLT<Eigen::MatrixXd> llt(
360  HH.asEigen().selfadjointView<Eigen::Lower>());
361  if (llt.info() != Eigen::Success)
362  {
363  lambda *= nu;
364  nu *= 2;
365  done = (lambda > MAX_LAMBDA);
366 
367  if (p.verbose && !done)
368  cout << "LM iter#" << iter
369  << ": Couldn't solve LLt, retrying with larger "
370  "lambda="
371  << lambda << endl;
372  continue;
373  }
374  const CVectorDynamic<double> eps(
375  llt.solve(minus_g.asEigen()).eval());
376 
377  const double eps_norm = eps.norm();
378  if (eps_norm < t2 * (eps_norm + t2))
379  {
380  if (p.verbose)
381  cout << "Termination criterion: eps_norm < "
382  "t2*(eps_norm+t2): "
383  << eps_norm << " < " << t2 * (eps_norm + t2)
384  << endl;
385  done = true;
386  break;
387  }
388 
389  // Tentative new state:
390  lm_stat_t new_lm_stat(
391  lm_stat); // images,valid_image_pair_indices,obj_points);
392  add_lm_increment(eps, vars_to_optimize, new_lm_stat);
393 
394  TResidualJacobianList new_res_jacob;
395 
396  // discriminant:
397  double err_new = recompute_errors_and_Jacobians(
398  new_lm_stat, new_res_jacob, use_robust,
399  p.robust_kernel_param);
400 
401  if (err_new < err)
402  {
403  // const double l = (err-err_new)/ (eps.dot( lambda*eps +
404  // minus_g) );
405 
406  if (numItersImproving++ > 5)
407  use_robust = user_wants_use_robust;
408 
409  // Good: Accept new values
410  if (p.verbose)
411  cout << "LM iter#" << iter
412  << ": Avr.err(px):" << std::sqrt(err / nObs)
413  << "->" << std::sqrt(err_new / nObs)
414  << " lambda:" << lambda << endl;
415 
416  // swap: faster than "lm_stat <- new_lm_stat"
417  lm_stat.swap(new_lm_stat);
418  res_jacob.swap(new_res_jacob);
419 
420  err = err_new;
421  build_linear_system(
422  res_jacob, vars_to_optimize, minus_g, H);
423 
424  // Too small gradient?
425  done = (minus_g.array().abs().maxCoeff() < t1);
426  // lambda *= max(1.0/3.0, 1-std::pow(2*l-1,3.0) );
427  lambda *= 0.6;
428  lambda = std::max(lambda, 1e-100);
429  nu = 2.0;
430 
431  iter++;
432  }
433  else
434  {
435  lambda *= nu;
436  if (p.verbose)
437  cout << "LM iter#" << iter << ": No update: err=" << err
438  << " -> err_new=" << err_new
439  << " retrying with larger lambda=" << lambda
440  << endl;
441  nu *= 2.0;
442  done = (lambda > MAX_LAMBDA);
443  }
444 
445  } // end while LevMarq.
446 
447  } // end for each calibRound = [0,1]
448  // -------------------------------------------------------------------------------
449  // End of Levenberg-Marquardt
450  // -------------------------------------------------------------------------------
451 
452  // Save final optimum values to the output structure
453  out.final_rmse = std::sqrt(err / nObs);
454  out.final_iters = iter;
455  out.final_number_good_image_pairs = N;
456 
457  // [fx fy cx cy k1 k2 k3 t1 t2]
458  out.cam_params.leftCamera.fx(lm_stat.left_cam_params[0]);
459  out.cam_params.leftCamera.fy(lm_stat.left_cam_params[1]);
460  out.cam_params.leftCamera.cx(lm_stat.left_cam_params[2]);
461  out.cam_params.leftCamera.cy(lm_stat.left_cam_params[3]);
462  out.cam_params.leftCamera.k1(lm_stat.left_cam_params[4]);
463  out.cam_params.leftCamera.k2(lm_stat.left_cam_params[5]);
464  out.cam_params.leftCamera.k3(lm_stat.left_cam_params[6]);
465  out.cam_params.leftCamera.p1(lm_stat.left_cam_params[7]);
466  out.cam_params.leftCamera.p2(lm_stat.left_cam_params[8]);
467 
468  // [fx fy cx cy k1 k2 k3 t1 t2]
469  out.cam_params.rightCamera.fx(lm_stat.right_cam_params[0]);
470  out.cam_params.rightCamera.fy(lm_stat.right_cam_params[1]);
471  out.cam_params.rightCamera.cx(lm_stat.right_cam_params[2]);
472  out.cam_params.rightCamera.cy(lm_stat.right_cam_params[3]);
473  out.cam_params.rightCamera.k1(lm_stat.right_cam_params[4]);
474  out.cam_params.rightCamera.k2(lm_stat.right_cam_params[5]);
475  out.cam_params.rightCamera.k3(lm_stat.right_cam_params[6]);
476  out.cam_params.rightCamera.p1(lm_stat.right_cam_params[7]);
477  out.cam_params.rightCamera.p2(lm_stat.right_cam_params[8]);
478 
479  const auto l2r_pose = -mrpt::poses::CPose3D(lm_stat.right2left_pose);
480  mrpt::math::CQuaternionDouble l2r_quat;
481  l2r_pose.getAsQuaternion(l2r_quat);
482 
483  out.cam_params.rightCameraPose.x = l2r_pose.x();
484  out.cam_params.rightCameraPose.y = l2r_pose.y();
485  out.cam_params.rightCameraPose.z = l2r_pose.z();
486  out.cam_params.rightCameraPose.qr = l2r_quat.r();
487  out.cam_params.rightCameraPose.qx = l2r_quat.x();
488  out.cam_params.rightCameraPose.qy = l2r_quat.y();
489  out.cam_params.rightCameraPose.qz = l2r_quat.z();
490 
491  // R2L pose:
492  out.right2left_camera_pose =
493  mrpt::poses::CPose3D(lm_stat.right2left_pose);
494 
495  // All the estimated camera poses:
496  out.left_cam_poses = lm_stat.left_cam_poses;
497 
498  out.image_pair_was_used.assign(images.size(), false);
499  for (unsigned long valid_image_pair_indice : valid_image_pair_indices)
500  out.image_pair_was_used[valid_image_pair_indice] = true;
501 
502  // Uncertainties ---------------------
503  // The order of inv. variances in the diagonal of the Hessian is:
504  // * N x Manifold Epsilon of left camera pose (6)
505  // * Manifold Epsilon of right2left pose (6)
506  // * Left-cam-params (<=9)
507  // * Right-cam-params (<=9)
510  const size_t base_idx_H_CPs = H.cols() - 2 * nUnknownsCamParams;
511  for (size_t i = 0; i < nUnknownsCamParams; i++)
512  {
513  out.left_params_inv_variance[vars_to_optimize[i]] =
514  H(base_idx_H_CPs + i, base_idx_H_CPs + i);
515  out.right_params_inv_variance[vars_to_optimize[i]] =
516  H(base_idx_H_CPs + nUnknownsCamParams + i,
517  base_idx_H_CPs + nUnknownsCamParams + i);
518  }
519 
520  // Draw projected points
521  for (unsigned long idx : valid_image_pair_indices)
522  {
523  TImageCalibData& dat_l = images[idx].left;
524  TImageCalibData& dat_r = images[idx].right;
525 
526  // Rectify image.
529 
530  // Camera poses:
531  dat_l.reconstructed_camera_pose =
532  -mrpt::poses::CPose3D(lm_stat.left_cam_poses[idx]);
533  dat_r.reconstructed_camera_pose =
534  -(mrpt::poses::CPose3D(lm_stat.right2left_pose) +
535  mrpt::poses::CPose3D(lm_stat.left_cam_poses[idx]));
536 
537  // Project distorted images:
538  mrpt::vision::pinhole::projectPoints_with_distortion(
539  obj_points, out.cam_params.leftCamera,
540  mrpt::poses::CPose3DQuat(dat_l.reconstructed_camera_pose),
541  dat_l.projectedPoints_distorted, true);
542  mrpt::vision::pinhole::projectPoints_with_distortion(
543  obj_points, out.cam_params.rightCamera,
544  mrpt::poses::CPose3DQuat(dat_r.reconstructed_camera_pose),
545  dat_r.projectedPoints_distorted, true);
546 
547  // Draw corners:
548  dat_l.img_checkboard.drawChessboardCorners(
549  dat_l.projectedPoints_distorted, check_size.x, check_size.y);
550  dat_r.img_checkboard.drawChessboardCorners(
551  dat_r.projectedPoints_distorted, check_size.x, check_size.y);
552  }
553 
554  return true;
555  }
556  catch (const std::exception& e)
557  {
558  std::cout << e.what() << std::endl;
559  return false;
560  }
561 }
562 
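// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the library): how a caller might
// drive checkerBoardStereoCalibration(). The file names, board dimensions and
// square size below are hypothetical placeholders.
// ---------------------------------------------------------------------------
#if 0
#include <iostream>
#include <mrpt/vision/chessboard_stereo_camera_calib.h>

static bool example_stereo_calibration()
{
	using namespace mrpt::vision;

	// One left/right pair for brevity; real setups use many pairs.
	TCalibrationStereoImageList imgs(1);
	if (!imgs[0].left.img_original.loadFromFile("left_000.png") ||
		!imgs[0].right.img_original.loadFromFile("right_000.png"))
		return false;

	TStereoCalibParams params;
	params.check_size_x = 7;  // inner corners along X
	params.check_size_y = 9;  // inner corners along Y
	params.check_squares_length_X_meters = 0.034;
	params.check_squares_length_Y_meters = 0.034;
	params.verbose = true;

	TStereoCalibResults res;
	const bool ok = checkerBoardStereoCalibration(imgs, params, res);
	if (ok) std::cout << "Calibration RMSE: " << res.final_rmse << " px\n";
	return ok;
}
#endif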
563 /*
564 Jacobian:
565 
566  d b( [px py pz] )
567 ------------------ (5x3) =
568  d{ px py pz }
569 
570 \left(\begin{array}{ccc} \frac{1}{\mathrm{pz}} & 0 &
571 -\frac{\mathrm{px}}{{\mathrm{pz}}^2}\\ 0 & \frac{1}{\mathrm{pz}} &
572 -\frac{\mathrm{py}}{{\mathrm{pz}}^2} \end{array}\right)
573 
574 */
575 
576 static void jacob_db_dp(
577  const TPoint3D& p, // 3D coordinates wrt the camera
578  mrpt::math::CMatrixFixed<double, 2, 3>& G)
579 {
580  const double pz_ = 1 / p.z;
581  const double pz_2 = pz_ * pz_;
582 
583  G(0, 0) = pz_;
584  G(0, 1) = 0;
585  G(0, 2) = -p.x * pz_2;
586 
587  G(1, 0) = 0;
588  G(1, 1) = pz_;
589  G(1, 2) = -p.y * pz_2;
590 }
591 
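// Optional sanity-check sketch (illustrative, not compiled): compares the
// analytic jacob_db_dp() above against central finite differences of
// b(p) = (px/pz, py/pz). Test point and step size are arbitrary choices.
#if 0
static void selfcheck_jacob_db_dp()
{
	const TPoint3D p(0.3, -0.2, 2.0);  // arbitrary test point (pz != 0)
	mrpt::math::CMatrixFixed<double, 2, 3> G;
	jacob_db_dp(p, G);

	const double h = 1e-6;
	for (int j = 0; j < 3; j++)
	{
		TPoint3D pp = p, pm = p;
		pp[j] += h;
		pm[j] -= h;
		const double bp[2] = {pp.x / pp.z, pp.y / pp.z};
		const double bm[2] = {pm.x / pm.z, pm.y / pm.z};
		for (int i = 0; i < 2; i++)
			ASSERT_(std::abs((bp[i] - bm[i]) / (2 * h) - G(i, j)) < 1e-5);
	}
}
#endif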
592 /*
593 Jacobian:
594 
595  dh( b,c )
596 ----------- = Hb | 2x2 (b=[x=px/pz y=py/pz])
597  d{ b }
598 
599 \left(\begin{array}{cc} \mathrm{fx}\, \left(\mathrm{k2}\, {\left(x^2 +
600 y^2\right)}^2 + \mathrm{k3}\, {\left(x^2 + y^2\right)}^3 + 6\, \mathrm{t2}\, x +
601 2\, \mathrm{t1}\, y + x\, \left(2\, \mathrm{k1}\, x + 4\, \mathrm{k2}\, x\,
602 \left(x^2 + y^2\right) + 6\, \mathrm{k3}\, x\, {\left(x^2 + y^2\right)}^2\right)
603 + \mathrm{k1}\, \left(x^2 + y^2\right) + 1\right) & \mathrm{fx}\, \left(2\,
604 \mathrm{t1}\, x + 2\, \mathrm{t2}\, y + x\, \left(2\, \mathrm{k1}\, y + 4\,
605 \mathrm{k2}\, y\, \left(x^2 + y^2\right) + 6\, \mathrm{k3}\, y\, {\left(x^2 +
606 y^2\right)}^2\right)\right)\\ \mathrm{fy}\, \left(2\, \mathrm{t1}\, x + 2\,
607 \mathrm{t2}\, y + y\, \left(2\, \mathrm{k1}\, x + 4\, \mathrm{k2}\, x\,
608 \left(x^2 + y^2\right) + 6\, \mathrm{k3}\, x\, {\left(x^2 +
609 y^2\right)}^2\right)\right) & \mathrm{fy}\, \left(\mathrm{k2}\, {\left(x^2 +
610 y^2\right)}^2 + \mathrm{k3}\, {\left(x^2 + y^2\right)}^3 + 2\, \mathrm{t2}\, x +
611 6\, \mathrm{t1}\, y + y\, \left(2\, \mathrm{k1}\, y + 4\, \mathrm{k2}\, y\,
612 \left(x^2 + y^2\right) + 6\, \mathrm{k3}\, y\, {\left(x^2 + y^2\right)}^2\right)
613 + \mathrm{k1}\, \left(x^2 + y^2\right) + 1\right) \end{array}\right)
614 
615  dh( b,c )
616 ----------- = Hc | 2x9
617  d{ c }
618 
619 \left(\begin{array}{ccccccccc} \mathrm{t2}\, \left(3\, x^2 + y^2\right) + x\,
620 \left(\mathrm{k2}\, {\left(x^2 + y^2\right)}^2 + \mathrm{k3}\, {\left(x^2 +
621 y^2\right)}^3 + \mathrm{k1}\, \left(x^2 + y^2\right) + 1\right) + 2\,
622 \mathrm{t1}\, x\, y & 0 & 1 & 0 & \mathrm{fx}\, x\, \left(x^2 + y^2\right) &
623 \mathrm{fx}\, x\, {\left(x^2 + y^2\right)}^2 & \mathrm{fx}\, x\, {\left(x^2 +
624 y^2\right)}^3 & 2\, \mathrm{fx}\, x\, y & \mathrm{fx}\, \left(3\, x^2 +
625 y^2\right)\\ 0 & \mathrm{t1}\, \left(x^2 + 3\, y^2\right) + y\,
626 \left(\mathrm{k2}\, {\left(x^2 + y^2\right)}^2 + \mathrm{k3}\, {\left(x^2 +
627 y^2\right)}^3 + \mathrm{k1}\, \left(x^2 + y^2\right) + 1\right) + 2\,
628 \mathrm{t2}\, x\, y & 0 & 1 & \mathrm{fy}\, y\, \left(x^2 + y^2\right) &
629 \mathrm{fy}\, y\, {\left(x^2 + y^2\right)}^2 & \mathrm{fy}\, y\, {\left(x^2 +
630 y^2\right)}^3 & \mathrm{fy}\, \left(x^2 + 3\, y^2\right) & 2\, \mathrm{fy}\, x\,
631 y \end{array}\right)
632 
633 */
634 
635 static void jacob_dh_db_and_dh_dc(
636  const TPoint3D& nP, // Point in relative coords wrt the camera
637  const mrpt::math::CVectorFixedDouble<9>& c, // camera parameters
638  mrpt::math::CMatrixFixed<double, 2, 2>& Hb,
639  mrpt::math::CMatrixFixed<double, 2, 9>& Hc)
640 {
641  const double x = nP.x / nP.z;
642  const double y = nP.y / nP.z;
643 
644  const double r = x * x + y * y;  // Note: "r" here is the SQUARED radius (x^2+y^2),
645  const double r2 = r * r;         // so r2 = (x^2+y^2)^2 ...
646  const double r6 = r2 * r;        // ... and r6 = (x^2+y^2)^3, matching the formulas above.
647 
648  // c=[fx fy cx cy k1 k2 k3 t1 t2]
649  const double fx = c[0], fy = c[1];
650  // const double cx=c[2], cy=c[3]; // Unused
651  const double k1 = c[4], k2 = c[5], k3 = c[6];
652  const double t1 = c[7], t2 = c[8];
653 
654  // Hb = dh(b,c)/db (2x2)
655  Hb(0, 0) =
656  fx * (k2 * r2 + k3 * r6 + 6 * t2 * x + 2 * t1 * y +
657  x * (2 * k1 * x + 4 * k2 * x * r + 6 * k3 * x * r2) + k1 * r + 1);
658  Hb(0, 1) = fx * (2 * t1 * x + 2 * t2 * y +
659  x * (2 * k1 * y + 4 * k2 * y * r + 6 * k3 * y * r2));
660 
661  Hb(1, 0) = fy * (2 * t1 * x + 2 * t2 * y +
662  y * (2 * k1 * x + 4 * k2 * x * r + 6 * k3 * x * r2));
663  Hb(1, 1) =
664  fy * (k2 * r2 + k3 * r6 + 2 * t2 * x + 6 * t1 * y +
665  y * (2 * k1 * y + 4 * k2 * y * r + 6 * k3 * y * r2) + k1 * r + 1);
666 
667  // Hc = dh(b,c)/dc (2x9)
668  Hc(0, 0) = t2 * (3 * x * x + y * y) + x * (k2 * r2 + k3 * r6 + k1 * r + 1) +
669  2 * t1 * x * y;
670  Hc(0, 1) = 0;
671  Hc(0, 2) = 1;
672  Hc(0, 3) = 0;
673  Hc(0, 4) = fx * x * r;
674  Hc(0, 5) = fx * x * r2;
675  Hc(0, 6) = fx * x * r6;
676  Hc(0, 7) = 2 * fx * x * y;
677  Hc(0, 8) = fx * (3 * x * x + y * y);
678 
679  Hc(1, 0) = 0;
680  Hc(1, 1) = t1 * (x * x + 3 * y * y) + y * (k2 * r2 + k3 * r6 + k1 * r + 1) +
681  2 * t2 * x * y;
682  Hc(1, 2) = 0;
683  Hc(1, 3) = 1;
684  Hc(1, 4) = fy * y * r;
685  Hc(1, 5) = fy * y * r2;
686  Hc(1, 6) = fy * y * r6;
687  Hc(1, 7) = fy * (x * x + 3 * y * y);
688  Hc(1, 8) = 2 * fy * x * y;
689 }
690 
691 static void jacob_deps_D_p_deps(
692  const TPoint3D& p_D, // D (+) p
693  mrpt::math::CMatrixFixed<double, 3, 6>& dpl_del)
694 {
695  // Jacobian 10.3.4 in technical report "A tutorial on SE(3) transformation
696  // parameterizations and on-manifold optimization"
697  dpl_del.block<3, 3>(0, 0).setIdentity();
698  dpl_del.block<3, 3>(0, 3) = mrpt::math::skew_symmetric3_neg(p_D).asEigen();
699 }
700 
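// In closed form (restating the code above): for p_D = D (+) p,
//
//   d( exp(eps) (+) p_D ) / d eps |_{eps=0}  =  [ I_3 | -[p_D]_x ]   (3x6),
//
// where [.]_x denotes the 3x3 skew-symmetric cross-product matrix.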
701 static void jacob_dA_eps_D_p_deps(
702  const CPose3D& A, const CPose3D& D, const TPoint3D& p,
703  mrpt::math::CMatrixFixed<double, 3, 6>& dp_deps)
704 {
705  // Jacobian 10.3.7 in technical report "A tutorial on SE(3) transformation
706  // parameterizations and on-manifold optimization"
707  const Eigen::Matrix<double, 1, 3> P(p.x, p.y, p.z);
708  const auto dr1 = D.getRotationMatrix().asEigen().block<1, 3>(0, 0);
709  const auto dr2 = D.getRotationMatrix().asEigen().block<1, 3>(1, 0);
710  const auto dr3 = D.getRotationMatrix().asEigen().block<1, 3>(2, 0);
711 
712  const Eigen::Matrix<double, 3, 1> v(
713  P.dot(dr1) + D.x(), P.dot(dr2) + D.y(), P.dot(dr3) + D.z());
714 
715  Eigen::Matrix<double, 3, 6> H;
716  H.block<3, 3>(0, 0).setIdentity();
717  H.block<3, 3>(0, 3) = mrpt::math::skew_symmetric3_neg(v).asEigen();
718 
719  dp_deps = A.getRotationMatrix().asEigen() * H;
720 }
721 
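// In closed form (restating the code above): with v = R_D p + t_D,
//
//   d( A (+) exp(eps) (+) D (+) p ) / d eps |_{eps=0}  =  R_A [ I_3 | -[v]_x ]   (3x6).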
722 static void project_point(
723  const mrpt::math::TPoint3D& P, const mrpt::img::TCamera& params,
724  const mrpt::math::TPose3D& cameraPose, mrpt::img::TPixelCoordf& px)
725 {
726  // Change the reference system to that wrt the camera
727  TPoint3D nP;
728  cameraPose.composePoint(P, nP);
729 
730  // Pinhole model:
731  const double x = nP.x / nP.z;
732  const double y = nP.y / nP.z;
733 
734  // Radial distortion:
735  const double r2 = square(x) + square(y);
736  const double r4 = square(r2);
737  const double r6 = r2 * r4;
738  const double A =
739  1 + params.dist[0] * r2 + params.dist[1] * r4 + params.dist[4] * r6;
740  const double B = 2 * x * y;
741 
742  px.x = params.cx() + params.fx() * (x * A + params.dist[2] * B +
743  params.dist[3] * (r2 + 2 * square(x)));
744  px.y = params.cy() + params.fy() * (y * A + params.dist[3] * B +
745  params.dist[2] * (r2 + 2 * square(y)));
746 }
747 
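/* For reference, project_point() above applies the Brown-Conrady model used
   throughout this file (a restatement of the code, not new math):

       x = X/Z,   y = Y/Z                      (pinhole normalization)
       r^2 = x^2 + y^2
       A = 1 + k1 r^2 + k2 r^4 + k3 r^6
       u = cx + fx * ( x A + 2 t1 x y + t2 (r^2 + 2 x^2) )
       v = cy + fy * ( y A + 2 t2 x y + t1 (r^2 + 2 y^2) )

   with params.dist[] = [k1 k2 t1 t2 k3], as in mrpt::img::TCamera.
*/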
748 // Build the "-gradient" and the Hessian matrix:
749 // Variables are in these order:
750 // * N x Manifold Epsilon of left camera pose (6)
751 // * Manifold Epsilon of right2left pose (6)
752 // * Left-cam-params (<=9)
753 // * Right-cam-params (<=9)
754 void mrpt::vision::build_linear_system(
755  const TResidualJacobianList& res_jac, const std::vector<size_t>& var_indxs,
756  mrpt::math::CVectorDynamic<double>& minus_g, mrpt::math::CMatrixDouble& H)
757 {
758  const size_t N = res_jac.size(); // Number of stereo image pairs
759  const size_t nMaxUnknowns = N * 6 + 6 + 9 + 9;
760 
761  // Reset to zeros:
762  auto minus_g_tot =
763  mrpt::math::CVectorDouble::Zero(nMaxUnknowns, 1);
764  auto H_tot = mrpt::math::CMatrixDouble::Zero(nMaxUnknowns, nMaxUnknowns);
765 
766  // Sum the contribution from each observation:
767  for (size_t i = 0; i < N; i++)
768  {
769  const size_t nObs = res_jac[i].size();
770  for (size_t j = 0; j < nObs; j++)
771  {
772  const TResidJacobElement& rje = res_jac[i][j];
773  // Sub-Hessian & sub-gradient are variables in this order:
774  // eps_left_cam, eps_right2left_cam, left_cam_params,
775  // right_cam_params
776  const Eigen::Matrix<double, 30, 30> Hij = rje.J.transpose() * rje.J;
777  const Eigen::Matrix<double, 30, 1> gij =
778  rje.J.transpose() * rje.residual;
779 
780  // Assemble in their place:
781  minus_g_tot.block<6, 1>(i * 6, 0) -= gij.block<6, 1>(0, 0);
782  minus_g_tot.block<6 + 9 + 9, 1>(N * 6, 0) -=
783  gij.block<6 + 9 + 9, 1>(6, 0);
784 
785  H_tot.block<6, 6>(i * 6, i * 6) += Hij.block<6, 6>(0, 0);
786 
787  H_tot.block<6 + 9 + 9, 6 + 9 + 9>(N * 6, N * 6) +=
788  Hij.block<6 + 9 + 9, 6 + 9 + 9>(6, 6);
789  H_tot.block<6, 6 + 9 + 9>(i * 6, N * 6) +=
790  Hij.block<6, 6 + 9 + 9>(0, 6);
791  H_tot.block<6 + 9 + 9, 6>(N * 6, i * 6) +=
792  Hij.block<6 + 9 + 9, 6>(6, 0);
793  }
794  }
795 
796 #if 0
797  // Also include additional cost terms to stabilize estimation:
798  // -----------------------------------------------------------------
799  // In the left->right pose increment: Add cost if we know that both cameras are almost parallel:
800  const double cost_lr_angular = 1e10;
801  H_tot.block<3,3>(N*6+3,N*6+3) += Eigen::Matrix<double,3,3>::Identity() * cost_lr_angular;
802 #endif
803 
804  // Ignore those derivatives of "fixed" (non-optimizable) variables in camera
805  // parameters
806  // --------------------------------------------------------------------------------------
807  const size_t N_Cs = var_indxs.size(); // [0-8]
808  const size_t nUnknowns = N * 6 + 6 + 2 * N_Cs;
809  const size_t nUnkPoses = N * 6 + 6;
810 
811  minus_g.setZero(nUnknowns, 1);
812  H.setZero(nUnknowns, nUnknowns);
813  // Copy unmodified all but the cam. params:
814  minus_g.asEigen().block(0, 0, nUnkPoses, 1) =
815  minus_g_tot.asEigen().block(0, 0, nUnkPoses, 1);
816  H.asEigen().block(0, 0, nUnkPoses, nUnkPoses) =
817  H_tot.asEigen().block(0, 0, nUnkPoses, nUnkPoses);
818 
819  // Selective copy cam. params parts:
820  for (size_t i = 0; i < N_Cs; i++)
821  {
822  minus_g[nUnkPoses + i] = minus_g_tot[nUnkPoses + var_indxs[i]];
823  minus_g[nUnkPoses + N_Cs + i] =
824  minus_g_tot[nUnkPoses + 9 + var_indxs[i]];
825  }
826 
827  for (size_t k = 0; k < nUnkPoses; k++)
828  {
829  for (size_t i = 0; i < N_Cs; i++)
830  {
831  H(nUnkPoses + i, k) = H(k, nUnkPoses + i) =
832  H_tot(k, nUnkPoses + var_indxs[i]);
833  H(nUnkPoses + i + N_Cs, k) = H(k, nUnkPoses + i + N_Cs) =
834  H_tot(k, nUnkPoses + 9 + var_indxs[i]);
835  }
836  }
837 
838  for (size_t i = 0; i < N_Cs; i++)
839  {
840  for (size_t j = 0; j < N_Cs; j++)
841  {
842  H(nUnkPoses + i, nUnkPoses + j) =
843  H_tot(nUnkPoses + var_indxs[i], nUnkPoses + var_indxs[j]);
844  H(nUnkPoses + N_Cs + i, nUnkPoses + N_Cs + j) = H_tot(
845  nUnkPoses + 9 + var_indxs[i], nUnkPoses + 9 + var_indxs[j]);
846  }
847  }
848 
849 #if 0
850  {
851  CMatrixDouble M1 = H_tot;
852  CMatrixDouble g1 = minus_g_tot;
853  M1.saveToTextFile("H1.txt");
854  g1.saveToTextFile("g1.txt");
855 
856  CMatrixDouble M2 = H;
857  CMatrixDouble g2 = minus_g;
858  M2.saveToTextFile("H2.txt");
859  g2.saveToTextFile("g2.txt");
861  }
862 #endif
863 
864 } // end of build_linear_system
865 
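// Unknown-vector layout used by build_linear_system() / add_lm_increment(),
// restated for clarity:
//
//   [ eps_leftcam_0 (6) | ... | eps_leftcam_{N-1} (6) | eps_right2left (6) |
//     selected left-cam params (|var_indxs|) | selected right-cam params ]
//
// Illustrative index helpers (hypothetical, not used by the library code):
#if 0
static size_t idx_left_pose(size_t i) { return 6 * i; }
static size_t idx_right2left(size_t N) { return 6 * N; }
static size_t idx_left_cam_param(size_t N, size_t j) { return 6 * N + 6 + j; }
static size_t idx_right_cam_param(size_t N, size_t nSel, size_t j)
{
	return 6 * N + 6 + nSel + j;
}
#endif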
866 // * N x Manifold Epsilon of left camera pose (6)
867 // * Manifold Epsilon of right2left pose (6)
868 // * Left-cam-params (<=9)
869 // * Right-cam-params (<=9)
870 void mrpt::vision::add_lm_increment(
871  const mrpt::math::CVectorDynamic<double>& eps,
872  const std::vector<size_t>& var_indxs, lm_stat_t& lm_stat)
873 {
874  // Increment of the N cam poses
875  const size_t N = lm_stat.valid_image_pair_indices.size();
876  for (size_t i = 0; i < N; i++)
877  {
878  auto& cam_pose =
879  lm_stat.left_cam_poses[lm_stat.valid_image_pair_indices[i]];
880 
881  // Use the Lie Algebra methods for the increment:
882  const CVectorFixedDouble<6> incr(eps.asEigen().block<6, 1>(6 * i, 0));
883  const CPose3D incrPose = Lie::SE<3>::exp(incr);
884 
885  // new_pose = old_pose [+] delta
886  // = exp(delta) (+) old_pose
887  cam_pose = (incrPose + CPose3D(cam_pose)).asTPose();
888  }
889 
890  // Increment of the right-left pose:
891  {
892  // Use the Lie Algebra methods for the increment:
893  const CVectorFixedDouble<6> incr(eps.asEigen().block<6, 1>(6 * N, 0));
894  const CPose3D incrPose = Lie::SE<3>::exp(incr);
895 
896  // new_pose = old_pose [+] delta
897  // = exp(delta) (+) old_pose
898  lm_stat.right2left_pose =
899  (incrPose + CPose3D(lm_stat.right2left_pose)).asTPose();
900  }
901 
902  // Increment of the camera params:
903  const size_t idx = 6 * N + 6;
904  const size_t nPC =
905  var_indxs.size(); // Number of camera parameters being optimized
906  for (size_t i = 0; i < nPC; i++)
907  {
908  lm_stat.left_cam_params[var_indxs[i]] += eps[idx + i];
909  lm_stat.right_cam_params[var_indxs[i]] += eps[idx + nPC + i];
910  }
911 
912 } // end of add_lm_increment
913 
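// The updates above are the standard left-multiplicative SE(3) retraction
// (restating the code): new_pose = exp([eps]^) (+) old_pose, with eps in R^6
// the local increment from the LM solve and exp() = mrpt::poses::Lie::SE<3>::exp().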
914 #ifdef USE_NUMERIC_JACOBIANS
915 // ---------------------------------------------------------------
916 // Aux. function, only if we evaluate Jacobians numerically:
917 // ---------------------------------------------------------------
918 struct TNumJacobData
919 {
920  const lm_stat_t& lm_stat;
921  const TPoint3D& obj_point;
922  const CPose3D& left_cam;
923  const CPose3D& right2left;
924  const Eigen::Matrix<double, 4, 1>& real_obs;
925 
926  TNumJacobData(
927  const lm_stat_t& _lm_stat, const TPoint3D& _obj_point,
928  const CPose3D& _left_cam, const CPose3D& _right2left,
929  const Eigen::Matrix<double, 4, 1>& _real_obs)
930  : lm_stat(_lm_stat),
931  obj_point(_obj_point),
932  left_cam(_left_cam),
933  right2left(_right2left),
934  real_obs(_real_obs)
935  {
936  }
937 };
938 
939 void numeric_jacob_eval_function(
940  const CVectorFixedDouble<30>& x, const TNumJacobData& dat,
941  CVectorFixedDouble<4>& out)
942 {
943  // Recover the state out from "x":
944  const CVectorFixedDouble<6> incr_l(&x[0]);
945  const CPose3D incrPose_l = Lie::SE<3>::exp(incr_l);
946  const CVectorFixedDouble<6> incr_rl(&x[6]);
947  const CPose3D incrPose_rl = Lie::SE<3>::exp(incr_rl);
948 
949  // [fx fy cx cy k1 k2 k3 t1 t2]
950  TStereoCamera cam_params;
951  CVectorFixedDouble<9> left_cam_params = x.segment<9>(6 + 6);
952  cam_params.leftCamera.fx(left_cam_params[0]);
953  cam_params.leftCamera.fy(left_cam_params[1]);
954  cam_params.leftCamera.cx(left_cam_params[2]);
955  cam_params.leftCamera.cy(left_cam_params[3]);
956  cam_params.leftCamera.k1(left_cam_params[4]);
957  cam_params.leftCamera.k2(left_cam_params[5]);
958  cam_params.leftCamera.k3(left_cam_params[6]);
959  cam_params.leftCamera.p1(left_cam_params[7]);
960  cam_params.leftCamera.p2(left_cam_params[8]);
961 
962  // [fx fy cx cy k1 k2 k3 t1 t2]
963  CVectorFixedDouble<9> right_cam_params = x.segment<9>(6 + 6 + 9);
964  cam_params.rightCamera.fx(right_cam_params[0]);
965  cam_params.rightCamera.fy(right_cam_params[1]);
966  cam_params.rightCamera.cx(right_cam_params[2]);
967  cam_params.rightCamera.cy(right_cam_params[3]);
968  cam_params.rightCamera.k1(right_cam_params[4]);
969  cam_params.rightCamera.k2(right_cam_params[5]);
970  cam_params.rightCamera.k3(right_cam_params[6]);
971  cam_params.rightCamera.p1(right_cam_params[7]);
972  cam_params.rightCamera.p2(right_cam_params[8]);
973 
974  TPixelCoordf px_l;
975  project_point(
976  dat.obj_point, cam_params.leftCamera, incrPose_l + dat.left_cam, px_l);
977  TPixelCoordf px_r;
978  project_point(
979  dat.obj_point, cam_params.rightCamera,
980  incrPose_rl + dat.right2left + incrPose_l + dat.left_cam, px_r);
981 
982  const Eigen::Matrix<double, 4, 1> predicted_obs(
983  px_l.x, px_l.y, px_r.x, px_r.y);
984 
985  // Residual:
986  out = predicted_obs; // - dat.real_obs;
987 }
988 
989 void eval_h_b(
990  const CVectorFixedDouble<2>& X, const TCamera& params,
991  CVectorFixedDouble<2>& out)
992 {
993  // Radial distortion:
994  const double x = X[0], y = X[1];
995  const double r2 = square(x) + square(y);
996  const double r4 = square(r2);
997  const double r6 = r2 * r4;
998  const double A =
999  1 + params.dist[0] * r2 + params.dist[1] * r4 + params.dist[4] * r6;
1000  const double B = 2 * x * y;
1001 
1002  out[0] =
1003  params.cx() + params.fx() * (x * A + params.dist[2] * B +
1004  params.dist[3] * (r2 + 2 * square(x)));
1005  out[1] =
1006  params.cy() + params.fy() * (y * A + params.dist[3] * B +
1007  params.dist[2] * (r2 + 2 * square(y)));
1008 }
1009 
1010 void eval_b_p(
1011  const CVectorFixedDouble<3>& P, int dummy, CVectorFixedDouble<2>& b)
1012 {
1013  // Pinhole projection to normalized image coordinates:
1014  b[0] = P[0] / P[2];
1015  b[1] = P[1] / P[2];
1016 }
1017 
1018 void eval_deps_D_p(
1019  const CVectorFixedDouble<6>& eps, const TPoint3D& D_p,
1020  CVectorFixedDouble<3>& out)
1021 {
1022  const CVectorFixedDouble<6> incr(&eps[0]);
1023  const CPose3D incrPose = Lie::SE<3>::exp(incr);
1024  TPoint3D D_p_out;
1025  incrPose.composePoint(D_p, D_p_out);
1026  for (int i = 0; i < 3; i++) out[i] = D_p_out[i];
1027 }
1028 
1029 struct TEvalData_A_eps_D_p
1030 {
1031  CPose3D A, D;
1032  TPoint3D p;
1033 };
1034 
1035 void eval_dA_eps_D_p(
1036  const CVectorFixedDouble<6>& eps, const TEvalData_A_eps_D_p& dat,
1037  CVectorFixedDouble<3>& out)
1038 {
1039  const CVectorFixedDouble<6> incr(&eps[0]);
1040  const CPose3D incrPose = Lie::SE<3>::exp(incr);
1041  const CPose3D A_eps_D = dat.A + (incrPose + dat.D);
1042  TPoint3D pt;
1043  A_eps_D.composePoint(dat.p, pt);
1044  for (int i = 0; i < 3; i++) out[i] = pt[i];
1045 }
1046 // ---------------------------------------------------------------
1047 // End of aux. function, only if we evaluate Jacobians numerically:
1048 // ---------------------------------------------------------------
1049 #endif
1050 
1051 // The 4x1 prediction is the (u,v) pixel coordinates for the left / right
1052 // cameras:
1053 double mrpt::vision::recompute_errors_and_Jacobians(
1054  const lm_stat_t& lm_stat, TResidualJacobianList& res_jac,
1055  bool use_robust_kernel, double kernel_param)
1056 {
1057  double total_err = 0;
1058  const size_t N = lm_stat.valid_image_pair_indices.size();
1059  res_jac.resize(N);
1060 
1061  // Parse lm_stat data: CVectorFixedDouble<9> left_cam_params,
1062  // right_cam_params; // [fx fy cx cy k1 k2 k3 t1 t2]
1063  TCamera camparam_l;
1064  camparam_l.fx(lm_stat.left_cam_params[0]);
1065  camparam_l.fy(lm_stat.left_cam_params[1]);
1066  camparam_l.cx(lm_stat.left_cam_params[2]);
1067  camparam_l.cy(lm_stat.left_cam_params[3]);
1068  camparam_l.k1(lm_stat.left_cam_params[4]);
1069  camparam_l.k2(lm_stat.left_cam_params[5]);
1070  camparam_l.k3(lm_stat.left_cam_params[6]);
1071  camparam_l.p1(lm_stat.left_cam_params[7]);
1072  camparam_l.p2(lm_stat.left_cam_params[8]);
1073  TCamera camparam_r;
1074  camparam_r.fx(lm_stat.right_cam_params[0]);
1075  camparam_r.fy(lm_stat.right_cam_params[1]);
1076  camparam_r.cx(lm_stat.right_cam_params[2]);
1077  camparam_r.cy(lm_stat.right_cam_params[3]);
1078  camparam_r.k1(lm_stat.right_cam_params[4]);
1079  camparam_r.k2(lm_stat.right_cam_params[5]);
1080  camparam_r.k3(lm_stat.right_cam_params[6]);
1081  camparam_r.p1(lm_stat.right_cam_params[7]);
1082  camparam_r.p2(lm_stat.right_cam_params[8]);
1083 
1084  // process all points from all N image pairs:
1085  for (size_t k = 0; k < N; k++)
1086  {
1087  const size_t k_idx = lm_stat.valid_image_pair_indices[k];
1088  const size_t nPts = lm_stat.obj_points.size();
1089  res_jac[k].resize(nPts);
1090  // For each point in the pattern:
1091  for (size_t i = 0; i < nPts; i++)
1092  {
1093  // Observed corners:
1094  const TPixelCoordf& px_obs_l =
1095  lm_stat.images[k_idx].left.detected_corners[i];
1096  const TPixelCoordf& px_obs_r =
1097  lm_stat.images[k_idx].right.detected_corners[i];
1098  const Eigen::Matrix<double, 4, 1> obs(
1099  px_obs_l.x, px_obs_l.y, px_obs_r.x, px_obs_r.y);
1100 
1101  TResidJacobElement& rje = res_jac[k][i];
1102 
1103  // Predict i'th corner on left & right cameras:
1104  // ---------------------------------------------
1105  TPixelCoordf px_l, px_r;
1106  project_point(
1107  lm_stat.obj_points[i], camparam_l,
1108  lm_stat.left_cam_poses[k_idx], px_l);
1109 
1110  TPose3D auxPose;
1111  lm_stat.right2left_pose.composePose(
1112  lm_stat.left_cam_poses[k_idx], auxPose);
1113 
1114  project_point(lm_stat.obj_points[i], camparam_r, auxPose, px_r);
1115  rje.predicted_obs =
1116  Eigen::Matrix<double, 4, 1>(px_l.x, px_l.y, px_r.x, px_r.y);
1117 
1118  // Residual:
1119  rje.residual = rje.predicted_obs - obs;
1120 
1121  // Accum. total squared error:
1122  const double err_sqr_norm = rje.residual.squaredNorm();
1123  if (use_robust_kernel)
1124  {
1125  RobustKernel<rkPseudoHuber> rk;
1126  rk.param_sq = kernel_param;
1127 
1128  double kernel_1st_deriv, kernel_2nd_deriv;
1129  const double scaled_err =
1130  rk.eval(err_sqr_norm, kernel_1st_deriv, kernel_2nd_deriv);
1131 
1132  rje.residual *= kernel_1st_deriv;
1133  total_err += scaled_err;
1134  }
1135  else
1136  total_err += err_sqr_norm;
1137 
1138 // ---------------------------------------------------------------------------------
1139 // Jacobian: (4x30)
1140 // See technical report cited in the headers for the theory behind these
1141 // formulas.
1142 // ---------------------------------------------------------------------------------
1143 #if !defined(USE_NUMERIC_JACOBIANS) || defined(COMPARE_NUMERIC_JACOBIANS)
1144  // ----- Theoretical Jacobians -----
1145  mrpt::math::CMatrixFixed<double, 2, 6> dhl_del, dhr_del, dhr_der;
1146  mrpt::math::CMatrixFixed<double, 2, 9> dhl_dcl, dhr_dcr;
1147 
1148  // 3D coordinates of the corner point wrt both cameras:
1149  TPoint3D pt_wrt_left, pt_wrt_right;
1150  lm_stat.left_cam_poses[k_idx].composePoint(
1151  lm_stat.obj_points[i], pt_wrt_left);
1152 
1153  lm_stat.right2left_pose.composePose(
1154  lm_stat.left_cam_poses[k_idx], auxPose);
1155 
1156  auxPose.composePoint(lm_stat.obj_points[i], pt_wrt_right);
1157 
1158  // Build partial Jacobians:
1159  mrpt::math::CMatrixFixed<double, 2, 2> dhl_dbl, dhr_dbr;
1160  jacob_dh_db_and_dh_dc(
1161  pt_wrt_left, lm_stat.left_cam_params, dhl_dbl, dhl_dcl);
1162  jacob_dh_db_and_dh_dc(
1163  pt_wrt_right, lm_stat.right_cam_params, dhr_dbr, dhr_dcr);
1164 
1165  mrpt::math::CMatrixFixed<double, 2, 3> dbl_dpl, dbr_dpr;
1166  jacob_db_dp(pt_wrt_left, dbl_dpl);
1167  jacob_db_dp(pt_wrt_right, dbr_dpr);
1168 
1169  // p_l = exp(epsilon_l) (+) pose_left (+) point_ij
1170  // p_l = [exp(epsilon_r) (+) pose_right2left] (+) [exp(epsilon_l)
1171  // (+) pose_left] (+) point_ij
1172  mrpt::math::CMatrixFixed<double, 3, 6> dpl_del, dpr_del, dpr_der;
1173  jacob_deps_D_p_deps(pt_wrt_left, dpl_del);
1174  jacob_deps_D_p_deps(pt_wrt_right, dpr_der);
1175 
1176  const auto r2l_p = mrpt::poses::CPose3D(lm_stat.right2left_pose);
1177  const auto lcp =
1178  mrpt::poses::CPose3D(lm_stat.left_cam_poses[k_idx]);
1179  jacob_dA_eps_D_p_deps(r2l_p, lcp, lm_stat.obj_points[i], dpr_del);
1180 
1181  // Jacobian chain rule:
1182  dhl_del = dhl_dbl.asEigen() * dbl_dpl.asEigen() * dpl_del.asEigen();
1183  dhr_del = dhr_dbr.asEigen() * dbr_dpr.asEigen() * dpr_del.asEigen();
1184  dhr_der = dhr_dbr.asEigen() * dbr_dpr.asEigen() * dpr_der.asEigen();
1185 
1186  rje.J.setZero(4, 30);
1187  rje.J.block<2, 6>(0, 0) = dhl_del.asEigen();
1188  rje.J.block<2, 6>(2, 0) = dhr_del.asEigen();
1189  rje.J.block<2, 6>(2, 6) = dhr_der.asEigen();
1190  rje.J.block<2, 9>(0, 12) = dhl_dcl.asEigen();
1191  rje.J.block<2, 9>(2, 21) = dhr_dcr.asEigen();
1192 
1193 #if defined(COMPARE_NUMERIC_JACOBIANS)
1194  const mrpt::math::CMatrixFixed<double, 4, 30> J_theor = rje.J;
1195 #endif
1196 // ---- end of theoretical Jacobians ----
1197 #endif
1198 
1199 #if defined(USE_NUMERIC_JACOBIANS) || defined(COMPARE_NUMERIC_JACOBIANS)
1200  // ----- Numeric Jacobians ----
1201 
1202  CVectorFixedDouble<30>
1203  x0; // eps_l (6) + eps_lr (6) + l_camparams (9) +
1204  // r_camparams (9)
1205  x0.setZero();
1206  x0.segment<9>(6 + 6) = lm_stat.left_cam_params;
1207  x0.segment<9>(6 + 6 + 9) = lm_stat.right_cam_params;
1208 
1209  const double x_incrs_val[30] = {
1210  1e-6, 1e-6, 1e-6, 1e-7, 1e-7, 1e-7, // eps_l
1211  1e-6, 1e-6, 1e-6, 1e-7, 1e-7, 1e-7, // eps_r
1212  1e-3, 1e-3, 1e-3, 1e-3, 1e-8, 1e-8, 1e-8, 1e-8, 1e-4, // cam_l
1213  1e-3, 1e-3, 1e-3, 1e-3, 1e-8, 1e-8, 1e-8, 1e-8, 1e-4 // cam_rl
1214  };
1215  const CVectorFixedDouble<30> x_incrs(x_incrs_val);
1216  TNumJacobData dat(
1217  lm_stat, lm_stat.obj_points[i], lm_stat.left_cam_poses[k_idx],
1218  lm_stat.right2left_pose, obs);
1219 
1220  mrpt::math::estimateJacobian(
1221  x0, &numeric_jacob_eval_function, x_incrs, dat, rje.J);
1222 
1223 #if defined(COMPARE_NUMERIC_JACOBIANS)
1224  const mrpt::math::CMatrixFixed<double, 4, 30> J_num = rje.J;
1225 #endif
1226 #endif // ---- end of numeric Jacobians ----
1227 
1228 // Only for debugging:
1229 #if defined(COMPARE_NUMERIC_JACOBIANS)
1230  // if ( (J_num-J_theor).array().abs().maxCoeff()>1e-2)
1231  {
1232  ofstream f;
1233  f.open("dbg.txt", ios_base::out | ios_base::app);
1234  f << "J_num:\n"
1235  << J_num << endl
1236  << "J_theor:\n"
1237  << J_theor << endl
1238  << "diff:\n"
1239  << J_num - J_theor << endl
1240  << "diff (ratio):\n"
1241  << (J_num - J_theor).cwiseQuotient(J_num) << endl
1242  << endl;
1243  }
1244 #endif
1245  } // for i
1246  } // for k
1247 
1248  return total_err;
1249 } // end of recompute_errors_and_Jacobians
1250 
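// Robust weighting used above, in outline (illustrative; see
// mrpt/math/robust_kernels.h for the exact implementation): a pseudo-Huber
// cost on the squared residual norm e2 with parameter b^2 = param_sq,
//
//     rho(e2)  = 2 b^2 ( sqrt(1 + e2 / b^2) - 1 )
//     rho'(e2) = 1 / sqrt(1 + e2 / b^2)
//
// so large residuals grow roughly linearly instead of quadratically, and the
// raw residual is scaled by rho'(e2) before entering the normal equations.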
1251 // Ctor:
1253 