77 class GRAPH_T,
class FEEDBACK_CALLABLE =
81 const std::set<mrpt::graphs::TNodeID>* in_nodes_to_optimize =
nullptr,
84 FEEDBACK_CALLABLE functor_feedback = FEEDBACK_CALLABLE())
99 constexpr
auto DIMS_POSE = gst::SE_TYPE::DOFs;
102 const bool verbose = 0 != extra_params.getWithDefaultVal(
"verbose", 0);
103 const size_t max_iters =
104 extra_params.getWithDefaultVal(
"max_iterations", 100);
105 const bool enable_profiler =
106 0 != extra_params.getWithDefaultVal(
"profiler", 0);
108 const double initial_lambda = extra_params.getWithDefaultVal(
109 "initial_lambda", 0);
110 const double tau = extra_params.getWithDefaultVal(
"tau", 1e-3);
111 const double e1 = extra_params.getWithDefaultVal(
"e1", 1e-6);
112 const double e2 = extra_params.getWithDefaultVal(
"e2", 1e-6);
115 profiler.
enter(
"optimize_graph_spa_levmarq (entire)");
119 profiler.
enter(
"optimize_graph_spa_levmarq.list_IDs");
120 const set<TNodeID>* nodes_to_optimize;
121 set<TNodeID> nodes_to_optimize_auxlist;
123 if (in_nodes_to_optimize)
125 nodes_to_optimize = in_nodes_to_optimize;
130 for (
const auto& n : graph.nodes)
131 if (n.first != graph.root)
132 nodes_to_optimize_auxlist.insert(
133 nodes_to_optimize_auxlist.end(),
136 nodes_to_optimize = &nodes_to_optimize_auxlist;
138 profiler.
leave(
"optimize_graph_spa_levmarq.list_IDs");
141 const size_t nFreeNodes = nodes_to_optimize->size();
146 <<
" nodes to optimize: ";
149 ostream_iterator<TNodeID> out_it(cout,
", ");
151 nodes_to_optimize->begin(), nodes_to_optimize->end(), out_it);
159 using observation_info_t =
typename gst::observation_info_t;
160 vector<observation_info_t> lstObservationData;
165 for (
const auto& e : graph.edges)
167 const auto& ids = e.first;
168 const auto& edge = e.second;
170 if (nodes_to_optimize->find(ids.first) == nodes_to_optimize->end() &&
171 nodes_to_optimize->find(ids.second) == nodes_to_optimize->end())
175 auto itP1 = graph.nodes.find(ids.first);
176 auto itP2 = graph.nodes.find(ids.second);
177 ASSERTMSG_(itP1 != graph.nodes.end(),
"Edge node1 has no global pose");
178 ASSERTMSG_(itP2 != graph.nodes.end(),
"Edge node2 has no global pose");
180 const auto& EDGE_POSE = edge.getPoseMean();
183 typename gst::observation_info_t new_entry;
185 new_entry.edge_mean = &EDGE_POSE;
186 new_entry.P1 = &itP1->second;
187 new_entry.P2 = &itP2->second;
189 lstObservationData.push_back(new_entry);
194 const size_t nObservations = lstObservationData.size();
198 using SparseCholeskyDecompPtr =
199 std::unique_ptr<CSparseMatrix::CholeskyDecomp>;
200 SparseCholeskyDecompPtr ptrCh;
206 typename gst::map_pairIDs_pairJacobs_t lstJacobians;
211 std::vector<typename gst::Array_O> errs;
216 profiler.
enter(
"optimize_graph_spa_levmarq.Jacobians&err");
217 double total_sqr_err = computeJacobiansAndErrors<GRAPH_T>(
218 graph, lstObservationData, lstJacobians, errs);
219 profiler.
leave(
"optimize_graph_spa_levmarq.Jacobians&err");
226 vector<pair<size_t, size_t>> obsIdx2fnIdx;
229 obsIdx2fnIdx.reserve(nObservations);
230 ASSERTDEB_(lstJacobians.size() == nObservations);
231 for (
auto itJ = lstJacobians.begin(); itJ != lstJacobians.end(); ++itJ)
233 const TNodeID id1 = itJ->first.first;
234 const TNodeID id2 = itJ->first.second;
235 obsIdx2fnIdx.emplace_back(
243 using map_ID2matrix_TxT_t = std::map<TNodeID, typename gst::matrix_TxT>;
245 double lambda = initial_lambda;
247 bool have_to_recompute_H_and_grad =
true;
254 size_t last_iter = 0;
256 for (
size_t iter = 0; iter < max_iters; ++iter)
258 vector<map_ID2matrix_TxT_t> H_map(nFreeNodes);
263 if (have_to_recompute_H_and_grad)
265 have_to_recompute_H_and_grad =
false;
274 profiler.
enter(
"optimize_graph_spa_levmarq.grad");
283 typename gst::map_pairIDs_pairJacobs_t::const_iterator itJ;
285 for (idx_obs = 0, itJ = lstJacobians.begin();
286 itJ != lstJacobians.end(); ++itJ, ++idx_obs)
290 itJ->first == lstObservationData[idx_obs].edge->first);
298 const size_t idx1 = obsIdx2fnIdx[idx_obs].first;
299 const size_t idx2 = obsIdx2fnIdx[idx_obs].second;
301 if (idx1 != string::npos)
303 typename gst::Array_O grad_idx1;
307 lstObservationData[idx_obs].edge ,
308 errs[idx_obs] , grad_idx1
310 for (
unsigned int i = 0; i < DIMS_POSE; i++)
311 grad[DIMS_POSE * idx1 + i] += grad_idx1[i];
314 if (idx2 != string::npos)
316 typename gst::Array_O grad_idx2;
320 lstObservationData[idx_obs].edge ,
321 errs[idx_obs] , grad_idx2
323 for (
unsigned int i = 0; i < DIMS_POSE; i++)
324 grad[DIMS_POSE * idx2 + i] += grad_idx2[i];
328 profiler.
leave(
"optimize_graph_spa_levmarq.grad");
333 if (grad_norm_inf <= e1)
339 "End condition #1: math::norm_inf(g)<=e1 " 345 profiler.
enter(
"optimize_graph_spa_levmarq.sp_H:build map");
359 typename gst::map_pairIDs_pairJacobs_t::const_iterator
362 for (idxObs = 0, itJacobPair = lstJacobians.begin();
363 idxObs < nObservations; ++itJacobPair, ++idxObs)
365 const bool Hij_upper_triang =
366 itJacobPair->first.first < itJacobPair->first.second;
369 const size_t idx_i = obsIdx2fnIdx[idxObs].first;
370 const size_t idx_j = obsIdx2fnIdx[idxObs].second;
372 const bool is_i_free_node = idx_i != string::npos;
373 const bool is_j_free_node = idx_j != string::npos;
378 const typename gst::matrix_TxT& J1 =
379 itJacobPair->second.first;
380 const typename gst::matrix_TxT& J2 =
381 itJacobPair->second.second;
386 typename gst::matrix_TxT JtJ(
390 J1, JtJ, lstObservationData[idxObs].edge);
391 H_map[idx_i][idx_i] += JtJ;
396 typename gst::matrix_TxT JtJ(
400 J2, JtJ, lstObservationData[idxObs].edge);
401 H_map[idx_j][idx_j] += JtJ;
404 if (is_i_free_node && is_j_free_node)
406 typename gst::matrix_TxT JtJ(
410 J1, J2, JtJ, lstObservationData[idxObs].edge);
413 if (Hij_upper_triang)
414 H_map[idx_j][idx_i] += JtJ;
416 H_map[idx_i][idx_j].sum_At(JtJ);
420 profiler.
leave(
"optimize_graph_spa_levmarq.sp_H:build map");
424 if (lambda <= 0 && iter == 0)
427 "optimize_graph_spa_levmarq.lambda_init");
428 double H_diagonal_max = 0;
429 for (
size_t i = 0; i < nFreeNodes; i++)
430 for (
auto it = H_map[i].
begin(); it != H_map[i].end(); ++it)
436 for (
size_t k = 0; k < DIMS_POSE; k++)
438 H_diagonal_max, it->second(k, k));
441 lambda = tau * H_diagonal_max;
444 "optimize_graph_spa_levmarq.lambda_init");
457 <<
" ,total sqr. err: " << total_sqr_err
458 <<
", avrg. err per edge: " 459 << std::sqrt(total_sqr_err / nObservations)
460 <<
" lambda: " << lambda << endl;
463 if (functor_feedback)
465 functor_feedback(graph, iter, max_iters, total_sqr_err);
468 profiler.
enter(
"optimize_graph_spa_levmarq.sp_H:build");
472 CSparseMatrix sp_H(nFreeNodes * DIMS_POSE, nFreeNodes * DIMS_POSE);
473 for (
size_t i = 0; i < nFreeNodes; i++)
475 const size_t i_offset = i * DIMS_POSE;
477 for (
auto it = H_map[i].
begin(); it != H_map[i].end(); ++it)
479 const size_t j = it->first;
480 const size_t j_offset = j * DIMS_POSE;
488 for (
size_t r = 0; r < DIMS_POSE; r++)
492 j_offset + r, i_offset + r,
493 it->second(r, r) + lambda);
495 for (
size_t c = r + 1; c < DIMS_POSE; c++)
498 j_offset + r, i_offset + c, it->second(r, c));
510 profiler.
leave(
"optimize_graph_spa_levmarq.sp_H:build");
520 profiler.
enter(
"optimize_graph_spa_levmarq.sp_H:chol");
522 ptrCh = std::make_unique<CSparseMatrix::CholeskyDecomp>(sp_H);
524 ptrCh.get()->update(sp_H);
525 profiler.
leave(
"optimize_graph_spa_levmarq.sp_H:chol");
527 profiler.
enter(
"optimize_graph_spa_levmarq.sp_H:backsub");
528 ptrCh->backsub(grad, delta);
529 profiler.
leave(
"optimize_graph_spa_levmarq.sp_H:backsub");
536 <<
"] Got non-definite positive matrix, retrying with a " 537 "larger lambda...\n";
548 profiler.
enter(
"optimize_graph_spa_levmarq.delta_norm");
550 profiler.
leave(
"optimize_graph_spa_levmarq.delta_norm");
553 profiler.
enter(
"optimize_graph_spa_levmarq.x_norm");
556 for (
unsigned long it : *nodes_to_optimize)
558 auto itP = graph.nodes.find(it);
559 const typename gst::graph_t::constraint_t::type_value& P =
561 for (
size_t i = 0; i < DIMS_POSE; i++) x_norm +=
square(P[i]);
563 x_norm = std::sqrt(x_norm);
565 profiler.
leave(
"optimize_graph_spa_levmarq.x_norm");
568 const double thres_norm = e2 * (x_norm + e2);
569 if (delta_norm < thres_norm)
575 "End condition #2: %e < %e\n", delta_norm,
587 typename gst::graph_t::global_poses_t old_poses_backup;
591 delta.
size() == int(nodes_to_optimize->size() * DIMS_POSE));
592 const double* delta_ptr = &delta[0];
593 for (
unsigned long it : *nodes_to_optimize)
595 typename gst::Array_O exp_delta;
596 for (
size_t i = 0; i < DIMS_POSE; i++)
597 exp_delta[i] = -*delta_ptr++;
602 auto it_old_value = graph.nodes.find(it);
603 old_poses_backup[it] =
604 it_old_value->second;
607 it_old_value->second =
608 it_old_value->second + gst::SE_TYPE::exp(exp_delta);
615 typename gst::map_pairIDs_pairJacobs_t new_lstJacobians;
616 std::vector<typename gst::Array_O> new_errs;
618 profiler.
enter(
"optimize_graph_spa_levmarq.Jacobians&err");
619 double new_total_sqr_err = computeJacobiansAndErrors<GRAPH_T>(
620 graph, lstObservationData, new_lstJacobians, new_errs);
621 profiler.
leave(
"optimize_graph_spa_levmarq.Jacobians&err");
624 if (new_total_sqr_err < total_sqr_err)
627 new_lstJacobians.swap(lstJacobians);
629 std::swap(new_total_sqr_err, total_sqr_err);
632 have_to_recompute_H_and_grad =
true;
638 for (
auto it = old_poses_backup.begin();
639 it != old_poses_backup.end(); ++it)
640 graph.nodes[it->first] = it->second;
644 <<
"] Got larger error=" << new_total_sqr_err
645 <<
", retrying with a larger lambda...\n";
655 profiler.
leave(
"optimize_graph_spa_levmarq (entire)");
void optimize_graph_spa_levmarq(GRAPH_T &graph, TResultInfoSpaLevMarq &out_info, const std::set< mrpt::graphs::TNodeID > *in_nodes_to_optimize=nullptr, const mrpt::system::TParametersDouble &extra_params=mrpt::system::TParametersDouble(), FEEDBACK_CALLABLE functor_feedback=FEEDBACK_CALLABLE())
Optimize a graph of pose constraints using the Sparse Pose Adjustment (SPA) sparse representation and a Levenberg-Marquardt optimizer.
std::string std::string format(std::string_view fmt, ARGS &&... args)
Used in mrpt::math::CSparseMatrix.
void insert_entry_fast(const size_t row, const size_t col, const double val)
This was an optimized version, but is now equivalent to insert_entry() due to the need to be compatible with unmodified CSparse system libraries.
size_type size() const
Get a 2-vector with [NROWS NCOLS] (as in MATLAB command size(x))
size_t num_iters
The number of LM iterations executed.
A sparse matrix structure, wrapping T. Davis' CSparse library (part of suitesparse).
void compressFromTriplet()
ONLY for TRIPLET matrices: convert the matrix in a column-compressed form.
size_t find_in_vector(const T &value, const CONTAINER &vect)
Returns the index of the value "T" in the container "vect" (std::vector,std::deque,etc), or string::npos if not found.
SLAM methods related to graphs of pose constraints.
This base provides a set of functions for maths stuff.
#define ASSERTDEB_ABOVE_(__A, __B)
#define ASSERTDEB_EQUAL_(__A, __B)
#define ASSERTMSG_(f, __ERROR_MSG)
Defines an assertion mechanism.
Classes for 2D/3D geometry representation, both of single values and probability density distributions.
void keep_max(T &var, const K test_val)
If the second argument is above the first one, set the first argument to this higher value...
Auxiliary traits template for use among graph-slam problems to make life easier with these complicated, long data type names.
CONTAINER::Scalar norm_inf(const CONTAINER &v)
return_t square(const num_t x)
Inline function for the square of a number.
void enter(const std::string_view &func_name) noexcept
Start of a named section.
This is the global namespace for all Mobile Robot Programming Toolkit (MRPT) libraries.
const_iterator begin() const
A versatile "profiler" that logs the time spent within each pair of calls to enter(X)-leave(X), among other stats.
std::function< void(const GRAPH_T &graph, const size_t iter, const size_t max_iter, const double cur_sq_error)> TFunctorFeedback
#define ASSERTDEB_(f)
Defines an assertion mechanism - only when compiled in debug.
void insert_submatrix(const size_t row, const size_t col, const MATRIX &M)
ONLY for TRIPLET matrices: insert a given matrix (in any of the MRPT formats) at a given location of the sparse matrix.
Output information for mrpt::graphslam::optimize_graph_spa_levmarq()
double leave(const std::string_view &func_name) noexcept
End of a named section.
uint64_t TNodeID
A generic numeric type for unique IDs of nodes or entities.
#define __CURRENT_FUNCTION_NAME__
A macro for obtaining the name of the current function:
double final_total_sq_error
The sum of all the squared errors for every constraint involved in the problem.
CONTAINER::Scalar norm(const CONTAINER &v)