crocoddyl  1.9.0
Contact RObot COntrol by Differential DYnamic programming Library (Crocoddyl)
box-fddp.cpp
1 // BSD 3-Clause License
3 //
4 // Copyright (C) 2019-2021, University of Edinburgh
5 // Copyright note valid unless otherwise stated in individual files.
6 // All rights reserved.
8 
9 #include <iostream>
10 
11 #include "crocoddyl/core/solvers/box-fddp.hpp"
12 #include "crocoddyl/core/utils/exception.hpp"
13 
14 namespace crocoddyl {
15 
16 SolverBoxFDDP::SolverBoxFDDP(boost::shared_ptr<ShootingProblem> problem)
17  : SolverFDDP(problem), qp_(problem->get_runningModels()[0]->get_nu(), 100, 0.1, 1e-5, 0.) {
18  allocateData();
19 
20  const std::size_t n_alphas = 10;
21  alphas_.resize(n_alphas);
22  for (std::size_t n = 0; n < n_alphas; ++n) {
23  alphas_[n] = 1. / pow(2., static_cast<double>(n));
24  }
25  // Change the default convergence tolerance since the gradient of the Lagrangian is smaller
26  // than an unconstrained OC problem (i.e. gradient = Qu - mu^T * C where mu > 0 and C defines
27  // the inequality matrix that bounds the control); and we don't have access to mu from the
28  // box QP.
29  th_stop_ = 5e-5;
30 }
31 
32 SolverBoxFDDP::~SolverBoxFDDP() {}
33 
35  START_PROFILER("SolverBoxFDDP::resizeData");
37 
38  const std::size_t T = problem_->get_T();
39  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
40  for (std::size_t t = 0; t < T; ++t) {
41  const boost::shared_ptr<ActionModelAbstract>& model = models[t];
42  const std::size_t nu = model->get_nu();
43  Quu_inv_[t].conservativeResize(nu, nu);
44  du_lb_[t].conservativeResize(nu);
45  du_ub_[t].conservativeResize(nu);
46  }
47  STOP_PROFILER("SolverBoxFDDP::resizeData");
48 }
49 
52 
53  const std::size_t T = problem_->get_T();
54  Quu_inv_.resize(T);
55  du_lb_.resize(T);
56  du_ub_.resize(T);
57  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
58  for (std::size_t t = 0; t < T; ++t) {
59  const boost::shared_ptr<ActionModelAbstract>& model = models[t];
60  const std::size_t nu = model->get_nu();
61  Quu_inv_[t] = Eigen::MatrixXd::Zero(nu, nu);
62  du_lb_[t] = Eigen::VectorXd::Zero(nu);
63  du_ub_[t] = Eigen::VectorXd::Zero(nu);
64  }
65 }
66 
67 void SolverBoxFDDP::computeGains(const std::size_t t) {
68  const std::size_t nu = problem_->get_runningModels()[t]->get_nu();
69  if (nu > 0) {
70  if (!problem_->get_runningModels()[t]->get_has_control_limits() || !is_feasible_) {
71  // No control limits on this model: Use vanilla DDP
73  return;
74  }
75 
76  du_lb_[t] = problem_->get_runningModels()[t]->get_u_lb() - us_[t];
77  du_ub_[t] = problem_->get_runningModels()[t]->get_u_ub() - us_[t];
78 
79  const BoxQPSolution& boxqp_sol = qp_.solve(Quu_[t], Qu_[t], du_lb_[t], du_ub_[t], k_[t]);
80 
81  // Compute controls
82  Quu_inv_[t].setZero();
83  for (std::size_t i = 0; i < boxqp_sol.free_idx.size(); ++i) {
84  for (std::size_t j = 0; j < boxqp_sol.free_idx.size(); ++j) {
85  Quu_inv_[t](boxqp_sol.free_idx[i], boxqp_sol.free_idx[j]) = boxqp_sol.Hff_inv(i, j);
86  }
87  }
88  K_[t].noalias() = Quu_inv_[t] * Qxu_[t].transpose();
89  k_[t] = -boxqp_sol.x;
90 
91  // The box-QP clamped the gradient direction; this is important for accounting
92  // the algorithm advancement (i.e. stopping criteria)
93  for (std::size_t i = 0; i < boxqp_sol.clamped_idx.size(); ++i) {
94  Qu_[t](boxqp_sol.clamped_idx[i]) = 0.;
95  }
96  }
97 }
98 
99 void SolverBoxFDDP::forwardPass(const double steplength) {
100  if (steplength > 1. || steplength < 0.) {
101  throw_pretty("Invalid argument: "
102  << "invalid step length, value is between 0. to 1.");
103  }
104  cost_try_ = 0.;
105  xnext_ = problem_->get_x0();
106  const std::size_t T = problem_->get_T();
107  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
108  const std::vector<boost::shared_ptr<ActionDataAbstract> >& datas = problem_->get_runningDatas();
109  if ((is_feasible_) || (steplength == 1)) {
110  for (std::size_t t = 0; t < T; ++t) {
111  const boost::shared_ptr<ActionModelAbstract>& m = models[t];
112  const boost::shared_ptr<ActionDataAbstract>& d = datas[t];
113  const std::size_t nu = m->get_nu();
114 
115  xs_try_[t] = xnext_;
116  m->get_state()->diff(xs_[t], xs_try_[t], dx_[t]);
117  if (nu != 0) {
118  us_try_[t].noalias() = us_[t] - k_[t] * steplength - K_[t] * dx_[t];
119  if (m->get_has_control_limits()) { // clamp control
120  us_try_[t] = us_try_[t].cwiseMax(m->get_u_lb()).cwiseMin(m->get_u_ub());
121  }
122  m->calc(d, xs_try_[t], us_try_[t]);
123  } else {
124  m->calc(d, xs_try_[t]);
125  }
126  xnext_ = d->xnext;
127  cost_try_ += d->cost;
128 
129  if (raiseIfNaN(cost_try_)) {
130  throw_pretty("forward_error");
131  }
132  if (raiseIfNaN(xnext_.lpNorm<Eigen::Infinity>())) {
133  throw_pretty("forward_error");
134  }
135  }
136 
137  const boost::shared_ptr<ActionModelAbstract>& m = problem_->get_terminalModel();
138  const boost::shared_ptr<ActionDataAbstract>& d = problem_->get_terminalData();
139  xs_try_.back() = xnext_;
140  m->calc(d, xs_try_.back());
141  cost_try_ += d->cost;
142 
143  if (raiseIfNaN(cost_try_)) {
144  throw_pretty("forward_error");
145  }
146  } else {
147  for (std::size_t t = 0; t < T; ++t) {
148  const boost::shared_ptr<ActionModelAbstract>& m = models[t];
149  const boost::shared_ptr<ActionDataAbstract>& d = datas[t];
150  const std::size_t nu = m->get_nu();
151  m->get_state()->integrate(xnext_, fs_[t] * (steplength - 1), xs_try_[t]);
152  m->get_state()->diff(xs_[t], xs_try_[t], dx_[t]);
153  if (nu != 0) {
154  us_try_[t].noalias() = us_[t] - k_[t] * steplength - K_[t] * dx_[t];
155  if (m->get_has_control_limits()) { // clamp control
156  us_try_[t] = us_try_[t].cwiseMax(m->get_u_lb()).cwiseMin(m->get_u_ub());
157  }
158  m->calc(d, xs_try_[t], us_try_[t]);
159  } else {
160  m->calc(d, xs_try_[t]);
161  }
162  xnext_ = d->xnext;
163  cost_try_ += d->cost;
164 
165  if (raiseIfNaN(cost_try_)) {
166  throw_pretty("forward_error");
167  }
168  if (raiseIfNaN(xnext_.lpNorm<Eigen::Infinity>())) {
169  throw_pretty("forward_error");
170  }
171  }
172 
173  const boost::shared_ptr<ActionModelAbstract>& m = problem_->get_terminalModel();
174  const boost::shared_ptr<ActionDataAbstract>& d = problem_->get_terminalData();
175  m->get_state()->integrate(xnext_, fs_.back() * (steplength - 1), xs_try_.back());
176  m->calc(d, xs_try_.back());
177  cost_try_ += d->cost;
178 
179  if (raiseIfNaN(cost_try_)) {
180  throw_pretty("forward_error");
181  }
182  }
183 }
184 
185 const std::vector<Eigen::MatrixXd>& SolverBoxFDDP::get_Quu_inv() const { return Quu_inv_; }
186 
187 } // namespace crocoddyl
virtual void allocateData()
Allocate all the internal data needed for the solver.
Definition: box-ddp.cpp:50
bool is_feasible_
Label that indicates whether the iteration is feasible.
virtual void forwardPass(const double steplength)
Run the forward pass or rollout.
Definition: box-fddp.cpp:99
virtual void computeGains(const std::size_t t)
Compute the feedforward and feedback terms using a Cholesky decomposition.
Definition: box-fddp.cpp:67
Box QP solution.
Definition: box-qp.hpp:28
std::vector< MatrixXdRowMajor > K_
Feedback gains .
Definition: ddp.hpp:316
virtual void allocateData()
Allocate all the internal data needed for the solver.
Definition: ddp.cpp:376
std::vector< Eigen::VectorXd > xs_try_
State trajectory computed by line-search procedure.
Definition: ddp.hpp:303
virtual void resizeData()
Resizing the solver data.
Definition: ddp.cpp:172
const BoxQPSolution & solve(const Eigen::MatrixXd &H, const Eigen::VectorXd &q, const Eigen::VectorXd &lb, const Eigen::VectorXd &ub, const Eigen::VectorXd &xinit)
Compute the solution of bound-constrained QP based on Newton projection.
Definition: box-qp.cpp:58
std::vector< Eigen::VectorXd > us_
Control trajectory.
virtual void allocateData()
Allocate all the internal data needed for the solver.
Definition: box-fddp.cpp:50
std::vector< Eigen::VectorXd > k_
Feed-forward terms .
Definition: ddp.hpp:317
virtual void resizeData()
Resizing the solver data.
Definition: box-fddp.cpp:34
Eigen::MatrixXd Hff_inv
Inverse of the free space Hessian.
Definition: box-qp.hpp:48
Eigen::VectorXd xnext_
Next state .
Definition: ddp.hpp:319
boost::shared_ptr< ShootingProblem > problem_
optimal control problem
Eigen::VectorXd x
Decision vector.
Definition: box-qp.hpp:49
std::vector< Eigen::VectorXd > xs_
State trajectory.
std::vector< Eigen::VectorXd > dx_
State error during the roll-out/forward-pass (size T)
Definition: ddp.hpp:305
std::vector< Eigen::VectorXd > fs_
Gaps/defects between shooting nodes.
virtual void computeGains(const std::size_t t)
Compute the feedforward and feedback terms using a Cholesky decomposition.
Definition: ddp.cpp:337
std::vector< double > alphas_
Set of step lengths using by the line-search procedure.
Definition: ddp.hpp:326
double th_stop_
Tolerance for stopping the algorithm.
std::vector< size_t > clamped_idx
Clamped space indexes.
Definition: box-qp.hpp:51
std::vector< Eigen::MatrixXd > Qxu_
Hessian of the Hamiltonian .
Definition: ddp.hpp:312
std::vector< size_t > free_idx
Free space indexes.
Definition: box-qp.hpp:50
std::vector< Eigen::VectorXd > us_try_
Control trajectory computed by line-search procedure.
Definition: ddp.hpp:304
std::vector< Eigen::MatrixXd > Quu_
Hessian of the Hamiltonian .
Definition: ddp.hpp:313
std::vector< Eigen::VectorXd > Qu_
Gradient of the Hamiltonian .
Definition: ddp.hpp:315
double cost_try_
Total cost computed by line-search procedure.
Definition: ddp.hpp:302