crocoddyl 1.6.0
Contact RObot COntrol by Differential DYnamic programming Library (Crocoddyl)
box-fddp.cpp
// BSD 3-Clause License
//
// Copyright (C) 2019-2020, University of Edinburgh
// Copyright note valid unless otherwise stated in individual files.
// All rights reserved.

#include <iostream>
#include "crocoddyl/core/utils/exception.hpp"
#include "crocoddyl/core/solvers/box-fddp.hpp"

namespace crocoddyl {

SolverBoxFDDP::SolverBoxFDDP(boost::shared_ptr<ShootingProblem> problem)
    : SolverFDDP(problem), qp_(problem->get_runningModels()[0]->get_nu(), 100, 0.1, 1e-5, 0.) {
  allocateData();

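  // Line-search step lengths: a geometric sequence 1, 1/2, 1/4, ..., 1/2^9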
  const std::size_t& n_alphas = 10;
  alphas_.resize(n_alphas);
  for (std::size_t n = 0; n < n_alphas; ++n) {
    alphas_[n] = 1. / pow(2., static_cast<double>(n));
  }
  // Change the default convergence tolerance since the gradient of the Lagrangian is smaller
  // than in an unconstrained OC problem (i.e., gradient = Qu - mu^T * C, where mu > 0 and C defines
  // the inequality matrix that bounds the control); we don't have access to mu from the box QP.
  th_stop_ = 5e-5;
}

SolverBoxFDDP::~SolverBoxFDDP() {}

void SolverBoxFDDP::allocateData() {
  SolverFDDP::allocateData();

  const std::size_t& T = problem_->get_T();
  Quu_inv_.resize(T);
  const std::size_t& nu = problem_->get_nu_max();
  for (std::size_t t = 0; t < T; ++t) {
    Quu_inv_[t] = Eigen::MatrixXd::Zero(nu, nu);
  }
  du_lb_.resize(nu);
  du_ub_.resize(nu);
}

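// Backward-pass gains: when the running model has control limits and the current iterate is
// feasible, the feed-forward term is obtained from a box-constrained QP instead of an
// unconstrained Cholesky solve, and the feedback gains act only on the free subspace.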
void SolverBoxFDDP::computeGains(const std::size_t& t) {
  const std::size_t& nu = problem_->get_runningModels()[t]->get_nu();
  if (nu > 0) {
    if (!problem_->get_runningModels()[t]->get_has_control_limits() || !is_feasible_) {
      // No control limits on this model: use vanilla DDP
      SolverFDDP::computeGains(t);
      return;
    }

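    // Express the control bounds as increments around the current control iterate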
    du_lb_.head(nu) = problem_->get_runningModels()[t]->get_u_lb() - us_[t].head(nu);
    du_ub_.head(nu) = problem_->get_runningModels()[t]->get_u_ub() - us_[t].head(nu);

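    // Solve the bound-constrained QP: min_du 0.5 * du^T * Quu * du + Qu^T * du
    // subject to du_lb <= du <= du_ub, warm-started with k_[t]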
    const BoxQPSolution& boxqp_sol =
        qp_.solve(Quu_[t].topLeftCorner(nu, nu), Qu_[t].head(nu), du_lb_.head(nu), du_ub_.head(nu), k_[t].head(nu));

    // Compute controls
    Quu_inv_[t].topLeftCorner(nu, nu).setZero();
    for (std::size_t i = 0; i < boxqp_sol.free_idx.size(); ++i) {
      for (std::size_t j = 0; j < boxqp_sol.free_idx.size(); ++j) {
        Quu_inv_[t](boxqp_sol.free_idx[i], boxqp_sol.free_idx[j]) = boxqp_sol.Hff_inv(i, j);
      }
    }
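    // Feedback gains are restricted to the free (unclamped) subspace: rows and columns of
    // Quu_inv_ associated with clamped controls stay zero, so no feedback acts along them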
    K_[t].topRows(nu).noalias() = Quu_inv_[t].topLeftCorner(nu, nu) * Qxu_[t].leftCols(nu).transpose();
    k_[t].topRows(nu).noalias() = -boxqp_sol.x;

    // The box-QP clamped the gradient direction; zeroing the clamped components matters for
    // measuring the algorithm's advancement (i.e., the stopping criteria)
    for (std::size_t i = 0; i < boxqp_sol.clamped_idx.size(); ++i) {
      Qu_[t].head(nu)(boxqp_sol.clamped_idx[i]) = 0.;
    }
  }
}

void SolverBoxFDDP::forwardPass(const double& steplength) {
  if (steplength > 1. || steplength < 0.) {
    throw_pretty("Invalid argument: "
                 << "invalid step length, value should be between 0. and 1.");
  }
  cost_try_ = 0.;
  xnext_ = problem_->get_x0();
  const std::size_t& T = problem_->get_T();
  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
  const std::vector<boost::shared_ptr<ActionDataAbstract> >& datas = problem_->get_runningDatas();
  if ((is_feasible_) || (steplength == 1)) {
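    // Feasible (or full-step) case: a classical nonlinear rollout from x0, with no gap contraction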
    for (std::size_t t = 0; t < T; ++t) {
      const boost::shared_ptr<ActionModelAbstract>& m = models[t];
      const boost::shared_ptr<ActionDataAbstract>& d = datas[t];
      const std::size_t& nu = m->get_nu();

      xs_try_[t] = xnext_;
      m->get_state()->diff(xs_[t], xs_try_[t], dx_[t]);
      if (nu != 0) {
        us_try_[t].head(nu).noalias() = us_[t].head(nu) - k_[t].head(nu) * steplength - K_[t].topRows(nu) * dx_[t];
        if (m->get_has_control_limits()) {  // clamp control
          us_try_[t].head(nu) = us_try_[t].head(nu).cwiseMax(m->get_u_lb()).cwiseMin(m->get_u_ub());
        }
        m->calc(d, xs_try_[t], us_try_[t].head(nu));
      } else {
        m->calc(d, xs_try_[t]);
      }
      xnext_ = d->xnext;
      cost_try_ += d->cost;

      if (raiseIfNaN(cost_try_)) {
        throw_pretty("forward_error");
      }
      if (raiseIfNaN(xnext_.lpNorm<Eigen::Infinity>())) {
        throw_pretty("forward_error");
      }
    }

    const boost::shared_ptr<ActionModelAbstract>& m = problem_->get_terminalModel();
    const boost::shared_ptr<ActionDataAbstract>& d = problem_->get_terminalData();
    xs_try_.back() = xnext_;
    m->calc(d, xs_try_.back());
    cost_try_ += d->cost;

    if (raiseIfNaN(cost_try_)) {
      throw_pretty("forward_error");
    }
  } else {
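    // Infeasible case: roll out while closing the dynamics gaps fs_, keeping only the
    // fraction (1 - steplength) of each gap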
    for (std::size_t t = 0; t < T; ++t) {
      const boost::shared_ptr<ActionModelAbstract>& m = models[t];
      const boost::shared_ptr<ActionDataAbstract>& d = datas[t];
      const std::size_t& nu = m->get_nu();
      m->get_state()->integrate(xnext_, fs_[t] * (steplength - 1), xs_try_[t]);
      m->get_state()->diff(xs_[t], xs_try_[t], dx_[t]);
      if (nu != 0) {
        us_try_[t].head(nu).noalias() = us_[t].head(nu) - k_[t].head(nu) * steplength - K_[t].topRows(nu) * dx_[t];
        if (m->get_has_control_limits()) {  // clamp control
          us_try_[t].head(nu) = us_try_[t].head(nu).cwiseMax(m->get_u_lb()).cwiseMin(m->get_u_ub());
        }
        m->calc(d, xs_try_[t], us_try_[t].head(nu));
      } else {
        m->calc(d, xs_try_[t]);
      }
      xnext_ = d->xnext;
      cost_try_ += d->cost;

      if (raiseIfNaN(cost_try_)) {
        throw_pretty("forward_error");
      }
      if (raiseIfNaN(xnext_.lpNorm<Eigen::Infinity>())) {
        throw_pretty("forward_error");
      }
    }

    const boost::shared_ptr<ActionModelAbstract>& m = problem_->get_terminalModel();
    const boost::shared_ptr<ActionDataAbstract>& d = problem_->get_terminalData();
    m->get_state()->integrate(xnext_, fs_.back() * (steplength - 1), xs_try_.back());
    m->calc(d, xs_try_.back());
    cost_try_ += d->cost;

    if (raiseIfNaN(cost_try_)) {
      throw_pretty("forward_error");
    }
  }
}

const std::vector<Eigen::MatrixXd>& SolverBoxFDDP::get_Quu_inv() const { return Quu_inv_; }

}  // namespace crocoddyl
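
A minimal usage sketch, not part of the file above: it assumes a crocoddyl::ShootingProblem assembled elsewhere from control-limited action models that share the same control dimension; the helper name solveWithBoxFDDP, the zero warm-start, and the 100-iteration budget are illustrative choices.

#include <vector>
#include <Eigen/Dense>
#include <boost/shared_ptr.hpp>
#include "crocoddyl/core/optctrl/shooting.hpp"
#include "crocoddyl/core/solvers/box-fddp.hpp"

// Hypothetical helper: solve a previously assembled problem with SolverBoxFDDP.
bool solveWithBoxFDDP(const boost::shared_ptr<crocoddyl::ShootingProblem>& problem) {
  crocoddyl::SolverBoxFDDP solver(problem);

  // Warm-start with the initial state and zero controls, sized from the problem itself.
  std::vector<Eigen::VectorXd> xs_init(problem->get_T() + 1, problem->get_x0());
  std::vector<Eigen::VectorXd> us_init(
      problem->get_T(), Eigen::VectorXd::Zero(problem->get_runningModels()[0]->get_nu()));

  // Run up to 100 iterations; the box QP and the clamped rollout above keep the controls
  // within the models' bounds.
  return solver.solve(xs_init, us_init, 100);
}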