crocoddyl 1.5.0
Contact RObot COntrol by Differential DYnamic programming Library (Crocoddyl)
kkt.cpp
// BSD 3-Clause License
//
// Copyright (C) 2018-2020, LAAS-CNRS, New York University, Max Planck Gesellschaft,
//                          University of Edinburgh
// Copyright note valid unless otherwise stated in individual files.
// All rights reserved.

#include "crocoddyl/core/solvers/kkt.hpp"

namespace crocoddyl {

SolverKKT::SolverKKT(boost::shared_ptr<ShootingProblem> problem)
    : SolverAbstract(problem),
      regfactor_(10.),
      regmin_(1e-9),
      regmax_(1e9),
      cost_try_(0.),
      th_grad_(1e-12),
      was_feasible_(false) {
  allocateData();
  const unsigned int& n_alphas = 10;
  xreg_ = 0.;
  ureg_ = 0.;
  alphas_.resize(n_alphas);
  for (unsigned int n = 0; n < n_alphas; ++n) {
    alphas_[n] = 1. / pow(2., (double)n);
  }
}

SolverKKT::~SolverKKT() {}

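// Solve the problem by iterating full KKT (Newton) steps: compute a search
// direction, backtrack over the precomputed step lengths until a step is
// accepted, and stop once the iterate is feasible and the KKT residual is
// below th_stop_.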
bool SolverKKT::solve(const std::vector<Eigen::VectorXd>& init_xs, const std::vector<Eigen::VectorXd>& init_us,
                      const std::size_t& maxiter, const bool& is_feasible, const double&) {
  setCandidate(init_xs, init_us, is_feasible);
  bool recalc = true;
  for (iter_ = 0; iter_ < maxiter; ++iter_) {
    while (true) {
      try {
        computeDirection(recalc);
      } catch (std::exception& e) {
        recalc = false;
        if (xreg_ == regmax_) {
          return false;
        } else {
          continue;
        }
      }
      break;
    }

    expectedImprovement();
    for (std::vector<double>::const_iterator it = alphas_.begin(); it != alphas_.end(); ++it) {
      steplength_ = *it;
      try {
        dV_ = tryStep(steplength_);
      } catch (std::exception& e) {
        continue;
      }
      dVexp_ = steplength_ * d_[0] + 0.5 * steplength_ * steplength_ * d_[1];
      if (d_[0] < th_grad_ || !is_feasible_ || dV_ > th_acceptstep_ * dVexp_) {
        was_feasible_ = is_feasible_;
        setCandidate(xs_try_, us_try_, true);
        cost_ = cost_try_;
        break;
      }
    }
    stoppingCriteria();
    const std::size_t& n_callbacks = callbacks_.size();
    if (n_callbacks != 0) {
      for (std::size_t c = 0; c < n_callbacks; ++c) {
        CallbackAbstract& callback = *callbacks_[c];
        callback(*this);
      }
    }
    if (was_feasible_ && stop_ < th_stop_) {
      return true;
    }
  }
  return false;
}

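// Compute the search direction: refresh the LQ approximation if requested,
// solve the KKT system, and unpack the primal solution into per-node steps
// (dxs_, dus_) and the dual solution into the multipliers (lambdas_).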
void SolverKKT::computeDirection(const bool& recalc) {
  const std::size_t& T = problem_->get_T();
  if (recalc) {
    calc();
  }
  computePrimalDual();
  const Eigen::VectorBlock<Eigen::VectorXd, Eigen::Dynamic> p_x = primal_.segment(0, ndx_);
  const Eigen::VectorBlock<Eigen::VectorXd, Eigen::Dynamic> p_u = primal_.segment(ndx_, nu_);

  std::size_t ix = 0;
  std::size_t iu = 0;
  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
  for (std::size_t t = 0; t < T; ++t) {
    const std::size_t& ndxi = models[t]->get_state()->get_ndx();
    const std::size_t& nui = models[t]->get_nu();
    dxs_[t] = p_x.segment(ix, ndxi);
    dus_[t] = p_u.segment(iu, nui);
    lambdas_[t] = dual_.segment(ix, ndxi);
    ix += ndxi;
    iu += nui;
  }
  const std::size_t& ndxi = problem_->get_terminalModel()->get_state()->get_ndx();
  dxs_.back() = p_x.segment(ix, ndxi);
  lambdas_.back() = dual_.segment(ix, ndxi);
}

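// Integrate a candidate step of the given length along (dxs_, dus_) into
// (xs_try_, us_try_) and return the obtained cost reduction cost_ - cost_try_.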
double SolverKKT::tryStep(const double& steplength) {
  const std::size_t& T = problem_->get_T();
  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
  for (std::size_t t = 0; t < T; ++t) {
    const boost::shared_ptr<ActionModelAbstract>& m = models[t];

    m->get_state()->integrate(xs_[t], steplength * dxs_[t], xs_try_[t]);
    if (m->get_nu() != 0) {
      us_try_[t] = us_[t];
      us_try_[t] += steplength * dus_[t];
    }
  }
  const boost::shared_ptr<ActionModelAbstract> m = problem_->get_terminalModel();
  m->get_state()->integrate(xs_[T], steplength * dxs_[T], xs_try_[T]);
  cost_try_ = problem_->calc(xs_try_, us_try_);
  return cost_ - cost_try_;
}

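// Measure the KKT residual: squared norm of the Lagrangian gradient
// (cost gradient plus multiplier terms) plus squared norm of the dynamics gaps.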
double SolverKKT::stoppingCriteria() {
  const std::size_t& T = problem_->get_T();
  std::size_t ix = 0;
  std::size_t iu = 0;
  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
  const std::vector<boost::shared_ptr<ActionDataAbstract> >& datas = problem_->get_runningDatas();
  for (std::size_t t = 0; t < T; ++t) {
    const boost::shared_ptr<ActionDataAbstract>& d = datas[t];
    const std::size_t& ndxi = models[t]->get_state()->get_ndx();
    const std::size_t& nui = models[t]->get_nu();

    dF.segment(ix, ndxi) = lambdas_[t];
    dF.segment(ix, ndxi).noalias() -= d->Fx.transpose() * lambdas_[t + 1];
    dF.segment(ndx_ + iu, nui).noalias() = -lambdas_[t + 1].transpose() * d->Fu;
    ix += ndxi;
    iu += nui;
  }
  const std::size_t& ndxi = problem_->get_terminalModel()->get_state()->get_ndx();
  dF.segment(ix, ndxi) = lambdas_.back();
  stop_ = (kktref_.segment(0, ndx_ + nu_) + dF).squaredNorm() + kktref_.segment(ndx_ + nu_, ndx_).squaredNorm();
  return stop_;
}

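// Expected improvement model of the current direction: d_[0] is the linear term
// -grad^T primal and d_[1] the quadratic term -primal^T H primal, with H the
// Hessian block of the KKT matrix.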
const Eigen::Vector2d& SolverKKT::expectedImprovement() {
  d_ = Eigen::Vector2d::Zero();
  // -grad^T.primal
  d_(0) = -kktref_.segment(0, ndx_ + nu_).dot(primal_);
  // -(hessian.primal)^T.primal
  kkt_primal_.noalias() = kkt_.block(0, 0, ndx_ + nu_, ndx_ + nu_) * primal_;
  d_(1) = -kkt_primal_.dot(primal_);
  return d_;
}

const Eigen::MatrixXd& SolverKKT::get_kkt() const { return kkt_; }

const Eigen::VectorXd& SolverKKT::get_kktref() const { return kktref_; }

const Eigen::VectorXd& SolverKKT::get_primaldual() const { return primaldual_; }

const std::vector<Eigen::VectorXd>& SolverKKT::get_dxs() const { return dxs_; }

const std::vector<Eigen::VectorXd>& SolverKKT::get_dus() const { return dus_; }

const std::vector<Eigen::VectorXd>& SolverKKT::get_lambdas() const { return lambdas_; }

const std::size_t& SolverKKT::get_nx() const { return nx_; }

const std::size_t& SolverKKT::get_ndx() const { return ndx_; }

const std::size_t& SolverKKT::get_nu() const { return nu_; }

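// Evaluate cost and derivatives along the current guess, then fill the KKT
// matrix (Lxx, Lxu, Luu blocks plus the constraint Jacobians built from Fx, Fu)
// and the KKT reference vector (Lx, Lu and the dynamics gaps).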
double SolverKKT::calc() {
  cost_ = problem_->calc(xs_, us_);
  cost_ = problem_->calcDiff(xs_, us_);

  // offset on constraint xnext = f(x,u) due to x0 = ref.
  const std::size_t& cx0 = problem_->get_runningModels()[0]->get_state()->get_ndx();

  std::size_t ix = 0;
  std::size_t iu = 0;
  const std::size_t& T = problem_->get_T();
  kkt_.block(ndx_ + nu_, 0, ndx_, ndx_) = Eigen::MatrixXd::Identity(ndx_, ndx_);
  for (std::size_t t = 0; t < T; ++t) {
    const boost::shared_ptr<ActionModelAbstract>& m = problem_->get_runningModels()[t];
    const boost::shared_ptr<ActionDataAbstract>& d = problem_->get_runningDatas()[t];
    const std::size_t& ndxi = m->get_state()->get_ndx();
    const std::size_t& nui = m->get_nu();

    // Computing the gap at the initial state
    if (t == 0) {
      m->get_state()->diff(problem_->get_x0(), xs_[0], kktref_.segment(ndx_ + nu_, ndxi));
    }

    // Filling KKT matrix
    kkt_.block(ix, ix, ndxi, ndxi) = d->Lxx;
    kkt_.block(ix, ndx_ + iu, ndxi, nui) = d->Lxu;
    kkt_.block(ndx_ + iu, ix, nui, ndxi) = d->Lxu.transpose();
    kkt_.block(ndx_ + iu, ndx_ + iu, nui, nui) = d->Luu;
    kkt_.block(ndx_ + nu_ + cx0 + ix, ix, ndxi, ndxi) = -d->Fx;
    kkt_.block(ndx_ + nu_ + cx0 + ix, ndx_ + iu, ndxi, nui) = -d->Fu;

    // Filling KKT vector
    kktref_.segment(ix, ndxi) = d->Lx;
    kktref_.segment(ndx_ + iu, nui) = d->Lu;
    m->get_state()->diff(d->xnext, xs_[t + 1], kktref_.segment(ndx_ + nu_ + cx0 + ix, ndxi));

    ix += ndxi;
    iu += nui;
  }
  const boost::shared_ptr<ActionDataAbstract>& df = problem_->get_terminalData();
  const std::size_t& ndxf = problem_->get_terminalModel()->get_state()->get_ndx();
  kkt_.block(ix, ix, ndxf, ndxf) = df->Lxx;
  kktref_.segment(ix, ndxf) = df->Lx;
  kkt_.block(0, ndx_ + nu_, ndx_ + nu_, ndx_) = kkt_.block(ndx_ + nu_, 0, ndx_, ndx_ + nu_).transpose();
  return cost_;
}

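// Solve the dense KKT system with an LU factorization and split the solution
// into its primal (state/control steps) and dual (multipliers) parts.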
void SolverKKT::computePrimalDual() {
  primaldual_ = kkt_.lu().solve(-kktref_);
  primal_ = primaldual_.segment(0, ndx_ + nu_);
  dual_ = primaldual_.segment(ndx_ + nu_, ndx_);
}

void SolverKKT::increaseRegularization() {
  xreg_ *= regfactor_;
  if (xreg_ > regmax_) {
    xreg_ = regmax_;
  }
  ureg_ = xreg_;
}

void SolverKKT::decreaseRegularization() {
  xreg_ /= regfactor_;
  if (xreg_ < regmin_) {
    xreg_ = regmin_;
  }
  ureg_ = xreg_;
}

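// Preallocate the trajectories and the KKT matrix/vector; nx_, ndx_ and nu_
// accumulate the corresponding dimensions over the whole horizon.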
void SolverKKT::allocateData() {
  const std::size_t& T = problem_->get_T();
  dxs_.resize(T + 1);
  dus_.resize(T);
  lambdas_.resize(T + 1);
  xs_try_.resize(T + 1);
  us_try_.resize(T);

  nx_ = 0;
  ndx_ = 0;
  nu_ = 0;
  const std::size_t& nx = problem_->get_nx();
  const std::size_t& ndx = problem_->get_ndx();
  const std::size_t& nu = problem_->get_nu_max();
  for (std::size_t t = 0; t < T; ++t) {
    if (t == 0) {
      xs_try_[t] = problem_->get_x0();
    } else {
      xs_try_[t] = Eigen::VectorXd::Constant(nx, NAN);
    }
    us_try_[t] = Eigen::VectorXd::Constant(nu, NAN);
    dxs_[t] = Eigen::VectorXd::Zero(ndx);
    dus_[t] = Eigen::VectorXd::Zero(nu);
    lambdas_[t] = Eigen::VectorXd::Zero(ndx);
    nx_ += nx;
    ndx_ += ndx;
    nu_ += nu;
  }
  const boost::shared_ptr<ActionModelAbstract>& model = problem_->get_terminalModel();
  nx_ += nx;
  ndx_ += ndx;
  xs_try_.back() = problem_->get_terminalModel()->get_state()->zero();
  dxs_.back() = Eigen::VectorXd::Zero(model->get_state()->get_ndx());
  lambdas_.back() = Eigen::VectorXd::Zero(model->get_state()->get_ndx());

  // Set dimensions for kkt matrix and kkt_ref vector
  kkt_.resize(2 * ndx_ + nu_, 2 * ndx_ + nu_);
  kkt_.setZero();
  kktref_.resize(2 * ndx_ + nu_);
  kktref_.setZero();
  primaldual_.resize(2 * ndx_ + nu_);
  primaldual_.setZero();
  primal_.resize(ndx_ + nu_);
  primal_.setZero();
  kkt_primal_.resize(ndx_ + nu_);
  kkt_primal_.setZero();
  dual_.resize(ndx_);
  dual_.setZero();
  dF.resize(ndx_ + nu_);
  dF.setZero();
}

}  // namespace crocoddyl
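For context, the following is a minimal sketch of how SolverKKT can be driven from user code. It assumes the LQR action model shipped with the library (crocoddyl/core/actions/lqr.hpp); the state/control dimensions, horizon length and the zero warm start are purely illustrative.

#include <vector>
#include <boost/make_shared.hpp>
#include <Eigen/Dense>
#include "crocoddyl/core/actions/lqr.hpp"
#include "crocoddyl/core/optctrl/shooting.hpp"
#include "crocoddyl/core/solvers/kkt.hpp"

int main() {
  const std::size_t nx = 4, nu = 2, T = 20;

  // One LQR action model reused for every running node and the terminal node.
  boost::shared_ptr<crocoddyl::ActionModelAbstract> model =
      boost::make_shared<crocoddyl::ActionModelLQR>(nx, nu);
  std::vector<boost::shared_ptr<crocoddyl::ActionModelAbstract> > running_models(T, model);

  // Shooting problem starting from the zero element of the state manifold.
  Eigen::VectorXd x0 = model->get_state()->zero();
  boost::shared_ptr<crocoddyl::ShootingProblem> problem =
      boost::make_shared<crocoddyl::ShootingProblem>(x0, running_models, model);

  // Zero warm start for states and controls, then run the KKT solver.
  std::vector<Eigen::VectorXd> xs(T + 1, x0);
  std::vector<Eigen::VectorXd> us(T, Eigen::VectorXd::Zero(nu));
  crocoddyl::SolverKKT solver(problem);
  const bool converged = solver.solve(xs, us, 100);
  return converged ? 0 : 1;
}

Since every iteration factorizes the dense (2*ndx_ + nu_)-dimensional KKT matrix assembled in calc(), this solver scales poorly with the horizon length and is best suited to small problems or as a reference against the sparse DDP-style solvers.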