crocoddyl  1.4.0
Contact RObot COntrol by Differential DYnamic programming Library (Crocoddyl)
kkt.cpp
// BSD 3-Clause License
//
// Copyright (C) 2018-2020, LAAS-CNRS, New York University, Max Planck Gesellschaft,
// University of Edinburgh
// Copyright note valid unless otherwise stated in individual files.
// All rights reserved.

10 #include "crocoddyl/core/solvers/kkt.hpp"
11 
12 namespace crocoddyl {
13 
14 SolverKKT::SolverKKT(boost::shared_ptr<ShootingProblem> problem)
15  : SolverAbstract(problem),
16  regfactor_(10.),
17  regmin_(1e-9),
18  regmax_(1e9),
19  cost_try_(0.),
20  th_grad_(1e-12),
21  was_feasible_(false) {
22  allocateData();
23  const unsigned int& n_alphas = 10;
24  xreg_ = 0.;
25  ureg_ = 0.;
26  alphas_.resize(n_alphas);
27  for (unsigned int n = 0; n < n_alphas; ++n) {
28  alphas_[n] = 1. / pow(2., (double)n);
29  }
30 }
31 
32 SolverKKT::~SolverKKT() {}
33 
34 bool SolverKKT::solve(const std::vector<Eigen::VectorXd>& init_xs, const std::vector<Eigen::VectorXd>& init_us,
35  const std::size_t& maxiter, const bool& is_feasible, const double&) {
36  setCandidate(init_xs, init_us, is_feasible);
37  bool recalc = true;
38  for (iter_ = 0; iter_ < maxiter; ++iter_) {
39  while (true) {
40  try {
41  computeDirection(recalc);
42  } catch (std::exception& e) {
43  recalc = false;
44  if (xreg_ == regmax_) {
45  return false;
46  } else {
47  continue;
48  }
49  }
50  break;
51  }
52 
54  for (std::vector<double>::const_iterator it = alphas_.begin(); it != alphas_.end(); ++it) {
55  steplength_ = *it;
56  try {
58  } catch (std::exception& e) {
59  continue;
60  }
61  dVexp_ = steplength_ * d_[0] + 0.5 * steplength_ * steplength_ * d_[1];
62  if (d_[0] < th_grad_ || !is_feasible_ || dV_ > th_acceptstep_ * dVexp_) {
65  cost_ = cost_try_;
66  break;
67  }
68  }
70  const std::size_t& n_callbacks = callbacks_.size();
71  if (n_callbacks != 0) {
72  for (std::size_t c = 0; c < n_callbacks; ++c) {
73  CallbackAbstract& callback = *callbacks_[c];
74  callback(*this);
75  }
76  }
77  if (was_feasible_ && stop_ < th_stop_) {
78  return true;
79  }
80  }
81  return false;
82 }
83 
void SolverKKT::computeDirection(const bool& recalc) {
  // Build (optionally) and solve the KKT system, then scatter the stacked
  // solution into per-node state/control directions and multipliers.
  const std::size_t& T = problem_->get_T();
  if (recalc) {
    // Re-evaluate derivatives and rebuild the KKT matrix/vector for the guess.
    calc();
  }
  computePrimalDual();
  // Views into the primal solution: state part first, control part after.
  const Eigen::VectorBlock<Eigen::VectorXd, Eigen::Dynamic> p_x = primal_.segment(0, ndx_);
  const Eigen::VectorBlock<Eigen::VectorXd, Eigen::Dynamic> p_u = primal_.segment(ndx_, nu_);

  // Running offsets into the stacked state (ix) and control (iu) dimensions.
  std::size_t ix = 0;
  std::size_t iu = 0;
  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
  for (std::size_t t = 0; t < T; ++t) {
    const std::size_t& ndxi = models[t]->get_state()->get_ndx();
    const std::size_t& nui = models[t]->get_nu();
    dxs_[t] = p_x.segment(ix, ndxi);
    dus_[t] = p_u.segment(iu, nui);
    lambdas_[t] = dual_.segment(ix, ndxi);
    ix += ndxi;
    iu += nui;
  }
  // Terminal node carries a state direction and multiplier but no control.
  const std::size_t& ndxi = problem_->get_terminalModel()->get_state()->get_ndx();
  dxs_.back() = p_x.segment(ix, ndxi);
  lambdas_.back() = dual_.segment(ix, ndxi);
}
109 
110 double SolverKKT::tryStep(const double& steplength) {
111  const std::size_t& T = problem_->get_T();
112  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
113  for (std::size_t t = 0; t < T; ++t) {
114  const boost::shared_ptr<ActionModelAbstract>& m = models[t];
115 
116  m->get_state()->integrate(xs_[t], steplength * dxs_[t], xs_try_[t]);
117  if (m->get_nu() != 0) {
118  us_try_[t] = us_[t];
119  us_try_[t] += steplength * dus_[t];
120  }
121  }
122  const boost::shared_ptr<ActionModelAbstract> m = problem_->get_terminalModel();
123  m->get_state()->integrate(xs_[T], steplength * dxs_[T], xs_try_[T]);
124  cost_try_ = problem_->calc(xs_try_, us_try_);
125  return cost_ - cost_try_;
126 }
127 
129  const std::size_t& T = problem_->get_T();
130  std::size_t ix = 0;
131  std::size_t iu = 0;
132  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models = problem_->get_runningModels();
133  const std::vector<boost::shared_ptr<ActionDataAbstract> >& datas = problem_->get_runningDatas();
134  for (std::size_t t = 0; t < T; ++t) {
135  const boost::shared_ptr<ActionDataAbstract>& d = datas[t];
136  const std::size_t& ndxi = models[t]->get_state()->get_ndx();
137  const std::size_t& nui = models[t]->get_nu();
138 
139  dF.segment(ix, ndxi) = lambdas_[t];
140  dF.segment(ix, ndxi).noalias() -= d->Fx.transpose() * lambdas_[t + 1];
141  dF.segment(ndx_ + iu, nui).noalias() = -lambdas_[t + 1].transpose() * d->Fu;
142  ix += ndxi;
143  iu += nui;
144  }
145  const std::size_t& ndxi = problem_->get_terminalModel()->get_state()->get_ndx();
146  dF.segment(ix, ndxi) = lambdas_.back();
147  stop_ = (kktref_.segment(0, ndx_ + nu_) + dF).squaredNorm() + kktref_.segment(ndx_ + nu_, ndx_).squaredNorm();
148  return stop_;
149 }
150 
151 const Eigen::Vector2d& SolverKKT::expectedImprovement() {
152  d_ = Eigen::Vector2d::Zero();
153  // -grad^T.primal
154  d_(0) = -kktref_.segment(0, ndx_ + nu_).dot(primal_);
155  // -(hessian.primal)^T.primal
156  kkt_primal_.noalias() = kkt_.block(0, 0, ndx_ + nu_, ndx_ + nu_) * primal_;
157  d_(1) = -kkt_primal_.dot(primal_);
158  return d_;
159 }
160 
// Read-only accessors for inspecting the assembled KKT system and its data.

const Eigen::MatrixXd& SolverKKT::get_kkt() const { return kkt_; }

const Eigen::VectorXd& SolverKKT::get_kktref() const { return kktref_; }

const Eigen::VectorXd& SolverKKT::get_primaldual() const { return primaldual_; }

// Search directions for states and controls computed by computeDirection().
const std::vector<Eigen::VectorXd>& SolverKKT::get_dxs() const { return dxs_; }

const std::vector<Eigen::VectorXd>& SolverKKT::get_dus() const { return dus_; }

// Stacked state / tangent / control dimensions accumulated over the horizon.
const std::size_t& SolverKKT::get_nx() const { return nx_; }

const std::size_t& SolverKKT::get_ndx() const { return ndx_; }

const std::size_t& SolverKKT::get_nu() const { return nu_; }
176 
double SolverKKT::calc() {
  // Evaluate the cost and its derivatives along the current trajectory, then
  // assemble the dense KKT matrix and reference vector; returns the total cost.
  cost_ = problem_->calc(xs_, us_);
  cost_ = problem_->calcDiff(xs_, us_);

  // offset on constraint xnext = f(x,u) due to x0 = ref.
  const std::size_t& cx0 = problem_->get_runningModels()[0]->get_state()->get_ndx();

  std::size_t ix = 0;
  std::size_t iu = 0;
  const std::size_t& T = problem_->get_T();
  // Identity block multiplying the state directions in the constraint rows.
  kkt_.block(ndx_ + nu_, 0, ndx_, ndx_) = Eigen::MatrixXd::Identity(ndx_, ndx_);
  for (std::size_t t = 0; t < T; ++t) {
    const boost::shared_ptr<ActionModelAbstract>& m = problem_->get_runningModels()[t];
    const boost::shared_ptr<ActionDataAbstract>& d = problem_->get_runningDatas()[t];
    const std::size_t& ndxi = m->get_state()->get_ndx();
    const std::size_t& nui = m->get_nu();

    // Computing the gap at the initial state
    if (t == 0) {
      m->get_state()->diff(problem_->get_x0(), xs_[0], kktref_.segment(ndx_ + nu_, ndxi));
    }

    // Filling KKT matrix: cost Hessian blocks (Lxx, Lxu, Luu) in the top-left,
    // negated dynamics Jacobians (-Fx, -Fu) in the constraint rows.
    kkt_.block(ix, ix, ndxi, ndxi) = d->Lxx;
    kkt_.block(ix, ndx_ + iu, ndxi, nui) = d->Lxu;
    kkt_.block(ndx_ + iu, ix, nui, ndxi) = d->Lxu.transpose();
    kkt_.block(ndx_ + iu, ndx_ + iu, nui, nui) = d->Luu;
    kkt_.block(ndx_ + nu_ + cx0 + ix, ix, ndxi, ndxi) = -d->Fx;
    kkt_.block(ndx_ + nu_ + cx0 + ix, ndx_ + iu, ndxi, nui) = -d->Fu;

    // Filling KKT vector: cost gradients (Lx, Lu), then the dynamics gap
    // between the predicted next state and the candidate trajectory.
    kktref_.segment(ix, ndxi) = d->Lx;
    kktref_.segment(ndx_ + iu, nui) = d->Lu;
    m->get_state()->diff(d->xnext, xs_[t + 1], kktref_.segment(ndx_ + nu_ + cx0 + ix, ndxi));

    ix += ndxi;
    iu += nui;
  }
  // Terminal cost contribution (no control at the terminal node).
  const boost::shared_ptr<ActionDataAbstract>& df = problem_->get_terminalData();
  const std::size_t& ndxf = problem_->get_terminalModel()->get_state()->get_ndx();
  kkt_.block(ix, ix, ndxf, ndxf) = df->Lxx;
  kktref_.segment(ix, ndxf) = df->Lx;
  // Mirror the constraint block into the upper-right corner (KKT symmetry).
  kkt_.block(0, ndx_ + nu_, ndx_ + nu_, ndx_) = kkt_.block(ndx_ + nu_, 0, ndx_, ndx_ + nu_).transpose();
  return cost_;
}
222 
void SolverKKT::computePrimalDual() {
  // Solve the dense KKT linear system with an LU factorization; the stacked
  // solution holds the primal part (dx, du) first, then the dual multipliers.
  primaldual_ = kkt_.lu().solve(-kktref_);
  primal_ = primaldual_.segment(0, ndx_ + nu_);
  dual_ = primaldual_.segment(ndx_ + nu_, ndx_);
}
228 
229 void SolverKKT::increaseRegularization() {
230  xreg_ *= regfactor_;
231  if (xreg_ > regmax_) {
232  xreg_ = regmax_;
233  }
234  ureg_ = xreg_;
235 }
236 
237 void SolverKKT::decreaseRegularization() {
238  xreg_ /= regfactor_;
239  if (xreg_ < regmin_) {
240  xreg_ = regmin_;
241  }
242  ureg_ = xreg_;
243 }
244 
void SolverKKT::allocateData() {
  // Size all per-node containers and the stacked KKT system for the problem
  // horizon T.
  const std::size_t& T = problem_->get_T();
  dxs_.resize(T + 1);
  dus_.resize(T);
  lambdas_.resize(T + 1);
  xs_try_.resize(T + 1);
  us_try_.resize(T);

  // Accumulate the total state / tangent / control dimensions over the nodes.
  nx_ = 0;
  ndx_ = 0;
  nu_ = 0;
  const std::size_t& nx = problem_->get_nx();
  const std::size_t& ndx = problem_->get_ndx();
  const std::size_t& nu = problem_->get_nu_max();
  for (std::size_t t = 0; t < T; ++t) {
    if (t == 0) {
      xs_try_[t] = problem_->get_x0();
    } else {
      // NaN-fill so an un-written line-search state is easy to spot.
      xs_try_[t] = Eigen::VectorXd::Constant(nx, NAN);
    }
    us_try_[t] = Eigen::VectorXd::Constant(nu, NAN);
    dxs_[t] = Eigen::VectorXd::Zero(ndx);
    dus_[t] = Eigen::VectorXd::Zero(nu);
    lambdas_[t] = Eigen::VectorXd::Zero(ndx);
    nx_ += nx;
    ndx_ += ndx;
    nu_ += nu;
  }
  // Terminal node: one extra state, no control.
  const boost::shared_ptr<ActionModelAbstract>& model = problem_->get_terminalModel();
  nx_ += nx;
  ndx_ += ndx;
  xs_try_.back() = problem_->get_terminalModel()->get_state()->zero();
  dxs_.back() = Eigen::VectorXd::Zero(model->get_state()->get_ndx());
  lambdas_.back() = Eigen::VectorXd::Zero(model->get_state()->get_ndx());

  // Set dimensions for kkt matrix and kkt_ref vector:
  // (ndx_ + nu_) primal variables plus ndx_ dual variables.
  kkt_.resize(2 * ndx_ + nu_, 2 * ndx_ + nu_);
  kkt_.setZero();
  kktref_.resize(2 * ndx_ + nu_);
  kktref_.setZero();
  primaldual_.resize(2 * ndx_ + nu_);
  primaldual_.setZero();
  primal_.resize(ndx_ + nu_);
  primal_.setZero();
  kkt_primal_.resize(ndx_ + nu_);
  kkt_primal_.setZero();
  dual_.resize(ndx_);
  dual_.setZero();
  dF.resize(ndx_ + nu_);
  dF.setZero();
}
296 
297 } // namespace crocoddyl
bool is_feasible_
Label that indicates if the iteration is feasible.
virtual double stoppingCriteria()
Return a positive value that quantifies the algorithm termination.
Definition: kkt.cpp:128
double ureg_
Current control regularization values.
double steplength_
Current applied step-length.
void setCandidate(const std::vector< Eigen::VectorXd > &xs_warm=DEFAULT_VECTOR, const std::vector< Eigen::VectorXd > &us_warm=DEFAULT_VECTOR, const bool &is_feasible=false)
Set the solver candidate warm-point values.
Definition: solver-base.cpp:42
virtual bool solve(const std::vector< Eigen::VectorXd > &init_xs=DEFAULT_VECTOR, const std::vector< Eigen::VectorXd > &init_us=DEFAULT_VECTOR, const std::size_t &maxiter=100, const bool &is_feasible=false, const double &regInit=1e-9)
Compute the optimal trajectory as lists of state and control terms.
Definition: kkt.cpp:34
virtual void allocateData()
Allocate all the internal data needed for the solver.
Definition: ddp.cpp:328
std::vector< Eigen::VectorXd > xs_try_
State trajectory computed by line-search procedure.
Definition: ddp.hpp:279
double xreg_
Current state regularization value.
std::vector< boost::shared_ptr< CallbackAbstract > > callbacks_
Callback functions.
virtual const Eigen::Vector2d & expectedImprovement()
Return the expected improvement from a given current search direction.
Definition: kkt.cpp:151
double th_acceptstep_
Threshold used for accepting step.
std::vector< Eigen::VectorXd > us_
Control trajectory.
double dV_
Cost reduction obtained by tryStep()
virtual double stoppingCriteria()
Return a positive value that quantifies the algorithm termination.
Definition: ddp.cpp:132
Eigen::Vector2d d_
LQ approximation of the expected improvement.
boost::shared_ptr< ShootingProblem > problem_
optimal control problem
double dVexp_
Expected cost reduction.
virtual void computeDirection(const bool &recalc=true)
Compute the search direction for the current guess.
Definition: ddp.cpp:120
std::vector< Eigen::VectorXd > xs_
State trajectory.
double cost_
Total cost.
bool was_feasible_
Label that indicates if the previous iterate was feasible.
Definition: ddp.hpp:305
double th_grad_
Tolerance of the expected gradient used for testing the step.
Definition: ddp.hpp:302
virtual double tryStep(const double &steplength=1)
Try a predefined step length and compute its cost improvement.
Definition: kkt.cpp:110
std::vector< double > alphas_
Set of step lengths used by the line-search procedure.
Definition: ddp.hpp:301
double th_stop_
Tolerance for stopping the algorithm.
double regmin_
Minimum allowed regularization value.
Definition: ddp.hpp:275
double regmax_
Maximum allowed regularization value.
Definition: ddp.hpp:276
double regfactor_
Regularization factor used to decrease / increase it.
Definition: ddp.hpp:274
virtual void computeDirection(const bool &recalc=true)
Compute the search direction for the current guess.
Definition: kkt.cpp:84
Abstract class for solver callbacks.
virtual double tryStep(const double &steplength=1)
Try a predefined step length and compute its cost improvement.
Definition: ddp.cpp:127
std::size_t iter_
Number of iteration performed by the solver.
double stop_
Value computed by stoppingCriteria()
std::vector< Eigen::VectorXd > us_try_
Control trajectory computed by line-search procedure.
Definition: ddp.hpp:280
virtual const Eigen::Vector2d & expectedImprovement()
Definition: fddp.cpp:107
double cost_try_
Total cost computed by line-search procedure.
Definition: ddp.hpp:278