#ifdef CROCODDYL_WITH_MULTITHREADING
#include <omp.h>
#endif  // CROCODDYL_WITH_MULTITHREADING

#include "crocoddyl/core/utils/exception.hpp"
#include "crocoddyl/core/solvers/fddp.hpp"

SolverFDDP::SolverFDDP(boost::shared_ptr<ShootingProblem> problem)
    : SolverDDP(problem), dg_(0), dq_(0), dv_(0), th_acceptnegstep_(2) {}
SolverFDDP::~SolverFDDP() {}
bool SolverFDDP::solve(const std::vector<Eigen::VectorXd>& init_xs,
                       const std::vector<Eigen::VectorXd>& init_us,
                       const std::size_t maxiter, const bool is_feasible,
                       const double reginit) {
  START_PROFILER("SolverFDDP::solve");
  if (std::isnan(reginit)) {
    // ...
  }
  // ...
  bool recalcDiff = true;
  // ...
    } catch (std::exception& e) {
  // ...
    for (std::vector<double>::const_iterator it = alphas_.begin();
         it != alphas_.end(); ++it) {
  // ...
      } catch (std::exception& e) {
  // ...
    }
  // ...
      STOP_PROFILER("SolverFDDP::solve");
  // ...
  const std::size_t n_callbacks = callbacks_.size();
  for (std::size_t c = 0; c < n_callbacks; ++c) {
    // ...
  }
  // ...
    STOP_PROFILER("SolverFDDP::solve");
  // ...
  STOP_PROFILER("SolverFDDP::solve");
  // ...
}
// In SolverFDDP::expectedImprovement():
// ...
const std::size_t T = this->problem_->get_T();
// ...
const std::vector<boost::shared_ptr<ActionModelAbstract> >& models =
    problem_->get_runningModels();
// ...
for (std::size_t t = 0; t < T; ++t) {
  // ...
}
// In SolverFDDP::updateExpectedImprovement():
// ...
const std::size_t T = this->problem_->get_T();
// ...
const std::vector<boost::shared_ptr<ActionModelAbstract> >& models =
    problem_->get_runningModels();
for (std::size_t t = 0; t < T; ++t) {
  const std::size_t nu = models[t]->get_nu();
  // ...
}
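The statements elided from the two functions above accumulate dg_, dq_ and dv_. A hedged summary of the LQ model they build, written from the member roles documented below (fs_ are the gaps, dx_ the rollout state errors); sign conventions should be checked against the complete source:

% Expected improvement for a step length \alpha (sketch).
\begin{aligned}
d_1 &= \sum_{t} \mathbf{k}_t^\top Q_{\mathbf{u},t}
       - \sum_{t} \mathbf{f}_t^\top\!\left(V_{\mathbf{x},t} + V_{\mathbf{xx},t}\,\delta\mathbf{x}_t\right),\\
d_2 &= \sum_{t} \left(-\mathbf{k}_t^\top Q_{\mathbf{uu},t}\mathbf{k}_t
       + \mathbf{f}_t^\top V_{\mathbf{xx},t}\mathbf{f}_t
       + 2\,\mathbf{f}_t^\top V_{\mathbf{xx},t}\,\delta\mathbf{x}_t\right),\\
\Delta V_{\mathrm{exp}}(\alpha) &= \alpha\left(d_1 + \tfrac{1}{2}\,\alpha\, d_2\right).
\end{aligned}

Here d_1 and d_2 correspond to d_[0] = dg_ + dv_ and d_[1] = dq_ - 2 * dv_, with the sums running over the running nodes plus the terminal one.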
// In SolverFDDP::forwardPass(const double steplength):
if (steplength > 1. || steplength < 0.) {
  throw_pretty("Invalid argument: "
               << "invalid step length, value is between 0. to 1.");
}
START_PROFILER("SolverFDDP::forwardPass");
// ...
const std::size_t T = problem_->get_T();
const std::vector<boost::shared_ptr<ActionModelAbstract> >& models =
    problem_->get_runningModels();
const std::vector<boost::shared_ptr<ActionDataAbstract> >& datas =
    problem_->get_runningDatas();
// ...
  // Feasible rollout (or full step):
  for (std::size_t t = 0; t < T; ++t) {
    const boost::shared_ptr<ActionModelAbstract>& m = models[t];
    const boost::shared_ptr<ActionDataAbstract>& d = datas[t];
    const std::size_t nu = m->get_nu();
    // ...
      STOP_PROFILER("SolverFDDP::forwardPass");
      throw_pretty("forward_error");
    // ...
    if (raiseIfNaN(xnext_.lpNorm<Eigen::Infinity>())) {
      STOP_PROFILER("SolverFDDP::forwardPass");
      throw_pretty("forward_error");
    }
  }
  // ...
  const boost::shared_ptr<ActionModelAbstract>& m = problem_->get_terminalModel();
  const boost::shared_ptr<ActionDataAbstract>& d = problem_->get_terminalData();
  // ...
    STOP_PROFILER("SolverFDDP::forwardPass");
    throw_pretty("forward_error");
  // ...
  // Infeasible rollout: contract the gaps while integrating:
  for (std::size_t t = 0; t < T; ++t) {
    const boost::shared_ptr<ActionModelAbstract>& m = models[t];
    const boost::shared_ptr<ActionDataAbstract>& d = datas[t];
    const std::size_t nu = m->get_nu();
    // ...
      STOP_PROFILER("SolverFDDP::forwardPass");
      throw_pretty("forward_error");
    // ...
    if (raiseIfNaN(xnext_.lpNorm<Eigen::Infinity>())) {
      STOP_PROFILER("SolverFDDP::forwardPass");
      throw_pretty("forward_error");
    }
  }
  // ...
  const boost::shared_ptr<ActionModelAbstract>& m = problem_->get_terminalModel();
  const boost::shared_ptr<ActionDataAbstract>& d = problem_->get_terminalData();
  m->get_state()->integrate(xnext_, fs_.back() * (steplength - 1),
                            xs_try_.back());
  // ...
    STOP_PROFILER("SolverFDDP::forwardPass");
    throw_pretty("forward_error");
// ...
STOP_PROFILER("SolverFDDP::forwardPass");
257 if (0. > th_acceptnegstep) {
258 throw_pretty(
"Invalid argument: " 259 <<
"th_acceptnegstep value has to be positive.");
261 th_acceptnegstep_ = th_acceptnegstep;
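A short usage sketch of the setter above (solver construction elided); with a threshold of 0 the ascent branch of the acceptance test sketched earlier only admits steps that still reduce the cost:

// Assuming `solver` is a crocoddyl::SolverFDDP built from a ShootingProblem.
solver.set_th_acceptnegstep(0.);     // be strict: no cost increases along ascent directions
// solver.set_th_acceptnegstep(-1.); // would throw "th_acceptnegstep value has to be positive."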
double reg_max_
Maximum allowed regularization value.
bool is_feasible_
Label that indicates if the iteration is feasible.
virtual void forwardPass(const double stepLength)
Run the forward pass or rollout.
void updateExpectedImprovement()
Update internal values for computing the expected improvement.
double ureg_
Current control regularization values.
double steplength_
Current applied step-length.
virtual bool solve(const std::vector< Eigen::VectorXd > &init_xs=DEFAULT_VECTOR, const std::vector< Eigen::VectorXd > &init_us=DEFAULT_VECTOR, const std::size_t maxiter=100, const bool is_feasible=false, const double regInit=1e-9)
Compute the optimal trajectory xs, us as lists of T+1 and T terms, respectively.
std::vector< MatrixXdRowMajor > K_
Feedback gains K.
std::vector< Eigen::VectorXd > xs_try_
State trajectory computed by line-search procedure.
bool was_feasible_
Label that indicates if the previous iterate was feasible.
double th_stepdec_
Step-length threshold used to decrease regularization.
double dq_
Internal data for computing the expected improvement.
double xreg_
Current state regularization value.
std::vector< boost::shared_ptr< CallbackAbstract > > callbacks_
Callback functions.
virtual void resizeData()
Resizing the solver data.
double th_acceptstep_
Threshold used for accepting step.
double reg_min_
Minimum allowed regularization value.
std::vector< Eigen::VectorXd > us_
Control trajectory.
double dV_
Cost reduction obtained by tryStep()
std::vector< Eigen::VectorXd > k_
Feed-forward terms k.
std::vector< Eigen::VectorXd > Vx_
Gradient of the Value function Vx.
void increaseRegularization()
Increase the state and control regularization values by a regfactor_ factor.
virtual double stoppingCriteria()
Return a positive value that quantifies the algorithm termination.
Eigen::Vector2d d_
LQ approximation of the expected improvement.
Eigen::VectorXd fTVxx_p_
Store the value of Vxx*f.
void set_th_acceptnegstep(const double th_acceptnegstep)
Modify the threshold used for accepting a step along an ascent direction.
Eigen::VectorXd xnext_
Next state x_{k+1}.
boost::shared_ptr< ShootingProblem > problem_
Optimal control problem.
EIGEN_MAKE_ALIGNED_OPERATOR_NEW SolverFDDP(boost::shared_ptr< ShootingProblem > problem)
Initialize the FDDP solver.
double dVexp_
Expected cost reduction.
double dg_
Internal data for computing the expected improvement.
void setCandidate(const std::vector< Eigen::VectorXd > &xs_warm=DEFAULT_VECTOR, const std::vector< Eigen::VectorXd > &us_warm=DEFAULT_VECTOR, const bool is_feasible=false)
Set the solver candidate trajectories (xs, us).
void decreaseRegularization()
Decrease the state and control regularization values by a regfactor_ factor.
std::vector< Eigen::VectorXd > xs_
State trajectory.
std::vector< Eigen::MatrixXd > Vxx_
Hessian of the Value function Vxx.
double th_stepinc_
Step-length threshold used to increase regularization.
double get_th_acceptnegstep() const
Return the threshold used for accepting a step along an ascent direction.
std::vector< Eigen::VectorXd > dx_
State error during the roll-out/forward-pass (size T)
std::vector< Eigen::VectorXd > fs_
Gaps/defects between shooting nodes.
double th_grad_
Tolerance of the expected gradient used for testing the step.
virtual double tryStep(const double steplength=1)
Try a predefined step length and compute its cost improvement dV.
std::vector< double > alphas_
Set of step lengths used by the line-search procedure.
double th_stop_
Tolerance for stopping the algorithm.
double dv_
Internal data for computing the expected improvement.
Differential Dynamic Programming (DDP) solver.
std::vector< Eigen::VectorXd > Quuk_
Store the values of Quu*k.
virtual void computeDirection(const bool recalc=true)
Compute the search direction (dx, du) for the current guess (xs, us).
Abstract class for solver callbacks.
std::size_t iter_
Number of iterations performed by the solver.
double stop_
Value computed by stoppingCriteria()
std::vector< Eigen::VectorXd > us_try_
Control trajectory computed by line-search procedure.
virtual const Eigen::Vector2d & expectedImprovement()
Return the expected improvement from a given current search direction.
std::vector< Eigen::VectorXd > Qu_
Gradient of the Hamiltonian Qu.
double cost_try_
Total cost computed by line-search procedure.
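Tying the members above together, a hedged end-to-end sketch: buildProblem() is a hypothetical placeholder for user code that assembles a ShootingProblem, while SolverFDDP, CallbackVerbose, setCallbacks(), solve(), get_xs() and get_us() are regular crocoddyl entry points as listed in this reference:

#include <vector>
#include <boost/make_shared.hpp>
#include "crocoddyl/core/solvers/fddp.hpp"
#include "crocoddyl/core/utils/callbacks.hpp"

// Hypothetical: builds and returns a fully populated shooting problem.
boost::shared_ptr<crocoddyl::ShootingProblem> buildProblem();

int main() {
  boost::shared_ptr<crocoddyl::ShootingProblem> problem = buildProblem();
  crocoddyl::SolverFDDP solver(problem);

  // Print cost, step length, regularization, etc. at every iteration.
  std::vector<boost::shared_ptr<crocoddyl::CallbackAbstract> > cbs;
  cbs.push_back(boost::make_shared<crocoddyl::CallbackVerbose>());
  solver.setCallbacks(cbs);

  // Defaults as in the solve() signature above: empty warm start,
  // maxiter = 100, is_feasible = false, regInit = 1e-9.
  const bool converged = solver.solve();

  const std::vector<Eigen::VectorXd>& xs = solver.get_xs();  // T+1 states
  const std::vector<Eigen::VectorXd>& us = solver.get_us();  // T controls
  (void)xs;
  (void)us;
  return converged ? 0 : 1;
}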