9 #include "crocoddyl/core/utils/exception.hpp" 10 #include "crocoddyl/core/solvers/fddp.hpp" 15 :
SolverDDP(problem), dg_(0), dq_(0), dv_(0), th_acceptnegstep_(2) {}
SolverFDDP::~SolverFDDP() {}

bool SolverFDDP::solve(const std::vector<Eigen::VectorXd>& init_xs,
                       const std::vector<Eigen::VectorXd>& init_us,
                       const std::size_t& maxiter, const bool& is_feasible,
                       const double& reginit) {
  // ...
  if (std::isnan(reginit)) {
    // ...
  }
  // ...
  bool recalcDiff = true;
  for (iter_ = 0; iter_ < maxiter; ++iter_) {
    // ...
    try {
      // ...
    } catch (std::exception& e) {
      // ...
    }
    // ...
    for (std::vector<double>::const_iterator it = alphas_.begin();
         it != alphas_.end(); ++it) {
      // ...
      try {
        // ...
      } catch (std::exception& e) {
        // ...
      }
      // ...
    }
    // ...
    const std::size_t& n_callbacks = callbacks_.size();
    for (std::size_t c = 0; c < n_callbacks; ++c) {
      // ...
    }
    // ...
  }
  // ...
}
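For orientation, the following is a minimal usage sketch of this entry point, not library code: the ShootingProblem construction is omitted, and the zero-control warm start and the helper name runFDDP are purely illustrative.

#include <vector>
#include <Eigen/Dense>
#include "crocoddyl/core/solvers/fddp.hpp"

// Sketch: run FDDP on an already-built shooting problem (the construction of
// the problem itself is not shown here).
bool runFDDP(const boost::shared_ptr<crocoddyl::ShootingProblem>& problem) {
  crocoddyl::SolverFDDP solver(problem);

  // Illustrative warm start: T+1 states at x0 and T zero controls.
  const std::size_t T = problem->get_T();
  std::vector<Eigen::VectorXd> xs(T + 1, problem->get_x0());
  std::vector<Eigen::VectorXd> us;
  for (std::size_t t = 0; t < T; ++t) {
    us.push_back(
        Eigen::VectorXd::Zero(problem->get_runningModels()[t]->get_nu()));
  }

  // Up to 100 iterations; the warm start is flagged as infeasible, so the
  // solver progressively closes the gaps between the shooting nodes.
  return solver.solve(xs, us, 100, false);
}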
const Eigen::Vector2d& SolverFDDP::expectedImprovement() {
  // ...
  const std::size_t& T = this->problem_->get_T();
  // ...
  problem_->get_terminalModel()->get_state()->diff(xs_try_.back(), xs_.back(),
                                                   dx_.back());
  // ...
  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models =
      problem_->get_runningModels();
  for (std::size_t t = 0; t < T; ++t) {
    models[t]->get_state()->diff(xs_try_[t], xs_[t], dx_[t]);
    // ...
  }
  // ...
}
void SolverFDDP::updateExpectedImprovement() {
  // ...
  const std::size_t& T = this->problem_->get_T();
  // ...
  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models =
      problem_->get_runningModels();
  for (std::size_t t = 0; t < T; ++t) {
    if (models[t]->get_nu() != 0) {
      // ...
    }
  }
}
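The pair d_ maintained by these two routines drives the line-search acceptance test in solve(). The sketch below shows how such an LQ pair is typically converted into an expected cost change for a candidate step length; it is written against the member names in the listing further down, not copied from the library.

#include <Eigen/Dense>

// Sketch: expected cost change predicted by the LQ approximation d for a
// step length alpha, i.e. dV_exp(alpha) = alpha * (d[0] + 0.5 * alpha * d[1]).
double expectedCostChange(const Eigen::Vector2d& d, double alpha) {
  return alpha * (d[0] + 0.5 * alpha * d[1]);
}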
void SolverFDDP::forwardPass(const double& steplength) {
  if (steplength > 1. || steplength < 0.) {
    throw_pretty("Invalid argument: "
                 << "invalid step length, value is between 0. to 1.");
  }
  // ...
  const std::size_t& T = problem_->get_T();
  const std::vector<boost::shared_ptr<ActionModelAbstract> >& models =
      problem_->get_runningModels();
  const std::vector<boost::shared_ptr<ActionDataAbstract> >& datas =
      problem_->get_runningDatas();
  if ((is_feasible_) || (steplength == 1)) {
    // Rollout along the feedback policy when the current guess is feasible
    // (or a full step is applied).
    for (std::size_t t = 0; t < T; ++t) {
      const boost::shared_ptr<ActionModelAbstract>& m = models[t];
      const boost::shared_ptr<ActionDataAbstract>& d = datas[t];
      // ...
      m->get_state()->diff(xs_[t], xs_try_[t], dx_[t]);
      if (m->get_nu() != 0) {
        us_try_[t].noalias() = us_[t] - k_[t] * steplength - K_[t] * dx_[t];
        // ...
      }
      // ...
      if (raiseIfNaN(cost_try_)) {
        throw_pretty("forward_error");
      }
      if (raiseIfNaN(xnext_.lpNorm<Eigen::Infinity>())) {
        throw_pretty("forward_error");
      }
    }
    const boost::shared_ptr<ActionModelAbstract>& m =
        problem_->get_terminalModel();
    const boost::shared_ptr<ActionDataAbstract>& d =
        problem_->get_terminalData();
    // ...
    if (raiseIfNaN(cost_try_)) {
      throw_pretty("forward_error");
    }
  } else {
    // Rollout for an infeasible guess: the gaps fs_ are only partially
    // closed, proportionally to the step length.
    for (std::size_t t = 0; t < T; ++t) {
      const boost::shared_ptr<ActionModelAbstract>& m = models[t];
      const boost::shared_ptr<ActionDataAbstract>& d = datas[t];
      // ...
      m->get_state()->diff(xs_[t], xs_try_[t], dx_[t]);
      if (m->get_nu() != 0) {
        us_try_[t].noalias() = us_[t] - k_[t] * steplength - K_[t] * dx_[t];
        // ...
      }
      // ...
      if (raiseIfNaN(cost_try_)) {
        throw_pretty("forward_error");
      }
      if (raiseIfNaN(xnext_.lpNorm<Eigen::Infinity>())) {
        throw_pretty("forward_error");
      }
    }
    const boost::shared_ptr<ActionModelAbstract>& m =
        problem_->get_terminalModel();
    const boost::shared_ptr<ActionDataAbstract>& d =
        problem_->get_terminalData();
    m->get_state()->integrate(xnext_, fs_.back() * (steplength - 1),
                              xs_try_.back());
    // ...
    if (raiseIfNaN(cost_try_)) {
      throw_pretty("forward_error");
    }
  }
}
void SolverFDDP::set_th_acceptnegstep(const double& th_acceptnegstep) {
  if (0. > th_acceptnegstep) {
    throw_pretty("Invalid argument: "
                 << "th_acceptnegstep value has to be positive.");
  }
  th_acceptnegstep_ = th_acceptnegstep;
}
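This threshold bounds how much cost degradation is tolerated when a step is taken along an ascent direction in order to close the gaps. A usage sketch, reusing the hypothetical `problem` handle from the earlier example:

// Sketch: `problem` is an already-built
// boost::shared_ptr<crocoddyl::ShootingProblem>, as in the earlier example.
crocoddyl::SolverFDDP solver(problem);
solver.set_th_acceptnegstep(1.);                  // default value is 2
const double th = solver.get_th_acceptnegstep();  // returns 1.
solver.solve();  // run with the default warm start and 100 iterations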
std::vector< Eigen::VectorXd > fs_
Gaps/defects between shooting nodes.
bool is_feasible_
Label that indicates if the iteration is feasible.
void updateExpectedImprovement()
Update internal values for computing the expected improvement.
double ureg_
Current control regularization value.
double steplength_
Current applied step-length.
void setCandidate(const std::vector< Eigen::VectorXd > &xs_warm=DEFAULT_VECTOR, const std::vector< Eigen::VectorXd > &us_warm=DEFAULT_VECTOR, const bool &is_feasible=false)
Set the solver candidate warm-point values.
std::vector< Eigen::VectorXd > xs_try_
State trajectory computed by line-search procedure.
double th_stepdec_
Step-length threshold used to decrease regularization.
double dq_
Internal data for computing the expected improvement.
double xreg_
Current state regularization value.
std::vector< boost::shared_ptr< CallbackAbstract > > callbacks_
Callback functions.
virtual bool solve(const std::vector< Eigen::VectorXd > &init_xs=DEFAULT_VECTOR, const std::vector< Eigen::VectorXd > &init_us=DEFAULT_VECTOR, const std::size_t &maxiter=100, const bool &is_feasible=false, const double &regInit=1e-9)
Compute the optimal trajectory as lists of T+1 state and T control terms.
double th_acceptstep_
Threshold used for accepting step.
std::vector< Eigen::VectorXd > us_
Control trajectory.
double dV_
Cost reduction obtained by tryStep()
std::vector< Eigen::VectorXd > k_
Feed-forward terms.
std::vector< Eigen::VectorXd > Vx_
Gradient of the Value function.
void increaseRegularization()
Increase the state and control regularization values by a regfactor_ factor.
virtual double stoppingCriteria()
Return a positive value that quantifies the algorithm termination.
Eigen::Vector2d d_
LQ approximation of the expected improvement.
Eigen::VectorXd fTVxx_p_
fTVxx_p term
Eigen::VectorXd xnext_
Next state.
boost::shared_ptr< ShootingProblem > problem_
Optimal control problem.
EIGEN_MAKE_ALIGNED_OPERATOR_NEW SolverFDDP(boost::shared_ptr< ShootingProblem > problem)
Initialize the FDDP solver.
double dVexp_
Expected cost reduction.
double dg_
Internal data for computing the expected improvement.
void decreaseRegularization()
Decrease the state and control regularization values by a regfactor_ factor.
std::vector< Eigen::MatrixXd > K_
Feedback gains.
virtual void computeDirection(const bool &recalc=true)
Compute the search direction for the current guess.
std::vector< Eigen::VectorXd > xs_
State trajectory.
std::vector< Eigen::MatrixXd > Vxx_
Hessian of the Value function.
void set_th_acceptnegstep(const double &th_acceptnegstep)
Modify the threshold used for accepting step along ascent direction.
double th_stepinc_
Step-length threshold used to increase regularization.
double get_th_acceptnegstep() const
Return the threshold used for accepting step along ascent direction.
bool was_feasible_
Label that indicates if the previous iterate was feasible.
double th_grad_
Tolerance of the expected gradient used for testing the step.
std::vector< double > alphas_
Set of step lengths used by the line-search procedure.
double th_stop_
Tolerance for stopping the algorithm.
double dv_
Internal data for computing the expected improvement.
double regmin_
Minimum allowed regularization value.
double regmax_
Maximum allowed regularization value.
Differential Dynamic Programming (DDP) solver.
std::vector< Eigen::VectorXd > Quuk_
Quuk term.
Abstract class for solver callbacks.
virtual double tryStep(const double &steplength=1)
Try a predefined step length and compute its cost improvement.
std::size_t iter_
Number of iterations performed by the solver.
double stop_
Value computed by stoppingCriteria()
virtual void forwardPass(const double &stepLength)
Run the forward pass or rollout.
std::vector< Eigen::VectorXd > us_try_
Control trajectory computed by line-search procedure.
virtual const Eigen::Vector2d & expectedImprovement()
Return the expected improvement from the computed search direction.
std::vector< Eigen::VectorXd > Qu_
Gradient of the Hamiltonian.
double cost_try_
Total cost computed by line-search procedure.
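Finally, since solve() dispatches the registered CallbackAbstract objects after every iteration, a custom callback can be sketched as below. The header path, class name, and the getters used are assumptions about the solver's public interface rather than quotations from the library.

#include <iostream>
#include "crocoddyl/core/solver-base.hpp"

// Illustrative callback: print a few solver statistics after each iteration.
class CallbackIterationPrint : public crocoddyl::CallbackAbstract {
 public:
  void operator()(crocoddyl::SolverAbstract& solver) override {
    std::cout << "iter=" << solver.get_iter() << "  cost=" << solver.get_cost()
              << "  steplength=" << solver.get_steplength() << std::endl;
  }
};

// Registered like the built-in callbacks, e.g.:
//   solver.setCallbacks({boost::make_shared<CallbackIterationPrint>()});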