Lines Matching full:current_state
116 State current_state(num_parameters, num_effective_parameters);
134 if (!Evaluate(evaluator, x, &current_state, &summary->message)) {
142 summary->initial_cost = current_state.cost + summary->fixed_cost;
143 iteration_summary.cost = current_state.cost + summary->fixed_cost;
145 iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
146 iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
238 current_state.search_direction = -current_state.gradient;
242 current_state,
243 &current_state.search_direction);
278 current_state.search_direction = -current_state.gradient;
281 line_search_function.Init(x, current_state.search_direction);
282 current_state.directional_derivative =
283 current_state.gradient.dot(current_state.search_direction);
293 ? min(1.0, 1.0 / current_state.gradient_max_norm)
294 : min(1.0, 2.0 * (current_state.cost - previous_state.cost) /
295 current_state.directional_derivative);
304 initial_step_size, current_state.directional_derivative,
305 (current_state.cost - previous_state.cost));
312 current_state.cost,
313 current_state.directional_derivative,
321 initial_step_size, current_state.cost,
322 current_state.directional_derivative);
328 current_state.step_size = line_search_summary.optimal_step_size;
329 delta = current_state.step_size * current_state.search_direction;
331 previous_state = current_state;
344 &current_state,
357 iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
358 iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
359 iteration_summary.cost_change = previous_state.cost - current_state.cost;
360 iteration_summary.cost = current_state.cost + summary->fixed_cost;
365 iteration_summary.step_size = current_state.step_size;
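
Taken together, the matches above are the pieces of one steepest-descent line search iteration: the search direction is the negative gradient, the directional derivative is gradient.dot(search_direction), the initial step size uses the two heuristics visible at lines 293-295, and the accepted step updates the iterate and the iteration summary. The following is a minimal, self-contained sketch (not the Ceres implementation itself) of how those quantities fit together; the toy objective, the Armijo backtracking stand-in for the line search, and all helper names are assumptions made for illustration.

// Minimal sketch of a steepest-descent line search loop using the
// quantities that appear in the matches above. Assumes Eigen is available.
#include <Eigen/Dense>
#include <algorithm>
#include <cmath>
#include <iostream>

struct State {
  double cost = 0.0;
  Eigen::VectorXd gradient;
  Eigen::VectorXd search_direction;
  double directional_derivative = 0.0;
  double step_size = 0.0;
  double gradient_max_norm = 0.0;
  double gradient_squared_norm = 0.0;
};

// Hypothetical objective for illustration: f(x) = 0.5 * ||x||^2, gradient = x.
static double Evaluate(const Eigen::VectorXd& x, Eigen::VectorXd* gradient) {
  *gradient = x;
  return 0.5 * x.squaredNorm();
}

int main() {
  Eigen::VectorXd x = Eigen::VectorXd::Constant(3, 4.0);

  State current_state, previous_state;
  current_state.cost = Evaluate(x, &current_state.gradient);

  const int kMaxIterations = 20;
  for (int iteration = 0; iteration < kMaxIterations; ++iteration) {
    current_state.gradient_max_norm =
        current_state.gradient.lpNorm<Eigen::Infinity>();
    current_state.gradient_squared_norm = current_state.gradient.squaredNorm();

    // Steepest descent: search along the negative gradient.
    current_state.search_direction = -current_state.gradient;
    current_state.directional_derivative =
        current_state.gradient.dot(current_state.search_direction);

    // Initial step size heuristics mirrored from the listing: scale by the
    // max-norm of the gradient on the first iteration, otherwise use the
    // previous cost decrease against the current directional derivative.
    const double initial_step_size =
        (iteration == 0)
            ? std::min(1.0, 1.0 / current_state.gradient_max_norm)
            : std::min(1.0, 2.0 * (current_state.cost - previous_state.cost) /
                                current_state.directional_derivative);

    // Stand-in for the one-dimensional line search: simple backtracking
    // with a sufficient-decrease (Armijo) condition.
    double step = initial_step_size;
    Eigen::VectorXd gradient_trial;
    double cost_trial =
        Evaluate(x + step * current_state.search_direction, &gradient_trial);
    while (cost_trial >
           current_state.cost +
               1e-4 * step * current_state.directional_derivative) {
      step *= 0.5;
      cost_trial =
          Evaluate(x + step * current_state.search_direction, &gradient_trial);
    }

    current_state.step_size = step;
    const Eigen::VectorXd delta =
        current_state.step_size * current_state.search_direction;

    // Remember the previous state, take the step, and re-evaluate.
    previous_state = current_state;
    x += delta;
    current_state.cost = cost_trial;
    current_state.gradient = gradient_trial;

    std::cout << "iter " << iteration << " cost " << current_state.cost
              << " |g| " << std::sqrt(current_state.gradient.squaredNorm())
              << " step " << current_state.step_size << "\n";

    if (current_state.gradient.lpNorm<Eigen::Infinity>() < 1e-10) break;
  }
  return 0;
}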