#!/usr/bin/python3

# This is an initial, hacky implementation of the extended LQR paper. It's
# just a proof of concept, so don't trust it too much.

import numpy
import scipy.optimize
from matplotlib import pylab
import sys

from frc971.control_loops.python import controls

l = 100  # Horizon length (number of timesteps).
width = 0.2  # Track width of the drivetrain (distance between the wheels).
dt = 0.05  # Integration timestep.
num_states = 3
num_inputs = 2
x_hat_initial = numpy.matrix([[0.10], [1.0], [0.0]])

def dynamics(X, U):
    """Calculates the dynamics for a 2 wheeled robot.

    Args:
      X, numpy.matrix(3, 1), The state. [x, y, theta]
      U, numpy.matrix(2, 1), The input. [left velocity, right velocity]

    Returns:
      numpy.matrix(3, 1), The derivative of the dynamics.
    """
    #return numpy.matrix([[X[1, 0]],
    #                     [X[2, 0]],
    #                     [U[0, 0]]])
    return numpy.matrix([[(U[0, 0] + U[1, 0]) * numpy.cos(X[2, 0]) / 2.0],
                         [(U[0, 0] + U[1, 0]) * numpy.sin(X[2, 0]) / 2.0],
                         [(U[1, 0] - U[0, 0]) / width]])

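# Illustrative sanity check of the drivetrain model (an addition, not part of
# the original script): equal wheel velocities drive straight along the
# heading, and opposite velocities turn in place at (U_right - U_left) / width.
_straight = dynamics(numpy.matrix([[0.0], [0.0], [0.0]]),
                     numpy.matrix([[1.0], [1.0]]))
assert abs(_straight[0, 0] - 1.0) < 1e-9 and abs(_straight[2, 0]) < 1e-9
_spin = dynamics(numpy.matrix([[0.0], [0.0], [0.0]]),
                 numpy.matrix([[-1.0], [1.0]]))
assert abs(_spin[0, 0]) < 1e-9 and abs(_spin[2, 0] - 2.0 / width) < 1e-9
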
def RungeKutta(f, x, dt):
    """4th order Runge-Kutta integration of f starting at x."""
    a = f(x)
    b = f(x + dt / 2.0 * a)
    c = f(x + dt / 2.0 * b)
    d = f(x + dt * c)
    return x + dt * (a + 2.0 * b + 2.0 * c + d) / 6.0

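# Illustrative check of the integrator (an addition, not from the original
# script): one RK4 step of xdot = -x starting from 1 should land very close
# to exp(-dt).
_rk4_step = RungeKutta(lambda x: -x, numpy.matrix([[1.0]]), dt)
assert abs(_rk4_step[0, 0] - numpy.exp(-dt)) < 1e-6
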
def discrete_dynamics(X, U):
    return RungeKutta(lambda startingX: dynamics(startingX, U), X, dt)

def inverse_discrete_dynamics(X, U):
    return RungeKutta(lambda startingX: -dynamics(startingX, U), X, dt)

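# inverse_discrete_dynamics integrates the negated dynamics forward from the
# end state, so it only approximately undoes discrete_dynamics; over a single
# dt the round-trip error is tiny.  Illustrative check (an addition, not part
# of the original script) at an arbitrary state and input:
_X_check = numpy.matrix([[0.1], [0.2], [0.3]])
_U_check = numpy.matrix([[0.5], [1.0]])
assert numpy.allclose(
    inverse_discrete_dynamics(discrete_dynamics(_X_check, _U_check), _U_check),
    _X_check, atol=1e-5)
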
def numerical_jacobian_x(fn, X, U, epsilon=1e-4):
    """Numerically estimates the jacobian around X, U in X.

    Args:
      fn: A function of X, U.
      X: numpy.matrix(num_states, 1), The state vector to take the jacobian
        around.
      U: numpy.matrix(num_inputs, 1), The input vector to take the jacobian
        around.

    Returns:
      numpy.matrix(num_states, num_states), The jacobian of fn with X as the
        variable.
    """
    num_states = X.shape[0]
    nominal = fn(X, U)
    answer = numpy.matrix(numpy.zeros((nominal.shape[0], num_states)))
    # It's more expensive, but +- epsilon will be more reliable.
    for i in range(0, num_states):
        dX_plus = X.copy()
        dX_plus[i] += epsilon
        dX_minus = X.copy()
        dX_minus[i] -= epsilon
        answer[:, i] = (fn(dX_plus, U) - fn(dX_minus, U)) / epsilon / 2.0
    return answer

def numerical_jacobian_u(fn, X, U, epsilon=1e-4):
    """Numerically estimates the jacobian around X, U in U.

    Args:
      fn: A function of X, U.
      X: numpy.matrix(num_states, 1), The state vector to take the jacobian
        around.
      U: numpy.matrix(num_inputs, 1), The input vector to take the jacobian
        around.

    Returns:
      numpy.matrix(num_states, num_inputs), The jacobian of fn with U as the
        variable.
    """
    num_states = X.shape[0]
    num_inputs = U.shape[0]
    nominal = fn(X, U)
    answer = numpy.matrix(numpy.zeros((nominal.shape[0], num_inputs)))
    for i in range(0, num_inputs):
        dU_plus = U.copy()
        dU_plus[i] += epsilon
        dU_minus = U.copy()
        dU_minus[i] -= epsilon
        answer[:, i] = (fn(X, dU_plus) - fn(X, dU_minus)) / epsilon / 2.0
    return answer

def numerical_jacobian_x_x(fn, X, U):
    return numerical_jacobian_x(
        lambda X_inner, U_inner: numerical_jacobian_x(fn, X_inner, U_inner).T, X, U)

def numerical_jacobian_x_u(fn, X, U):
    return numerical_jacobian_x(
        lambda X_inner, U_inner: numerical_jacobian_u(fn, X_inner, U_inner).T, X, U)

def numerical_jacobian_u_x(fn, X, U):
    return numerical_jacobian_u(
        lambda X_inner, U_inner: numerical_jacobian_x(fn, X_inner, U_inner).T, X, U)

def numerical_jacobian_u_u(fn, X, U):
    return numerical_jacobian_u(
        lambda X_inner, U_inner: numerical_jacobian_u(fn, X_inner, U_inner).T, X, U)

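# The four helpers above estimate second derivatives (Hessian blocks) by
# differencing the numerical jacobians.  Illustrative check (an addition, not
# part of the original script): for a known quadratic
# f(X, U) = X.T * A * X + U.T * B * U with symmetric A and B, the estimated
# Hessians should come back as 2 * A and 2 * B.
_A_quad = numpy.matrix([[2.0, 0.5, 0.0],
                        [0.5, 1.0, 0.0],
                        [0.0, 0.0, 3.0]])
_B_quad = numpy.matrix([[1.5, 0.0],
                        [0.0, 0.7]])

def _quad_cost(X, U):
    return X.T * _A_quad * X + U.T * _B_quad * U

_X0 = numpy.matrix([[0.3], [-0.2], [0.1]])
_U0 = numpy.matrix([[0.4], [-0.5]])
assert numpy.allclose(numerical_jacobian_x_x(_quad_cost, _X0, _U0),
                      2.0 * _A_quad, atol=1e-4)
assert numpy.allclose(numerical_jacobian_u_u(_quad_cost, _X0, _U0),
                      2.0 * _B_quad, atol=1e-4)
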
# Simple implementation for a quadratic cost function.
class CostFunction:
    def __init__(self):
        self.num_states = num_states
        self.num_inputs = num_inputs
        self.dt = dt
        self.Q = numpy.matrix([[0.1, 0, 0],
                               [0, 0.6, 0],
                               [0, 0, 0.1]]) / dt / dt
        self.R = numpy.matrix([[0.40, 0],
                               [0, 0.40]]) / dt / dt

    def estimate_Q_final(self, X_hat):
        """Returns the quadraticized final Q around X_hat.

        This is calculated by evaluating partial^2 cost(X_hat) / (partial X * partial X)

        Args:
          X_hat: numpy.matrix(self.num_states, 1), The state to quadraticize around.

        Returns:
          numpy.matrix(self.num_states, self.num_states)
        """
        zero_U = numpy.matrix(numpy.zeros((self.num_inputs, 1)))
        return numerical_jacobian_x_x(self.final_cost, X_hat, zero_U)

    def estimate_partial_cost_partial_x_final(self, X_hat):
        r"""Returns \frac{\partial cost}{\partial X}(X_hat) for the final cost.

        Args:
          X_hat: numpy.matrix(self.num_states, 1), The state to quadraticize around.

        Returns:
          numpy.matrix(self.num_states, 1)
        """
        return numerical_jacobian_x(
            self.final_cost, X_hat,
            numpy.matrix(numpy.zeros((self.num_inputs, 1)))).T

    def estimate_q_final(self, X_hat):
        """Returns q evaluated at X_hat for the final cost function."""
        return (self.estimate_partial_cost_partial_x_final(X_hat) -
                self.estimate_Q_final(X_hat) * X_hat)

    def final_cost(self, X, U):
        """Computes the final cost of being at X.

        Args:
          X: numpy.matrix(self.num_states, 1)
          U: numpy.matrix(self.num_inputs, 1), ignored

        Returns:
          numpy.matrix(1, 1), The quadratic cost of being at X
        """
        return X.T * self.Q * X * 1000

    def cost(self, X, U):
        """Computes the incremental cost given a position and U.

        Args:
          X: numpy.matrix(self.num_states, 1)
          U: numpy.matrix(self.num_inputs, 1)

        Returns:
          numpy.matrix(1, 1), The quadratic cost of evaluating U.
        """
        return U.T * self.R * U + X.T * self.Q * X

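# Since final_cost is purely quadratic, its quadraticization should reproduce
# it exactly: final_cost(X) == 0.5 * X.T * Q_final * X + X.T * q_final, with
# q_final essentially zero because there is no linear term.  Illustrative
# check, an addition to the original script:
_check_cost = CostFunction()
_X_f = numpy.matrix([[0.2], [-0.3], [0.1]])
_Q_f = _check_cost.estimate_Q_final(_X_f)
_q_f = _check_cost.estimate_q_final(_X_f)
_zero_U_check = numpy.matrix(numpy.zeros((num_inputs, 1)))
assert numpy.allclose(0.5 * _X_f.T * _Q_f * _X_f + _X_f.T * _q_f,
                      _check_cost.final_cost(_X_f, _zero_U_check))
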
cost_fn_obj = CostFunction()

S_bar_t = [numpy.matrix(numpy.zeros((num_states, num_states))) for _ in range(l + 1)]
s_bar_t = [numpy.matrix(numpy.zeros((num_states, 1))) for _ in range(l + 1)]
s_scalar_bar_t = [numpy.matrix(numpy.zeros((1, 1))) for _ in range(l + 1)]

L_t = [numpy.matrix(numpy.zeros((num_inputs, num_states))) for _ in range(l + 1)]
l_t = [numpy.matrix(numpy.zeros((num_inputs, 1))) for _ in range(l + 1)]
L_bar_t = [numpy.matrix(numpy.zeros((num_inputs, num_states))) for _ in range(l + 1)]
l_bar_t = [numpy.matrix(numpy.zeros((num_inputs, 1))) for _ in range(l + 1)]

S_t = [numpy.matrix(numpy.zeros((num_states, num_states))) for _ in range(l + 1)]
s_t = [numpy.matrix(numpy.zeros((num_states, 1))) for _ in range(l + 1)]
s_scalar_t = [numpy.matrix(numpy.zeros((1, 1))) for _ in range(l + 1)]

last_x_hat_t = [numpy.matrix(numpy.zeros((num_states, 1))) for _ in range(l + 1)]

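# How the extended LQR iteration below is parameterized (explanatory summary,
# an addition to the original script):
#   cost-to-go at time t:    0.5 * x.T * S_t[t] * x + x.T * s_t[t] + s_scalar_t[t]
#   cost-to-come at time t:  0.5 * x.T * S_bar_t[t] * x + x.T * s_bar_t[t] + s_scalar_bar_t[t]
# The forward pass rolls out u = L_t[t] * x + l_t[t] (from the last backward
# pass) while accumulating the cost-to-come (the barred quantities); the
# backward pass rolls back with u = L_bar_t[t + 1] * x + l_bar_t[t + 1] (from
# the forward pass) while accumulating the cost-to-go.  After each step x_hat
# is re-centered at the minimizer of cost-to-go + cost-to-come, i.e.
# -(S_t + S_bar_t)^-1 * (s_t + s_bar_t).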
for a in range(15):
    x_hat = x_hat_initial
    u_t = L_t[0] * x_hat + l_t[0]
    S_bar_t[0] = numpy.matrix(numpy.zeros((num_states, num_states)))
    s_bar_t[0] = numpy.matrix(numpy.zeros((num_states, 1)))
    s_scalar_bar_t[0] = numpy.matrix([[0]])

    last_x_hat_t[0] = x_hat_initial

    Q_t = numerical_jacobian_x_x(cost_fn_obj.cost, x_hat_initial, u_t)
    P_t = numerical_jacobian_x_u(cost_fn_obj.cost, x_hat_initial, u_t)
    R_t = numerical_jacobian_u_u(cost_fn_obj.cost, x_hat_initial, u_t)

    q_t = (numerical_jacobian_x(cost_fn_obj.cost, x_hat_initial, u_t).T -
           Q_t * x_hat_initial - P_t.T * u_t)
    r_t = (numerical_jacobian_u(cost_fn_obj.cost, x_hat_initial, u_t).T -
           P_t * x_hat_initial - R_t * u_t)

    q_scalar_t = (
        cost_fn_obj.cost(x_hat_initial, u_t) -
        0.5 * (x_hat_initial.T * (Q_t * x_hat_initial + P_t.T * u_t) +
               u_t.T * (P_t * x_hat_initial + R_t * u_t)) -
        x_hat_initial.T * q_t - u_t.T * r_t)

    start_A_t = numerical_jacobian_x(discrete_dynamics, x_hat_initial, u_t)
    start_B_t = numerical_jacobian_u(discrete_dynamics, x_hat_initial, u_t)
    x_hat_next = discrete_dynamics(x_hat_initial, u_t)
    start_c_t = x_hat_next - start_A_t * x_hat_initial - start_B_t * u_t

    B_svd_u, B_svd_sigma_diag, B_svd_v = numpy.linalg.svd(start_B_t)
    B_svd_sigma = numpy.matrix(numpy.zeros(start_B_t.shape))
    B_svd_sigma[0:B_svd_sigma_diag.shape[0], 0:B_svd_sigma_diag.shape[0]] = (
        numpy.diag(B_svd_sigma_diag))

    B_svd_sigma_inv = numpy.matrix(numpy.zeros(start_B_t.shape)).T
    B_svd_sigma_inv[0:B_svd_sigma_diag.shape[0], 0:B_svd_sigma_diag.shape[0]] = (
        numpy.linalg.inv(numpy.diag(B_svd_sigma_diag)))
    B_svd_inv = B_svd_v.T * B_svd_sigma_inv * B_svd_u.T

    L_bar_t[1] = B_svd_inv
    l_bar_t[1] = -B_svd_inv * (start_A_t * x_hat_initial + start_c_t)

    S_bar_t[1] = L_bar_t[1].T * R_t * L_bar_t[1]

    TotalS_1 = start_B_t.T * S_t[1] * start_B_t + R_t
    Totals_1 = (start_B_t.T * S_t[1] * (start_c_t + start_A_t * x_hat_initial) +
                start_B_t.T * s_t[1] + P_t * x_hat_initial + r_t)
    Totals_scalar_1 = (
        0.5 * (start_c_t.T + x_hat_initial.T * start_A_t.T) * S_t[1] *
        (start_c_t + start_A_t * x_hat_initial) +
        s_scalar_t[1] + x_hat_initial.T * q_t + q_scalar_t +
        0.5 * x_hat_initial.T * Q_t * x_hat_initial +
        (start_c_t.T + x_hat_initial.T * start_A_t.T) * s_t[1])

    optimal_u_1 = -numpy.linalg.solve(TotalS_1, Totals_1)
    optimal_x_1 = start_A_t * x_hat_initial + start_B_t * optimal_u_1 + start_c_t

    S_bar_1_eigh_eigenvalues, S_bar_1_eigh_eigenvectors = numpy.linalg.eigh(S_bar_t[1])
    S_bar_1_eigh = numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues))
    S_bar_1_eigh_eigenvalues_stiff = S_bar_1_eigh_eigenvalues.copy()
    for i in range(S_bar_1_eigh_eigenvalues_stiff.shape[0]):
        if abs(S_bar_1_eigh_eigenvalues_stiff[i]) < 1e-8:
            S_bar_1_eigh_eigenvalues_stiff[i] = max(S_bar_1_eigh_eigenvalues_stiff) * 1.0

    #print 'eigh eigenvalues of S bar', S_bar_1_eigh_eigenvalues
    #print 'eigh eigenvectors of S bar', S_bar_1_eigh_eigenvectors.T

    #print 'S bar eig recreate', S_bar_1_eigh_eigenvectors * numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues)) * S_bar_1_eigh_eigenvectors.T
    #print 'S bar eig recreate error', (S_bar_1_eigh_eigenvectors * numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues)) * S_bar_1_eigh_eigenvectors.T - S_bar_t[1])

    S_bar_stiff = (S_bar_1_eigh_eigenvectors *
                   numpy.matrix(numpy.diag(S_bar_1_eigh_eigenvalues_stiff)) *
                   S_bar_1_eigh_eigenvectors.T)

    print('Min u', -numpy.linalg.solve(TotalS_1, Totals_1))
    print('Min x_hat', optimal_x_1)
    s_bar_t[1] = -s_t[1] - (S_bar_stiff + S_t[1]) * optimal_x_1
    s_scalar_bar_t[1] = (
        0.5 * (optimal_u_1.T * TotalS_1 * optimal_u_1 -
               optimal_x_1.T * (S_bar_stiff + S_t[1]) * optimal_x_1) +
        optimal_u_1.T * Totals_1 - optimal_x_1.T * (s_bar_t[1] + s_t[1]) -
        s_scalar_t[1] + Totals_scalar_1)

    print('optimal_u_1', optimal_u_1)
    print('TotalS_1', TotalS_1)
    print('Totals_1', Totals_1)
    print('Totals_scalar_1', Totals_scalar_1)
    print('overall cost 1',
          0.5 * (optimal_u_1.T * TotalS_1 * optimal_u_1) +
          optimal_u_1.T * Totals_1 + Totals_scalar_1)
    print('overall cost 0',
          0.5 * (x_hat_initial.T * S_t[0] * x_hat_initial) +
          x_hat_initial.T * s_t[0] + s_scalar_t[0])

    print('t forward 0')
    print('x_hat_initial[ 0]: %s' % (x_hat_initial))
    print('x_hat[%2d]: %s' % (0, x_hat.T))
    print('x_hat_next[%2d]: %s' % (0, x_hat_next.T))
    print('u[%2d]: %s' % (0, u_t.T))
    print(('L[ 0]: %s' % (L_t[0],)).replace('\n', '\n '))
    print(('l[ 0]: %s' % (l_t[0],)).replace('\n', '\n '))

    print(('A_t[%2d]: %s' % (0, start_A_t)).replace('\n', '\n '))
    print(('B_t[%2d]: %s' % (0, start_B_t)).replace('\n', '\n '))
    print(('c_t[%2d]: %s' % (0, start_c_t)).replace('\n', '\n '))

    # TODO(austin): optimal_x_1 is x_hat
    x_hat = -numpy.linalg.solve((S_t[1] + S_bar_stiff), (s_t[1] + s_bar_t[1]))
    print('new xhat', x_hat)

    S_bar_t[1] = S_bar_stiff

    last_x_hat_t[1] = x_hat

    for t in range(1, l):
        print('t forward', t)
        u_t = L_t[t] * x_hat + l_t[t]

        x_hat_next = discrete_dynamics(x_hat, u_t)
        A_bar_t = numerical_jacobian_x(inverse_discrete_dynamics, x_hat_next, u_t)
        B_bar_t = numerical_jacobian_u(inverse_discrete_dynamics, x_hat_next, u_t)
        c_bar_t = x_hat - A_bar_t * x_hat_next - B_bar_t * u_t

        print('x_hat[%2d]: %s' % (t, x_hat.T))
        print('x_hat_next[%2d]: %s' % (t, x_hat_next.T))
        print(('L[%2d]: %s' % (t, L_t[t],)).replace('\n', '\n '))
        print(('l[%2d]: %s' % (t, l_t[t],)).replace('\n', '\n '))
        print('u[%2d]: %s' % (t, u_t.T))

        print(('A_bar_t[%2d]: %s' % (t, A_bar_t)).replace('\n', '\n '))
        print(('B_bar_t[%2d]: %s' % (t, B_bar_t)).replace('\n', '\n '))
        print(('c_bar_t[%2d]: %s' % (t, c_bar_t)).replace('\n', '\n '))

        Q_t = numerical_jacobian_x_x(cost_fn_obj.cost, x_hat, u_t)
        P_t = numerical_jacobian_x_u(cost_fn_obj.cost, x_hat, u_t)
        R_t = numerical_jacobian_u_u(cost_fn_obj.cost, x_hat, u_t)

        q_t = numerical_jacobian_x(cost_fn_obj.cost, x_hat, u_t).T - Q_t * x_hat - P_t.T * u_t
        r_t = numerical_jacobian_u(cost_fn_obj.cost, x_hat, u_t).T - P_t * x_hat - R_t * u_t

        q_scalar_t = (cost_fn_obj.cost(x_hat, u_t) -
                      0.5 * (x_hat.T * (Q_t * x_hat + P_t.T * u_t) +
                             u_t.T * (P_t * x_hat + R_t * u_t)) -
                      x_hat.T * q_t - u_t.T * r_t)

        C_bar_t = B_bar_t.T * (S_bar_t[t] + Q_t) * A_bar_t + P_t * A_bar_t
        D_bar_t = A_bar_t.T * (S_bar_t[t] + Q_t) * A_bar_t
        E_bar_t = (B_bar_t.T * (S_bar_t[t] + Q_t) * B_bar_t + R_t +
                   P_t * B_bar_t + B_bar_t.T * P_t.T)
        d_bar_t = A_bar_t.T * (s_bar_t[t] + q_t) + A_bar_t.T * (S_bar_t[t] + Q_t) * c_bar_t
        e_bar_t = r_t + P_t * c_bar_t + B_bar_t.T * s_bar_t[t] + B_bar_t.T * (S_bar_t[t] + Q_t) * c_bar_t

        L_bar_t[t + 1] = -numpy.linalg.inv(E_bar_t) * C_bar_t
        l_bar_t[t + 1] = -numpy.linalg.inv(E_bar_t) * e_bar_t

        S_bar_t[t + 1] = D_bar_t + C_bar_t.T * L_bar_t[t + 1]
        s_bar_t[t + 1] = d_bar_t + C_bar_t.T * l_bar_t[t + 1]
        s_scalar_bar_t[t + 1] = (
            -0.5 * e_bar_t.T * numpy.linalg.inv(E_bar_t) * e_bar_t +
            0.5 * c_bar_t.T * (S_bar_t[t] + Q_t) * c_bar_t +
            c_bar_t.T * s_bar_t[t] + c_bar_t.T * q_t +
            s_scalar_bar_t[t] + q_scalar_t)

        x_hat = -numpy.linalg.solve((S_t[t + 1] + S_bar_t[t + 1]),
                                    (s_t[t + 1] + s_bar_t[t + 1]))

    S_t[l] = cost_fn_obj.estimate_Q_final(x_hat)
    s_t[l] = cost_fn_obj.estimate_q_final(x_hat)
    x_hat = -numpy.linalg.inv(S_t[l] + S_bar_t[l]) * (s_t[l] + s_bar_t[l])

    for t in reversed(range(l)):
        print('t backward', t)
        # TODO(austin): I don't think we can use L_t like this here.
        # I think we are off by 1 somewhere...
        u_t = L_bar_t[t + 1] * x_hat + l_bar_t[t + 1]

        x_hat_prev = inverse_discrete_dynamics(x_hat, u_t)
        print('x_hat[%2d]: %s' % (t, x_hat.T))
        print('x_hat_prev[%2d]: %s' % (t, x_hat_prev.T))
        print(('L_bar[%2d]: %s' % (t + 1, L_bar_t[t + 1])).replace('\n', '\n '))
        print(('l_bar[%2d]: %s' % (t + 1, l_bar_t[t + 1])).replace('\n', '\n '))
        print('u[%2d]: %s' % (t, u_t.T))
        # Now compute the linearized A, B, and c.
        # Start by doing it numerically, and then optimize.
        A_t = numerical_jacobian_x(discrete_dynamics, x_hat_prev, u_t)
        B_t = numerical_jacobian_u(discrete_dynamics, x_hat_prev, u_t)
        c_t = x_hat - A_t * x_hat_prev - B_t * u_t

        print(('A_t[%2d]: %s' % (t, A_t)).replace('\n', '\n '))
        print(('B_t[%2d]: %s' % (t, B_t)).replace('\n', '\n '))
        print(('c_t[%2d]: %s' % (t, c_t)).replace('\n', '\n '))

        Q_t = numerical_jacobian_x_x(cost_fn_obj.cost, x_hat_prev, u_t)
        P_t = numerical_jacobian_x_u(cost_fn_obj.cost, x_hat_prev, u_t)
        P_T_t = numerical_jacobian_u_x(cost_fn_obj.cost, x_hat_prev, u_t)
        R_t = numerical_jacobian_u_u(cost_fn_obj.cost, x_hat_prev, u_t)

        q_t = numerical_jacobian_x(cost_fn_obj.cost, x_hat_prev, u_t).T - Q_t * x_hat_prev - P_T_t * u_t
        r_t = numerical_jacobian_u(cost_fn_obj.cost, x_hat_prev, u_t).T - P_t * x_hat_prev - R_t * u_t

        q_scalar_t = (cost_fn_obj.cost(x_hat_prev, u_t) -
                      0.5 * (x_hat_prev.T * (Q_t * x_hat_prev + P_t.T * u_t) +
                             u_t.T * (P_t * x_hat_prev + R_t * u_t)) -
                      x_hat_prev.T * q_t - u_t.T * r_t)

        C_t = P_t + B_t.T * S_t[t + 1] * A_t
        D_t = Q_t + A_t.T * S_t[t + 1] * A_t
        E_t = R_t + B_t.T * S_t[t + 1] * B_t
        d_t = q_t + A_t.T * s_t[t + 1] + A_t.T * S_t[t + 1] * c_t
        e_t = r_t + B_t.T * s_t[t + 1] + B_t.T * S_t[t + 1] * c_t
        L_t[t] = -numpy.linalg.inv(E_t) * C_t
        l_t[t] = -numpy.linalg.inv(E_t) * e_t
        s_t[t] = d_t + C_t.T * l_t[t]
        S_t[t] = D_t + C_t.T * L_t[t]
        s_scalar_t[t] = (q_scalar_t -
                         0.5 * e_t.T * numpy.linalg.inv(E_t) * e_t +
                         0.5 * c_t.T * S_t[t + 1] * c_t +
                         c_t.T * s_t[t + 1] + s_scalar_t[t + 1])

        x_hat = -numpy.linalg.solve((S_t[t] + S_bar_t[t]), (s_t[t] + s_bar_t[t]))
        if t == 0:
            last_x_hat_t[t] = x_hat_initial
        else:
            last_x_hat_t[t] = x_hat

    x_hat_t = [x_hat_initial]

    pylab.figure('states %d' % a)
    pylab.ion()
    for dim in range(num_states):
        pylab.plot(numpy.arange(len(last_x_hat_t)),
                   [x_hat_loop[dim, 0] for x_hat_loop in last_x_hat_t],
                   marker='o', label='Xhat[%d]' % dim)
    pylab.legend()
    pylab.draw()

    pylab.figure('xy %d' % a)
    pylab.ion()
    pylab.plot([x_hat_loop[0, 0] for x_hat_loop in last_x_hat_t],
               [x_hat_loop[1, 0] for x_hat_loop in last_x_hat_t],
               marker='o', label='trajectory')
    pylab.legend()
    pylab.draw()

final_u_t = [numpy.matrix(numpy.zeros((num_inputs, 1))) for _ in range(l + 1)]
cost_to_go = []
cost_to_come = []
cost = []
for t in range(l):
    cost_to_go.append((0.5 * last_x_hat_t[t].T * S_t[t] * last_x_hat_t[t] +
                       last_x_hat_t[t].T * s_t[t] + s_scalar_t[t])[0, 0])
    cost_to_come.append((0.5 * last_x_hat_t[t].T * S_bar_t[t] * last_x_hat_t[t] +
                         last_x_hat_t[t].T * s_bar_t[t] + s_scalar_bar_t[t])[0, 0])
    cost.append(cost_to_go[-1] + cost_to_come[-1])
    final_u_t[t] = L_t[t] * last_x_hat_t[t] + l_t[t]

for t in range(l):
    A_t = numerical_jacobian_x(discrete_dynamics, last_x_hat_t[t], final_u_t[t])
    B_t = numerical_jacobian_u(discrete_dynamics, last_x_hat_t[t], final_u_t[t])
    c_t = (discrete_dynamics(last_x_hat_t[t], final_u_t[t]) -
           A_t * last_x_hat_t[t] - B_t * final_u_t[t])
    print("Infeasibility at", t, "is",
          ((A_t * last_x_hat_t[t] + B_t * final_u_t[t] + c_t) -
           last_x_hat_t[t + 1]).T)

pylab.figure('u')
samples = numpy.arange(len(final_u_t))
for i in range(num_inputs):
    pylab.plot(samples, [u[i, 0] for u in final_u_t], label='u[%d]' % i, marker='o')
pylab.legend()

pylab.figure('cost')
cost_samples = numpy.arange(len(cost))
pylab.plot(cost_samples, cost_to_go, label='cost to go', marker='o')
pylab.plot(cost_samples, cost_to_come, label='cost to come', marker='o')
pylab.plot(cost_samples, cost, label='cost', marker='o')
pylab.legend()

pylab.ioff()
pylab.show()

sys.exit(1)