Split out superstructure into arm and intake state feedback loops.
This adds more of the structure of the superstructure control loop:
each mechanism now runs a trapezoid motion profile and a two-state
feedforward term (Kff) on top of its state feedback.
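
Both loops now share the same per-cycle structure: correct the observer,
step a trapezoid profile toward the end goal, compute a feedforward
voltage for the profiled goal, add state feedback on top, and fold any
output saturation back into the profile.  A minimal sketch of that loop
on a double-integrator stand-in (the plant, gains, and profile_update()
below are hypothetical placeholders, not the generated y2016 models; the
real Kff comes from controls.TwoStateFeedForwards, which solves a
Qff-weighted least squares rather than the plain pseudo-inverse used
here, and the observer is left out):

    import numpy

    dt = 0.005
    A = numpy.matrix([[1.0, dt], [0.0, 1.0]])   # double-integrator plant
    B = numpy.matrix([[0.5 * dt * dt], [dt]])
    K = numpy.matrix([[50.0, 5.0]])             # state feedback gains
    Kff = numpy.linalg.pinv(B)                  # unweighted feedforward gains
    vbat = 12.0

    def profile_update(goal, end_goal, max_velocity):
        # Velocity-limited stand-in for TrapizoidProfile.Update().
        step = numpy.clip(end_goal[0, 0] - goal[0, 0],
                          -max_velocity * dt, max_velocity * dt)
        return numpy.matrix([[goal[0, 0] + step], [step / dt]])

    X = numpy.matrix([[0.0], [0.0]])            # true plant state
    goal = X.copy()                             # profiled goal being tracked
    end_goal = numpy.matrix([[1.0], [0.0]])

    for _ in xrange(1000):
        next_goal = profile_update(goal, end_goal, max_velocity=0.5)
        # Voltage needed to move the profiled goal forward one timestep.
        ff_U = Kff * (next_goal - A * goal)
        U_uncapped = K * (goal - X) + ff_U
        U = numpy.matrix([[numpy.clip(U_uncapped[0, 0], -vbat, vbat)]])
        X = A * X + B * U
        # Remove any clipped voltage from the feedforward term and advance
        # the profiled goal with what was actually achievable.
        ff_U -= U_uncapped - U
        goal = A * goal + B * ff_U

When saturation is large enough to matter, the real loops additionally
call profile.MoveCurrentState(goal) so the profile restarts from the
goal the plant was actually able to follow.
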
Change-Id: I4abc83b04c57174ce087be0932e777cafdce8373
diff --git a/y2016/control_loops/python/BUILD b/y2016/control_loops/python/BUILD
index 9396fea..4c261e1 100644
--- a/y2016/control_loops/python/BUILD
+++ b/y2016/control_loops/python/BUILD
@@ -56,6 +56,7 @@
'intake.py',
],
deps = [
+ '//aos/common/util:py_trapezoid_profile',
'//external:python-gflags',
'//external:python-glog',
'//frc971/control_loops/python:controls',
diff --git a/y2016/control_loops/python/arm.py b/y2016/control_loops/python/arm.py
old mode 100644
new mode 100755
index 28a704d..0bfa04b
--- a/y2016/control_loops/python/arm.py
+++ b/y2016/control_loops/python/arm.py
@@ -75,13 +75,25 @@
self.Q[2, 2] = 1.0 / q_pos_shooter ** 2.0
self.Q[3, 3] = 1.0 / q_vel_shooter ** 2.0
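+ # Feedforward LQR weights.  The small qff_pos values weight tracking of
+ # the profiled positions heavily; velocity tracking is weighted lightly.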
+ self.Qff = numpy.matrix(numpy.zeros((4, 4)))
+ qff_pos_shoulder = 0.005
+ qff_vel_shoulder = 1.00
+ qff_pos_shooter = 0.005
+ qff_vel_shooter = 1.00
+ self.Qff[0, 0] = 1.0 / qff_pos_shoulder ** 2.0
+ self.Qff[1, 1] = 1.0 / qff_vel_shoulder ** 2.0
+ self.Qff[2, 2] = 1.0 / qff_pos_shooter ** 2.0
+ self.Qff[3, 3] = 1.0 / qff_vel_shooter ** 2.0
+
# Cost of control effort
self.R = numpy.matrix(numpy.zeros((2, 2)))
r_voltage = 1.0 / 12.0
self.R[0, 0] = r_voltage ** 2.0
self.R[1, 1] = r_voltage ** 2.0
- self.Kff = controls.TwoStateFeedForwards(self.B, self.Q)
+ self.Kff = controls.TwoStateFeedForwards(self.B, self.Qff)
glog.debug('Shoulder K')
glog.debug(self._shoulder.K)
@@ -224,17 +234,15 @@
initial_t = 0
goal = numpy.concatenate((arm.X, numpy.matrix(numpy.zeros((2, 1)))), axis=0)
- current_shoulder_goal = goal[0:2, 0].copy()
- current_shooter_goal = goal[2:4, 0].copy()
shoulder_profile = TrapizoidProfile(arm.dt)
shoulder_profile.set_maximum_acceleration(50.0)
shoulder_profile.set_maximum_velocity(10.0)
- shoulder_profile.SetGoal(current_shoulder_goal[0, 0])
+ shoulder_profile.SetGoal(goal[0, 0])
shooter_profile = TrapizoidProfile(arm.dt)
shooter_profile.set_maximum_acceleration(50.0)
shooter_profile.set_maximum_velocity(10.0)
- shooter_profile.SetGoal(current_shooter_goal[0, 0])
+ shooter_profile.SetGoal(goal[2, 0])
U_last = numpy.matrix(numpy.zeros((2, 1)))
for i in xrange(iterations):
@@ -246,10 +254,18 @@
self.offset_shoulder.append(observer.X_hat[4, 0])
self.offset_shooter.append(observer.X_hat[5, 0])
+ X_hat = observer.X_hat
+ self.x_hat_shoulder.append(observer.X_hat[0, 0])
+ self.x_hat_shooter.append(observer.X_hat[2, 0])
+
next_shoulder_goal = shoulder_profile.Update(end_goal[0, 0], end_goal[1, 0])
next_shooter_goal = shooter_profile.Update(end_goal[2, 0], end_goal[3, 0])
- next_goal = numpy.concatenate((next_shoulder_goal, next_shooter_goal, numpy.matrix(numpy.zeros((2, 1)))), axis=0)
+ next_goal = numpy.concatenate(
+ (next_shoulder_goal,
+ next_shooter_goal,
+ numpy.matrix(numpy.zeros((2, 1)))),
+ axis=0)
self.goal_x_shoulder.append(goal[0, 0])
self.goal_v_shoulder.append(goal[1, 0])
self.goal_x_shooter.append(goal[2, 0])
@@ -257,11 +273,6 @@
ff_U = controller.Kff * (next_goal - observer.A * goal)
- if observer is not None:
- X_hat = observer.X_hat
- self.x_hat_shoulder.append(observer.X_hat[0, 0])
- self.x_hat_shooter.append(observer.X_hat[2, 0])
-
U_uncapped = controller.K * (goal - X_hat) + ff_U
U = U_uncapped.copy()
@@ -326,9 +337,8 @@
shooter_profile.MoveCurrentState(
numpy.matrix([[goal[2, 0]], [goal[3, 0]]]))
U_last = U
- glog.debug('End goal is %s', repr(end_goal))
- glog.debug('last goal is %s', repr(goal))
- glog.debug('End state is %s', repr(arm.X))
+ glog.debug('goal_error %s', repr(end_goal - goal))
+ glog.debug('error %s', repr(observer.X_hat - end_goal))
def Plot(self):
diff --git a/y2016/control_loops/python/intake.py b/y2016/control_loops/python/intake.py
index 2a623d7..311cf17 100755
--- a/y2016/control_loops/python/intake.py
+++ b/y2016/control_loops/python/intake.py
@@ -1,5 +1,6 @@
#!/usr/bin/python
+from aos.common.util.trapezoid_profile import TrapizoidProfile
from frc971.control_loops.python import control_loop
from frc971.control_loops.python import controls
import numpy
@@ -77,6 +78,15 @@
self.R = numpy.matrix([[(1.0 / (12.0 ** 2.0))]])
self.K = controls.dlqr(self.A, self.B, self.Q, self.R)
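+ # Feedforward weights, mirroring the arm: track the profiled position
+ # tightly (small q_pos_ff) and the velocity loosely.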
+ q_pos_ff = 0.005
+ q_vel_ff = 1.0
+ self.Qff = numpy.matrix([[(1.0 / (q_pos_ff ** 2.0)), 0.0],
+ [0.0, (1.0 / (q_vel_ff ** 2.0))]])
+
+ self.Kff = controls.TwoStateFeedForwards(self.B, self.Qff)
+
glog.debug('K %s', repr(self.K))
glog.debug('Poles are %s',
repr(numpy.linalg.eig(self.A - self.B * self.K)[0]))
@@ -149,6 +157,8 @@
self.K[0, 0:2] = self.K_unaugmented
self.K[0, 2] = 1
+ self.Kff = numpy.concatenate((self.Kff, numpy.matrix(numpy.zeros((1, 1)))), axis=1)
+
self.InitializeState()
class ScenarioPlotter(object):
@@ -162,24 +172,20 @@
self.u = []
self.offset = []
- def run_test(self, intake, goal, iterations=200, controller_intake=None,
- observer_intake=None):
+ def run_test(self, intake, end_goal,
+ controller_intake,
+ observer_intake,
+ iterations=200):
"""Runs the intake plant with an initial condition and goal.
- Test for whether the goal has been reached and whether the separation
- goes outside of the initial and goal values by more than
- max_separation_error.
-
- Prints out something for a failure of either condition and returns
- False if tests fail.
Args:
intake: intake object to use.
- goal: goal state.
- iterations: Number of timesteps to run the model for.
+ end_goal: end goal state to profile towards.
controller_intake: Intake object to get K from, or None if we should
use intake.
- observer_intake: Intake object to use for the observer, or None if we should
- use the actual state.
+ observer_intake: Intake object to use for the observer; the observer now
+ runs unconditionally, so this is required.
+ iterations: Number of timesteps to run the model for.
"""
if controller_intake is None:
@@ -192,14 +198,32 @@
else:
initial_t = 0
+ goal = numpy.concatenate((intake.X, numpy.matrix(numpy.zeros((1, 1)))), axis=0)
+
+ profile = TrapizoidProfile(intake.dt)
+ profile.set_maximum_acceleration(70.0)
+ profile.set_maximum_velocity(10.0)
+ profile.SetGoal(goal[0, 0])
+
+ U_last = numpy.matrix(numpy.zeros((1, 1)))
for i in xrange(iterations):
- X_hat = intake.X
+ observer_intake.Y = intake.Y
+ observer_intake.CorrectObserver(U_last)
- if observer_intake is not None:
- X_hat = observer_intake.X_hat
- self.x_hat.append(observer_intake.X_hat[0, 0])
+ self.offset.append(observer_intake.X_hat[2, 0])
+ self.x_hat.append(observer_intake.X_hat[0, 0])
- U = controller_intake.K * (goal - X_hat)
+ next_goal = numpy.concatenate(
+ (profile.Update(end_goal[0, 0], end_goal[1, 0]),
+ numpy.matrix(numpy.zeros((1, 1)))),
+ axis=0)
+
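+ # Feedforward voltage needed to move the profiled goal forward one
+ # timestep; the feedback term below only corrects for error from it.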
+ ff_U = controller_intake.Kff * (next_goal - observer_intake.A * goal)
+
+ U_uncapped = controller_intake.K * (goal - observer_intake.X_hat) + ff_U
+ U = U_uncapped.copy()
U[0, 0] = numpy.clip(U[0, 0], -vbat, vbat)
self.x.append(intake.X[0, 0])
@@ -211,20 +233,25 @@
self.v.append(intake.X[1, 0])
self.a.append((self.v[-1] - last_v) / intake.dt)
- if observer_intake is not None:
- observer_intake.Y = intake.Y
- observer_intake.CorrectObserver(U)
- self.offset.append(observer_intake.X_hat[2, 0])
+ intake.Update(U + 0.0)
- intake.Update(U + 2.0)
-
- if observer_intake is not None:
- observer_intake.PredictObserver(U)
+ observer_intake.PredictObserver(U)
self.t.append(initial_t + i * intake.dt)
self.u.append(U[0, 0])
- glog.debug('Time: %f', self.t[-1])
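+ # Take any clipped voltage back out of the feedforward term and advance
+ # the profiled goal with what was actually achievable under saturation.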
+ ff_U -= U_uncapped - U
+ goal = controller_intake.A * goal + controller_intake.B * ff_U
+
+ if U[0, 0] != U_uncapped[0, 0]:
+ profile.MoveCurrentState(
+ numpy.matrix([[goal[0, 0]], [goal[1, 0]]]))
+
+ glog.debug('Time: %f', self.t[-1])
+ glog.debug('goal_error %s', repr(end_goal - goal))
+ glog.debug('error %s', repr(observer_intake.X_hat - end_goal))
def Plot(self):
pylab.subplot(3, 1, 1)
@@ -257,7 +282,8 @@
# Test moving the intake with constant separation.
initial_X = numpy.matrix([[0.0], [0.0]])
R = numpy.matrix([[numpy.pi/2.0], [0.0], [0.0]])
- scenario_plotter.run_test(intake, goal=R, controller_intake=intake_controller,
+ scenario_plotter.run_test(intake, end_goal=R,
+ controller_intake=intake_controller,
observer_intake=observer_intake, iterations=200)
if FLAGS.plot: