Add flax and friends for training neural networks
This also updates our formatter, which now expects slightly different
formatting, and updates all the affected code to match.
optax provides the optimizers, jax is the underlying accelerated
linear algebra library, and tensorflow is used for loading datasets
and exporting models.
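
As a rough sketch of how the new pieces fit together (not code from this
patch; the TinyMLP model and train_step below are hypothetical), a flax
module defines the network, optax supplies the optimizer, and jax
provides the grad/jit machinery; tensorflow would only sit around the
loop, e.g. for feeding in batches:

  import jax
  import jax.numpy as jnp
  import optax
  from flax import linen as nn

  class TinyMLP(nn.Module):
      """Hypothetical two-layer network, for illustration only."""

      @nn.compact
      def __call__(self, x):
          x = nn.relu(nn.Dense(32)(x))
          return nn.Dense(1)(x)

  model = TinyMLP()
  # Initialize parameters from a dummy batch of 4-wide inputs.
  params = model.init(jax.random.PRNGKey(0), jnp.zeros((1, 4)))
  tx = optax.adam(1e-3)
  opt_state = tx.init(params)

  @jax.jit
  def train_step(params, opt_state, x, y):
      # Mean squared error on one batch.
      def loss_fn(p):
          return jnp.mean((model.apply(p, x) - y)**2)

      loss, grads = jax.value_and_grad(loss_fn)(params)
      updates, opt_state = tx.update(grads, opt_state)
      params = optax.apply_updates(params, updates)
      return params, opt_state, loss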
Change-Id: Ic4c3b425cda74267e1d0ad1615c42452cbefab8a
Signed-off-by: Austin Schuh <austin.linux@gmail.com>
diff --git a/motors/pistol_grip/generate_cogging.py b/motors/pistol_grip/generate_cogging.py
index 5b0a6e6..dc9f87d 100644
--- a/motors/pistol_grip/generate_cogging.py
+++ b/motors/pistol_grip/generate_cogging.py
@@ -8,7 +8,7 @@
def main(argv):
if len(argv) < 4:
- print 'Args: input output.cc struct_name'
+ print('Args: input output.cc struct_name')
return 1
data_sum = [0.0] * 4096
data_count = [0] * 4096
diff --git a/motors/python/haptic_phase_current.py b/motors/python/haptic_phase_current.py
index fec909d..ed0062a 100755
--- a/motors/python/haptic_phase_current.py
+++ b/motors/python/haptic_phase_current.py
@@ -117,11 +117,11 @@
# by to get motor current.
one_amp_scalar = (phases(f_single, 0.0).T * phases(g_single, 0.0))[0, 0]
-print 'Max BEMF', max(f(theta_range))
-print 'Max current', max(g(theta_range))
-print 'Max drive voltage (one_amp_driving_voltage)', max(
- one_amp_driving_voltage)
-print 'one_amp_scalar', one_amp_scalar
+print('Max BEMF', max(f(theta_range)))
+print('Max current', max(g(theta_range)))
+print('Max drive voltage (one_amp_driving_voltage)',
+ max(one_amp_driving_voltage))
+print('one_amp_scalar', one_amp_scalar)
pylab.figure()
pylab.subplot(1, 1, 1)
@@ -464,19 +464,20 @@
self.B_discrete_inverse_model = numpy.matrix(numpy.eye(3)) / (
self.B_discrete_model[0, 0] - self.B_discrete_model[1, 0])
- print 'constexpr double kL = %g;' % self.L_model
- print 'constexpr double kM = %g;' % self.M_model
- print 'constexpr double kR = %g;' % self.R_model
- print 'constexpr float kAdiscrete_diagonal = %gf;' % self.A_discrete_model[
- 0, 0]
- print 'constexpr float kAdiscrete_offdiagonal = %gf;' % self.A_discrete_model[
- 1, 0]
- print 'constexpr float kBdiscrete_inv_diagonal = %gf;' % self.B_discrete_inverse_model[
- 0, 0]
- print 'constexpr float kBdiscrete_inv_offdiagonal = %gf;' % self.B_discrete_inverse_model[
- 1, 0]
- print 'constexpr double kOneAmpScalar = %g;' % one_amp_scalar
- print 'constexpr double kMaxOneAmpDrivingVoltage = %g;' % max_one_amp_driving_voltage
+ print('constexpr double kL = %g;' % self.L_model)
+ print('constexpr double kM = %g;' % self.M_model)
+ print('constexpr double kR = %g;' % self.R_model)
+ print('constexpr float kAdiscrete_diagonal = %gf;' %
+ self.A_discrete_model[0, 0])
+ print('constexpr float kAdiscrete_offdiagonal = %gf;' %
+ self.A_discrete_model[1, 0])
+ print('constexpr float kBdiscrete_inv_diagonal = %gf;' %
+ self.B_discrete_inverse_model[0, 0])
+ print('constexpr float kBdiscrete_inv_offdiagonal = %gf;' %
+ self.B_discrete_inverse_model[1, 0])
+ print('constexpr double kOneAmpScalar = %g;' % one_amp_scalar)
+ print('constexpr double kMaxOneAmpDrivingVoltage = %g;' %
+ max_one_amp_driving_voltage)
print('A_discrete', self.A_discrete)
print('B_discrete', self.B_discrete)
print('B_discrete_sub', numpy.linalg.inv(self.B_discrete[0:2, 0:2]))
@@ -574,8 +575,8 @@
# Subtract that, and then run the stock statespace equation.
Vn_ff = self.B_discrete_inverse * (Inext - self.A_discrete *
(Icurrent - p) - p_next_imag.real)
- print 'Vn_ff', Vn_ff
- print 'Inext', Inext
+ print('Vn_ff', Vn_ff)
+ print('Inext', Inext)
Vn = Vn_ff + self.K * (Icurrent - measured_current)
E = phases(f_single, self.X[3, 0]) / Kv * self.X[4, 0]
@@ -629,7 +630,7 @@
self.current_time = t
- print 'Took %f to simulate' % (time.time() - start_wall_time)
+ print('Took %f to simulate' % (time.time() - start_wall_time))
self.data_logger.plot()