ekf2: symforce codegen remove reserved identifier naming

- from the C standard, "All identifiers that begin with an underscore and either an uppercase letter or another underscore are always reserved for any use"
This commit is contained in:
Daniel Agar 2023-01-26 12:48:02 -05:00
parent 0687fd2689
commit efe1d43550
21 changed files with 745 additions and 739 deletions

View File

View File

@ -36,6 +36,8 @@ Description:
import symforce.symbolic as sf
import re
# q: quaternion describing rotation from frame 1 to frame 2
# returns a rotation matrix derived from q which describes the same
# rotation
@ -107,6 +109,10 @@ def generate_px4_function(function_name, output_names):
line = line.replace("std::min", "math::min")
line = line.replace("Eigen", "matrix")
line = line.replace("matrix/Dense", "matrix/math.hpp")
# don't allow underscore + uppercase identifier naming (always reserved for any use)
line = re.sub(r'_([A-Z])', lambda x: '_' + x.group(1).lower(), line)
print(line, end='')
def generate_python_function(function_name, output_names):

View File

@ -47,67 +47,67 @@ void ComputeAirspeedHAndK(const matrix::Matrix<Scalar, 24, 1>& state,
// Output terms (2)
if (H != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _H = (*H);
matrix::Matrix<Scalar, 24, 1>& _h = (*H);
_H.setZero();
_h.setZero();
_H(4, 0) = _tmp3;
_H(5, 0) = _tmp4;
_H(6, 0) = _tmp5;
_H(22, 0) = -_tmp3;
_H(23, 0) = -_tmp4;
_h(4, 0) = _tmp3;
_h(5, 0) = _tmp4;
_h(6, 0) = _tmp5;
_h(22, 0) = -_tmp3;
_h(23, 0) = -_tmp4;
}
if (K != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _K = (*K);
matrix::Matrix<Scalar, 24, 1>& _k = (*K);
_K(0, 0) = _tmp6 * (-P(0, 22) * _tmp3 - P(0, 23) * _tmp4 + P(0, 4) * _tmp3 + P(0, 5) * _tmp4 +
_k(0, 0) = _tmp6 * (-P(0, 22) * _tmp3 - P(0, 23) * _tmp4 + P(0, 4) * _tmp3 + P(0, 5) * _tmp4 +
P(0, 6) * _tmp5);
_K(1, 0) = _tmp6 * (-P(1, 22) * _tmp3 - P(1, 23) * _tmp4 + P(1, 4) * _tmp3 + P(1, 5) * _tmp4 +
_k(1, 0) = _tmp6 * (-P(1, 22) * _tmp3 - P(1, 23) * _tmp4 + P(1, 4) * _tmp3 + P(1, 5) * _tmp4 +
P(1, 6) * _tmp5);
_K(2, 0) = _tmp6 * (-P(2, 22) * _tmp3 - P(2, 23) * _tmp4 + P(2, 4) * _tmp3 + P(2, 5) * _tmp4 +
_k(2, 0) = _tmp6 * (-P(2, 22) * _tmp3 - P(2, 23) * _tmp4 + P(2, 4) * _tmp3 + P(2, 5) * _tmp4 +
P(2, 6) * _tmp5);
_K(3, 0) = _tmp6 * (-P(3, 22) * _tmp3 - P(3, 23) * _tmp4 + P(3, 4) * _tmp3 + P(3, 5) * _tmp4 +
_k(3, 0) = _tmp6 * (-P(3, 22) * _tmp3 - P(3, 23) * _tmp4 + P(3, 4) * _tmp3 + P(3, 5) * _tmp4 +
P(3, 6) * _tmp5);
_K(4, 0) = _tmp6 * (-P(4, 22) * _tmp3 - P(4, 23) * _tmp4 + P(4, 4) * _tmp3 + P(4, 5) * _tmp4 +
_k(4, 0) = _tmp6 * (-P(4, 22) * _tmp3 - P(4, 23) * _tmp4 + P(4, 4) * _tmp3 + P(4, 5) * _tmp4 +
P(4, 6) * _tmp5);
_K(5, 0) = _tmp6 * (-P(5, 22) * _tmp3 - P(5, 23) * _tmp4 + P(5, 4) * _tmp3 + P(5, 5) * _tmp4 +
_k(5, 0) = _tmp6 * (-P(5, 22) * _tmp3 - P(5, 23) * _tmp4 + P(5, 4) * _tmp3 + P(5, 5) * _tmp4 +
P(5, 6) * _tmp5);
_K(6, 0) = _tmp6 * (-P(6, 22) * _tmp3 - P(6, 23) * _tmp4 + P(6, 4) * _tmp3 + P(6, 5) * _tmp4 +
_k(6, 0) = _tmp6 * (-P(6, 22) * _tmp3 - P(6, 23) * _tmp4 + P(6, 4) * _tmp3 + P(6, 5) * _tmp4 +
P(6, 6) * _tmp5);
_K(7, 0) = _tmp6 * (-P(7, 22) * _tmp3 - P(7, 23) * _tmp4 + P(7, 4) * _tmp3 + P(7, 5) * _tmp4 +
_k(7, 0) = _tmp6 * (-P(7, 22) * _tmp3 - P(7, 23) * _tmp4 + P(7, 4) * _tmp3 + P(7, 5) * _tmp4 +
P(7, 6) * _tmp5);
_K(8, 0) = _tmp6 * (-P(8, 22) * _tmp3 - P(8, 23) * _tmp4 + P(8, 4) * _tmp3 + P(8, 5) * _tmp4 +
_k(8, 0) = _tmp6 * (-P(8, 22) * _tmp3 - P(8, 23) * _tmp4 + P(8, 4) * _tmp3 + P(8, 5) * _tmp4 +
P(8, 6) * _tmp5);
_K(9, 0) = _tmp6 * (-P(9, 22) * _tmp3 - P(9, 23) * _tmp4 + P(9, 4) * _tmp3 + P(9, 5) * _tmp4 +
_k(9, 0) = _tmp6 * (-P(9, 22) * _tmp3 - P(9, 23) * _tmp4 + P(9, 4) * _tmp3 + P(9, 5) * _tmp4 +
P(9, 6) * _tmp5);
_K(10, 0) = _tmp6 * (-P(10, 22) * _tmp3 - P(10, 23) * _tmp4 + P(10, 4) * _tmp3 +
_k(10, 0) = _tmp6 * (-P(10, 22) * _tmp3 - P(10, 23) * _tmp4 + P(10, 4) * _tmp3 +
P(10, 5) * _tmp4 + P(10, 6) * _tmp5);
_K(11, 0) = _tmp6 * (-P(11, 22) * _tmp3 - P(11, 23) * _tmp4 + P(11, 4) * _tmp3 +
_k(11, 0) = _tmp6 * (-P(11, 22) * _tmp3 - P(11, 23) * _tmp4 + P(11, 4) * _tmp3 +
P(11, 5) * _tmp4 + P(11, 6) * _tmp5);
_K(12, 0) = _tmp6 * (-P(12, 22) * _tmp3 - P(12, 23) * _tmp4 + P(12, 4) * _tmp3 +
_k(12, 0) = _tmp6 * (-P(12, 22) * _tmp3 - P(12, 23) * _tmp4 + P(12, 4) * _tmp3 +
P(12, 5) * _tmp4 + P(12, 6) * _tmp5);
_K(13, 0) = _tmp6 * (-P(13, 22) * _tmp3 - P(13, 23) * _tmp4 + P(13, 4) * _tmp3 +
_k(13, 0) = _tmp6 * (-P(13, 22) * _tmp3 - P(13, 23) * _tmp4 + P(13, 4) * _tmp3 +
P(13, 5) * _tmp4 + P(13, 6) * _tmp5);
_K(14, 0) = _tmp6 * (-P(14, 22) * _tmp3 - P(14, 23) * _tmp4 + P(14, 4) * _tmp3 +
_k(14, 0) = _tmp6 * (-P(14, 22) * _tmp3 - P(14, 23) * _tmp4 + P(14, 4) * _tmp3 +
P(14, 5) * _tmp4 + P(14, 6) * _tmp5);
_K(15, 0) = _tmp6 * (-P(15, 22) * _tmp3 - P(15, 23) * _tmp4 + P(15, 4) * _tmp3 +
_k(15, 0) = _tmp6 * (-P(15, 22) * _tmp3 - P(15, 23) * _tmp4 + P(15, 4) * _tmp3 +
P(15, 5) * _tmp4 + P(15, 6) * _tmp5);
_K(16, 0) = _tmp6 * (-P(16, 22) * _tmp3 - P(16, 23) * _tmp4 + P(16, 4) * _tmp3 +
_k(16, 0) = _tmp6 * (-P(16, 22) * _tmp3 - P(16, 23) * _tmp4 + P(16, 4) * _tmp3 +
P(16, 5) * _tmp4 + P(16, 6) * _tmp5);
_K(17, 0) = _tmp6 * (-P(17, 22) * _tmp3 - P(17, 23) * _tmp4 + P(17, 4) * _tmp3 +
_k(17, 0) = _tmp6 * (-P(17, 22) * _tmp3 - P(17, 23) * _tmp4 + P(17, 4) * _tmp3 +
P(17, 5) * _tmp4 + P(17, 6) * _tmp5);
_K(18, 0) = _tmp6 * (-P(18, 22) * _tmp3 - P(18, 23) * _tmp4 + P(18, 4) * _tmp3 +
_k(18, 0) = _tmp6 * (-P(18, 22) * _tmp3 - P(18, 23) * _tmp4 + P(18, 4) * _tmp3 +
P(18, 5) * _tmp4 + P(18, 6) * _tmp5);
_K(19, 0) = _tmp6 * (-P(19, 22) * _tmp3 - P(19, 23) * _tmp4 + P(19, 4) * _tmp3 +
_k(19, 0) = _tmp6 * (-P(19, 22) * _tmp3 - P(19, 23) * _tmp4 + P(19, 4) * _tmp3 +
P(19, 5) * _tmp4 + P(19, 6) * _tmp5);
_K(20, 0) = _tmp6 * (-P(20, 22) * _tmp3 - P(20, 23) * _tmp4 + P(20, 4) * _tmp3 +
_k(20, 0) = _tmp6 * (-P(20, 22) * _tmp3 - P(20, 23) * _tmp4 + P(20, 4) * _tmp3 +
P(20, 5) * _tmp4 + P(20, 6) * _tmp5);
_K(21, 0) = _tmp6 * (-P(21, 22) * _tmp3 - P(21, 23) * _tmp4 + P(21, 4) * _tmp3 +
_k(21, 0) = _tmp6 * (-P(21, 22) * _tmp3 - P(21, 23) * _tmp4 + P(21, 4) * _tmp3 +
P(21, 5) * _tmp4 + P(21, 6) * _tmp5);
_K(22, 0) = _tmp6 * (-P(22, 22) * _tmp3 - P(22, 23) * _tmp4 + P(22, 4) * _tmp3 +
_k(22, 0) = _tmp6 * (-P(22, 22) * _tmp3 - P(22, 23) * _tmp4 + P(22, 4) * _tmp3 +
P(22, 5) * _tmp4 + P(22, 6) * _tmp5);
_K(23, 0) = _tmp6 * (-P(23, 22) * _tmp3 - P(23, 23) * _tmp4 + P(23, 4) * _tmp3 +
_k(23, 0) = _tmp6 * (-P(23, 22) * _tmp3 - P(23, 23) * _tmp4 + P(23, 4) * _tmp3 +
P(23, 5) * _tmp4 + P(23, 6) * _tmp5);
}
} // NOLINT(readability/fn_size)

View File

@ -167,14 +167,14 @@ void ComputeDragXInnovVarAndK(const matrix::Matrix<Scalar, 24, 1>& state,
}
if (K != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _K = (*K);
matrix::Matrix<Scalar, 24, 1>& _k = (*K);
_K.setZero();
_k.setZero();
_K(22, 0) = _tmp75 * (P(22, 0) * _tmp57 + P(22, 1) * _tmp67 + P(22, 2) * _tmp45 +
_k(22, 0) = _tmp75 * (P(22, 0) * _tmp57 + P(22, 1) * _tmp67 + P(22, 2) * _tmp45 +
P(22, 23) * _tmp66 + P(22, 3) * _tmp70 + P(22, 4) * _tmp63 +
P(22, 5) * _tmp69 + P(22, 6) * _tmp51 + _tmp73);
_K(23, 0) = _tmp75 * (P(23, 0) * _tmp57 + P(23, 1) * _tmp67 + P(23, 2) * _tmp45 +
_k(23, 0) = _tmp75 * (P(23, 0) * _tmp57 + P(23, 1) * _tmp67 + P(23, 2) * _tmp45 +
P(23, 22) * _tmp71 + P(23, 3) * _tmp70 + P(23, 4) * _tmp63 +
P(23, 5) * _tmp69 + P(23, 6) * _tmp51 + _tmp72);
}

View File

@ -168,14 +168,14 @@ void ComputeDragYInnovVarAndK(const matrix::Matrix<Scalar, 24, 1>& state,
}
if (K != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _K = (*K);
matrix::Matrix<Scalar, 24, 1>& _k = (*K);
_K.setZero();
_k.setZero();
_K(22, 0) = _tmp75 * (P(22, 0) * _tmp50 + P(22, 1) * _tmp44 + P(22, 2) * _tmp51 +
_k(22, 0) = _tmp75 * (P(22, 0) * _tmp50 + P(22, 1) * _tmp44 + P(22, 2) * _tmp51 +
P(22, 23) * _tmp58 + P(22, 3) * _tmp53 + P(22, 4) * _tmp65 +
P(22, 5) * _tmp71 + P(22, 6) * _tmp70 + _tmp72);
_K(23, 0) = _tmp75 * (P(23, 0) * _tmp50 + P(23, 1) * _tmp44 + P(23, 2) * _tmp51 +
_k(23, 0) = _tmp75 * (P(23, 0) * _tmp50 + P(23, 1) * _tmp44 + P(23, 2) * _tmp51 +
P(23, 22) * _tmp66 + P(23, 3) * _tmp53 + P(23, 4) * _tmp65 +
P(23, 5) * _tmp71 + P(23, 6) * _tmp70 + _tmp73);
}

View File

@ -109,17 +109,17 @@ void ComputeFlowXyInnovVarAndHx(const matrix::Matrix<Scalar, 24, 1>& state,
}
if (H != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _H = (*H);
matrix::Matrix<Scalar, 24, 1>& _h = (*H);
_H.setZero();
_h.setZero();
_H(0, 0) = _tmp17;
_H(1, 0) = _tmp21;
_H(2, 0) = _tmp22;
_H(3, 0) = _tmp15;
_H(4, 0) = _tmp9;
_H(5, 0) = _tmp4;
_H(6, 0) = _tmp23;
_h(0, 0) = _tmp17;
_h(1, 0) = _tmp21;
_h(2, 0) = _tmp22;
_h(3, 0) = _tmp15;
_h(4, 0) = _tmp9;
_h(5, 0) = _tmp4;
_h(6, 0) = _tmp23;
}
} // NOLINT(readability/fn_size)

View File

@ -77,17 +77,17 @@ void ComputeFlowYInnovVarAndH(const matrix::Matrix<Scalar, 24, 1>& state,
}
if (H != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _H = (*H);
matrix::Matrix<Scalar, 24, 1>& _h = (*H);
_H.setZero();
_h.setZero();
_H(0, 0) = -_tmp10;
_H(1, 0) = -_tmp11;
_H(2, 0) = -_tmp12;
_H(3, 0) = -_tmp8;
_H(4, 0) = -_tmp1;
_H(5, 0) = -_tmp4;
_H(6, 0) = -_tmp5;
_h(0, 0) = -_tmp10;
_h(1, 0) = -_tmp11;
_h(2, 0) = -_tmp12;
_h(3, 0) = -_tmp8;
_h(4, 0) = -_tmp1;
_h(5, 0) = -_tmp4;
_h(6, 0) = -_tmp5;
}
} // NOLINT(readability/fn_size)

View File

@ -88,14 +88,14 @@ void ComputeGnssYawInnonInnovVarAndH(const matrix::Matrix<Scalar, 24, 1>& state,
}
if (H != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _H = (*H);
matrix::Matrix<Scalar, 24, 1>& _h = (*H);
_H.setZero();
_h.setZero();
_H(0, 0) = _tmp26;
_H(1, 0) = _tmp25;
_H(2, 0) = _tmp27;
_H(3, 0) = _tmp19;
_h(0, 0) = _tmp26;
_h(1, 0) = _tmp25;
_h(2, 0) = _tmp27;
_h(3, 0) = _tmp19;
}
} // NOLINT(readability/fn_size)

View File

@ -61,12 +61,12 @@ void ComputeMagDeclinationInnovInnovVarAndH(const matrix::Matrix<Scalar, 24, 1>&
}
if (H != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _H = (*H);
matrix::Matrix<Scalar, 24, 1>& _h = (*H);
_H.setZero();
_h.setZero();
_H(16, 0) = -_tmp2;
_H(17, 0) = _tmp3;
_h(16, 0) = -_tmp2;
_h(17, 0) = _tmp3;
}
} // NOLINT(readability/fn_size)

View File

@ -158,18 +158,18 @@ void ComputeMagInnovInnovVarAndHx(const matrix::Matrix<Scalar, 24, 1>& state,
}
if (Hx != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _Hx = (*Hx);
matrix::Matrix<Scalar, 24, 1>& _hx = (*Hx);
_Hx.setZero();
_hx.setZero();
_Hx(0, 0) = _tmp23;
_Hx(1, 0) = _tmp24;
_Hx(2, 0) = _tmp32;
_Hx(3, 0) = _tmp28;
_Hx(16, 0) = _tmp6;
_Hx(17, 0) = _tmp35;
_Hx(18, 0) = _tmp38;
_Hx(19, 0) = 1;
_hx(0, 0) = _tmp23;
_hx(1, 0) = _tmp24;
_hx(2, 0) = _tmp32;
_hx(3, 0) = _tmp28;
_hx(16, 0) = _tmp6;
_hx(17, 0) = _tmp35;
_hx(18, 0) = _tmp38;
_hx(19, 0) = 1;
}
} // NOLINT(readability/fn_size)

View File

@ -72,18 +72,18 @@ void ComputeMagYInnovVarAndH(const matrix::Matrix<Scalar, 24, 1>& state,
}
if (H != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _H = (*H);
matrix::Matrix<Scalar, 24, 1>& _h = (*H);
_H.setZero();
_h.setZero();
_H(0, 0) = _tmp3;
_H(1, 0) = _tmp5;
_H(2, 0) = _tmp7;
_H(3, 0) = _tmp6;
_H(16, 0) = _tmp8;
_H(17, 0) = _tmp10;
_H(18, 0) = _tmp9;
_H(20, 0) = 1;
_h(0, 0) = _tmp3;
_h(1, 0) = _tmp5;
_h(2, 0) = _tmp7;
_h(3, 0) = _tmp6;
_h(16, 0) = _tmp8;
_h(17, 0) = _tmp10;
_h(18, 0) = _tmp9;
_h(20, 0) = 1;
}
} // NOLINT(readability/fn_size)

View File

@ -72,18 +72,18 @@ void ComputeMagZInnovVarAndH(const matrix::Matrix<Scalar, 24, 1>& state,
}
if (H != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _H = (*H);
matrix::Matrix<Scalar, 24, 1>& _h = (*H);
_H.setZero();
_h.setZero();
_H(0, 0) = _tmp3;
_H(1, 0) = _tmp5;
_H(2, 0) = _tmp7;
_H(3, 0) = _tmp6;
_H(16, 0) = _tmp8;
_H(17, 0) = _tmp9;
_H(18, 0) = _tmp10;
_H(21, 0) = 1;
_h(0, 0) = _tmp3;
_h(1, 0) = _tmp5;
_h(2, 0) = _tmp7;
_h(3, 0) = _tmp6;
_h(16, 0) = _tmp8;
_h(17, 0) = _tmp9;
_h(18, 0) = _tmp10;
_h(21, 0) = 1;
}
} // NOLINT(readability/fn_size)

View File

@ -84,94 +84,94 @@ void ComputeSideslipHAndK(const matrix::Matrix<Scalar, 24, 1>& state,
// Output terms (2)
if (H != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _H = (*H);
matrix::Matrix<Scalar, 24, 1>& _h = (*H);
_H.setZero();
_h.setZero();
_H(0, 0) = _tmp26;
_H(1, 0) = _tmp31;
_H(2, 0) = _tmp32;
_H(3, 0) = _tmp33;
_H(4, 0) = _tmp37;
_H(5, 0) = _tmp39;
_H(6, 0) = _tmp40;
_H(22, 0) = _tmp41;
_H(23, 0) = _tmp42;
_h(0, 0) = _tmp26;
_h(1, 0) = _tmp31;
_h(2, 0) = _tmp32;
_h(3, 0) = _tmp33;
_h(4, 0) = _tmp37;
_h(5, 0) = _tmp39;
_h(6, 0) = _tmp40;
_h(22, 0) = _tmp41;
_h(23, 0) = _tmp42;
}
if (K != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _K = (*K);
matrix::Matrix<Scalar, 24, 1>& _k = (*K);
_K(0, 0) = _tmp43 * (P(0, 0) * _tmp26 + P(0, 1) * _tmp31 + P(0, 2) * _tmp32 +
_k(0, 0) = _tmp43 * (P(0, 0) * _tmp26 + P(0, 1) * _tmp31 + P(0, 2) * _tmp32 +
P(0, 22) * _tmp41 + P(0, 23) * _tmp42 + P(0, 3) * _tmp33 +
P(0, 4) * _tmp37 + P(0, 5) * _tmp39 + P(0, 6) * _tmp40);
_K(1, 0) = _tmp43 * (P(1, 0) * _tmp26 + P(1, 1) * _tmp31 + P(1, 2) * _tmp32 +
_k(1, 0) = _tmp43 * (P(1, 0) * _tmp26 + P(1, 1) * _tmp31 + P(1, 2) * _tmp32 +
P(1, 22) * _tmp41 + P(1, 23) * _tmp42 + P(1, 3) * _tmp33 +
P(1, 4) * _tmp37 + P(1, 5) * _tmp39 + P(1, 6) * _tmp40);
_K(2, 0) = _tmp43 * (P(2, 0) * _tmp26 + P(2, 1) * _tmp31 + P(2, 2) * _tmp32 +
_k(2, 0) = _tmp43 * (P(2, 0) * _tmp26 + P(2, 1) * _tmp31 + P(2, 2) * _tmp32 +
P(2, 22) * _tmp41 + P(2, 23) * _tmp42 + P(2, 3) * _tmp33 +
P(2, 4) * _tmp37 + P(2, 5) * _tmp39 + P(2, 6) * _tmp40);
_K(3, 0) = _tmp43 * (P(3, 0) * _tmp26 + P(3, 1) * _tmp31 + P(3, 2) * _tmp32 +
_k(3, 0) = _tmp43 * (P(3, 0) * _tmp26 + P(3, 1) * _tmp31 + P(3, 2) * _tmp32 +
P(3, 22) * _tmp41 + P(3, 23) * _tmp42 + P(3, 3) * _tmp33 +
P(3, 4) * _tmp37 + P(3, 5) * _tmp39 + P(3, 6) * _tmp40);
_K(4, 0) = _tmp43 * (P(4, 0) * _tmp26 + P(4, 1) * _tmp31 + P(4, 2) * _tmp32 +
_k(4, 0) = _tmp43 * (P(4, 0) * _tmp26 + P(4, 1) * _tmp31 + P(4, 2) * _tmp32 +
P(4, 22) * _tmp41 + P(4, 23) * _tmp42 + P(4, 3) * _tmp33 +
P(4, 4) * _tmp37 + P(4, 5) * _tmp39 + P(4, 6) * _tmp40);
_K(5, 0) = _tmp43 * (P(5, 0) * _tmp26 + P(5, 1) * _tmp31 + P(5, 2) * _tmp32 +
_k(5, 0) = _tmp43 * (P(5, 0) * _tmp26 + P(5, 1) * _tmp31 + P(5, 2) * _tmp32 +
P(5, 22) * _tmp41 + P(5, 23) * _tmp42 + P(5, 3) * _tmp33 +
P(5, 4) * _tmp37 + P(5, 5) * _tmp39 + P(5, 6) * _tmp40);
_K(6, 0) = _tmp43 * (P(6, 0) * _tmp26 + P(6, 1) * _tmp31 + P(6, 2) * _tmp32 +
_k(6, 0) = _tmp43 * (P(6, 0) * _tmp26 + P(6, 1) * _tmp31 + P(6, 2) * _tmp32 +
P(6, 22) * _tmp41 + P(6, 23) * _tmp42 + P(6, 3) * _tmp33 +
P(6, 4) * _tmp37 + P(6, 5) * _tmp39 + P(6, 6) * _tmp40);
_K(7, 0) = _tmp43 * (P(7, 0) * _tmp26 + P(7, 1) * _tmp31 + P(7, 2) * _tmp32 +
_k(7, 0) = _tmp43 * (P(7, 0) * _tmp26 + P(7, 1) * _tmp31 + P(7, 2) * _tmp32 +
P(7, 22) * _tmp41 + P(7, 23) * _tmp42 + P(7, 3) * _tmp33 +
P(7, 4) * _tmp37 + P(7, 5) * _tmp39 + P(7, 6) * _tmp40);
_K(8, 0) = _tmp43 * (P(8, 0) * _tmp26 + P(8, 1) * _tmp31 + P(8, 2) * _tmp32 +
_k(8, 0) = _tmp43 * (P(8, 0) * _tmp26 + P(8, 1) * _tmp31 + P(8, 2) * _tmp32 +
P(8, 22) * _tmp41 + P(8, 23) * _tmp42 + P(8, 3) * _tmp33 +
P(8, 4) * _tmp37 + P(8, 5) * _tmp39 + P(8, 6) * _tmp40);
_K(9, 0) = _tmp43 * (P(9, 0) * _tmp26 + P(9, 1) * _tmp31 + P(9, 2) * _tmp32 +
_k(9, 0) = _tmp43 * (P(9, 0) * _tmp26 + P(9, 1) * _tmp31 + P(9, 2) * _tmp32 +
P(9, 22) * _tmp41 + P(9, 23) * _tmp42 + P(9, 3) * _tmp33 +
P(9, 4) * _tmp37 + P(9, 5) * _tmp39 + P(9, 6) * _tmp40);
_K(10, 0) = _tmp43 * (P(10, 0) * _tmp26 + P(10, 1) * _tmp31 + P(10, 2) * _tmp32 +
_k(10, 0) = _tmp43 * (P(10, 0) * _tmp26 + P(10, 1) * _tmp31 + P(10, 2) * _tmp32 +
P(10, 22) * _tmp41 + P(10, 23) * _tmp42 + P(10, 3) * _tmp33 +
P(10, 4) * _tmp37 + P(10, 5) * _tmp39 + P(10, 6) * _tmp40);
_K(11, 0) = _tmp43 * (P(11, 0) * _tmp26 + P(11, 1) * _tmp31 + P(11, 2) * _tmp32 +
_k(11, 0) = _tmp43 * (P(11, 0) * _tmp26 + P(11, 1) * _tmp31 + P(11, 2) * _tmp32 +
P(11, 22) * _tmp41 + P(11, 23) * _tmp42 + P(11, 3) * _tmp33 +
P(11, 4) * _tmp37 + P(11, 5) * _tmp39 + P(11, 6) * _tmp40);
_K(12, 0) = _tmp43 * (P(12, 0) * _tmp26 + P(12, 1) * _tmp31 + P(12, 2) * _tmp32 +
_k(12, 0) = _tmp43 * (P(12, 0) * _tmp26 + P(12, 1) * _tmp31 + P(12, 2) * _tmp32 +
P(12, 22) * _tmp41 + P(12, 23) * _tmp42 + P(12, 3) * _tmp33 +
P(12, 4) * _tmp37 + P(12, 5) * _tmp39 + P(12, 6) * _tmp40);
_K(13, 0) = _tmp43 * (P(13, 0) * _tmp26 + P(13, 1) * _tmp31 + P(13, 2) * _tmp32 +
_k(13, 0) = _tmp43 * (P(13, 0) * _tmp26 + P(13, 1) * _tmp31 + P(13, 2) * _tmp32 +
P(13, 22) * _tmp41 + P(13, 23) * _tmp42 + P(13, 3) * _tmp33 +
P(13, 4) * _tmp37 + P(13, 5) * _tmp39 + P(13, 6) * _tmp40);
_K(14, 0) = _tmp43 * (P(14, 0) * _tmp26 + P(14, 1) * _tmp31 + P(14, 2) * _tmp32 +
_k(14, 0) = _tmp43 * (P(14, 0) * _tmp26 + P(14, 1) * _tmp31 + P(14, 2) * _tmp32 +
P(14, 22) * _tmp41 + P(14, 23) * _tmp42 + P(14, 3) * _tmp33 +
P(14, 4) * _tmp37 + P(14, 5) * _tmp39 + P(14, 6) * _tmp40);
_K(15, 0) = _tmp43 * (P(15, 0) * _tmp26 + P(15, 1) * _tmp31 + P(15, 2) * _tmp32 +
_k(15, 0) = _tmp43 * (P(15, 0) * _tmp26 + P(15, 1) * _tmp31 + P(15, 2) * _tmp32 +
P(15, 22) * _tmp41 + P(15, 23) * _tmp42 + P(15, 3) * _tmp33 +
P(15, 4) * _tmp37 + P(15, 5) * _tmp39 + P(15, 6) * _tmp40);
_K(16, 0) = _tmp43 * (P(16, 0) * _tmp26 + P(16, 1) * _tmp31 + P(16, 2) * _tmp32 +
_k(16, 0) = _tmp43 * (P(16, 0) * _tmp26 + P(16, 1) * _tmp31 + P(16, 2) * _tmp32 +
P(16, 22) * _tmp41 + P(16, 23) * _tmp42 + P(16, 3) * _tmp33 +
P(16, 4) * _tmp37 + P(16, 5) * _tmp39 + P(16, 6) * _tmp40);
_K(17, 0) = _tmp43 * (P(17, 0) * _tmp26 + P(17, 1) * _tmp31 + P(17, 2) * _tmp32 +
_k(17, 0) = _tmp43 * (P(17, 0) * _tmp26 + P(17, 1) * _tmp31 + P(17, 2) * _tmp32 +
P(17, 22) * _tmp41 + P(17, 23) * _tmp42 + P(17, 3) * _tmp33 +
P(17, 4) * _tmp37 + P(17, 5) * _tmp39 + P(17, 6) * _tmp40);
_K(18, 0) = _tmp43 * (P(18, 0) * _tmp26 + P(18, 1) * _tmp31 + P(18, 2) * _tmp32 +
_k(18, 0) = _tmp43 * (P(18, 0) * _tmp26 + P(18, 1) * _tmp31 + P(18, 2) * _tmp32 +
P(18, 22) * _tmp41 + P(18, 23) * _tmp42 + P(18, 3) * _tmp33 +
P(18, 4) * _tmp37 + P(18, 5) * _tmp39 + P(18, 6) * _tmp40);
_K(19, 0) = _tmp43 * (P(19, 0) * _tmp26 + P(19, 1) * _tmp31 + P(19, 2) * _tmp32 +
_k(19, 0) = _tmp43 * (P(19, 0) * _tmp26 + P(19, 1) * _tmp31 + P(19, 2) * _tmp32 +
P(19, 22) * _tmp41 + P(19, 23) * _tmp42 + P(19, 3) * _tmp33 +
P(19, 4) * _tmp37 + P(19, 5) * _tmp39 + P(19, 6) * _tmp40);
_K(20, 0) = _tmp43 * (P(20, 0) * _tmp26 + P(20, 1) * _tmp31 + P(20, 2) * _tmp32 +
_k(20, 0) = _tmp43 * (P(20, 0) * _tmp26 + P(20, 1) * _tmp31 + P(20, 2) * _tmp32 +
P(20, 22) * _tmp41 + P(20, 23) * _tmp42 + P(20, 3) * _tmp33 +
P(20, 4) * _tmp37 + P(20, 5) * _tmp39 + P(20, 6) * _tmp40);
_K(21, 0) = _tmp43 * (P(21, 0) * _tmp26 + P(21, 1) * _tmp31 + P(21, 2) * _tmp32 +
_k(21, 0) = _tmp43 * (P(21, 0) * _tmp26 + P(21, 1) * _tmp31 + P(21, 2) * _tmp32 +
P(21, 22) * _tmp41 + P(21, 23) * _tmp42 + P(21, 3) * _tmp33 +
P(21, 4) * _tmp37 + P(21, 5) * _tmp39 + P(21, 6) * _tmp40);
_K(22, 0) = _tmp43 * (P(22, 0) * _tmp26 + P(22, 1) * _tmp31 + P(22, 2) * _tmp32 +
_k(22, 0) = _tmp43 * (P(22, 0) * _tmp26 + P(22, 1) * _tmp31 + P(22, 2) * _tmp32 +
P(22, 22) * _tmp41 + P(22, 23) * _tmp42 + P(22, 3) * _tmp33 +
P(22, 4) * _tmp37 + P(22, 5) * _tmp39 + P(22, 6) * _tmp40);
_K(23, 0) = _tmp43 * (P(23, 0) * _tmp26 + P(23, 1) * _tmp31 + P(23, 2) * _tmp32 +
_k(23, 0) = _tmp43 * (P(23, 0) * _tmp26 + P(23, 1) * _tmp31 + P(23, 2) * _tmp32 +
P(23, 22) * _tmp41 + P(23, 23) * _tmp42 + P(23, 3) * _tmp33 +
P(23, 4) * _tmp37 + P(23, 5) * _tmp39 + P(23, 6) * _tmp40);
}

View File

@ -60,14 +60,14 @@ void ComputeYaw312InnovVarAndH(const matrix::Matrix<Scalar, 24, 1>& state,
}
if (H != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _H = (*H);
matrix::Matrix<Scalar, 24, 1>& _h = (*H);
_H.setZero();
_h.setZero();
_H(0, 0) = _tmp10;
_H(1, 0) = _tmp7;
_H(2, 0) = _tmp8;
_H(3, 0) = _tmp9;
_h(0, 0) = _tmp10;
_h(1, 0) = _tmp7;
_h(2, 0) = _tmp8;
_h(3, 0) = _tmp9;
}
} // NOLINT(readability/fn_size)

View File

@ -60,14 +60,14 @@ void ComputeYaw312InnovVarAndHAlternate(const matrix::Matrix<Scalar, 24, 1>& sta
}
if (H != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _H = (*H);
matrix::Matrix<Scalar, 24, 1>& _h = (*H);
_H.setZero();
_h.setZero();
_H(0, 0) = -_tmp10;
_H(1, 0) = -_tmp8;
_H(2, 0) = -_tmp7;
_H(3, 0) = -_tmp9;
_h(0, 0) = -_tmp10;
_h(1, 0) = -_tmp8;
_h(2, 0) = -_tmp7;
_h(3, 0) = -_tmp9;
}
} // NOLINT(readability/fn_size)

View File

@ -60,14 +60,14 @@ void ComputeYaw321InnovVarAndH(const matrix::Matrix<Scalar, 24, 1>& state,
}
if (H != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _H = (*H);
matrix::Matrix<Scalar, 24, 1>& _h = (*H);
_H.setZero();
_h.setZero();
_H(0, 0) = _tmp9;
_H(1, 0) = _tmp8;
_H(2, 0) = _tmp7;
_H(3, 0) = _tmp10;
_h(0, 0) = _tmp9;
_h(1, 0) = _tmp8;
_h(2, 0) = _tmp7;
_h(3, 0) = _tmp10;
}
} // NOLINT(readability/fn_size)

View File

@ -60,14 +60,14 @@ void ComputeYaw321InnovVarAndHAlternate(const matrix::Matrix<Scalar, 24, 1>& sta
}
if (H != nullptr) {
matrix::Matrix<Scalar, 24, 1>& _H = (*H);
matrix::Matrix<Scalar, 24, 1>& _h = (*H);
_H.setZero();
_h.setZero();
_H(0, 0) = -_tmp9;
_H(1, 0) = -_tmp8;
_H(2, 0) = -_tmp10;
_H(3, 0) = -_tmp7;
_h(0, 0) = -_tmp9;
_h(1, 0) = -_tmp8;
_h(2, 0) = -_tmp10;
_h(3, 0) = -_tmp7;
}
} // NOLINT(readability/fn_size)

View File

@ -56,9 +56,9 @@ void TerrEstComputeFlowXyInnovVarAndHx(const Scalar terrain_vpos, const Scalar P
}
if (H != nullptr) {
Scalar& _H = (*H);
Scalar& _h = (*H);
_H = _tmp0 * v(1, 0) / std::pow(_tmp2, Scalar(2));
_h = _tmp0 * v(1, 0) / std::pow(_tmp2, Scalar(2));
}
} // NOLINT(readability/fn_size)

View File

@ -55,9 +55,9 @@ void TerrEstComputeFlowYInnovVarAndH(const Scalar terrain_vpos, const Scalar P,
}
if (H != nullptr) {
Scalar& _H = (*H);
Scalar& _h = (*H);
_H = -_tmp0 * v(0, 0) / std::pow(_tmp2, Scalar(2));
_h = -_tmp0 * v(0, 0) / std::pow(_tmp2, Scalar(2));
}
} // NOLINT(readability/fn_size)