Mean.cpp

#include "geometry/Metrics.hpp"
#include "geometry/Basics.hpp"
#include "geometry/Geodesic.hpp"
#include "geometry/Distance.hpp"
#include "geometry/Mean.hpp"
#include <unsupported/Eigen/MatrixFunctions>
#include <iostream>
#include <limits>	// std::numeric_limits is used for the convergence criteria below

namespace Geometry {

//static const double EPSILON = 0.000000001;	// 10^{-9}
static const double EPSILON = 0.0001;	// 10^{-4}
static const size_t ITER_MAX = 50;
//---------------------------------------------------------------------------------------------------
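// Mean() validates the input (non-empty vector, matrices of equal size, square matrices for every
// metric that relies on matrix functions) and then dispatches to the estimator of the requested metric.
// Illustrative usage (sketch only; assumes `covs` already holds symmetric positive-definite matrices):
//   std::vector<Eigen::MatrixXd> covs = /* ... */;
//   Eigen::MatrixXd mean;
//   if (Geometry::Mean(covs, mean, Geometry::EMetric::Riemann)) { /* use mean */ }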
bool Mean(const std::vector<Eigen::MatrixXd>& covs, Eigen::MatrixXd& mean, const EMetric metric)
{
    if (covs.empty()) { return false; }  // If no matrix in vector
    if (covs.size() == 1)                // If just one matrix in vector
    {
        mean = covs[0];
        return true;
    }
    if (!HaveSameSize(covs))
    {
        std::cout << "Matrices haven't the same size." << std::endl;
        return false;
    }
    // Require square matrices for every metric except Euclidian and Identity
    if (!IsSquare(covs[0]) && (metric != EMetric::Euclidian && metric != EMetric::Identity))
    {
        std::cout << "Non-square matrix is invalid with " << toString(metric) << " metric." << std::endl;
        return false;
    }
    switch (metric)  // Switch method
    {
        case EMetric::Riemann: return MeanRiemann(covs, mean);
        case EMetric::Euclidian: return MeanEuclidian(covs, mean);
        case EMetric::LogEuclidian: return MeanLogEuclidian(covs, mean);
        case EMetric::LogDet: return MeanLogDet(covs, mean);
        case EMetric::Kullback: return MeanKullback(covs, mean);
        case EMetric::ALE: return MeanALE(covs, mean);
        case EMetric::Harmonic: return MeanHarmonic(covs, mean);
        case EMetric::Wasserstein: return MeanWasserstein(covs, mean);
        case EMetric::Identity:
        default: return MeanIdentity(covs, mean);
    }
}
//---------------------------------------------------------------------------------------------------
//---------------------------------------------------------------------------------------------------
// Placeholder implementation: Pham's approximate joint diagonalization (AJD) is not implemented here,
// the epsilon and maxIter parameters are ignored and the identity matrix is returned via MeanIdentity.
bool AJDPham(const std::vector<Eigen::MatrixXd>& covs, Eigen::MatrixXd& ajd, double /*epsilon*/, const int /*maxIter*/)
{
    MeanIdentity(covs, ajd);
    return true;
}
//---------------------------------------------------------------------------------------------------
//---------------------------------------------------------------------------------------------------
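// Riemannian (geometric) mean, estimated by a fixed-point iteration: starting from the Euclidean mean,
// repeat M <- M^{1/2} * exp(nu/K * sum_i log(M^{-1/2} * Ci * M^{-1/2})) * M^{1/2} until the norm of the
// tangent-space average, the step size nu, or the iteration budget stops the loop.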
bool MeanRiemann(const std::vector<Eigen::MatrixXd>& covs, Eigen::MatrixXd& mean)
{
    const size_t k = covs.size(), n = covs[0].rows();  // Number of Matrix & Features => K & N
    size_t i = 0;                                      // Iteration counter => i
    double nu = 1.0,                                   // Coefficient change => nu
           tau = std::numeric_limits<double>::max(),   // Coefficient change criterion => tau
           crit = std::numeric_limits<double>::max();  // Current change => crit
    if (!MeanEuclidian(covs, mean)) { return false; }  // Initial Mean
    while (i < ITER_MAX && EPSILON < crit && EPSILON < nu)  // Stopping criterion
    {
        i++;  // Iteration Criterion
        const Eigen::MatrixXd sC = mean.sqrt(), isC = sC.inverse();  // Square root & inverse square root of Mean => sC & isC
        Eigen::MatrixXd mJ = Eigen::MatrixXd::Zero(n, n);  // Change => J
        for (const auto& cov : covs) { mJ += (isC * cov * isC).log(); }  // Sum of log(isC*Ci*isC)
        mJ /= double(k);   // Normalization
        crit = mJ.norm();  // Current change criterion
        mean = sC * (nu * mJ).exp() * sC;  // Update Mean => M = sC * exp(nu*J) * sC
        const double h = nu * crit;        // Update Coefficient change
        if (h < tau)
        {
            nu *= 0.95;
            tau = h;
        }
        else { nu *= 0.5; }
    }
    return true;
}
//---------------------------------------------------------------------------------------------------
//---------------------------------------------------------------------------------------------------
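// Euclidean (arithmetic) mean: M = (1/K) * sum_i Ci.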
bool MeanEuclidian(const std::vector<Eigen::MatrixXd>& covs, Eigen::MatrixXd& mean)
{
    const size_t k = covs.size(), n = covs[0].rows();  // Number of Matrix & Features => K & N
    mean = Eigen::MatrixXd::Zero(n, n);                // Initial Mean
    for (const auto& cov : covs) { mean += cov; }      // Sum of Ci
    mean /= double(k);                                 // Normalization
    return true;
}
//---------------------------------------------------------------------------------------------------
//---------------------------------------------------------------------------------------------------
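// Log-Euclidean mean: M = exp((1/K) * sum_i log(Ci)).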
bool MeanLogEuclidian(const std::vector<Eigen::MatrixXd>& covs, Eigen::MatrixXd& mean)
{
    const size_t k = covs.size(), n = covs[0].rows();    // Number of Matrix & Features => K & N
    mean = Eigen::MatrixXd::Zero(n, n);                  // Initial Mean
    for (const auto& cov : covs) { mean += cov.log(); }  // Sum of log(Ci)
    mean = (mean / double(k)).exp();                     // Normalization
    return true;
}
//---------------------------------------------------------------------------------------------------
//---------------------------------------------------------------------------------------------------
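// Log-determinant mean, estimated by the fixed point M <- ((1/K) * sum_i ((Ci + M)/2)^{-1})^{-1},
// initialized with the Euclidean mean and stopped on the norm of the update or the iteration budget.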
bool MeanLogDet(const std::vector<Eigen::MatrixXd>& covs, Eigen::MatrixXd& mean)
{
    const size_t k = covs.size(), n = covs[0].rows();  // Number of Matrix & Features => K & N
    size_t i = 0;                                      // Iteration counter => i
    double crit = std::numeric_limits<double>::max();  // Current change => crit
    if (!MeanEuclidian(covs, mean)) { return false; }  // Initial Mean
    while (i < ITER_MAX && EPSILON < crit)  // Stopping criterion
    {
        i++;  // Iteration Criterion
        Eigen::MatrixXd mJ = Eigen::MatrixXd::Zero(n, n);  // Change => J
        for (const auto& cov : covs) { mJ += (0.5 * (cov + mean)).inverse(); }  // Sum of ((Ci+M)/2)^{-1}
        mJ = (mJ / double(k)).inverse();  // Normalization
        crit = (mJ - mean).norm();        // Current change criterion
        mean = mJ;                        // Update mean
    }
    return true;
}
//---------------------------------------------------------------------------------------------------
//---------------------------------------------------------------------------------------------------
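// Kullback mean: midpoint of the Riemannian geodesic between the Euclidean (arithmetic) mean
// and the harmonic mean.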
bool MeanKullback(const std::vector<Eigen::MatrixXd>& covs, Eigen::MatrixXd& mean)
{
    Eigen::MatrixXd m1, m2;
    if (!MeanEuclidian(covs, m1)) { return false; }
    if (!MeanHarmonic(covs, m2)) { return false; }
    if (!GeodesicRiemann(m1, m2, mean, 0.5)) { return false; }
    return true;
}
//---------------------------------------------------------------------------------------------------
//---------------------------------------------------------------------------------------------------
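// Wasserstein mean, estimated through the square root S = M^{1/2}: the new mean is
// M <- (1/K) * sum_i (S * Ci * S)^{1/2}, and convergence is measured on the change of S.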
bool MeanWasserstein(const std::vector<Eigen::MatrixXd>& covs, Eigen::MatrixXd& mean)
{
    const size_t k = covs.size(), n = covs[0].rows();  // Number of Matrix & Features => K & N
    size_t i = 0;                                      // Iteration counter => i
    double crit = std::numeric_limits<double>::max();  // Current change => crit
    if (!MeanEuclidian(covs, mean)) { return false; }  // Initial Mean
    Eigen::MatrixXd sC = mean.sqrt();                  // Square root of Mean => sC
    while (i < ITER_MAX && EPSILON < crit)  // Stopping criterion
    {
        i++;  // Iteration Criterion
        Eigen::MatrixXd mJ = Eigen::MatrixXd::Zero(n, n);  // Change => J
        for (const auto& cov : covs) { mJ += (sC * cov * sC).sqrt(); }  // Sum of sqrt(sC*Ci*sC)
        mJ /= double(k);                       // Normalization
        const Eigen::MatrixXd sJ = mJ.sqrt();  // Square root of change => sJ
        crit = (sJ - sC).norm();               // Current change criterion
        sC = sJ;                               // Update sC
    }
    mean = sC * sC;  // Un-square root
    return true;
}
//---------------------------------------------------------------------------------------------------
//---------------------------------------------------------------------------------------------------
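// AJD-based log-Euclidean (ALE) mean: starting from an approximate joint diagonalizer C of the input
// set (here AJDPham, currently a placeholder returning the identity), C is refined with the diagonal of
// exp((1/K) * sum_i log(C^T * Ci * C)) until that update is close to the identity in Riemannian distance,
// then the mean is reconstructed as C^{-T} * exp(J) * C^{-1}.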
bool MeanALE(const std::vector<Eigen::MatrixXd>& covs, Eigen::MatrixXd& mean)
{
    const size_t k = covs.size(), n = covs[0].rows();  // Number of Matrix & Features => K & N
    size_t i = 0;                                      // Iteration counter => i
    double crit = std::numeric_limits<double>::max();  // Change criterion => crit
    if (!AJDPham(covs, mean)) { return false; }        // Initial Mean
    Eigen::MatrixXd mJ;                                // Change
    while (i < ITER_MAX && EPSILON < crit)  // Stopping criterion
    {
        i++;  // Iteration Criterion
        mJ = Eigen::MatrixXd::Zero(n, n);  // Change => J
        for (const auto& cov : covs) { mJ += (mean.transpose() * cov * mean).log(); }  // Sum of log(C^T*Ci*C)
        mJ /= double(k);  // Normalization
        Eigen::MatrixXd update = mJ.exp().diagonal().asDiagonal();  // Update Form => U
        mean = mean * update.sqrt().inverse();                      // Update Mean M = M * U^{-1/2}
        crit = DistanceRiemann(Eigen::MatrixXd::Identity(n, n), update);
    }
    mJ = Eigen::MatrixXd::Zero(n, n);  // Last Change => J
    for (const auto& cov : covs) { mJ += (mean.transpose() * cov * mean).log(); }  // Sum of log(C^T*Ci*C)
    mJ /= double(k);  // Normalization
    Eigen::MatrixXd mA = mean.inverse();
    mean = mA.transpose() * mJ.exp() * mA;
    return true;
}
//---------------------------------------------------------------------------------------------------
//---------------------------------------------------------------------------------------------------
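// Harmonic mean: M = ((1/K) * sum_i Ci^{-1})^{-1}.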
bool MeanHarmonic(const std::vector<Eigen::MatrixXd>& covs, Eigen::MatrixXd& mean)
{
    const size_t k = covs.size(), n = covs[0].rows();        // Number of Matrix & Features => K & N
    mean = Eigen::MatrixXd::Zero(n, n);                      // Initial Mean
    for (const auto& cov : covs) { mean += cov.inverse(); }  // Sum of Inverse
    mean = (mean / double(k)).inverse();                     // Normalization and inverse
    return true;
}
//---------------------------------------------------------------------------------------------------
//---------------------------------------------------------------------------------------------------
bool MeanIdentity(const std::vector<Eigen::MatrixXd>& covs, Eigen::MatrixXd& mean)
{
    mean = Eigen::MatrixXd::Identity(covs[0].rows(), covs[0].cols());
    return true;
}
//---------------------------------------------------------------------------------------------------
}  // namespace Geometry