// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_RANDOMSETTER_H
#define EIGEN_RANDOMSETTER_H

#if defined(EIGEN_GOOGLEHASH_SUPPORT)
// Ensure the ::google namespace exists, required for checking existence of
// ::google::dense_hash_map and ::google::sparse_hash_map.
namespace google {}
#endif

namespace Eigen {

/** Represents a std::map
  *
  * \see RandomSetter
  */
template<typename Scalar> struct StdMapTraits
{
  typedef int KeyType;
  typedef std::map<KeyType,Scalar> Type;
  enum {
    IsSorted = 1
  };

  static void setInvalidKey(Type&, const KeyType&) {}
};

#ifdef EIGEN_UNORDERED_MAP_SUPPORT
/** Represents a std::unordered_map
  *
  * To use it you need to both define EIGEN_UNORDERED_MAP_SUPPORT and include the unordered_map header
  * file yourself, making sure that unordered_map is defined in the std namespace.
  *
  * For instance, with a recent version of gcc you can either enable the C++0x standard (-std=c++0x) or do:
  * \code
  * #include <tr1/unordered_map>
  * #define EIGEN_UNORDERED_MAP_SUPPORT
  * namespace std {
  *   using std::tr1::unordered_map;
  * }
  * \endcode
  *
  * \see RandomSetter
  */
template<typename Scalar> struct StdUnorderedMapTraits
{
  typedef int KeyType;
  typedef std::unordered_map<KeyType,Scalar> Type;
  enum {
    IsSorted = 0
  };

  static void setInvalidKey(Type&, const KeyType&) {}
};
#endif // EIGEN_UNORDERED_MAP_SUPPORT

#if defined(EIGEN_GOOGLEHASH_SUPPORT)

namespace google {

// Namespace work-around, since sometimes dense_hash_map and sparse_hash_map
// are in the global namespace, and other times they are under ::google.
using namespace ::google;

template<typename KeyType, typename Scalar>
struct DenseHashMap {
  typedef dense_hash_map<KeyType, Scalar> type;
};

template<typename KeyType, typename Scalar>
struct SparseHashMap {
  typedef sparse_hash_map<KeyType, Scalar> type;
};

} // namespace google

/** Represents a google::dense_hash_map
  *
  * \see RandomSetter
  */
template<typename Scalar> struct GoogleDenseHashMapTraits
{
  typedef int KeyType;
  typedef typename google::DenseHashMap<KeyType,Scalar>::type Type;
  enum {
    IsSorted = 0
  };

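  // google::dense_hash_map requires an "empty key" that is never used as a real key;
  // RandomSetter passes a key value just past the valid key range (see its constructor).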
  static void setInvalidKey(Type& map, const KeyType& k)
  { map.set_empty_key(k); }
};

/** Represents a google::sparse_hash_map
  *
  * \see RandomSetter
  */
template<typename Scalar> struct GoogleSparseHashMapTraits
{
  typedef int KeyType;
  typedef typename google::SparseHashMap<KeyType,Scalar>::type Type;
  enum {
    IsSorted = 0
  };

  static void setInvalidKey(Type&, const KeyType&) {}
};
#endif

/** \class RandomSetter
  *
  * \brief The RandomSetter is a wrapper object allowing one to set/update a sparse matrix with random access
  *
  * \tparam SparseMatrixType the type of the sparse matrix we are updating
  * \tparam MapTraits a traits class representing the map implementation used for the temporary sparse storage.
  *                  Its default value depends on the system.
  * \tparam OuterPacketBits defines the number of rows (or columns) managed by a single map object,
  *                        as a power-of-two exponent.
  *
  * This class temporarily represents a sparse matrix object using a generic map implementation allowing for
  * efficient random access. The conversion from the compressed representation to a hash_map object is performed
  * in the RandomSetter constructor, while the sparse matrix is updated back at destruction time. This strategy
  * suggests the use of nested blocks as in this example:
  *
  * \code
  * SparseMatrix<double> m(rows,cols);
  * {
  *   RandomSetter<SparseMatrix<double> > w(m);
  *   // don't use m but w instead with read/write random access to the coefficients:
  *   for(;;)
  *     w(rand(),rand()) = rand();
  * }
  * // when w is deleted, the data are copied back to m
  * // and m is ready to use.
  * \endcode
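  *
  * The map implementation and the packet size can also be selected explicitly through the second and
  * third template parameters. For instance, a minimal sketch forcing StdMapTraits with 2^4 rows/columns
  * per map could look like:
  * \code
  * SparseMatrix<double> m(rows,cols);
  * {
  *   RandomSetter<SparseMatrix<double>, StdMapTraits, 4> w(m);
  *   w(0,0) = 1.0;
  * }
  * \endcode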
  *
  * Since hash_map objects are not fully sorted, representing a full matrix as a single hash_map would
  * involve a big and costly sort to update the compressed matrix back. To overcome this issue, a RandomSetter
  * uses multiple hash_maps, each representing 2^OuterPacketBits columns or rows according to the storage order.
  * To reach optimal performance, this value should be adjusted according to the average number of nonzeros
  * per row/column.
  *
  * The possible values for the template parameter MapTraits are:
  *  - \b StdMapTraits: corresponds to std::map. (does not perform very well)
  *  - \b GnuHashMapTraits: corresponds to __gnu_cxx::hash_map (available only with GCC)
  *  - \b GoogleDenseHashMapTraits: corresponds to google::dense_hash_map (best efficiency, reasonable memory consumption)
  *  - \b GoogleSparseHashMapTraits: corresponds to google::sparse_hash_map (best memory consumption, relatively good performance)
  *
  * The default map implementation depends on availability, and the preferred order is:
  * GoogleDenseHashMapTraits, GnuHashMapTraits, and finally StdMapTraits.
  *
  * For performance and memory consumption reasons it is highly recommended to use one of
  * Google's hash_map implementations. To enable support for them, you must define
  * EIGEN_GOOGLEHASH_SUPPORT. This will include both <google/dense_hash_map> and
  * <google/sparse_hash_map> for you.
  *
  * \see https://github.com/sparsehash/sparsehash
  */
template<typename SparseMatrixType,
         template <typename T> class MapTraits =
#if defined(EIGEN_GOOGLEHASH_SUPPORT)
          GoogleDenseHashMapTraits
#elif defined(_HASH_MAP)
          GnuHashMapTraits
#else
          StdMapTraits
#endif
         ,int OuterPacketBits = 6>
class RandomSetter
{
    typedef typename SparseMatrixType::Scalar Scalar;
    typedef typename SparseMatrixType::StorageIndex StorageIndex;

    struct ScalarWrapper
    {
      ScalarWrapper() : value(0) {}
      Scalar value;
    };
    typedef typename MapTraits<ScalarWrapper>::KeyType KeyType;
    typedef typename MapTraits<ScalarWrapper>::Type HashMapType;
    static const int OuterPacketMask = (1 << OuterPacketBits) - 1;
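    // When the chosen map is not sorted, the setter stores the matrix with the opposite storage
    // order of the target; the coefficients are then sorted back at destruction time.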
    enum {
      SwapStorage = 1 - MapTraits<ScalarWrapper>::IsSorted,
      TargetRowMajor = (SparseMatrixType::Flags & RowMajorBit) ? 1 : 0,
      SetterRowMajor = SwapStorage ? 1-TargetRowMajor : TargetRowMajor
    };

  public:

    /** Constructs a random setter object from the sparse matrix \a target
      *
      * Note that the initial values of \a target are imported. If you want to re-set
      * a sparse matrix from scratch, then you must set it to zero first using the
      * setZero() function.
      */
    inline RandomSetter(SparseMatrixType& target)
      : mp_target(&target)
    {
      const Index outerSize = SwapStorage ? target.innerSize() : target.outerSize();
      const Index innerSize = SwapStorage ? target.outerSize() : target.innerSize();
      m_outerPackets = outerSize >> OuterPacketBits;
      if (outerSize&OuterPacketMask)
        m_outerPackets += 1;
      m_hashmaps = new HashMapType[m_outerPackets];
      // compute number of bits needed to store inner indices
      Index aux = innerSize - 1;
      m_keyBitsOffset = 0;
      while (aux)
      {
        ++m_keyBitsOffset;
        aux = aux >> 1;
      }
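      // The "invalid" key is one past the largest key the (outerMinor << m_keyBitsOffset) | inner
      // encoding can produce, so it can never collide with a real entry.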
      KeyType ik = (1<<(OuterPacketBits+m_keyBitsOffset));
      for (Index k=0; k<m_outerPackets; ++k)
        MapTraits<ScalarWrapper>::setInvalidKey(m_hashmaps[k],ik);

      // insert current coeffs
      for (Index j=0; j<mp_target->outerSize(); ++j)
        for (typename SparseMatrixType::InnerIterator it(*mp_target,j); it; ++it)
          (*this)(TargetRowMajor?j:it.index(), TargetRowMajor?it.index():j) = it.value();
    }

    /** Destructor that writes the accumulated coefficients back to the target sparse matrix */
    ~RandomSetter()
    {
      KeyType keyBitsMask = (1<<m_keyBitsOffset)-1;
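      // Sorted maps (no storage swap) already yield their coefficients in increasing (outer, inner)
      // order, so they can be streamed directly into the compressed target.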
      if (!SwapStorage) // also means the map is sorted
      {
        mp_target->setZero();
        mp_target->makeCompressed();
        mp_target->reserve(nonZeros());
        Index prevOuter = -1;
        for (Index k=0; k<m_outerPackets; ++k)
        {
          const Index outerOffset = (1<<OuterPacketBits) * k;
          typename HashMapType::iterator end = m_hashmaps[k].end();
          for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
          {
            const Index outer = (it->first >> m_keyBitsOffset) + outerOffset;
            const Index inner = it->first & keyBitsMask;
            if (prevOuter!=outer)
            {
              for (Index j=prevOuter+1;j<=outer;++j)
                mp_target->startVec(j);
              prevOuter = outer;
            }
            mp_target->insertBackByOuterInner(outer, inner) = it->second.value;
          }
        }
        mp_target->finalize();
      }
      else
      {
        VectorXi positions(mp_target->outerSize());
        positions.setZero();
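        // Unsorted map with swapped storage: fill the target with a counting-sort-like strategy.
        // Pass 1 counts the entries of each outer vector, the prefix sum builds the outer index
        // pointers, and pass 2 inserts each value with a small per-vector insertion sort.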
        // pass 1
        for (Index k=0; k<m_outerPackets; ++k)
        {
          typename HashMapType::iterator end = m_hashmaps[k].end();
          for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
          {
            const Index outer = it->first & keyBitsMask;
            ++positions[outer];
          }
        }
        // prefix sum
        StorageIndex count = 0;
        for (Index j=0; j<mp_target->outerSize(); ++j)
        {
          StorageIndex tmp = positions[j];
          mp_target->outerIndexPtr()[j] = count;
          positions[j] = count;
          count += tmp;
        }
        mp_target->makeCompressed();
        mp_target->outerIndexPtr()[mp_target->outerSize()] = count;
        mp_target->resizeNonZeros(count);
        // pass 2
        for (Index k=0; k<m_outerPackets; ++k)
        {
          const Index outerOffset = (1<<OuterPacketBits) * k;
          typename HashMapType::iterator end = m_hashmaps[k].end();
          for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
          {
            const Index inner = (it->first >> m_keyBitsOffset) + outerOffset;
            const Index outer = it->first & keyBitsMask;
            // sorted insertion
            // Note that we have to deal with at most 2^OuterPacketBits unsorted coefficients,
            // moreover those 2^OuterPacketBits coeffs are likely to be sparse, and so only a
            // small fraction of them have to be sorted, whence the following simple procedure:
            Index posStart = mp_target->outerIndexPtr()[outer];
            Index i = (positions[outer]++) - 1;
            while ( (i >= posStart) && (mp_target->innerIndexPtr()[i] > inner) )
            {
              mp_target->valuePtr()[i+1] = mp_target->valuePtr()[i];
              mp_target->innerIndexPtr()[i+1] = mp_target->innerIndexPtr()[i];
              --i;
            }
            mp_target->innerIndexPtr()[i+1] = internal::convert_index<StorageIndex>(inner);
            mp_target->valuePtr()[i+1] = it->second.value;
          }
        }
      }
      delete[] m_hashmaps;
    }
    /** \returns a reference to the coefficient at given coordinates \a row, \a col */
    Scalar& operator() (Index row, Index col)
    {
      const Index outer = SetterRowMajor ? row : col;
      const Index inner = SetterRowMajor ? col : row;
      const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map
      const Index outerMinor = outer & OuterPacketMask;  // index of the inner vector in the packet
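      // the key packs the local inner-vector index (high bits) with the inner index (low bits)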
      const KeyType key = internal::convert_index<KeyType>((outerMinor<<m_keyBitsOffset) | inner);
      return m_hashmaps[outerMajor][key].value;
    }

    /** \returns the number of nonzero coefficients
      *
      * \note Depending on the underlying map/hash_map implementation,
      * this function might be quite expensive.
      */
    Index nonZeros() const
    {
      Index nz = 0;
      for (Index k=0; k<m_outerPackets; ++k)
        nz += static_cast<Index>(m_hashmaps[k].size());
      return nz;
    }


  protected:

    HashMapType* m_hashmaps;
    SparseMatrixType* mp_target;
    Index m_outerPackets;
    unsigned char m_keyBitsOffset;
};

} // end namespace Eigen

#endif // EIGEN_RANDOMSETTER_H