TensorFixedSize.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
#define EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H

namespace Eigen {

/** \class TensorFixedSize
  * \ingroup CXX11_Tensor_Module
  *
  * \brief The fixed sized version of the tensor class.
  *
  * The fixed sized equivalent of
  *   Eigen::Tensor<float, 3> t(3, 5, 7);
  * is
  *   Eigen::TensorFixedSize<float, Sizes<3,5,7>> t;
  */
template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType>
class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> >
{
  public:
    typedef TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> Self;
    typedef TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> > Base;
    typedef typename Eigen::internal::nested<Self>::type Nested;
    typedef typename internal::traits<Self>::StorageKind StorageKind;
    typedef typename internal::traits<Self>::Index Index;
    typedef Scalar_ Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef typename Base::CoeffReturnType CoeffReturnType;

    static const int Options = Options_;

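    // Compile-time evaluator traits: a fixed-size tensor supports aligned,
    // packet, coordinate, and raw data access, but not block access.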
    enum {
      IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0),
      PacketAccess = (internal::packet_traits<Scalar>::size > 1),
      BlockAccess = false,
      PreferBlockAccess = false,
      Layout = Options_ & RowMajor ? RowMajor : ColMajor,
      CoordAccess = true,
      RawAccess = true
    };

    //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
    typedef internal::TensorBlockNotImplemented TensorBlock;
    //===--------------------------------------------------------------------===//

    typedef Dimensions_ Dimensions;
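    // The rank is encoded in the Dimensions type (e.g. Sizes<2,3,4> has rank 3).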
    static const std::size_t NumIndices = Dimensions::count;

  protected:
    TensorStorage<Scalar, Dimensions, Options> m_storage;

  public:
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }

    // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    // work, because that uses base().coeffRef() - and we don't yet
    // implement a similar class hierarchy
    inline Self& base() { return *this; }
    inline const Self& base() const { return *this; }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeff(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& coeff() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& coeffRef()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#else
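    // Without variadic templates, explicit overloads cover the common
    // ranks 2 to 5; other ranks must go through the array-based overload.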
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
    {
      if (Options&RowMajor) {
        const Index index = i1 + i0 * m_storage.dimensions()[1];
        return m_storage.data()[index];
      } else {
        const Index index = i0 + i1 * m_storage.dimensions()[0];
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
    {
      if (Options&RowMajor) {
        const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
    {
      if (Options&RowMajor) {
        const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
    {
      if (Options&RowMajor) {
        const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
        return m_storage.data()[index];
      }
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
    {
      eigen_assert(checkIndexRange(indices));
      return coeff(indices);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return coeff(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff();
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff(index);
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
    {
      if (Options&RowMajor) {
        const Index index = i1 + i0 * m_storage.dimensions()[1];
        return m_storage.data()[index];
      } else {
        const Index index = i0 + i1 * m_storage.dimensions()[0];
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
    {
      if (Options&RowMajor) {
        const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
    {
      if (Options&RowMajor) {
        const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
    {
      if (Options&RowMajor) {
        const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
        return m_storage.data()[index];
      }
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
    {
      eigen_assert(checkIndexRange(indices));
      return coeffRef(indices);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index index)
    {
      eigen_assert(index >= 0 && index < size());
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeffRef();
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator[](Index index)
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize()
      : m_storage()
    {
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize(const Self& other)
      : m_storage(other.m_storage)
    {
    }

#if EIGEN_HAS_RVALUE_REFERENCES
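    // The coefficients are stored inline, so a "move" still copies them;
    // this constructor exists mainly for interface symmetry with Tensor.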
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(Self&& other)
      : m_storage(other.m_storage)
    {
    }
#endif

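    // Constructing from an arbitrary tensor expression evaluates that
    // expression directly into this tensor's storage via TensorExecutor.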
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
    {
      typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, WriteAccessors>& other)
    {
      typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }

    // FIXME: check that the dimensions of other match the dimensions of *this.
    // Unfortunately this isn't possible yet when the rhs is an expression.
    EIGEN_TENSOR_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(TensorFixedSize)

  protected:
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE bool checkIndexRange(const array<Index, NumIndices>& /*indices*/) const
    {
      using internal::array_apply_and_reduce;
      using internal::array_zip_and_reduce;
      using internal::greater_equal_zero_op;
      using internal::logical_and_op;
      using internal::lesser_op;

      // The actual range check is currently disabled:
      // check whether the indices are all >= 0
      /* array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
      // check whether the indices fit in the dimensions
      array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());*/
      return true;
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
    {
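      // Flatten the multi-index according to the storage order. For example,
      // with dimensions Sizes<2,3> and indices {i0, i1}:
      //   ColMajor: i0 + 2 * i1
      //   RowMajor: i1 + 3 * i0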
      if (Options&RowMajor) {
        return m_storage.dimensions().IndexOfRowMajor(indices);
      } else {
        return m_storage.dimensions().IndexOfColMajor(indices);
      }
    }
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
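
A minimal usage sketch (not part of the header above), assuming the unsupported Tensor module is on the include path:

    #include <unsupported/Eigen/CXX11/Tensor>
    #include <iostream>

    int main() {
      // The fixed-size equivalent of Eigen::Tensor<float, 3> t(2, 3, 4):
      // all 2*3*4 = 24 coefficients live inline, with no heap allocation.
      Eigen::TensorFixedSize<float, Eigen::Sizes<2, 3, 4> > t;
      t.setConstant(1.0f);
      t(1, 2, 3) = 5.0f;  // coefficient access; the index count is checked at compile time

      std::cout << t.rank() << "\n";    // prints 3
      std::cout << t.size() << "\n";    // prints 24
      std::cout << t(1, 2, 3) << "\n";  // prints 5
      return 0;
    }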