Tensor.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_H

namespace Eigen {
template<typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
{
  public:
    typedef Tensor<Scalar_, NumIndices_, Options_, IndexType_> Self;
    typedef TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> > Base;
    typedef typename Eigen::internal::nested<Self>::type Nested;
    typedef typename internal::traits<Self>::StorageKind StorageKind;
    typedef typename internal::traits<Self>::Index Index;
    typedef Scalar_ Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef typename Base::CoeffReturnType CoeffReturnType;

    enum {
      IsAligned = bool(EIGEN_MAX_ALIGN_BYTES > 0) && !(Options_ & DontAlign),
      Layout = Options_ & RowMajor ? RowMajor : ColMajor,
      CoordAccess = true,
      RawAccess = true
    };

    static const int Options = Options_;
    static const int NumIndices = NumIndices_;
    typedef DSizes<Index, NumIndices_> Dimensions;

  protected:
    TensorStorage<Scalar, Dimensions, Options> m_storage;

#ifdef EIGEN_HAS_SFINAE
    // Trait that distinguishes "normal" indices (an array<Index, NumIndices>
    // or a single integer) from custom index containers.
    template<typename CustomIndices>
    struct isOfNormalIndex {
      static const bool is_array = internal::is_base_of<array<Index, NumIndices>, CustomIndices>::value;
      static const bool is_int = NumTraits<CustomIndices>::IsInteger;
      static const bool value = is_array || is_int;
    };
#endif
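    // Example (illustrative only, not part of the original header): with
    // SFINAE support, coeff() and coeffRef() also accept any index container
    // exposing an integer operator[]; MyIndices below is hypothetical.
    //   struct MyIndices { int idx[3]; int operator[](int i) const { return idx[i]; } };
    //   MyIndices ix = {{0, 1, 2}};
    //   float v = tensor.coeff(ix);  // converted via internal::customIndices2Array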

  public:
    // Metadata
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }

    // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED work,
    // because that macro uses base().coeffRef() and we don't yet implement
    // a similar class hierarchy.
    inline Self& base() { return *this; }
    inline const Self& base() const { return *this; }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be
      // equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(CustomIndices& indices) const
    {
      return coeff(internal::customIndices2Array<Index, NumIndices>(indices));
    }
#endif

    // rank-0 (scalar) tensors
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }
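    // Example (illustrative only): a rank-0 tensor holds a single scalar.
    //   Eigen::Tensor<float, 0> s;
    //   s() = 3.14f;          // write via the zero-argument accessor
    //   float v = s();        // read it back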

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be
      // equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(CustomIndices& indices)
    {
      return coeffRef(internal::customIndices2Array<Index, NumIndices>(indices));
    }
#endif

    // rank-0 (scalar) tensors
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be
      // equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
    {
      return coeff(array<Index, 2>(i0, i1));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
    {
      return coeff(array<Index, 3>(i0, i1, i2));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
    {
      return coeff(array<Index, 4>(i0, i1, i2, i3));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
    {
      return coeff(array<Index, 5>(i0, i1, i2, i3, i4));
    }
#endif

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(CustomIndices& indices) const
    {
      return coeff(internal::customIndices2Array<Index, NumIndices>(indices));
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
    {
      return coeff(indices);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return coeff(index);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff();
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff(index);
    }
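    // Example (illustrative only): element access takes one index per dimension.
    //   Eigen::Tensor<float, 3> t(2, 3, 4);
    //   float v = t(1, 2, 3);            // parenthesis operator, rank 3
    //   Eigen::Tensor<float, 1> u(5);
    //   float w = u[2];                  // bracket operator, rank 1 only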

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be
      // equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
    {
      return coeffRef(array<Index, 2>(i0, i1));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
    {
      return coeffRef(array<Index, 3>(i0, i1, i2));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
    {
      return coeffRef(array<Index, 4>(i0, i1, i2, i3));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
    {
      return coeffRef(array<Index, 5>(i0, i1, i2, i3, i4));
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
    {
      return coeffRef(indices);
    }

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(CustomIndices& indices)
    {
      return coeffRef(internal::customIndices2Array<Index, NumIndices>(indices));
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index)
    {
      eigen_assert(index >= 0 && index < size());
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeffRef();
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index)
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor()
      : m_storage()
    {
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const Self& other)
      : m_storage(other.m_storage)
    {
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions)
      : m_storage(firstDimension, otherDimensions...)
    {
      // The number of dimensions used to construct a tensor must be equal
      // to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
#else
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1)
      : m_storage(dim1, array<Index, 1>(dim1))
    {
      EIGEN_STATIC_ASSERT(1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2)
      : m_storage(dim1*dim2, array<Index, 2>(dim1, dim2))
    {
      EIGEN_STATIC_ASSERT(2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3)
      : m_storage(dim1*dim2*dim3, array<Index, 3>(dim1, dim2, dim3))
    {
      EIGEN_STATIC_ASSERT(3 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4)
      : m_storage(dim1*dim2*dim3*dim4, array<Index, 4>(dim1, dim2, dim3, dim4))
    {
      EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5)
      : m_storage(dim1*dim2*dim3*dim4*dim5, array<Index, 5>(dim1, dim2, dim3, dim4, dim5))
    {
      EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
#endif
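    // Example (illustrative only): constructors take one extent per dimension;
    // coefficients are left uninitialized unless EIGEN_INITIALIZE_COEFFS is set.
    //   Eigen::Tensor<double, 2> m(3, 4);            // 3x4, rank 2
    //   Eigen::Tensor<double, 2> copy(m);            // deep copy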

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array<Index, NumIndices>& dimensions)
      : m_storage(internal::array_prod(dimensions), dimensions)
    {
      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, WriteAccessors>& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }
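    // Example (illustrative only): constructing from a tensor expression
    // resizes this tensor to the expression's dimensions, then evaluates it.
    //   Eigen::Tensor<float, 2> a(2, 2), b(2, 2);
    //   a.setConstant(1.0f); b.setConstant(2.0f);
    //   Eigen::Tensor<float, 2> c(a + b);            // c(i,j) == 3.0f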

#if EIGEN_HAS_RVALUE_REFERENCES
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(Self&& other)
      : m_storage(std::move(other.m_storage))
    {
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor& operator=(Self&& other)
    {
      m_storage = std::move(other.m_storage);
      return *this;
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other)
    {
      typedef TensorAssignOp<Tensor, const Tensor> Assign;
      Assign assign(*this, other);
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other);
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes> EIGEN_DEVICE_FUNC
    void resize(Index firstDimension, IndexTypes... otherDimensions)
    {
      // The number of dimensions used to resize a tensor must be equal to
      // the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
    }
#endif

    EIGEN_DEVICE_FUNC void resize(const array<Index, NumIndices>& dimensions)
    {
      Index size = Index(1);
      for (int i = 0; i < NumIndices; i++) {
        internal::check_rows_cols_for_overflow<Dynamic>::run(size, dimensions[i]);
        size *= dimensions[i];
      }
#ifdef EIGEN_INITIALIZE_COEFFS
      bool size_changed = size != this->size();
      m_storage.resize(size, dimensions);
      if (size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
#else
      m_storage.resize(size, dimensions);
#endif
    }
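    // Example (illustrative only): resize() takes one extent per dimension
    // and reallocates storage as needed; don't rely on old coefficient
    // values being preserved across the call.
    //   Eigen::Tensor<float, 3> t(2, 3, 4);
    //   t.resize(4, 3, 2);               // same rank, new extents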

    // Note: DSizes is derived from array, so this overload may be redundant;
    // it simply forwards to the array version.
    EIGEN_DEVICE_FUNC void resize(const DSizes<Index, NumIndices>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = dimensions[i];
      }
      resize(dims);
    }

    EIGEN_DEVICE_FUNC
    void resize()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      // Nothing to do: rank-0 tensors have a fixed size of one coefficient.
    }

#ifdef EIGEN_HAS_INDEX_LIST
    template <typename FirstType, typename... OtherTypes>
    EIGEN_DEVICE_FUNC
    void resize(const Eigen::IndexList<FirstType, OtherTypes...>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = static_cast<Index>(dimensions[i]);
      }
      resize(dims);
    }
#endif

#ifdef EIGEN_HAS_SFINAE
    template<typename CustomDimension,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomDimension>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(CustomDimension& dimensions)
    {
      resize(internal::customIndices2Array<Index, NumIndices>(dimensions));
    }
#endif

#ifndef EIGEN_EMULATE_CXX11_META_H
    template <typename std::ptrdiff_t... Indices>
    EIGEN_DEVICE_FUNC
    void resize(const Sizes<Indices...>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = static_cast<Index>(dimensions[i]);
      }
      resize(dims);
    }
#else
    template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
    EIGEN_DEVICE_FUNC
    void resize(const Sizes<V1, V2, V3, V4, V5>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = static_cast<Index>(dimensions[i]);
      }
      resize(dims);
    }
#endif

  protected:
    bool checkIndexRange(const array<Index, NumIndices>& indices) const
    {
      using internal::array_apply_and_reduce;
      using internal::array_zip_and_reduce;
      using internal::greater_equal_zero_op;
      using internal::logical_and_op;
      using internal::lesser_op;

      return
        // check whether the indices are all >= 0
        array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
        // check whether the indices fit within the dimensions
        array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
    {
      if (Options & RowMajor) {
        return m_storage.dimensions().IndexOfRowMajor(indices);
      } else {
        return m_storage.dimensions().IndexOfColMajor(indices);
      }
    }
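    // Example (illustrative only): for a rank-3 tensor with dimensions
    // (d0, d1, d2), the linearized offset of (i0, i1, i2) is
    //   column-major: i0 + d0 * (i1 + d1 * i2)
    //   row-major:    i2 + d2 * (i1 + d1 * i0)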
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_H
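
Example usage (a minimal sketch, not part of the header; it assumes the module
is included via <unsupported/Eigen/CXX11/Tensor>):

  #include <unsupported/Eigen/CXX11/Tensor>

  int main() {
    Eigen::Tensor<float, 3> t(2, 3, 4);     // rank-3, 2x3x4, uninitialized
    t.setZero();                            // fill with zeros
    t(1, 2, 3) = 42.0f;                     // coefficient write
    Eigen::Tensor<float, 3> u = t * 2.0f;   // expression evaluated into u
    return (t.rank() == 3 && u.size() == 24) ? 0 : 1;
  }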