#ifndef EIGEN_CXX11_TENSOR_TENSOR_TRACE_H
#define EIGEN_CXX11_TENSOR_TENSOR_TRACE_H

namespace Eigen {

/** \class TensorTrace
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor Trace class.
  */

namespace internal {

template<typename Dims, typename XprType>
struct traits<TensorTraceOp<Dims, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
  static const int Layout = XprTraits::Layout;
};
template<typename Dims, typename XprType>
struct eval<TensorTraceOp<Dims, XprType>, Eigen::Dense>
{
  typedef const TensorTraceOp<Dims, XprType>& type;
};
template<typename Dims, typename XprType>
struct nested<TensorTraceOp<Dims, XprType>, 1, typename eval<TensorTraceOp<Dims, XprType> >::type>
{
  typedef TensorTraceOp<Dims, XprType> type;
};

} // end namespace internal
template<typename Dims, typename XprType>
class TensorTraceOp : public TensorBase<TensorTraceOp<Dims, XprType> >
{
  public:
    typedef typename Eigen::internal::traits<TensorTraceOp>::Scalar Scalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorTraceOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorTraceOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorTraceOp>::Index Index;
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTraceOp(const XprType& expr, const Dims& dims)
        : m_xpr(expr), m_dims(dims) {}

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const Dims& dims() const { return m_dims; }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; }
  protected:
    typename XprType::Nested m_xpr;
    const Dims m_dims;
};
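
// Illustrative usage (a minimal sketch, not part of this header): a
// TensorTraceOp is normally built through TensorBase::trace(). Assuming a
// rank-3 tensor whose last two dimensions are square, tracing them yields a
// rank-1 result:
//
//   Eigen::Tensor<float, 3> t(2, 4, 4);
//   t.setRandom();
//   Eigen::array<Eigen::Index, 2> dims = {1, 2};
//   // result(i) == sum over j of t(i, j, j)
//   Eigen::Tensor<float, 1> result = t.trace(dims);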
// Eval as rvalue
template<typename Dims, typename ArgType, typename Device>
struct TensorEvaluator<const TensorTraceOp<Dims, ArgType>, Device>
{
  typedef TensorTraceOp<Dims, ArgType> XprType;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static const int NumReducedDims = internal::array_size<Dims>::value;
  static const int NumOutputDims = NumInputDims - NumReducedDims;
  typedef typename XprType::Index Index;
  typedef DSizes<Index, NumOutputDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,
    RawAccess = false
  };

  //===- Tensor block evaluation strategy (see TensorBlock.h) -----------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===------------------------------------------------------------------===//
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_traceDim(1), m_device(device)
  {
    EIGEN_STATIC_ASSERT((NumOutputDims >= 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
    EIGEN_STATIC_ASSERT((NumReducedDims >= 2) || ((NumReducedDims == 0) && (NumInputDims == 0)), YOU_MADE_A_PROGRAMMING_MISTAKE);
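
    // Note (explanatory, not in the upstream source): tracing a single
    // dimension is not meaningful, so either at least two dimensions are
    // reduced or the expression is a no-op on a rank-0 tensor. Tracing all
    // dimensions (e.g. dims {0, 1} of a matrix) is allowed: NumOutputDims is
    // then 0 and the result is a scalar.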
    for (int i = 0; i < NumInputDims; ++i) {
      m_reduced[i] = false;
    }
    const Dims& op_dims = op.dims();
    for (int i = 0; i < NumReducedDims; ++i) {
      eigen_assert(op_dims[i] >= 0);
      eigen_assert(op_dims[i] < NumInputDims);
      m_reduced[op_dims[i]] = true;
    }
    // All of the specified trace dimensions must be distinct.
    int num_distinct_reduce_dims = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (m_reduced[i]) {
        ++num_distinct_reduce_dims;
      }
    }
    eigen_assert(num_distinct_reduce_dims == NumReducedDims);
    // Compute the dimensions of the result.
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();

    int output_index = 0;
    int reduced_index = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (m_reduced[i]) {
        m_reducedDims[reduced_index] = input_dims[i];
        if (reduced_index > 0) {
          // All of the traced dimensions must have the same size.
          eigen_assert(m_reducedDims[0] == m_reducedDims[reduced_index]);
        }
        ++reduced_index;
      }
      else {
        m_dimensions[output_index] = input_dims[i];
        ++output_index;
      }
    }

    if (NumReducedDims != 0) {
      m_traceDim = m_reducedDims[0];
    }
    // Compute the output strides.
    if (NumOutputDims > 0) {
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        m_outputStrides[0] = 1;
        for (int i = 1; i < NumOutputDims; ++i) {
          m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
        }
      }
      else {
        m_outputStrides.back() = 1;
        for (int i = NumOutputDims - 2; i >= 0; --i) {
          m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
        }
      }
    }
    // Compute the input strides, then split them into the strides of the
    // preserved dimensions and the strides of the traced dimensions.
    if (NumInputDims > 0) {
      array<Index, NumInputDims> input_strides;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        input_strides[0] = 1;
        for (int i = 1; i < NumInputDims; ++i) {
          input_strides[i] = input_strides[i - 1] * input_dims[i - 1];
        }
      }
      else {
        input_strides.back() = 1;
        for (int i = NumInputDims - 2; i >= 0; --i) {
          input_strides[i] = input_strides[i + 1] * input_dims[i + 1];
        }
      }

      output_index = 0;
      reduced_index = 0;
      for (int i = 0; i < NumInputDims; ++i) {
        if (m_reduced[i]) {
          m_reducedStrides[reduced_index] = input_strides[i];
          ++reduced_index;
        }
        else {
          m_preservedStrides[output_index] = input_strides[i];
          ++output_index;
        }
      }
    }
  }
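
  // Worked example (explanatory note with assumed values, not in the upstream
  // source): for a ColMajor tensor of shape (2, 3, 3) traced over dims {1, 2},
  // input_strides == {1, 2, 6}, so m_preservedStrides == {1} and
  // m_reducedStrides == {2, 6}. The output has m_dimensions == {2},
  // m_outputStrides == {1}, and m_traceDim == 3.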
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
    return m_dimensions;
  }
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    CoeffReturnType result = internal::cast<int, CoeffReturnType>(0);

    // Combined stride that advances every traced dimension by one step.
    Index index_stride = 0;
    for (int i = 0; i < NumReducedDims; ++i) {
      index_stride += m_reducedStrides[i];
    }

    // If the trace is requested along all dimensions, the starting index is 0.
    Index cur_index = 0;
    if (NumOutputDims != 0) {
      cur_index = firstInput(index);
    }
    for (Index i = 0; i < m_traceDim; ++i) {
      result += m_impl.coeff(cur_index);
      cur_index += index_stride;
    }

    return result;
  }
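
  // Worked example (explanatory note with assumed values, not in the upstream
  // source): for a ColMajor 4x4 matrix traced over dims {0, 1},
  // index_stride == 1 + 4 == 5, so the loop visits linear indices 0, 5, 10, 15,
  // i.e. the diagonal entries t(0,0), t(1,1), t(2,2), t(3,3).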
  // Each output coefficient is itself a sum over the traced dimensions, so
  // packets are assembled from scalar coeff() calls rather than loaded directly.
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
    eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());

    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index + i);
    }
    PacketReturnType result = internal::ploadt<PacketReturnType, LoadMode>(values);
    return result;
  }
#ifdef EIGEN_USE_SYCL
  // Binds the placeholder accessors to a SYCL command-group handler.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler& cgh) const {
    m_impl.bind(cgh);
  }
#endif
 protected:
  // Given an output index, returns the first input index used to compute the
  // corresponding trace.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
    Index startInput = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumOutputDims - 1; i > 0; --i) {
        const Index idx = index / m_outputStrides[i];
        startInput += idx * m_preservedStrides[i];
        index -= idx * m_outputStrides[i];
      }
      startInput += index * m_preservedStrides[0];
    }
    else {
      for (int i = 0; i < NumOutputDims - 1; ++i) {
        const Index idx = index / m_outputStrides[i];
        startInput += idx * m_preservedStrides[i];
        index -= idx * m_outputStrides[i];
      }
      startInput += index * m_preservedStrides[NumOutputDims - 1];
    }
    return startInput;
  }
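
  // Worked example (explanatory note with assumed values, not in the upstream
  // source): continuing the (2, 3, 3) ColMajor tensor traced over dims {1, 2},
  // m_outputStrides == {1} and m_preservedStrides == {1}, so firstInput(1) == 1:
  // the trace for output entry 1 starts at t(1, 0, 0) and then advances by
  // index_stride == 2 + 6 == 8 through t(1, 1, 1) and t(1, 2, 2).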
  Dimensions m_dimensions;
  TensorEvaluator<ArgType, Device> m_impl;
  // Size of the shared traced dimension.
  Index m_traceDim;
  const Device EIGEN_DEVICE_REF m_device;
  array<bool, NumInputDims> m_reduced;
  array<Index, NumReducedDims> m_reducedDims;
  array<Index, NumOutputDims> m_outputStrides;
  array<Index, NumReducedDims> m_reducedStrides;
  array<Index, NumOutputDims> m_preservedStrides;
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_TRACE_H