#ifndef EIGEN_CXX11_TENSOR_TENSOR_SCAN_H
#define EIGEN_CXX11_TENSOR_TENSOR_SCAN_H

namespace Eigen {

namespace internal {

template <typename Op, typename XprType>
struct traits<TensorScanOp<Op, XprType> >
    : public traits<XprType> {
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template <typename Op, typename XprType>
struct eval<TensorScanOp<Op, XprType>, Eigen::Dense> {
  typedef const TensorScanOp<Op, XprType>& type;
};

template <typename Op, typename XprType>
struct nested<TensorScanOp<Op, XprType>, 1,
              typename eval<TensorScanOp<Op, XprType> >::type> {
  typedef TensorScanOp<Op, XprType> type;
};

}  // end namespace internal
/** \class TensorScanOp
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor scan class: computes a cumulative (inclusive or exclusive)
  * reduction of the input expression along a single axis.
  */
template <typename Op, typename XprType>
class TensorScanOp
    : public TensorBase<TensorScanOp<Op, XprType>, ReadOnlyAccessors> {
 public:
  typedef typename Eigen::internal::traits<TensorScanOp>::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorScanOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorScanOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorScanOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorScanOp(
      const XprType& expr, const Index& axis, bool exclusive = false, const Op& op = Op())
      : m_expr(expr), m_axis(axis), m_accumulator(op), m_exclusive(exclusive) {}

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Index axis() const { return m_axis; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const XprType& expression() const { return m_expr; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Op accumulator() const { return m_accumulator; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  bool exclusive() const { return m_exclusive; }

 protected:
  typename XprType::Nested m_expr;
  const Index m_axis;
  const Op m_accumulator;
  const bool m_exclusive;
};
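
// Usage sketch (illustrative, not part of this header): a TensorScanOp is
// normally reached through the cumulative helpers on TensorBase such as
// cumsum()/cumprod() rather than constructed directly. Assuming a 2-D
// float tensor:
//
//   Eigen::Tensor<float, 2> t(3, 4);
//   t.setRandom();
//   Eigen::Tensor<float, 2> incl = t.cumsum(0);        // inclusive scan along axis 0
//   Eigen::Tensor<float, 2> excl = t.cumsum(0, true);  // exclusive scan along axis 0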

namespace internal {

// Scan a single line of the output along the scan axis, one coefficient at a time.
template <typename Self>
EIGEN_STRONG_INLINE void ReduceScalar(Self& self, Index offset,
                                      typename Self::CoeffReturnType* data) {
  // Compute the scan along the axis, starting at the given offset.
  typename Self::CoeffReturnType accum = self.accumulator().initialize();
  if (self.stride() == 1) {
    if (self.exclusive()) {
      for (Index curr = offset; curr < offset + self.size(); ++curr) {
        data[curr] = self.accumulator().finalize(accum);
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
      }
    } else {
      for (Index curr = offset; curr < offset + self.size(); ++curr) {
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
        data[curr] = self.accumulator().finalize(accum);
      }
    }
  } else {
    if (self.exclusive()) {
      for (Index idx3 = 0; idx3 < self.size(); idx3++) {
        Index curr = offset + idx3 * self.stride();
        data[curr] = self.accumulator().finalize(accum);
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
      }
    } else {
      for (Index idx3 = 0; idx3 < self.size(); idx3++) {
        Index curr = offset + idx3 * self.stride();
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
        data[curr] = self.accumulator().finalize(accum);
      }
    }
  }
}
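
// Worked example for ReduceScalar above (illustrative; sum reducer,
// size() == 4, stride() == 1, input 1, 2, 3, 4):
//   inclusive scan writes 1, 3, 6, 10  -- each output includes the current input
//   exclusive scan writes 0, 1, 3, 6   -- each output is the running total *before*
//                                         the current input (0 being the reducer's
//                                         initialize() value)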

template <typename Self>
EIGEN_STRONG_INLINE void ReducePacket(Self& self, Index offset,
                                      typename Self::CoeffReturnType* data) {
  using Scalar = typename Self::CoeffReturnType;
  using Packet = typename Self::PacketReturnType;
  // Compute the scan along the axis, starting at the calculated offset.
  Packet accum = self.accumulator().template initializePacket<Packet>();
  if (self.stride() == 1) {
    if (self.exclusive()) {
      for (Index curr = offset; curr < offset + self.size(); ++curr) {
        internal::pstoreu<Scalar, Packet>(data + curr, self.accumulator().finalizePacket(accum));
        self.accumulator().reducePacket(self.inner().template packet<Unaligned>(curr), &accum);
      }
    } else {
      for (Index curr = offset; curr < offset + self.size(); ++curr) {
        self.accumulator().reducePacket(self.inner().template packet<Unaligned>(curr), &accum);
        internal::pstoreu<Scalar, Packet>(data + curr, self.accumulator().finalizePacket(accum));
      }
    }
  } else {
    if (self.exclusive()) {
      for (Index idx3 = 0; idx3 < self.size(); idx3++) {
        const Index curr = offset + idx3 * self.stride();
        internal::pstoreu<Scalar, Packet>(data + curr, self.accumulator().finalizePacket(accum));
        self.accumulator().reducePacket(self.inner().template packet<Unaligned>(curr), &accum);
      }
    } else {
      for (Index idx3 = 0; idx3 < self.size(); idx3++) {
        const Index curr = offset + idx3 * self.stride();
        self.accumulator().reducePacket(self.inner().template packet<Unaligned>(curr), &accum);
        internal::pstoreu<Scalar, Packet>(data + curr, self.accumulator().finalizePacket(accum));
      }
    }
  }
}
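
// Note on the packet path above (informal): the ReduceBlock callers below only
// invoke ReducePacket() for offsets where self.stride() >= PacketSize, so the
// PacketSize adjacent elements loaded at each step belong to PacketSize
// *different* scan lines at the same position along the scan axis; each SIMD
// lane therefore carries its own independent running accumulator.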

template <typename Self, bool Vectorize, bool Parallel>
struct ReduceBlock {
  EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1,
                                      typename Self::CoeffReturnType* data) {
    for (Index idx2 = 0; idx2 < self.stride(); idx2++) {
      // Calculate the starting offset for the scan.
      Index offset = idx1 + idx2;
      ReduceScalar(self, offset, data);
    }
  }
};

// Specialization for vectorized reduction.
template <typename Self>
struct ReduceBlock<Self, true, false> {
  EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1,
                                      typename Self::CoeffReturnType* data) {
    using Packet = typename Self::PacketReturnType;
    const int PacketSize = internal::unpacket_traits<Packet>::size;
    Index idx2 = 0;
    for (; idx2 + PacketSize <= self.stride(); idx2 += PacketSize) {
      // Calculate the starting offset for the packet scan.
      Index offset = idx1 + idx2;
      ReducePacket(self, offset, data);
    }
    for (; idx2 < self.stride(); idx2++) {
      // Calculate the starting offset for the scalar tail.
      Index offset = idx1 + idx2;
      ReduceScalar(self, offset, data);
    }
  }
};

// Single threaded CPU implementation of scan.
template <typename Self, typename Reducer, typename Device,
          bool Vectorize =
              (TensorEvaluator<typename Self::ChildTypeNoConst, Device>::PacketAccess &&
               internal::reducer_traits<Reducer, Device>::PacketAccess)>
struct ScanLauncher {
  void operator()(Self& self, typename Self::CoeffReturnType* data) {
    Index total_size = internal::array_prod(self.dimensions());

    // Step over the outer blocks (one per combination of indices outside the
    // scan axis and its inner dimensions); ReduceBlock then scans every line
    // inside the block. Splitting the iteration this way avoids an integer
    // division and modulo in the inner loop.
    for (Index idx1 = 0; idx1 < total_size; idx1 += self.stride() * self.size()) {
      ReduceBlock<Self, Vectorize, false> block_reducer;
      block_reducer(self, idx1, data);
    }
  }
};
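
// Layout example (illustrative): for a ColMajor tensor of dimensions (4, 5, 6)
// scanned along axis 1 we get stride() == 4, size() == 5 and total_size == 120.
// The outer loop above advances idx1 in steps of stride() * size() == 20 (one
// step per index of the trailing dimension), and ReduceBlock then scans the
// stride() == 4 independent lines of length size() == 5 inside each block.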

#ifdef EIGEN_USE_THREADS

// Adjust block_size so that neighboring thread shards do not share cachelines
// of the output (kBlockAlignment covers two 64-byte cache lines).
EIGEN_STRONG_INLINE Index AdjustBlockSize(Index item_size, Index block_size) {
  EIGEN_CONSTEXPR Index kBlockAlignment = 128;
  const Index items_per_cacheline =
      numext::maxi<Index>(1, kBlockAlignment / item_size);
  return items_per_cacheline * divup(block_size, items_per_cacheline);
}
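
// Example (illustrative): with item_size == sizeof(float) == 4,
// items_per_cacheline is 128 / 4 == 32, so a requested block_size of 100 is
// rounded up to divup(100, 32) * 32 == 128 items.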

template <typename Self>
struct ReduceBlock<Self, true, true> {
  EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1,
                                      typename Self::CoeffReturnType* data) {
    using Scalar = typename Self::CoeffReturnType;
    using Packet = typename Self::PacketReturnType;
    const int PacketSize = internal::unpacket_traits<Packet>::size;
    Index num_scalars = self.stride();
    Index num_packets = 0;
    if (self.stride() >= PacketSize) {
      num_packets = self.stride() / PacketSize;
      self.device().parallelFor(
          num_packets,
          TensorOpCost(PacketSize * self.size(), PacketSize * self.size(),
                       16 * PacketSize * self.size(), true, PacketSize),
          // Make the shard size large enough that two neighboring threads
          // won't write to the same cacheline of `data`.
          [=](Index blk_size) {
            return AdjustBlockSize(PacketSize * sizeof(Scalar), blk_size);
          },
          [&](Index first, Index last) {
            for (Index packet = first; packet < last; ++packet) {
              const Index idx2 = packet * PacketSize;
              ReducePacket(self, idx1 + idx2, data);
            }
          });
      num_scalars -= num_packets * PacketSize;
    }
    self.device().parallelFor(
        num_scalars, TensorOpCost(self.size(), self.size(), 16 * self.size()),
        // Make the shard size large enough that two neighboring threads
        // won't write to the same cacheline of `data`.
        [=](Index blk_size) {
          return AdjustBlockSize(sizeof(Scalar), blk_size);
        },
        [&](Index first, Index last) {
          for (Index scalar = first; scalar < last; ++scalar) {
            const Index idx2 = num_packets * PacketSize + scalar;
            ReduceScalar(self, idx1 + idx2, data);
          }
        });
  }
};

template <typename Self>
struct ReduceBlock<Self, false, true> {
  EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1,
                                      typename Self::CoeffReturnType* data) {
    using Scalar = typename Self::CoeffReturnType;
    self.device().parallelFor(
        self.stride(), TensorOpCost(self.size(), self.size(), 16 * self.size()),
        // Make the shard size large enough that two neighboring threads
        // won't write to the same cacheline of `data`.
        [=](Index blk_size) {
          return AdjustBlockSize(sizeof(Scalar), blk_size);
        },
        [&](Index first, Index last) {
          for (Index idx2 = first; idx2 < last; ++idx2) {
            ReduceScalar(self, idx1 + idx2, data);
          }
        });
  }
};

// Multi-threaded scan implementation for the ThreadPoolDevice.
template <typename Self, typename Reducer, bool Vectorize>
struct ScanLauncher<Self, Reducer, ThreadPoolDevice, Vectorize> {
  void operator()(Self& self, typename Self::CoeffReturnType* data) {
    using Scalar = typename Self::CoeffReturnType;
    using Packet = typename Self::PacketReturnType;
    const int PacketSize = internal::unpacket_traits<Packet>::size;
    const Index total_size = internal::array_prod(self.dimensions());
    const Index inner_block_size = self.stride() * self.size();
    bool parallelize_by_outer_blocks = (total_size >= (self.stride() * inner_block_size));

    if ((parallelize_by_outer_blocks && total_size <= 4096) ||
        (!parallelize_by_outer_blocks && self.stride() < PacketSize)) {
      // The problem is too small (or too awkwardly shaped) to be worth
      // parallelizing: fall back to the single threaded implementation.
      ScanLauncher<Self, Reducer, DefaultDevice, Vectorize> launcher;
      launcher(self, data);
      return;
    }

    if (parallelize_by_outer_blocks) {
      // Parallelize over outer blocks.
      const Index num_outer_blocks = total_size / inner_block_size;
      self.device().parallelFor(
          num_outer_blocks,
          TensorOpCost(inner_block_size, inner_block_size,
                       16 * PacketSize * inner_block_size, Vectorize,
                       PacketSize),
          [=](Index blk_size) {
            return AdjustBlockSize(inner_block_size * sizeof(Scalar), blk_size);
          },
          [&](Index first, Index last) {
            for (Index idx1 = first; idx1 < last; ++idx1) {
              ReduceBlock<Self, Vectorize, false> block_reducer;
              block_reducer(self, idx1 * inner_block_size, data);
            }
          });
    } else {
      // Parallelize over the stride dimension inside each block.
      ReduceBlock<Self, Vectorize, true> block_reducer;
      for (Index idx1 = 0; idx1 < total_size;
           idx1 += self.stride() * self.size()) {
        block_reducer(self, idx1, data);
      }
    }
  }
};
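
// Heuristic example (illustrative): for a ColMajor tensor of dimensions
// (4, 5, 4096) scanned along axis 1, stride() == 4, size() == 5,
// inner_block_size == 20 and total_size == 81920. Since
// total_size >= stride() * inner_block_size (81920 >= 80) and
// total_size > 4096, the launcher parallelizes over the
// total_size / inner_block_size == 4096 independent outer blocks.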
#endif  // EIGEN_USE_THREADS

#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC))

// GPU implementation of scan: each GPU thread runs one full sequential scan
// line; only the independent scan lines are processed in parallel.
template <typename Self, typename Reducer>
__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ScanKernel(
    Self self, Index total_size, typename Self::CoeffReturnType* data) {
  // Compute the offset as in the CPU version.
  Index val = threadIdx.x + blockIdx.x * blockDim.x;
  Index offset = (val / self.stride()) * self.stride() * self.size() + val % self.stride();

  if (offset + (self.size() - 1) * self.stride() < total_size) {
    // Compute the scan along the axis, starting at the calculated offset.
    typename Self::CoeffReturnType accum = self.accumulator().initialize();
    for (Index idx = 0; idx < self.size(); idx++) {
      Index curr = offset + idx * self.stride();
      if (self.exclusive()) {
        data[curr] = self.accumulator().finalize(accum);
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
      } else {
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
        data[curr] = self.accumulator().finalize(accum);
      }
    }
  }
  __syncthreads();
}

template <typename Self, typename Reducer, bool Vectorize>
struct ScanLauncher<Self, Reducer, GpuDevice, Vectorize> {
  void operator()(const Self& self, typename Self::CoeffReturnType* data) {
    Index total_size = internal::array_prod(self.dimensions());
    Index num_blocks = (total_size / self.size() + 63) / 64;
    Index block_size = 64;

    LAUNCH_GPU_KERNEL((ScanKernel<Self, Reducer>), num_blocks, block_size, 0,
                      self.device(), self, total_size, data);
  }
};
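
// Launch-shape example (illustrative): for total_size == 4096 and size() == 8
// there are 512 independent scan lines, so num_blocks == (512 + 63) / 64 == 8
// blocks of 64 threads each -- one GPU thread per scan line (when the line
// count is not a multiple of 64, the surplus threads are filtered out by the
// bounds check inside ScanKernel).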
#endif  // EIGEN_USE_GPU && (EIGEN_GPUCC)

}  // end namespace internal

// Eval as rvalue
template <typename Op, typename ArgType, typename Device>
struct TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> {

  typedef TensorScanOp<Op, ArgType> XprType;
  typedef typename XprType::Index Index;
  typedef const ArgType ChildTypeNoConst;
  typedef const ArgType ChildType;
  static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> Self;
  typedef StorageMemory<Scalar, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    IsAligned = false,
    PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess = false,
    PreferBlockAccess = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,
    RawAccess = true
  };

  typedef internal::TensorBlockNotImplemented TensorBlock;

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device),
        m_device(device),
        m_exclusive(op.exclusive()),
        m_accumulator(op.accumulator()),
        m_size(m_impl.dimensions()[op.axis()]),
        m_stride(1), m_consume_dim(op.axis()),
        m_output(NULL) {

    // Accumulating a scalar isn't supported.
    EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
    eigen_assert(op.axis() >= 0 && op.axis() < NumDims);

    // Compute the stride of the scan axis.
    const Dimensions& dims = m_impl.dimensions();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = 0; i < op.axis(); ++i) {
        m_stride = m_stride * dims[i];
      }
    } else {
      // dims can only be indexed through unsigned integers, so use an unsigned
      // index type to avoid spurious compiler warnings.
      unsigned int axis = internal::convert_index<unsigned int>(op.axis());
      for (unsigned int i = NumDims - 1; i > axis; --i) {
        m_stride = m_stride * dims[i];
      }
    }
  }
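
  // Stride example (illustrative): for dimensions (2, 3, 4) and op.axis() == 2,
  // a ColMajor tensor yields m_stride == 2 * 3 == 6, i.e. consecutive entries
  // along the scan axis are 6 elements apart in memory; for a RowMajor tensor
  // scanned along axis 0 the same dimensions give m_stride == 4 * 3 == 12.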

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
    return m_impl.dimensions();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& stride() const {
    return m_stride;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& consume_dim() const {
    return m_consume_dim;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& size() const {
    return m_size;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Op& accumulator() const {
    return m_accumulator;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool exclusive() const {
    return m_exclusive;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& inner() const {
    return m_impl;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Device& device() const {
    return m_device;
  }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
    m_impl.evalSubExprsIfNeeded(NULL);
    internal::ScanLauncher<Self, Op, Device> launcher;
    if (data) {
      // Scan directly into the caller-provided buffer.
      launcher(*this, data);
      return false;
    }

    // Otherwise allocate a temporary buffer, scan into it, and serve
    // coeff()/packet() reads from that buffer.
    const Index total_size = internal::array_prod(dimensions());
    m_output = static_cast<EvaluatorPointerType>(
        m_device.get((Scalar*) m_device.allocate_temp(total_size * sizeof(Scalar))));
    launcher(*this, m_output);
    return true;
  }

  template <int LoadMode>
  EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const {
    return internal::ploadt<PacketReturnType, LoadMode>(m_output + index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const {
    return m_output;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_output[index];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
    return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
  }

  EIGEN_STRONG_INLINE void cleanup() {
    if (m_output) {
      m_device.deallocate_temp(m_output);
      m_output = NULL;
    }
    m_impl.cleanup();
  }

#ifdef EIGEN_USE_SYCL
  // Bind placeholder accessors to a command group handler for SYCL.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler& cgh) const {
    m_impl.bind(cgh);
    m_output.bind(cgh);
  }
#endif

 protected:
  TensorEvaluator<ArgType, Device> m_impl;
  const Device EIGEN_DEVICE_REF m_device;
  const bool m_exclusive;
  Op m_accumulator;
  const Index m_size;
  Index m_stride;
  Index m_consume_dim;
  EvaluatorPointerType m_output;
};

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_SCAN_H