HomeSort by relevance Sort by last modified time
    Searched refs:GPUDevice (Results 176 - 200 of 262) sorted by null

1 2 3 4 5 6 7 8 9 10 11

  /external/tensorflow/tensorflow/core/kernels/
pack_op.cc 35 typedef Eigen::GpuDevice GPUDevice;
116 if (std::is_same<Device, GPUDevice>::value) {
156 PackOp<GPUDevice, type>)
qr_op_impl.h 127 typedef Eigen::GpuDevice GPUDevice;
194 const GPUDevice& device = context->eigen_device<GPUDevice>();
236 functor::MatrixBandPartFunctor<GPUDevice, Scalar> band_part;
248 functor::EyeFunctor<GPUDevice, Scalar> eye;
264 functor::UnaryFunctor<GPUDevice, functor::conj<Scalar>> conj;
bias_op.cc 37 typedef Eigen::GpuDevice GPUDevice;
308 class BiasOp<GPUDevice, T> : public BinaryOp<T> {
310 typedef GPUDevice Device;
358 BiasOp<GPUDevice, type>); \
361 BiasOp<GPUDevice, type>);
367 class BiasGradOp<GPUDevice, T> : public OpKernel {
369 typedef GPUDevice Device;
415 BiasGradOp<GPUDevice, type>);
reduction_ops_max.cc 45 ReductionOp<GPUDevice, type, int32, Eigen::internal::MaxReducer<type>>); \
52 ReductionOp<GPUDevice, type, int64, Eigen::internal::MaxReducer<type>>);
reduction_ops_min.cc 45 ReductionOp<GPUDevice, type, int32, Eigen::internal::MinReducer<type>>); \
52 ReductionOp<GPUDevice, type, int64, Eigen::internal::MinReducer<type>>);
reduction_ops_sum.cc 45 ReductionOp<GPUDevice, type, int32, Eigen::internal::SumReducer<type>>); \
52 ReductionOp<GPUDevice, type, int64, Eigen::internal::SumReducer<type>>);
tensor_array_ops.cc 46 typedef Eigen::GpuDevice GPUDevice;
442 TensorArrayWriteOp<GPUDevice, type>); \
448 TensorArrayWriteOp<GPUDevice, type>); \
454 TensorArrayWriteOp<GPUDevice, type>);
531 TensorArrayReadOp<GPUDevice, type>); \
537 TensorArrayReadOp<GPUDevice, type>); \
543 TensorArrayReadOp<GPUDevice, type>);
672 if (std::is_same<Device, GPUDevice>::value) {
722 TensorArrayPackOrGatherOp<GPUDevice, type, true /* LEGACY_PACK */>);
    [all...]
dynamic_partition_op_gpu.cu.cc 55 typedef Eigen::GpuDevice GPUDevice;
79 void RangeInit(const GPUDevice& d, const T start, const T delta,
89 void MoveValues(const GPUDevice& d, int32* keys, int32* values, int32* num_runs,
102 void CallGatherKernel(const GPUDevice& d, const T* params, const int32* indices,
327 const GPUDevice& device = c->eigen_device<GPUDevice>();
360 const GPUDevice& device = c->eigen_device<GPUDevice>();
378 functor::SetZeroFunctor<GPUDevice, int32> zero_functor
    [all...]
adjust_hue_op.cc 38 typedef Eigen::GpuDevice GPUDevice;
253 class AdjustHueOp<GPUDevice> : public AdjustHueOpBase {
264 GPUDevice device = context->eigen_gpu_device();
278 AdjustHueOp<GPUDevice>);
adjust_saturation_op.cc 36 typedef Eigen::GpuDevice GPUDevice;
219 class AdjustSaturationOp<GPUDevice> : public AdjustSaturationOpBase {
230 GPUDevice device = context->eigen_gpu_device();
244 AdjustSaturationOp<GPUDevice>);
compare_and_bitpack_op.cc 34 typedef Eigen::GpuDevice GPUDevice;
172 CompareAndBitpackOp<GPUDevice, type>);
183 void CompareAndBitpack<GPUDevice, T>::operator()( \
187 extern template struct CompareAndBitpack<GPUDevice, T>;
dense_update_functor.cc 28 typedef Eigen::GpuDevice GPUDevice;
dynamic_stitch_op_gpu.cu.cc 27 using GPUDevice = Eigen::GpuDevice;
52 void DynamicStitchGPUImpl(const Eigen::GpuDevice& gpu_device,
67 const Eigen::GpuDevice& gpu_device, const int32 slice_size, \
gather_nd_op.cc 33 typedef Eigen::GpuDevice GPUDevice;
209 Index GatherNdSlice<GPUDevice, T, Index, NDIM>::operator()( \
210 const GPUDevice& d, const Index slice_size, \
215 extern template struct GatherNdSlice<GPUDevice, T, Index, NDIM>;
matrix_inverse_op.cc 88 typedef Eigen::GpuDevice GPUDevice;
139 const GPUDevice& device = context->eigen_device<GPUDevice>();
227 functor::EyeFunctor<GPUDevice, Scalar> eye;
multinomial_op.cc 39 typedef Eigen::GpuDevice GPUDevice;
176 if (std::is_same<Device, GPUDevice>::value) {
236 MultinomialOp<GPUDevice, TYPE, int32>) \
242 MultinomialOp<GPUDevice, TYPE, int64>)
quantize_and_dequantize_op.cc 34 typedef Eigen::GpuDevice GPUDevice;
232 QuantizeAndDequantizeV2Op<GPUDevice, T>); \
239 QuantizeAndDequantizeV3Op<GPUDevice, T>); \
242 QuantizeAndDequantizeOp<GPUDevice, T>);
scatter_nd_op_gpu.cu.cc 28 typedef Eigen::GpuDevice GPUDevice;
112 struct ScatterNdFunctor<GPUDevice, T, Index, op, IXDIM> {
114 const GPUDevice& d, const Index slice_size,
153 template struct functor::ScatterNdFunctor<GPUDevice, T, Index, op, IXDIM>;
topk_op.cc 37 typedef Eigen::GpuDevice GPUDevice;
250 Status TopKFunctor<GPUDevice, T>::Compute( \
255 extern template struct functor::TopKFunctor<GPUDevice, T>;
267 TopK<GPUDevice, type>) \
272 TopK<GPUDevice, type>)
sparse_tensor_dense_matmul_op.cc 30 typedef Eigen::GpuDevice GPUDevice;
96 if (std::is_same<Device, GPUDevice>::value) {
183 GPUDevice, T, Tindices, ADJ_A, \
184 ADJ_B>::Compute(const GPUDevice& d, typename TTypes<T>::Matrix out, \
189 GPUDevice, T, Tindices, ADJ_A, ADJ_B>;
215 SparseTensorDenseMatMulOp<GPUDevice, TypeT, TypeIndex>);
split_v_op.cc 45 typedef Eigen::GpuDevice GPUDevice;
283 void Run(const Eigen::GpuDevice& d, bool fixed, const T* input,
291 class SplitVOpGPU : public SplitVOpBase<GPUDevice, T, Tlen> {
293 typedef SplitVOpBase<GPUDevice, T, Tlen> Base;
353 context->eigen_device<GPUDevice>(), fixed_size,
386 functor::SplitCustom<GPUDevice, T>()(
387 context->eigen_device<GPUDevice>(), result_shaped, input_reshaped,
bincount_op.cc 32 typedef Eigen::GpuDevice GPUDevice;
130 BincountOp<GPUDevice, type>)
  /external/tensorflow/tensorflow/contrib/reduce_slice_ops/kernels/
reduce_slice_ops.cc 27 using GPUDevice = Eigen::GpuDevice;
204 ReduceSliceKernel<GPUDevice, type, index_type, \
211 ReduceSliceKernel<GPUDevice, type, index_type, \
218 ReduceSliceKernel<GPUDevice, type, index_type, \
225 ReduceSliceKernel<GPUDevice, type, index_type, \
  /external/tensorflow/tensorflow/contrib/mpi_collectives/kernels/
mpi_ops.cc 79 using GPUDevice = Eigen::GpuDevice;
87 extern template Status RingAllreduce<GPUDevice, int>(OpKernelContext*,
90 extern template Status RingAllreduce<GPUDevice, long long>(OpKernelContext*,
93 extern template Status RingAllreduce<GPUDevice, float>(OpKernelContext*,
96 extern template Status RingAllgather<GPUDevice, int>(OpKernelContext*,
100 extern template Status RingAllgather<GPUDevice, long long>(
102 extern template Status RingAllgather<GPUDevice, float>(
124 // Return true if the templated type is GPUDevice, otherwise false.
128 bool IsGPUDevice<GPUDevice>() {
    [all...]
  /external/tensorflow/tensorflow/contrib/rnn/kernels/
gru_ops.cc 25 typedef Eigen::GpuDevice GPUDevice;
390 void GRUBlockCellFprop<GPUDevice, T, true>::operator()( \
391 OpKernelContext* ctx, const GPUDevice& d, \
401 extern template struct GRUBlockCellFprop<GPUDevice, T, true>;
411 GRUCellBlockOp<GPUDevice, T, true>);
420 void GRUBlockCellBprop<GPUDevice, T, true>::operator()( \
421 OpKernelContext* ctx, const GPUDevice& d, \
434 extern template struct GRUBlockCellBprop<GPUDevice, T, true>;
444 GRUBlockCellGradOp<GPUDevice, T, true>)
    [all...]

Completed in 1434 milliseconds

1 2 3 4 5 6 7 8 9 10 11