/external/tensorflow/tensorflow/core/common_runtime/sycl/

  sycl_util.h
    34: Tensor const& src_tensor, Tensor* dst_tensor) {
    35: const size_t size = src_tensor.TotalBytes();
    37: void const* src_ptr = GetBase(&src_tensor);
    41: switch (src_tensor.dtype()) {
    73: LOG(FATAL) << "Unknown data type " << src_tensor.dtype();

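Read together, the sycl_util.h hits outline a small device copy helper: it measures the source with TotalBytes(), takes raw buffer pointers via GetBase, and switches on dtype() so the copy can be issued through a typed pointer, falling back to LOG(FATAL) for an unsupported type. Below is a minimal sketch of that pattern, not the file's actual SYCLmemcpy: the device is a template parameter assumed to expose memcpy(dst, src, bytes) (as the Eigen SYCL device does), DMAHelper::base stands in for GetBase, and only a few dtype cases are spelled out.

#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/logging.h"

namespace tensorflow {

// Sketch of the SYCLmemcpy pattern: a byte copy through a device object,
// where the switch on dtype() only picks a typed pointer for the call.
// `Device` is assumed to expose memcpy(dst, src, bytes); DMAHelper::base
// stands in for the GetBase helper used in sycl_util.h.
template <typename Device>
void CopyTensorBytes(const Device& device, Tensor const& src_tensor,
                     Tensor* dst_tensor) {
  const size_t size = src_tensor.TotalBytes();
  void* dst_ptr = DMAHelper::base(dst_tensor);
  const void* src_ptr = DMAHelper::base(&src_tensor);

#define COPY_AS(TYPE)                                    \
  device.memcpy(static_cast<TYPE*>(dst_ptr),             \
                static_cast<const TYPE*>(src_ptr), size)

  switch (src_tensor.dtype()) {
    case DT_FLOAT:  COPY_AS(float);  break;
    case DT_DOUBLE: COPY_AS(double); break;
    case DT_INT32:  COPY_AS(int32);  break;
    case DT_INT64:  COPY_AS(int64);  break;
    default:
      LOG(FATAL) << "Unknown data type " << src_tensor.dtype();
  }
#undef COPY_AS
}

}  // namespace tensorflow

The per-type cases do not change how much is copied; `size` is already the full byte count, so the switch exists only to hand the device a correctly typed pointer.
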
/external/tensorflow/tensorflow/core/kernels/

  debug_ops.h
    62: const Tensor& src_tensor = context->input(0);  [variable]
    64: if (src_tensor.IsInitialized() &&
    65: DataTypeCanUseMemcpy(src_tensor.dtype()) &&
    69: OP_REQUIRES_OK(context, context->allocate_output(0, src_tensor.shape(),
    83: device, device_ctxt, &src_tensor, copied_tensor,
    88: *copied_tensor = tensor::DeepCopy(src_tensor);
    97: SYCLmemcpy(context->eigen_sycl_device(), src_tensor, copied_tensor);
    99: *copied_tensor = tensor::DeepCopy(src_tensor);
    102: *copied_tensor = tensor::DeepCopy(src_tensor);
    107: context->set_output(0, src_tensor);
    [all...]

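The debug_ops.h hits sketch a copy-or-forward decision: when the input is initialized and its dtype is memcpy-friendly, the kernel allocates output 0 with the same shape and fills it with a copy (on GPU or SYCL via a device copy, lines 83 and 97; on host via tensor::DeepCopy); otherwise it forwards the input unchanged. A host-only sketch of that branch structure, with a hypothetical kernel name and the device branches left out, might look like this:

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"

namespace tensorflow {

// Sketch of the copy-or-forward branch the debug_ops.h hits outline:
// deep-copy the input when its contents are plain memory, otherwise just
// forward the input tensor to output 0. The GPU/SYCL device-copy branches
// are omitted; the class name is purely illustrative.
class CopyOrForwardOp : public OpKernel {
 public:
  explicit CopyOrForwardOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}

  void Compute(OpKernelContext* context) override {
    const Tensor& src_tensor = context->input(0);

    if (src_tensor.IsInitialized() &&
        DataTypeCanUseMemcpy(src_tensor.dtype())) {
      // Reserve an output of the same shape, then fill it with a host-side
      // deep copy of the source tensor.
      Tensor* copied_tensor = nullptr;
      OP_REQUIRES_OK(context, context->allocate_output(0, src_tensor.shape(),
                                                       &copied_tensor));
      *copied_tensor = tensor::DeepCopy(src_tensor);
    } else {
      // Uninitialized or non-POD tensors are forwarded without copying.
      context->set_output(0, src_tensor);
    }
  }
};

}  // namespace tensorflow

Forwarding through set_output avoids an allocation and a copy for tensors that either are not yet initialized or hold non-POD contents such as DT_STRING, which DataTypeCanUseMemcpy rejects.
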
  mkl_relu_op.cc
    460: const Tensor& src_tensor = MklGetInput(context, src_index);  [variable]
    464: if (src_tensor.dims() == 0) {
    477: src_dims = TFShapeToMklDnnDims(src_tensor.shape());
    490: const T* src_data = src_tensor.flat<T>().data();
    493: src.SetUsrMem(src_md, &src_tensor);
    518: tf_shape_dst = src_tensor.shape();
    571: const Tensor& src_tensor = MklGetInput(context, src_index);  [local]
    579: int src_dims_size = src_tensor.dims();
    590: src_dims = TFShapeToMklDnnDims(src_tensor.shape());
    615: src_dims = (src_tensor.dims() == 4
    730: const Tensor& src_tensor = MklGetInput(context, src_index);  [local]
    760: const Tensor& src_tensor = MklGetInput(context, src_index);  [local]
    793: const Tensor& src_tensor = MklGetInput(context, src_index);  [local]
    827: const Tensor& src_tensor = MklGetInput(context, src_index);  [local]
    865: const Tensor& src_tensor = MklGetInput(context, src_index);  [local]
    898: const Tensor& src_tensor = MklGetInput(context, src_index);  [local]
    937: const Tensor& src_tensor = MklGetInput(context, src_index);  [local]
    968: const Tensor& src_tensor = MklGetInput(context, src_index);  [local]
    1009: const Tensor& src_tensor = MklGetInput(context, src_index);  [local]
    1047: const Tensor& src_tensor = MklGetInput(context, src_index);  [local]
    [all...]

  mkl_softmax_op.cc
    50: // src_tensor now points to the 0-th input of global data struct "context"
    52: const Tensor& src_tensor = MklGetInput(context, src_idx);  [variable]
    57: // src_dims is the dimension of src_tensor
    61: : src_tensor.shape();
    127: src.SetUsrMem(src_md, &src_tensor);

  mkl_fused_batch_norm_op.cc
    518: const Tensor& src_tensor = MklGetInput(context, kSrcIndex);  [variable]
    532: src_tensor.shape().DebugString()));
    534: tf_shape_src = src_tensor.shape();
    535: OP_REQUIRES(context, src_tensor.dims() == 4,
    537: src_tensor.shape().DebugString()));
    613: : TFShapeToMklDnnDimsInNCHW(src_tensor.shape(), tensor_format_);
    645: const T* src_data = src_tensor.flat<T>().data();
    647: src.SetUsrMem(src_md, &src_tensor);
    666: : src_tensor.shape().dims();
    842: const Tensor& src_tensor = MklGetInput(context, kSrcIndex);  [variable]
    [all...]

  mkl_lrn_op.cc
    95: const Tensor& src_tensor = MklGetInput(context, kIdxInput);  [variable]
    107: MklDefaultToEigen(context, src_tensor);
    112: ConvertMklToTF<T>(context, src_tensor, src_dnn_shape);
    124: TensorShape tf_output_shape = src_tensor.shape();
    133: src_dnn_data.SetUsrMem(src_md, &src_tensor);
    289: const Tensor& src_tensor = MklGetInput(context, kIdxInput);  [local]
    296: FastBoundsCheck(src_tensor.NumElements(),
    300: OP_REQUIRES(context, src_tensor.dims() == 4,
    303: FastBoundsCheck(src_tensor.NumElements(),

  mkl_conv_grad_filter_ops.cc
    380: const Tensor& src_tensor = MklGetInput(context, kInputIdx);  [local]
    396: TensorShape src_tf_shape = MakeInputTfShape(context, src_tensor);
    540: src.SetUsrMem(fwd_src_md, &src_tensor);
    544: src_data = static_cast<T*>(const_cast<T*>(src_tensor.flat<T>().data()));

  mkl_conv_grad_input_ops.cc
    317: const Tensor& src_tensor = MklGetInput(context, kInputIdx);  [local]
    333: TensorShape src_tf_shape = MakeInputTfShape(context, src_tensor);

  mkl_conv_ops.cc
    923: const Tensor& src_tensor = MklGetInput(context, kInputIndex_Src);  [variable]
    [all...]

/external/tensorflow/tensorflow/lite/kernels/

  while.cc
    40: const TfLiteTensor* src_tensor =  [local]
    42: std::vector<int> dims(src_tensor->dims->data,
    43: src_tensor->dims->data + src_tensor->dims->size);
    46: dst_tensor->type = src_tensor->type;
    61: const TfLiteTensor* src_tensor =  [local]
    64: TF_LITE_ENSURE_EQ(context, src_tensor->bytes, dst_tensor->bytes);
    65: memcpy(dst_tensor->data.raw, src_tensor->data.raw, src_tensor->bytes);

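The while.cc hits split the tensor hand-off across the loop boundary into two steps that live in separate helpers: one propagates the source tensor's dims and type onto the destination, the other, once both tensors are allocated, checks that the byte counts agree and memcpys the raw data. The sketch below folds both steps into one hypothetical function (CopyTensorShapeAndData) and uses the stock TfLiteContext::ResizeTensor, TfLiteIntArrayCopy, and TF_LITE_ENSURE_* APIs rather than the exact code in while.cc:

#include <cstring>

#include "tensorflow/lite/c/common.h"

// Sketch (not the actual while.cc helpers): propagate shape and type from
// src to dst, then copy the raw bytes once both tensors are allocated.
TfLiteStatus CopyTensorShapeAndData(TfLiteContext* context,
                                    const TfLiteTensor* src_tensor,
                                    TfLiteTensor* dst_tensor) {
  // Mirror the source shape onto the destination; ResizeTensor takes
  // ownership of the TfLiteIntArray.
  TfLiteIntArray* dims = TfLiteIntArrayCopy(src_tensor->dims);
  dst_tensor->type = src_tensor->type;
  TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, dst_tensor, dims));

  // With identical shape and type the byte counts must agree; then the
  // payload can be copied verbatim.
  TF_LITE_ENSURE_EQ(context, src_tensor->bytes, dst_tensor->bytes);
  std::memcpy(dst_tensor->data.raw, src_tensor->data.raw, src_tensor->bytes);
  return kTfLiteOk;
}

Checking src_tensor->bytes against dst_tensor->bytes before the memcpy is what makes the raw copy safe: with identical type and dims the sizes must match, and any mismatch fails fast instead of overrunning the destination buffer.
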
/external/tensorflow/tensorflow/core/common_runtime/

  hierarchical_tree_broadcaster.h
    68: // Sends `src_tensor` asynchronously from this device to device at `dst_rank`
    71: const Tensor* src_tensor, const StatusCallback& done);

  hierarchical_tree_broadcaster.cc
    400: const Tensor* src_tensor,
    415: src_tensor, col_ctx_->device_locality, done);