Home | History | Annotate | Download | only in core
      1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
      2 
      3 Licensed under the Apache License, Version 2.0 (the "License");
      4 you may not use this file except in compliance with the License.
      5 You may obtain a copy of the License at
      6 
      7     http://www.apache.org/licenses/LICENSE-2.0
      8 
      9 Unless required by applicable law or agreed to in writing, software
     10 distributed under the License is distributed on an "AS IS" BASIS,
     11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 See the License for the specific language governing permissions and
     13 limitations under the License.
     14 ==============================================================================*/
     15 
     16 // Must be included first.
     17 #include "tensorflow/python/lib/core/numpy.h"
     18 
     19 #include <vector>
     20 
     21 #include "tensorflow/c/c_api.h"
     22 #include "tensorflow/core/lib/core/errors.h"
     23 #include "tensorflow/core/platform/mutex.h"
     24 #include "tensorflow/python/lib/core/bfloat16.h"
     25 #include "tensorflow/python/lib/core/ndarray_tensor_bridge.h"
     26 
     27 namespace tensorflow {
     28 
     29 // Mutex used to serialize accesses to cached vector of pointers to python
     30 // arrays to be dereferenced.
     31 static mutex* DelayedDecrefLock() {
     32   static mutex* decref_lock = new mutex;
     33   return decref_lock;
     34 }
     35 
     36 // Caches pointers to numpy arrays which need to be dereferenced.
     37 static std::vector<void*>* DecrefCache() {
     38   static std::vector<void*>* decref_cache = new std::vector<void*>;
     39   return decref_cache;
     40 }
     41 
     42 // Destructor passed to TF_NewTensor when it reuses a numpy buffer. Stores a
     43 // pointer to the pyobj in a buffer to be dereferenced later when we're actually
     44 // holding the GIL.
     45 void DelayedNumpyDecref(void* data, size_t len, void* obj) {
     46   mutex_lock ml(*DelayedDecrefLock());
     47   DecrefCache()->push_back(obj);
     48 }
     49 
     50 // Actually dereferences cached numpy arrays. REQUIRES being called while
     51 // holding the GIL.
     52 void ClearDecrefCache() {
     53   std::vector<void*> cache_copy;
     54   {
     55     mutex_lock ml(*DelayedDecrefLock());
     56     cache_copy.swap(*DecrefCache());
     57   }
     58   for (void* obj : cache_copy) {
     59     Py_DECREF(reinterpret_cast<PyObject*>(obj));
     60   }
     61 }
     62 
     63 // Structure which keeps a reference to a Tensor alive while numpy has a pointer
     64 // to it.
     65 struct TensorReleaser {
     66   // Python macro to include standard members.
     67   PyObject_HEAD
     68 
     69       // Destructor responsible for releasing the memory.
     70       std::function<void()>* destructor;
     71 };
     72 
     73 extern PyTypeObject TensorReleaserType;
     74 
     75 static void TensorReleaser_dealloc(TensorReleaser* self) {
     76   (*self->destructor)();
     77   delete self->destructor;
     78   TensorReleaserType.tp_free(self);
     79 }
     80 
// Type object for TensorReleaser. NOTE: this is a positional aggregate
// initialization of PyTypeObject, so entries must stay in slot-declaration
// order for the Python version being built against; the trailing slots not
// listed here are zero-initialized. The slot names in the comments below are
// from the Python 2/early-3 layout — NOTE(review): some (e.g. tp_print,
// tp_compare) were repurposed or removed in later Python 3 releases; confirm
// against the target Python's object.h if upgrading.
PyTypeObject TensorReleaserType = {
    PyVarObject_HEAD_INIT(nullptr, 0) /* head init */
    "tensorflow_wrapper",             /* tp_name */
    sizeof(TensorReleaser),           /* tp_basicsize */
    0,                                /* tp_itemsize */
    /* methods */
    (destructor)TensorReleaser_dealloc, /* tp_dealloc */
    nullptr,                            /* tp_print */
    nullptr,                            /* tp_getattr */
    nullptr,                            /* tp_setattr */
    nullptr,                            /* tp_compare */
    nullptr,                            /* tp_repr */
    nullptr,                            /* tp_as_number */
    nullptr,                            /* tp_as_sequence */
    nullptr,                            /* tp_as_mapping */
    nullptr,                            /* tp_hash */
    nullptr,                            /* tp_call */
    nullptr,                            /* tp_str */
    nullptr,                            /* tp_getattro */
    nullptr,                            /* tp_setattro */
    nullptr,                            /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,                 /* tp_flags */
    "Wrapped TensorFlow Tensor",        /* tp_doc */
    nullptr,                            /* tp_traverse */
    nullptr,                            /* tp_clear */
    nullptr,                            /* tp_richcompare */
};
    108 
    109 Status TF_DataType_to_PyArray_TYPE(TF_DataType tf_datatype,
    110                                    int* out_pyarray_type) {
    111   switch (tf_datatype) {
    112     case TF_HALF:
    113       *out_pyarray_type = NPY_FLOAT16;
    114       break;
    115     case TF_FLOAT:
    116       *out_pyarray_type = NPY_FLOAT32;
    117       break;
    118     case TF_DOUBLE:
    119       *out_pyarray_type = NPY_FLOAT64;
    120       break;
    121     case TF_INT32:
    122       *out_pyarray_type = NPY_INT32;
    123       break;
    124     case TF_UINT32:
    125       *out_pyarray_type = NPY_UINT32;
    126       break;
    127     case TF_UINT8:
    128       *out_pyarray_type = NPY_UINT8;
    129       break;
    130     case TF_UINT16:
    131       *out_pyarray_type = NPY_UINT16;
    132       break;
    133     case TF_INT8:
    134       *out_pyarray_type = NPY_INT8;
    135       break;
    136     case TF_INT16:
    137       *out_pyarray_type = NPY_INT16;
    138       break;
    139     case TF_INT64:
    140       *out_pyarray_type = NPY_INT64;
    141       break;
    142     case TF_UINT64:
    143       *out_pyarray_type = NPY_UINT64;
    144       break;
    145     case TF_BOOL:
    146       *out_pyarray_type = NPY_BOOL;
    147       break;
    148     case TF_COMPLEX64:
    149       *out_pyarray_type = NPY_COMPLEX64;
    150       break;
    151     case TF_COMPLEX128:
    152       *out_pyarray_type = NPY_COMPLEX128;
    153       break;
    154     case TF_STRING:
    155       *out_pyarray_type = NPY_OBJECT;
    156       break;
    157     case TF_RESOURCE:
    158       *out_pyarray_type = NPY_VOID;
    159       break;
    160     // TODO(keveman): These should be changed to NPY_VOID, and the type used for
    161     // the resulting numpy array should be the custom struct types that we
    162     // expect for quantized types.
    163     case TF_QINT8:
    164       *out_pyarray_type = NPY_INT8;
    165       break;
    166     case TF_QUINT8:
    167       *out_pyarray_type = NPY_UINT8;
    168       break;
    169     case TF_QINT16:
    170       *out_pyarray_type = NPY_INT16;
    171       break;
    172     case TF_QUINT16:
    173       *out_pyarray_type = NPY_UINT16;
    174       break;
    175     case TF_QINT32:
    176       *out_pyarray_type = NPY_INT32;
    177       break;
    178     case TF_BFLOAT16:
    179       *out_pyarray_type = Bfloat16NumpyType();
    180       break;
    181     default:
    182       return errors::Internal("Tensorflow type ", tf_datatype,
    183                               " not convertible to numpy dtype.");
    184   }
    185   return Status::OK();
    186 }
    187 
    188 Status ArrayFromMemory(int dim_size, npy_intp* dims, void* data, DataType dtype,
    189                        std::function<void()> destructor, PyObject** result) {
    190   int size = 1;
    191   for (int i = 0; i < dim_size; ++i) {
    192     size *= dims[i];
    193   }
    194   if (dtype == DT_STRING || dtype == DT_RESOURCE || size == 0) {
    195     return errors::FailedPrecondition(
    196         "Cannot convert strings, resources, or empty Tensors.");
    197   }
    198 
    199   int type_num = -1;
    200   Status s =
    201       TF_DataType_to_PyArray_TYPE(static_cast<TF_DataType>(dtype), &type_num);
    202   if (!s.ok()) {
    203     return s;
    204   }
    205 
    206   PyObject* np_array =
    207       PyArray_SimpleNewFromData(dim_size, dims, type_num, data);
    208   if (PyType_Ready(&TensorReleaserType) == -1) {
    209     return errors::Unknown("Python type initialization failed.");
    210   }
    211   TensorReleaser* releaser = reinterpret_cast<TensorReleaser*>(
    212       TensorReleaserType.tp_alloc(&TensorReleaserType, 0));
    213   releaser->destructor = new std::function<void()>(std::move(destructor));
    214   if (PyArray_SetBaseObject(reinterpret_cast<PyArrayObject*>(np_array),
    215                             reinterpret_cast<PyObject*>(releaser)) == -1) {
    216     Py_DECREF(releaser);
    217     return errors::Unknown("Python array refused to use memory.");
    218   }
    219   *result = PyArray_Return(reinterpret_cast<PyArrayObject*>(np_array));
    220   return Status::OK();
    221 }
    222 
    223 }  // namespace tensorflow
    224