      1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
      2 
      3 Licensed under the Apache License, Version 2.0 (the "License");
      4 you may not use this file except in compliance with the License.
      5 You may obtain a copy of the License at
      6 
      7     http://www.apache.org/licenses/LICENSE-2.0
      8 
      9 Unless required by applicable law or agreed to in writing, software
     10 distributed under the License is distributed on an "AS IS" BASIS,
     11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 See the License for the specific language governing permissions and
     13 limitations under the License.
     14 ==============================================================================*/
     15 
     16 #ifndef TENSORFLOW_COMPILER_XLA_SERVICE_PLATFORM_UTIL_H_
     17 #define TENSORFLOW_COMPILER_XLA_SERVICE_PLATFORM_UTIL_H_
     18 
     19 #include <string>
     20 #include <vector>
     21 
     22 #include "tensorflow/compiler/xla/statusor.h"
     23 #include "tensorflow/compiler/xla/types.h"
     24 #include "tensorflow/core/platform/macros.h"
     25 #include "tensorflow/core/platform/stream_executor_no_cuda.h"
     26 #include "tensorflow/core/platform/types.h"
     27 
     28 namespace xla {
     29 
// Utilities for querying platforms and devices used by XLA.
//
// All members are static; instances are never created (copy and assignment
// are disallowed below).
class PlatformUtil {
 public:
  // Returns the platforms present on the system and supported by XLA.
  //
  // Note that, even if a platform is present with zero devices, if we *do* have
  // compilation support for it, it will be returned in this sequence.
  static StatusOr<std::vector<perftools::gputools::Platform*>>
  GetSupportedPlatforms();

  // Convenience function which returns the default supported platform for
  // tests. If exactly one supported platform is present, then this platform is
  // the default platform. If exactly two platforms are present and one of them
  // is the interpreter platform, then the other platform is the default
  // platform. Otherwise returns an error.
  static StatusOr<perftools::gputools::Platform*> GetDefaultPlatform();

  // Convenience function which returns the sole supported platform. If
  // exactly one supported platform is present, then this platform is the
  // default platform. Otherwise returns an error.
  static StatusOr<perftools::gputools::Platform*> GetSolePlatform();

  // Returns the platform with the given name. Returns an error if there is
  // no such platform.
  static StatusOr<perftools::gputools::Platform*> GetPlatform(
      const string& platform_name);

  // Returns exactly one platform that does NOT have the given name. Returns an
  // error if there is no such platform, or if there are multiple such
  // platforms.
  static StatusOr<perftools::gputools::Platform*> GetPlatformExceptFor(
      const string& platform_name);

  // Returns a vector of StreamExecutors for the given platform. The vector is
  // indexed by device ordinal (the device numbering used by StreamExecutor).
  // If an element is nullptr, then the device is present but not supported by
  // XLA.
  //
  // If the platform has no visible devices, a not-found error is returned.
  static StatusOr<std::vector<perftools::gputools::StreamExecutor*>>
  GetStreamExecutors(perftools::gputools::Platform* platform);

 private:
  // Static-utility class: prevent construction by value semantics.
  TF_DISALLOW_COPY_AND_ASSIGN(PlatformUtil);
};
     73 
     74 }  // namespace xla
     75 
     76 #endif  // TENSORFLOW_COMPILER_XLA_SERVICE_PLATFORM_UTIL_H_
     77