github.com/johnnyeven/libtools@v0.0.0-20191126065708-61829c1adf46/third_party/nccl/nccl_configure.bzl (about)

     1  # -*- Python -*-
     2  """Repository rule for NCCL configuration.
     3  
     4  `nccl_configure` depends on the following environment variables:
     5  
     6    * `TF_NCCL_VERSION`: Installed NCCL version or empty to build from source.
     7    * `NCCL_INSTALL_PATH` (deprecated): The installation path of the NCCL library.
     8    * `NCCL_HDR_PATH` (deprecated): The installation path of the NCCL header 
     9      files.
    10    * `TF_CUDA_PATHS`: The base paths to look for CUDA and cuDNN. Default is
    11      `/usr/local/cuda,usr/`.
    12  
    13  """
    14  
    15  load(
    16      "//third_party/gpus:cuda_configure.bzl",
    17      "compute_capabilities",
    18      "enable_cuda",
    19      "find_cuda_config",
    20      "get_cpu_value",
    21  )
    22  
# Names of the environment variables this rule reads (directly or via the
# helpers loaded from cuda_configure.bzl).
_CUDA_TOOLKIT_PATH = "CUDA_TOOLKIT_PATH"
_NCCL_HDR_PATH = "NCCL_HDR_PATH"  # deprecated: path of the NCCL header files
_NCCL_INSTALL_PATH = "NCCL_INSTALL_PATH"  # deprecated: path of the NCCL library
_TF_CUDA_COMPUTE_CAPABILITIES = "TF_CUDA_COMPUTE_CAPABILITIES"
_TF_NCCL_VERSION = "TF_NCCL_VERSION"
_TF_NEED_CUDA = "TF_NEED_CUDA"

# Prefixes of the version #defines in nccl.h.
# NOTE(review): not referenced anywhere in the visible portion of this file;
# presumably kept for header-version probing elsewhere — confirm before removal.
_DEFINE_NCCL_MAJOR = "#define NCCL_MAJOR"
_DEFINE_NCCL_MINOR = "#define NCCL_MINOR"
_DEFINE_NCCL_PATCH = "#define NCCL_PATCH"
    33  
# BUILD file written when CUDA is disabled or the platform is unsupported:
# empty :LICENSE and :nccl targets keep `bazel query` and dependent labels
# resolvable without providing a real library.
_NCCL_DUMMY_BUILD_CONTENT = """
filegroup(
  name = "LICENSE",
  visibility = ["//visibility:public"],
)

cc_library(
  name = "nccl",
  visibility = ["//visibility:public"],
)
"""

# BUILD file written when NCCL is built from source: forwards :nccl and
# :LICENSE to the open-source @nccl_archive external repository.
_NCCL_ARCHIVE_BUILD_CONTENT = """
filegroup(
  name = "LICENSE",
  data = ["@nccl_archive//:LICENSE.txt"],
  visibility = ["//visibility:public"],
)

alias(
  name = "nccl",
  actual = "@nccl_archive//:nccl",
  visibility = ["//visibility:public"],
)
"""
    59  
    60  def _label(file):
    61      return Label("//third_party/nccl:{}".format(file))
    62  
    63  def _nccl_configure_impl(repository_ctx):
    64      """Implementation of the nccl_configure repository rule."""
    65      if (not enable_cuda(repository_ctx) or
    66          get_cpu_value(repository_ctx) not in ("Linux", "FreeBSD")):
    67          # Add a dummy build file to make bazel query happy.
    68          repository_ctx.file("BUILD", _NCCL_DUMMY_BUILD_CONTENT)
    69          return
    70  
    71      nccl_version = ""
    72      if _TF_NCCL_VERSION in repository_ctx.os.environ:
    73          nccl_version = repository_ctx.os.environ[_TF_NCCL_VERSION].strip()
    74          nccl_version = nccl_version.split(".")[0]
    75  
    76      if nccl_version == "":
    77          # Alias to open source build from @nccl_archive.
    78          repository_ctx.file("BUILD", _NCCL_ARCHIVE_BUILD_CONTENT)
    79  
    80          # TODO(csigg): implement and reuse in cuda_configure.bzl.
    81          gpu_architectures = [
    82              "sm_" + capability.replace(".", "")
    83              for capability in compute_capabilities(repository_ctx)
    84          ]
    85  
    86          # Round-about way to make the list unique.
    87          gpu_architectures = dict(zip(gpu_architectures, gpu_architectures)).keys()
    88          repository_ctx.template("build_defs.bzl", _label("build_defs.bzl.tpl"), {
    89              "%{gpu_architectures}": str(gpu_architectures),
    90          })
    91      else:
    92          # Create target for locally installed NCCL.
    93          config = find_cuda_config(repository_ctx, ["nccl"])
    94          config_wrap = {
    95              "%{nccl_version}": config["nccl_version"],
    96              "%{nccl_header_dir}": config["nccl_include_dir"],
    97              "%{nccl_library_dir}": config["nccl_library_dir"],
    98          }
    99          repository_ctx.template("BUILD", _label("system.BUILD.tpl"), config_wrap)
   100  
# Keep `environ` in sync with every variable read by _nccl_configure_impl and
# by the helpers it calls (find_cuda_config, compute_capabilities, ...):
# a change to any listed variable re-triggers this repository rule.
nccl_configure = repository_rule(
    implementation = _nccl_configure_impl,
    environ = [
        _CUDA_TOOLKIT_PATH,
        _NCCL_HDR_PATH,
        _NCCL_INSTALL_PATH,
        _TF_NCCL_VERSION,
        _TF_CUDA_COMPUTE_CAPABILITIES,
        _TF_NEED_CUDA,
        "TF_CUDA_PATHS",
    ],
)
"""Detects and configures the NCCL configuration.

Add the following to your WORKSPACE file:

```python
nccl_configure(name = "local_config_nccl")
```

Args:
  name: A unique name for this workspace rule.
"""