#!/usr/bin/env bash

set -e
set -o pipefail

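# Symlink the BUILD files from util/BUILD.d into the source tree and install the
# build_config.bzl template that the configuration steps below edit in place.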
ln -sf util/BUILD.d/* .
cp -f tensorflow/core/platform/default/build_config.bzl.templ tensorflow/core/platform/default/build_config.bzl

# Find out the absolute path to where ./configure resides
pushd `dirname $0` > /dev/null
SOURCE_BASE_DIR=`pwd -P`
popd > /dev/null

function bazel_clean_and_fetch() {
  bazel clean --expunge
  # bazel fetch //tensorflow/...
  # from https://github.com/tensorflow/tensorflow/pull/6225/files
  bazel fetch $(bazel query "//tensorflow/... -//tensorflow/examples/android/...")
}

## Set up python-related environment settings
while true; do
  fromuser=""
  if [ -z "$PYTHON_BIN_PATH" ]; then
    default_python_bin_path=$(which python || which python3  || true)
    read -p "Please specify the location of python. [Default is $default_python_bin_path]: " PYTHON_BIN_PATH
    fromuser="1"
    if [ -z "$PYTHON_BIN_PATH" ]; then
      PYTHON_BIN_PATH=$default_python_bin_path
    fi
  fi
  if [ -e "$PYTHON_BIN_PATH" ]; then
    break
  fi
  echo "Invalid python path. ${PYTHON_BIN_PATH} cannot be found" 1>&2
  if [ -z "$fromuser" ]; then
    exit 1
  fi
  PYTHON_BIN_PATH=""
  # Retry
done

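## Ask whether to build with Google Cloud Platform support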
while [ "$TF_NEED_GCP" == "" ]; do
  read -p "Do you wish to build TensorFlow with "\
"Google Cloud Platform support? [y/N] " INPUT
  case $INPUT in
    [Yy]* ) echo "Google Cloud Platform support will be enabled for "\
"TensorFlow"; TF_NEED_GCP=1;;
    [Nn]* ) echo "No Google Cloud Platform support will be enabled for "\
"TensorFlow"; TF_NEED_GCP=0;;
    "" ) echo "No Google Cloud Platform support will be enabled for "\
"TensorFlow"; TF_NEED_GCP=0;;
    * ) echo "Invalid selection: " $INPUT;;
  esac
done

if [ "$TF_NEED_GCP" == "1" ]; then
  ## Verify that libcurl header files are available.
  # Only check on Linux, since on macOS the header files are installed with Xcode.
  if [[ $(uname -a) =~ Linux ]] && [[ ! -f "/usr/include/curl/curl.h" ]]; then
    echo "ERROR: It appears that the development version of libcurl is not "\
"available. Please install the libcurl3-dev package."
    exit 1
  fi

  # Update Bazel build configuration.
  perl -pi -e "s,WITH_GCP_SUPPORT = (False|True),WITH_GCP_SUPPORT = True,s" tensorflow/core/platform/default/build_config.bzl
else
  # Update Bazel build configuration.
  perl -pi -e "s,WITH_GCP_SUPPORT = (False|True),WITH_GCP_SUPPORT = False,s" tensorflow/core/platform/default/build_config.bzl
fi

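## Ask whether to build with Hadoop File System (HDFS) support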
while [ "$TF_NEED_HDFS" == "" ]; do
  read -p "Do you wish to build TensorFlow with "\
"Hadoop File System support? [y/N] " INPUT
  case $INPUT in
    [Yy]* ) echo "Hadoop File System support will be enabled for "\
"TensorFlow"; TF_NEED_HDFS=1;;
    [Nn]* ) echo "No Hadoop File System support will be enabled for "\
"TensorFlow"; TF_NEED_HDFS=0;;
    "" ) echo "No Hadoop File System support will be enabled for "\
"TensorFlow"; TF_NEED_HDFS=0;;
    * ) echo "Invalid selection: " $INPUT;;
  esac
done

if [ "$TF_NEED_HDFS" == "1" ]; then
  # Update Bazel build configuration.
  perl -pi -e "s,WITH_HDFS_SUPPORT = (False|True),WITH_HDFS_SUPPORT = True,s" tensorflow/core/platform/default/build_config.bzl
else
  # Update Bazel build configuration.
  perl -pi -e "s,WITH_HDFS_SUPPORT = (False|True),WITH_HDFS_SUPPORT = False,s" tensorflow/core/platform/default/build_config.bzl
fi

## Find swig path
if [ -z "$SWIG_PATH" ]; then
  SWIG_PATH=`type -p swig 2> /dev/null`
fi
if [[ ! -e "$SWIG_PATH" ]]; then
  echo "Can't find swig.  Ensure swig is in \$PATH or set \$SWIG_PATH."
  exit 1
fi
echo "$SWIG_PATH" > tensorflow/tools/swig/swig_path

# Invoke python_config and set up symlinks to python includes
./util/python/python_config.sh --setup "$PYTHON_BIN_PATH"

# Run gen_git_source to create links that let bazel track dependencies for
# git hash propagation
GEN_GIT_SOURCE=tensorflow/tools/git/gen_git_source.py
chmod a+x ${GEN_GIT_SOURCE}
${PYTHON_BIN_PATH} ${GEN_GIT_SOURCE} --configure ${SOURCE_BASE_DIR}

## Set up Cuda-related environment settings

while [ "$TF_NEED_CUDA" == "" ]; do
  read -p "Do you wish to build TensorFlow with GPU support? [y/N] " INPUT
  case $INPUT in
    [Yy]* ) echo "GPU support will be enabled for TensorFlow"; TF_NEED_CUDA=1;;
    [Nn]* ) echo "No GPU support will be enabled for TensorFlow"; TF_NEED_CUDA=0;;
    "" ) echo "No GPU support will be enabled for TensorFlow"; TF_NEED_CUDA=0;;
    * ) echo "Invalid selection: " $INPUT;;
  esac
done

# Set the shared-library suffix to .dylib on macOS; otherwise keep the default .so
echo "Checking operating system"
OSNAME=`uname -s`
if [[ $OSNAME == Darwin ]]; then
  perl -pi -e "s,SO_SUFFIX = \".(so|dylib)\",SO_SUFFIX = \".dylib\",s" tensorflow/core/platform/default/build_config.bzl
fi

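# If GPU support was not requested, the configuration is complete at this point.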
export TF_NEED_CUDA
if [ "$TF_NEED_CUDA" == "0" ]; then
  echo "Configuration finished"
  bazel_clean_and_fetch
  exit
fi

# Set up which gcc nvcc should use as the host compiler
while true; do
  fromuser=""
  if [ -z "$GCC_HOST_COMPILER_PATH" ]; then
    default_gcc_host_compiler_path=$(which gcc || true)
    read -p "Please specify which gcc should be used by nvcc as the host compiler. [Default is $default_gcc_host_compiler_path]: " GCC_HOST_COMPILER_PATH
    fromuser="1"
    if [ -z "$GCC_HOST_COMPILER_PATH" ]; then
      GCC_HOST_COMPILER_PATH=$default_gcc_host_compiler_path
    fi
  fi
  if [ -e "$GCC_HOST_COMPILER_PATH" ]; then
    export GCC_HOST_COMPILER_PATH
    break
  fi
  echo "Invalid gcc path. ${GCC_HOST_COMPILER_PATH} cannot be found" 1>&2
  if [ -z "$fromuser" ]; then
    exit 1
  fi
  GCC_HOST_COMPILER_PATH=""
  # Retry
done

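# Find out where the CUDA toolkit is installed and which version to build against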
while true; do
  # Configure the Cuda SDK version to use.
  if [ -z "$TF_CUDA_VERSION" ]; then
    read -p "Please specify the Cuda SDK version you want to use, e.g. 7.0. [Leave empty to use system default]: " TF_CUDA_VERSION
  fi

  fromuser=""
  if [ -z "$CUDA_TOOLKIT_PATH" ]; then
    default_cuda_path=/usr/local/cuda
    read -p "Please specify the location where CUDA $TF_CUDA_VERSION toolkit is installed. Refer to README.md for more details. [Default is $default_cuda_path]: " CUDA_TOOLKIT_PATH
    fromuser="1"
    if [ -z "$CUDA_TOOLKIT_PATH" ]; then
      CUDA_TOOLKIT_PATH=$default_cuda_path
    fi
  fi

  if [[ -z "$TF_CUDA_VERSION" ]]; then
    TF_CUDA_EXT=""
  else
    TF_CUDA_EXT=".$TF_CUDA_VERSION"
  fi

  if [ "$OSNAME" == "Linux" ]; then
    CUDA_RT_LIB_PATH="lib64/libcudart.so${TF_CUDA_EXT}"
  elif [ "$OSNAME" == "Darwin" ]; then
    CUDA_RT_LIB_PATH="lib/libcudart${TF_CUDA_EXT}.dylib"
  fi

  if [ -e "${CUDA_TOOLKIT_PATH}/${CUDA_RT_LIB_PATH}" ]; then
    export CUDA_TOOLKIT_PATH
    export TF_CUDA_VERSION
    break
  fi
  echo "Invalid path to CUDA $TF_CUDA_VERSION toolkit. ${CUDA_TOOLKIT_PATH}/${CUDA_RT_LIB_PATH} cannot be found"

  if [ -z "$fromuser" ]; then
    exit 1
  fi
  # Retry
  TF_CUDA_VERSION=""
  CUDA_TOOLKIT_PATH=""
done

# Find out where the cuDNN library is installed
while true; do
  # Configure the Cudnn version to use.
  if [ -z "$TF_CUDNN_VERSION" ]; then
    read -p "Please specify the Cudnn version you want to use. [Leave empty to use system default]: " TF_CUDNN_VERSION
  fi

  fromuser=""
  if [ -z "$CUDNN_INSTALL_PATH" ]; then
    default_cudnn_path=${CUDA_TOOLKIT_PATH}
    read -p "Please specify the location where cuDNN $TF_CUDNN_VERSION library is installed. Refer to README.md for more details. [Default is $default_cudnn_path]: " CUDNN_INSTALL_PATH
    fromuser="1"
    if [ -z "$CUDNN_INSTALL_PATH" ]; then
      CUDNN_INSTALL_PATH=$default_cudnn_path
    fi
    # The result returned from "read" is used unexpanded, which makes "~" unusable.
    # Go through one more level of expansion to handle that.
    CUDNN_INSTALL_PATH=`${PYTHON_BIN_PATH} -c "import os; print(os.path.realpath(os.path.expanduser('${CUDNN_INSTALL_PATH}')))"`
  fi

  if [[ -z "$TF_CUDNN_VERSION" ]]; then
    TF_CUDNN_EXT=""
    cudnn_lib_path=""
    cudnn_alt_lib_path=""
    if [ "$OSNAME" == "Linux" ]; then
      cudnn_lib_path="${CUDNN_INSTALL_PATH}/lib64/libcudnn.so"
      cudnn_alt_lib_path="${CUDNN_INSTALL_PATH}/libcudnn.so"
    elif [ "$OSNAME" == "Darwin" ]; then
      cudnn_lib_path="${CUDNN_INSTALL_PATH}/lib/libcudnn.dylib"
      cudnn_alt_lib_path="${CUDNN_INSTALL_PATH}/libcudnn.dylib"
    fi
    # Resolve to the SONAME of the symlink.  Use readlink without -f
    # to resolve exactly once to the SONAME, e.g. libcudnn.so ->
    # libcudnn.so.4.
    # If the path is not a symlink, readlink will exit with an error code, so
    # in that case, we return the path itself.
    if [ -f "$cudnn_lib_path" ]; then
      REALVAL=`readlink ${cudnn_lib_path} || echo "${cudnn_lib_path}"`
    else
      REALVAL=`readlink ${cudnn_alt_lib_path} || echo "${cudnn_alt_lib_path}"`
    fi

    # Extract the version of the SONAME, if it was indeed symlinked to
    # the SONAME version of the file.
    if [[ "$REALVAL" =~ .so[.]+([0-9]*) ]]; then
      TF_CUDNN_EXT="."${BASH_REMATCH[1]}
      TF_CUDNN_VERSION=${BASH_REMATCH[1]}
      echo "libcudnn.so resolves to libcudnn${TF_CUDNN_EXT}"
    elif [[ "$REALVAL" =~ ([0-9]*).dylib ]]; then
      TF_CUDNN_EXT=${BASH_REMATCH[1]}".dylib"
      TF_CUDNN_VERSION=${BASH_REMATCH[1]}
      echo "libcudnn.dylib resolves to libcudnn${TF_CUDNN_EXT}"
    fi
  else
    TF_CUDNN_EXT=".$TF_CUDNN_VERSION"
  fi

  if [ "$OSNAME" == "Linux" ]; then
    CUDA_DNN_LIB_PATH="lib64/libcudnn.so${TF_CUDNN_EXT}"
    CUDA_DNN_LIB_ALT_PATH="libcudnn.so${TF_CUDNN_EXT}"
  elif [ "$OSNAME" == "Darwin" ]; then
    CUDA_DNN_LIB_PATH="lib/libcudnn${TF_CUDNN_EXT}"
    CUDA_DNN_LIB_ALT_PATH="libcudnn${TF_CUDNN_EXT}"
  fi

  if [ -e "$CUDNN_INSTALL_PATH/${CUDA_DNN_LIB_ALT_PATH}" -o -e "$CUDNN_INSTALL_PATH/${CUDA_DNN_LIB_PATH}" ]; then
    export TF_CUDNN_VERSION
    export CUDNN_INSTALL_PATH
    break
  fi

  if [ "$OSNAME" == "Linux" ]; then
    CUDNN_PATH_FROM_LDCONFIG="$(ldconfig -p | sed -n 's/.*libcudnn.so .* => \(.*\)/\1/p')"
    if [ -e "${CUDNN_PATH_FROM_LDCONFIG}${TF_CUDNN_EXT}" ]; then
      export TF_CUDNN_VERSION
      export CUDNN_INSTALL_PATH="$(dirname ${CUDNN_PATH_FROM_LDCONFIG})"
      break
    fi
  fi
  echo "Invalid path to cuDNN ${CUDNN_VERSION} toolkit. Neither of the following two files can be found:"
  echo "${CUDNN_INSTALL_PATH}/${CUDA_DNN_LIB_PATH}"
  echo "${CUDNN_INSTALL_PATH}/${CUDA_DNN_LIB_ALT_PATH}"
  if [ "$OSNAME" == "Linux" ]; then
    echo "${CUDNN_PATH_FROM_LDCONFIG}${TF_CUDNN_EXT}"
  fi

  if [ -z "$fromuser" ]; then
    exit 1
  fi
  # Retry
  TF_CUDNN_VERSION=""
  CUDNN_INSTALL_PATH=""
done

# Configure the compute capabilities that TensorFlow builds for.
# Since the Cuda toolkit is not backward compatible, this is not guaranteed to work.
while true; do
  fromuser=""
  default_cuda_compute_capabilities="3.5,5.2"
  if [ -z "$TF_CUDA_COMPUTE_CAPABILITIES" ]; then
cat << EOF
Please specify a list of comma-separated Cuda compute capabilities you want to build with.
You can find the compute capability of your device at: https://developer.nvidia.com/cuda-gpus.
Please note that each additional compute capability significantly increases your build time and binary size.
EOF
    read -p "[Default is: \"3.5,5.2\"]: " TF_CUDA_COMPUTE_CAPABILITIES
    fromuser=1
  fi
  if [ -z "$TF_CUDA_COMPUTE_CAPABILITIES" ]; then
    TF_CUDA_COMPUTE_CAPABILITIES=$default_cuda_compute_capabilities
  fi
  # Check whether all capabilities from the input are valid
  COMPUTE_CAPABILITIES=${TF_CUDA_COMPUTE_CAPABILITIES//,/ }
  ALL_VALID=1
  for CAPABILITY in $COMPUTE_CAPABILITIES; do
    if [[ ! "$CAPABILITY" =~ [0-9]+.[0-9]+ ]]; then
      echo "Invalid compute capability: " $CAPABILITY
      ALL_VALID=0
      break
    fi
  done
  if [ "$ALL_VALID" == "0" ]; then
    if [ -z "$fromuser" ]; then
      exit 1
    fi
  else
    export TF_CUDA_COMPUTE_CAPABILITIES
    break
  fi
  TF_CUDA_COMPUTE_CAPABILITIES=""
done

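# Clean bazel state and pre-fetch all external dependencies now that configuration is in place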
bazel_clean_and_fetch

echo "Configuration finished"