From 45dd800ffd0fb9cd4c886b447e56f9c6828254fe Mon Sep 17 00:00:00 2001 From: moneta <lorenzo.moneta@cern.ch> Date: Mon, 30 Apr 2018 15:59:40 +0200 Subject: [PATCH] Fix the configuration to find Blas and decide either to build CPU or GPU architecture. Define this in RConfigure.h --- cmake/modules/RootBuildOptions.cmake | 2 ++ cmake/modules/RootConfiguration.cmake | 11 +++++++++++ cmake/modules/SearchInstalledSoftware.cmake | 8 ++++++++ config/RConfigure.in | 2 ++ test/stressTMVA.cxx | 4 ++-- tmva/tmva/CMakeLists.txt | 4 ++-- tmva/tmva/inc/TMVA/MethodDL.h | 16 +++++++++------- tmva/tmva/inc/TMVA/MethodDNN.h | 7 +++++++ 8 files changed, 43 insertions(+), 11 deletions(-) diff --git a/cmake/modules/RootBuildOptions.cmake b/cmake/modules/RootBuildOptions.cmake index 5c6b1d9f544..4c11c977357 100644 --- a/cmake/modules/RootBuildOptions.cmake +++ b/cmake/modules/RootBuildOptions.cmake @@ -167,6 +167,8 @@ ROOT_BUILD_OPTION(table OFF "Build libTable contrib library") ROOT_BUILD_OPTION(tcmalloc OFF "Using the tcmalloc allocator") ROOT_BUILD_OPTION(thread ON "Using thread library (cannot be disabled)") ROOT_BUILD_OPTION(tmva ON "Build TMVA multi variate analysis library") +ROOT_BUILD_OPTION(tmva-cpu ON "Build TMVA with CPU support for deep learning. Requires BLAS") +ROOT_BUILD_OPTION(tmva-gpu ON "Build TMVA with GPU support for deep learning. 
Requires CUDA") ROOT_BUILD_OPTION(unuran OFF "UNURAN - package for generating non-uniform random numbers") ROOT_BUILD_OPTION(vc OFF "Vc adds a few new types for portable and intuitive SIMD programming") ROOT_BUILD_OPTION(vdt OFF "VDT adds a set of fast and vectorisable mathematical functions") diff --git a/cmake/modules/RootConfiguration.cmake b/cmake/modules/RootConfiguration.cmake index be460e3fe01..2cf5f2aa157 100644 --- a/cmake/modules/RootConfiguration.cmake +++ b/cmake/modules/RootConfiguration.cmake @@ -565,6 +565,17 @@ if(qt5web) else() set(hasqt5webengine undef) endif() +if (tmva AND imt AND BLAS_FOUND) + set(hastmvacpu define) +else() + set(hastmvacpu undef) +endif() +if (tmva AND CUDA_FOUND) + set(hastmvagpu define) +else() + set(hastmvagpu undef) +endif() + CHECK_CXX_SOURCE_COMPILES("#include <string_view> int main() { char arr[3] = {'B', 'a', 'r'}; std::string_view strv(arr, sizeof(arr)); return 0;}" found_stdstringview) diff --git a/cmake/modules/SearchInstalledSoftware.cmake b/cmake/modules/SearchInstalledSoftware.cmake index 97e93c26cf1..3722621f42c 100644 --- a/cmake/modules/SearchInstalledSoftware.cmake +++ b/cmake/modules/SearchInstalledSoftware.cmake @@ -1513,6 +1513,7 @@ if(tmva AND cuda) message(STATUS "CUDA not found. 
Ensure that the installation of CUDA is in the CMAKE_PREFIX_PATH") message(STATUS " For the time being switching OFF 'cuda' option") set(cuda OFF CACHE BOOL "" FORCE) + set(tmva-gpu OFF CACHE BOOL "" FORCE) endif() endif() endif() @@ -1522,6 +1523,13 @@ if(tmva AND imt) find_package(BLAS) endif() +if(NOT BLAS_FOUND) + set(tmva-cpu OFF CACHE BOOL "" FORCE) +endif() +if(NOT CUDA_FOUND) + set(tmva-gpu OFF CACHE BOOL "" FORCE) +endif() + #---Download googletest-------------------------------------------------------------- if (testing) diff --git a/config/RConfigure.in b/config/RConfigure.in index e028d0c8b53..32a94d0011d 100644 --- a/config/RConfigure.in +++ b/config/RConfigure.in @@ -55,5 +55,7 @@ #@usezlib@ R__HAS_DEFAULT_ZLIB /**/ #@uselzma@ R__HAS_DEFAULT_LZMA /**/ +#@hastmvacpu@ R__HAS_TMVACPU /**/ +#@hastmvagpu@ R__HAS_TMVAGPU /**/ #endif diff --git a/test/stressTMVA.cxx b/test/stressTMVA.cxx index 8f004f91a7f..ee1336c41a7 100644 --- a/test/stressTMVA.cxx +++ b/test/stressTMVA.cxx @@ -3049,12 +3049,12 @@ void addClassificationTests( UnitTestSuite& TMVA_test, bool full=true) TString configCpu = "Architecture=CPU:" + config; TString configGpu = "Architecture=GPU:" + config; -#ifdef DNNCPU +#ifdef R__HAS_TMVACPU TMVA_test.addTest(new MethodUnitTestWithROCLimits( TMVA::Types::kDNN, "DNN CPU", configCpu, 0.85, 0.98) ); #endif -#ifdef DNNCUDA +#ifdef R__HAS_TMVAGPU TMVA_test.addTest(new MethodUnitTestWithROCLimits( TMVA::Types::kDNN, "DNN GPU", configGpu, 0.85, 0.98) ); diff --git a/tmva/tmva/CMakeLists.txt b/tmva/tmva/CMakeLists.txt index 797284b7f86..6094b35acf3 100644 --- a/tmva/tmva/CMakeLists.txt +++ b/tmva/tmva/CMakeLists.txt @@ -73,14 +73,14 @@ endif() #---Handle BLAS dependent code. 
----------------- if(BLAS_FOUND AND imt) message(STATUS "Using TMVA-DNN with BLAS installation") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDNNCPU") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") set(DNN_CPU_LIBRARIES MathCore Matrix ${BLAS_LINKER_FLAGS} ${BLAS_LIBRARIES} ${TBB_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) include_directories(SYSTEM ${TBB_INCLUDE_DIRS}) else() if (mathmore AND imt) #use GSL cblas installation message(STATUS "Using TMVA-DNN with gslcblas installation") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDNNCPU -DDNN_USE_CBLAS") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDNN_USE_CBLAS") set(DNN_CPU_LIBRARIES MathCore Matrix ${TBB_LIBRARIES} ${GSL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) include_directories(SYSTEM ${TBB_INCLUDE_DIRS} ${GSL_INCLUDE_DIR} ) else() diff --git a/tmva/tmva/inc/TMVA/MethodDL.h b/tmva/tmva/inc/TMVA/MethodDL.h index ffe98746a24..c5e24bf586d 100644 --- a/tmva/tmva/inc/TMVA/MethodDL.h +++ b/tmva/tmva/inc/TMVA/MethodDL.h @@ -43,11 +43,11 @@ #include "TMVA/DNN/Architectures/Reference.h" -#ifdef DNNCPU +#ifdef R__HAS_TMVACPU #include "TMVA/DNN/Architectures/Cpu.h" #endif -#ifdef DNNCUDA +#ifdef R__HAS_TMVAGPU #include "TMVA/DNN/Architectures/Cuda.h" #endif @@ -77,13 +77,13 @@ class MethodDL : public MethodBase { private: // Key-Value vector type, contining the values for the training options using KeyValueVector_t = std::vector<std::map<TString, TString>>; -#ifdef DNNCPU - using ArchitectureCpu_t = TMVA::DNN::TCpu<Double_t>; +#ifdef R__HAS_TMVACPU + using ArchitectureImpl_t = TMVA::DNN::TCpu<Double_t>; #else - using ArchitectureCpu_t = TMVA::DNN::TReference<Double_t>; + using ArchitectureImpl_t = TMVA::DNN::TReference<Double_t>; #endif - using DeepNetCpu_t = TMVA::DNN::TDeepNet<ArchitectureCpu_t>; - std::unique_ptr<DeepNetCpu_t> fNet; + using DeepNetImpl_t = TMVA::DNN::TDeepNet<ArchitectureImpl_t>; + std::unique_ptr<DeepNetImpl_t> fNet; /*! 
The option handling methods */ void DeclareOptions(); @@ -199,6 +199,8 @@ public: size_t GetBatchHeight() const { return fBatchHeight; } size_t GetBatchWidth() const { return fBatchWidth; } + const DeepNetImpl_t & GetDeepNet() const { return *fNet; } + DNN::EInitialization GetWeightInitialization() const { return fWeightInitialization; } DNN::EOutputFunction GetOutputFunction() const { return fOutputFunction; } DNN::ELossFunction GetLossFunction() const { return fLossFunction; } diff --git a/tmva/tmva/inc/TMVA/MethodDNN.h b/tmva/tmva/inc/TMVA/MethodDNN.h index 46e7007a54a..ffe39f792ca 100644 --- a/tmva/tmva/inc/TMVA/MethodDNN.h +++ b/tmva/tmva/inc/TMVA/MethodDNN.h @@ -52,6 +52,13 @@ #include "TMVA/DNN/Minimizers.h" #include "TMVA/DNN/Architectures/Reference.h" +#ifdef R__HAS_TMVACPU +#define DNNCPU +#endif +#ifdef R__HAS_TMVAGPU +#define DNNCUDA +#endif + #ifdef DNNCPU #include "TMVA/DNN/Architectures/Cpu.h" #endif -- GitLab