diff --git a/cmake/modules/RootBuildOptions.cmake b/cmake/modules/RootBuildOptions.cmake index 5c6b1d9f54477bccda641cda502147ecb824aa51..4c11c977357101637513b778a471f52c86c98e75 100644 --- a/cmake/modules/RootBuildOptions.cmake +++ b/cmake/modules/RootBuildOptions.cmake @@ -167,6 +167,8 @@ ROOT_BUILD_OPTION(table OFF "Build libTable contrib library") ROOT_BUILD_OPTION(tcmalloc OFF "Using the tcmalloc allocator") ROOT_BUILD_OPTION(thread ON "Using thread library (cannot be disabled)") ROOT_BUILD_OPTION(tmva ON "Build TMVA multi variate analysis library") +ROOT_BUILD_OPTION(tmva-cpu ON "Build TMVA with CPU support for deep learning. Requires BLAS") +ROOT_BUILD_OPTION(tmva-gpu ON "Build TMVA with GPU support for deep learning. Requires CUDA") ROOT_BUILD_OPTION(unuran OFF "UNURAN - package for generating non-uniform random numbers") ROOT_BUILD_OPTION(vc OFF "Vc adds a few new types for portable and intuitive SIMD programming") ROOT_BUILD_OPTION(vdt OFF "VDT adds a set of fast and vectorisable mathematical functions") diff --git a/cmake/modules/RootConfiguration.cmake b/cmake/modules/RootConfiguration.cmake index be460e3fe0109271f5959bb0472e2f8b6a00dce7..2cf5f2aa157fd1b98150dafa9f6c77658161825c 100644 --- a/cmake/modules/RootConfiguration.cmake +++ b/cmake/modules/RootConfiguration.cmake @@ -565,6 +565,17 @@ if(qt5web) else() set(hasqt5webengine undef) endif() +if (tmva AND imt AND BLAS_FOUND) + set(hastmvacpu define) +else() + set(hastmvacpu undef) +endif() +if (tmva AND CUDA_FOUND) + set(hastmvagpu define) +else() + set(hastmvagpu undef) +endif() + CHECK_CXX_SOURCE_COMPILES("#include <string_view> int main() { char arr[3] = {'B', 'a', 'r'}; std::string_view strv(arr, sizeof(arr)); return 0;}" found_stdstringview) diff --git a/cmake/modules/SearchInstalledSoftware.cmake b/cmake/modules/SearchInstalledSoftware.cmake index 97e93c26cf1cd08c80ffc37b99816b3a9c7fa5d7..3722621f42ceaf67b61274930a653e78ef0a235f 100644 --- a/cmake/modules/SearchInstalledSoftware.cmake +++ 
b/cmake/modules/SearchInstalledSoftware.cmake @@ -1513,6 +1513,7 @@ if(tmva AND cuda) message(STATUS "CUDA not found. Ensure that the installation of CUDA is in the CMAKE_PREFIX_PATH") message(STATUS " For the time being switching OFF 'cuda' option") set(cuda OFF CACHE BOOL "" FORCE) + set(tmva-gpu OFF CACHE BOOL "" FORCE) endif() endif() endif() @@ -1522,6 +1523,13 @@ if(tmva AND imt) find_package(BLAS) endif() +if(NOT BLAS_FOUND) + set(tmva-cpu OFF CACHE BOOL "" FORCE) +endif() +if(NOT CUDA_FOUND) + set(tmva-gpu OFF CACHE BOOL "" FORCE) +endif() + #---Download googletest-------------------------------------------------------------- if (testing) diff --git a/config/RConfigure.in b/config/RConfigure.in index e028d0c8b53cec1d58c66a9b300e902392816769..32a94d0011d0fc6f547bd6b65f2ad1ed8dbc5e6d 100644 --- a/config/RConfigure.in +++ b/config/RConfigure.in @@ -55,5 +55,7 @@ #@usezlib@ R__HAS_DEFAULT_ZLIB /**/ #@uselzma@ R__HAS_DEFAULT_LZMA /**/ +#@hastmvacpu@ R__HAS_TMVACPU /**/ +#@hastmvagpu@ R__HAS_TMVAGPU /**/ #endif diff --git a/test/stressTMVA.cxx b/test/stressTMVA.cxx index 8f004f91a7f95e427f53432b7efa5e74aee7ebf4..ee1336c41a7820b00580ea5886ebfff5216c40ff 100644 --- a/test/stressTMVA.cxx +++ b/test/stressTMVA.cxx @@ -3049,12 +3049,12 @@ void addClassificationTests( UnitTestSuite& TMVA_test, bool full=true) TString configCpu = "Architecture=CPU:" + config; TString configGpu = "Architecture=GPU:" + config; -#ifdef DNNCPU +#ifdef R__HAS_TMVACPU TMVA_test.addTest(new MethodUnitTestWithROCLimits( TMVA::Types::kDNN, "DNN CPU", configCpu, 0.85, 0.98) ); #endif -#ifdef DNNCUDA +#ifdef R__HAS_TMVAGPU TMVA_test.addTest(new MethodUnitTestWithROCLimits( TMVA::Types::kDNN, "DNN GPU", configGpu, 0.85, 0.98) ); diff --git a/tmva/tmva/CMakeLists.txt b/tmva/tmva/CMakeLists.txt index 797284b7f862b6b671a32197c7a46245f7b32045..6094b35acf3285970b9302710e25b1b18a234806 100644 --- a/tmva/tmva/CMakeLists.txt +++ b/tmva/tmva/CMakeLists.txt @@ -73,14 +73,14 @@ endif() #---Handle BLAS 
dependent code. ----------------- if(BLAS_FOUND AND imt) message(STATUS "Using TMVA-DNN with BLAS installation") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDNNCPU") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") set(DNN_CPU_LIBRARIES MathCore Matrix ${BLAS_LINKER_FLAGS} ${BLAS_LIBRARIES} ${TBB_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) include_directories(SYSTEM ${TBB_INCLUDE_DIRS}) else() if (mathmore AND imt) #use GSL cblas installation message(STATUS "Using TMVA-DNN with gslcblas installation") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDNNCPU -DDNN_USE_CBLAS") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDNN_USE_CBLAS") set(DNN_CPU_LIBRARIES MathCore Matrix ${TBB_LIBRARIES} ${GSL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) include_directories(SYSTEM ${TBB_INCLUDE_DIRS} ${GSL_INCLUDE_DIR} ) else() diff --git a/tmva/tmva/inc/TMVA/MethodDL.h b/tmva/tmva/inc/TMVA/MethodDL.h index ffe98746a2498fe652dd5be1bbd86adbd87385bd..c5e24bf586df50b4076e33e58101250ec02761b1 100644 --- a/tmva/tmva/inc/TMVA/MethodDL.h +++ b/tmva/tmva/inc/TMVA/MethodDL.h @@ -43,11 +43,11 @@ #include "TMVA/DNN/Architectures/Reference.h" -#ifdef DNNCPU +#ifdef R__HAS_TMVACPU #include "TMVA/DNN/Architectures/Cpu.h" #endif -#ifdef DNNCUDA +#ifdef R__HAS_TMVAGPU #include "TMVA/DNN/Architectures/Cuda.h" #endif @@ -77,13 +77,13 @@ class MethodDL : public MethodBase { private: // Key-Value vector type, contining the values for the training options using KeyValueVector_t = std::vector<std::map<TString, TString>>; -#ifdef DNNCPU - using ArchitectureCpu_t = TMVA::DNN::TCpu<Double_t>; +#ifdef R__HAS_TMVACPU + using ArchitectureImpl_t = TMVA::DNN::TCpu<Double_t>; #else - using ArchitectureCpu_t = TMVA::DNN::TReference<Double_t>; + using ArchitectureImpl_t = TMVA::DNN::TReference<Double_t>; #endif - using DeepNetCpu_t = TMVA::DNN::TDeepNet<ArchitectureCpu_t>; - std::unique_ptr<DeepNetCpu_t> fNet; + using DeepNetImpl_t = TMVA::DNN::TDeepNet<ArchitectureImpl_t>; + std::unique_ptr<DeepNetImpl_t> fNet; /*! 
The option handling methods */ void DeclareOptions(); @@ -199,6 +199,8 @@ public: size_t GetBatchHeight() const { return fBatchHeight; } size_t GetBatchWidth() const { return fBatchWidth; } + const DeepNetImpl_t & GetDeepNet() const { return *fNet; } + DNN::EInitialization GetWeightInitialization() const { return fWeightInitialization; } DNN::EOutputFunction GetOutputFunction() const { return fOutputFunction; } DNN::ELossFunction GetLossFunction() const { return fLossFunction; } diff --git a/tmva/tmva/inc/TMVA/MethodDNN.h b/tmva/tmva/inc/TMVA/MethodDNN.h index 46e7007a54a45b058fef817530f011849cc5e518..ffe39f792ca5b48ba759acf42c2f5776c76f7703 100644 --- a/tmva/tmva/inc/TMVA/MethodDNN.h +++ b/tmva/tmva/inc/TMVA/MethodDNN.h @@ -52,6 +52,13 @@ #include "TMVA/DNN/Minimizers.h" #include "TMVA/DNN/Architectures/Reference.h" +#ifdef R__HAS_TMVACPU +#define DNNCPU +#endif +#ifdef R__HAS_TMVAGPU +#define DNNCUDA +#endif + #ifdef DNNCPU #include "TMVA/DNN/Architectures/Cpu.h" #endif