[arch-commits] Commit in root/trunk (3 files)
Konstantin Gizdov
kgizdov at archlinux.org
Tue Sep 1 08:53:50 UTC 2020
Date: Tuesday, September 1, 2020 @ 08:53:50
Author: kgizdov
Revision: 695267
upgpkg: root 6.22.02-1
Modified:
root/trunk/PKGBUILD
Deleted:
root/trunk/adapt_tmva_to_support_cudnn8.patch
root/trunk/fix_cuda_cxx17.patch
------------------------------------+
 PKGBUILD                           |   12
 adapt_tmva_to_support_cudnn8.patch | 1130 -----------------------------------
 fix_cuda_cxx17.patch               |  417 ------------
3 files changed, 4 insertions(+), 1555 deletions(-)
Modified: PKGBUILD
===================================================================
--- PKGBUILD 2020-09-01 08:14:08 UTC (rev 695266)
+++ PKGBUILD 2020-09-01 08:53:50 UTC (rev 695267)
@@ -6,8 +6,8 @@
pkgbase=root
pkgname=('root' 'root-cuda')
-pkgver=6.22.00
-pkgrel=4
+pkgver=6.22.02
+pkgrel=1
pkgdesc='C++ data analysis framework and interpreter from CERN'
arch=('x86_64')
url='https://root.cern'
@@ -92,10 +92,8 @@
'jupyter_notebook_config.py'
'nbman-for-arch.patch'
'thisroot.fail'
- 'adapt_tmva_to_support_cudnn8.patch'
- 'fix_cuda_cxx17.patch'
)
-sha512sums=('9e3c54bbc146b0abb0a2d960af380255ec59d0b3a11a4a97a2a25cb7ac567b07280c4eb48dddf99c1fa2e692881f6396a842ce125d3a253037e52f719739f01e'
+sha512sums=('0a0ffbcee2ef971bebf32bc38a247ea981f56721314bc2eab69cfe66ae86eafb00568c475b41b2ae7db7ad25f0c8b0953e10f1841316a49cf62fbad664ccee17'
'af8f178fc9df66997d5495b271e38adcd1636aab4c8fc994c6600c2496127829d831250d73d3fc229b02dfe49b9867d0be979beacb959f2f3a05351b8118a4a6'
'1fe6f4aa09d583d33f27cc766f4935510bb7ab6bbb8d4700baa1aaab92ea6c876500b67da1e4f6e0b510aa5616e4e193b860264b86925de85f2d9f558d75d5dc'
'3c81d255a17b902ffac0187af1752847036137e16641a88b17eef0d9c944e6f0d3c954bc93307d6270603f43f6c23f2e04f98dc7a68f9d076dbaa8006a2527d6'
@@ -103,9 +101,7 @@
'324adbff951f5fd60307ce12591be2c6c583021bf4645c4d2043e37d3313cecb841f13997bf23587beac85158333b49359709b385592ec74cd006c37a170290e'
'1c905ee7a3f8f5f3f567d957f9be6b503a8631565d4d9b9bfea5e496ef86865c5a8be1a1f8c7842754029879cf0afd2465249f532a116cc43660aa2e460ae682'
'12814f50b7016bd86d3f91e0e31c052783a0c0fa72b7d6a072d3ae6f86c2437323d585e531235377ebbfdd9cb76abd7da84d9631de821151547f1d4b13417e69'
- 'ff555ac4db568affe139701907f86d919a2206f3e304f69dd317b756ea0904b5934d9364a524060778aa507809ce78448621619bb34039ba34c5a71af71a4a8c'
- '2ae126795df4127c27a6287a1499bdb8b2bacb74cfbec17dabe378a5fb9fc7c755644e4090a4da1d0045bf5d4f542f06da827a0f48a5927ee8509874045f18b6'
- 'b437f3b2a50efe5b482727bd8c1d606d8b572fbca1f2fab6255e521a084810c0cf1c5b09a561e2efca76934182fe5ec4a284286556860ffa31f7113be8a95c12')
+ 'ff555ac4db568affe139701907f86d919a2206f3e304f69dd317b756ea0904b5934d9364a524060778aa507809ce78448621619bb34039ba34c5a71af71a4a8c')
get_pyver () {
python -c 'import sys; print(str(sys.version_info[0]) + "." + str(sys.version_info[1]))'
Deleted: adapt_tmva_to_support_cudnn8.patch
===================================================================
--- adapt_tmva_to_support_cudnn8.patch 2020-09-01 08:14:08 UTC (rev 695266)
+++ adapt_tmva_to_support_cudnn8.patch 2020-09-01 08:53:50 UTC (rev 695267)
@@ -1,1130 +0,0 @@
-From 05739e6b01fb34b5ef40e1a584107876e68e4b77 Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Tue, 21 Jul 2020 15:13:57 +0300
-Subject: [PATCH 01/10] update deprecated function call name to backward
- compatible one
-
----
- tmva/tmva/src/DNN/Architectures/Cudnn/RecurrentPropagation.cu | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/tmva/tmva/src/DNN/Architectures/Cudnn/RecurrentPropagation.cu b/tmva/tmva/src/DNN/Architectures/Cudnn/RecurrentPropagation.cu
-index 058cee28424..60289ec2fdd 100644
---- a/tmva/tmva/src/DNN/Architectures/Cudnn/RecurrentPropagation.cu
-+++ b/tmva/tmva/src/DNN/Architectures/Cudnn/RecurrentPropagation.cu
-@@ -132,7 +132,11 @@ void TCudnn<AFloat>::InitializeRecurrentDescriptors(TDescriptors *&descriptors,
- cudnnDataType_t mathPrec = CUDNN_DATA_FLOAT;
- if (std::is_same<AFloat, double>::value) { mathPrec = CUDNN_DATA_DOUBLE;}
-
-+#if (CUDNN_VERSION >= 8000)
-+ CUDNNCHECK(cudnnSetRNNDescriptor_v6(handle, rnnDescriptors->LayerDescriptor, hiddenSize, numLayers, rnnDescriptors->HelperDescriptor,
-+#else
- CUDNNCHECK(cudnnSetRNNDescriptor(handle, rnnDescriptors->LayerDescriptor, hiddenSize, numLayers, rnnDescriptors->HelperDescriptor,
-+#endif
- inputMode, direction, mode, algo, mathPrec) );
-
-
-
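In outline, the guard above reduces to a small compatibility wrapper. A minimal sketch, assuming <cudnn.h> from cuDNN 7 or 8 is available; the argument list is copied from the hunk above and the wrapper name is illustrative, not TMVA code:

    #include <cudnn.h>

    // cuDNN 8 removed the un-suffixed cudnnSetRNNDescriptor; the _v6 variant takes the
    // same arguments and is still provided (deprecated), so one wrapper covers both.
    inline cudnnStatus_t setRNNDescriptorCompat(cudnnHandle_t handle,
                                                cudnnRNNDescriptor_t rnnDesc,
                                                int hiddenSize, int numLayers,
                                                cudnnDropoutDescriptor_t dropoutDesc,
                                                cudnnRNNInputMode_t inputMode,
                                                cudnnDirectionMode_t direction,
                                                cudnnRNNMode_t mode,
                                                cudnnRNNAlgo_t algo,
                                                cudnnDataType_t mathPrec)
    {
    #if (CUDNN_VERSION >= 8000)
       return cudnnSetRNNDescriptor_v6(handle, rnnDesc, hiddenSize, numLayers, dropoutDesc,
                                       inputMode, direction, mode, algo, mathPrec);
    #else
       return cudnnSetRNNDescriptor(handle, rnnDesc, hiddenSize, numLayers, dropoutDesc,
                                    inputMode, direction, mode, algo, mathPrec);
    #endif
    }
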
-From 90baa4f6ad10076fa148f5aa06ef432bd0f34208 Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Tue, 21 Jul 2020 19:06:09 +0300
-Subject: [PATCH 02/10] adapt convolution forward to cuDNN 8
-
----
- .../src/DNN/Architectures/Cudnn/Propagate.cu | 77 ++++++++++++++++++-
- 1 file changed, 76 insertions(+), 1 deletion(-)
-
-diff --git a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-index 7a57b6bf104..cc953ee45f9 100644
---- a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-+++ b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-@@ -27,6 +27,9 @@
- // #include "Kernels.cuh"*/
- // #include <math.h>
-
-+// for std::numeric_limits<T>::max()
-+#include <limits>
-+
- namespace TMVA {
- namespace DNN {
-
-@@ -378,7 +381,78 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- cudnnHandle_t cudnnHandle = outputTensor.GetCudnnHandle();
-
- // cuDNN decides which algorithm to use
-- // More detailed alternative: cudnnFindConvolutionForwardAlgorithm
-+#if (CUDNN_VERSION >= 8000)
-+ /**
-+ * I'm sure there may be a faster way, but this works
-+ */
-+ int convRequestedAlgoCount{8}; // requestedAlgoCount is setting how many algorithms to try, can be tuned, fixed for now as all available
-+ cudnnConvolutionDescriptor_t tempConvDescriptor;
-+ CUDDNCHECK(cudnnCreateConvolutionDescriptor(&tempConvDescriptor));
-+ cudnnTensorDescriptor_t outputTensorDescriptor;
-+ CUDNNCHECK(cudnnCreateTensorDescriptor(&outputTensorDescriptor));
-+ CUDNNCHECK(cudnnSetTensor4dDescriptor(outputTensorDescriptor,
-+ CUDNN_TENSOR_NCHW, // Layout of the tensor in memory
-+ Tensor_t::GetDataType(),
-+ (int)L->GetBatchSize(),
-+ (int)L->GetDepth(),
-+ (int)L->GetHeight(),
-+ (int)L->GetWidth()));
-+ int algoCount;
-+ cudnnConvolutionFwdAlgoPerf_t convPerfResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
-+ CUDNNCHECK(cudnnFindConvolutionForwardAlgorithm(
-+ cudnnHandle,
-+ inputTensorDescriptor,
-+ convDescriptors->WeightsDescriptor,
-+ tempConvDescriptor,
-+ outputTensorDescriptor,
-+ convRequestedAlgoCount,
-+ &algoCount,
-+ &convPerfResults));
-+ // we could also do it with the expert mode (cudnnFindConvolutionForwardAlgorithmEx),
-+ // but we arrive at an chicken or egg problem:
-+ // workspace size is calculated from chosen forward algorithm,
-+ // but finding a forward algorithm depends on workspace size...
-+ // i.e.
-+ // Tensor_t & inputTensor = L->GetInput();
-+ // inputTensor = Tensor_t(inputTensor.GetDeviceBuffer(),{ L->GetBatchSize(), L->GetInputDepth(), L->GetInputHeight(), L->GetInputWidth() },GetTensorLayout(),0,0);
-+ // CUDNNCHECK(cudnnFindConvolutionForwardAlgorithmEx(
-+ // cudnnHandle,
-+ // inputTensorDescriptor,
-+ // &inputTensor,
-+ // convDescriptors->WeightsDescriptor,
-+ // &filters,
-+ // tempConvDescriptor,
-+ // outputTensorDescriptor,
-+ // &outputTensor,
-+ // convRequestedAlgoCount,
-+ // &algoCount,
-+ // &convPerfResults,
-+ // &convWorkspace,
-+ // convWorkspace->ForwardWorkspaceSize));
-+ // instead choose either fastest or lowest memory algo as per preference
-+ int algoIdx{0};
-+ if (CNNOptions::ConvMaxWorkspaceSize != 0) { // prefer fastest
-+ float temp_runtime{std::numeric_limits<float>::max()};
-+ for (int i = 0; i < algoCount; ++i) {
-+ if (convPerfResults[i].status != 0) continue;
-+ if (convPerfResults[i].time < temp_runtime) {
-+ temp_runtime = convPerfResults[i].time;
-+ algoIdx = i;
-+ }
-+ }
-+ } else { // prefer smallest workspace size
-+ size_t temp_memsize{std::numeric_limits<size_t>::max()};
-+ for (int i = 0; i < algoCount; ++i) {
-+ if (convPerfResults[i].status != 0) continue;
-+ if (convPerfResults[i].memory < temp_memsize) {
-+ temp_memsize = convPerfResults[i].memory;
-+ algoIdx = i;
-+ }
-+ }
-+ }
-+ convWorkspace->AlgorithmForward = convPerfResults[algoIdx].algo;
-+#else
-+ // More detailed alternative: cudnnFindConvolutionForwardAlgorithm (only option in newer cuDNN versions)
- cudnnConvolutionFwdPreference_t preferenceFwd = (CNNOptions::ConvMaxWorkspaceSize !=0) ? CUDNN_CONVOLUTION_FWD_PREFER_FASTEST :
- CUDNN_CONVOLUTION_FWD_NO_WORKSPACE;
-
-@@ -389,6 +463,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- outputTensor.GetTensorDescriptor(), preferenceFwd,
- memLimit, // Memory limit in bytes for mode CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT
- &convWorkspace->AlgorithmForward));
-+#endif
-
- // Allocate memory for the convolution
- //size_t workSpaceSizeInBytes = 0;
-
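The loops added above are the heart of the cuDNN 8 port: cudnnFindConvolutionForwardAlgorithm returns an array of measured candidates, and the code then picks either the fastest successful one or the one needing the least workspace memory. The same selection logic as a standalone sketch, written against a hypothetical PerfResult type (any struct exposing the status/time/memory members the cuDNN perf structs have) so it compiles without cuDNN:

    #include <cstddef>
    #include <limits>

    // Pick an index into an array of performance results: either the fastest successful
    // measurement or the one with the smallest workspace. Falls back to index 0, as the
    // patch does. PerfResult is e.g. cudnnConvolutionFwdAlgoPerf_t (sketch, not TMVA code).
    template <typename PerfResult>
    int choosePerfIndex(const PerfResult *results, int count, bool preferFastest)
    {
       int best = 0;
       float bestTime = std::numeric_limits<float>::max();
       std::size_t bestMemory = std::numeric_limits<std::size_t>::max();
       for (int i = 0; i < count; ++i) {
          if (results[i].status != 0) continue; // skip algorithms that failed to run
          const bool better = preferFastest ? (results[i].time < bestTime)
                                            : (results[i].memory < bestMemory);
          if (better) {
             bestTime   = results[i].time;
             bestMemory = results[i].memory;
             best = i;
          }
       }
       return best;
    }
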
-From d9b5e2f82917e7183b9f45a49135641981741477 Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Tue, 21 Jul 2020 19:34:00 +0300
-Subject: [PATCH 03/10] adapt convolution backward to cuDNN 8
-
----
- .../src/DNN/Architectures/Cudnn/Propagate.cu | 72 +++++++++++++++++++
- 1 file changed, 72 insertions(+)
-
-diff --git a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-index cc953ee45f9..85a5c3aa175 100644
---- a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-+++ b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-@@ -515,6 +515,77 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- // dx : Activation gradient to be computed -> activationGradients [in place op]
- // dy : Gradient of activation from the following layer (backpropagation)-> activationGradients
-
-+#if (CUDNN_VERSION >= 8000)
-+ /**
-+ * I'm sure there may be a faster way, but this works
-+ */
-+ convRequestedAlgoCount = 6; // reset to max number of available backward algorithms
-+ cudnnConvolutionDescriptor_t tempConvBwdDescriptor;
-+ CUDDNCHECK(cudnnCreateConvolutionDescriptor(&tempConvBwdDescriptor));
-+ cudnnTensorDescriptor_t outputBwdTensorDescriptor;
-+ CUDNNCHECK(cudnnCreateTensorDescriptor(&outputBwdTensorDescriptor));
-+ CUDNNCHECK(cudnnSetTensor4dDescriptor(outputBwdTensorDescriptor,
-+ CUDNN_TENSOR_NCHW, // Layout of the tensor in memory
-+ Tensor_t::GetDataType(),
-+ (int)L->GetBatchSize(),
-+ (int)L->GetInputDepth(),
-+ (int)L->GetInputHeight(),
-+ (int)L->GetInputWidth()));
-+ int algoCount;
-+ cudnnConvolutionBwdDataAlgoPerf_t convPerfBwdResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
-+ CUDNNCHECK(cudnnFindConvolutionBackwardDataAlgorithm(
-+ cudnnHandle,
-+ convDescriptors->WeightsDescriptor,
-+ activationGradientsBackwardDescriptor,
-+ tempConvBwdDescriptor,
-+ outputBwdTensorDescriptor,
-+ convRequestedAlgoCount,
-+ &algoCount,
-+ &convPerfBwdResults));
-+ // we could also do it with the expert mode (cudnnFindConvolutionForwardAlgorithmEx),
-+ // but we arrive at an chicken or egg problem:
-+ // workspace size is calculated from chosen forward algorithm,
-+ // but finding a forward algorithm depends on workspace size...
-+ // i.e.
-+ // Tensor_t & outputBwdTensor = L->GetInput();
-+ // outputBwdTensor = Tensor_t(outputBwdTensor.GetDeviceBuffer(),{ L->GetBatchSize(), L->GetInputDepth(), L->GetInputHeight(), L->GetInputWidth() },GetTensorLayout(),0,0);
-+ // CUDNNCHECK(cudnnFindConvolutionBackwardDataAlgorithmEx(
-+ // cudnnHandle,
-+ // convDescriptors->WeightsDescriptor,
-+ // &filters,
-+ // activationGradientsBackwardDescriptor,
-+ // &activationGradientsBackwardTensor,
-+ // tempConvBwdDescriptor,
-+ // outputBwdTensorDescriptor,
-+ // &outputBwdTensor,
-+ // convRequestedAlgoCount,
-+ // &algoCount,
-+ // &convPerfBwdResults,
-+ // &convWorkspace,
-+ // convWorkspace->ForwardWorkspaceSize));
-+ // instead choose either fastest or lowest memory algo as per preference
-+ int algoIdx{0};
-+ if (CNNOptions::ConvMaxWorkspaceSize != 0) { // prefer fastest
-+ float temp_runtime{std::numeric_limits<float>::max()};
-+ for (int i = 0; i < algoCount; ++i) {
-+ if (convPerfBwdResults[i].status != 0) continue;
-+ if (convPerfBwdResults[i].time < temp_runtime) {
-+ temp_runtime = convPerfBwdResults[i].time;
-+ algoIdx = i;
-+ }
-+ }
-+ } else { // prefer smallest workspace size
-+ size_t temp_memsize{std::numeric_limits<size_t>::max()};
-+ for (int i = 0; i < algoCount; ++i) {
-+ if (convPerfBwdResults[i].status != 0) continue;
-+ if (convPerfBwdResults[i].memory < temp_memsize) {
-+ temp_memsize = convPerfBwdResults[i].memory;
-+ algoIdx = i;
-+ }
-+ }
-+ }
-+ convWorkspace->AlgorithmBackward = convPerfBwdResults[algoIdx].algo;
-+#else
- cudnnConvolutionBwdDataPreference_t preferenceBwdData =
- (CNNOptions::ConvMaxWorkspaceSize != 0) ? CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST : CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE;
-
-@@ -525,6 +596,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- activationGradientsBackwardDescriptor,
- preferenceBwdData, memLimit,
- &convWorkspace->AlgorithmBackward));
-+#endif
-
- std::cout << "CONV BWD Data Algo used is " << convWorkspace->AlgorithmBackward << std::endl;
- //CUDNNCHECK(cudnnSetConvolutionMathType(convDescriptors->LayerDescriptor, CUDNN_TENSOR_OP_MATH));
-
-From 526b7177c0201be1d0c6b36de0772b7d2ecb90d5 Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Wed, 22 Jul 2020 11:50:29 +0300
-Subject: [PATCH 04/10] fix typo and re-declarations
-
----
- tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu | 11 +++++------
- 1 file changed, 5 insertions(+), 6 deletions(-)
-
-diff --git a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-index 85a5c3aa175..1b7e3e845d8 100644
---- a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-+++ b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-@@ -387,7 +387,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- */
- int convRequestedAlgoCount{8}; // requestedAlgoCount is setting how many algorithms to try, can be tuned, fixed for now as all available
- cudnnConvolutionDescriptor_t tempConvDescriptor;
-- CUDDNCHECK(cudnnCreateConvolutionDescriptor(&tempConvDescriptor));
-+ CUDNNCHECK(cudnnCreateConvolutionDescriptor(&tempConvDescriptor));
- cudnnTensorDescriptor_t outputTensorDescriptor;
- CUDNNCHECK(cudnnCreateTensorDescriptor(&outputTensorDescriptor));
- CUDNNCHECK(cudnnSetTensor4dDescriptor(outputTensorDescriptor,
-@@ -407,7 +407,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- outputTensorDescriptor,
- convRequestedAlgoCount,
- &algoCount,
-- &convPerfResults));
-+ convPerfResults));
- // we could also do it with the expert mode (cudnnFindConvolutionForwardAlgorithmEx),
- // but we arrive at an chicken or egg problem:
- // workspace size is calculated from chosen forward algorithm,
-@@ -521,7 +521,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- */
- convRequestedAlgoCount = 6; // reset to max number of available backward algorithms
- cudnnConvolutionDescriptor_t tempConvBwdDescriptor;
-- CUDDNCHECK(cudnnCreateConvolutionDescriptor(&tempConvBwdDescriptor));
-+ CUDNNCHECK(cudnnCreateConvolutionDescriptor(&tempConvBwdDescriptor));
- cudnnTensorDescriptor_t outputBwdTensorDescriptor;
- CUDNNCHECK(cudnnCreateTensorDescriptor(&outputBwdTensorDescriptor));
- CUDNNCHECK(cudnnSetTensor4dDescriptor(outputBwdTensorDescriptor,
-@@ -531,7 +531,6 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- (int)L->GetInputDepth(),
- (int)L->GetInputHeight(),
- (int)L->GetInputWidth()));
-- int algoCount;
- cudnnConvolutionBwdDataAlgoPerf_t convPerfBwdResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
- CUDNNCHECK(cudnnFindConvolutionBackwardDataAlgorithm(
- cudnnHandle,
-@@ -541,7 +540,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- outputBwdTensorDescriptor,
- convRequestedAlgoCount,
- &algoCount,
-- &convPerfBwdResults));
-+ convPerfBwdResults));
- // we could also do it with the expert mode (cudnnFindConvolutionForwardAlgorithmEx),
- // but we arrive at an chicken or egg problem:
- // workspace size is calculated from chosen forward algorithm,
-@@ -564,7 +563,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- // &convWorkspace,
- // convWorkspace->ForwardWorkspaceSize));
- // instead choose either fastest or lowest memory algo as per preference
-- int algoIdx{0};
-+ algoIdx = 0;
- if (CNNOptions::ConvMaxWorkspaceSize != 0) { // prefer fastest
- float temp_runtime{std::numeric_limits<float>::max()};
- for (int i = 0; i < algoCount; ++i) {
-
-From 6d84e765322a72c48de00b4a9b7471da8a15fece Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Wed, 22 Jul 2020 17:00:01 +0300
-Subject: [PATCH 05/10] implement workspace limits, fix an algorithm preference
- bug and rewrite relevant sections
-
----
- .../src/DNN/Architectures/Cudnn/Propagate.cu | 273 ++++++++++--------
- 1 file changed, 151 insertions(+), 122 deletions(-)
-
-diff --git a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-index 1b7e3e845d8..2049e2b9195 100644
---- a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-+++ b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-@@ -333,35 +333,108 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- TDescriptors * & descriptors,
- const DNN::CNN::TConvParams & /*params*/,
- ConvLayer_t *L) {
-- auto convWorkspace = new ConvWorkspace_t ();
-+ auto convWorkspace = new ConvWorkspace_t();
-+ size_t memLimit = (CNNOptions::ConvMaxWorkspaceSize > 0) ? static_cast<size_t>(CNNOptions::ConvMaxWorkspaceSize) : 0;
- auto convDescriptors = static_cast<ConvDescriptors_t *>(descriptors);
-+ // can we do the following and substitute below???
-+ // auto weightsDescriptor{convDescriptors->WeightsDescriptor};
-+ // auto convDescriptor{convDescriptors->LayerDescriptor};
-
-+#if (CUDNN_VERSION >= 8000)
-+ enum algoPreference { no_workspace, fastest, workspace_limit };
-+ algoPreference algoChoice;
-+ auto choose_algo = [](algoPreference const& algoPref, auto&& perfResults, size_t memLim = std::numeric_limits<size_t>::max()) -> int {
-+ int algoIdx{0};
-+ if (algoPref == algoPreference::fastest) { // prefer fastest
-+ float temp_runtime{std::numeric_limits<float>::max()};
-+ for (int i = 0; i < algoCount; ++i) {
-+ if (PerfResults[i].status == CUDNN_STATUS_SUCCESS && PerfResults[i].time < temp_runtime) {
-+ temp_runtime = PerfResults[i].time;
-+ algoIdx = i;
-+ }
-+ }
-+ } else if (algoPref == algoPreference::workspace_limit) { // constrain to workspace size
-+ float temp_runtime{std::numeric_limits<float>::max()};
-+ for (int i = 0; i < algoCount; ++i) {
-+ if (PerfResults[i].status == CUDNN_STATUS_SUCCESS && PerfResults[i].time < temp_runtime && PerfResults[i].memory <= memLim) {
-+ temp_runtime = PerfResults[i].time;
-+ algoIdx = i;
-+ }
-+ }
-+ } else { // prefer smallest workspace size
-+ size_t temp_memsize{std::numeric_limits<size_t>::max()};
-+ for (int i = 0; i < algoCount; ++i) {
-+ if (PerfResults[i].status == CUDNN_STATUS_SUCCESS && PerfResults[i].memory < temp_memsize) {
-+ temp_memsize = PerfResults[i].memory;
-+ algoIdx = i;
-+ }
-+ }
-+ }
-+ return algoIdx;
-+ };
-+#else
-+ // More detailed alternative: cudnnFindConvolutionForwardAlgorithm (only option in newer cuDNN versions)
-+ cudnnConvolutionFwdPreference_t preferenceFwd;
-+ cudnnConvolutionBwdDataPreference_t preferenceBwdData;
-+ cudnnConvolutionBwdFilterPreference_t preferenceBwdFilter;
-+#endif
-+ // decide on algorithm preference early
-+ if (CNNOptions::ConvMaxWorkspaceSize < 0) {
-+ // no workspace case
-+#if (CUDNN_VERSION >= 8000)
-+ algoChoice = no_workspace;
-+#else
-+ preferenceFwd = CUDNN_CONVOLUTION_FWD_NO_WORKSPACE;
-+ preferenceBwdData = CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE;
-+ preferenceBwdFilter = CUDNN_CONVOLUTION_BWD_FILTER_NO_WORKSPACE;
-+#endif
-+
-+ } else if (CNNOptions::ConvMaxWorkspaceSize == 0) {
-+ // fastest overall
-+#if (CUDNN_VERSION >= 8000)
-+ algoChoice = fastest;
-+#else
-+ preferenceFwd = CUDNN_CONVOLUTION_FWD_PREFER_FASTEST;
-+ preferenceBwdData = CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST;
-+ preferenceBwdFilter = CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST;
-+#endif
-+
-+ } else {
-+ // fastest in memory limit
-+#if (CUDNN_VERSION >= 8000)
-+ algoChoice = workspace_limit;
-+#else
-+ preferenceFwd = CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT;
-+ preferenceBwdData = CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT;
-+ preferenceBwdFilter = CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT;
-+#endif
-+ }
- // fix the weight tensor shapes
- // by default the weights are columnmajor, set them to be row major . At this points
- // they are not yet initialized
- Tensor_t & filters = L->GetWeightsAt(0);
-- filters = Tensor_t (filters.GetDeviceBuffer(), {L->GetDepth(),L->GetInputDepth(), L->GetFilterHeight(),L->GetFilterWidth()}, MemoryLayout::RowMajor, 0, 0 );
-- //PrintTensor(L->GetWeightsAt(0));
-+ filters = Tensor_t(filters.GetDeviceBuffer(), {L->GetDepth(), L->GetInputDepth(), L->GetFilterHeight(), L->GetFilterWidth()}, MemoryLayout::RowMajor, 0, 0);
-+ // PrintTensor(L->GetWeightsAt(0));
- Tensor_t & biases = L->GetBiasesAt(0);
-- biases = Tensor_t (biases.GetDeviceBuffer(), {1, L->GetDepth(),1,1}, GetTensorLayout(), 0, 0 );
-+ biases = Tensor_t(biases.GetDeviceBuffer(), {1, L->GetDepth(), 1, 1}, GetTensorLayout(), 0, 0);
-
- Tensor_t & outputTensor = L->GetOutput();
-- outputTensor = Tensor_t(outputTensor.GetDeviceBuffer(),{ L->GetBatchSize(), L->GetDepth(), L->GetHeight(), L->GetWidth() },GetTensorLayout(),0,0 );
-+ outputTensor = Tensor_t(outputTensor.GetDeviceBuffer(), {L->GetBatchSize(), L->GetDepth(), L->GetHeight(), L->GetWidth()}, GetTensorLayout(), 0, 0);
- Tensor_t & inputActivation = L->GetInputActivation();
-- inputActivation = Tensor_t(inputActivation.GetDeviceBuffer(),outputTensor.GetShape() ,GetTensorLayout(),0,0 );
-+ inputActivation = Tensor_t(inputActivation.GetDeviceBuffer(),outputTensor.GetShape() ,GetTensorLayout(), 0, 0);
-
- Tensor_t & activationGradients = L->GetActivationGradients();
-- activationGradients = Tensor_t(activationGradients.GetDeviceBuffer(),outputTensor.GetShape() ,GetTensorLayout(),0,0 );
-+ activationGradients = Tensor_t(activationGradients.GetDeviceBuffer(),outputTensor.GetShape(), GetTensorLayout(), 0, 0);
-
- Tensor_t & weightGradients = L->GetWeightGradientsAt(0);
-- weightGradients = Tensor_t( weightGradients.GetDeviceBuffer(), filters.GetShape(), GetTensorLayout(), 0, 0 );
-+ weightGradients = Tensor_t(weightGradients.GetDeviceBuffer(), filters.GetShape(), GetTensorLayout(), 0, 0);
-
- Tensor_t & biasGradients = L->GetBiasGradientsAt(0);
-- biasGradients = Tensor_t( biasGradients.GetDeviceBuffer(), biases.GetShape(), GetTensorLayout(), 0, 0 );
-+ biasGradients = Tensor_t(biasGradients.GetDeviceBuffer(), biases.GetShape(), GetTensorLayout(), 0, 0);
-
-
- // FIXME: Use descriptors instead (Tensor device memory is otherwise allocated during initialization)
-- //Tensor_t inputTensor ({L->GetBatchSize(), L->GetInputDepth(), L->GetInputHeight(), L->GetInputWidth()}, MemoryLayout::RowMajor, 0, 0);
-+ // Tensor_t inputTensor ({L->GetBatchSize(), L->GetInputDepth(), L->GetInputHeight(), L->GetInputWidth()}, MemoryLayout::RowMajor, 0, 0);
- cudnnTensorDescriptor_t inputTensorDescriptor;
- CUDNNCHECK(cudnnCreateTensorDescriptor(&inputTensorDescriptor) );
- CUDNNCHECK(cudnnSetTensor4dDescriptor(inputTensorDescriptor,
-@@ -385,79 +458,44 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- /**
- * I'm sure there may be a faster way, but this works
- */
-- int convRequestedAlgoCount{8}; // requestedAlgoCount is setting how many algorithms to try, can be tuned, fixed for now as all available
-- cudnnConvolutionDescriptor_t tempConvDescriptor;
-- CUDNNCHECK(cudnnCreateConvolutionDescriptor(&tempConvDescriptor));
-- cudnnTensorDescriptor_t outputTensorDescriptor;
-- CUDNNCHECK(cudnnCreateTensorDescriptor(&outputTensorDescriptor));
-- CUDNNCHECK(cudnnSetTensor4dDescriptor(outputTensorDescriptor,
-- CUDNN_TENSOR_NCHW, // Layout of the tensor in memory
-- Tensor_t::GetDataType(),
-- (int)L->GetBatchSize(),
-- (int)L->GetDepth(),
-- (int)L->GetHeight(),
-- (int)L->GetWidth()));
-+ int convRequestedAlgoCount{8}; // requestedAlgoCount is setting how many algorithms to try, can be tuned, fixed for now as all available
-+
- int algoCount;
- cudnnConvolutionFwdAlgoPerf_t convPerfResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
-- CUDNNCHECK(cudnnFindConvolutionForwardAlgorithm(
-- cudnnHandle,
-- inputTensorDescriptor,
-- convDescriptors->WeightsDescriptor,
-- tempConvDescriptor,
-- outputTensorDescriptor,
-- convRequestedAlgoCount,
-- &algoCount,
-- convPerfResults));
-+ CUDNNCHECK(
-+ cudnnFindConvolutionForwardAlgorithm(
-+ cudnnHandle,
-+ inputTensorDescriptor,
-+ convDescriptors->WeightsDescriptor,
-+ convDescriptors->LayerDescriptor,
-+ outputTensor.GetTensorDescriptor(),
-+ convRequestedAlgoCount,
-+ &algoCount,
-+ convPerfResults
-+ )
-+ );
- // we could also do it with the expert mode (cudnnFindConvolutionForwardAlgorithmEx),
-- // but we arrive at an chicken or egg problem:
-- // workspace size is calculated from chosen forward algorithm,
-- // but finding a forward algorithm depends on workspace size...
- // i.e.
-- // Tensor_t & inputTensor = L->GetInput();
-- // inputTensor = Tensor_t(inputTensor.GetDeviceBuffer(),{ L->GetBatchSize(), L->GetInputDepth(), L->GetInputHeight(), L->GetInputWidth() },GetTensorLayout(),0,0);
-+ // create an input tensor before the inputTensorDescriptor
-+ // and get the descriptor from there
-+ // Tensor_t inputTensor({L->GetBatchSize(), L->GetInputDepth(), L->GetInputHeight(), L->GetInputWidth()}, MemoryLayout::RowMajor, 0, 0);
- // CUDNNCHECK(cudnnFindConvolutionForwardAlgorithmEx(
- // cudnnHandle,
-- // inputTensorDescriptor,
-+ // inputTensor.GetTensorDescriptor(),
- // &inputTensor,
- // convDescriptors->WeightsDescriptor,
- // &filters,
-- // tempConvDescriptor,
-- // outputTensorDescriptor,
-+ // convDescriptors->LayerDescriptor,
-+ // outputTensor.GetTensorDescriptor(),
- // &outputTensor,
- // convRequestedAlgoCount,
- // &algoCount,
- // &convPerfResults,
- // &convWorkspace,
-- // convWorkspace->ForwardWorkspaceSize));
-+ // memLimit)); // use memLimit for workspace size
- // instead choose either fastest or lowest memory algo as per preference
-- int algoIdx{0};
-- if (CNNOptions::ConvMaxWorkspaceSize != 0) { // prefer fastest
-- float temp_runtime{std::numeric_limits<float>::max()};
-- for (int i = 0; i < algoCount; ++i) {
-- if (convPerfResults[i].status != 0) continue;
-- if (convPerfResults[i].time < temp_runtime) {
-- temp_runtime = convPerfResults[i].time;
-- algoIdx = i;
-- }
-- }
-- } else { // prefer smallest workspace size
-- size_t temp_memsize{std::numeric_limits<size_t>::max()};
-- for (int i = 0; i < algoCount; ++i) {
-- if (convPerfResults[i].status != 0) continue;
-- if (convPerfResults[i].memory < temp_memsize) {
-- temp_memsize = convPerfResults[i].memory;
-- algoIdx = i;
-- }
-- }
-- }
-- convWorkspace->AlgorithmForward = convPerfResults[algoIdx].algo;
-+ convWorkspace->AlgorithmForward = convPerfResults[choose_algo(algoChoice, convPerfResults, memLimit)].algo;
- #else
-- // More detailed alternative: cudnnFindConvolutionForwardAlgorithm (only option in newer cuDNN versions)
-- cudnnConvolutionFwdPreference_t preferenceFwd = (CNNOptions::ConvMaxWorkspaceSize !=0) ? CUDNN_CONVOLUTION_FWD_PREFER_FASTEST :
-- CUDNN_CONVOLUTION_FWD_NO_WORKSPACE;
--
-- size_t memLimit = (CNNOptions::ConvMaxWorkspaceSize > 0) ? (size_t) CNNOptions::ConvMaxWorkspaceSize : 0;
--
- CUDNNCHECK(cudnnGetConvolutionForwardAlgorithm(
- cudnnHandle, inputTensorDescriptor, convDescriptors->WeightsDescriptor, convDescriptors->LayerDescriptor,
- outputTensor.GetTensorDescriptor(), preferenceFwd,
-@@ -519,75 +557,36 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- /**
- * I'm sure there may be a faster way, but this works
- */
-- convRequestedAlgoCount = 6; // reset to max number of available backward algorithms
-- cudnnConvolutionDescriptor_t tempConvBwdDescriptor;
-- CUDNNCHECK(cudnnCreateConvolutionDescriptor(&tempConvBwdDescriptor));
-- cudnnTensorDescriptor_t outputBwdTensorDescriptor;
-- CUDNNCHECK(cudnnCreateTensorDescriptor(&outputBwdTensorDescriptor));
-- CUDNNCHECK(cudnnSetTensor4dDescriptor(outputBwdTensorDescriptor,
-- CUDNN_TENSOR_NCHW, // Layout of the tensor in memory
-- Tensor_t::GetDataType(),
-- (int)L->GetBatchSize(),
-- (int)L->GetInputDepth(),
-- (int)L->GetInputHeight(),
-- (int)L->GetInputWidth()));
-- cudnnConvolutionBwdDataAlgoPerf_t convPerfBwdResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
-+ convRequestedAlgoCount = 6; // reset to max number of available backward algorithms
-+ cudnnConvolutionBwdDataAlgoPerf_t convPerfBwdDataResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
- CUDNNCHECK(cudnnFindConvolutionBackwardDataAlgorithm(
- cudnnHandle,
- convDescriptors->WeightsDescriptor,
-+ activationGradients.GetTensorDescriptor(),
-+ convDescriptors->LayerDescriptor,
- activationGradientsBackwardDescriptor,
-- tempConvBwdDescriptor,
-- outputBwdTensorDescriptor,
- convRequestedAlgoCount,
- &algoCount,
-- convPerfBwdResults));
-+ convPerfBwdDataResults));
- // we could also do it with the expert mode (cudnnFindConvolutionForwardAlgorithmEx),
-- // but we arrive at an chicken or egg problem:
-- // workspace size is calculated from chosen forward algorithm,
-- // but finding a forward algorithm depends on workspace size...
- // i.e.
-- // Tensor_t & outputBwdTensor = L->GetInput();
-- // outputBwdTensor = Tensor_t(outputBwdTensor.GetDeviceBuffer(),{ L->GetBatchSize(), L->GetInputDepth(), L->GetInputHeight(), L->GetInputWidth() },GetTensorLayout(),0,0);
- // CUDNNCHECK(cudnnFindConvolutionBackwardDataAlgorithmEx(
- // cudnnHandle,
- // convDescriptors->WeightsDescriptor,
- // &filters,
-+ // activationGradients.GetTensorDescriptor(),
-+ // &activationGradients,
-+ // convDescriptors->LayerDescriptor,
- // activationGradientsBackwardDescriptor,
-- // &activationGradientsBackwardTensor,
-- // tempConvBwdDescriptor,
-- // outputBwdTensorDescriptor,
-- // &outputBwdTensor,
-+ // &inputTensor,
- // convRequestedAlgoCount,
- // &algoCount,
- // &convPerfBwdResults,
- // &convWorkspace,
-- // convWorkspace->ForwardWorkspaceSize));
-+ // memLimit)); // use memLimit for workspace size
- // instead choose either fastest or lowest memory algo as per preference
-- algoIdx = 0;
-- if (CNNOptions::ConvMaxWorkspaceSize != 0) { // prefer fastest
-- float temp_runtime{std::numeric_limits<float>::max()};
-- for (int i = 0; i < algoCount; ++i) {
-- if (convPerfBwdResults[i].status != 0) continue;
-- if (convPerfBwdResults[i].time < temp_runtime) {
-- temp_runtime = convPerfBwdResults[i].time;
-- algoIdx = i;
-- }
-- }
-- } else { // prefer smallest workspace size
-- size_t temp_memsize{std::numeric_limits<size_t>::max()};
-- for (int i = 0; i < algoCount; ++i) {
-- if (convPerfBwdResults[i].status != 0) continue;
-- if (convPerfBwdResults[i].memory < temp_memsize) {
-- temp_memsize = convPerfBwdResults[i].memory;
-- algoIdx = i;
-- }
-- }
-- }
-- convWorkspace->AlgorithmBackward = convPerfBwdResults[algoIdx].algo;
-+ convWorkspace->AlgorithmBackward = convPerfBwdDataResults[choose_algo(algoChoice, convPerfBwdDataResults, memLimit)].algo;
- #else
-- cudnnConvolutionBwdDataPreference_t preferenceBwdData =
-- (CNNOptions::ConvMaxWorkspaceSize != 0) ? CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST : CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE;
--
- CUDNNCHECK(cudnnGetConvolutionBackwardDataAlgorithm(cudnnHandle,
- convDescriptors->WeightsDescriptor,
- activationGradients.GetTensorDescriptor(),
-@@ -628,11 +627,40 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- // here should be able to use inputTensorDescriptor
- cudnnTensorDescriptor_t activationBackwardDescriptor = inputTensorDescriptor;
-
-- // cudnnConvolutionBwdFilterPreference_t preference =
-- cudnnConvolutionBwdFilterPreference_t preferenceBwdFilter = (CNNOptions::ConvMaxWorkspaceSize != 0)
-- ? CUDNN_CONVOLUTION_BWD_FILTER_NO_WORKSPACE
-- : CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST;
--
-+#if (CUDNN_VERSION >= 8000)
-+ /**
-+ * I'm sure there may be a faster way, but this works
-+ */
-+ convRequestedAlgoCount = 6; // reset to max number of available backward algorithms
-+ cudnnConvolutionBwdDataAlgoPerf_t convPerfBwdFilterResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
-+ CUDNNCHECK(cudnnFindConvolutionBackwardFilterAlgorithm(
-+ cudnnHandle,
-+ activationBackwardDescriptor,
-+ activationGradients.GetTensorDescriptor(),
-+ convDescriptors->LayerDescriptor,
-+ convDescriptors->WeightsDescriptor,
-+ convRequestedAlgoCount,
-+ &algoCount,
-+ convPerfBwdFilterResults));
-+ // we could also do it with the expert mode (cudnnFindConvolutionForwardAlgorithmEx),
-+ // i.e.
-+ // CUDNNCHECK(cudnnFindConvolutionBackwardFilterAlgorithmEx(
-+ // cudnnHandle,
-+ // activationBackwardDescriptor,
-+ // &inputTensor,
-+ // activationGradients.GetTensorDescriptor(),
-+ // &activationGradients,
-+ // convDescriptors->LayerDescriptor,
-+ // convDescriptors->WeightsDescriptor,
-+ // &filters,
-+ // convRequestedAlgoCount,
-+ // &algoCount,
-+ // &convPerfBwdFilterResults,
-+ // &convWorkspace,
-+ // memLimit)); // use memLimit for workspace size
-+ // instead choose either fastest or lowest memory algo as per preference
-+ convWorkspace->AlgorithmBackward = convPerfBwdFilterResults[choose_algo(algoChoice, convPerfBwdFilterResults, memLimit)].algo;
-+#else
- CUDNNCHECK(cudnnGetConvolutionBackwardFilterAlgorithm(cudnnHandle,
- activationBackwardDescriptor,
- activationGradients.GetTensorDescriptor(),
-@@ -641,6 +669,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- preferenceBwdFilter,
- memLimit,
- &convWorkspace->HelperAlgorithm));
-+#endif
-
- std::cout << "CONV BWD Filter Algo used is " << convWorkspace->HelperAlgorithm << std::endl;
-
-
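This patch adds a third mode on top of the two from patch 02: with a positive workspace limit configured, the fastest successful algorithm that fits into the limit is taken. As a standalone sketch, using the same hypothetical PerfResult type as before:

    #include <cstddef>
    #include <limits>

    // Fastest successful result whose workspace fits into memLimit bytes; falls back to
    // index 0 if nothing qualifies, mirroring the lambda above (sketch, not TMVA code).
    template <typename PerfResult>
    int chooseWithinLimit(const PerfResult *results, int count, std::size_t memLimit)
    {
       int best = 0;
       float bestTime = std::numeric_limits<float>::max();
       for (int i = 0; i < count; ++i) {
          if (results[i].status != 0 || results[i].memory > memLimit) continue;
          if (results[i].time < bestTime) {
             bestTime = results[i].time;
             best = i;
          }
       }
       return best;
    }
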
-From a9d39cc9ccf9ae474d90b6671d3e0d69d4cf6872 Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Wed, 22 Jul 2020 17:11:30 +0300
-Subject: [PATCH 06/10] implement correct logic behind cudnn algorithm
- preference
-
----
- .../src/DNN/Architectures/Cudnn/Propagate.cu | 20 +++++++++----------
- 1 file changed, 10 insertions(+), 10 deletions(-)
-
-diff --git a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-index 2049e2b9195..b74c99d1a99 100644
---- a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-+++ b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-@@ -380,18 +380,8 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- #endif
- // decide on algorithm preference early
- if (CNNOptions::ConvMaxWorkspaceSize < 0) {
-- // no workspace case
- #if (CUDNN_VERSION >= 8000)
-- algoChoice = no_workspace;
--#else
-- preferenceFwd = CUDNN_CONVOLUTION_FWD_NO_WORKSPACE;
-- preferenceBwdData = CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE;
-- preferenceBwdFilter = CUDNN_CONVOLUTION_BWD_FILTER_NO_WORKSPACE;
--#endif
--
-- } else if (CNNOptions::ConvMaxWorkspaceSize == 0) {
- // fastest overall
--#if (CUDNN_VERSION >= 8000)
- algoChoice = fastest;
- #else
- preferenceFwd = CUDNN_CONVOLUTION_FWD_PREFER_FASTEST;
-@@ -399,6 +389,16 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- preferenceBwdFilter = CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST;
- #endif
-
-+ } else if (CNNOptions::ConvMaxWorkspaceSize == 0) {
-+ // no workspace case
-+#if (CUDNN_VERSION >= 8000)
-+ algoChoice = no_workspace;
-+#else
-+ preferenceFwd = CUDNN_CONVOLUTION_FWD_NO_WORKSPACE;
-+ preferenceBwdData = CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE;
-+ preferenceBwdFilter = CUDNN_CONVOLUTION_BWD_FILTER_NO_WORKSPACE;
-+#endif
-+
- } else {
- // fastest in memory limit
- #if (CUDNN_VERSION >= 8000)
-
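With this reordering the option reads: a negative CNNOptions::ConvMaxWorkspaceSize asks for the fastest algorithm regardless of workspace, zero requests no workspace at all, and a positive value acts as a byte limit. A compact illustration of the mapping; the enum and helper are hypothetical names, not TMVA code:

    // Mirrors the branch structure above, after this fix (sketch only).
    enum class AlgoPreference { Fastest, NoWorkspace, WorkspaceLimit };

    inline AlgoPreference preferenceFromOption(long maxWorkspaceSize)
    {
       if (maxWorkspaceSize < 0)
          return AlgoPreference::Fastest;        // negative: fastest, any workspace
       if (maxWorkspaceSize == 0)
          return AlgoPreference::NoWorkspace;    // zero: forbid a workspace
       return AlgoPreference::WorkspaceLimit;    // positive: limit in bytes
    }
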
-From 6282dfa816c7f51af5c0ecaa0065514e3f627631 Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Wed, 22 Jul 2020 18:51:56 +0300
-Subject: [PATCH 07/10] use decltype instead of auto, fix typos
-
----
- .../src/DNN/Architectures/Cudnn/Propagate.cu | 22 +++++++++----------
- 1 file changed, 11 insertions(+), 11 deletions(-)
-
-diff --git a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-index b74c99d1a99..6cefd72c099 100644
---- a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-+++ b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-@@ -343,29 +343,29 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- #if (CUDNN_VERSION >= 8000)
- enum algoPreference { no_workspace, fastest, workspace_limit };
- algoPreference algoChoice;
-- auto choose_algo = [](algoPreference const& algoPref, auto&& perfResults, size_t memLim = std::numeric_limits<size_t>::max()) -> int {
-+ auto choose_algo = [](algoPreference const& algoPref, int const algoCount, decltype(perfResults) const& perfResults, size_t memLim = std::numeric_limits<size_t>::max()) -> int {
- int algoIdx{0};
- if (algoPref == algoPreference::fastest) { // prefer fastest
- float temp_runtime{std::numeric_limits<float>::max()};
- for (int i = 0; i < algoCount; ++i) {
-- if (PerfResults[i].status == CUDNN_STATUS_SUCCESS && PerfResults[i].time < temp_runtime) {
-- temp_runtime = PerfResults[i].time;
-+ if (perfResults[i].status == CUDNN_STATUS_SUCCESS && perfResults[i].time < temp_runtime) {
-+ temp_runtime = perfResults[i].time;
- algoIdx = i;
- }
- }
- } else if (algoPref == algoPreference::workspace_limit) { // constrain to workspace size
- float temp_runtime{std::numeric_limits<float>::max()};
- for (int i = 0; i < algoCount; ++i) {
-- if (PerfResults[i].status == CUDNN_STATUS_SUCCESS && PerfResults[i].time < temp_runtime && PerfResults[i].memory <= memLim) {
-- temp_runtime = PerfResults[i].time;
-+ if (perfResults[i].status == CUDNN_STATUS_SUCCESS && perfResults[i].time < temp_runtime && perfResults[i].memory <= memLim) {
-+ temp_runtime = perfResults[i].time;
- algoIdx = i;
- }
- }
- } else { // prefer smallest workspace size
- size_t temp_memsize{std::numeric_limits<size_t>::max()};
- for (int i = 0; i < algoCount; ++i) {
-- if (PerfResults[i].status == CUDNN_STATUS_SUCCESS && PerfResults[i].memory < temp_memsize) {
-- temp_memsize = PerfResults[i].memory;
-+ if (perfResults[i].status == CUDNN_STATUS_SUCCESS && perfResults[i].memory < temp_memsize) {
-+ temp_memsize = perfResults[i].memory;
- algoIdx = i;
- }
- }
-@@ -494,7 +494,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- // &convWorkspace,
- // memLimit)); // use memLimit for workspace size
- // instead choose either fastest or lowest memory algo as per preference
-- convWorkspace->AlgorithmForward = convPerfResults[choose_algo(algoChoice, convPerfResults, memLimit)].algo;
-+ convWorkspace->AlgorithmForward = convPerfResults[choose_algo(algoChoice, algoCount, convPerfResults, memLimit)].algo;
- #else
- CUDNNCHECK(cudnnGetConvolutionForwardAlgorithm(
- cudnnHandle, inputTensorDescriptor, convDescriptors->WeightsDescriptor, convDescriptors->LayerDescriptor,
-@@ -585,7 +585,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- // &convWorkspace,
- // memLimit)); // use memLimit for workspace size
- // instead choose either fastest or lowest memory algo as per preference
-- convWorkspace->AlgorithmBackward = convPerfBwdDataResults[choose_algo(algoChoice, convPerfBwdDataResults, memLimit)].algo;
-+ convWorkspace->AlgorithmBackward = convPerfBwdDataResults[choose_algo(algoChoice, algoCount, convPerfBwdDataResults, memLimit)].algo;
- #else
- CUDNNCHECK(cudnnGetConvolutionBackwardDataAlgorithm(cudnnHandle,
- convDescriptors->WeightsDescriptor,
-@@ -632,7 +632,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- * I'm sure there may be a faster way, but this works
- */
- convRequestedAlgoCount = 6; // reset to max number of available backward algorithms
-- cudnnConvolutionBwdDataAlgoPerf_t convPerfBwdFilterResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
-+ cudnnConvolutionBwdFilterAlgoPerf_t convPerfBwdFilterResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
- CUDNNCHECK(cudnnFindConvolutionBackwardFilterAlgorithm(
- cudnnHandle,
- activationBackwardDescriptor,
-@@ -659,7 +659,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- // &convWorkspace,
- // memLimit)); // use memLimit for workspace size
- // instead choose either fastest or lowest memory algo as per preference
-- convWorkspace->AlgorithmBackward = convPerfBwdFilterResults[choose_algo(algoChoice, convPerfBwdFilterResults, memLimit)].algo;
-+ convWorkspace->AlgorithmBackward = convPerfBwdFilterResults[choose_algo(algoChoice, algoCount, convPerfBwdFilterResults, memLimit)].algo;
- #else
- CUDNNCHECK(cudnnGetConvolutionBackwardFilterAlgorithm(cudnnHandle,
- activationBackwardDescriptor,
-
-From 259c1c9c4d86391d1987f6635a2aece8cae587ac Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Wed, 22 Jul 2020 19:39:40 +0300
-Subject: [PATCH 08/10] assign backward filter algo to correct place
-
----
- tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-index 6cefd72c099..5a80dfbc03d 100644
---- a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-+++ b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-@@ -659,7 +659,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- // &convWorkspace,
- // memLimit)); // use memLimit for workspace size
- // instead choose either fastest or lowest memory algo as per preference
-- convWorkspace->AlgorithmBackward = convPerfBwdFilterResults[choose_algo(algoChoice, algoCount, convPerfBwdFilterResults, memLimit)].algo;
-+ convWorkspace->HelperAlgorithm = convPerfBwdFilterResults[choose_algo(algoChoice, algoCount, convPerfBwdFilterResults, memLimit)].algo;
- #else
- CUDNNCHECK(cudnnGetConvolutionBackwardFilterAlgorithm(cudnnHandle,
- activationBackwardDescriptor,
-
-From 2c109efea0e970b380a62f6102a286542676912a Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Thu, 23 Jul 2020 17:58:58 +0300
-Subject: [PATCH 09/10] make it compile and support C++11
-
----
- .../src/DNN/Architectures/Cudnn/Propagate.cu | 49 ++++++++++++-------
- 1 file changed, 30 insertions(+), 19 deletions(-)
-
-diff --git a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-index 5a80dfbc03d..66ce64a5efc 100644
---- a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-+++ b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-@@ -343,29 +343,37 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- #if (CUDNN_VERSION >= 8000)
- enum algoPreference { no_workspace, fastest, workspace_limit };
- algoPreference algoChoice;
-- auto choose_algo = [](algoPreference const& algoPref, int const algoCount, decltype(perfResults) const& perfResults, size_t memLim = std::numeric_limits<size_t>::max()) -> int {
-+ // C++11 lambdas cannot be templated, so we have to do this HORRIBLE stuff...
-+ union LocalPerf_t {
-+ // these three type are absolutely equivalent
-+ // and one can access them as they wish to get info
-+ cudnnConvolutionFwdAlgoPerf_t * fwd;
-+ cudnnConvolutionBwdFilterAlgoPerf_t * bwdFilter;
-+ cudnnConvolutionBwdDataAlgoPerf_t * bwdData;
-+ };
-+ auto choose_algo = [](algoPreference const & algoPref, int const algoCount, LocalPerf_t const & perfResults, size_t memLim = std::numeric_limits<size_t>::max()) -> int {
- int algoIdx{0};
- if (algoPref == algoPreference::fastest) { // prefer fastest
- float temp_runtime{std::numeric_limits<float>::max()};
- for (int i = 0; i < algoCount; ++i) {
-- if (perfResults[i].status == CUDNN_STATUS_SUCCESS && perfResults[i].time < temp_runtime) {
-- temp_runtime = perfResults[i].time;
-+ if (perfResults.fwd[i].status == CUDNN_STATUS_SUCCESS && perfResults.fwd[i].time < temp_runtime) {
-+ temp_runtime = perfResults.fwd[i].time;
- algoIdx = i;
- }
- }
- } else if (algoPref == algoPreference::workspace_limit) { // constrain to workspace size
- float temp_runtime{std::numeric_limits<float>::max()};
- for (int i = 0; i < algoCount; ++i) {
-- if (perfResults[i].status == CUDNN_STATUS_SUCCESS && perfResults[i].time < temp_runtime && perfResults[i].memory <= memLim) {
-- temp_runtime = perfResults[i].time;
-+ if (perfResults.fwd[i].status == CUDNN_STATUS_SUCCESS && perfResults.fwd[i].time < temp_runtime && perfResults.fwd[i].memory <= memLim) {
-+ temp_runtime = perfResults.fwd[i].time;
- algoIdx = i;
- }
- }
- } else { // prefer smallest workspace size
- size_t temp_memsize{std::numeric_limits<size_t>::max()};
- for (int i = 0; i < algoCount; ++i) {
-- if (perfResults[i].status == CUDNN_STATUS_SUCCESS && perfResults[i].memory < temp_memsize) {
-- temp_memsize = perfResults[i].memory;
-+ if (perfResults.fwd[i].status == CUDNN_STATUS_SUCCESS && perfResults.fwd[i].memory < temp_memsize) {
-+ temp_memsize = perfResults.fwd[i].memory;
- algoIdx = i;
- }
- }
-@@ -461,7 +469,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- int convRequestedAlgoCount{8}; // requestedAlgoCount is setting how many algorithms to try, can be tuned, fixed for now as all available
-
- int algoCount;
-- cudnnConvolutionFwdAlgoPerf_t convPerfResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
-+ cudnnConvolutionFwdAlgoPerf_t convFwdPerfResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
- CUDNNCHECK(
- cudnnFindConvolutionForwardAlgorithm(
- cudnnHandle,
-@@ -471,7 +479,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- outputTensor.GetTensorDescriptor(),
- convRequestedAlgoCount,
- &algoCount,
-- convPerfResults
-+ convFwdPerfResults
- )
- );
- // we could also do it with the expert mode (cudnnFindConvolutionForwardAlgorithmEx),
-@@ -490,11 +498,12 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- // &outputTensor,
- // convRequestedAlgoCount,
- // &algoCount,
-- // &convPerfResults,
-+ // &convFwdPerfResults,
- // &convWorkspace,
- // memLimit)); // use memLimit for workspace size
- // instead choose either fastest or lowest memory algo as per preference
-- convWorkspace->AlgorithmForward = convPerfResults[choose_algo(algoChoice, algoCount, convPerfResults, memLimit)].algo;
-+ LocalPerf_t fwdPerfResults{convFwdPerfResults};
-+ convWorkspace->AlgorithmForward = convFwdPerfResults[choose_algo(algoChoice, algoCount, fwdPerfResults, memLimit)].algo;
- #else
- CUDNNCHECK(cudnnGetConvolutionForwardAlgorithm(
- cudnnHandle, inputTensorDescriptor, convDescriptors->WeightsDescriptor, convDescriptors->LayerDescriptor,
-@@ -558,7 +567,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- * I'm sure there may be a faster way, but this works
- */
- convRequestedAlgoCount = 6; // reset to max number of available backward algorithms
-- cudnnConvolutionBwdDataAlgoPerf_t convPerfBwdDataResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
-+ cudnnConvolutionBwdDataAlgoPerf_t convBwdDataPerfResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
- CUDNNCHECK(cudnnFindConvolutionBackwardDataAlgorithm(
- cudnnHandle,
- convDescriptors->WeightsDescriptor,
-@@ -567,7 +576,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- activationGradientsBackwardDescriptor,
- convRequestedAlgoCount,
- &algoCount,
-- convPerfBwdDataResults));
-+ convBwdDataPerfResults));
- // we could also do it with the expert mode (cudnnFindConvolutionForwardAlgorithmEx),
- // i.e.
- // CUDNNCHECK(cudnnFindConvolutionBackwardDataAlgorithmEx(
-@@ -581,11 +590,12 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- // &inputTensor,
- // convRequestedAlgoCount,
- // &algoCount,
-- // &convPerfBwdResults,
-+ // &convBwdDataPerfResults,
- // &convWorkspace,
- // memLimit)); // use memLimit for workspace size
- // instead choose either fastest or lowest memory algo as per preference
-- convWorkspace->AlgorithmBackward = convPerfBwdDataResults[choose_algo(algoChoice, algoCount, convPerfBwdDataResults, memLimit)].algo;
-+ LocalPerf_t bwdDataPerfResults{convBwdDataPerfResults};
-+ convWorkspace->AlgorithmBackward = convBwdDataPerfResults[choose_algo(algoChoice, algoCount, bwdDataPerfResults, memLimit)].algo;
- #else
- CUDNNCHECK(cudnnGetConvolutionBackwardDataAlgorithm(cudnnHandle,
- convDescriptors->WeightsDescriptor,
-@@ -632,7 +642,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- * I'm sure there may be a faster way, but this works
- */
- convRequestedAlgoCount = 6; // reset to max number of available backward algorithms
-- cudnnConvolutionBwdFilterAlgoPerf_t convPerfBwdFilterResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
-+ cudnnConvolutionBwdFilterAlgoPerf_t convBwdFilterPerfResults[convRequestedAlgoCount]; // this will store metrics to choose convolution algorithm
- CUDNNCHECK(cudnnFindConvolutionBackwardFilterAlgorithm(
- cudnnHandle,
- activationBackwardDescriptor,
-@@ -641,7 +651,7 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- convDescriptors->WeightsDescriptor,
- convRequestedAlgoCount,
- &algoCount,
-- convPerfBwdFilterResults));
-+ convBwdFilterPerfResults));
- // we could also do it with the expert mode (cudnnFindConvolutionForwardAlgorithmEx),
- // i.e.
- // CUDNNCHECK(cudnnFindConvolutionBackwardFilterAlgorithmEx(
-@@ -655,11 +665,12 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- // &filters,
- // convRequestedAlgoCount,
- // &algoCount,
-- // &convPerfBwdFilterResults,
-+ // &convBwdFilterPerfResults,
- // &convWorkspace,
- // memLimit)); // use memLimit for workspace size
- // instead choose either fastest or lowest memory algo as per preference
-- convWorkspace->HelperAlgorithm = convPerfBwdFilterResults[choose_algo(algoChoice, algoCount, convPerfBwdFilterResults, memLimit)].algo;
-+ LocalPerf_t bwdFilterPerfResults{convBwdFilterPerfResults};
-+ convWorkspace->HelperAlgorithm = convBwdFilterPerfResults[choose_algo(algoChoice, algoCount, bwdFilterPerfResults, memLimit)].algo;
- #else
- CUDNNCHECK(cudnnGetConvolutionBackwardFilterAlgorithm(cudnnHandle,
- activationBackwardDescriptor,
-
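The union above exists only because the selection lambda has to accept three different perf-result element types under C++11. With C++14 generic lambdas the dispatch becomes unnecessary; a sketch of that alternative (not an option for the patch, which targets C++11, and superseded by the wrapper class in the next patch):

    #include <limits>

    // C++14 generic lambda: works for cudnnConvolutionFwdAlgoPerf_t,
    // cudnnConvolutionBwdDataAlgoPerf_t and cudnnConvolutionBwdFilterAlgoPerf_t alike,
    // since all three expose .status, .time and .memory. Only the "fastest" mode shown.
    auto chooseFastestIdx = [](const auto *perfResults, int algoCount) -> int {
       int algoIdx = 0;
       float bestTime = std::numeric_limits<float>::max();
       for (int i = 0; i < algoCount; ++i) {
          if (perfResults[i].status == 0 /* CUDNN_STATUS_SUCCESS */ &&
              perfResults[i].time < bestTime) {
             bestTime = perfResults[i].time;
             algoIdx = i;
          }
       }
       return algoIdx;
    };
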
-From 1f1dfbbac06c29df98bdebdd9367bf566f2e7ce8 Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Thu, 23 Jul 2020 21:37:33 +0300
-Subject: [PATCH 10/10] compiles completely
-
----
- .../src/DNN/Architectures/Cudnn/Propagate.cu | 83 ++++++++++---------
- 1 file changed, 46 insertions(+), 37 deletions(-)
-
-diff --git a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-index 66ce64a5efc..0694369860a 100644
---- a/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-+++ b/tmva/tmva/src/DNN/Architectures/Cudnn/Propagate.cu
-@@ -344,41 +344,50 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- enum algoPreference { no_workspace, fastest, workspace_limit };
- algoPreference algoChoice;
- // C++11 lambdas cannot be templated, so we have to do this HORRIBLE stuff...
-- union LocalPerf_t {
-- // these three type are absolutely equivalent
-- // and one can access them as they wish to get info
-- cudnnConvolutionFwdAlgoPerf_t * fwd;
-- cudnnConvolutionBwdFilterAlgoPerf_t * bwdFilter;
-- cudnnConvolutionBwdDataAlgoPerf_t * bwdData;
-- };
-- auto choose_algo = [](algoPreference const & algoPref, int const algoCount, LocalPerf_t const & perfResults, size_t memLim = std::numeric_limits<size_t>::max()) -> int {
-- int algoIdx{0};
-- if (algoPref == algoPreference::fastest) { // prefer fastest
-- float temp_runtime{std::numeric_limits<float>::max()};
-- for (int i = 0; i < algoCount; ++i) {
-- if (perfResults.fwd[i].status == CUDNN_STATUS_SUCCESS && perfResults.fwd[i].time < temp_runtime) {
-- temp_runtime = perfResults.fwd[i].time;
-- algoIdx = i;
-+ class LocalPerf {
-+ public:
-+ LocalPerf(cudnnConvolutionFwdAlgoPerf_t * fwd) {m_fwd = fwd;}
-+ LocalPerf(cudnnConvolutionBwdFilterAlgoPerf_t * bwdFilter) {m_bwdFilter = bwdFilter;}
-+ LocalPerf(cudnnConvolutionBwdDataAlgoPerf_t * bwdData) {m_bwdData = bwdData;}
-+ size_t getMemory(int i) {return m_fwd != nullptr ? m_fwd[i].memory : m_bwdFilter != nullptr ? m_bwdFilter[i].memory : m_bwdData != nullptr ? m_bwdData[i].memory : 0;}
-+ float getTime(int i) {return m_fwd != nullptr ? m_fwd[i].time : m_bwdFilter != nullptr ? m_bwdFilter[i].time : m_bwdData != nullptr ? m_bwdData[i].time : 0;}
-+ cudnnStatus_t getStatus(int i) {return m_fwd != nullptr ? m_fwd[i].status : m_bwdFilter != nullptr ? m_bwdFilter[i].status : m_bwdData != nullptr ? m_bwdData[i].status : CUDNN_STATUS_BAD_PARAM;}
-+ int getIdx(algoPreference const & algoPref, int const algoCount, size_t memLim = std::numeric_limits<size_t>::max()) {
-+ int algoIdx{0};
-+ if (algoPref == algoPreference::fastest) { // prefer fastest
-+ float temp_runtime{std::numeric_limits<float>::max()};
-+ for (int i = 0; i < algoCount; ++i) {
-+ if (getStatus(i) == CUDNN_STATUS_SUCCESS && getTime(i) < temp_runtime) {
-+ temp_runtime = getTime(i);
-+ algoIdx = i;
-+ }
- }
-- }
-- } else if (algoPref == algoPreference::workspace_limit) { // constrain to workspace size
-- float temp_runtime{std::numeric_limits<float>::max()};
-- for (int i = 0; i < algoCount; ++i) {
-- if (perfResults.fwd[i].status == CUDNN_STATUS_SUCCESS && perfResults.fwd[i].time < temp_runtime && perfResults.fwd[i].memory <= memLim) {
-- temp_runtime = perfResults.fwd[i].time;
-- algoIdx = i;
-+ } else if (algoPref == algoPreference::workspace_limit) { // constrain to workspace size
-+ float temp_runtime{std::numeric_limits<float>::max()};
-+ for (int i = 0; i < algoCount; ++i) {
-+ if (getStatus(i) == CUDNN_STATUS_SUCCESS && getTime(i) < temp_runtime && getMemory(i) <= memLim) {
-+ temp_runtime = getTime(i);
-+ algoIdx = i;
-+ }
- }
-- }
-- } else { // prefer smallest workspace size
-- size_t temp_memsize{std::numeric_limits<size_t>::max()};
-- for (int i = 0; i < algoCount; ++i) {
-- if (perfResults.fwd[i].status == CUDNN_STATUS_SUCCESS && perfResults.fwd[i].memory < temp_memsize) {
-- temp_memsize = perfResults.fwd[i].memory;
-- algoIdx = i;
-+ } else { // prefer smallest workspace size
-+ size_t temp_memsize{std::numeric_limits<size_t>::max()};
-+ for (int i = 0; i < algoCount; ++i) {
-+ if (getStatus(i) == CUDNN_STATUS_SUCCESS && getMemory(i) < temp_memsize) {
-+ temp_memsize = getMemory(i);
-+ algoIdx = i;
-+ }
- }
- }
-- }
-- return algoIdx;
-+ return algoIdx;
-+ };
-+ private:
-+ LocalPerf();
-+ // these three type are absolutely equivalent
-+ // and one can access them as they wish to get info
-+ cudnnConvolutionFwdAlgoPerf_t * m_fwd;
-+ cudnnConvolutionBwdFilterAlgoPerf_t * m_bwdFilter;
-+ cudnnConvolutionBwdDataAlgoPerf_t * m_bwdData;
- };
- #else
- // More detailed alternative: cudnnFindConvolutionForwardAlgorithm (only option in newer cuDNN versions)
-@@ -502,8 +511,8 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- // &convWorkspace,
- // memLimit)); // use memLimit for workspace size
- // instead choose either fastest or lowest memory algo as per preference
-- LocalPerf_t fwdPerfResults{convFwdPerfResults};
-- convWorkspace->AlgorithmForward = convFwdPerfResults[choose_algo(algoChoice, algoCount, fwdPerfResults, memLimit)].algo;
-+ LocalPerf fwdPerfResults{convFwdPerfResults};
-+ convWorkspace->AlgorithmForward = convFwdPerfResults[fwdPerfResults.getIdx(algoChoice, algoCount, memLimit)].algo;
- #else
- CUDNNCHECK(cudnnGetConvolutionForwardAlgorithm(
- cudnnHandle, inputTensorDescriptor, convDescriptors->WeightsDescriptor, convDescriptors->LayerDescriptor,
-@@ -594,8 +603,8 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- // &convWorkspace,
- // memLimit)); // use memLimit for workspace size
- // instead choose either fastest or lowest memory algo as per preference
-- LocalPerf_t bwdDataPerfResults{convBwdDataPerfResults};
-- convWorkspace->AlgorithmBackward = convBwdDataPerfResults[choose_algo(algoChoice, algoCount, bwdDataPerfResults, memLimit)].algo;
-+ LocalPerf bwdDataPerfResults{convBwdDataPerfResults};
-+ convWorkspace->AlgorithmBackward = convBwdDataPerfResults[bwdDataPerfResults.getIdx(algoChoice, algoCount, memLimit)].algo;
- #else
- CUDNNCHECK(cudnnGetConvolutionBackwardDataAlgorithm(cudnnHandle,
- convDescriptors->WeightsDescriptor,
-@@ -669,8 +678,8 @@ void TCudnn<AFloat>::InitializeConvWorkspace(TWorkspace * & workspace,
- // &convWorkspace,
- // memLimit)); // use memLimit for workspace size
- // instead choose either fastest or lowest memory algo as per preference
-- LocalPerf_t bwdFilterPerfResults{convBwdFilterPerfResults};
-- convWorkspace->HelperAlgorithm = convBwdFilterPerfResults[choose_algo(algoChoice, algoCount, bwdFilterPerfResults, memLimit)].algo;
-+ LocalPerf bwdFilterPerfResults{convBwdFilterPerfResults};
-+ convWorkspace->HelperAlgorithm = convBwdFilterPerfResults[bwdFilterPerfResults.getIdx(algoChoice, algoCount, memLimit)].algo;
- #else
- CUDNNCHECK(cudnnGetConvolutionBackwardFilterAlgorithm(cudnnHandle,
- activationBackwardDescriptor,
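For context on what the dropped adapt_tmva_to_support_cudnn8.patch did (presumably no longer needed with 6.22.02): it replaced the old choose_algo helper with a LocalPerf wrapper that scans the cudnnConvolution*AlgoPerf_t arrays returned by cudnnFindConvolution*Algorithm and returns the index of the preferred algorithm (fastest, fastest within a workspace limit, or smallest workspace). Below is a minimal stand-alone sketch of that selection pattern; it is not ROOT code, the pickAlgo helper and the hand-filled results in main() are illustrative, and it only covers the forward-convolution case.

// Sketch: pick the fastest successful cuDNN forward algorithm whose workspace
// fits a limit, else fall back to the smallest workspace that succeeded
// (a compressed variant of the LocalPerf::getIdx logic removed above).
#include <cudnn.h>     // only for the perf struct and enums; no GPU calls made
#include <cstdio>
#include <limits>
#include <vector>

// Returns an index into perf, or -1 if no algorithm succeeded.
static int pickAlgo(const std::vector<cudnnConvolutionFwdAlgoPerf_t> &perf,
                    size_t memLimit = std::numeric_limits<size_t>::max())
{
   int best = -1;
   float bestTime = std::numeric_limits<float>::max();
   for (size_t i = 0; i < perf.size(); ++i) {
      if (perf[i].status != CUDNN_STATUS_SUCCESS) continue;
      if (perf[i].memory > memLimit) continue;          // workspace constraint
      if (perf[i].time < bestTime) { bestTime = perf[i].time; best = (int)i; }
   }
   if (best >= 0) return best;
   // Nothing fits the limit: take the smallest workspace among successful results.
   size_t bestMem = std::numeric_limits<size_t>::max();
   for (size_t i = 0; i < perf.size(); ++i) {
      if (perf[i].status == CUDNN_STATUS_SUCCESS && perf[i].memory < bestMem) {
         bestMem = perf[i].memory;
         best = (int)i;
      }
   }
   return best;
}

int main()
{
   // Hand-filled entries stand in for cudnnFindConvolutionForwardAlgorithm output.
   std::vector<cudnnConvolutionFwdAlgoPerf_t> perf(2);
   perf[0].algo   = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM;
   perf[0].status = CUDNN_STATUS_SUCCESS;
   perf[0].time   = 0.8f;
   perf[0].memory = 1 << 20;   // 1 MiB workspace
   perf[1].algo   = CUDNN_CONVOLUTION_FWD_ALGO_GEMM;
   perf[1].status = CUDNN_STATUS_SUCCESS;
   perf[1].time   = 0.3f;      // faster, but needs more workspace
   perf[1].memory = 1 << 26;   // 64 MiB workspace
   int idx = pickAlgo(perf, /*memLimit=*/1 << 22);      // 4 MiB limit -> index 0
   std::printf("chosen index: %d (algo %d)\n", idx,
               idx >= 0 ? (int)perf[idx].algo : -1);
   return 0;
}

Since the perf entries are filled by hand and no cuDNN functions are called, this only needs the cuDNN headers to compile and does not touch a GPU.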
Deleted: fix_cuda_cxx17.patch
===================================================================
--- fix_cuda_cxx17.patch 2020-09-01 08:14:08 UTC (rev 695266)
+++ fix_cuda_cxx17.patch 2020-09-01 08:53:50 UTC (rev 695267)
@@ -1,417 +0,0 @@
-From 62fff7d03d8785a69f56115b27081fe1081edc9b Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Fri, 24 Jul 2020 18:23:49 +0300
-Subject: [PATCH 1/6] fix regression in f8edeb9 not using correct string_view
- when CUDA C++ standard allows it
-
----
- cmake/modules/RootConfiguration.cmake | 3 +++
- config/RConfigure.in | 1 +
- tmva/tmva/inc/TMVA/DNN/Architectures/Cuda/CudaMatrix.h | 3 +++
- tmva/tmva/src/DNN/Architectures/Cuda.cu | 3 +++
- tmva/tmva/src/DNN/Architectures/Cudnn.cu | 5 ++++-
- 5 files changed, 14 insertions(+), 1 deletion(-)
-
-diff --git a/cmake/modules/RootConfiguration.cmake b/cmake/modules/RootConfiguration.cmake
-index 1fe84d1515a..a19eafabb71 100644
---- a/cmake/modules/RootConfiguration.cmake
-+++ b/cmake/modules/RootConfiguration.cmake
-@@ -531,6 +531,9 @@ endif()
- if(found_stdstringview)
- CHECK_CXX_SOURCE_COMPILES("#include <string_view>
- int main() { size_t pos; std::string_view str; std::stod(str,&pos); return 0;}" found_stod_stringview)
-+ if(CMAKE_CUDA_STANDARD GREATER_EQUAL CMAKE_CXX_STANDARD)
-+ set(cudahasstdstringview define)
-+ endif()
- elseif(found_stdexpstringview)
- CHECK_CXX_SOURCE_COMPILES("#include <experimental/string_view>
- int main() { size_t pos; std::experimental::string_view str; std::stod(str,&pos); return 0;}" found_stod_stringview)
-diff --git a/config/RConfigure.in b/config/RConfigure.in
-index 14921f244b0..43daff4cd66 100644
---- a/config/RConfigure.in
-+++ b/config/RConfigure.in
-@@ -33,6 +33,7 @@
- #@usecxxmodules@ R__USE_CXXMODULES /**/
- #@uselibc++@ R__USE_LIBCXX /**/
- #@hasstdstringview@ R__HAS_STD_STRING_VIEW /**/
-+#@cudahasstdstringview@ R__CUDA_HAS_STD_STRING_VIEW /**/
- #@hasstdexpstringview@ R__HAS_STD_EXPERIMENTAL_STRING_VIEW /**/
- #@hasstodstringview@ R__HAS_STOD_STRING_VIEW /**/
- #@hasopplusequalstringview@ R__HAS_OP_EQUAL_PLUS_STRING_VIEW /**/
-diff --git a/tmva/tmva/inc/TMVA/DNN/Architectures/Cuda/CudaMatrix.h b/tmva/tmva/inc/TMVA/DNN/Architectures/Cuda/CudaMatrix.h
-index 3c224f185f1..72581eaedcd 100644
---- a/tmva/tmva/inc/TMVA/DNN/Architectures/Cuda/CudaMatrix.h
-+++ b/tmva/tmva/inc/TMVA/DNN/Architectures/Cuda/CudaMatrix.h
-@@ -20,11 +20,14 @@
- #define TMVA_DNN_ARCHITECTURES_CUDA_CUDAMATRIX
-
- // in case we compile C++ code with std-17 and cuda with lower standard
-+// use experimental string_view, otherwise keep as is
- #include "RConfigure.h"
- #ifdef R__HAS_STD_STRING_VIEW
-+#ifndef R__CUDA_HAS_STD_STRING_VIEW
- #undef R__HAS_STD_STRING_VIEW
- #define R__HAS_STD_EXPERIMENTAL_STRING_VIEW
- #endif
-+#endif
-
- #include "cuda.h"
- #include "cuda_runtime.h"
-diff --git a/tmva/tmva/src/DNN/Architectures/Cuda.cu b/tmva/tmva/src/DNN/Architectures/Cuda.cu
-index 56daac79850..547460017b2 100644
---- a/tmva/tmva/src/DNN/Architectures/Cuda.cu
-+++ b/tmva/tmva/src/DNN/Architectures/Cuda.cu
-@@ -15,11 +15,14 @@
- /////////////////////////////////////////////////////////////////
-
- // in case we compile C++ code with std-17 and cuda with lower standard
-+// use experimental string_view, otherwise keep as is
- #include "RConfigure.h"
- #ifdef R__HAS_STD_STRING_VIEW
-+#ifndef R__CUDA_HAS_STD_STRING_VIEW
- #undef R__HAS_STD_STRING_VIEW
- #define R__HAS_STD_EXPERIMENTAL_STRING_VIEW
- #endif
-+#endif
-
- #include "TMVA/DNN/Architectures/Cuda.h"
- #include "Cuda/Propagation.cu"
-diff --git a/tmva/tmva/src/DNN/Architectures/Cudnn.cu b/tmva/tmva/src/DNN/Architectures/Cudnn.cu
-index 4b32d4b2d8e..15e4277d6be 100644
---- a/tmva/tmva/src/DNN/Architectures/Cudnn.cu
-+++ b/tmva/tmva/src/DNN/Architectures/Cudnn.cu
-@@ -15,11 +15,14 @@
- ///////////////////////////////////////////////////////////////////
-
- // in case we compile C++ code with std-17 and cuda with lower standard
-+// use experimental string_view, otherwise keep as is
- #include "RConfigure.h"
- #ifdef R__HAS_STD_STRING_VIEW
-+#ifndef R__CUDA_HAS_STD_STRING_VIEW
- #undef R__HAS_STD_STRING_VIEW
- #define R__HAS_STD_EXPERIMENTAL_STRING_VIEW
- #endif
-+#endif
-
- #include "TMVA/DNN/Architectures/TCudnn.h"
- #include "Cudnn/Propagate.cu"
-@@ -54,4 +57,4 @@ template class TCudnn<Double_t>;
- // size_t TCudnn<Double_t>::CNNOptions::ConvMaxWorkspaceSize = 0;
-
- } // end namespace DNN
--} // end namespace TMVA
-\ No newline at end of file
-+} // end namespace TMVA
-
-From d30bf0190f668434f23875e201a80450b6d2dddb Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Fri, 24 Jul 2020 18:53:53 +0300
-Subject: [PATCH 2/6] set R__CUDA_HAS_STD_STRING_VIEW to undef in all other
- cases
-
----
- cmake/modules/RootConfiguration.cmake | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/cmake/modules/RootConfiguration.cmake b/cmake/modules/RootConfiguration.cmake
-index a19eafabb71..c9f7206d12c 100644
---- a/cmake/modules/RootConfiguration.cmake
-+++ b/cmake/modules/RootConfiguration.cmake
-@@ -528,6 +528,7 @@ else()
- endif()
- endif()
-
-+set(cudahasstdstringview undef)
- if(found_stdstringview)
- CHECK_CXX_SOURCE_COMPILES("#include <string_view>
- int main() { size_t pos; std::string_view str; std::stod(str,&pos); return 0;}" found_stod_stringview)
-
-From 5e473046f480af31e038b04a0a8101f3c86ab590 Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Sat, 25 Jul 2020 01:48:18 +0300
-Subject: [PATCH 3/6] re-locate to correct place in configuration and check by
- compilation
-
----
- cmake/modules/RootConfiguration.cmake | 43 ++++++++++++++++++++++++---
- 1 file changed, 39 insertions(+), 4 deletions(-)
-
-diff --git a/cmake/modules/RootConfiguration.cmake b/cmake/modules/RootConfiguration.cmake
-index c9f7206d12c..d306245199c 100644
---- a/cmake/modules/RootConfiguration.cmake
-+++ b/cmake/modules/RootConfiguration.cmake
-@@ -512,10 +512,49 @@ unset(found_stdexpstringview CACHE)
- unset(found_stod_stringview CACHE)
-
- set(hasstdexpstringview undef)
-+set(cudahasstdstringview undef)
- CHECK_CXX_SOURCE_COMPILES("#include <string_view>
- int main() { char arr[3] = {'B', 'a', 'r'}; std::string_view strv(arr, sizeof(arr)); return 0;}" found_stdstringview)
- if(found_stdstringview)
- set(hasstdstringview define)
-+ if(cuda)
-+ if(CMAKE_CUDA_STANDARD GREATER_EQUAL CMAKE_CXX_STANDARD)
-+ # CUDA_NVCC_EXECUTABLE
-+ if(DEFINED ENV{CUDA_NVCC_EXECUTABLE})
-+ set(CUDA_NVCC_EXECUTABLE "$ENV{CUDA_NVCC_EXECUTABLE}" CACHE FILEPATH "The CUDA compiler")
-+ else()
-+ find_program(CUDA_NVCC_EXECUTABLE
-+ NAMES nvcc nvcc.exe
-+ PATHS "${CUDA_TOOLKIT_ROOT_DIR}"
-+ ENV CUDA_TOOKIT_ROOT
-+ ENV CUDA_PATH
-+ ENV CUDA_BIN_PATH
-+ PATH_SUFFIXES bin bin64
-+ DOC "The CUDA compiler"
-+ NO_DEFAULT_PATH
-+ )
-+ find_program(CUDA_NVCC_EXECUTABLE
-+ NAMES nvcc nvcc.exe
-+ PATHS /opt/cuda/bin
-+ PATH_SUFFIXES cuda/bin
-+ DOC "The CUDA compiler"
-+ )
-+ # Search default search paths, after we search our own set of paths.
-+ find_program(CUDA_NVCC_EXECUTABLE nvcc)
-+ endif()
-+ mark_as_advanced(CUDA_NVCC_EXECUTABLE)
-+ if(CUDA_NVCC_EXECUTABLE)
-+ execute_process(COMMAND "echo"
-+ "-e" "#include <string_view>\nint main() { char arr[3] = {'B', 'a', 'r'}; std::string_view strv(arr, sizeof(arr)); return 0;}"
-+ "|"
-+ "${CUDA_NVCC_EXECUTABLE}" "-std=c++${CMAKE_CUDA_STANDARD}" "-o" "/dev/null" "-x" "c++" "-"
-+ RESULT_VARIABLE nvcc_compiled_string_view)
-+ if (nvcc_compiled_string_view EQUAL "0")
-+ set(cudahasstdstringview define)
-+ endif()
-+ endif()
-+ endif()
-+ endif()
- else()
- set(hasstdstringview undef)
-
-@@ -528,13 +567,9 @@ else()
- endif()
- endif()
-
--set(cudahasstdstringview undef)
- if(found_stdstringview)
- CHECK_CXX_SOURCE_COMPILES("#include <string_view>
- int main() { size_t pos; std::string_view str; std::stod(str,&pos); return 0;}" found_stod_stringview)
-- if(CMAKE_CUDA_STANDARD GREATER_EQUAL CMAKE_CXX_STANDARD)
-- set(cudahasstdstringview define)
-- endif()
- elseif(found_stdexpstringview)
- CHECK_CXX_SOURCE_COMPILES("#include <experimental/string_view>
- int main() { size_t pos; std::experimental::string_view str; std::stod(str,&pos); return 0;}" found_stod_stringview)
-
-From 32e1f3c0cec557c4fa82a07992ae9c1cd4075fcf Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Sat, 25 Jul 2020 01:50:20 +0300
-Subject: [PATCH 4/6] no need to compare C++ standard between CUDA and host
- anymore
-
----
- cmake/modules/RootConfiguration.cmake | 66 +++++++++++++--------------
- 1 file changed, 32 insertions(+), 34 deletions(-)
-
-diff --git a/cmake/modules/RootConfiguration.cmake b/cmake/modules/RootConfiguration.cmake
-index d306245199c..2eff954ef94 100644
---- a/cmake/modules/RootConfiguration.cmake
-+++ b/cmake/modules/RootConfiguration.cmake
-@@ -518,40 +518,38 @@ CHECK_CXX_SOURCE_COMPILES("#include <string_view>
- if(found_stdstringview)
- set(hasstdstringview define)
- if(cuda)
-- if(CMAKE_CUDA_STANDARD GREATER_EQUAL CMAKE_CXX_STANDARD)
-- # CUDA_NVCC_EXECUTABLE
-- if(DEFINED ENV{CUDA_NVCC_EXECUTABLE})
-- set(CUDA_NVCC_EXECUTABLE "$ENV{CUDA_NVCC_EXECUTABLE}" CACHE FILEPATH "The CUDA compiler")
-- else()
-- find_program(CUDA_NVCC_EXECUTABLE
-- NAMES nvcc nvcc.exe
-- PATHS "${CUDA_TOOLKIT_ROOT_DIR}"
-- ENV CUDA_TOOKIT_ROOT
-- ENV CUDA_PATH
-- ENV CUDA_BIN_PATH
-- PATH_SUFFIXES bin bin64
-- DOC "The CUDA compiler"
-- NO_DEFAULT_PATH
-- )
-- find_program(CUDA_NVCC_EXECUTABLE
-- NAMES nvcc nvcc.exe
-- PATHS /opt/cuda/bin
-- PATH_SUFFIXES cuda/bin
-- DOC "The CUDA compiler"
-- )
-- # Search default search paths, after we search our own set of paths.
-- find_program(CUDA_NVCC_EXECUTABLE nvcc)
-- endif()
-- mark_as_advanced(CUDA_NVCC_EXECUTABLE)
-- if(CUDA_NVCC_EXECUTABLE)
-- execute_process(COMMAND "echo"
-- "-e" "#include <string_view>\nint main() { char arr[3] = {'B', 'a', 'r'}; std::string_view strv(arr, sizeof(arr)); return 0;}"
-- "|"
-- "${CUDA_NVCC_EXECUTABLE}" "-std=c++${CMAKE_CUDA_STANDARD}" "-o" "/dev/null" "-x" "c++" "-"
-- RESULT_VARIABLE nvcc_compiled_string_view)
-- if (nvcc_compiled_string_view EQUAL "0")
-- set(cudahasstdstringview define)
-- endif()
-+ # CUDA_NVCC_EXECUTABLE
-+ if(DEFINED ENV{CUDA_NVCC_EXECUTABLE})
-+ set(CUDA_NVCC_EXECUTABLE "$ENV{CUDA_NVCC_EXECUTABLE}" CACHE FILEPATH "The CUDA compiler")
-+ else()
-+ find_program(CUDA_NVCC_EXECUTABLE
-+ NAMES nvcc nvcc.exe
-+ PATHS "${CUDA_TOOLKIT_ROOT_DIR}"
-+ ENV CUDA_TOOKIT_ROOT
-+ ENV CUDA_PATH
-+ ENV CUDA_BIN_PATH
-+ PATH_SUFFIXES bin bin64
-+ DOC "The CUDA compiler"
-+ NO_DEFAULT_PATH
-+ )
-+ find_program(CUDA_NVCC_EXECUTABLE
-+ NAMES nvcc nvcc.exe
-+ PATHS /opt/cuda/bin
-+ PATH_SUFFIXES cuda/bin
-+ DOC "The CUDA compiler"
-+ )
-+ # Search default search paths, after we search our own set of paths.
-+ find_program(CUDA_NVCC_EXECUTABLE nvcc)
-+ endif()
-+ mark_as_advanced(CUDA_NVCC_EXECUTABLE)
-+ if(CUDA_NVCC_EXECUTABLE)
-+ execute_process(COMMAND "echo"
-+ "-e" "#include <string_view>\nint main() { char arr[3] = {'B', 'a', 'r'}; std::string_view strv(arr, sizeof(arr)); return 0;}"
-+ "|"
-+ "${CUDA_NVCC_EXECUTABLE}" "-std=c++${CMAKE_CUDA_STANDARD}" "-o" "/dev/null" "-x" "c++" "-"
-+ RESULT_VARIABLE nvcc_compiled_string_view)
-+ if (nvcc_compiled_string_view EQUAL "0")
-+ set(cudahasstdstringview define)
- endif()
- endif()
- endif()
-
-From fccf1ba488fb7da334ef89e9df098bbdeee83ca5 Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Sat, 25 Jul 2020 02:39:22 +0300
-Subject: [PATCH 5/6] find NVCC early; fix CMake execute process
-
----
- cmake/modules/RootConfiguration.cmake | 32 +++------------------
- cmake/modules/SearchInstalledSoftware.cmake | 24 ++++++++++++++++
- 2 files changed, 28 insertions(+), 28 deletions(-)
-
-diff --git a/cmake/modules/RootConfiguration.cmake b/cmake/modules/RootConfiguration.cmake
-index 2eff954ef94..11d1e6b76c0 100644
---- a/cmake/modules/RootConfiguration.cmake
-+++ b/cmake/modules/RootConfiguration.cmake
-@@ -518,35 +518,11 @@ CHECK_CXX_SOURCE_COMPILES("#include <string_view>
- if(found_stdstringview)
- set(hasstdstringview define)
- if(cuda)
-- # CUDA_NVCC_EXECUTABLE
-- if(DEFINED ENV{CUDA_NVCC_EXECUTABLE})
-- set(CUDA_NVCC_EXECUTABLE "$ENV{CUDA_NVCC_EXECUTABLE}" CACHE FILEPATH "The CUDA compiler")
-- else()
-- find_program(CUDA_NVCC_EXECUTABLE
-- NAMES nvcc nvcc.exe
-- PATHS "${CUDA_TOOLKIT_ROOT_DIR}"
-- ENV CUDA_TOOKIT_ROOT
-- ENV CUDA_PATH
-- ENV CUDA_BIN_PATH
-- PATH_SUFFIXES bin bin64
-- DOC "The CUDA compiler"
-- NO_DEFAULT_PATH
-- )
-- find_program(CUDA_NVCC_EXECUTABLE
-- NAMES nvcc nvcc.exe
-- PATHS /opt/cuda/bin
-- PATH_SUFFIXES cuda/bin
-- DOC "The CUDA compiler"
-- )
-- # Search default search paths, after we search our own set of paths.
-- find_program(CUDA_NVCC_EXECUTABLE nvcc)
-- endif()
-- mark_as_advanced(CUDA_NVCC_EXECUTABLE)
- if(CUDA_NVCC_EXECUTABLE)
-- execute_process(COMMAND "echo"
-- "-e" "#include <string_view>\nint main() { char arr[3] = {'B', 'a', 'r'}; std::string_view strv(arr, sizeof(arr)); return 0;}"
-- "|"
-- "${CUDA_NVCC_EXECUTABLE}" "-std=c++${CMAKE_CUDA_STANDARD}" "-o" "/dev/null" "-x" "c++" "-"
-+ execute_process(
-+ COMMAND "echo"
-+ "-e" "#include <string_view>\nint main() { char arr[3] = {'B', 'a', 'r'}; std::string_view strv(arr, sizeof(arr)); return 0;}"
-+ COMMAND "${CUDA_NVCC_EXECUTABLE}" "-std=c++${CMAKE_CUDA_STANDARD}" "-o" "/dev/null" "-x" "c++" "-"
- RESULT_VARIABLE nvcc_compiled_string_view)
- if (nvcc_compiled_string_view EQUAL "0")
- set(cudahasstdstringview define)
-diff --git a/cmake/modules/SearchInstalledSoftware.cmake b/cmake/modules/SearchInstalledSoftware.cmake
-index 12f9d65371b..297578d3437 100644
---- a/cmake/modules/SearchInstalledSoftware.cmake
-+++ b/cmake/modules/SearchInstalledSoftware.cmake
-@@ -1462,6 +1462,30 @@ if(cuda OR tmva-gpu)
- endif()
- enable_language(CUDA)
- set(cuda ON CACHE BOOL "Found Cuda for TMVA GPU" FORCE)
-+ # CUDA_NVCC_EXECUTABLE
-+ if(DEFINED ENV{CUDA_NVCC_EXECUTABLE})
-+ set(CUDA_NVCC_EXECUTABLE "$ENV{CUDA_NVCC_EXECUTABLE}" CACHE FILEPATH "The CUDA compiler")
-+ else()
-+ find_program(CUDA_NVCC_EXECUTABLE
-+ NAMES nvcc nvcc.exe
-+ PATHS "${CUDA_TOOLKIT_ROOT_DIR}"
-+ ENV CUDA_TOOKIT_ROOT
-+ ENV CUDA_PATH
-+ ENV CUDA_BIN_PATH
-+ PATH_SUFFIXES bin bin64
-+ DOC "The CUDA compiler"
-+ NO_DEFAULT_PATH
-+ )
-+ find_program(CUDA_NVCC_EXECUTABLE
-+ NAMES nvcc nvcc.exe
-+ PATHS /opt/cuda/bin
-+ PATH_SUFFIXES cuda/bin
-+ DOC "The CUDA compiler"
-+ )
-+ # Search default search paths, after we search our own set of paths.
-+ find_program(CUDA_NVCC_EXECUTABLE nvcc)
-+ endif()
-+ mark_as_advanced(CUDA_NVCC_EXECUTABLE)
- ###
- ### look for package CuDNN
- if (cudnn)
-
-From bd7728fe40fa98640354baeab6504b4b499327c0 Mon Sep 17 00:00:00 2001
-From: Konstantin Gizdov <kgizdov at gmail.com>
-Date: Sat, 25 Jul 2020 02:44:25 +0300
-Subject: [PATCH 6/6] null sinks differ on different platforms
-
----
- cmake/modules/RootConfiguration.cmake | 8 +++++++-
- 1 file changed, 7 insertions(+), 1 deletion(-)
-
-diff --git a/cmake/modules/RootConfiguration.cmake b/cmake/modules/RootConfiguration.cmake
-index 11d1e6b76c0..5a3ff396780 100644
---- a/cmake/modules/RootConfiguration.cmake
-+++ b/cmake/modules/RootConfiguration.cmake
-@@ -519,11 +519,17 @@ if(found_stdstringview)
- set(hasstdstringview define)
- if(cuda)
- if(CUDA_NVCC_EXECUTABLE)
-+ if (WIN32)
-+ set(PLATFORM_NULL_FILE "nul")
-+ else()
-+ set(PLATFORM_NULL_FILE "/dev/null")
-+ endif()
- execute_process(
- COMMAND "echo"
- "-e" "#include <string_view>\nint main() { char arr[3] = {'B', 'a', 'r'}; std::string_view strv(arr, sizeof(arr)); return 0;}"
-- COMMAND "${CUDA_NVCC_EXECUTABLE}" "-std=c++${CMAKE_CUDA_STANDARD}" "-o" "/dev/null" "-x" "c++" "-"
-+ COMMAND "${CUDA_NVCC_EXECUTABLE}" "-std=c++${CMAKE_CUDA_STANDARD}" "-o" "${PLATFORM_NULL_FILE}" "-x" "c++" "-"
- RESULT_VARIABLE nvcc_compiled_string_view)
-+ unset(PLATFORM_NULL_FILE CACHE)
- if (nvcc_compiled_string_view EQUAL "0")
- set(cudahasstdstringview define)
- endif()
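For context on the dropped fix_cuda_cxx17.patch: it handled the case where the host code is built as C++17 but nvcc runs with a lower standard. The configure step probes whether nvcc itself can compile std::string_view (recording the result as R__CUDA_HAS_STD_STRING_VIEW), and the CUDA translation units fall back to std::experimental::string_view only when that probe fails. A compact illustration of both halves follows; it is a sketch, not the actual ROOT headers, and the sv_t alias, the probe.cu file name, and faking the RConfigure.h macros on the command line are all assumptions made for the example.

// Sketch of the fallback the patch applies in the CUDA sources.
// R__HAS_STD_STRING_VIEW / R__CUDA_HAS_STD_STRING_VIEW normally come from
// RConfigure.h; here they can be faked on the command line, e.g.
//   nvcc -std=c++14 -DR__HAS_STD_STRING_VIEW -x cu probe.cu
//   g++  -std=c++17 probe.cu -o probe
#if defined(R__HAS_STD_STRING_VIEW) && !defined(R__CUDA_HAS_STD_STRING_VIEW)
#  undef  R__HAS_STD_STRING_VIEW
#  define R__HAS_STD_EXPERIMENTAL_STRING_VIEW
#endif

#ifdef R__HAS_STD_EXPERIMENTAL_STRING_VIEW
#  include <experimental/string_view>
   using sv_t = std::experimental::string_view;   // pre-C++17 fallback
#else
#  include <string_view>
   using sv_t = std::string_view;                 // nvcc or host compiler at C++17
#endif

#include <cstdio>

int main()
{
   // Same construct the configure-time check pipes into nvcc to set
   // R__CUDA_HAS_STD_STRING_VIEW.
   char arr[3] = {'B', 'a', 'r'};
   sv_t strv(arr, sizeof(arr));
   std::printf("%zu\n", strv.size());
   return 0;
}

Built as plain C++17 (or with an nvcc that passes the probe) it uses std::string_view; built with only -DR__HAS_STD_STRING_VIEW at a pre-C++17 standard it drops to the experimental header, which is exactly the situation the deleted patch was working around.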