template <typename AFloat>
template <typename RNNLayer>
void TCudnn<AFloat>::InitializeRecurrentTensors(RNNLayer *layer)
{
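   // Re-shape the layer output (and its gradient) on top of the existing
   // device buffer; when the layer does not return the full sequence only
   // the last time step is kept.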
   size_t timeSteps = (layer->DoesReturnSequence()) ? layer->GetTimeSteps() : 1;
   layer->GetOutput() =
      Tensor_t(layer->GetOutput().GetDeviceBuffer(),
               {layer->GetBatchSize(), timeSteps, layer->GetStateSize()}, GetTensorLayout());
   layer->GetActivationGradients() =
      Tensor_t(layer->GetActivationGradients().GetDeviceBuffer(),
               {layer->GetBatchSize(), timeSteps, layer->GetStateSize()}, GetTensorLayout());
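   // Re-instantiate the weight and bias matrices as views on their existing
   // device buffers, so no extra memory is allocated.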
   for (size_t i = 0; i < layer->GetWeights().size(); ++i) {
      auto &w = layer->GetWeightsAt(i);
      w = Tensor_t(layer->GetWeightsAt(i).GetDeviceBuffer(),
                   {layer->GetWeightsAt(i).GetNrows(), layer->GetWeightsAt(i).GetNcols()},
                   GetTensorLayout(), 0, 0);
   }
   for (size_t i = 0; i < layer->GetBiases().size(); ++i) {
      auto &b = layer->GetBiasesAt(i);
      b = Tensor_t(layer->GetBiasesAt(i).GetDeviceBuffer(), {layer->GetStateSize(), 1}, GetTensorLayout(), 0, 0);
   }
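   // Input/output workspace tensors in the {timeSteps, batchSize, size}
   // ordering that the cuDNN RNN API expects.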
   layer->GetX() = Tensor_t({layer->GetTimeSteps(), layer->GetBatchSize(), layer->GetInputSize()}, GetTensorLayout());
   layer->GetY() = Tensor_t({layer->GetTimeSteps(), layer->GetBatchSize(), layer->GetStateSize()}, GetTensorLayout());
   layer->GetDX() = Tensor_t({layer->GetTimeSteps(), layer->GetBatchSize(), layer->GetInputSize()}, GetTensorLayout());
   layer->GetDY() = Tensor_t({layer->GetTimeSteps(), layer->GetBatchSize(), layer->GetStateSize()}, GetTensorLayout());
}
template <typename AFloat>
template <typename RNNLayer>
void TCudnn<AFloat>::InitializeRecurrentDescriptors(TDescriptors *&descriptors, RNNLayer *layer)
{
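   // Create the RNN descriptor and the dropout descriptor (the latter is
   // required by the cuDNN RNN API even when dropout is not used).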
   auto rnnDescriptors = new RNNDescriptors_t();
   CUDNNCHECK(cudnnCreateRNNDescriptor(&rnnDescriptors->LayerDescriptor));
   CUDNNCHECK(cudnnCreateDropoutDescriptor(&rnnDescriptors->HelperDescriptor));
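   // Deduce the cell type from the layer template parameter.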
   enum RNNType { kRNN, kLSTM, kGRU };
   RNNType rnn_type = kRNN;
   if (std::is_same<RNNLayer, LSTMLayer_t>::value) rnn_type = kLSTM;
   if (std::is_same<RNNLayer, GRULayer_t>::value) rnn_type = kGRU;
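   // Dropout is disabled (probability zero), so no state buffer is passed,
   // but the descriptor still has to be configured.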
   cudnnHandle_t handle = layer->GetOutput().GetCudnnHandle();
   float dropoutProb = 0.0;
   void *dropoutStates = nullptr;
   size_t dropoutStateSize = 0;

   CUDNNCHECK(cudnnDropoutGetStatesSize(handle, &dropoutStateSize));

   unsigned long long seed = GetRandomGenerator().GetSeed();
   CUDNNCHECK(cudnnSetDropoutDescriptor(rnnDescriptors->HelperDescriptor, handle, dropoutProb, dropoutStates,
                                        dropoutStateSize, seed));
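   // Define the RNN topology: hidden size, number of stacked layers, input
   // mode and direction. Only single-layer, unidirectional networks are
   // configured here.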
   int hiddenSize = layer->GetStateSize();
   int numLayers = 1;

   cudnnRNNInputMode_t inputMode = CUDNN_LINEAR_INPUT;
   cudnnDirectionMode_t direction = CUDNN_UNIDIRECTIONAL;
   bool bidirectional = (direction == CUDNN_BIDIRECTIONAL);

   cudnnRNNMode_t mode = CUDNN_RNN_TANH;
   if (rnn_type == kLSTM) mode = CUDNN_LSTM;
   if (rnn_type == kGRU) mode = CUDNN_GRU;

   cudnnRNNAlgo_t algo = CUDNN_RNN_ALGO_STANDARD;
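   // Number of linear layers (weight matrices) per cell: 2 for a plain RNN
   // (input + recurrent), 6 for GRU (3 gates) and 8 for LSTM (4 gates).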
   int numLinearLayers = 0;
   if (mode == CUDNN_RNN_RELU || mode == CUDNN_RNN_TANH) {
      numLinearLayers = 2;
   }
   if (mode == CUDNN_GRU) {
      numLinearLayers = 6;
   }
   if (mode == CUDNN_LSTM) {
      numLinearLayers = 8;
   }
   assert(numLinearLayers == (int) layer->GetWeights().size());
   cudnnDataType_t mathPrec = CUDNN_DATA_FLOAT;
   if (std::is_same<AFloat, double>::value) { mathPrec = CUDNN_DATA_DOUBLE; }
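   // cudnnSetRNNDescriptor was removed in cuDNN 8; use the _v6 variant there
   // and the plain version with older releases.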
#if (CUDNN_VERSION >= 8000)
   CUDNNCHECK(cudnnSetRNNDescriptor_v6(handle, rnnDescriptors->LayerDescriptor, hiddenSize, numLayers, rnnDescriptors->HelperDescriptor,
#else
   CUDNNCHECK(cudnnSetRNNDescriptor(handle, rnnDescriptors->LayerDescriptor, hiddenSize, numLayers, rnnDescriptors->HelperDescriptor,
#endif
                                    inputMode, direction, mode, algo, mathPrec));
   // The layer stores a single input bias; the cuDNN default would be one
   // input and one recurrent bias (CUDNN_RNN_DOUBLE_BIAS).
   cudnnRNNBiasMode_t biasMode = CUDNN_RNN_NO_BIAS;
   if (layer->GetBiases().size() > 0)
      biasMode = CUDNN_RNN_SINGLE_INP_BIAS;
   CUDNNCHECK(cudnnSetRNNBiasMode(rnnDescriptors->LayerDescriptor, biasMode));
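   // Create one tensor descriptor per time step for the input/output data
   // and their gradients.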
   int seqLength = layer->GetTimeSteps();

   rnnDescriptors->xDesc.resize(seqLength);
   rnnDescriptors->yDesc.resize(seqLength);
   rnnDescriptors->dxDesc.resize(seqLength);
   rnnDescriptors->dyDesc.resize(seqLength);
   TensorDescriptor_t *xDesc = rnnDescriptors->xDesc.data();
   TensorDescriptor_t *yDesc = rnnDescriptors->yDesc.data();
   TensorDescriptor_t *dxDesc = rnnDescriptors->dxDesc.data();
   TensorDescriptor_t *dyDesc = rnnDescriptors->dyDesc.data();
   for (int i = 0; i < seqLength; i++) {
      CUDNNCHECK(cudnnCreateTensorDescriptor(&xDesc[i]));
      CUDNNCHECK(cudnnCreateTensorDescriptor(&yDesc[i]));
      CUDNNCHECK(cudnnCreateTensorDescriptor(&dxDesc[i]));
      CUDNNCHECK(cudnnCreateTensorDescriptor(&dyDesc[i]));

      // Input descriptor: {batchSize, inputSize, 1}, fully packed.
      int dimA[3];
      int strideA[3];
      dimA[0] = layer->GetBatchSize();
      dimA[1] = layer->GetInputSize();
      dimA[2] = 1;

      strideA[0] = dimA[2] * dimA[1];
      strideA[1] = dimA[2];
      strideA[2] = 1;

      CUDNNCHECK(cudnnSetTensorNdDescriptor(xDesc[i], mathPrec, 3, dimA, strideA));
      CUDNNCHECK(cudnnSetTensorNdDescriptor(dxDesc[i], mathPrec, 3, dimA, strideA));

      // Output descriptor: {batchSize, hiddenSize, 1} (twice the hidden size
      // for a bidirectional network).
      dimA[0] = layer->GetBatchSize();
      dimA[1] = bidirectional ? hiddenSize * 2 : hiddenSize;
      dimA[2] = 1;

      strideA[0] = dimA[2] * dimA[1];
      strideA[1] = dimA[2];
      strideA[2] = 1;

      CUDNNCHECK(cudnnSetTensorNdDescriptor(yDesc[i], mathPrec, 3, dimA, strideA));
      CUDNNCHECK(cudnnSetTensorNdDescriptor(dyDesc[i], mathPrec, 3, dimA, strideA));
   }
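   // Query cuDNN for the total parameter size (weights + biases) and describe
   // the parameters as a single flat filter of that many elements.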
   CUDNNCHECK(cudnnCreateFilterDescriptor(&rnnDescriptors->WeightsDescriptor));
   CUDNNCHECK(cudnnCreateFilterDescriptor(&rnnDescriptors->WeightsGradDescriptor));

   size_t weightsSize = 0;
   CUDNNCHECK(cudnnGetRNNParamsSize(handle, rnnDescriptors->LayerDescriptor, xDesc[0], &weightsSize, mathPrec));

   int dimW[3];
   dimW[0] = (mathPrec == CUDNN_DATA_DOUBLE) ? weightsSize / sizeof(double) : weightsSize / sizeof(float);
   dimW[1] = 1;
   dimW[2] = 1;

   CUDNNCHECK(cudnnSetFilterNdDescriptor(rnnDescriptors->WeightsDescriptor, mathPrec, CUDNN_TENSOR_NCHW, 3, dimW));
   CUDNNCHECK(cudnnSetFilterNdDescriptor(rnnDescriptors->WeightsGradDescriptor, mathPrec, CUDNN_TENSOR_NCHW, 3, dimW));
   auto &weightTensor = layer->GetWeightsTensor();
   auto &weightGradTensor = layer->GetWeightGradientsTensor();

   weightTensor = Tensor_t({(size_t) dimW[0], 1, 1}, GetTensorLayout(), 0, 0);
   weightGradTensor = Tensor_t({(size_t) dimW[0], 1, 1}, GetTensorLayout(), 0, 0);
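   // Fill the flat cuDNN weight tensor: for every pseudo-layer and every
   // linear-layer ID, ask cuDNN where the corresponding matrix lives inside
   // the flat buffer and copy the layer's weights there.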
   int nL = (!bidirectional) ? numLayers : 2 * numLayers;
   for (int ilayer = 0; ilayer < nL; ilayer++) {
      for (int linLayerID = 0; linLayerID < numLinearLayers; linLayerID++) {
         cudnnFilterDescriptor_t linLayerMatDesc;
         CUDNNCHECK(cudnnCreateFilterDescriptor(&linLayerMatDesc));
         AFloat *linLayerMat = nullptr;   // points inside the flat weight buffer

         CUDNNCHECK(cudnnGetRNNLinLayerMatrixParams(handle, rnnDescriptors->LayerDescriptor, ilayer, rnnDescriptors->xDesc.data()[0],
                                                    rnnDescriptors->WeightsDescriptor, weightTensor.GetDataPointer(),
                                                    linLayerID, linLayerMatDesc, (void **)&linLayerMat));

         cudnnDataType_t dataType;
         cudnnTensorFormat_t format;
         int nbDims = 0;
         int filterDimA[3];
         CUDNNCHECK(cudnnGetFilterNdDescriptor(linLayerMatDesc, 3, &dataType, &format, &nbDims, filterDimA));

         int wsize = layer->GetWeightsAt(linLayerID).GetSize();
         assert(wsize == filterDimA[1] * filterDimA[2]);
         cudaMemcpyAsync(linLayerMat, layer->GetWeightsAt(linLayerID).GetDataPointer(), wsize * sizeof(AFloat),
                         cudaMemcpyDeviceToDevice, layer->GetWeightsAt(linLayerID).GetComputeStream());

         CUDNNCHECK(cudnnDestroyFilterDescriptor(linLayerMatDesc));
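         // Same procedure for the bias parameters of this linear layer.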
         cudnnFilterDescriptor_t linLayerBiasDesc;
         CUDNNCHECK(cudnnCreateFilterDescriptor(&linLayerBiasDesc));
         AFloat *linLayerBias = nullptr;

         CUDNNCHECK(cudnnGetRNNLinLayerBiasParams(handle, rnnDescriptors->LayerDescriptor, ilayer,
                                                  rnnDescriptors->xDesc.data()[0], rnnDescriptors->WeightsDescriptor,
                                                  weightTensor.GetDataPointer(), linLayerID, linLayerBiasDesc,
                                                  (void **)&linLayerBias));

         CUDNNCHECK(cudnnGetFilterNdDescriptor(linLayerBiasDesc, 3, &dataType, &format, &nbDims, filterDimA));

         // Map the cuDNN linear-layer ID to the layer's bias index. With a
         // single recurrent bias the biases sit in the second half of the IDs.
         int biasID = linLayerID;
         if (biasMode == CUDNN_RNN_SINGLE_REC_BIAS) {
            biasID = linLayerID - 1;
            if (mode == CUDNN_LSTM) biasID = linLayerID - 4;
            if (mode == CUDNN_GRU) biasID = linLayerID - 3;
         }
         // cuDNN reports zero dimensions when this linear layer has no bias.
         if (filterDimA[0] > 0) {
            int wsize = layer->GetBiasesAt(biasID).GetSize();
            assert(wsize == filterDimA[1]);
            cudaMemcpyAsync(linLayerBias, layer->GetBiasesAt(biasID).GetDataPointer(), wsize * sizeof(AFloat),
                            cudaMemcpyDeviceToDevice, layer->GetBiasesAt(biasID).GetComputeStream());

            CUDNNCHECK(cudnnGetFilterNdDescriptor(linLayerBiasDesc, 3, &dataType, &format, &nbDims, filterDimA));
         }
         CUDNNCHECK(cudnnDestroyFilterDescriptor(linLayerBiasDesc));
      }
   }
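   // Re-point the layer's weight and bias tensors (and their gradients) to
   // sub-buffers of the flat cuDNN parameter tensors, so updates act directly
   // on the memory cuDNN uses.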
   size_t offset = 0;
   for (size_t i = 0; i < layer->GetWeights().size(); ++i) {
      auto &w = layer->GetWeightsAt(i);
      auto &dw = layer->GetWeightGradientsAt(i);
      assert(weightTensor(offset, 0, 0) == w(0, 0));

      w = Tensor_t(weightTensor.GetDeviceBuffer().GetSubBuffer(offset, w.GetSize()), w.GetShape(),
                   GetTensorLayout(), 0, 0);
      dw = Tensor_t(weightGradTensor.GetDeviceBuffer().GetSubBuffer(offset, w.GetSize()), w.GetShape(), GetTensorLayout(), 0, 0);

      offset += w.GetSize();
   }
   for (size_t i = 0; i < layer->GetBiases().size(); ++i) {
      auto &b = layer->GetBiasesAt(i);
      auto &db = layer->GetBiasGradientsAt(i);
      assert(weightTensor(offset, 0, 0) == b(0, 0));

      b = Tensor_t(weightTensor.GetDeviceBuffer().GetSubBuffer(offset, b.GetSize()), b.GetShape(), GetTensorLayout(), 0, 0);
      db = Tensor_t(weightGradTensor.GetDeviceBuffer().GetSubBuffer(offset, b.GetSize()), b.GetShape(), GetTensorLayout(), 0, 0);

      offset += b.GetSize();
   }

   descriptors = rnnDescriptors;
}
template <typename AFloat>
void TCudnn<AFloat>::ReleaseRNNDescriptors(TDescriptors *descriptors)
{
   auto rnnDescriptors = static_cast<RNNDescriptors_t *>(descriptors);
   CUDNNCHECK(cudnnDestroyRNNDescriptor(rnnDescriptors->LayerDescriptor));

   ReleaseDescriptor(rnnDescriptors->HelperDescriptor);
   ReleaseDescriptor(rnnDescriptors->WeightsDescriptor);
   ReleaseDescriptor(rnnDescriptors->WeightsGradDescriptor);
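   // The per-time-step tensor descriptors have to be destroyed one by one.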
   for (size_t i = 0; i < rnnDescriptors->xDesc.size(); i++) {
      cudnnDestroyTensorDescriptor(rnnDescriptors->xDesc.data()[i]);
      cudnnDestroyTensorDescriptor(rnnDescriptors->yDesc.data()[i]);
      cudnnDestroyTensorDescriptor(rnnDescriptors->dxDesc.data()[i]);
      cudnnDestroyTensorDescriptor(rnnDescriptors->dyDesc.data()[i]);
   }
}
template <typename AFloat>
template <typename RNNLayer>
void TCudnn<AFloat>::InitializeRecurrentWorkspace(TWorkspace *&workspace, TDescriptors *&descriptors, RNNLayer *layer)
{
   auto rnnWorkspace = new RNNWorkspace_t();
   auto rnnDescriptors = static_cast<RNNDescriptors_t *>(descriptors);
   cudnnHandle_t handle = layer->GetOutput().GetCudnnHandle();

   bool bidirectional = false;

   size_t numLayers = 1;
   if (bidirectional) numLayers *= 2;   // a bidirectional RNN counts as two layers
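   // The hidden state is re-shaped to {numLayers, batchSize, stateSize}, the
   // layout cuDNN uses for the initial/final hidden state.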
   Tensor_t &stateTensor = layer->GetState();
   stateTensor = Tensor_t(stateTensor.GetDeviceBuffer(), {numLayers, layer->GetBatchSize(), layer->GetStateSize()},
                          GetTensorLayout(), 0, 0);

   if (layer->GetCell().GetSize() > 0) {   // the cell state exists only for LSTM
      Tensor_t &cellStateTensor = layer->GetCell();
      cellStateTensor = Tensor_t(cellStateTensor.GetDeviceBuffer(), {numLayers, layer->GetBatchSize(), layer->GetStateSize()},
                                 GetTensorLayout(), 0, 0);
   }
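   // Query the workspace needed by the forward pass and allocate it.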
   CUDNNCHECK(cudnnGetRNNWorkspaceSize(handle, rnnDescriptors->LayerDescriptor, layer->GetTimeSteps(),
                                       rnnDescriptors->xDesc.data(), &rnnWorkspace->ForwardWorkspaceSize));
   if (rnnWorkspace->ForwardWorkspaceSize)
      cudaMalloc(&rnnWorkspace->ForwardWorkspace, rnnWorkspace->ForwardWorkspaceSize * sizeof(AFloat));
   if (rnnWorkspace->ForwardWorkspaceSize > 0 && rnnWorkspace->ForwardWorkspace == nullptr) {
      std::cerr << "Error allocating RNN workspace of size " << rnnWorkspace->ForwardWorkspaceSize
                << " - probably running out of memory on the GPU" << std::endl;
      std::cout << " layer input shape is { " << layer->GetBatchSize() << " , " << layer->GetTimeSteps() << " , "
                << layer->GetStateSize() << " } " << std::endl;
   }
   CUDNNCHECK(cudnnGetRNNTrainingReserveSize(handle, rnnDescriptors->LayerDescriptor, layer->GetTimeSteps(),
                                             rnnDescriptors->xDesc.data(), &rnnWorkspace->HelperWorkspaceSize));
   if (rnnWorkspace->HelperWorkspaceSize)
      cudaMalloc(&rnnWorkspace->HelperWorkspace, rnnWorkspace->HelperWorkspaceSize * sizeof(AFloat));
   if (rnnWorkspace->HelperWorkspaceSize > 0 && rnnWorkspace->HelperWorkspace == nullptr) {
      std::cerr << "Error allocating RNN reserved workspace of size " << rnnWorkspace->HelperWorkspaceSize
                << " - probably running out of memory on the GPU" << std::endl;
      std::cout << " layer input shape is { " << layer->GetBatchSize() << " , " << layer->GetTimeSteps() << " , "
                << layer->GetStateSize() << " } " << std::endl;
   }
   workspace = rnnWorkspace;
}
template <typename AFloat>
void TCudnn<AFloat>::FreeRNNWorkspace(TWorkspace *workspace)
{
   if (!workspace) return;
   auto rnnWorkspace = static_cast<RNNWorkspace_t *>(workspace);

   if (rnnWorkspace->ForwardWorkspace) cudaFree(rnnWorkspace->ForwardWorkspace);
   if (rnnWorkspace->HelperWorkspace) cudaFree(rnnWorkspace->HelperWorkspace);
}
template <typename AFloat>
void TCudnn<AFloat>::RNNForward(const Tensor_t &x, const Tensor_t &hx, const Tensor_t &cx, const Tensor_t &weights,
                                Tensor_t &y, Tensor_t &hy, Tensor_t &cy, const RNNDescriptors_t &desc,
                                RNNWorkspace_t &workspace, bool isTraining)
{
   bool rememberState = false;
   cudnnHandle_t cudnnHandle = x.GetCudnnHandle();
   int seqLength = x.GetShape()[0];   // time steps
   cudnnRNNDescriptor_t rnnDesc = desc.LayerDescriptor;

   // Cell-state tensors are passed only for an LSTM that keeps its state.
   bool isLSTM = (cx.GetSize() > 0) && rememberState;
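   // In training mode cuDNN fills the reserve space needed by the subsequent
   // backward pass; in inference mode no reserve space is required.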
   if (isTraining) {
      cudnnStatus_t status = cudnnRNNForwardTraining(
         cudnnHandle, rnnDesc, seqLength, desc.xDesc.data(), x.GetDataPointer(), hx.GetTensorDescriptor(),
         (rememberState) ? hx.GetDataPointer() : nullptr,
         (isLSTM) ? cx.GetTensorDescriptor() : hx.GetTensorDescriptor(), (isLSTM) ? cx.GetDataPointer() : nullptr,
         desc.WeightsDescriptor, weights.GetDataPointer(), desc.yDesc.data(), y.GetDataPointer(),
         hy.GetTensorDescriptor(), hy.GetDataPointer(),
         (isLSTM) ? cy.GetTensorDescriptor() : hy.GetTensorDescriptor(), (isLSTM) ? cy.GetDataPointer() : nullptr,
         workspace.ForwardWorkspace, workspace.ForwardWorkspaceSize,
         workspace.HelperWorkspace, workspace.HelperWorkspaceSize);

      assert(status == CUDNN_STATUS_SUCCESS);
   } else {
      cudnnStatus_t status = cudnnRNNForwardInference(
         cudnnHandle, rnnDesc, seqLength, desc.xDesc.data(), x.GetDataPointer(), hx.GetTensorDescriptor(),
         (rememberState) ? hx.GetDataPointer() : nullptr,
         (isLSTM) ? cx.GetTensorDescriptor() : hx.GetTensorDescriptor(), (isLSTM) ? cx.GetDataPointer() : nullptr,
         desc.WeightsDescriptor, weights.GetDataPointer(), desc.yDesc.data(), y.GetDataPointer(),
         hy.GetTensorDescriptor(), hy.GetDataPointer(), (isLSTM) ? cy.GetTensorDescriptor() : hy.GetTensorDescriptor(),
         (isLSTM) ? cy.GetDataPointer() : nullptr, workspace.ForwardWorkspace, workspace.ForwardWorkspaceSize);

      assert(status == CUDNN_STATUS_SUCCESS);
   }
}
template <typename AFloat>
void TCudnn<AFloat>::RNNBackward(const Tensor_t &x, const Tensor_t &hx, const Tensor_t &cx, const Tensor_t &y,
                                 const Tensor_t &dy, const Tensor_t &dhy, const Tensor_t &dcy, const Tensor_t &weights,
                                 Tensor_t &dx, Tensor_t &dhx, Tensor_t &dcx, Tensor_t &dw, const RNNDescriptors_t &desc,
                                 RNNWorkspace_t &workspace)
{
   bool rememberState = false;
   bool rememberStateGrad = false;
   bool isLSTM = (cx.GetSize() > 0) && rememberState;
   int seqLength = x.GetShape()[0];
   cudnnRNNDescriptor_t rnnDesc = desc.LayerDescriptor;
   cudnnHandle_t cudnnHandle = x.GetCudnnHandle();
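   // Backward-data pass: computes the gradients w.r.t. the input and the
   // initial hidden (and cell) state. cuDNN requires it to run before the
   // weight pass, since both read the reserve space filled by the forward pass.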
   cudnnStatus_t status = cudnnRNNBackwardData(
      cudnnHandle, rnnDesc, seqLength, desc.yDesc.data(), y.GetDataPointer(), desc.dyDesc.data(), dy.GetDataPointer(),
      dhy.GetTensorDescriptor(), (rememberStateGrad) ? dhy.GetDataPointer() : nullptr,
      (isLSTM) ? dcy.GetTensorDescriptor() : dhy.GetTensorDescriptor(), (isLSTM) ? dcy.GetDataPointer() : nullptr,
      desc.WeightsDescriptor, weights.GetDataPointer(), hx.GetTensorDescriptor(),
      (rememberState) ? hx.GetDataPointer() : nullptr, (isLSTM) ? cx.GetTensorDescriptor() : hx.GetTensorDescriptor(),
      (isLSTM) ? cx.GetDataPointer() : nullptr,
      desc.dxDesc.data(), dx.GetDataPointer(), dhx.GetTensorDescriptor(),
      (rememberState) ? dhx.GetDataPointer() : nullptr,
      (isLSTM) ? dcx.GetTensorDescriptor() : dhx.GetTensorDescriptor(),
      (isLSTM) ? dcx.GetDataPointer() : nullptr,
      workspace.ForwardWorkspace, workspace.ForwardWorkspaceSize, workspace.HelperWorkspace,
      workspace.HelperWorkspaceSize);

   assert(status == CUDNN_STATUS_SUCCESS);
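   // Backward-weights pass: accumulates the parameter gradients into dw.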
   status = cudnnRNNBackwardWeights(cudnnHandle, rnnDesc, seqLength, desc.xDesc.data(), x.GetDataPointer(),
                                    hx.GetTensorDescriptor(), (rememberState) ? hx.GetDataPointer() : nullptr,
                                    desc.yDesc.data(), y.GetDataPointer(), workspace.ForwardWorkspace,
                                    workspace.ForwardWorkspaceSize, desc.WeightsGradDescriptor, dw.GetDataPointer(),
                                    workspace.HelperWorkspace, workspace.HelperWorkspaceSize);

   assert(status == CUDNN_STATUS_SUCCESS);
}
template <typename AFloat>
void TCudnn<AFloat>::Rearrange(Tensor_t &y, const Tensor_t &x)
{
   cudnnHandle_t cudnnHandle = x.GetCudnnHandle();

   // Work on a copy of x so that its tensor descriptor can be modified
   // temporarily for the transformation.
   Tensor_t tmp = x;
   TensorDescriptor_t d = tmp.GetTensorDescriptor();
   cudnnDataType_t dataType;
   int n = 0;   // actual number of dimensions of x
   int dims[4];
   int strides[4];
   cudnnGetTensorNdDescriptor(d, tmp.GetNDim(), &dataType, &n, dims, strides);
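   // y is the transpose of x in the first two dimensions. Instead of moving
   // the data by hand, describe x with the output shape and suitable strides
   // and let cudnnTransformTensor perform the permutation.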
   const int xNdim = 3;
   auto outputShape = y.GetShape();
   assert(xNdim == (int) y.GetNDim());

   assert(outputShape[0] == dims[1]);
   assert(outputShape[1] == dims[0]);
   assert(outputShape[2] == ((n == 4) ? dims[3] : dims[2]));
   if (n == 4) assert(dims[2] == 1);
   int xStrides[xNdim] = {(int) outputShape[2], (int)(outputShape[2] * outputShape[0]), 1};
   int xDims[xNdim];
   for (int i = 0; i < xNdim; ++i)
      xDims[i] = outputShape[i];

   cudnnStatus_t status = cudnnSetTensorNdDescriptor(d, dataType, xNdim, xDims, xStrides);
   assert(status == CUDNN_STATUS_SUCCESS);
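   // Identity transform (alpha = 1, beta = 0): y = transpose(x).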
   AFloat alpha = 1;
   AFloat beta = 0;
   status = cudnnTransformTensor(cudnnHandle, &alpha, d, x.GetDataPointer(), &beta,
                                 y.GetTensorDescriptor(), y.GetDataPointer());
   assert(status == CUDNN_STATUS_SUCCESS);
   // Restore the original descriptor of x.
   status = cudnnSetTensorNdDescriptor(d, dataType, n, dims, strides);
   assert(status == CUDNN_STATUS_SUCCESS);
}