CUDNN Frontend API 8.2.0
cudnn_frontend_Tensor.h
/*
 * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#pragma once

#include <algorithm>
#include <array>
#include <functional>
#include <memory>
#include <sstream>
#include <utility>

#include "cudnn_frontend_utils.h"

namespace cudnn_frontend {

class Tensor_v8 : public BackendDescriptor {
   public:
    friend class TensorBuilder_v8;
    std::string
    describe() const override {
        std::stringstream ss;
        char sep = ' ';
        ss << "CUDNN_BACKEND_TENSOR_DESCRIPTOR :"
           << " Datatype: " << std::to_string(data_type) << " Id: " << std::to_string(id)
           << " Alignment: " << std::to_string(alignment) << " nDims " << nDims;
        ss << " Dim [";
        std::for_each(std::begin(btensor_dimA), std::end(btensor_dimA), [&ss, sep](int64_t x) mutable {
            ss << sep << x;
            sep = ',';
        });
        ss << " ] Str [";
        std::for_each(std::begin(btensor_strA), std::end(btensor_strA), [&ss, sep](int64_t x) mutable {
            ss << sep << x;
            sep = ',';
        });
        ss << "]";
        return ss.str();
    }

    Tensor_v8(Tensor_v8 &&from)
        : BackendDescriptor(from.get_desc(), from.get_status(), from.get_error()),
          data_type(from.data_type),
          id(from.id),
          alignment(from.alignment),
          nDims(from.nDims),
          isVirtual(from.isVirtual) {
        std::copy(std::begin(from.btensor_dimA), std::end(from.btensor_dimA), btensor_dimA);
        std::copy(std::begin(from.btensor_strA), std::end(from.btensor_strA), btensor_strA);
    }

    ~Tensor_v8() = default;

   private:
    Tensor_v8() = default;
    Tensor_v8(Tensor_v8 const &) = delete;
    Tensor_v8 &
    operator=(Tensor_v8 const &) = delete;

    cudnnDataType_t data_type = CUDNN_DATA_FLOAT;    //!< Datatype of the elements
    int64_t btensor_dimA[CUDNN_DIM_MAX + 1] = {-1};  //!< n, g, c, d, h, w
    int64_t btensor_strA[CUDNN_DIM_MAX + 1] = {-1};  //!< n, g, c, d, h, w
    int64_t id = -1;                                 //!< Unique id of the tensor
    int64_t alignment = -1;                          //!< Certain engine config expect minimum alignment of 16B
    int64_t nDims = -1;                              //!< Number of Dimensions of the tensor
    bool isVirtual = false;                          //!< Whether the tensor is virtual (an intermediate of the operation graph)
};

class TensorBuilder_v8 {
   public:
    //! Set Datatype for the Tensor_v8
    auto
    setDataType(cudnnDataType_t data_type_) -> TensorBuilder_v8 & {
        m_tensor.data_type = data_type_;
        return *this;
    }
    //! Set Dimensions of the tensor
    auto
    setDim(int64_t ndim, int64_t const *dim) -> TensorBuilder_v8 & {
        std::copy(dim, dim + ndim, m_tensor.btensor_dimA);
        m_tensor.nDims = ndim;
        return *this;
    }
    //! Set Strides of the tensor
    auto
    setStrides(int64_t ndim, int64_t const *strides) -> TensorBuilder_v8 & {
        std::copy(strides, strides + ndim, m_tensor.btensor_strA);
        return *this;
    }
    //! Set Unique Id of the tensor
    auto
    setId(int64_t id_) -> TensorBuilder_v8 & {
        m_tensor.id = id_;
        return *this;
    }
    //! Set Alignment of the tensor
    auto
    setAlignment(int64_t alignment_) -> TensorBuilder_v8 & {
        m_tensor.alignment = alignment_;
        return *this;
    }
    //! Mark the tensor as virtual
    auto
    setVirtual(bool virtual_ = true) -> TensorBuilder_v8 & {
        m_tensor.isVirtual = virtual_;
        return *this;
    }

    Tensor_v8 &&
    build() {
        // Sanity check if non-default fields have been set correctly.
        if (m_tensor.alignment <= 0) {
            set_error_and_throw_exception(
                &m_tensor,
                CUDNN_STATUS_BAD_PARAM,
                "CUDNN_BACKEND_TENSOR_DESCRIPTOR: Check and Set the CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT field");
            return std::move(m_tensor);
        }
        if (m_tensor.id <= 0) {
            set_error_and_throw_exception(
                &m_tensor,
                CUDNN_STATUS_BAD_PARAM,
                "CUDNN_BACKEND_TENSOR_DESCRIPTOR: Check and Set the CUDNN_ATTR_TENSOR_UNIQUE_ID as a valid value");
            return std::move(m_tensor);
        }
        if (m_tensor.btensor_strA[0] <= 0) {
            set_error_and_throw_exception(
                &m_tensor,
                CUDNN_STATUS_BAD_PARAM,
                "CUDNN_BACKEND_TENSOR_DESCRIPTOR: Check and Set the CUDNN_ATTR_TENSOR_STRIDES Correctly");
            return std::move(m_tensor);
        }
        if (m_tensor.btensor_dimA[0] <= 0) {
            set_error_and_throw_exception(
                &m_tensor,
                CUDNN_STATUS_BAD_PARAM,
                "CUDNN_BACKEND_TENSOR_DESCRIPTOR: Check and Set the CUDNN_ATTR_TENSOR_DIMENSIONS Correctly");
            return std::move(m_tensor);
        }
        if (m_tensor.pointer != nullptr) {
            set_error_and_throw_exception(&m_tensor,
                                          CUDNN_STATUS_BAD_PARAM,
                                          "CUDNN_BACKEND_TENSOR_DESCRIPTOR: Bad tensor created. The tensor already "
                                          "seems to be pointing to something");
            return std::move(m_tensor);
        }

        // Create a descriptor. Memory allocation happens here.
        auto status = m_tensor.initialize_managed_backend_pointer(CUDNN_BACKEND_TENSOR_DESCRIPTOR);
        if (status != CUDNN_STATUS_SUCCESS) {
            set_error_and_throw_exception(
                &m_tensor, status, "CUDNN_BACKEND_TENSOR_DESCRIPTOR: cudnnCreate Descriptor Failed");
            return std::move(m_tensor);
        }

        // Once created, set the descriptor parameters.
        status = cudnnBackendSetAttribute(m_tensor.pointer->get_backend_descriptor(),
                                          CUDNN_ATTR_TENSOR_DATA_TYPE,
                                          CUDNN_TYPE_DATA_TYPE,
                                          1,
                                          &m_tensor.data_type);
        if (status != CUDNN_STATUS_SUCCESS) {
            set_error_and_throw_exception(
                &m_tensor, status, "CUDNN_BACKEND_TENSOR_DESCRIPTOR: SetAttribute CUDNN_ATTR_TENSOR_DATA_TYPE Failed");
            return std::move(m_tensor);
        }
        status = cudnnBackendSetAttribute(m_tensor.pointer->get_backend_descriptor(),
                                          CUDNN_ATTR_TENSOR_DIMENSIONS,
                                          CUDNN_TYPE_INT64,
                                          m_tensor.nDims,
                                          m_tensor.btensor_dimA);
        if (status != CUDNN_STATUS_SUCCESS) {
            set_error_and_throw_exception(
                &m_tensor, status, "CUDNN_BACKEND_TENSOR_DESCRIPTOR: SetAttribute CUDNN_ATTR_TENSOR_DIMENSIONS Failed");
            return std::move(m_tensor);
        }
        status = cudnnBackendSetAttribute(m_tensor.pointer->get_backend_descriptor(),
                                          CUDNN_ATTR_TENSOR_STRIDES,
                                          CUDNN_TYPE_INT64,
                                          m_tensor.nDims,
                                          m_tensor.btensor_strA);
        if (status != CUDNN_STATUS_SUCCESS) {
            set_error_and_throw_exception(
                &m_tensor, status, "CUDNN_BACKEND_TENSOR_DESCRIPTOR: SetAttribute CUDNN_ATTR_TENSOR_STRIDES Failed");
            return std::move(m_tensor);
        }
        status = cudnnBackendSetAttribute(
            m_tensor.pointer->get_backend_descriptor(), CUDNN_ATTR_TENSOR_UNIQUE_ID, CUDNN_TYPE_INT64, 1, &m_tensor.id);
        if (status != CUDNN_STATUS_SUCCESS) {
            set_error_and_throw_exception(
                &m_tensor, status, "CUDNN_BACKEND_TENSOR_DESCRIPTOR: SetAttribute CUDNN_ATTR_TENSOR_UNIQUE_ID Failed");
            return std::move(m_tensor);
        }
        status = cudnnBackendSetAttribute(m_tensor.pointer->get_backend_descriptor(),
                                          CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT,
                                          CUDNN_TYPE_INT64,
                                          1,
                                          &m_tensor.alignment);
        if (status != CUDNN_STATUS_SUCCESS) {
            set_error_and_throw_exception(
                &m_tensor,
                status,
                "CUDNN_BACKEND_TENSOR_DESCRIPTOR: SetAttribute CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT Failed");
            return std::move(m_tensor);
        }
        if (m_tensor.isVirtual) {
            status = cudnnBackendSetAttribute(m_tensor.pointer->get_backend_descriptor(),
                                              CUDNN_ATTR_TENSOR_IS_VIRTUAL,
                                              CUDNN_TYPE_BOOLEAN,
                                              1,
                                              &m_tensor.isVirtual);
            if (status != CUDNN_STATUS_SUCCESS) {
                set_error_and_throw_exception(
                    &m_tensor,
                    status,
                    "CUDNN_BACKEND_TENSOR_DESCRIPTOR: SetAttribute CUDNN_ATTR_TENSOR_IS_VIRTUAL Failed");
                return std::move(m_tensor);
            }
        }

        // Finalizing the descriptor
        status = cudnnBackendFinalize(m_tensor.pointer->get_backend_descriptor());
        if (status != CUDNN_STATUS_SUCCESS) {
            set_error_and_throw_exception(&m_tensor, status, "CUDNN_BACKEND_TENSOR_DESCRIPTOR cudnnFinalize failed");
            return std::move(m_tensor);
        }
        return std::move(m_tensor);
    }

    explicit TensorBuilder_v8() = default;
    ~TensorBuilder_v8() = default;
    TensorBuilder_v8(TensorBuilder_v8 &&) = delete;
    TensorBuilder_v8(TensorBuilder_v8 const &) = delete;
    TensorBuilder_v8 &
    operator=(TensorBuilder_v8 const &) = delete;

   private:
    Tensor_v8 m_tensor;
};
}
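
A minimal usage sketch of the builder defined in this file (not part of the header itself). It assumes the library's umbrella cudnn_frontend.h include and illustrative dimension, stride, id, and alignment values. Because build() records any failure on the returned descriptor, the sketch checks get_status()/get_error(); depending on how the frontend's exception handling is configured, set_error_and_throw_exception may also throw, so production code would typically add a try/catch.

#include <array>
#include <iostream>

#include "cudnn_frontend.h"

int main() {
    // Illustrative 4-D NCHW tensor: dimensions and fully packed strides (innermost dimension fastest).
    std::array<int64_t, 4> dim    = {8, 64, 56, 56};
    std::array<int64_t, 4> stride = {64 * 56 * 56, 56 * 56, 56, 1};

    auto tensor = cudnn_frontend::TensorBuilder_v8()
                      .setDim(dim.size(), dim.data())
                      .setStrides(stride.size(), stride.data())
                      .setId(1)                        // unique id referenced by later operation descriptors
                      .setAlignment(16)                // byte alignment of the device buffer
                      .setDataType(CUDNN_DATA_FLOAT)
                      .build();

    if (tensor.get_status() != CUDNN_STATUS_SUCCESS) {
        std::cerr << "Tensor build failed: " << tensor.get_error() << std::endl;
        return 1;
    }
    std::cout << tensor.describe() << std::endl;
    return 0;
}

Tensor_v8 is move-only: build() returns the finalized descriptor by rvalue reference, so it is moved out of the builder exactly once, and the unique id set here is what later operation descriptors and variant packs use to refer to this tensor.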