Deep Neural Network Library (DNNL) 1.90.1
Performance library for Deep Learning
cross_engine_reorder.cpp

Annotated version: Getting started on GPU

/*******************************************************************************
* Copyright 2019 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <iostream>
#include <sstream>
// [Prologue]
#include "dnnl.hpp"
#include "example_utils.hpp"
// Optional header to access debug functions like `dnnl_status2str()`
#include "dnnl_debug.h"
using namespace dnnl;
using namespace std;
// [Prologue]
size_t product(const memory::dims &adims) {
    size_t n_elems = 1;
    for (size_t d = 0; d < adims.size(); ++d) {
        n_elems *= (size_t)adims[d];
    }
    return n_elems;
}
void fill(const memory &mem, const memory::dims &adims) {
    float *array = mem.map_data<float>();
    // Write to every element of the tensor
    for (size_t e = 0; e < product(adims); ++e) {
        array[e] = e % 7 ? 1.0f : -1.0f;
    }
    mem.unmap_data(array);
}
int find_negative(const memory &mem, const memory::dims &adims) {
    int negs = 0;
    float *array = mem.map_data<float>();
    // Scan every element of the tensor and count the negative values
    for (size_t e = 0; e < product(adims); ++e) {
        negs += array[e] < 0.0f;
    }
    mem.unmap_data(array);
    return negs;
}
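// With the 2 x 16 x 1 x 1 tensor used below (32 elements), fill() writes
// -1.0f at indices 0, 7, 14, 21, and 28 and 1.0f everywhere else, so
// find_negative() returns 5 before the ReLU and is expected to return 0
// after it.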
void cross_engine_reorder_tutorial() {
    // [Initialize engine]
    auto cpu_engine = engine(validate_engine_kind(engine::kind::cpu), 0);
    auto gpu_engine = engine(validate_engine_kind(engine::kind::gpu), 0);
    // [Initialize engine]
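    // For illustration: the trailing 0 passed to the engine constructor is the
    // device index, and the number of devices of a given kind can be queried
    // before an engine is constructed, for example:
    //
    //     if (engine::get_count(engine::kind::gpu) == 0)
    //         throw std::string("no GPU engines found");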
    // [Initialize stream]
    auto stream_gpu = stream(gpu_engine);
    // [Initialize stream]
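    // For illustration: a stream may also be created with explicit ordering
    // semantics, e.g. an in-order stream that runs submitted primitives in
    // submission order:
    //
    //     auto stream_gpu = stream(gpu_engine, stream::flags::in_order);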
    // [reorder cpu2gpu]
    const auto tz = memory::dims {2, 16, 1, 1};
    auto m_cpu = memory(
            {tz, memory::data_type::f32, memory::format_tag::nchw}, cpu_engine);
    auto m_gpu = memory(
            {tz, memory::data_type::f32, memory::format_tag::nchw}, gpu_engine);
    fill(m_cpu, tz);
    auto r1 = reorder(m_cpu, m_gpu);
    // [reorder cpu2gpu]
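    // For illustration: the same reorder could be created through an explicit
    // reorder::primitive_desc, which names both engines and memory descriptors
    // (and can also carry primitive attributes):
    //
    //     auto r1_pd = reorder::primitive_desc(
    //             cpu_engine, m_cpu.get_desc(), gpu_engine, m_gpu.get_desc());
    //     auto r1 = reorder(r1_pd);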
    // [Create a ReLU primitive]
    // ReLU op descriptor (uses the GPU memory as source memory;
    // no engine- or implementation-specific information)
    auto relu_d = eltwise_forward::desc(prop_kind::forward,
            algorithm::eltwise_relu, m_gpu.get_desc(), 0.0f);
    // ReLU primitive descriptor, which corresponds to a particular
    // implementation in the library. Specify the engine for the ReLU
    // primitive. Use the GPU engine here.
    auto relu_pd = eltwise_forward::primitive_desc(relu_d, gpu_engine);
    // ReLU primitive
    auto relu = eltwise_forward(relu_pd);
    // [Create a ReLU primitive]
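    // For illustration: the trailing 0.0f in the op descriptor is the eltwise
    // alpha parameter; for eltwise_relu it is the negative slope, so a leaky
    // ReLU variant would be described as:
    //
    //     auto lrelu_d = eltwise_forward::desc(prop_kind::forward,
    //             algorithm::eltwise_relu, m_gpu.get_desc(), 0.1f);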
    // [reorder gpu2cpu]
    auto r2 = reorder(m_gpu, m_cpu);
    // [reorder gpu2cpu]
    // [Execute primitives]
    // Move the source data from CPU to GPU
    r1.execute(stream_gpu, m_cpu, m_gpu);
    // Execute ReLU on the GPU stream
    relu.execute(stream_gpu, {{DNNL_ARG_SRC, m_gpu}, {DNNL_ARG_DST, m_gpu}});
    // Get the result data from GPU back to CPU
    r2.execute(stream_gpu, m_gpu, m_cpu);
    stream_gpu.wait();
    // [Execute primitives]
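    // For illustration: reorder::execute(stream, src, dst) is a convenience
    // overload; the generic argument-map form used for the ReLU above also
    // works for reorders:
    //
    //     r1.execute(stream_gpu, {{DNNL_ARG_FROM, m_cpu}, {DNNL_ARG_TO, m_gpu}});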
    // [Check the results]
    if (find_negative(m_cpu, tz) != 0) {
        std::stringstream ss;
        ss << "Unexpected output: found a negative value after the ReLU "
              "execution";
        throw ss.str();
    }
    // [Check the results]
}
// [Main]
int main(int argc, char **argv) {
    try {
        cross_engine_reorder_tutorial();
    } catch (dnnl::error &e) {
        std::cerr << "DNNL error: " << e.what() << std::endl
                  << "Error status: " << dnnl_status2str(e.status) << std::endl;
        return 1;
    } catch (std::string &e) {
        std::cerr << "Error in the example: " << e << std::endl;
        return 2;
    }
    std::cout << "Example passes" << std::endl;
    return 0;
}
// [Main]