Commit 03de25a0, authored by Maxence Naud

Merge branch 'nhwc_for_conv' into 'dev'

ForwardDims Conv for NCHW and NHWC

See merge request !314
Parents: 16be15b7, 7c87a79d
Part of 3 merge requests: !414 "Update version 0.5.1 -> 0.6.0", !408 "[Add] Dropout Operator", !314 "[Feat] ForwardDims Conv for NCHW and NHWC"
Pipeline #65943 passed
Tensor::setDataFormat:

@@ -462,12 +462,33 @@ public:
      * data is copy-transposed.
      */
     void setDataFormat(const DataFormat df, bool copyTrans = true) {
-        if (mImpl && copyTrans && (dataFormat() != df) && df != DataFormat::Default && dataFormat() != DataFormat::Default) {
-            copyTranspose(*this, getDataFormatTranspose(dataFormat(), df));
+        if (!copyTrans || df == dataFormat()) {
+            mDataFormat = df;
+            return;
+        }
+        const auto transpose = getDataFormatTranspose(dataFormat(), df);
+        if (mImpl) {
+            copyTranspose(*this, transpose);
+        } else {
+            std::vector<DimSize_t> newDims;
+            for (std::size_t i = 0; i < dims().size(); ++i) {
+                newDims.push_back(dims()[transpose[i]]);
+            }
+            std::vector<std::size_t> newStrides(dims().size(), 1);
+            for (size_t i = 0; i < dims().size(); ++i) {
+                for (size_t j = i + 1; j < dims().size(); ++j) {
+                    newStrides[i] *= newDims[j];
+                }
+            }
+            mDims = std::move(newDims);
+            mStrides = std::move(newStrides);
         }
         mDataFormat = df;
     }

     /**
      * @brief Get the Impl object
      * @return constexpr const std::shared_ptr<TensorImpl>&
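The new else branch handles tensors that have no backend implementation yet: instead of moving any data, it permutes the dims through the transpose permutation and recomputes contiguous row-major strides. A minimal standalone sketch of that metadata-only computation (plain C++; the NCHW-to-NHWC permutation {0, 2, 3, 1} and all names here are illustrative assumptions, not the Aidge API):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Sketch: permute dims with a transpose vector and recompute
    // contiguous (row-major) strides, as the impl-less branch does.
    int main() {
        const std::vector<std::size_t> dims{16, 3, 224, 450};  // NCHW
        const std::vector<std::size_t> transpose{0, 2, 3, 1};  // NCHW -> NHWC (assumed convention)

        std::vector<std::size_t> newDims;
        for (std::size_t i = 0; i < dims.size(); ++i)
            newDims.push_back(dims[transpose[i]]);             // {16, 224, 450, 3}

        std::vector<std::size_t> newStrides(dims.size(), 1);
        for (std::size_t i = 0; i < dims.size(); ++i)
            for (std::size_t j = i + 1; j < dims.size(); ++j)
                newStrides[i] *= newDims[j];                   // stride = product of later dims

        for (auto s : newStrides) std::cout << s << ' ';       // 302400 1350 3 1
    }

Since each stride is the product of all later dims, the permuted dims {16, 224, 450, 3} yield strides {302400, 1350, 3, 1}, i.e. the layout a freshly allocated NHWC tensor would have.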
Conv_Op — inChannels() and outChannels():

@@ -172,6 +172,11 @@ public:
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of input channel imposed.");
         }
+        // check format
+        if (getInput(1)->dataFormat() == Aidge::DataFormat::NHWC)
+            return getInput(1)->template dims<DIM+2>()[DIM+1];
+        // default format is NCHW
         return getInput(1)->template dims<DIM+2>()[1];
     }
@@ -184,6 +189,7 @@ public:
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of output channel imposed.");
         }
+        // first weight dimension for both NCHW (Cout,Cin,H,W) and NHWC (Cout,H,W,Cin) data format
         return getInput(1)->template dims<DIM+2>()[0];
     }
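Concretely, for DIM = 2 both accessors reduce to picking a fixed index out of a 4-D weight shape. A hedged sketch of the input-channel lookup (free-standing code with an illustrative DataFormat enum, not the Aidge types):

    #include <array>
    #include <cstddef>

    enum class DataFormat { Default, NCHW, NHWC };

    // Sketch for DIM = 2: pick the input-channel count from a 4-D weight
    // shape. NCHW weights are (Cout, Cin, H, W); NHWC weights are
    // (Cout, H, W, Cin). Cout sits at index 0 in both layouts.
    std::size_t weightInChannels(const std::array<std::size_t, 4>& wDims,
                                 DataFormat df) {
        constexpr std::size_t DIM = 2;
        if (df == DataFormat::NHWC)
            return wDims[DIM + 1];  // last axis holds Cin
        return wDims[1];            // NCHW / Default: second axis holds Cin
    }

    // weightInChannels({4, 3, 3, 4}, DataFormat::NCHW) == 3
    // weightInChannels({4, 3, 3, 4}, DataFormat::NHWC) == 4

With the weight shape {4, 3, 3, 4} used in the unit tests below, the NCHW reading gives Cin = 3 while the NHWC reading gives Cin = 4.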
Conv_Op<DIM>::forwardDims:

@@ -40,42 +40,57 @@ Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)

 template <Aidge::DimIdx_t DIM>
 bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    if (inputsAssociated()) {
-        // first check weight since it defines inChannels and outChannels
-        AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
-                    "Wrong weight Tensor dimension: {} for Conv{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
-        // check data
-        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
-                    (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
-                    "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), inChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
-        // check optional bias
-        if(getInput(2))
-            AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
-                    (getInput(2)->template dims<1>()[0] == outChannels()),
-                    "Wrong bias size ({}) for Conv operator. Expected dims are [{}].", getInput(2)->dims(), outChannels());
-
-        std::array<DimSize_t, DIM + 2> outputDims{};
-        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-
-        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
-            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvAttr::DilationDims>()[dim] *
-                                                (mAttributes->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
-                                            1;
-            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                    floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                            static_cast<float>(mAttributes->template getAttr<ConvAttr::StrideDims>()[dim])));
-        }
-
-        outputDims[1] = outChannels();
-        outputDims[0] = inputDims[0];
-        mOutputs[0]->resize(outputDims);
-        return true;
-    }
-    return false;
+    if (!inputsAssociated())
+        return false;
+    // first check weight since it defines inChannels and outChannels
+    if (getInput(0)->dataFormat() == Aidge::DataFormat::NHWC) {
+        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
+                    (getInput(0)->template dims<DIM+2>()[DIM+1] == inChannels()),
+                    "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), fmt::join(std::vector<std::string>(DIM, "x"), ", "), inChannels());
+    }
+    else { // for dataFormat in NCHW or Default format
+        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
+                    (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
+                    "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), inChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
+    }
+
+    AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
+                "Wrong weight Tensor dimension: {} for Conv{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
+
+    if (getInput(2))
+        AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
+                    (getInput(2)->template dims<1>()[0] == outChannels()),
+                    "Wrong bias size ({}) for Conv operator. Expected dims are [{}].", getInput(2)->dims(), outChannels());
+
+    const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+    std::array<DimSize_t, DIM + 2> outputDims{};
+    unsigned int in_dims_index = (getInput(0)->dataFormat() == Aidge::DataFormat::NHWC) ? 1 : 2;
+    unsigned int out_dims_index = (getOutput(0)->dataFormat() == Aidge::DataFormat::NHWC) ? 1 : 2;
+
+    for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvAttr::KernelDims>().size(); ++dim) {
+        const DimSize_t kernelExtent = mAttributes->template getAttr<ConvAttr::DilationDims>()[dim] *
+                                            (mAttributes->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
+                                        1;
+        outputDims[dim + out_dims_index] = 1 + static_cast<DimSize_t>(
+                floor(static_cast<float>(inputDims[dim + in_dims_index] - kernelExtent) /
+                        static_cast<float>(mAttributes->template getAttr<ConvAttr::StrideDims>()[dim])));
+    }
+
+    if (getOutput(0)->dataFormat() == Aidge::DataFormat::NHWC)
+        outputDims[DIM+1] = outChannels();
+    else
+        outputDims[1] = outChannels();
+    outputDims[0] = inputDims[0];
+    mOutputs[0]->resize(outputDims);
+    return true;
 }

 template <Aidge::DimIdx_t DIM>
 std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>
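The loop implements the standard valid-convolution size formula: each spatial output extent is 1 + floor((in - kernelExtent) / stride) with kernelExtent = dilation * (kernel - 1) + 1; the only format-dependent part is the index at which the spatial axes start (2 for NCHW/Default, 1 for NHWC). A small sketch checking the numbers used by the unit tests below (illustrative helper, not the Aidge API):

    #include <cmath>
    #include <cstddef>
    #include <iostream>

    // Sketch of the per-axis output size computed in forwardDims().
    std::size_t convOutDim(std::size_t in, std::size_t kernel,
                           std::size_t stride, std::size_t dilation) {
        const std::size_t kernelExtent = dilation * (kernel - 1) + 1;
        return 1 + static_cast<std::size_t>(
            std::floor(static_cast<float>(in - kernelExtent) /
                       static_cast<float>(stride)));
    }

    int main() {
        // A 3x4 kernel on a 224x450 input with stride 1 and dilation 1
        // yields a 222x447 output, as the tests expect.
        std::cout << convOutDim(224, 3, 1, 1) << '\n';  // 222
        std::cout << convOutDim(450, 4, 1, 1) << '\n';  // 447
    }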
Conv_Op forwardDims unit tests:

@@ -22,6 +22,89 @@
 #include "aidge/utils/Types.h"

 namespace Aidge {

+TEST_CASE("[core/operator] Conv_Op(ForwardDims)", "[Operator][ForwardDims][Conv]") {
+    SECTION("I:NCHW O:NCHW W:NCHW") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 3, 224, 450}));
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 3, 4})); // Out_ch, In_ch, H, W
+        const std::vector<std::size_t> expectedOutputDims({16, 4, 222, 447});
+
+        auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+        // Set DataFormat
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NCHW);
+        input->setDataFormat(Aidge::DataFormat::NCHW);
+        weight->setDataFormat(Aidge::DataFormat::NCHW);
+        // Set inputs
+        conv1.setInput(1, weight);
+        conv1.setInput(0, input);
+
+        REQUIRE(conv1.forwardDims());
+        REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    }
+    SECTION("I:NCHW O:NCHW W:NHWC") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 3, 224, 450}));
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 3, 4})); // Out_ch, H, W, In_ch
+        const std::vector<std::size_t> expectedOutputDims({16, 4, 222, 447});
+
+        auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+        // Set DataFormat
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NCHW);
+        input->setDataFormat(Aidge::DataFormat::NCHW);
+        weight->setDataFormat(Aidge::DataFormat::NHWC); // NHWC weight format
+        // Set inputs
+        conv1.setInput(1, weight);
+        conv1.setInput(0, input);
+
+        REQUIRE(conv1.forwardDims());
+        REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    }
+    SECTION("I:NHWC O:NHWC W:NCHW") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 3, 224, 450}));
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 3, 4})); // Out_ch, In_ch, H, W
+        const std::vector<std::size_t> expectedOutputDims({16, 222, 447, 4});
+
+        auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+        // Set DataFormat
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
+        input->setDataFormat(Aidge::DataFormat::NHWC);
+        weight->setDataFormat(Aidge::DataFormat::NCHW); // NCHW weight format
+        // Set inputs
+        conv1.setInput(1, weight);
+        conv1.setInput(0, input);
+
+        REQUIRE(conv1.forwardDims());
+        REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    }
+    SECTION("I:NHWC O:NHWC W:NHWC") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 3, 224, 450}));
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 3, 4})); // Out_ch, H, W, In_ch
+        const std::vector<std::size_t> expectedOutputDims({16, 222, 447, 4});
+
+        auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+        // Set DataFormat
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
+        input->setDataFormat(Aidge::DataFormat::NHWC);
+        weight->setDataFormat(Aidge::DataFormat::NHWC);
+        // Set inputs
+        conv1.setInput(1, weight);
+        conv1.setInput(0, input);
+
+        REQUIRE(conv1.forwardDims());
+        REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    }
+}
+
 TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeReceptiveField][Conv]") {
     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
     auto conv1 = Conv(3, 32, {5, 5}, "conv1"); // output dims: {16, 32, 220, 220}