Commit 45b6e4a0 authored by Maxence Naud

Update Conv and ConvDepthWise operators

- Move the code definitions to external .cpp files in the src directory
- Remove InChannels, OutChannels and NbChannels from the list of attributes, since they were only used to create the associated Producers. They are kept as parameters of the Operator factory functions so that the Weight and Bias Producers can still be created automatically; the number of in/out channels is now read from the Weight Tensor
- Add 'Conv_Op::inChannels()', 'Conv_Op::outChannels()' and 'ConvDepthWise_Op::nbChannels()' functions (see the usage sketch below)
parent 097ee26f
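A quick usage sketch for review (not part of this commit): the channel counts are now read from the weight Tensor attached as input #1. The snippet assumes a Tensor can be constructed directly from its dims array and that OperatorTensor::associateInput() is used to attach it; the names and values are illustrative only.

#include <array>
#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Conv.hpp"

int main() {
    // In/out channels are no longer constructor arguments: only kernel, stride,
    // dilation and the bias flag remain.
    Aidge::Conv_Op<2> conv(std::array<Aidge::DimSize_t, 2>{3, 3});

    // Weight Tensor laid out as {outChannels, inChannels, kernelH, kernelW}
    // (assumed to be constructible directly from its dims).
    auto weight = std::make_shared<Aidge::Tensor>(std::array<Aidge::DimSize_t, 4>{16, 4, 3, 3});
    conv.associateInput(1, weight); // input #1 is the weight

    // Channel counts are derived from the weight dims; they throw if no weight is associated.
    return (conv.inChannels() == 4 && conv.outChannels() == 16) ? 0 : 1;
}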
@@ -30,7 +30,7 @@
 #include "aidge/utils/Types.h"
 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, NoBias };
+enum class ConvAttr { StrideDims, DilationDims, KernelDims, NoBias };
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
@@ -38,8 +38,6 @@ class Conv_Op : public OperatorTensor,
                 public StaticAttributes<ConvAttr,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
-                                        DimSize_t,
-                                        DimSize_t,
                                         std::array<DimSize_t, DIM>,
                                         bool> {
@@ -51,24 +49,20 @@
     using Attributes_ = StaticAttributes<ConvAttr,
                                          std::array<DimSize_t, DIM>,
                                          std::array<DimSize_t, DIM>,
-                                         DimSize_t,
-                                         DimSize_t,
                                          std::array<DimSize_t, DIM>,
                                          bool>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
-    constexpr Conv_Op(DimSize_t inChannels,
-                      DimSize_t outChannels,
-                      const std::array<DimSize_t, DIM> &kernelDims,
+    constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                       const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
                       bool noBias = false)
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvAttr::StrideDims>(strideDims),
                       attr<ConvAttr::DilationDims>(dilationDims),
-                      attr<ConvAttr::InChannels>(inChannels),
-                      attr<ConvAttr::OutChannels>(outChannels),
+                      // attr<ConvAttr::InChannels>(inChannels),
+                      // attr<ConvAttr::OutChannels>(outChannels),
                       attr<ConvAttr::KernelDims>(kernelDims),
                       attr<ConvAttr::NoBias>(noBias)) {}
@@ -76,16 +70,7 @@
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Conv_Op(const Conv_Op<DIM>& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Conv_Op(const Conv_Op<DIM>& op);
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -108,115 +93,28 @@
     // }
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        // check inputs have been associated
-        bool associated = true;
-        for (IOIndex_t i = 0; i < 3; ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
-            AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
-                         (getInput(0)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()),
-                         "Wrong input size for Conv operator.");
-            AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)) &&
-                         (getInput(1)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()) &&
-                         (getInput(1)->template dims<DIM+2>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
-                         "Wrong weight size for Conv operator.");
-            if(!this->template getAttr<ConvAttr::NoBias>())
-                AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
-                             (getInput(2)->template dims<1>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
-                             "Wrong bias size for Conv operator.");
-            std::array<DimSize_t, DIM + 2> outputDims{};
-            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-            for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
-                                               (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
-                                               1;
-                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                              static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
-            }
-            outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-        }
-        return associated;
-    }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
-    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
+    std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
     computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
                           const std::vector<DimSize_t>& outputDims,
-                          const IOIndex_t outputIdx = 0) const override {
-        if (outputIdx != 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-        }
-        if (firstEltDims.size() != outputDims.size()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
-        }
-        if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
-            // Offset
-            auto inputIdxDims = firstEltDims; // batch idx is the same
-            inputIdxDims[1] = 0; // each channel is used so start with the first one
-            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
-                }
-            }
-            // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-            // Input
-            // same batch value, every input channel is used
-            std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
-            for (DimIdx_t i = 0; i < DIM; ++i) {
-                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
-                            + 1
-                            + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-                inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
-            }
-            // Weight
-            // same output value, every input channel is used
-            std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
-            for (std::size_t i = 0; i < DIM; ++i) {
-                weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
-            }
-            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
-            weightIdxDims[0] = firstEltDims[1];
-            // Result
-            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
-            // Bias
-            if (! this->template getAttr<ConvAttr::NoBias>()){
-                const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-                const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
-                res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
-            }
-            return res;
-        }
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-    }
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-        // By default, automatically set backend for weight and bias inputs
-        getInput(1)->setBackend(name, device);
-        getInput(2)->setBackend(name, device);
-    }
+                          const IOIndex_t outputIdx = 0) const override;
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    DimSize_t inChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of input channel imposed.");
+        }
+        return getInput(1)->template dims<DIM+2>()[1];
+    }
+    DimSize_t outChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of input channel imposed.");
+        }
+        return getInput(1)->template dims<DIM+2>()[0];
+    }
     static const std::vector<std::string> getInputsName(){
@@ -227,8 +125,6 @@
     }
 };
-template <DimIdx_t DIM>
-const std::string Conv_Op<DIM>::Type = "Conv";
 /**
  * @brief Perform a convolution on the input Tensor.
@@ -252,7 +148,7 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   bool noBias = false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims, noBias), name);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
     addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
     addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
@@ -274,13 +170,13 @@ inline std::shared_ptr<Node> Conv(
 }
 } // namespace Aidge
+extern template class Aidge::Conv_Op<2>;
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
     "StrideDims",
     "DilationDims",
-    "InChannels",
-    "OutChannels",
     "KernelDims",
     "NoBias"
 };
...
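The Conv() factory above still takes inChannels/outChannels, but only to size the Producers it creates. A hedged sketch of the graph-level call (the position of the name argument is assumed, since that part of the factory signature is collapsed in the hunk above):

#include <memory>
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Conv.hpp"

int main() {
    // Hypothetical call; parameter order (inChannels, outChannels, kernelDims, name, ...) is assumed.
    std::shared_ptr<Aidge::Node> conv = Aidge::Conv(4, 16, {3, 3}, "conv1");
    // addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w") gives the
    // weight Producer dims {16, 4, 3, 3}; Conv_Op::inChannels()/outChannels() then read 4 and 16
    // back from that Tensor instead of from operator attributes.
    return conv ? 0 : 1;
}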
@@ -29,7 +29,7 @@
 #include "aidge/utils/Types.h"
 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, NoBias };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims, NoBias };
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
@@ -37,7 +37,6 @@ class ConvDepthWise_Op : public OperatorTensor,
                          public StaticAttributes<ConvDepthWiseAttr,
                                                  std::array<DimSize_t, DIM>,
                                                  std::array<DimSize_t, DIM>,
-                                                 DimSize_t,
                                                  std::array<DimSize_t, DIM>,
                                                  bool> {
 public:
@@ -48,21 +47,18 @@
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
                                          std::array<DimSize_t, DIM>,
                                          std::array<DimSize_t, DIM>,
-                                         DimSize_t,
                                          std::array<DimSize_t, DIM>,
                                          bool>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
-    constexpr ConvDepthWise_Op(const DimSize_t nbChannels,
-                               const std::array<DimSize_t, DIM> &kernel_dims,
+    constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
                                bool no_bias=false)
         : OperatorTensor(Type, 1, 2, 1),
           Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
                       attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-                      attr<ConvDepthWiseAttr::Channels>(nbChannels),
                       attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
                       attr<ConvDepthWiseAttr::NoBias>(no_bias)) {}
@@ -70,16 +66,7 @@
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op);
     /**
      * @brief Clone the operator using its copy-constructor.
@@ -90,105 +77,20 @@
     }
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        // check inputs have been associated
-        // TODO : add a check of inputs dimensions ?
-        bool associated = true;
-        for (IOIndex_t i = 0; i < 3; ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
-            std::array<DimSize_t, DIM + 2> outputDims = {};
-            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-            for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
-                const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
-                                               (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
-                                               1;
-                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                        floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                              static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
-            }
-            // std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
-            // if (mInputs[1]->empty()) {
-            //     mInputs[1]->resize(weightDims);
-            // }
-            // if (mInputs[2]->empty()) {
-            //     mInputs[2]->resize({mInputs[0]->dims()[1]});
-            // }
-            outputDims[1] = inputDims[1];
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-        }
-        return associated;
-    }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
-    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
-        if (outputIdx != 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-        }
-        if (firstEltDims.size() != outputDims.size()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
-        }
-        if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
-            // Offset
-            auto inputIdxDims = firstEltDims; // batch idx is the same
-            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
-                }
-            }
-            // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-            // Input
-            // same batch value
-            std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
-            for (DimIdx_t i = 0; i < DIM; ++i) {
-                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
-                            + 1
-                            + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                            * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-                inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
-            }
-            // Weight
-            std::vector<DimSize_t> weightDims{outputDims[1], 1};
-            for (std::size_t i = 0; i < DIM; ++i) {
-                weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
-            }
-            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
-            weightIdxDims[0] = firstEltDims[1];
-            // Result
-            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
-            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
-            // Bias
-            if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
-                const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
-                const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
-                res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
-            }
-            return res;
-        }
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-    }
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-        // By default, automatically set backend for weight and bias inputs
-        getInput(1)->setBackend(name, device);
-        getInput(2)->setBackend(name, device);
-    }
+    std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
+    computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
+                          const std::vector<DimSize_t>& outputDims,
+                          const IOIndex_t outputIdx = 0) const override;
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    DimSize_t nbChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of channel imposed.");
+        }
+        return getInput(1)->template dims<DIM+2>()[0];
+    }
     static const std::vector<std::string> getInputsName(){
@@ -199,9 +101,6 @@
     }
 };
-template <DimIdx_t DIM>
-const std::string ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            const std::array<DimSize_t, DIM> &kernelDims,
@@ -211,7 +110,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            bool noBias=false) {
     // FIXME: properly handle default w&b initialization in every cases
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims, noBias), name);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
     addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
     addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
     return convDW;
@@ -231,9 +130,11 @@ inline std::shared_ptr<Node> ConvDepthWise(
 }
 } // namespace Aidge
+extern template class Aidge::ConvDepthWise_Op<2>;
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels",
+const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims",
                                                                    "KernelDims", "NoBias"};
 }
...
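Same idea on the depthwise side: the weight is laid out as {nbChannels, 1, kernelH, kernelW} and nbChannels() reads dimension 0. A minimal sketch under the same assumptions as the Conv example above (Tensor built from its dims, associateInput available):

#include <array>
#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/ConvDepthWise.hpp"

int main() {
    // nbChannels is no longer a constructor argument.
    Aidge::ConvDepthWise_Op<2> convDW(std::array<Aidge::DimSize_t, 2>{3, 3});
    // Depthwise weight: {nbChannels, 1, kernelH, kernelW}.
    auto weight = std::make_shared<Aidge::Tensor>(std::array<Aidge::DimSize_t, 4>{8, 1, 3, 3});
    convDW.associateInput(1, weight); // input #1 is the weight
    return (convDW.nbChannels() == 8) ? 0 : 1; // channel count read from the weight Tensor
}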
@@ -30,24 +30,27 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
   py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
-  .def(py::init<DimSize_t,
-                DimSize_t,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                bool>(),
-        py::arg("in_channels"),
-        py::arg("out_channels"),
-        py::arg("kernel_dims"),
-        py::arg("stride_dims"),
-        py::arg("dilation_dims"),
-        py::arg("no_bias"))
-  .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
-  .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
-  .def_static("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
-  ;
-  declare_registrable<Conv_Op<DIM>>(m, pyClassName);
+  .def(py::init([](const std::vector<DimSize_t>& kernel_dims,
+                   const std::vector<DimSize_t> &stride_dims,
+                   const std::vector<DimSize_t> &dilation_dims,
+                   bool no_bias) {
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
+
+        return new Conv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+    }), py::arg("kernel_dims"),
+        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
+        py::arg("no_bias") = false)
+  .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
+  .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
+  .def_static("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
+  .def("in_channels", &Conv_Op<DIM>::inChannels)
+  .def("out_channels", &Conv_Op<DIM>::outChannels)
+  ;
+  declare_registrable<Conv_Op<DIM>>(m, pyClassName);
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
                                                          DimSize_t out_channels,
@@ -72,8 +75,9 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
 void init_Conv(py::module &m) {
-  declare_ConvOp<1>(m);
+//   declare_ConvOp<1>(m);
   declare_ConvOp<2>(m);
-  declare_ConvOp<3>(m);
+//   declare_ConvOp<3>(m);
 }
 } // namespace Aidge
@@ -31,12 +31,10 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
   py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
-  .def(py::init<const DimSize_t,
-                const std::array<DimSize_t, DIM> &,
+  .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
                 bool>(),
-        py::arg("nb_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
         py::arg("dilation_dims"),
@@ -67,9 +65,9 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
 void init_ConvDepthWise(py::module &m) {
-  declare_ConvDepthWiseOp<1>(m);
+//   declare_ConvDepthWiseOp<1>(m);
   declare_ConvDepthWiseOp<2>(m);
-  declare_ConvDepthWiseOp<3>(m);
+//   declare_ConvDepthWiseOp<3>(m);
   // FIXME:
   // m.def("ConvDepthWise1D", static_cast<NodeAPI(*)(const char*, int, int, int const
...
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/Conv.hpp"
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <stdexcept> // std::runtime_error
#include <string>
#include <utility> // std::pair
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
template <Aidge::DimIdx_t DIM>
const std::string Aidge::Conv_Op<DIM>::Type = "Conv";
template <Aidge::DimIdx_t DIM>
Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
: OperatorTensor(op),
Attributes_(op)
{
if (op.mImpl) {
SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
} else {
mImpl = nullptr;
}
}
template <Aidge::DimIdx_t DIM>
bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
// check inputs have been associated
bool associated = true;
for (IOIndex_t i = 0; i < 3; ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
}
associated &= !(getInput(i)->empty());
}
if (associated) {
// first check weight since it defines inChannels and outChannels
AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
"Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
// check data
AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
(getInput(0)->template dims<DIM+2>()[1] == inChannels()),
"Wrong input size for Conv operator.");
// check optional bias
if(!this->template getAttr<ConvAttr::NoBias>())
AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
(getInput(2)->template dims<1>()[0] == outChannels()),
"Wrong bias size for Conv operator.");
std::array<DimSize_t, DIM + 2> outputDims{};
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
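// Per spatial dim: out = 1 + floor((in - kernelExtent) / stride), with
// kernelExtent = dilation * (kernel - 1) + 1 (e.g. in=32, kernel=3, dilation=1,
// stride=1 gives out=30); padding is handled by the Pad_Op operator.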
for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
(this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
1;
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
}
outputDims[1] = outChannels();
outputDims[0] = inputDims[0];
mOutputs[0]->resize(outputDims);
}
return associated;
}
template <Aidge::DimIdx_t DIM>
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>
Aidge::Conv_Op<DIM>::computeReceptiveField(
const std::vector<Aidge::DimSize_t>& firstEltDims,
const std::vector<Aidge::DimSize_t>& outputDims,
const Aidge::IOIndex_t outputIdx) const
{
if (outputIdx != 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
}
if (firstEltDims.size() != outputDims.size()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
}
if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
// Offset
auto inputIdxDims = firstEltDims; // batch idx is the same
inputIdxDims[1] = 0; // each channel is used so start with the first one
for (DimIdx_t i = 0; i < (DIM+2); ++i) {
if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
}
}
// padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
// Input
// same batch value, every input channel is used
std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
for (DimIdx_t i = 0; i < DIM; ++i) {
inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
* this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+ 1
+ (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
* this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
}
// Weight
// same output value, every input channel is used
std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
for (std::size_t i = 0; i < DIM; ++i) {
weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
}
std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
weightIdxDims[0] = firstEltDims[1];
// Result
std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> res;
res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
// Bias
if (! this->template getAttr<ConvAttr::NoBias>()){
const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
}
return res;
}
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
}
template <Aidge::DimIdx_t DIM>
void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
mOutputs[0]->setBackend(name, device);
// By default, automatically set backend for weight and bias inputs
getInput(1)->setBackend(name, device);
getInput(2)->setBackend(name, device);
}
template class Aidge::Conv_Op<2>;
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/ConvDepthWise.hpp"
#include <array>
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <stdexcept> // std::runtime_error
#include <string>
#include <utility> // std::pair
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
template <Aidge::DimIdx_t DIM>
const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
template <Aidge::DimIdx_t DIM>
Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
: OperatorTensor(op),
Attributes_(op)
{
if (op.mImpl) {
SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
} else {
mImpl = nullptr;
}
}
template <Aidge::DimIdx_t DIM>
bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
// check inputs have been associated
// TODO : add a check of inputs dimensions ?
bool associated = true;
for (IOIndex_t i = 0; i < 3; ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
}
associated &= !(getInput(i)->empty());
}
if (associated) {
// first check weight since it defines nbChannels
AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
"Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
// check data
AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
(getInput(0)->template dims<DIM+2>()[1] == nbChannels()),
"Wrong input size for Conv operator.");
// check optional bias
if(!this->template getAttr<ConvDepthWiseAttr::NoBias>())
AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
(getInput(2)->template dims<1>()[0] == nbChannels()),
"Wrong bias size for Conv operator.");
std::array<DimSize_t, DIM + 2> outputDims = {};
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
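// Same spatial formula as Conv_Op: out = 1 + floor((in - kernelExtent) / stride)
// with kernelExtent = dilation * (kernel - 1) + 1; the channel count is kept unchanged.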
for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
1;
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
}
outputDims[1] = inputDims[1];
outputDims[0] = inputDims[0];
mOutputs[0]->resize(outputDims);
}
return associated;
}
template <Aidge::DimIdx_t DIM>
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>
Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
const std::vector<Aidge::DimSize_t>& firstEltDims,
const std::vector<Aidge::DimSize_t>& outputDims,
const Aidge::IOIndex_t outputIdx) const
{
if (outputIdx != 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
}
if (firstEltDims.size() != outputDims.size()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
}
if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
// Offset
auto inputIdxDims = firstEltDims; // batch idx is the same
for (DimIdx_t i = 0; i < (DIM+2); ++i) {
if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
}
}
// padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
// Input
// same batch value
std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
for (DimIdx_t i = 0; i < DIM; ++i) {
inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
* this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+ 1
+ (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
* this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
}
// Weight
std::vector<DimSize_t> weightDims{outputDims[1], 1};
for (std::size_t i = 0; i < DIM; ++i) {
weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
}
std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
weightIdxDims[0] = firstEltDims[1];
// Result
std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> res;
res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
// Bias
if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
}
return res;
}
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
}
template <Aidge::DimIdx_t DIM>
void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
mOutputs[0]->setBackend(name, device);
// By default, automatically set backend for weight and bias inputs
getInput(1)->setBackend(name, device);
getInput(2)->setBackend(name, device);
}
template class Aidge::ConvDepthWise_Op<2>;
\ No newline at end of file