Skip to content
Snippets Groups Projects
Commit 8cbb8279 authored by Olivier BICHLER's avatar Olivier BICHLER
Browse files

Merge remote-tracking branch 'origin/dev' into matmultiling

parents 009d227d 2a2046a7
No related branches found
No related tags found
3 merge requests!279v0.4.0,!253v0.4.0,!244Add MatMulTiling recipe
Showing
with 401 additions and 418 deletions
...@@ -27,8 +27,8 @@ class test_attributes(unittest.TestCase): ...@@ -27,8 +27,8 @@ class test_attributes(unittest.TestCase):
out_channels = 8 out_channels = 8
k_dims = [2, 2] k_dims = [2, 2]
conv_op = aidge_core.Conv2D(in_channels , out_channels, k_dims).get_operator() conv_op = aidge_core.Conv2D(in_channels , out_channels, k_dims).get_operator()
self.assertEqual(conv_op.get_attr("InChannels"), in_channels) self.assertEqual(conv_op.in_channels(), in_channels)
self.assertEqual(conv_op.get_attr("OutChannels"), out_channels) self.assertEqual(conv_op.out_channels(), out_channels)
self.assertEqual(conv_op.get_attr("KernelDims"), k_dims) self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
def test_fc(self): def test_fc(self):
...@@ -36,7 +36,7 @@ class test_attributes(unittest.TestCase): ...@@ -36,7 +36,7 @@ class test_attributes(unittest.TestCase):
out_channels = 8 out_channels = 8
nb_bias = True nb_bias = True
fc_op = aidge_core.FC(in_channels, out_channels, nb_bias).get_operator() fc_op = aidge_core.FC(in_channels, out_channels, nb_bias).get_operator()
self.assertEqual(fc_op.get_attr("OutChannels"), out_channels) self.assertEqual(fc_op.out_channels(), out_channels)
self.assertEqual(fc_op.get_attr("NoBias"), nb_bias) self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
def test_producer_1D(self): def test_producer_1D(self):
......
...@@ -59,9 +59,11 @@ ...@@ -59,9 +59,11 @@
#include "aidge/operator/ReduceMean.hpp" #include "aidge/operator/ReduceMean.hpp"
#include "aidge/operator/ReLU.hpp" #include "aidge/operator/ReLU.hpp"
#include "aidge/operator/Reshape.hpp" #include "aidge/operator/Reshape.hpp"
#include "aidge/operator/Shape.hpp"
#include "aidge/operator/Scaling.hpp" #include "aidge/operator/Scaling.hpp"
#include "aidge/operator/Slice.hpp" #include "aidge/operator/Slice.hpp"
#include "aidge/operator/Softmax.hpp" #include "aidge/operator/Softmax.hpp"
#include "aidge/operator/Split.hpp"
#include "aidge/operator/Sqrt.hpp" #include "aidge/operator/Sqrt.hpp"
#include "aidge/operator/Sub.hpp" #include "aidge/operator/Sub.hpp"
#include "aidge/operator/Transpose.hpp" #include "aidge/operator/Transpose.hpp"
......
...@@ -182,6 +182,17 @@ public: ...@@ -182,6 +182,17 @@ public:
*/ */
inline std::size_t size() const noexcept { return mNbElts; } inline std::size_t size() const noexcept { return mNbElts; }
/**
* @brief Return the current capacity of the tensor, i.e. the actual memory
* currently being allocated. It can be different from the size:
* - Capacity can be 0 if the tensor memory was not yet initialized (because
* of lazy initialization, memory is allocated only when it needs to be
* accessed the first time).
* - Capacity can be > size if the tensor was downsized but memory was not
* reallocated.
*/
virtual std::size_t capacity() const noexcept = 0;
/** /**
* @brief Return the size (in bytes) of one element (scalar). * @brief Return the size (in bytes) of one element (scalar).
*/ */
......
...@@ -43,6 +43,8 @@ public: ...@@ -43,6 +43,8 @@ public:
return std::make_shared<TensorImpl_cpu<T>>(device, dims); return std::make_shared<TensorImpl_cpu<T>>(device, dims);
} }
inline std::size_t capacity() const noexcept override final { return mData.size(); }
inline std::size_t scalarSize() const noexcept override final { return sizeof(T); } inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
void zeros() override final; void zeros() override final;
......
...@@ -208,7 +208,12 @@ public: ...@@ -208,7 +208,12 @@ public:
/** /**
* @brief Compute dimensions of input/output Tensors for each Operator of the * @brief Compute dimensions of input/output Tensors for each Operator of the
* GraphView object's Nodes. * GraphView object's Nodes, by calling Node::forwardDims().
* This function verifies the following conditions:
* - Every node will forwardDims() regardless of if dims were previously forwarded or not;
* - forwadDims() calls are made in node dependencies order, because if dims have changed
* at any point in the graph, it must de propagated correctly to all succeeding nodes;
* - It handles cyclic dependencies correctly (currently only induced by the Memorize_Op).
*/ */
bool forwardDims(const std::vector<std::vector<DimSize_t>>& dims = {}, bool allowDataDependency = false); bool forwardDims(const std::vector<std::vector<DimSize_t>>& dims = {}, bool allowDataDependency = false);
......
...@@ -102,13 +102,14 @@ inline std::shared_ptr<Node> AvgPooling( ...@@ -102,13 +102,14 @@ inline std::shared_ptr<Node> AvgPooling(
return AvgPooling(to_array(kernel_dims), name, stride_dims); return AvgPooling(to_array(kernel_dims), name, stride_dims);
} }
} // namespace Aidge
extern template class Aidge::AvgPooling_Op<1>; extern template class Aidge::AvgPooling_Op<1>;
extern template class Aidge::AvgPooling_Op<2>; extern template class Aidge::AvgPooling_Op<2>;
extern template class Aidge::AvgPooling_Op<3>; extern template class Aidge::AvgPooling_Op<3>;
extern template class Aidge::AvgPooling_Op<4>; extern template class Aidge::AvgPooling_Op<4>;
} // namespace Aidge
namespace { namespace {
template <> template <>
const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims", const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
namespace Aidge { namespace Aidge {
enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims, NoBias }; enum class ConvAttr { StrideDims, DilationDims, KernelDims, NoBias };
template <DimIdx_t DIM> template <DimIdx_t DIM>
class Conv_Op : public OperatorTensor, class Conv_Op : public OperatorTensor,
...@@ -38,8 +38,6 @@ class Conv_Op : public OperatorTensor, ...@@ -38,8 +38,6 @@ class Conv_Op : public OperatorTensor,
public StaticAttributes<ConvAttr, public StaticAttributes<ConvAttr,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
DimSize_t,
DimSize_t,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
bool> { bool> {
...@@ -51,24 +49,20 @@ public: ...@@ -51,24 +49,20 @@ public:
using Attributes_ = StaticAttributes<ConvAttr, using Attributes_ = StaticAttributes<ConvAttr,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
DimSize_t,
DimSize_t,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
bool>; bool>;
template <ConvAttr e> template <ConvAttr e>
using attr = typename Attributes_::template attr<e>; using attr = typename Attributes_::template attr<e>;
constexpr Conv_Op(DimSize_t inChannels, constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
DimSize_t outChannels,
const std::array<DimSize_t, DIM> &kernelDims,
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
bool noBias = false) bool noBias = false)
: OperatorTensor(Type, 1, 2, 1), : OperatorTensor(Type, 1, 2, 1),
Attributes_(attr<ConvAttr::StrideDims>(strideDims), Attributes_(attr<ConvAttr::StrideDims>(strideDims),
attr<ConvAttr::DilationDims>(dilationDims), attr<ConvAttr::DilationDims>(dilationDims),
attr<ConvAttr::InChannels>(inChannels), // attr<ConvAttr::InChannels>(inChannels),
attr<ConvAttr::OutChannels>(outChannels), // attr<ConvAttr::OutChannels>(outChannels),
attr<ConvAttr::KernelDims>(kernelDims), attr<ConvAttr::KernelDims>(kernelDims),
attr<ConvAttr::NoBias>(noBias)) {} attr<ConvAttr::NoBias>(noBias)) {}
...@@ -76,16 +70,7 @@ public: ...@@ -76,16 +70,7 @@ public:
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
Conv_Op(const Conv_Op<DIM>& op) Conv_Op(const Conv_Op<DIM>& op);
: OperatorTensor(op),
Attributes_(op)
{
if (op.mImpl) {
SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
} else {
mImpl = nullptr;
}
}
/** /**
* @brief Clone the operator using its copy-constructor. * @brief Clone the operator using its copy-constructor.
...@@ -108,115 +93,28 @@ public: ...@@ -108,115 +93,28 @@ public:
// } // }
bool forwardDims(bool /*allowDataDependency*/ = false) override final { bool forwardDims(bool /*allowDataDependency*/ = false) override final;
// check inputs have been associated
bool associated = true;
for (IOIndex_t i = 0; i < 3; ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
}
associated &= !(getInput(i)->empty());
}
if (associated) {
AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
(getInput(0)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()),
"Wrong input size for Conv operator.");
AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)) &&
(getInput(1)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()) &&
(getInput(1)->template dims<DIM+2>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
"Wrong weight size for Conv operator.");
if(!this->template getAttr<ConvAttr::NoBias>())
AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
(getInput(2)->template dims<1>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
"Wrong bias size for Conv operator.");
std::array<DimSize_t, DIM + 2> outputDims{};
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
(this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
1;
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
}
outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
outputDims[0] = inputDims[0];
mOutputs[0]->resize(outputDims);
}
return associated;
}
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
const std::vector<DimSize_t>& outputDims, const std::vector<DimSize_t>& outputDims,
const IOIndex_t outputIdx = 0) const override { const IOIndex_t outputIdx = 0) const override;
if (outputIdx != 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
}
if (firstEltDims.size() != outputDims.size()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
}
if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
// Offset
auto inputIdxDims = firstEltDims; // batch idx is the same
inputIdxDims[1] = 0; // each channel is used so start with the first one
for (DimIdx_t i = 0; i < (DIM+2); ++i) {
if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
}
}
// padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
// Input
// same batch value, every input channel is used
std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
for (DimIdx_t i = 0; i < DIM; ++i) {
inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
* this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+ 1
+ (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
* this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
}
// Weight void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
// same output value, every input channel is used
std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
for (std::size_t i = 0; i < DIM; ++i) {
weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
}
std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
weightIdxDims[0] = firstEltDims[1];
// Result DimSize_t inChannels() const {
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res; if (!getInput(1)) {
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims)); AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of input channel imposed.");
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
// Bias
if (! this->template getAttr<ConvAttr::NoBias>()){
const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
}
return res;
} }
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); return getInput(1)->template dims<DIM+2>()[1];
} }
DimSize_t outChannels() const {
void setBackend(const std::string &name, DeviceIdx_t device = 0) override { if (!getInput(1)) {
SET_IMPL_MACRO(Conv_Op<DIM>, *this, name); AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of output channel imposed.");
mOutputs[0]->setBackend(name, device); }
return getInput(1)->template dims<DIM+2>()[0];
// By default, automatically set backend for weight and bias inputs
getInput(1)->setBackend(name, device);
getInput(2)->setBackend(name, device);
} }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
...@@ -227,8 +125,6 @@ public: ...@@ -227,8 +125,6 @@ public:
} }
}; };
template <DimIdx_t DIM>
const std::string Conv_Op<DIM>::Type = "Conv";
/** /**
* @brief Perform a convolution on the input Tensor. * @brief Perform a convolution on the input Tensor.
...@@ -252,7 +148,7 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels, ...@@ -252,7 +148,7 @@ inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
bool noBias = false) { bool noBias = false) {
// FIXME: properly handle default w&b initialization in every cases // FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported"); static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(inChannels, outChannels, kernelDims, strideDims, dilationDims, noBias), name); auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w"); addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
...@@ -274,13 +170,13 @@ inline std::shared_ptr<Node> Conv( ...@@ -274,13 +170,13 @@ inline std::shared_ptr<Node> Conv(
} }
} // namespace Aidge } // namespace Aidge
extern template class Aidge::Conv_Op<2>;
namespace { namespace {
template <> template <>
const char *const EnumStrings<Aidge::ConvAttr>::data[] = { const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
"StrideDims", "StrideDims",
"DilationDims", "DilationDims",
"InChannels",
"OutChannels",
"KernelDims", "KernelDims",
"NoBias" "NoBias"
}; };
......
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
namespace Aidge { namespace Aidge {
enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims, NoBias }; enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims, NoBias };
template <DimIdx_t DIM> template <DimIdx_t DIM>
class ConvDepthWise_Op : public OperatorTensor, class ConvDepthWise_Op : public OperatorTensor,
...@@ -37,7 +37,6 @@ class ConvDepthWise_Op : public OperatorTensor, ...@@ -37,7 +37,6 @@ class ConvDepthWise_Op : public OperatorTensor,
public StaticAttributes<ConvDepthWiseAttr, public StaticAttributes<ConvDepthWiseAttr,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
DimSize_t,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
bool> { bool> {
public: public:
...@@ -48,21 +47,18 @@ public: ...@@ -48,21 +47,18 @@ public:
using Attributes_ = StaticAttributes<ConvDepthWiseAttr, using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
DimSize_t,
std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
bool>; bool>;
template <ConvDepthWiseAttr e> template <ConvDepthWiseAttr e>
using attr = typename Attributes_::template attr<e>; using attr = typename Attributes_::template attr<e>;
constexpr ConvDepthWise_Op(const DimSize_t nbChannels, constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
bool no_bias=false) bool no_bias=false)
: OperatorTensor(Type, 1, 2, 1), : OperatorTensor(Type, 1, 2, 1),
Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims), Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
attr<ConvDepthWiseAttr::DilationDims>(dilation_dims), attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
attr<ConvDepthWiseAttr::Channels>(nbChannels),
attr<ConvDepthWiseAttr::KernelDims>(kernel_dims), attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
attr<ConvDepthWiseAttr::NoBias>(no_bias)) {} attr<ConvDepthWiseAttr::NoBias>(no_bias)) {}
...@@ -70,16 +66,7 @@ public: ...@@ -70,16 +66,7 @@ public:
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op) ConvDepthWise_Op(const ConvDepthWise_Op<DIM>& op);
: OperatorTensor(op),
Attributes_(op)
{
if (op.mImpl){
SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
}else{
mImpl = nullptr;
}
}
/** /**
* @brief Clone the operator using its copy-constructor. * @brief Clone the operator using its copy-constructor.
...@@ -90,105 +77,20 @@ public: ...@@ -90,105 +77,20 @@ public:
} }
bool forwardDims(bool /*allowDataDependency*/ = false) override final { bool forwardDims(bool /*allowDataDependency*/ = false) override final;
// check inputs have been associated
// TODO : add a check of inputs dimensions ?
bool associated = true;
for (IOIndex_t i = 0; i < 3; ++i) {
if (!getInput(i)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
}
associated &= !(getInput(i)->empty());
}
if (associated) {
std::array<DimSize_t, DIM + 2> outputDims = {};
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
1;
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
}
// std::array<DimSize_t, DIM+2> weightDims = append(mInputs[0]->dims()[1],append(1, this->template getAttr<ConvDepthWiseAttr::KernelDims>()));
// if (mInputs[1]->empty()) {
// mInputs[1]->resize(weightDims);
// }
// if (mInputs[2]->empty()) {
// mInputs[2]->resize({mInputs[0]->dims()[1]});
// }
outputDims[1] = inputDims[1];
outputDims[0] = inputDims[0];
mOutputs[0]->resize(outputDims);
}
return associated; std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
} computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
const std::vector<DimSize_t>& outputDims,
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override { const IOIndex_t outputIdx = 0) const override;
if (outputIdx != 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
}
if (firstEltDims.size() != outputDims.size()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
}
if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
// Offset
auto inputIdxDims = firstEltDims; // batch idx is the same
for (DimIdx_t i = 0; i < (DIM+2); ++i) {
if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
}
}
// padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
// Input
// same batch value
std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
for (DimIdx_t i = 0; i < DIM; ++i) {
inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
* this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+ 1
+ (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
* this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
}
// Weight
std::vector<DimSize_t> weightDims{outputDims[1], 1};
for (std::size_t i = 0; i < DIM; ++i) {
weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
}
std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
weightIdxDims[0] = firstEltDims[1];
// Result
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
// Bias
if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
}
return res;
}
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
}
void setBackend(const std::string &name, DeviceIdx_t device = 0) override { void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
mOutputs[0]->setBackend(name, device);
// By default, automatically set backend for weight and bias inputs DimSize_t nbChannels() const {
getInput(1)->setBackend(name, device); if (!getInput(1)) {
getInput(2)->setBackend(name, device); AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of channel imposed.");
}
return getInput(1)->template dims<DIM+2>()[0];
} }
static const std::vector<std::string> getInputsName(){ static const std::vector<std::string> getInputsName(){
...@@ -199,9 +101,6 @@ public: ...@@ -199,9 +101,6 @@ public:
} }
}; };
template <DimIdx_t DIM>
const std::string ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
template <std::array<DimSize_t, 1>::size_type DIM> template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels, inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
const std::array<DimSize_t, DIM> &kernelDims, const std::array<DimSize_t, DIM> &kernelDims,
...@@ -211,7 +110,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels, ...@@ -211,7 +110,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
bool noBias=false) { bool noBias=false) {
// FIXME: properly handle default w&b initialization in every cases // FIXME: properly handle default w&b initialization in every cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported"); static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nbChannels, kernelDims, strideDims, dilationDims, noBias), name); auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w"); addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b"); addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
return convDW; return convDW;
...@@ -231,9 +130,11 @@ inline std::shared_ptr<Node> ConvDepthWise( ...@@ -231,9 +130,11 @@ inline std::shared_ptr<Node> ConvDepthWise(
} }
} // namespace Aidge } // namespace Aidge
extern template class Aidge::ConvDepthWise_Op<2>;
namespace { namespace {
template <> template <>
const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims", "Channels", const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims",
"KernelDims", "NoBias"}; "KernelDims", "NoBias"};
} }
......
...@@ -24,26 +24,24 @@ ...@@ -24,26 +24,24 @@
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
namespace Aidge { namespace Aidge {
enum class FCAttr { OutChannels, NoBias }; enum class FCAttr { NoBias };
class FC_Op : public OperatorTensor, class FC_Op : public OperatorTensor,
public Registrable<FC_Op, public Registrable<FC_Op,
std::string, std::string,
std::shared_ptr<OperatorImpl>(const FC_Op &)>, std::shared_ptr<OperatorImpl>(const FC_Op &)>,
public StaticAttributes<FCAttr, DimSize_t, bool> { public StaticAttributes<FCAttr, bool> {
public: public:
static const std::string Type; static const std::string Type;
FC_Op() = delete; FC_Op() = delete;
using Attributes_ = StaticAttributes<FCAttr, DimSize_t, bool>; using Attributes_ = StaticAttributes<FCAttr, bool>;
template <FCAttr e> using attr = typename Attributes_::template attr<e>; template <FCAttr e> using attr = typename Attributes_::template attr<e>;
FC_Op(DimSize_t out_channels, bool noBias) FC_Op(bool noBias)
: OperatorTensor(Type, 1, 2, 1), : OperatorTensor(Type, 1, 2, 1),
Attributes_( Attributes_(attr<FCAttr::NoBias>(noBias))
attr<FCAttr::OutChannels>(out_channels),
attr<FCAttr::NoBias>(noBias))
{} {}
/** /**
...@@ -75,6 +73,13 @@ public: ...@@ -75,6 +73,13 @@ public:
void setBackend(const std::string& name, DeviceIdx_t device = 0) override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
DimSize_t outChannels() const {
if (!getInput(1)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Fully Connected (FC) operator has no weight Tensor associated so no specific number of output channel imposed.");
}
return getInput(1)->template dims<2>()[0];
}
static const std::vector<std::string> getInputsName() { static const std::vector<std::string> getInputsName() {
return {"data_input", "weight", "bias"}; return {"data_input", "weight", "bias"};
} }
...@@ -83,9 +88,9 @@ public: ...@@ -83,9 +88,9 @@ public:
} }
}; };
inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, bool noBias = false, const std::string& name = "") { inline std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases // FIXME: properly handle default w&b initialization in every cases
auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(outChannels, noBias), name); auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), name);
addProducer(fc, 1, {outChannels, inChannels}, "w"); addProducer(fc, 1, {outChannels, inChannels}, "w");
addProducer(fc, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims addProducer(fc, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
return fc; return fc;
...@@ -94,8 +99,7 @@ inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, boo ...@@ -94,8 +99,7 @@ inline std::shared_ptr<Node> FC(DimSize_t inChannels, DimSize_t outChannels, boo
namespace { namespace {
template <> template <>
const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels", const char *const EnumStrings<Aidge::FCAttr>::data[] = {"NoBias"};
"NoBias"};
} }
#endif /* AIDGE_CORE_OPERATOR_FC_H_ */ #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
...@@ -27,6 +27,8 @@ ...@@ -27,6 +27,8 @@
namespace Aidge { namespace Aidge {
/** /**
* @brief Indentity_Op is an helper operator made to ease the declaration of MetaNodes. * @brief Indentity_Op is an helper operator made to ease the declaration of MetaNodes.
* This Operator has no Implementation, it just forward its input Tensor. * This Operator has no Implementation, it just forward its input Tensor.
...@@ -63,7 +65,7 @@ public: ...@@ -63,7 +65,7 @@ public:
return std::make_shared<Identity_Op>(*this); return std::make_shared<Identity_Op>(*this);
} }
bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing // bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
/** /**
* @brief Check if output dimensions have been computed. * @brief Check if output dimensions have been computed.
...@@ -74,7 +76,7 @@ public: ...@@ -74,7 +76,7 @@ public:
* @return false Input has no dimensions or is a nullptr. * @return false Input has no dimensions or is a nullptr.
*/ */
bool dimsForwarded() const override final { bool dimsForwarded() const override final {
return mInputs[0] ? !mInputs[0]->empty() : false; return mInputs[0] ? (mInputs[0]->empty() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
} }
......
...@@ -72,7 +72,6 @@ public: ...@@ -72,7 +72,6 @@ public:
void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final;
bool forwardDims(bool allowDataDependency = false) override final { bool forwardDims(bool allowDataDependency = false) override final {
// Check first that all required inputs are available, otherwise // Check first that all required inputs are available, otherwise
...@@ -118,7 +117,7 @@ public: ...@@ -118,7 +117,7 @@ public:
void updateConsummerProducer() override; void updateConsummerProducer() override;
void forward() override; void forward() override;
void backward() override { void backward() override {
assert(false && "not implemented"); AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented yet for a MetaOperator");
} }
inline bool isAtomic() const noexcept override final { return false; } inline bool isAtomic() const noexcept override final { return false; }
......
...@@ -12,22 +12,26 @@ ...@@ -12,22 +12,26 @@
#ifndef AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ #ifndef AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
#define AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ #define AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
#include <array>
#include <memory>
#include <string>
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/graph/OpArgs.hpp" // Sequential
#include "aidge/operator/MetaOperator.hpp" #include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/AvgPooling.hpp" #include "aidge/operator/AvgPooling.hpp"
#include "aidge/operator/MaxPooling.hpp" #include "aidge/operator/MaxPooling.hpp"
#include "aidge/operator/Conv.hpp" #include "aidge/operator/Conv.hpp"
#include "aidge/operator/ConvDepthWise.hpp" #include "aidge/operator/ConvDepthWise.hpp"
#include "aidge/operator/Pad.hpp" #include "aidge/operator/Pad.hpp"
#include "aidge/operator/Memorize.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/Mul.hpp"
#include "aidge/operator/FC.hpp"
#include "aidge/operator/Identity.hpp"
#include "aidge/operator/Concat.hpp"
#include "aidge/operator/Tanh.hpp"
#include "aidge/operator/Sigmoid.hpp" #include "aidge/operator/Sigmoid.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/Types.h"
namespace Aidge { namespace Aidge {
template <std::array<DimSize_t, 1>::size_type DIM> template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels, inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
DimSize_t out_channels, DimSize_t out_channels,
...@@ -40,7 +44,7 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels, ...@@ -40,7 +44,7 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
{ {
// Construct micro-graph // Construct micro-graph
auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0); auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : ""); auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name); auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name);
addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w"); addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
...@@ -48,6 +52,20 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels, ...@@ -48,6 +52,20 @@ inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
return metaOp; return metaOp;
} }
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
bool no_bias = false)
{
auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), "");
return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM> template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedConv( inline std::shared_ptr<Node> PaddedConv(
...@@ -63,6 +81,8 @@ inline std::shared_ptr<Node> PaddedConv( ...@@ -63,6 +81,8 @@ inline std::shared_ptr<Node> PaddedConv(
return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias); return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
} }
////////////////////////////////////////////////////////////////////////////////
template <std::array<DimSize_t, 1>::size_type DIM> template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels, inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
const std::array<DimSize_t, DIM> &kernel_dims, const std::array<DimSize_t, DIM> &kernel_dims,
...@@ -74,7 +94,7 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels, ...@@ -74,7 +94,7 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
{ {
// Construct micro-graph // Construct micro-graph
auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0); auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(nb_channels, kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : ""); auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name); auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w"); addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w");
...@@ -82,6 +102,20 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels, ...@@ -82,6 +102,20 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
return metaOp; return metaOp;
} }
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
bool no_bias = false)
{
auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), "");
return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM> template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedConvDepthWise( inline std::shared_ptr<Node> PaddedConvDepthWise(
...@@ -96,30 +130,29 @@ inline std::shared_ptr<Node> PaddedConvDepthWise( ...@@ -96,30 +130,29 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(
return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias); return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
} }
////////////////////////////////////////////////////////////////////////////////
template <std::array<DimSize_t, 1>::size_type DIM> template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims, extern std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "", const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0)) const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
{
auto graph = Sequential({
Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
AvgPooling(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
});
return MetaOperator("PaddedAvgPooling", graph, name);
} template <std::array<DimSize_t, 1>::size_type DIM>
extern std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM> template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedAvgPooling( extern std::shared_ptr<Node> PaddedAvgPooling(DimSize_t const (&kernel_dims)[DIM],
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "", const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0)) const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0));
{
return PaddedAvgPooling(to_array(kernel_dims), name, stride_dims, padding_dims); ////////////////////////////////////////////////////////////////////////////////
}
template <std::array<DimSize_t, 1>::size_type DIM> template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims, inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
...@@ -136,6 +169,20 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> & ...@@ -136,6 +169,20 @@ inline std::shared_ptr<Node> PaddedMaxPooling(const std::array<DimSize_t, DIM> &
return MetaOperator("PaddedMaxPooling", graph, name); return MetaOperator("PaddedMaxPooling", graph, name);
} }
/**
 * @brief Build the anonymous "PaddedMaxPooling" meta-operator: a Pad node feeding a
 * MaxPooling node.
 * @param kernel_dims Pooling window size for each spatial dimension.
 * @param stride_dims Stride for each spatial dimension (defaults to 1).
 * @param padding_dims Padding for each border (defaults to 0).
 * @param ceil_mode If true, output dimensions are computed with ceiling instead of floor.
 * @return std::shared_ptr<MetaOperator_Op> Meta-operator wrapping the Pad -> MaxPooling micro-graph.
 */
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<MetaOperator_Op> PaddedMaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
    const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
    bool ceil_mode = false)
{
    // Pad first, then pool: together they act as one padded max-pooling.
    auto padNode = Pad<DIM>(padding_dims, "");
    auto poolNode = MaxPooling(kernel_dims, "", stride_dims, ceil_mode);
    return std::make_shared<MetaOperator_Op>("PaddedMaxPooling", Sequential({padNode, poolNode}));
}
// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM> template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedMaxPooling( inline std::shared_ptr<Node> PaddedMaxPooling(
...@@ -148,115 +195,17 @@ inline std::shared_ptr<Node> PaddedMaxPooling( ...@@ -148,115 +195,17 @@ inline std::shared_ptr<Node> PaddedMaxPooling(
return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims, ceil_mode); return PaddedMaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims, ceil_mode);
} }
inline std::shared_ptr<Node> LSTM(DimSize_t in_channels, ////////////////////////////////////////////////////////////////////////////////
DimSize_t hidden_channels,
DimSize_t seq_length, std::shared_ptr<Node> LSTM(DimSize_t in_channels,
bool noBias = false, DimSize_t hidden_channels,
const std::string& name = "") DimSize_t seq_length,
{ bool noBias = false,
// Construct micro-graph const std::string& name = "");
auto input = Identity((!name.empty()) ? name + "_input" : "");
auto hiddenState = Memorize(seq_length, (!name.empty()) ? name + "_hidden_state" : ""); std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length,
auto cellState = Memorize(seq_length, (!name.empty()) ? name + "_cell_state" : ""); bool noBias = false);
auto add = Add(2, (!name.empty()) ? name + "_add" : "");
// Forget gate
auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_forgetGateX" : "");
input->addChild(forgetGateX, 0, 0);
auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_forgetGateH" : "");
hiddenState->addChild(forgetGateH, 1, 0);
auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : "");
forgetGateX->addChild(forgetGate, 0, 0);
forgetGateH->addChild(forgetGate, 0, 1);
auto forgetGateAct = Sigmoid((!name.empty()) ? name + "_forgetGateAct" : "");
auto forgetGateMul = Mul((!name.empty()) ? name + "_forgetGateMul" : "");
forgetGate->addChild(forgetGateAct, 0, 0);
forgetGateAct->addChild(forgetGateMul, 0, 0);
forgetGateMul->addChild(add, 0, 0);
cellState->addChild(forgetGateMul, 1, 1);
// Input gate
auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_inputGateX" : "");
input->addChild(inputGateX, 0, 0);
auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_inputGateH" : "");
hiddenState->addChild(inputGateH, 1, 0);
auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : "");
inputGateX->addChild(inputGate, 0, 0);
inputGateH->addChild(inputGate, 0, 1);
auto inputGateAct = Sigmoid((!name.empty()) ? name + "_inputGateAct" : "");
auto inputGateMul = Mul((!name.empty()) ? name + "_inputGateMul" : "");
inputGate->addChild(inputGateAct, 0, 0);
inputGateAct->addChild(inputGateMul, 0, 0);
inputGateMul->addChild(add, 0, 1);
// Candidate for cell update
auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_cellCandidateX" : "");
input->addChild(cellCandidateX, 0, 0);
auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_cellCandidateH" : "");
hiddenState->addChild(cellCandidateH, 1, 0);
auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : "");
cellCandidateX->addChild(cellCandidate, 0, 0);
cellCandidateH->addChild(cellCandidate, 0, 1);
auto cellCandidateAct = Tanh((!name.empty()) ? name + "_cellCandidateAct" : "");
cellCandidate->addChild(cellCandidateAct, 0, 0);
cellCandidateAct->addChild(inputGateMul, 0, 1);
// Output gate
auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_outputGateX" : "");
input->addChild(outputGateX, 0, 0);
auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(hidden_channels, noBias), (!name.empty()) ? name + "_outputGateH" : "");
hiddenState->addChild(outputGateH, 1, 0);
auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : "");
outputGateX->addChild(outputGate, 0, 0);
outputGateH->addChild(outputGate, 0, 1);
auto outputGateAct = Sigmoid((!name.empty()) ? name + "_outputGateAct" : "");
auto outputGateMul = Mul((!name.empty()) ? name + "_outputGateMul" : "");
outputGate->addChild(outputGateAct, 0, 0);
outputGateAct->addChild(outputGateMul, 0, 0);
// Updated cell state to help determine new hidden state
auto cellUpdatedAct = Tanh((!name.empty()) ? name + "_cellUpdatedAct" : "");
add->addChild(cellUpdatedAct, 0, 0);
cellUpdatedAct->addChild(outputGateMul, 0, 1);
outputGateMul->addChild(hiddenState, 0, 0);
add->addChild(cellState, 0, 0);
std::shared_ptr<GraphView> microGraph = std::make_shared<GraphView>();
microGraph->add(input);
microGraph->add({hiddenState, cellState, add,
forgetGateX, forgetGateH, forgetGate, forgetGateAct, forgetGateMul,
inputGateX, inputGateH, inputGate, inputGateAct, inputGateMul,
cellCandidateX, cellCandidateH, cellCandidate, cellCandidateAct,
outputGateX, outputGateH, outputGate, outputGateAct, outputGateMul,
cellUpdatedAct}, false);
microGraph->setOrderedInputs({{input, 0},
{inputGateX, 1}, {outputGateX, 1}, {forgetGateX, 1}, {cellCandidateX, 1},
{inputGateH, 1}, {outputGateH, 1}, {forgetGateH, 1}, {cellCandidateH, 1},
{inputGateX, 2}, {outputGateX, 2}, {forgetGateX, 2}, {cellCandidateX, 2},
{inputGateH, 2}, {outputGateH, 2}, {forgetGateH, 2}, {cellCandidateH, 2},
{hiddenState, 1}, {cellState, 1}});
microGraph->setOrderedOutputs({{hiddenState, 0}, {cellState, 0}});
auto metaOp = MetaOperator("LSTM", microGraph, name);
addProducer(metaOp, 1, {hidden_channels, in_channels}, "wi");
addProducer(metaOp, 2, {hidden_channels, in_channels}, "wo");
addProducer(metaOp, 3, {hidden_channels, in_channels}, "wf");
addProducer(metaOp, 4, {hidden_channels, in_channels}, "wc");
addProducer(metaOp, 5, {hidden_channels, hidden_channels}, "ri");
addProducer(metaOp, 6, {hidden_channels, hidden_channels}, "ro");
addProducer(metaOp, 7, {hidden_channels, hidden_channels}, "rf");
addProducer(metaOp, 8, {hidden_channels, hidden_channels}, "rc");
addProducer(metaOp, 9, {(noBias ? 0 : hidden_channels)}, "wbi");
addProducer(metaOp, 10, {(noBias ? 0 : hidden_channels)}, "wbo");
addProducer(metaOp, 11, {(noBias ? 0 : hidden_channels)}, "wbf");
addProducer(metaOp, 12, {(noBias ? 0 : hidden_channels)}, "wbc");
addProducer(metaOp, 13, {(noBias ? 0 : hidden_channels)}, "rbi");
addProducer(metaOp, 14, {(noBias ? 0 : hidden_channels)}, "rbo");
addProducer(metaOp, 15, {(noBias ? 0 : hidden_channels)}, "rbf");
addProducer(metaOp, 16, {(noBias ? 0 : hidden_channels)}, "rbc");
return metaOp;
}
} // namespace Aidge } // namespace Aidge
#endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */ #endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */
...@@ -87,7 +87,6 @@ public: ...@@ -87,7 +87,6 @@ public:
* @param data Data to copy. * @param data Data to copy.
*/ */
virtual void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0; virtual void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
virtual void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) = 0;
virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0; virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0;
/** /**
* @brief Set the specified output value by performing a deep copy of the given data. * @brief Set the specified output value by performing a deep copy of the given data.
...@@ -95,7 +94,6 @@ public: ...@@ -95,7 +94,6 @@ public:
* @param inputIdx Index of the input to set. * @param inputIdx Index of the input to set.
*/ */
virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) = 0; virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) = 0;
virtual void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) = 0;
virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0; virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
std::shared_ptr<Hook> getHook(const std::string& hookName) { std::shared_ptr<Hook> getHook(const std::string& hookName) {
......
...@@ -57,13 +57,11 @@ public: ...@@ -57,13 +57,11 @@ public:
// Tensor access // Tensor access
// input management // input management
void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override; void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override;
void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override;
const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const; const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const;
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final; std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final;
// output management // output management
void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override; void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override;
void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override;
virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const; virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const;
std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final; std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final;
/////////////////////////////////////////////////// ///////////////////////////////////////////////////
......
...@@ -107,12 +107,6 @@ public: ...@@ -107,12 +107,6 @@ public:
void backward() override final { void backward() override final {
// fmt::print("Basic Producer backward() function.\n"); // fmt::print("Basic Producer backward() function.\n");
} }
void setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) override {
if (getAttr<ProdAttr::Constant>()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
}
OperatorTensor::setOutput(outputIdx, std::move(data));
}
void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) override { void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) override {
if (getAttr<ProdAttr::Constant>()) { if (getAttr<ProdAttr::Constant>()) {
......
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_SHAPE_H_
#define AIDGE_CORE_OPERATOR_SHAPE_H_
#include <cstdint> // std::int64_t
#include <memory>
#include <string>
#include <vector>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
// Default, backend-agnostic implementation of the Shape operator.
// forward() is defined in the corresponding .cpp file.
class Shape_OpImpl : public OperatorImpl {
public:
    Shape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
    void forward() override;
};
// Attributes of the Shape operator: the [Start, End] range of input axes whose
// sizes are reported in the output.
enum class ShapeAttr { Start, End };

/**
 * @brief Operator returning (a range of) the dimensions of its input Tensor.
 *
 * Takes one data input and produces one output. A default backend-agnostic
 * implementation (Shape_OpImpl) is installed at construction, so the operator
 * is usable without selecting a backend.
 */
class Shape_Op : public OperatorTensor,
                public Registrable<Shape_Op,
                                   std::string,
                                   std::shared_ptr<OperatorImpl>(const Shape_Op&)>,
                public StaticAttributes<ShapeAttr, std::int64_t, std::int64_t> {

public:
    static const std::string Type;

    // Start/End attributes are mandatory: no default construction.
    Shape_Op() = delete;

    using Attributes_ = StaticAttributes<ShapeAttr, std::int64_t, std::int64_t>;
    template <ShapeAttr e> using attr = typename Attributes_::template attr<e>;

    /**
     * @brief Construct a Shape operator.
     * @param start First input axis reported in the output.
     * @param end Last input axis reported (presumably inclusive, with negative
     *        values counted from the last axis, as in ONNX — TODO confirm
     *        against forwardDims() in the .cpp).
     */
    Shape_Op(std::int64_t start, std::int64_t end)
        : OperatorTensor(Type, 1, 0, 1),
          Attributes_(attr<ShapeAttr::Start>(start),
                      attr<ShapeAttr::End>(end))
    {
        // Install the default implementation so no backend is required.
        mImpl = std::make_shared<Shape_OpImpl>(*this);
    }

    /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
     * @param op Operator to copy.
     */
    Shape_Op(const Shape_Op& op)
        : OperatorTensor(op),
          Attributes_(op)
    {
        if (!op.backend().empty()) {
            // Reuse the registered backend implementation of the copied operator.
            SET_IMPL_MACRO(Shape_Op, *this, op.backend());
        }
        else {
            // Fall back to the default backend-agnostic implementation.
            mImpl = std::make_shared<Shape_OpImpl>(*this);
        }
    }

    /**
     * @brief Clone the operator using its copy-constructor.
     * @see Operator::Shape_Op
     */
    std::shared_ptr<Operator> clone() const override {
        return std::make_shared<Shape_Op>(*this);
    }

    // Defined in the .cpp; ignores allowDataDependency (parameter name is
    // commented out), so output dims do not depend on input values.
    bool forwardDims(bool /*allowDataDependency*/ = false) override final;

    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;

    static const std::vector<std::string> getInputsName(){
        return {"data_input"};
    }
    static const std::vector<std::string> getOutputsName(){
        return {"data_output"};
    }
};
inline std::shared_ptr<Node> Shape(std::int64_t start = 0, std::int64_t end = -1, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
}
} // namespace Aidge
namespace {
// Human-readable names for ShapeAttr, used for attribute lookup/printing.
template <>
const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"Start", "End"};
}
#endif /* AIDGE_CORE_OPERATOR_SHAPE_H_ */
...@@ -30,16 +30,16 @@ class Softmax_Op : public OperatorTensor, ...@@ -30,16 +30,16 @@ class Softmax_Op : public OperatorTensor,
public Registrable<Softmax_Op, public Registrable<Softmax_Op,
std::string, std::string,
std::shared_ptr<OperatorImpl>(const Softmax_Op&)>, std::shared_ptr<OperatorImpl>(const Softmax_Op&)>,
public StaticAttributes<SoftmaxAttr, int> { public StaticAttributes<SoftmaxAttr, std::size_t> {
public: public:
static const std::string Type; static const std::string Type;
Softmax_Op() = delete; Softmax_Op() = delete;
using Attributes_ = StaticAttributes<SoftmaxAttr, int>; using Attributes_ = StaticAttributes<SoftmaxAttr, std::size_t>;
template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>; template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
Softmax_Op(int axis) Softmax_Op(std::size_t axis)
: OperatorTensor(Type, 1, 0, 1), : OperatorTensor(Type, 1, 0, 1),
Attributes_(attr<SoftmaxAttr::AxisIdx>(axis)) {} Attributes_(attr<SoftmaxAttr::AxisIdx>(axis)) {}
...@@ -76,7 +76,7 @@ public: ...@@ -76,7 +76,7 @@ public:
} }
}; };
inline std::shared_ptr<Node> Softmax(int axis, const std::string& name = "") { inline std::shared_ptr<Node> Softmax(std::size_t axis, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name); return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
} }
} // namespace Aidge } // namespace Aidge
......
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_SPLIT_H_
#define AIDGE_CORE_OPERATOR_SPLIT_H_
#include <cstdint>  // std::int8_t
#include <memory>
#include <vector>

#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
// Default, backend-agnostic implementation of the Split operator.
// forward() is defined in the corresponding .cpp file.
class Split_OpImpl : public OperatorImpl {
public:
    Split_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
    void forward() override;
};
// Attributes of the Split operator: Axis = axis along which the input is cut;
// Split = size of each output chunk along that axis.
enum class SplitAttr { Axis, Split };

/**
 * @brief Operator splitting its input Tensor into several output Tensors along
 * a given axis.
 *
 * Takes two inputs ("data_input" and "split") and a caller-chosen number of
 * outputs. A default backend-agnostic implementation (Split_OpImpl) is
 * installed at construction, so the operator is usable without selecting a
 * backend.
 */
class Split_Op
    : public OperatorTensor,
      public Registrable<Split_Op, std::string, std::shared_ptr<OperatorImpl>(const Split_Op &)>,
      public StaticAttributes<SplitAttr, std::int8_t, std::vector<DimSize_t>> {

public:
    static const std::string Type;

    // Axis/Split attributes are mandatory: no default construction.
    Split_Op() = delete;

    using Attributes_ = StaticAttributes<SplitAttr, std::int8_t, std::vector<DimSize_t>>;
    template <SplitAttr e> using attr = typename Attributes_::template attr<e>;

    /**
     * @brief Construct a Split operator.
     * @param axis Axis along which the input is split.
     * @param nbOutputs Number of output Tensors (one per chunk).
     * @param split Size of each chunk along `axis`; may be empty, in which case
     *        the sizes are presumably taken from the second input or derived in
     *        forwardDims() — TODO confirm against the .cpp.
     */
    Split_Op( std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split)
        : OperatorTensor(Type, 2, 0, nbOutputs),
          Attributes_(attr<SplitAttr::Axis>(axis),
                      attr<SplitAttr::Split>(split))
    {
        // Install the default implementation so no backend is required.
        mImpl = std::make_shared<Split_OpImpl>(*this);
    }

    /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
     * input tensors (the new operator has no input associated).
     * @param op Operator to copy.
     */
    Split_Op(const Split_Op &op)
        : OperatorTensor(op),
          Attributes_(op)
    {
        if (!op.backend().empty()) {
            // Reuse the registered backend implementation of the copied operator.
            SET_IMPL_MACRO(Split_Op, *this, op.backend());
        }
        else {
            // Fall back to the default backend-agnostic implementation.
            mImpl = std::make_shared<Split_OpImpl>(*this);
        }
    }

public:
    /**
     * @brief Clone the operator using its copy-constructor.
     * @see Operator::Split_Op
     */
    std::shared_ptr<Operator> clone() const override { return std::make_shared<Split_Op>(*this); }

    // Both declared in the .cpp: dims may depend on the "split" input values,
    // hence the allowDataDependency parameter and the dimsForwarded() override.
    bool dimsForwarded() const override final;
    bool forwardDims(bool allowDataDependency = false) override final;

    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;

    static const std::vector<std::string> getInputsName(){
        return {"data_input", "split"};
    }
    static const std::vector<std::string> getOutputsName(){
        return {"data_output_0", "data_output_n"};
    }
};
/**
 * @brief Split a Tensor into several sub-Tensors along a given axis.
 * @param nbOutput Number of output sub-Tensors.
 * @param axis Axis along which the input is split (default 0).
 * @param split Size of each chunk along `axis`; may be empty, in which case
 *        the sizes are presumably taken from the "split" input or derived by
 *        the operator — TODO confirm against Split_Op::forwardDims().
 * @param name Name of the Operator.
 * @return std::shared_ptr<Node> A Node containing the Operator.
 */
inline std::shared_ptr<Node> Split(DimSize_t nbOutput,
                                   std::int8_t axis = 0,
                                   const std::vector<DimSize_t>& split = {},
                                   const std::string &name = "") {
    return std::make_shared<Node>(std::make_shared<Split_Op>(axis, nbOutput, split), name);
}
} // namespace Aidge
namespace {
// Human-readable names for SplitAttr, used for attribute lookup/printing.
template <>
const char *const EnumStrings<Aidge::SplitAttr>::data[] = { "Axis", "Split" };
}
#endif /* AIDGE_CORE_OPERATOR_SPLIT_H_ */
...@@ -26,9 +26,9 @@ ...@@ -26,9 +26,9 @@
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
namespace Aidge { namespace Aidge {
class Transpose_OpImpl : public OperatorImpl { class TransposeImpl : public OperatorImpl {
public: public:
Transpose_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} TransposeImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
void forward() override; void forward() override;
}; };
...@@ -47,11 +47,11 @@ class Transpose_Op : public OperatorTensor, ...@@ -47,11 +47,11 @@ class Transpose_Op : public OperatorTensor,
template <TransposeAttr e> template <TransposeAttr e>
using attr = typename Attributes_::template attr<e>; using attr = typename Attributes_::template attr<e>;
Transpose_Op(const std::vector<DimSize_t> &output_dims_order) Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder)
: OperatorTensor(Type, 1, 0, 1), : OperatorTensor(Type, 1, 0, 1),
Attributes_(attr<TransposeAttr::OutputDimsOrder>(output_dims_order)) Attributes_(attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder))
{ {
mImpl = std::make_shared<Transpose_OpImpl>(*this); mImpl = std::make_shared<TransposeImpl>(*this);
} }
/** /**
...@@ -66,7 +66,7 @@ class Transpose_Op : public OperatorTensor, ...@@ -66,7 +66,7 @@ class Transpose_Op : public OperatorTensor,
SET_IMPL_MACRO(Transpose_Op, *this, op.backend()); SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
} }
else { else {
mImpl = std::make_shared<Transpose_OpImpl>(*this); mImpl = std::make_shared<TransposeImpl>(*this);
} }
} }
...@@ -90,9 +90,9 @@ class Transpose_Op : public OperatorTensor, ...@@ -90,9 +90,9 @@ class Transpose_Op : public OperatorTensor,
} }
}; };
inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &output_dims_order, inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
const std::string& name = "") { const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Transpose_Op>(output_dims_order), name); return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
} }
} // namespace Aidge } // namespace Aidge
......
...@@ -73,6 +73,10 @@ void init_Tensor(py::module& m){ ...@@ -73,6 +73,10 @@ void init_Tensor(py::module& m){
(m,"Tensor", py::multiple_inheritance(), py::buffer_protocol()); (m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
pyClassTensor.def(py::init<>()) pyClassTensor.def(py::init<>())
.def(py::self + py::self)
.def(py::self - py::self)
.def(py::self * py::self)
.def(py::self / py::self)
.def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true) .def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true)
.def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true) .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
.def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims) .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
...@@ -89,6 +93,9 @@ void init_Tensor(py::module& m){ ...@@ -89,6 +93,9 @@ void init_Tensor(py::module& m){
.def("__str__", [](Tensor& b) { .def("__str__", [](Tensor& b) {
return b.toString(); return b.toString();
}) })
.def("__repr__", [](Tensor& b) {
return "Tensor(dtype = " + std::string(EnumStrings<DataType>::data[static_cast<int>(b.dataType())]) + ",\n" + b.toString() + ")";
})
.def("__len__", [](Tensor& b) -> size_t{ .def("__len__", [](Tensor& b) -> size_t{
return b.size(); return b.size();
}) })
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment