Commit a830a77d authored by Olivier BICHLER

Fixed merge issues

parent b7a41332
2 merge requests: !212 Version 0.3.0, !153 Im2col
Pipeline #50140 passed
@@ -34,35 +34,33 @@ enum class FoldAttr { OutputDims, StrideDims, DilationDims, KernelDims };
 template <DimIdx_t DIM>
 class Fold_Op : public OperatorTensor,
-                public Registrable<Fold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)>,
-                public StaticAttributes<FoldAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>> {
+                public Registrable<Fold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)> {
 public:
     static const std::string Type;
-    Fold_Op() = delete;
+private:
     using Attributes_ = StaticAttributes<FoldAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>>;
-    template <FoldAttr e>
-    using attr = typename Attributes_::template attr<e>;
+    template <FoldAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Fold_Op() = delete;
     constexpr Fold_Op(const std::array<DimSize_t, DIM> &outputDims,
                       const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                       const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, {InputCategory::Data}, 1),
-          Attributes_(attr<FoldAttr::OutputDims>(outputDims),
-                      attr<FoldAttr::StrideDims>(strideDims),
-                      attr<FoldAttr::DilationDims>(dilationDims),
-                      attr<FoldAttr::KernelDims>(kernelDims)) {}
+          mAttributes(std::make_shared<Attributes_>(
+            attr<FoldAttr::OutputDims>(outputDims),
+            attr<FoldAttr::StrideDims>(strideDims),
+            attr<FoldAttr::DilationDims>(dilationDims),
+            attr<FoldAttr::KernelDims>(kernelDims))) {}
     /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
@@ -71,7 +69,7 @@ public:
     */
     Fold_Op(const Fold_Op<DIM> &op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
@@ -93,6 +91,12 @@ public:
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& outputDims() const { return mAttributes->template getAttr<FoldAttr::OutputDims>(); }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<FoldAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<FoldAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<FoldAttr::KernelDims>(); }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
...
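The hunk above drops the StaticAttributes base class in favour of a private, shared mAttributes member plus named accessors (outputDims(), strideDims(), dilationDims(), kernelDims()) and a type-erased attributes() handle. A minimal usage sketch of the new access pattern follows; the include path, the 2-D instantiation, and the direct constructibility of Fold_Op<2> are assumptions made for illustration only.

#include <array>
#include <memory>

#include "aidge/operator/Fold.hpp"  // assumed include path

using namespace Aidge;

void foldAttributeAccessSketch() {
    // 2-D Fold: reassemble a 28x28 spatial output from 3x3 patches,
    // default stride 1 and dilation 1 (the constructor defaults above).
    Fold_Op<2> fold(std::array<DimSize_t, 2>{28, 28},
                    std::array<DimSize_t, 2>{3, 3});

    // Attributes are now read through the named accessors instead of
    // this->template getAttr<FoldAttr::KernelDims>():
    const std::array<DimSize_t, 2>& kernel = fold.kernelDims();   // {3, 3}
    const std::array<DimSize_t, 2>& stride = fold.strideDims();   // {1, 1}
    (void)kernel; (void)stride;

    // The generic handle exposes the same storage to code that only knows
    // the Attributes interface (e.g. serialization or bindings).
    std::shared_ptr<Attributes> attrs = fold.attributes();
    (void)attrs;
}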
@@ -41,31 +41,30 @@ enum class UnfoldAttr { StrideDims, DilationDims, KernelDims };
 template <DimIdx_t DIM>
 class Unfold_Op : public OperatorTensor,
-                public Registrable<Unfold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM> &)>,
-                public StaticAttributes<UnfoldAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>> {
+                public Registrable<Unfold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM> &)> {
 public:
     static const std::string Type;
-    Unfold_Op() = delete;
+private:
    using Attributes_ = StaticAttributes<UnfoldAttr,
                                             std::array<DimSize_t, DIM>,
                                             std::array<DimSize_t, DIM>,
                                             std::array<DimSize_t, DIM>>;
-    template <UnfoldAttr e>
-    using attr = typename Attributes_::template attr<e>;
+    template <UnfoldAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Unfold_Op() = delete;
     constexpr Unfold_Op(const std::array<DimSize_t, DIM> &kernelDims,
                         const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                         const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, {InputCategory::Data}, 1),
-          Attributes_(attr<UnfoldAttr::StrideDims>(strideDims),
-                      attr<UnfoldAttr::DilationDims>(dilationDims),
-                      attr<UnfoldAttr::KernelDims>(kernelDims))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<UnfoldAttr::StrideDims>(strideDims),
+            attr<UnfoldAttr::DilationDims>(dilationDims),
+            attr<UnfoldAttr::KernelDims>(kernelDims)))
     {
         mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
     }
@@ -77,7 +76,7 @@ public:
     */
     Unfold_Op(const Unfold_Op<DIM> &op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
@@ -99,6 +98,11 @@ public:
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<UnfoldAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<UnfoldAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<UnfoldAttr::KernelDims>(); }
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
...
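Both copy-constructors now copy the shared_ptr rather than the attribute values, so a copied operator aliases the same attribute storage as its source. A short sketch of that aliasing behaviour, with illustrative values and under the assumption that Unfold_Op<2> can be constructed directly:

#include <array>

#include "aidge/operator/Unfold.hpp"  // assumed include path

using namespace Aidge;

void unfoldCopyAliasingSketch() {
    Unfold_Op<2> a(std::array<DimSize_t, 2>{3, 3});  // 3x3 kernel, default stride/dilation
    Unfold_Op<2> b(a);                               // copy: shares a's mAttributes

    // The accessors return references into the shared attribute object,
    // so a write through one operator is visible through its copies.
    b.kernelDims()[0] = 5;
    // a.kernelDims()[0] is now 5 as well.
}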
@@ -33,14 +33,14 @@ bool Aidge::Fold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
         DimSize_t k = 1;
         DimSize_t l = 1;
-        for (std::size_t dim = 0; dim < this->template getAttr<FoldAttr::KernelDims>().size() ; ++dim) {
-            const DimSize_t kernelExtent = this->template getAttr<FoldAttr::DilationDims>()[dim] *
-                                                    (this->template getAttr<FoldAttr::KernelDims>()[dim] - 1) + 1;
-            k *= this->template getAttr<FoldAttr::KernelDims>()[dim];
+        for (std::size_t dim = 0; dim < this->kernelDims().size() ; ++dim) {
+            const DimSize_t kernelExtent = this->dilationDims()[dim] *
+                                                    (this->kernelDims()[dim] - 1) + 1;
+            k *= this->kernelDims()[dim];
             l *= 1 + static_cast<DimSize_t>(
-                    floor(static_cast<float>(this->template getAttr<FoldAttr::OutputDims>()[dim] - kernelExtent) /
-                            static_cast<float>(this->template getAttr<FoldAttr::StrideDims>()[dim])));
+                    floor(static_cast<float>(this->outputDims()[dim] - kernelExtent) /
+                            static_cast<float>(this->strideDims()[dim])));
         }
         AIDGE_ASSERT(dims[dims.size() - 2] % k == 0 , "Fold: input number of channels ({}) is not divisible by the product of provided kernel dims ({})!",
@@ -50,7 +50,7 @@ bool Aidge::Fold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
         dims[dims.size() - 2] /= k;
         dims.pop_back();
-        dims.insert(dims.end(), this->template getAttr<FoldAttr::OutputDims>().begin(), this->template getAttr<FoldAttr::OutputDims>().end());
+        dims.insert(dims.end(), this->outputDims().begin(), this->outputDims().end());
         mOutputs[0]->resize(dims);
         return true;
     }
...
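The loop above derives k (the flattened kernel size) and l (the number of sliding-window positions) from kernelExtent = dilation * (kernel - 1) + 1 and l_dim = 1 + floor((outputDim - kernelExtent) / stride). A standalone check of that arithmetic with illustrative values (4x4 output, 3x3 kernel, stride 1, dilation 1), independent of the Aidge types:

#include <cmath>
#include <cstddef>
#include <cstdio>

int main() {
    const std::size_t outputDims[2]   = {4, 4};
    const std::size_t kernelDims[2]   = {3, 3};
    const std::size_t strideDims[2]   = {1, 1};
    const std::size_t dilationDims[2] = {1, 1};

    std::size_t k = 1;  // product of kernel dims
    std::size_t l = 1;  // number of sliding-window positions
    for (std::size_t dim = 0; dim < 2; ++dim) {
        const std::size_t kernelExtent = dilationDims[dim] * (kernelDims[dim] - 1) + 1;
        k *= kernelDims[dim];
        l *= 1 + static_cast<std::size_t>(
                 std::floor(static_cast<float>(outputDims[dim] - kernelExtent) /
                            static_cast<float>(strideDims[dim])));
    }
    std::printf("k = %zu, l = %zu\n", k, l);  // prints: k = 9, l = 4
    return 0;
}

With these values, a Fold expecting a 4x4 output therefore accepts an input of shape {N, C*9, 4} and resizes its output to {N, C, 4, 4}, matching the channel division and outputDims insertion in the hunk above.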
@@ -26,23 +26,23 @@
 template <Aidge::DimIdx_t DIM>
 void Aidge::Unfold_OpImpl<DIM>::forward() {
     const Unfold_Op<DIM>& op = dynamic_cast<const Unfold_Op<DIM>&>(mOp);
-    const auto kernelDims = op.template getAttr<UnfoldAttr::KernelDims>();
-    const auto dilationDims = op.template getAttr<UnfoldAttr::DilationDims>();
-    const auto strideDims = op.template getAttr<UnfoldAttr::StrideDims>();
+    const auto kernelDims = op.kernelDims();
+    const auto dilationDims = op.dilationDims();
+    const auto strideDims = op.strideDims();
     const DimSize_t inHeight = op.getInput(0)->dims()[2];
     const DimSize_t inWidth = op.getInput(0)->dims()[3];
     const DimSize_t inChannels = op.getInput(0)->dims()[1];
-    const DimSize_t kernelExtentHeight = op.template getAttr<UnfoldAttr::DilationDims>()[0] *
-                                            (op.template getAttr<UnfoldAttr::KernelDims>()[0] - 1) + 1;
+    const DimSize_t kernelExtentHeight = op.dilationDims()[0] *
+                                            (op.kernelDims()[0] - 1) + 1;
     const DimSize_t outHeight = 1 + static_cast<DimSize_t>(
                     floor(static_cast<float>(inHeight - kernelExtentHeight) /
-                            static_cast<float>(op.template getAttr<UnfoldAttr::StrideDims>()[0])));
+                            static_cast<float>(op.strideDims()[0])));
-    const DimSize_t kernelExtentWidth = op.template getAttr<UnfoldAttr::DilationDims>()[1] *
-                                            (op.template getAttr<UnfoldAttr::KernelDims>()[1] - 1) + 1;
+    const DimSize_t kernelExtentWidth = op.dilationDims()[1] *
+                                            (op.kernelDims()[1] - 1) + 1;
     const DimSize_t outWidth = 1 + static_cast<DimSize_t>(
                     floor(static_cast<float>(inWidth - kernelExtentWidth) /
-                            static_cast<float>(op.template getAttr<UnfoldAttr::StrideDims>()[1])));
+                            static_cast<float>(op.strideDims()[1])));
     const DimSize_t outChannels = op.getOutput(0)->dims()[1];
     for (DimSize_t n = 0; n < op.getOutput(0)->dims()[0]; ++n) {
@@ -75,14 +75,14 @@ bool Aidge::Unfold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
         DimSize_t k = 1;
         DimSize_t l = 1;
-        for (std::size_t dim = 0; dim < this->template getAttr<UnfoldAttr::KernelDims>().size() ; ++dim) {
-            const DimSize_t kernelExtent = this->template getAttr<UnfoldAttr::DilationDims>()[dim] *
-                                                    (this->template getAttr<UnfoldAttr::KernelDims>()[dim] - 1) + 1;
-            k *= this->template getAttr<UnfoldAttr::KernelDims>()[dim];
+        for (std::size_t dim = 0; dim < this->kernelDims().size() ; ++dim) {
+            const DimSize_t kernelExtent = this->dilationDims()[dim] *
+                                                    (this->kernelDims()[dim] - 1) + 1;
+            k *= this->kernelDims()[dim];
             l *= 1 + static_cast<DimSize_t>(
                     floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                            static_cast<float>(this->template getAttr<UnfoldAttr::StrideDims>()[dim])));
+                            static_cast<float>(this->strideDims()[dim])));
         }
         mOutputs[0]->resize({inputDims[0], inputDims[1] * k, l});
...
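As a concrete check of the resize above (illustrative numbers): for an input of shape {1, 3, 32, 32} with a 3x3 kernel, stride 1 and dilation 1, the kernel extent is 3 along each spatial axis, so k = 3 * 3 = 9 and l = (1 + floor((32 - 3) / 1))^2 = 30 * 30 = 900, giving an Unfold output of shape {1, 27, 900}.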
@@ -41,10 +41,10 @@ size_t Aidge::convToMatMul(std::shared_ptr<GraphView> graphView) {
         const auto wFlattenSize = std::accumulate(wShape.cbegin() + 1, wShape.cend(), DimSize_t(1), std::multiplies<DimSize_t>());
         auto microGraph = std::make_shared<GraphView>();
-        auto unfold = Unfold(convOp->getAttr<std::array<DimSize_t, 2>>("KernelDims"),
+        auto unfold = Unfold(convOp->kernelDims(),
                             (!convNode->name().empty()) ? convNode->name() + "_unfold" : "",
-                            convOp->getAttr<std::array<DimSize_t, 2>>("StrideDims"),
-                            convOp->getAttr<std::array<DimSize_t, 2>>("DilationDims"));
+                            convOp->strideDims(),
+                            convOp->dilationDims());
         auto wReshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>{{static_cast<int64_t>(convOp->getInput(1)->dims()[0]), static_cast<int64_t>(wFlattenSize)}}),
                                     (!convNode->name().empty()) ? convNode->name() + "_w_reshape_shape_prod" : "",
                                     true);
@@ -59,10 +59,10 @@ size_t Aidge::convToMatMul(std::shared_ptr<GraphView> graphView) {
                             false,
                             (!convNode->name().empty()) ? convNode->name() + "_reshape" : "");
         //auto fold = Fold(outputDims,
-        //                convOp->getAttr<std::array<DimSize_t, 2>>("KernelDims"),
+        //                convOp->kernelDims(),
         //                (!convNode->name().empty()) ? convNode->name() + "_unfold" : "",
-        //                convOp->getAttr<std::array<DimSize_t, 2>>("StrideDims"),
-        //                convOp->getAttr<std::array<DimSize_t, 2>>("DilationDims"));
+        //                convOp->strideDims(),
+        //                convOp->dilationDims());
         wReshapeProd->addChild(wReshape, 0, 1);
         wReshape->addChild(matMul, 0, 0);
...
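For reference, the micro-graph assembled above is the im2col lowering targeted by MR !153: Unfold rearranges each convolution window into a column, the weight tensor is reshaped from {outChannels, inChannels, kH, kW} to {outChannels, inChannels*kH*kW}, and the convolution itself becomes a single MatMul over those columns (the trailing Fold back to the spatial layout is still commented out here). With illustrative shapes, a 3x3 convolution with 16 input and 32 output channels on a 32x32 map becomes, per sample, a {32, 144} x {144, 900} product whose result is then reshaped to {32, 30, 30}.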