Skip to content
Snippets Groups Projects
Commit 053a98f9 authored by Olivier BICHLER's avatar Olivier BICHLER
Browse files

More fixes

parent f44d2613
No related branches found
No related tags found
No related merge requests found
Showing with 261 additions and 28 deletions
......@@ -81,14 +81,14 @@ public:
// return *in;
// }
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
const auto expectedDims = mInputs[0]->dims();
std::size_t nonEmptyInputTensor = 1;
......@@ -140,7 +140,7 @@ public:
}
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<Add_Op<NUM>>::create(name)(*this);
mOutput->setBackend(name);
......@@ -150,7 +150,7 @@ public:
}
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......
......@@ -84,7 +84,7 @@ public:
return std::make_shared<AvgPooling_Op<DIM>>(*this);
}
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 1 && "operators supports only 3 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
......@@ -92,7 +92,7 @@ public:
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
void computeOutputDims() override final {
if (!mInput->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
......
......@@ -87,14 +87,14 @@ public:
// return *in;
// }
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 5 && "operators supports only 5 inputs");
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) {
if(mInputs[i]->size() != mInputs[0]->dims()[1]) {
......
......@@ -100,14 +100,14 @@ public:
// }
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
......
......@@ -92,14 +92,14 @@ class ConvDepthWise_Op : public Operator,
return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
}
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
......
......@@ -135,7 +135,7 @@ public:
}
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<FC_Op>::create(name)(*this);
mOutput->setBackend(name);
......@@ -145,7 +145,7 @@ public:
mInputs[2]->setBackend(name);
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......
......@@ -120,14 +120,14 @@ public:
}
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......
......@@ -127,7 +127,7 @@ public:
}
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<MatMul_Op>::create(name)(*this);
mOutput->setBackend(name);
......@@ -136,7 +136,7 @@ public:
mInputs[1]->setBackend(name);
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......
......@@ -85,7 +85,7 @@ public:
return std::make_shared<MaxPooling_Op<DIM>>(*this);
}
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
assert(inputIdx < 1 && "operators supports only 3 inputs");
(void) inputIdx; // avoid unused warning
assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
......@@ -93,7 +93,7 @@ public:
mInput = std::dynamic_pointer_cast<Tensor>(data);
}
constexpr void computeOutputDims() override final {
void computeOutputDims() override final {
if (!mInput->empty()) {
std::array<DimSize_t, DIM + 2> outputDims = {};
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_PAD_H_
#define AIDGE_CORE_OPERATOR_PAD_H_
#include <array>
#include <numeric>
#include <vector>
#include <cmath>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
// Static attributes of the Pad operator: per-dimension {begin, end} padding
// sizes, the border filling policy, and the constant fill value.
enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
// Border filling policies. Constant fills with BorderValue; Replicate repeats
// the edge element; Reflect mirrors without the edge; Wrap is periodic.
// NOTE(review): only Constant is exercised by the factories below — confirm
// the backends implement the other modes.
enum class PadBorderType { Constant, Replicate, Reflect, Wrap };
/**
 * @brief Pad operator: extends the spatial dimensions of its single input
 * tensor according to the per-dimension {begin, end} border sizes.
 *
 * The input is assumed to be laid out with two leading non-spatial dimensions
 * (copied through unchanged by computeOutputDims) followed by DIM spatial
 * dimensions that receive the padding.
 * @tparam DIM Number of spatial dimensions to pad.
 */
template <DimIdx_t DIM>
class Pad_Op : public Operator,
                public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
                public StaticAttributes<PadAttr,
                                       std::array<std::array<DimSize_t, 2>, DIM>,
                                       PadBorderType,
                                       double> {
private:
    // FIXME: change accessibility
    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();

public:
    static constexpr const char *Type = "Pad";

    Pad_Op() = delete;

    using Attributes_ = StaticAttributes<PadAttr,
                                         std::array<std::array<DimSize_t, 2>, DIM>,
                                         PadBorderType,
                                         double>;
    template <PadAttr e>
    using attr = typename Attributes_::template attr<e>;

    /**
     * @brief Construct a Pad operator.
     * @param beginEndTuples For each spatial dimension, the {begin, end} padding sizes.
     * @param borderType Border filling policy (defaults to Constant).
     * @param borderValue Fill value used with PadBorderType::Constant (defaults to 0.0).
     */
    constexpr Pad_Op(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
                     const PadBorderType &borderType = PadBorderType::Constant,
                     double borderValue = 0.0)
        : Operator(Type),
          Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
                      attr<PadAttr::BorderType>(borderType),
                      attr<PadAttr::BorderValue>(borderValue)) {
        setDatatype(DataType::Float32);
    }

    /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
     * @param op Operator to copy.
     */
    Pad_Op(const Pad_Op& op)
        : Operator(Type),
          Attributes_(op),
          mOutput(std::make_shared<Tensor>(*op.mOutput))
    {
        // cpy-ctor
        setDatatype(op.mOutput->dataType());
    }

    /**
     * @brief Clone the operator using its copy-constructor.
     * @see Operator::Pad_Op
     */
    std::shared_ptr<Operator> clone() const override {
        return std::make_shared<Pad_Op<DIM>>(*this);
    }

    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
        // Fixed assert message: the check enforces a single input, but the
        // message previously claimed "3 inputs".
        assert(inputIdx < 1 && "Pad operator supports only 1 input");
        (void) inputIdx; // avoid unused warning
        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
        mInput = std::dynamic_pointer_cast<Tensor>(data);
    }

    void computeOutputDims() override final {
        if (!mInput->empty()) {
            std::array<DimSize_t, DIM + 2> outputDims = {};
            // Each spatial dimension grows by its begin + end border sizes.
            for (std::size_t dim = 0; dim < DIM; ++dim) {
                outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[dim][0]
                                    + mInput->dims()[dim+2]
                                    + this->template getAttr<PadAttr::BeginEndBorders>()[dim][1];
            }
            // The two leading (non-spatial) dimensions pass through unchanged.
            outputDims[1] = mInput->dims()[1];
            outputDims[0] = mInput->dims()[0];
            mOutput->resize(outputDims);
        }
    }

    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }

    inline Tensor& input(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "operators supports only 1 inputs");
        (void) inputIdx; // avoid unused warning
        return *(mInput.get());
    }
    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }

    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "Pad Operators supports only 1 inputs");
        (void) inputIdx; // avoid unused warning
        return mInput;
    }
    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "Pad Operators has only 1 outputs");
        (void) outputIdx; // avoid unused warning
        return mOutput;
    }

    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx == 0 && "operators supports only 1 inputs");
        (void) inputIdx; // avoid unused warning
        return std::static_pointer_cast<Data>(mInput);
    }
    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "operator supports only 1 output");
        (void) outputIdx; // avoid unused warning
        return std::static_pointer_cast<Data>(mOutput);
    }

    // `override` added for consistency with the same fix applied to every
    // other operator in this commit (Add, FC, ReLU, ...): Pad was missed.
    void setBackend(const std::string &name) override {
        mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
        mOutput->setBackend(name);

        // FIXME: temporary workaround
        mInput->setBackend(name);
    }

    void setDatatype(const DataType &datatype) override {
        mOutput->setDatatype(datatype);

        // FIXME: temporary workaround
        mInput->setDatatype(datatype);
    }

    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
/// Build a Pad node from explicit {begin, end} padding pairs, one pair per
/// spatial dimension.
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Pad(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
                                 const std::string& name = "",
                                 const PadBorderType &borderType = PadBorderType::Constant,
                                 double borderValue = 0.0)
{
    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
    const auto op = std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue);
    return std::make_shared<Node>(op, name);
}
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, DIM> &dimBeginEnd,
const std::string& name = "",
const PadBorderType &borderType = PadBorderType::Constant,
double borderValue = 0.0)
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
std::array<std::array<DimSize_t, 2>, DIM> beginEndTuples;
for (size_t i = 0; i < DIM; ++i) {
beginEndTuples[i] = {dimBeginEnd[i], dimBeginEnd[i]};
}
auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
return pad;
}
/// Convenience factory: constant zero padding with explicit {begin, end}
/// pairs per spatial dimension.
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ZeroPad(const std::array<std::array<DimSize_t, 2>, DIM> &beginEndTuples,
                                     const std::string& name = "")
{
    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
    const auto op = std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadBorderType::Constant, 0.0);
    return std::make_shared<Node>(op, name);
}
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ZeroPad(const std::array<DimSize_t, DIM> &dimBeginEnd,
const std::string& name = "")
{
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
std::array<std::array<DimSize_t, 2>, DIM> beginEndTuples;
for (size_t i = 0; i < DIM; ++i) {
beginEndTuples[i] = {dimBeginEnd[i], dimBeginEnd[i]};
}
auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, PadBorderType::Constant, 0.0), name);
return pad;
}
/// Overload accepting a braced C-array of {begin, end} pairs so that callers
/// can write Pad({{1,1},{2,2}}, ...) without spelling out std::array.
template <DimSize_t DIM>
inline std::shared_ptr<Node> Pad(
    std::array<DimSize_t, 2> const (&beginEndTuples)[DIM],
    const std::string& name = "",
    const PadBorderType &borderType = PadBorderType::Constant,
    double borderValue = 0.0)
{
    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
    // Convert the built-in array into a std::array and delegate to the
    // primary factory above.
    const auto borders = to_array(beginEndTuples);
    return Pad(borders, name, borderType, borderValue);
}
} // namespace Aidge
namespace {
// String tables used when pretty-printing the enums; entry order must match
// the declaration order of PadAttr / PadBorderType.
// NOTE(review): anonymous namespace in a header gives each TU its own copy —
// this follows the project's existing EnumStrings convention, so kept as-is.
template <>
const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndBorders", "BorderType", "BorderValue"};
template <>
const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Replicate", "Reflect", "Wrap"};
}
#endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
......@@ -108,14 +108,14 @@ public:
}
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<ReLU_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......
......@@ -130,13 +130,13 @@ public:
}
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<Scaling_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......
......@@ -108,14 +108,14 @@ public:
}
void setBackend(const std::string& name) {
void setBackend(const std::string& name) override {
mImpl = Registrar<Softmax_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInput->setBackend(name);
}
void setDatatype(const DataType& datatype) {
void setDatatype(const DataType& datatype) override {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
......
......@@ -79,10 +79,10 @@ void Aidge::fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes){
printf("variance < 1e-12 for all outputs! Is the network correctly trained?\n");
}
const DimSize_t channelsSize = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("InChannels");
const DimSize_t channelsSize = std::dynamic_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("InChannels");
// TODO : suppose we have Conv2D ...
const std::array<DimSize_t, 2> kernelDims = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<std::array<DimSize_t, 2>>("KernelDims");
const std::array<DimSize_t, 2> kernelDims = std::dynamic_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<std::array<DimSize_t, 2>>("KernelDims");
std::shared_ptr<Tensor> weight = conv->input(1).first->getOperator()->getOutput(conv->input(1).second);
std::shared_ptr<Tensor> bias = conv->input(2).first->getOperator()->getOutput(conv->input(2).second);
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment