Commit f0fd3c3a authored by Maxence Naud

Fix slice and gather adaptation to data inputs

parents 019f134f 73269f1c
2 merge requests: !152 "Update Aidge export to take a graph view as an argument instead of a...", !93 "Change Gather and Slice's attributes into inputs"
Showing 247 additions and 201 deletions
......@@ -44,6 +44,8 @@ set(FMT_SYSTEM_HEADERS ON)
FetchContent_MakeAvailable(fmt)
set_property(TARGET fmt PROPERTY POSITION_INDEPENDENT_CODE ON)
find_package(Threads REQUIRED)
##############################################
# Create target and set properties
......@@ -88,7 +90,7 @@ if (PYBIND)
)
endif()
target_link_libraries(${module_name} PUBLIC fmt::fmt)
target_link_libraries(${module_name} PUBLIC Threads::Threads fmt::fmt)
target_compile_features(${module_name} PRIVATE cxx_std_14)
if (DOSANITIZE STREQUAL "ON")
......
......@@ -2,6 +2,7 @@
include(CMakeFindDependencyMacro)
find_dependency(fmt)
find_dependency(Threads)
include(${CMAKE_CURRENT_LIST_DIR}/aidge_core-config-version.cmake)
......
......@@ -39,7 +39,11 @@ class ExportNode(ABC):
if parent_node is not None:
self.inputs_dims.append(self.operator.get_input(idx).dims())
else:
self.inputs_dims.append(None)
print(self.operator.get_input(idx))
if self.operator.get_input(idx) is not None:
self.inputs_dims.append(self.operator.get_input(idx).dims())
else:
self.inputs_dims.append(None)
for idx, child_node in enumerate(self.node.get_children()):
self.outputs.append(child_node)
......
......@@ -39,7 +39,7 @@ class test_OperatorImpl(unittest.TestCase):
global GLOBAL_CPT
matmul = aidge_core.GenericOperator("MatMul", 1, 0, 1, name="MatMul0")
generic_matmul_op = matmul.get_operator()
generic_matmul_op.set_compute_output_dims(lambda x: x)
generic_matmul_op.set_forward_dims(lambda x: x)
generic_matmul_op.set_impl(testImpl(generic_matmul_op))
generic_matmul_op.forward()
self.assertEqual(GLOBAL_CPT, 1)
......@@ -52,6 +52,7 @@ class test_OperatorImpl(unittest.TestCase):
self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
conv.get_operator().set_backend("cpu")
conv.get_operator().set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
conv.get_operator().forward()
self.assertEqual(GLOBAL_CPT, 1)
......@@ -65,6 +66,7 @@ class test_OperatorImpl(unittest.TestCase):
conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
model = aidge_core.sequential([conv])
model.set_backend("cpu")
conv.get_operator().set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
conv.get_operator().forward()
self.assertEqual(GLOBAL_CPT, 1)
......
......@@ -92,14 +92,14 @@ class test_operator_binding(unittest.TestCase):
attrs.set_attr("d", 23.89)
self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
def test_compute_output_dims(self):
def test_forward_dims(self):
in_dims=[25, 25]
input = aidge_core.Producer(in_dims, name="In")
genOp = aidge_core.GenericOperator("genOp", 1, 0, 1, name="genOp")
_ = aidge_core.sequential([input, genOp])
self.assertListEqual(genOp.get_operator().get_output(0).dims(), [])
genOp.get_operator().set_compute_output_dims(lambda x:x)
genOp.get_operator().compute_output_dims()
genOp.get_operator().set_forward_dims(lambda x:x)
genOp.get_operator().forward_dims()
self.assertListEqual(genOp.get_operator().get_output(0).dims(), in_dims)
def test_set_impl(self):
......
......@@ -16,13 +16,14 @@
#include <vector>
#include "aidge/utils/Types.h"
#include "aidge/data/Elts.hpp"
namespace Aidge {
class Operator;
class OperatorImpl {
public:
OperatorImpl(const Operator& op, const std::string& backend);
OperatorImpl(const Operator& op, const std::string& backend = "");
virtual void forward();
virtual void backward();
......@@ -36,13 +37,13 @@ public:
* @param inputIdx Index of the input analysed.
* @return std::size_t
*/
virtual NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
// Amount of input data that cannot be overwritten during the execution.
virtual NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
// Memory required at an output for a given input size.
virtual NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
/**
* @brief Total amount of consumed data from a specific input.
......@@ -50,7 +51,7 @@ public:
* @param inputIdx Index of the input analysed.
* @return DimSize_t
*/
virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
/**
* @brief Total amount of produced data ready to be used on a specific output.
......@@ -58,7 +59,7 @@ public:
* @param outputIdx Index of the output analysed.
* @return DimSize_t
*/
virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
/**
* @brief Update the Consummer Producer system by simulating the consumption and production of i/o
......@@ -77,8 +78,8 @@ public:
protected:
const Operator &mOp;
const std::string mBackend;
std::vector<NbElts_t> mNbConsumedData;
std::vector<NbElts_t> mNbProducedData;
std::vector<Elts_t> mNbConsumedData;
std::vector<Elts_t> mNbProducedData;
};
} // namespace Aidge
......
......@@ -23,6 +23,8 @@ namespace Aidge {
template <class T>
class TensorImpl_cpu : public TensorImpl {
static_assert(std::is_trivially_copyable<T>::value, "TensorImpl type should be trivially copyable");
private:
/// Pointer to the data and its capacity
future_std::span<T> mData;
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_ELTS_H_
#define AIDGE_ELTS_H_
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
/**
* Base object for Aidge consumer-producer model (C-P model).
* It is a hybrid model: operator implementations can specify their C-P model
* with precise data (bytes) or with tokens.
*/
struct Elts_t {
enum EltType {
Data,
Token,
Undef
};
NbElts_t data;
NbElts_t token;
EltType type;
// Addition operator
inline Elts_t operator+(const Elts_t& other) const {
AIDGE_ASSERT(type == other.type || other.type == Undef || type == Undef,
"Incompatible C-P model types: {} + {}. Data and Token cannot be mixed.", type, other.type);
return Elts_t(data + other.data, token + other.token, (other.type == Undef) ? type : other.type);
}
// Addition assignment operator
inline Elts_t& operator+=(const Elts_t& other) {
AIDGE_ASSERT(type == other.type || other.type == Undef || type == Undef,
"Incompatible C-P model types: {} += {}. Data and Token cannot be mixed.", type, other.type);
data += other.data;
token += other.token;
type = (other.type == Undef) ? type : other.type;
return *this;
}
// Comparison operators
inline bool operator<(const Elts_t& other) const {
if (type == Elts_t::Undef || type == Elts_t::Token) {
// Nothing, or only a token is required: don't care about how much data has been produced for the token
return (token < other.token);
}
else if (type == Elts_t::Data && other.type != Elts_t::Token) {
// A precise amount of data is required, so the amount of produced data must be specified, a token is not enough
return (data < other.data);
}
else {
AIDGE_THROW_OR_ABORT(std::runtime_error,
"Incompatible C-P model types: {} < {}. Data is expected for right-hand side.", type, other.type);
}
}
inline bool operator>(const Elts_t& other) const {
if (type == Elts_t::Undef || type == Elts_t::Token) {
// Nothing, or only a token is required: don't care about how much data has been produced for the token
return (token > other.token);
}
else if (type == Elts_t::Data && other.type != Elts_t::Token) {
// A precise amount of data is required, so the amount of produced data must be specified, a token is not enough
return (data > other.data);
}
else {
AIDGE_THROW_OR_ABORT(std::runtime_error,
"Incompatible C-P model types: {} > {}. Data is expected for right-hand side.", type, other.type);
}
}
inline static Elts_t NoneElts() {
return Elts_t(0, 0, Elts_t::Undef);
}
inline static Elts_t DataElts(NbElts_t data, NbElts_t token = 1) {
return Elts_t(data, token, Elts_t::Data);
}
inline static Elts_t TokenElts(NbElts_t token) {
return Elts_t(0, token, Elts_t::Token);
}
private:
inline Elts_t(NbElts_t data_, NbElts_t token_, EltType type_):
data(data_), token(token_), type(type_) {}
};
} // end namespace Aidge
template<>
struct fmt::formatter<Aidge::Elts_t> {
template<typename ParseContext>
inline constexpr auto parse(ParseContext& ctx) {
return ctx.begin();
}
template<typename FormatContext>
inline auto format(Aidge::Elts_t const& elt, FormatContext& ctx) {
return fmt::format_to(ctx.out(), "{}:{}", elt.data, elt.token);
}
};
namespace {
template <>
const char* const EnumStrings<Aidge::Elts_t::EltType>::data[]
= {"Data", "Token", "Undef"};
}
namespace Aidge {
inline auto format_as(Elts_t::EltType elt) { return EnumStrings<Aidge::Elts_t::EltType>::data[static_cast<int>(elt)]; }
}
#endif /* AIDGE_ELTS_H_ */
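Illustration only (not part of this commit): the new Elts_t type above replaces the raw NbElts_t counts in the OperatorImpl interface shown earlier in this diff. A minimal sketch of how its factory helpers and operators behave, assuming the aidge_core headers and the fmt library are available:

// Illustration only: exercising the Elts_t helpers defined above.
#include <fmt/core.h>
#include "aidge/data/Elts.hpp"

int main() {
    using Aidge::Elts_t;

    // Precise C-P model: an exact element count (1 token by default).
    Elts_t required = Elts_t::DataElts(256);
    Elts_t consumed = Elts_t::DataElts(64);

    consumed += Elts_t::DataElts(64);   // same EltType, so '+=' is allowed

    if (consumed < required) {
        // fmt::formatter<Aidge::Elts_t> prints "data:token"
        fmt::print("still waiting: {} < {}\n", consumed, required);
    }

    // Token-only model: the amount of data is irrelevant, only tokens count.
    Elts_t produced = Elts_t::TokenElts(1);
    (void)produced;
    return 0;
}

Mixing Data and Token operands in arithmetic or comparisons trips the assertions above, so an implementation effectively commits to one granularity per stream.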
......@@ -251,7 +251,6 @@ class Tensor : public Data,
auto add_ = Add_Op(2);
add_.associateInput(0, std::make_shared<Tensor>(*this));
add_.associateInput(1, std::make_shared<Tensor>(other));
add_.computeOutputDims();
add_.setDataType(dataType());
add_.setBackend(mImpl->backend());
add_.forward();
......@@ -275,7 +274,6 @@ class Tensor : public Data,
auto sub_ = Sub_Op();
sub_.associateInput(0, std::make_shared<Tensor>(*this));
sub_.associateInput(1, std::make_shared<Tensor>(other));
sub_.computeOutputDims();
sub_.setDataType(dataType());
sub_.setBackend(mImpl->backend());
sub_.forward();
......@@ -299,7 +297,6 @@ class Tensor : public Data,
auto mul_ = Mul_Op();
mul_.associateInput(0, std::make_shared<Tensor>(*this));
mul_.associateInput(1, std::make_shared<Tensor>(other));
mul_.computeOutputDims();
mul_.setDataType(dataType());
mul_.setBackend(mImpl->backend());
mul_.forward();
......@@ -323,7 +320,6 @@ class Tensor : public Data,
auto div_ = Div_Op();
div_.associateInput(0, std::make_shared<Tensor>(*this));
div_.associateInput(1, std::make_shared<Tensor>(other));
div_.computeOutputDims();
div_.setDataType(dataType());
div_.setBackend(mImpl->backend());
div_.forward();
......@@ -331,6 +327,8 @@ class Tensor : public Data,
return div_.getOutput(0)->clone();
}
~Tensor() noexcept;
public:
/**
* @brief Perform a deep copy of the tensor.
......@@ -527,6 +525,7 @@ public:
template <typename expectedType>
const expectedType& get(std::size_t idx) const {
AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "get() can only be used for backends providing a valid host pointer");
AIDGE_ASSERT(idx < mSize, "idx out of range");
return *reinterpret_cast<expectedType *>(mImpl->hostPtr(mImplOffset + idx));
}
......@@ -539,6 +538,7 @@ public:
template <typename expectedType>
void set(std::size_t idx, expectedType value){
AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type");
AIDGE_ASSERT(mImpl->hostPtr() != nullptr, "set() can only be used for backends providing a valid host pointer");
AIDGE_ASSERT(idx < mSize, "idx out of range");
expectedType* dataPtr = static_cast<expectedType*>(mImpl->hostPtr(mImplOffset + idx));
*dataPtr = value;
......
......@@ -9,34 +9,20 @@
*
********************************************************************************/
#ifndef AIDGE_CORE_FILLER_H_
#define AIDGE_CORE_FILLER_H_
#ifndef AIDGE_CORE_FILLER_FILLER_H_
#define AIDGE_CORE_FILLER_FILLER_H_
#include <cstdint> // std::uint32_t
#include <memory>
#include <random> // normal_distribution, uniform_real_distribution
#include "aidge/data/Tensor.hpp"
namespace Aidge {
inline void calculateFanInFanOut(std::shared_ptr<Tensor> tensor,
unsigned int& fanIn, unsigned int& fanOut) {
AIDGE_ASSERT(
tensor->nbDims() == 4,
"Tensor need to have 4 dimensions to compute FanIn and FanOut.");
// Warning: This function suppose NCXX data layout.
// Aidge currently only support NCHW but this maybe not be true in the
// future.
DimSize_t batchSize = tensor->dims()[0];
DimSize_t channelSize = tensor->dims()[1];
AIDGE_ASSERT(batchSize != 0,
"Cannot calculate FanIn if tensor batch size is 0.");
AIDGE_ASSERT(channelSize != 0,
"Cannot calculate FanOut if tensor channel size is 0.");
fanIn = static_cast<unsigned int>(tensor->size() / batchSize);
fanOut = static_cast<unsigned int>(tensor->size() / channelSize);
}
enum VarianceNorm { FanIn, Average, FanOut };
void calculateFanInFanOut(std::shared_ptr<Tensor> tensor,
std::uint32_t& fanIn, std::uint32_t& fanOut);
enum class VarianceNorm { FanIn, Average, FanOut };
template <typename T>
void constantFiller(std::shared_ptr<Tensor> tensor, T constantValue);
......@@ -50,14 +36,15 @@ void uniformFiller(std::shared_ptr<Tensor> tensor, T min, T max);
template <typename T>
void xavierUniformFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
VarianceNorm varianceNorm = FanIn);
VarianceNorm varianceNorm = VarianceNorm::FanIn);
template <typename T>
void xavierNormalFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
VarianceNorm varianceNorm = FanIn);
VarianceNorm varianceNorm = VarianceNorm::FanIn);
template <typename T>
void heFiller(std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm = FanIn,
void heFiller(std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm = VarianceNorm::FanIn,
T meanNorm = 0.0, T scaling = 1.0);
} // namespace Aidge
#endif /* AIDGE_CORE_FILLER_H_ */
#endif /* AIDGE_CORE_FILLER_FILLER_H_ */
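The fan-in/fan-out helper is now only declared in this header; its definition presumably moves to a source file not shown in this diff. A sketch of that out-of-line definition, assuming it mirrors the inline body removed above (the file path mentioned in the comment is hypothetical):

// Sketch only: out-of-line definition implied by the declaration above,
// assumed to live in e.g. src/filler/Filler.cpp (hypothetical path).
#include <cstdint>
#include <memory>

#include "aidge/data/Tensor.hpp"
#include "aidge/filler/Filler.hpp"
#include "aidge/utils/ErrorHandling.hpp"

void Aidge::calculateFanInFanOut(std::shared_ptr<Aidge::Tensor> tensor,
                                 std::uint32_t& fanIn, std::uint32_t& fanOut) {
    AIDGE_ASSERT(tensor->nbDims() == 4,
                 "Tensor need to have 4 dimensions to compute FanIn and FanOut.");
    // Warning: this assumes an NCXX data layout (Aidge currently supports NCHW).
    const Aidge::DimSize_t batchSize = tensor->dims()[0];
    const Aidge::DimSize_t channelSize = tensor->dims()[1];
    AIDGE_ASSERT(batchSize != 0,
                 "Cannot calculate FanIn if tensor batch size is 0.");
    AIDGE_ASSERT(channelSize != 0,
                 "Cannot calculate FanOut if tensor channel size is 0.");
    fanIn  = static_cast<std::uint32_t>(tensor->size() / batchSize);
    fanOut = static_cast<std::uint32_t>(tensor->size() / channelSize);
}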
......@@ -201,13 +201,16 @@ public:
* If not, add a Transpose Operator.
* 4 - Propagate Tensor dimensions through the consecutive Operators.
*/
void compile(const std::string& backend = "cpu", const Aidge::DataType datatype = DataType::Float32, DeviceIdx_t device = 0);
void compile(const std::string& backend = "cpu",
const Aidge::DataType datatype = DataType::Float32,
DeviceIdx_t device = 0,
const std::vector<std::vector<DimSize_t>> dims = {});
/**
* @brief Compute dimensions of input/output Tensors for each Operator of the
* GraphView object's Nodes.
*/
void forwardDims(const std::vector<std::vector<DimSize_t>> dims = {});
bool forwardDims(const std::vector<std::vector<DimSize_t>> dims = {}, bool allowDataDependency = false);
/** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
......
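With the signature changes above, compile() can receive the input dimensions directly and forwardDims() now returns whether all dimensions could be propagated, with an opt-in for data-dependent shapes (the point of this commit for Slice and Gather). A hedged C++ sketch of the new calls; the graph-building helpers (sequential, Conv, ReLU) and a registered "cpu" backend are assumptions, not part of this diff:

// Illustration only (not part of this commit): using the updated GraphView API.
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/OpArgs.hpp"     // Aidge::sequential (assumed include path)
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/ReLU.hpp"

int main() {
    auto graph = Aidge::sequential({
        Aidge::Conv(3, 8, {3, 3}, "conv0"),
        Aidge::ReLU("relu0")
    });

    // New signature: backend, datatype, device and optional input dims.
    graph->compile("cpu", Aidge::DataType::Float32, 0, {{1, 3, 32, 32}});

    // forwardDims() now reports success; allowDataDependency lets operators
    // such as Slice or Gather read their data inputs to resolve output shapes.
    const bool ok = graph->forwardDims({{1, 3, 32, 32}}, /*allowDataDependency=*/true);
    return ok ? 0 : 1;
}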
......@@ -60,7 +60,7 @@ public:
// }
void computeOutputDims() override final;
bool forwardDims(bool allowDataDependency = false) override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
......
......@@ -13,18 +13,12 @@
#define AIDGE_CORE_OPERATOR_AVGPOOLING_H_
#include <array>
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <string>
#include <utility> // std::pair
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
......@@ -60,105 +54,36 @@ public:
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
AvgPooling_Op(const AvgPooling_Op<DIM>& op)
: OperatorTensor(op),
Attributes_(op)
{
if (op.mImpl) {
SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend());
} else {
mImpl = nullptr;
}
}
AvgPooling_Op(const AvgPooling_Op<DIM>& op);
/**
* @brief Clone the operator using its copy-constructor.
* @see Operator::AvgPooling_Op
*/
std::shared_ptr<Operator> clone() const override {
std::shared_ptr<Operator> clone() const override final {
return std::make_shared<AvgPooling_Op<DIM>>(*this);
}
void computeOutputDims() override final {
// check inputs have been associated
if (!getInput(0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
}
if (!(getInput(0)->empty())) {
std::array<DimSize_t, DIM + 2> outputDims;
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
outputDims[0] = inputDims[0];
outputDims[1] = inputDims[1];
for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
outputDims[dim+2] = 1 + static_cast<DimSize_t>(
std::floor(static_cast<float>(inputDims[dim+2] -
this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
}
getOutput(0)->resize(outputDims);
}
}
bool forwardDims(bool /*allowDataDependency*/ = false) override final;
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>>
computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
const std::vector<DimSize_t>& outputDims,
const IOIndex_t outputIdx = 0) const override final {
if (outputIdx != 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
}
if (firstEltDims.size() != outputDims.size()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
}
if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
// Offset
std::vector<DimSize_t> inputIdxDims = firstEltDims;
for (DimIdx_t i = 0; i < (DIM+2); ++i) {
if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
}
}
// padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
// Width
std::vector<DimSize_t> inputDims;
inputDims.push_back(outputDims[0]); // same batch value
inputDims.push_back(outputDims[1]); // same channel value
for (DimIdx_t i = 0; i < DIM; ++i) {
inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
* this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
+ 1
+ (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
}
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
return res;
}
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
}
const IOIndex_t outputIdx = 0) const override final;
void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, name);
mOutputs[0]->setBackend(name, device);
}
void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
static const std::vector<std::string> getInputsName(){
static const std::vector<std::string> getInputsName() {
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
static const std::vector<std::string> getOutputsName() {
return {"data_output"};
}
};
template <Aidge::DimIdx_t DIM>
const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling";
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
......@@ -176,6 +101,12 @@ inline std::shared_ptr<Node> AvgPooling(
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
return AvgPooling(to_array(kernel_dims), name, stride_dims);
}
extern template class Aidge::AvgPooling_Op<1>;
extern template class Aidge::AvgPooling_Op<2>;
extern template class Aidge::AvgPooling_Op<3>;
extern template class Aidge::AvgPooling_Op<4>;
} // namespace Aidge
namespace {
......
......@@ -16,13 +16,11 @@
#include <memory>
#include <vector>
#include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
......@@ -50,16 +48,7 @@ public:
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
BatchNorm_Op(const BatchNorm_Op<DIM>& op)
: OperatorTensor(op),
Attributes_(op)
{
if (op.mImpl){
SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend());
}else{
mImpl = nullptr;
}
}
BatchNorm_Op(const BatchNorm_Op<DIM>& op);
/**
* @brief Clone the operator using its copy-constructor.
......@@ -79,35 +68,9 @@ public:
// }
void computeOutputDims() override final {
// check inputs have been associated
bool associated = true;
for (IOIndex_t i = 0; i < nbInputs(); ++i) {
associated &= !(getInput(i)->empty());
}
if (associated) {
const DimSize_t nbFeatures = getInput(0)->dims()[1];
for (std::size_t i = nbData(); i < nbInputs(); ++i) {
if(getInput(i)->size() != nbFeatures) {
// /!\ Input size should be handled BEFORE calling this function
// This should raise an error
getInput(i)->resize({getInput(0)->dims()[1]});
}
}
mOutputs[0]->resize(getInput(0)->dims());
}
}
bool forwardDims(bool /*allowDataDependency*/ = false) override final;
void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, name);
mOutputs[0]->setBackend(name, device);
// By default, automatically set backend for scale, shift, mean and variance
getInput(1)->setBackend(name, device);
getInput(2)->setBackend(name, device);
getInput(3)->setBackend(name, device);
getInput(4)->setBackend(name, device);
}
void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
static const std::vector<std::string> getInputsName() {
return {"data_input", "scale", "shift", "mean", "variance"};
......@@ -117,22 +80,19 @@ public:
}
};
template <DimIdx_t DIM>
const std::string BatchNorm_Op<DIM>::Type = "BatchNorm";
extern template class Aidge::BatchNorm_Op<2>;
extern template class Aidge::BatchNorm_Op<3>;
extern template class Aidge::BatchNorm_Op<4>;
template <DimSize_t DIM>
inline std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures,
std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures,
const float epsilon = 1.0e-5F,
const float momentum = 0.1F,
const std::string& name = "") {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by BatchNorm, not supported");
auto batchNorm = std::make_shared<Node>(std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum), name);
addProducer(batchNorm, 1, {nbFeatures}, "scale");
addProducer(batchNorm, 2, {nbFeatures}, "shift");
addProducer(batchNorm, 3, {nbFeatures}, "batch_mean");
addProducer(batchNorm, 4, {nbFeatures}, "batch_variance");
return batchNorm;
}
const std::string& name = "");
extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&);
extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&);
extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&);
} // namespace Aidge
namespace {
......
......@@ -24,13 +24,20 @@
#include "aidge/utils/Types.h"
namespace Aidge {
class Cast_OpImpl : public OperatorImpl {
public:
Cast_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
void forward() override;
};
class Cast_Op : public OperatorTensor,
public Registrable<Cast_Op, std::string, std::unique_ptr<OperatorImpl>(const Cast_Op&)> {
public:
static const std::string Type;
Cast_Op() : OperatorTensor(Type, 1, 0, 1) {}
Cast_Op() : OperatorTensor(Type, 1, 0, 1) {
mImpl = std::make_shared<Cast_OpImpl>(*this);
}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
......@@ -39,10 +46,11 @@ public:
Cast_Op(const Cast_Op& op)
: OperatorTensor(op)
{
if (op.mImpl) {
if (!op.backend().empty()) {
SET_IMPL_MACRO(Cast_Op, *this, op.backend());
} else {
mImpl = nullptr;
}
else {
mImpl = std::make_shared<Cast_OpImpl>(*this);
}
}
......@@ -56,8 +64,6 @@ public:
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
void forward() override;
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
......
......@@ -26,6 +26,12 @@
#include "aidge/utils/Types.h"
namespace Aidge {
class Concat_OpImpl : public OperatorImpl {
public:
Concat_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
void forward() override;
};
enum class ConcatAttr { Axis };
class Concat_Op : public OperatorTensor,
......@@ -45,6 +51,7 @@ public:
if (nbIn == 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
}
mImpl = std::make_shared<Concat_OpImpl>(*this);
}
/**
......@@ -55,10 +62,11 @@ public:
: OperatorTensor(op),
Attributes_(op)
{
if (op.mImpl){
if (!op.backend().empty()) {
SET_IMPL_MACRO(Concat_Op, *this, op.backend());
}else{
mImpl = nullptr;
}
else {
mImpl = std::make_shared<Concat_OpImpl>(*this);
}
}
......@@ -70,7 +78,7 @@ public:
return std::make_shared<Concat_Op>(*this);
}
void computeOutputDims() override final;
bool forwardDims(bool allowDataDependency = false) override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
......
......@@ -108,7 +108,7 @@ public:
// }
void computeOutputDims() override final {
bool forwardDims(bool /*allowDataDependency*/ = false) override final {
// check inputs have been associated
bool associated = true;
for (IOIndex_t i = 0; i < 3; ++i) {
......@@ -118,6 +118,17 @@ public:
associated &= !(getInput(i)->empty());
}
if (associated) {
AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
(getInput(0)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()),
"Wrong input size for Conv operator.");
AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)) &&
(getInput(1)->template dims<DIM+2>()[1] == this->template getAttr<ConvAttr::InChannels>()) &&
(getInput(1)->template dims<DIM+2>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
"Wrong weight size for Conv operator.");
if(!this->template getAttr<ConvAttr::NoBias>())
AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
(getInput(2)->template dims<1>()[0] == this->template getAttr<ConvAttr::OutChannels>()),
"Wrong bias size for Conv operator.");
std::array<DimSize_t, DIM + 2> outputDims{};
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
......@@ -135,6 +146,8 @@ public:
outputDims[0] = inputDims[0];
mOutputs[0]->resize(outputDims);
}
return associated;
}
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
......@@ -147,7 +160,7 @@ public:
if (firstEltDims.size() != outputDims.size()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
}
if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
// Offset
auto inputIdxDims = firstEltDims; // batch idx is the same
inputIdxDims[1] = 0; // each channel is used so start with the first one
......
......@@ -90,7 +90,7 @@ public:
}
void computeOutputDims() override final {
bool forwardDims(bool /*allowDataDependency*/ = false) override final {
// check inputs have been associated
// TODO : add a check of inputs dimensions ?
bool associated = true;
......@@ -124,6 +124,8 @@ public:
outputDims[0] = inputDims[0];
mOutputs[0]->resize(outputDims);
}
return associated;
}
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
......@@ -133,7 +135,7 @@ public:
if (firstEltDims.size() != outputDims.size()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
}
if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
// Offset
auto inputIdxDims = firstEltDims; // batch idx is the same
......
......@@ -54,7 +54,7 @@ public:
return std::make_shared<Div_Op>(*this);
}
void computeOutputDims() override final;
bool forwardDims(bool allowDataDependency = false) override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
......
......@@ -71,14 +71,14 @@ public:
void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
void computeOutputDims() override final;
bool forwardDims(bool allowDataDependency = false) override final;
void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
static const std::vector<std::string> getInputsName(){
static const std::vector<std::string> getInputsName() {
return {"data_input", "weight", "bias"};
}
static const std::vector<std::string> getOutputsName(){
static const std::vector<std::string> getOutputsName() {
return {"data_output"};
}
};
......