Commit b4569cbe authored by Maxence Naud, committed by Maxence Naud

Change ReduceMean dimension from static to dynamic

parent 57b62a49
3 merge requests: !105 version 0.2.0, !88 Basic supervised learning, !78 Loss function
@@ -12,14 +12,11 @@
 #ifndef AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
 #define AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
 
-#include <algorithm>  // std::for_each, std::sort
-#include <array>
 #include <cstdint>    // std::int32_t
 #include <memory>
-#include <stdexcept>  // std::runtime_error
+#include <string>
 #include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
@@ -31,21 +28,20 @@
 namespace Aidge {
 enum class ReduceMeanAttr { Axes, KeepDims };
 
-template <DimIdx_t DIM>
 class ReduceMean_Op : public OperatorTensor,
-                public Registrable<ReduceMean_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
-                public StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t> {
+                public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>,
+                public StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t> {
 public:
     static const std::string Type;
 
     ReduceMean_Op() = delete;
 
-    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t>;
+    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t>;
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr ReduceMean_Op(const std::array<std::int32_t, DIM> &axes, DimSize_t keep_dims)
+    ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims)
         : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<ReduceMeanAttr::Axes>(axes),
                       attr<ReduceMeanAttr::KeepDims>(keep_dims)) {}
@@ -54,13 +50,13 @@ class ReduceMean_Op : public OperatorTensor,
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ReduceMean_Op(const ReduceMean_Op<DIM>& op)
+    ReduceMean_Op(const ReduceMean_Op& op)
         : OperatorTensor(op),
           Attributes_(op)
     {
         if (op.mImpl){
-            SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, op.backend());
-        }else{
+            SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
+        } else {
             mImpl = nullptr;
         }
     }
@@ -70,72 +66,51 @@ class ReduceMean_Op : public OperatorTensor,
      * @see Operator::ReduceMean_Op
      */
     std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ReduceMean_Op<DIM>>(*this);
+        return std::make_shared<ReduceMean_Op>(*this);
     }
 
-    void computeOutputDims() override final {
-        if (!getInput(0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
-        }
-        if (!getInput(0)->empty()) {
-            // make Axes attribute positive
-            std::array<std::int32_t, DIM>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
-            std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
-                if (val < 0)
-                    val+=static_cast<std::int32_t>(getInput(0)->nbDims());
-            });
-            std::sort(axes.begin(), axes.end());
-
-            // build output dimensions
-            std::vector<DimSize_t> outDims = getInput(0)->dims();
-            if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
-                std::for_each(axes.begin(), axes.end(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
-            }
-            else {
-                for (auto it = axes.crbegin(); it != axes.crend(); ++it)
-                    outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
-            }
-            mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
-        }
-    }
+    void computeOutputDims() override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
-    static const std::vector<std::string> getInputsName(){
+    static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
-template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> ReduceMean(const std::array<std::int32_t, DIM> &axes,
+/**
+ * @brief Compute the mean value of a Tensor over the provided axes. Dimensions
+ * may be reduced by erasing the provided axes or not.
+ *
+ * @param axes Dimensions over which data mean should be computed.
+ * @param keep_dims Whether or not reduced dimensions are to be erased.
+ * @param name Name of the Operator.
+ * @return std::shared_ptr<Node> Node containing the Operator.
+ */
+inline std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
                                         DimSize_t keep_dims=1,
                                         const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
-    return std::make_shared<Node>(std::make_shared<ReduceMean_Op<static_cast<DimIdx_t>(DIM)>>(axes, keep_dims), name);
+    AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
+    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims), name);
 }
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
-template <DimSize_t DIM>
-inline std::shared_ptr<Node> ReduceMean(
-    std::int32_t const (&axes)[DIM],
-    DimSize_t keep_dims = 1,
-    const std::string& name = "") {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
-    return ReduceMean(to_array(axes), keep_dims, name);
-}
+// template <DimSize_t DIM>
+// inline std::shared_ptr<Node> ReduceMean(
+//     std::int32_t const (&axes)[DIM],
+//     DimSize_t keep_dims = 1,
+//     const std::string& name = "") {
+//     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
+//     return ReduceMean(to_array(axes), keep_dims, name);
+// }
 
-template <DimIdx_t DIM>
-const std::string ReduceMean_Op<DIM>::Type = "ReduceMean";
+// template <DimIdx_t DIM>
+// const std::string ReduceMean_Op::Type = "ReduceMean";
 }  // namespace Aidge
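
For context, a minimal usage sketch of the new dynamic interface follows. It is not part of the commit: the include path "aidge/operator/ReduceMean.hpp" is inferred from the header guard, and the node name "myReduceMean" and the axes {1, 2} are illustrative values only. The point of the change shows up in the call site: the reduction axes are now a runtime std::vector<std::int32_t> instead of a std::array bound to a compile-time template parameter DIM.

    // Sketch only: build a ReduceMean node with axes chosen at runtime.
    #include <cstdint>
    #include <memory>
    #include <vector>

    #include "aidge/operator/ReduceMean.hpp"  // assumed install path

    int main() {
        // Any number of axes up to MaxDim is accepted (checked by AIDGE_ASSERT in the factory).
        std::vector<std::int32_t> axes{1, 2};

        // Before this commit the equivalent call needed a compile-time axis count,
        // e.g. something like Aidge::ReduceMean<2>({1, 2}, 1, "myReduceMean").
        std::shared_ptr<Aidge::Node> node =
            Aidge::ReduceMean(axes, /*keep_dims=*/1, "myReduceMean");

        return 0;
    }

A side effect of dropping the class template is visible in the header itself: computeOutputDims() and setBackend() are now only declared, so their definitions (and the Type string) are presumably moved out of line into the corresponding .cpp, which is only possible once ReduceMean_Op is no longer templated on DIM.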