Commit ed790d61 authored by Houssem ROUIS

add noop_with_empty_axes attr

parent 59f06670
2 merge requests: !212 Version 0.3.0, !178 Learning backend cuda
@@ -26,7 +26,7 @@
 #include "aidge/utils/Types.h"
 namespace Aidge {
-enum class ReduceMeanAttr { Axes, KeepDims };
+enum class ReduceMeanAttr { Axes, KeepDims, NoopWithEmptyAxes };
 class ReduceMean_Op : public OperatorTensor,
                 public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)> {
@@ -37,6 +37,7 @@ public:
 private:
     using Attributes_ = StaticAttributes<ReduceMeanAttr,
                                          std::vector<std::int32_t>,
+                                         DimSize_t,
                                          DimSize_t>;
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
@@ -45,11 +46,12 @@
 public:
     ReduceMean_Op() = delete;
-    ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims)
+    ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims, DimSize_t noop_with_empty_axes)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ReduceMeanAttr::Axes>(axes),
-            attr<ReduceMeanAttr::KeepDims>(keep_dims)))
+            attr<ReduceMeanAttr::KeepDims>(keep_dims),
+            attr<ReduceMeanAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
     {}
     /**
@@ -82,6 +84,7 @@
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::Axes>(); }
     inline DimSize_t& keepDims() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::KeepDims>(); }
+    inline DimSize_t& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::NoopWithEmptyAxes>(); }
     static const std::vector<std::string> getInputsName() {
@@ -103,10 +106,11 @@
  */
 inline std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes={},
                                         DimSize_t keep_dims=1,
+                                        DimSize_t noop_with_empty_axes=0,
                                         const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
-    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims), name);
+    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims, noop_with_empty_axes), name);
 }
@@ -127,7 +131,7 @@ inline std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes={}
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"axes", "keep_dims"};
+const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
 }
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
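For reference, a minimal sketch of how the updated ReduceMean factory could be called once this change lands; the include path is inferred from the header guard above, and the node names are illustrative:

```cpp
#include "aidge/operator/ReduceMean.hpp"  // assumed include path, matching the guard above

void buildExamples() {
    // Reduce over axes 1 and 2, keep the reduced dimensions, default noop behaviour.
    auto mean = Aidge::ReduceMean({1, 2}, /*keep_dims=*/1, /*noop_with_empty_axes=*/0, "mean_hw");

    // Empty axes list + noop_with_empty_axes=1: forwardDims() now propagates the
    // input shape unchanged instead of reducing over all axes.
    auto passthrough = Aidge::ReduceMean({}, /*keep_dims=*/1, /*noop_with_empty_axes=*/1, "mean_noop");

    (void)mean; (void)passthrough;  // nodes would normally be wired into a GraphView
}
```

ReduceSum below gains the same attribute, so the call pattern is identical.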
@@ -26,7 +26,7 @@
 #include "aidge/utils/Types.h"
 namespace Aidge {
-enum class ReduceSumAttr { Axes, KeepDims };
+enum class ReduceSumAttr { Axes, KeepDims, NoopWithEmptyAxes };
 class ReduceSum_Op : public OperatorTensor,
                 public Registrable<ReduceSum_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceSum_Op &)> {
@@ -37,6 +37,7 @@ public:
 private:
     using Attributes_ = StaticAttributes<ReduceSumAttr,
                                          std::vector<std::int32_t>,
+                                         DimSize_t,
                                          DimSize_t>;
     template <ReduceSumAttr e>
     using attr = typename Attributes_::template attr<e>;
@@ -45,11 +46,12 @@
 public:
     ReduceSum_Op() = delete;
-    ReduceSum_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims)
+    ReduceSum_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims, DimSize_t noop_with_empty_axes)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
             attr<ReduceSumAttr::Axes>(axes),
-            attr<ReduceSumAttr::KeepDims>(keep_dims)))
+            attr<ReduceSumAttr::KeepDims>(keep_dims),
+            attr<ReduceSumAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
     {}
     /**
@@ -82,6 +84,7 @@
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
     inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::Axes>(); }
     inline DimSize_t& keepDims() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::KeepDims>(); }
+    inline DimSize_t& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::NoopWithEmptyAxes>(); }
     static const std::vector<std::string> getInputsName() {
@@ -103,17 +106,18 @@
  */
 inline std::shared_ptr<Node> ReduceSum(const std::vector<std::int32_t> &axes={},
                                        DimSize_t keep_dims=1,
+                                       DimSize_t noop_with_empty_axes=0,
                                        const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceSum, not supported");
-    return std::make_shared<Node>(std::make_shared<ReduceSum_Op>(axes, keep_dims), name);
+    return std::make_shared<Node>(std::make_shared<ReduceSum_Op>(axes, keep_dims, noop_with_empty_axes), name);
 }
 } // namespace Aidge
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims"};
+const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
 }
 #endif /* AIDGE_CORE_OPERATOR_REDUCESUM_H_ */
@@ -28,20 +28,22 @@ void declare_ReduceMeanOp(py::module &m) {
   const std::string pyClassName("ReduceMeanOp");
   py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, OperatorTensor>(
     m, pyClassName.c_str(), py::multiple_inheritance())
-    .def(py::init<std::vector<std::int32_t>, DimSize_t>(), py::arg("axes"), py::arg("keep_dims"))
+    .def(py::init<std::vector<std::int32_t>, DimSize_t, DimSize_t>(), py::arg("axes"), py::arg("keep_dims"), py::arg("noop_with_empty_axes"))
     .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
     ;
   declare_registrable<ReduceMean_Op>(m, pyClassName);
   m.def("ReduceMean", [](const std::vector<int>& axes,
-                        DimSize_t keepDims,
-                        const std::string& name) {
+                         DimSize_t keepDims,
+                         DimSize_t noopWithEmptyAxes,
+                         const std::string& name) {
         // AIDGE_ASSERT(axes.size() == DIM, "axes size [{}] does not match DIM [{}]", axes.size(), DIM);
-        return ReduceMean(axes, keepDims, name);
+        return ReduceMean(axes, keepDims, noopWithEmptyAxes, name);
     }, py::arg("axes") = std::vector<std::int32_t>(),
        py::arg("keep_dims") = 1,
+       py::arg("noop_with_empty_axes") = 0,
        py::arg("name") = "");
 }
@@ -28,7 +28,7 @@ void init_ReduceSum(py::module &m) {
   const std::string pyClassName("ReduceSumOp");
   py::class_<ReduceSum_Op, std::shared_ptr<ReduceSum_Op>, OperatorTensor>(
     m, pyClassName.c_str(), py::multiple_inheritance())
-    .def(py::init<std::vector<std::int32_t>, DimSize_t>(), py::arg("axes"), py::arg("keep_dims"))
+    .def(py::init<std::vector<std::int32_t>, DimSize_t, DimSize_t>(), py::arg("axes"), py::arg("keep_dims"), py::arg("noop_with_empty_axes"))
     .def_static("get_inputs_name", &ReduceSum_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceSum_Op::getOutputsName)
     ;
@@ -36,10 +36,12 @@ void init_ReduceSum(py::module &m) {
   m.def("ReduceSum", [](const std::vector<int>& axes,
                         DimSize_t keepDims,
+                        DimSize_t noopWithEmptyAxes,
                         const std::string& name) {
-        return ReduceSum(axes, keepDims, name);
+        return ReduceSum(axes, keepDims, noopWithEmptyAxes, name);
     }, py::arg("axes") = std::vector<std::int32_t>(),
        py::arg("keep_dims") = 1,
+       py::arg("noop_with_empty_axes") = 0,
        py::arg("name") = "");
 }
 } // namespace Aidge
@@ -37,14 +37,20 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
     });
     std::sort(axes.begin(), axes.end());
-    if (axes.empty()) // if no axes are provided, reduce on all axes
+    // build output dimensions
+    std::vector<DimSize_t> outDims = getInput(0)->dims();
+    if (axes.empty())
     {
+        if(mAttributes->template getAttr<ReduceMeanAttr::NoopWithEmptyAxes>()) {
+            mOutputs[0]->resize(outDims);
+            return true;
+        }
+        // if no axes are provided and NoopWithEmptyAxes is false, reduce on all axes
         axes.resize(getInput(0)->nbDims());
         std::iota(axes.begin(), axes.end(), 0);
     }
-    // build output dimensions
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
     if (mAttributes->template getAttr<ReduceMeanAttr::KeepDims>()) {
         std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
     }
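For readers checking the new control flow, here is a self-contained sketch of the shape rule that forwardDims() now follows, in plain C++ without the Aidge types. The keep_dims == 0 branch is not visible in the hunk above, so its erase-based handling below is an assumption, and `reducedDims` is an illustrative name:

```cpp
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// Expected output shape of the reduction, given the three attributes involved here.
// Axes are assumed non-negative and unique (the operator normalises them earlier).
std::vector<std::size_t> reducedDims(const std::vector<std::size_t>& inDims,
                                     std::vector<std::int32_t> axes,
                                     bool keepDims,
                                     bool noopWithEmptyAxes) {
    if (axes.empty()) {
        // New behaviour: empty axes + noop_with_empty_axes -> output shape == input shape.
        if (noopWithEmptyAxes) { return inDims; }
        // Previous default, kept when noop_with_empty_axes is 0: reduce over every axis.
        axes.resize(inDims.size());
        std::iota(axes.begin(), axes.end(), 0);
    }
    std::sort(axes.begin(), axes.end());
    std::vector<std::size_t> outDims = inDims;
    if (keepDims) {
        for (std::int32_t a : axes) { outDims[a] = 1; }  // reduced axes collapse to 1
    } else {
        // Assumed branch: drop reduced axes, erasing from the back so indices stay valid.
        for (auto it = axes.rbegin(); it != axes.rend(); ++it) {
            outDims.erase(outDims.begin() + *it);
        }
    }
    return outDims;
}
```

For example, with inDims = {2, 3, 4}: empty axes and noopWithEmptyAxes == true return {2, 3, 4}, while noopWithEmptyAxes == false with keepDims == true return {1, 1, 1}. ReduceSum_Op::forwardDims() below receives the identical change.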
@@ -37,14 +37,20 @@ bool Aidge::ReduceSum_Op::forwardDims(bool /*allowDataDependency*/) {
     });
     std::sort(axes.begin(), axes.end());
-    if (axes.empty()) // if no axes are provided, reduce on all axes
+    // build output dimensions
+    std::vector<DimSize_t> outDims = getInput(0)->dims();
+    if (axes.empty())
     {
+        if(mAttributes->template getAttr<ReduceSumAttr::NoopWithEmptyAxes>()) {
+            mOutputs[0]->resize(outDims);
+            return true;
+        }
+        // if no axes are provided and NoopWithEmptyAxes is false, reduce on all axes
        axes.resize(getInput(0)->nbDims());
         std::iota(axes.begin(), axes.end(), 0);
     }
-    // build output dimensions
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
     if (mAttributes->template getAttr<ReduceSumAttr::KeepDims>()) {
         std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
     }