Skip to content
Snippets Groups Projects
Commit 5138cb48 authored by Maxence Naud's avatar Maxence Naud
Browse files

Merge branch 'vit_operators' into 'tiling'

Vit operators

See merge request !47
parents ac47be4a 714f2ceb
No related branches found
No related tags found
No related merge requests found
Showing
with 887 additions and 10 deletions
...@@ -20,6 +20,18 @@ class test_recipies(unittest.TestCase): ...@@ -20,6 +20,18 @@ class test_recipies(unittest.TestCase):
def tearDown(self): def tearDown(self):
pass pass
def test_remove_dropout(self):
    """remove_dropout must delete the Dropout node, keep every other
    original node, and introduce no new nodes in the graph view."""
    graph_view = aidge_core.sequential([
        # Use the name= keyword for both nodes, consistent with the
        # sibling test_remove_flatten below.
        aidge_core.GenericOperator("Conv", 1, 0, 1, name="Conv0"),
        aidge_core.GenericOperator("Dropout", 1, 0, 1, name="Dropout0")
    ])
    old_nodes = graph_view.get_nodes()
    aidge_core.remove_dropout(graph_view)
    # Exactly one node (the Dropout) must have been removed.
    self.assertTrue(len(graph_view.get_nodes()) == len(old_nodes) - 1)
    # NOTE(review): if Node.name is bound as a method (not a property),
    # `i.name` yields bound methods and this membership check is vacuous —
    # confirm whether this should be `i.name()`.
    self.assertTrue("Dropout0" not in [i.name for i in graph_view.get_nodes()])
    # The recipe must not have created any new nodes.
    self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()]))
def test_remove_flatten(self): def test_remove_flatten(self):
graph_view = aidge_core.sequential([ graph_view = aidge_core.sequential([
aidge_core.GenericOperator("Flatten", 1, 0, 1, name="Flatten0"), aidge_core.GenericOperator("Flatten", 1, 0, 1, name="Flatten0"),
......
...@@ -35,7 +35,9 @@ ...@@ -35,7 +35,9 @@
#include "aidge/operator/Conv.hpp" #include "aidge/operator/Conv.hpp"
#include "aidge/operator/ConvDepthWise.hpp" #include "aidge/operator/ConvDepthWise.hpp"
#include "aidge/operator/Div.hpp" #include "aidge/operator/Div.hpp"
#include "aidge/operator/Erf.hpp"
#include "aidge/operator/FC.hpp" #include "aidge/operator/FC.hpp"
#include "aidge/operator/Gather.hpp"
#include "aidge/operator/GenericOperator.hpp" #include "aidge/operator/GenericOperator.hpp"
#include "aidge/operator/MatMul.hpp" #include "aidge/operator/MatMul.hpp"
#include "aidge/operator/MaxPooling.hpp" #include "aidge/operator/MaxPooling.hpp"
...@@ -46,13 +48,15 @@ ...@@ -46,13 +48,15 @@
#include "aidge/operator/Pad.hpp" #include "aidge/operator/Pad.hpp"
#include "aidge/operator/Producer.hpp" #include "aidge/operator/Producer.hpp"
#include "aidge/operator/Pow.hpp" #include "aidge/operator/Pow.hpp"
#include "aidge/operator/ReduceMean.hpp"
#include "aidge/operator/ReLU.hpp" #include "aidge/operator/ReLU.hpp"
#include "aidge/operator/Reshape.hpp"
#include "aidge/operator/Scaling.hpp" #include "aidge/operator/Scaling.hpp"
#include "aidge/operator/Slice.hpp" #include "aidge/operator/Slice.hpp"
#include "aidge/operator/Softmax.hpp" #include "aidge/operator/Softmax.hpp"
#include "aidge/operator/Sqrt.hpp" #include "aidge/operator/Sqrt.hpp"
#include "aidge/operator/Sub.hpp" #include "aidge/operator/Sub.hpp"
#include "aidge/operator/Transpose.hpp"
#include "aidge/scheduler/Scheduler.hpp" #include "aidge/scheduler/Scheduler.hpp"
#include "aidge/recipies/Recipies.hpp" #include "aidge/recipies/Recipies.hpp"
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_ERF_H_
#define AIDGE_CORE_OPERATOR_ERF_H_
#include <cassert>
#include <memory>
#include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {

/**
 * @brief Element-wise error function (erf) operator.
 *
 * One data input, one output, no attributes.
 */
class Erf_Op : public OperatorTensor,
    public Registrable<Erf_Op, std::string, std::unique_ptr<OperatorImpl>(const Erf_Op&)> {
public:
    static const std::string Type;

    Erf_Op() : OperatorTensor(Type, 1, 0, 1) {}

    /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
     * @param op Operator to copy.
     */
    Erf_Op(const Erf_Op& op)
        : OperatorTensor(op)
    {
        // Re-create a backend implementation for the copy only if the source
        // operator already had one; backend name is taken from the copied output.
        mImpl = op.mImpl ? Registrar<Erf_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
    }

    /**
     * @brief Clone the operator using its copy-constructor.
     * @see Operator::Erf_Op
     */
    std::shared_ptr<Operator> clone() const override {
        return std::make_shared<Erf_Op>(*this);
    }

    /// @brief Select the backend implementation and propagate it to the tensors.
    void setBackend(const std::string& name) override {
        mImpl = Registrar<Erf_Op>::create(name)(*this);
        mOutputs[0]->setBackend(name);

        // FIXME: temporary workaround
        getInput(0)->setBackend(name);
    }

    static const std::vector<std::string> getInputsName(){
        return {"data_input"};
    }
    static const std::vector<std::string> getOutputsName(){
        return {"data_output"};
    }
};

/// @brief Factory: build a Node wrapping a new Erf_Op.
inline std::shared_ptr<Node> Erf(const std::string& name = "") {
    return std::make_shared<Node>(std::make_shared<Erf_Op>(), name);
}
}
#endif /* AIDGE_CORE_OPERATOR_ERF_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_GATHER_H_
#define AIDGE_CORE_OPERATOR_GATHER_H_
#include <cassert>
#include <memory>
#include <vector>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
/// Attribute of the Gather operator: the axis along which entries are gathered.
enum class GatherAttr { Axis };

/**
 * @brief Gathers entries of the data input along attribute Axis, selected by
 * the second ("indexes") input.
 *
 * Two data inputs (data, indexes), one output.
 */
class Gather_Op : public OperatorTensor,
                public Registrable<Gather_Op,
                                   std::string,
                                   std::unique_ptr<OperatorImpl>(const Gather_Op&)>,
                public StaticAttributes<GatherAttr, int> {

public:
    static const std::string Type;

    // An axis is mandatory: forbid default construction.
    Gather_Op() = delete;

    using Attributes_ = StaticAttributes<GatherAttr, int>;
    template <GatherAttr e> using attr = typename Attributes_::template attr<e>;

    /// @param axis dimension of the data input along which to gather.
    Gather_Op(int axis)
            : OperatorTensor(Type, 2, 0, 1),
            Attributes_(
                attr<GatherAttr::Axis>(axis))
    {}

    /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
     * @param op Operator to copy.
     */
    Gather_Op(const Gather_Op& op)
        : OperatorTensor(op),
          Attributes_(op)
    {
        // Re-create a backend implementation for the copy only if the source had one.
        mImpl = op.mImpl ? Registrar<Gather_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
    }

    /**
     * @brief Clone the operator using its copy-constructor.
     * @see Operator::Gather_Op
     */
    std::shared_ptr<Operator> clone() const override {
        return std::make_shared<Gather_Op>(*this);
    }

    // Output shape depends on the indexes input; implemented out-of-line.
    void computeOutputDims() override final;

    /// @brief Select the backend implementation and propagate it to the tensors.
    void setBackend(const std::string& name) override {
        mImpl = Registrar<Gather_Op>::create(name)(*this);
        mOutputs[0]->setBackend(name);

        // FIXME: temporary workaround
        getInput(0)->setBackend(name);
        getInput(1)->setBackend(name);
    }

    static const std::vector<std::string> getInputsName(){
        return {"data_input", "indexes"};
    }
    static const std::vector<std::string> getOutputsName(){
        return {"data_output"};
    }
};

/// @brief Factory: build a Node wrapping a new Gather_Op.
inline std::shared_ptr<Node> Gather(int axis = 0, const std::string& name = "") {
    return std::make_shared<Node>(std::make_shared<Gather_Op>(axis), name);
}
} // namespace Aidge

namespace {
template <>
const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Axis"};
}
#endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
#define AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
#include <array>
#include <cmath>
#include <numeric>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
/// Attributes: Axes = dimensions to reduce over; KeepDims = when non-zero,
/// reduced dimensions are kept in the output with size 1.
enum class ReduceMeanAttr { Axes, KeepDims };

/**
 * @brief Mean reduction over a fixed set of axes.
 * @tparam DIM number of axes listed in the Axes attribute.
 */
template <DimIdx_t DIM>
class ReduceMean_Op : public OperatorTensor,
                public Registrable<ReduceMean_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
                public StaticAttributes<ReduceMeanAttr, std::array<int, DIM>, DimSize_t> {

   public:
    static const std::string Type;

    // Axes are mandatory: forbid default construction.
    ReduceMean_Op() = delete;

    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::array<int, DIM>, DimSize_t>;
    template <ReduceMeanAttr e>
    using attr = typename Attributes_::template attr<e>;

    /// @param axes axes to reduce over (negative values count from the end).
    /// @param keep_dims non-zero to keep reduced axes with size 1.
    constexpr ReduceMean_Op(const std::array<int, DIM> &axes, DimSize_t keep_dims)
        : OperatorTensor(Type, 1, 0, 1),
          Attributes_(attr<ReduceMeanAttr::Axes>(axes),
                      attr<ReduceMeanAttr::KeepDims>(keep_dims)) {}

    /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
     * @param op Operator to copy.
     */
    ReduceMean_Op(const ReduceMean_Op<DIM>& op)
        : OperatorTensor(op),
          Attributes_(op)
    {
        // Re-create a backend implementation for the copy only if the source had one.
        mImpl = op.mImpl ? Registrar<ReduceMean_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
    }

    /**
     * @brief Clone the operator using its copy-constructor.
     * @see Operator::ReduceMean_Op
     */
    std::shared_ptr<Operator> clone() const override {
        return std::make_shared<ReduceMean_Op<DIM>>(*this);
    }

    /// @brief Compute the output shape: drop (or collapse to 1) every reduced axis.
    void computeOutputDims() override final {
        if (!getInput(0)->empty()) {
            std::vector<DimSize_t> outDims;
            // Keep each input dimension unless it appears in the Axes attribute.
            for(std::size_t d=0; d<getInput(0)->dims().size(); ++d)
            {
                bool reducedDim = false;
                for(std::size_t i=0; i<DIM; ++i)
                {
                    int axis_ = this->template getAttr<ReduceMeanAttr::Axes>()[i];
                    // Negative axes count from the end of the input shape.
                    std::size_t axis= axis_>=0? axis_: axis_ + getInput(0)->nbDims();
                    if(axis == d)
                    {
                        reducedDim = true;
                        break;
                    }
                }
                if(reducedDim)
                {
                    if(this->template getAttr<ReduceMeanAttr::KeepDims>())
                        outDims.push_back(1);
                }
                else
                    outDims.push_back(getInput(0)->dims()[d]);
            }
            if(outDims.size()>0)
                mOutputs[0]->resize(outDims);
            else
                // Every axis was reduced with KeepDims==0: output shape is {1}.
                mOutputs[0]->resize({1});
        }
    }

    /// @brief Select the backend implementation and propagate it to the tensors.
    void setBackend(const std::string &name) override {
        mImpl = Registrar<ReduceMean_Op<DIM>>::create(name)(*this);
        mOutputs[0]->setBackend(name);

        // FIXME: temporary workaround
        getInput(0)->setBackend(name);
    }

    static const std::vector<std::string> getInputsName(){
        return {"data_input"};
    }
    static const std::vector<std::string> getOutputsName(){
        return {"data_output"};
    }
};

/// @brief Factory: build a ReduceMean node from an std::array of axes.
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ReduceMean(const std::array<int, DIM> &axes,
                                        DimSize_t keep_dims=1,
                                        const std::string& name = "") {
    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
    return std::make_shared<Node>(std::make_shared<ReduceMean_Op<static_cast<DimIdx_t>(DIM)>>(axes, keep_dims), name);
}

// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> ReduceMean(
    int const (&axes)[DIM],
    DimSize_t keep_dims = 1,
    const std::string& name = "") {
    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
    return ReduceMean(to_array(axes), keep_dims, name);
}

template <DimIdx_t DIM>
const std::string ReduceMean_Op<DIM>::Type = "ReduceMean";
}  // namespace Aidge

namespace {
template <>
const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"Axes", "KeepDims"};
}
#endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_RESHAPE_H_
#define AIDGE_CORE_OPERATOR_RESHAPE_H_
#include <cassert>
#include <memory>
#include <vector>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
/// Attribute of the Reshape operator: the requested output shape.
enum class ReshapeAttr { Shape };

/**
 * @brief Reshapes its single input tensor to the shape given by attribute Shape.
 *
 * One data input, one output.
 */
class Reshape_Op : public OperatorTensor,
                   public Registrable<Reshape_Op, std::string, std::unique_ptr<OperatorImpl>(const Reshape_Op&)>,
                   public StaticAttributes<ReshapeAttr, std::vector<std::int64_t>> {

public:
    static const std::string Type;

    // A target shape is mandatory: forbid default construction.
    Reshape_Op() = delete;

    using Attributes_ = StaticAttributes<ReshapeAttr, std::vector<std::int64_t>>;
    template <ReshapeAttr e>
    using attr = typename Attributes_::template attr<e>;

    /// @param shape requested output shape.
    Reshape_Op(const std::vector<std::int64_t>& shape)
        : OperatorTensor(Type, 1, 0, 1),
          Attributes_(attr<ReshapeAttr::Shape>(shape))
    {}

    /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
     * @param op Operator to copy.
     */
    Reshape_Op(const Reshape_Op& op)
        : OperatorTensor(op),
          Attributes_(op)
    {
        // Re-create a backend implementation for the copy only if the source had one.
        mImpl = op.mImpl ? Registrar<Reshape_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
    }

    /**
     * @brief Clone the operator using its copy-constructor.
     * @see Operator::Reshape_Op
     */
    std::shared_ptr<Operator> clone() const override {
        return std::make_shared<Reshape_Op>(*this);
    }

    // Output shape derived from the Shape attribute; implemented out-of-line.
    void computeOutputDims() override final;

    /// @brief Select the backend implementation and propagate it to the tensors.
    void setBackend(const std::string& name) override {
        mImpl = Registrar<Reshape_Op>::create(name)(*this);
        mOutputs[0]->setBackend(name);

        // FIXME: temporary workaround
        getInput(0)->setBackend(name);
    }

    static const std::vector<std::string> getInputsName(){
        return {"data_input"};
    }
    static const std::vector<std::string> getOutputsName(){
        return {"data_output"};
    }
};

/// @brief Factory: build a Node wrapping a new Reshape_Op.
inline std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape,
                                     const std::string &name = "") {
    return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape), name);
}
} // namespace Aidge

namespace {
template <>
const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = { "Shape" };
}
#endif /* AIDGE_CORE_OPERATOR_RESHAPE_H_ */
...@@ -16,29 +16,44 @@ ...@@ -16,29 +16,44 @@
#include <memory> #include <memory>
#include <vector> #include <vector>
#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Tensor.hpp" #include "aidge/data/Tensor.hpp"
#include "aidge/data/Data.hpp" #include "aidge/data/Data.hpp"
#include "aidge/graph/Node.hpp" #include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h" #include "aidge/utils/Types.h"
namespace Aidge { namespace Aidge {
enum class SoftmaxAttr { AxisIdx };
class Softmax_Op : public OperatorTensor, class Softmax_Op : public OperatorTensor,
public Registrable<Softmax_Op, std::string, std::unique_ptr<OperatorImpl>(const Softmax_Op&)> { public Registrable<Softmax_Op,
std::string,
std::unique_ptr<OperatorImpl>(const Softmax_Op&)>,
public StaticAttributes<SoftmaxAttr, int> {
public: public:
static const std::string Type; static const std::string Type;
Softmax_Op() : OperatorTensor(Type, 1, 0, 1) {} Softmax_Op() = delete;
using Attributes_ = StaticAttributes<SoftmaxAttr, int>;
template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
Softmax_Op(int axis)
: OperatorTensor(Type, 1, 0, 1),
Attributes_(attr<SoftmaxAttr::AxisIdx>(axis)) {}
/** /**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy. * @param op Operator to copy.
*/ */
Softmax_Op(const Softmax_Op& op) Softmax_Op(const Softmax_Op& op)
: OperatorTensor(op) : OperatorTensor(op),
Attributes_(op)
{ {
mImpl = op.mImpl ? Registrar<Softmax_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; mImpl = op.mImpl ? Registrar<Softmax_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
} }
...@@ -67,9 +82,14 @@ public: ...@@ -67,9 +82,14 @@ public:
} }
}; };
inline std::shared_ptr<Node> Softmax(const std::string& name = "") { inline std::shared_ptr<Node> Softmax(int axis, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Softmax_Op>(), name); return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
} }
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"Axis"};
} }
#endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */ #endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_TRANSPOSE_H_
#define AIDGE_CORE_OPERATOR_TRANSPOSE_H_
#include <array>
#include <cmath>
#include <numeric>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
/// Attribute of the Transpose operator: the permutation applied to the input dimensions.
enum class TransposeAttr { OutputDimsOrder };

/**
 * @brief Permutes the dimensions of its single input tensor.
 * @tparam DIM rank of the tensors handled by this operator.
 */
template <DimIdx_t DIM>
class Transpose_Op : public OperatorTensor,
                public Registrable<Transpose_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
                public StaticAttributes<TransposeAttr,
                                       std::array<DimSize_t, DIM>> {

   public:
    static const std::string Type;

    // A permutation is mandatory: forbid default construction.
    Transpose_Op() = delete;

    using Attributes_ = StaticAttributes<TransposeAttr,
                                         std::array<DimSize_t, DIM>>;
    template <TransposeAttr e>
    using attr = typename Attributes_::template attr<e>;

    /// @param output_dims_order output_dims_order[i] is the index of the input
    /// dimension mapped to output dimension i.
    constexpr Transpose_Op(const std::array<DimSize_t, DIM> &output_dims_order)
        : OperatorTensor(Type, 1, 0, 1),
          Attributes_(attr<TransposeAttr::OutputDimsOrder>(output_dims_order)) { }

    /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
     * @param op Operator to copy.
     */
    Transpose_Op(const Transpose_Op<DIM>& op)
        : OperatorTensor(op),
          Attributes_(op)
    {
        // Re-create a backend implementation for the copy only if the source had one.
        mImpl = op.mImpl ? Registrar<Transpose_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
    }

    /**
     * @brief Clone the operator using its copy-constructor.
     * @see Operator::Transpose_Op
     */
    std::shared_ptr<Operator> clone() const override {
        return std::make_shared<Transpose_Op<DIM>>(*this);
    }

    /// @brief Output dims are the input dims permuted by OutputDimsOrder.
    void computeOutputDims() override final {
        if (!getInput(0)->empty()) {
            // Read the permutation through the typed attribute accessor,
            // consistent with ReduceMean_Op, instead of copying the whole
            // attribute tuple and extracting element 0 via std::get + cast.
            const std::array<DimSize_t, DIM>& outDimsOrder =
                this->template getAttr<TransposeAttr::OutputDimsOrder>();
            std::vector<DimSize_t> outputDims;
            for (std::size_t i = 0; i < DIM; ++i) {
                outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
            }
            mOutputs[0]->resize(outputDims);
        }
    }

    /// @brief Select the backend implementation and propagate it to the tensors.
    void setBackend(const std::string &name) override {
        mImpl = Registrar<Transpose_Op<DIM>>::create(name)(*this);
        mOutputs[0]->setBackend(name);

        // FIXME: temporary workaround
        getInput(0)->setBackend(name);
    }

    static const std::vector<std::string> getInputsName(){
        return {"data_input"};
    }
    static const std::vector<std::string> getOutputsName(){
        return {"data_output"};
    }
};

/// @brief Factory: build a Transpose node from an std::array permutation.
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Transpose(const std::array<DimSize_t, DIM> &output_dims_order,
                                       const std::string& name = "") {
    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Transpose, not supported");
    return std::make_shared<Node>(std::make_shared<Transpose_Op<static_cast<DimIdx_t>(DIM)>>(output_dims_order), name);
}

// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> Transpose(
    DimSize_t const (&output_dims_order)[DIM],
    const std::string& name = "") {
    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Transpose, not supported");
    return Transpose(to_array(output_dims_order), name);
}

template <DimIdx_t DIM>
const std::string Transpose_Op<DIM>::Type = "Transpose";
}  // namespace Aidge

namespace {
template <>
const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"OutputDimsOrder"};
}
#endif /* AIDGE_CORE_OPERATOR_TRANSPOSE_H_ */
...@@ -42,6 +42,24 @@ void fuseMulAdd(std::shared_ptr<Node> matmul,std::shared_ptr<Node> add); ...@@ -42,6 +42,24 @@ void fuseMulAdd(std::shared_ptr<Node> matmul,std::shared_ptr<Node> add);
*/ */
void fuseMulAdd(std::shared_ptr<GraphView> graphView); void fuseMulAdd(std::shared_ptr<GraphView> graphView);
// REMOVE Dropout
/**
* @brief Remove ``Dropout`` Node.
*
* @param dropout Dropout node to remove.
*/
void removeDropout(std::shared_ptr<Node> dropout);
void removeDropout(std::shared_ptr<MatchSolution> solution);
/**
* @brief Remove ``Dropout`` Node.
*
* @param graphView Graph view to use graph matching on, in order to apply transformations.
*/
void removeDropout(std::shared_ptr<GraphView> graphView);
// REMOVE FLATTEN + FC -> FC // REMOVE FLATTEN + FC -> FC
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include <string>
#include "aidge/operator/Concat.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Concat(py::module& m) {
    // Expose the Concat operator class, then its node factory.
    auto concatClass = py::class_<Concat_Op, std::shared_ptr<Concat_Op>, OperatorTensor, Attributes>(
        m, "ConcatOp", py::multiple_inheritance());
    concatClass.def("get_inputs_name", &Concat_Op::getInputsName);
    concatClass.def("get_outputs_name", &Concat_Op::getOutputsName);

    m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = "");
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/operator/Erf.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Erf(py::module& m) {
    // Expose the Erf operator class, then its node factory.
    auto erfClass = py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(
        m, "ErfOp", py::multiple_inheritance());
    erfClass.def("get_inputs_name", &Erf_Op::getInputsName);
    erfClass.def("get_outputs_name", &Erf_Op::getOutputsName);

    m.def("Erf", &Erf, py::arg("name") = "");
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include <string>
#include "aidge/operator/Gather.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Gather(py::module& m) {
    // Expose the Gather operator class, then its node factory.
    auto gatherClass = py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor, Attributes>(
        m, "GatherOp", py::multiple_inheritance());
    gatherClass.def("get_inputs_name", &Gather_Op::getInputsName);
    gatherClass.def("get_outputs_name", &Gather_Op::getOutputsName);

    m.def("Gather", &Gather, py::arg("axis"), py::arg("name") = "");
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <string>
#include <vector>
#include <array>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/ReduceMean.hpp"
#include "aidge/utils/Types.h"
namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
    // Bind the rank-specific operator class under a "ReduceMean<DIM>D" name.
    const std::string className = "ReduceMeanOp" + std::to_string(DIM) + "D";
    py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, OperatorTensor, Attributes>(
        m, className.c_str(), py::multiple_inheritance())
        .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName)
        .def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName);

    // Factory wrapper: validate the runtime axes count against the
    // compile-time DIM before forwarding to the C++ factory.
    auto factory = [](const std::vector<int>& axes,
                      DimSize_t keepDims,
                      const std::string& name) {
        AIDGE_ASSERT(axes.size() == DIM, "axes size [%ld] does not match DIM [%d]", axes.size(), DIM);
        return ReduceMean<DIM>(to_array<DIM>(axes.begin()), keepDims, name);
    };
    m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), factory,
          py::arg("axes"),
          py::arg("keep_dims") = 1,
          py::arg("name") = "");
}
void init_ReduceMean(py::module &m) {
    // Register the ReduceMean bindings for 1, 2 and 3 reduction axes.
    declare_ReduceMeanOp<1>(m);
    declare_ReduceMeanOp<2>(m);
    declare_ReduceMeanOp<3>(m);

    // FIXME:
    // m.def("ReduceMean1D", static_cast<NodeAPI(*)(const char*, int, int, int const
    // (&)[1])>(&ReduceMean));
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/operator/Reshape.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Reshape(py::module& m) {
    // Expose the Reshape operator class, then its node factory.
    auto reshapeClass = py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, OperatorTensor>(
        m, "ReshapeOp", py::multiple_inheritance());
    reshapeClass.def("get_inputs_name", &Reshape_Op::getInputsName);
    reshapeClass.def("get_outputs_name", &Reshape_Op::getOutputsName);

    m.def("Reshape", &Reshape, py::arg("shape"), py::arg("name") = "");
}
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/operator/Slice.hpp"
#include "aidge/operator/OperatorTensor.hpp"
namespace py = pybind11;
namespace Aidge {
void init_Slice(py::module& m) {
    // Expose the Slice operator class, then its node factory.
    auto sliceClass = py::class_<Slice_Op, std::shared_ptr<Slice_Op>, OperatorTensor>(
        m, "SliceOp", py::multiple_inheritance());
    sliceClass.def("get_inputs_name", &Slice_Op::getInputsName);
    sliceClass.def("get_outputs_name", &Slice_Op::getOutputsName);

    m.def("Slice", &Slice, py::arg("starts"), py::arg("ends"), py::arg("axes"), py::arg("name") = "");
}
} // namespace Aidge
...@@ -19,10 +19,10 @@ namespace py = pybind11; ...@@ -19,10 +19,10 @@ namespace py = pybind11;
namespace Aidge { namespace Aidge {
void init_Softmax(py::module& m) { void init_Softmax(py::module& m) {
py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance()) py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor, Attributes>(m, "SoftmaxOp", py::multiple_inheritance())
.def("get_inputs_name", &Softmax_Op::getInputsName) .def("get_inputs_name", &Softmax_Op::getInputsName)
.def("get_outputs_name", &Softmax_Op::getOutputsName); .def("get_outputs_name", &Softmax_Op::getOutputsName);
m.def("Softmax", &Softmax, py::arg("name") = ""); m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
} }
} // namespace Aidge } // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <string>
#include <vector>
#include <array>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Transpose.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp"
namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM>
void declare_Transpose(py::module &m) {
    // Bind the rank-specific operator class under a "Transpose<DIM>D" name.
    const std::string className = "TransposeOp" + std::to_string(DIM) + "D";
    py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, OperatorTensor, Attributes>(
        m, className.c_str(), py::multiple_inheritance())
        .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
        .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName);

    // Factory wrapper: validate the runtime permutation length against the
    // compile-time DIM before forwarding to the C++ factory.
    auto factory = [](const std::vector<DimSize_t>& output_dims_order,
                      const std::string& name) {
        AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [%ld] does not match DIM [%d]", output_dims_order.size(), DIM);
        return Transpose<DIM>(to_array<DIM>(output_dims_order.begin()), name);
    };
    m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), factory,
          py::arg("output_dims_order"),
          py::arg("name") = "");
}
void init_Transpose(py::module &m) {
    // Register the Transpose bindings for tensor ranks 2 through 6.
    declare_Transpose<2>(m);
    declare_Transpose<3>(m);
    declare_Transpose<4>(m);
    declare_Transpose<5>(m);
    declare_Transpose<6>(m);
}
} // namespace Aidge
...@@ -24,10 +24,13 @@ void init_OperatorTensor(py::module&); ...@@ -24,10 +24,13 @@ void init_OperatorTensor(py::module&);
void init_Add(py::module&); void init_Add(py::module&);
void init_AvgPooling(py::module&); void init_AvgPooling(py::module&);
void init_BatchNorm(py::module&); void init_BatchNorm(py::module&);
void init_Concat(py::module&);
void init_Conv(py::module&); void init_Conv(py::module&);
void init_ConvDepthWise(py::module&); void init_ConvDepthWise(py::module&);
void init_Div(py::module&); void init_Div(py::module&);
void init_Erf(py::module&);
void init_FC(py::module&); void init_FC(py::module&);
void init_Gather(py::module&);
void init_GenericOperator(py::module&); void init_GenericOperator(py::module&);
void init_LeakyReLU(py::module&); void init_LeakyReLU(py::module&);
void init_MatMul(py::module&); void init_MatMul(py::module&);
...@@ -37,10 +40,14 @@ void init_Mul(py::module&); ...@@ -37,10 +40,14 @@ void init_Mul(py::module&);
void init_Producer(py::module&); void init_Producer(py::module&);
void init_Pad(py::module&); void init_Pad(py::module&);
void init_Pow(py::module&); void init_Pow(py::module&);
void init_ReduceMean(py::module&);
void init_ReLU(py::module&); void init_ReLU(py::module&);
void init_Reshape(py::module&);
void init_Slice(py::module&);
void init_Softmax(py::module&); void init_Softmax(py::module&);
void init_Sqrt(py::module&); void init_Sqrt(py::module&);
void init_Sub(py::module&); void init_Sub(py::module&);
void init_Transpose(py::module&);
void init_Identity(py::module&); void init_Identity(py::module&);
void init_Node(py::module&); void init_Node(py::module&);
...@@ -72,10 +79,13 @@ void init_Aidge(py::module& m){ ...@@ -72,10 +79,13 @@ void init_Aidge(py::module& m){
init_Add(m); init_Add(m);
init_AvgPooling(m); init_AvgPooling(m);
init_BatchNorm(m); init_BatchNorm(m);
init_Concat(m);
init_Conv(m); init_Conv(m);
init_ConvDepthWise(m); init_ConvDepthWise(m);
init_Div(m); init_Div(m);
init_Erf(m);
init_FC(m); init_FC(m);
init_Gather(m);
init_GenericOperator(m); init_GenericOperator(m);
init_LeakyReLU(m); init_LeakyReLU(m);
init_MatMul(m); init_MatMul(m);
...@@ -85,10 +95,14 @@ void init_Aidge(py::module& m){ ...@@ -85,10 +95,14 @@ void init_Aidge(py::module& m){
init_Pad(m); init_Pad(m);
init_Pow(m); init_Pow(m);
init_ReduceMean(m);
init_ReLU(m); init_ReLU(m);
init_Reshape(m);
init_Slice(m);
init_Softmax(m); init_Softmax(m);
init_Sqrt(m); init_Sqrt(m);
init_Sub(m); init_Sub(m);
init_Transpose(m);
init_Identity(m); init_Identity(m);
init_Producer(m); init_Producer(m);
......
...@@ -38,6 +38,13 @@ void init_Recipies(py::module &m) { ...@@ -38,6 +38,13 @@ void init_Recipies(py::module &m) {
// :type nodes: list of :py:class:`aidge_core.Node` // :type nodes: list of :py:class:`aidge_core.Node`
// )mydelimiter"); // )mydelimiter");
m.def("remove_dropout",static_cast<void(*)(std::shared_ptr<GraphView>)>(removeDropout), py::arg("graph_view"), R"mydelimiter(
Recipie to remove a dropout operator.
:param graph_view: Graph view on which we want to apply the recipie
:type graph_view: :py:class:`aidge_core.GraphView`
)mydelimiter");
m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter( m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
Recipie to remove a flatten operator. Recipie to remove a flatten operator.
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <string>

#include "aidge/operator/Erf.hpp"

// Registry type string for the Erf operator (declared in Erf.hpp).
const std::string Aidge::Erf_Op::Type = "Erf";
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment