Skip to content
Snippets Groups Projects
Commit e98dd055 authored by Maxence Naud's avatar Maxence Naud
Browse files

[Upd] Matmul to MatMul

parent eb8bda50
No related branches found
No related tags found
No related merge requests found
......@@ -40,7 +40,7 @@ class test_parameters(unittest.TestCase):
def test_matmul(self):
out_channels = 8
matmul_op = aidge_core.Matmul(out_channels).get_operator()
matmul_op = aidge_core.MatMul(out_channels).get_operator()
self.assertEqual(matmul_op.get("OutChannels"), out_channels)
def test_producer_1D(self):
......
......@@ -33,7 +33,7 @@
#include "aidge/operator/ConvDepthWise.hpp"
#include "aidge/operator/FC.hpp"
#include "aidge/operator/GenericOperator.hpp"
#include "aidge/operator/Matmul.hpp"
#include "aidge/operator/MatMul.hpp"
#include "aidge/operator/MaxPooling.hpp"
//#include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/Operator.hpp"
......
......@@ -27,29 +27,29 @@
#include "aidge/utils/Registrar.hpp"
namespace Aidge {
enum class MatmulParam { OutChannels };
enum class MatMulParam { OutChannels };
class Matmul_Op : public Operator,
public Registrable<Matmul_Op,
class MatMul_Op : public Operator,
public Registrable<MatMul_Op,
std::string,
std::unique_ptr<OperatorImpl>(const Matmul_Op &)>,
public Parameterizable<MatmulParam, DimSize_t> {
std::unique_ptr<OperatorImpl>(const MatMul_Op &)>,
public Parameterizable<MatMulParam, DimSize_t> {
public:
std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char* Type = "Matmul";
static constexpr const char* Type = "MatMul";
Matmul_Op() = delete;
MatMul_Op() = delete;
using Parameterizable_ = Parameterizable<MatmulParam, DimSize_t>;
template <MatmulParam e> using param = typename Parameterizable_::template param<e>;
using Parameterizable_ = Parameterizable<MatMulParam, DimSize_t>;
template <MatMulParam e> using param = typename Parameterizable_::template param<e>;
Matmul_Op(DimSize_t out_channels)
MatMul_Op(DimSize_t out_channels)
: Operator(Type),
Parameterizable_(
param<MatmulParam::OutChannels>(out_channels)),
param<MatMulParam::OutChannels>(out_channels)),
mOutput(std::make_shared<Tensor>())
{
setDatatype(DataType::Float32);
......@@ -64,9 +64,9 @@ public:
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
// <in_features**, out_channels>
std::array<DimSize_t, 2> weightDims = {static_cast<DimSize_t>(mInputs[0]->size()), this->template get<MatmulParam::OutChannels>()};
std::array<DimSize_t, 2> weightDims = {this->template get<MatMulParam::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
// <out_channels, batch>
std::array<DimSize_t, 1> outputDims = {this->template get<MatmulParam::OutChannels>()};
std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template get<MatMulParam::OutChannels>()};
mInputs[1]->resize(weightDims);
mOutput->resize(outputDims);
......@@ -107,7 +107,7 @@ public:
void setBackend(const std::string& name) {
mImpl = Registrar<Matmul_Op>::create(name)(*this);
mImpl = Registrar<MatMul_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
......@@ -129,17 +129,17 @@ public:
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases
auto matmul = std::make_shared<Node>(std::make_shared<Matmul_Op>(out_channels), name);
addProducer(matmul, 1, {1, out_channels}, "w");
inline std::shared_ptr<Node> MatMul(DimSize_t out_channels, const std::string& name = "") {
// FIXME: properly handle default w initialization in every cases
auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(out_channels), name);
addProducer(matmul, 1, {out_channels, 1}, "w");
return matmul;
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::MatmulParam>::data[] = {"OutChannels"};
const char *const EnumStrings<Aidge::MatMulParam>::data[] = {"OutChannels"};
}
#endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */
......@@ -11,7 +11,7 @@
#include <pybind11/pybind11.h>
#include "aidge/operator/Matmul.hpp"
#include "aidge/operator/MatMul.hpp"
#include "aidge/utils/Parameter.hpp"
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Operator.hpp"
......@@ -20,13 +20,13 @@
namespace py = pybind11;
namespace Aidge {
void declare_Matmul(py::module &m) {
py::class_<Matmul_Op, std::shared_ptr<Matmul_Op>, Operator, PyAbstractParametrizable>(m, "Matmul_Op", py::multiple_inheritance());
void declare_MatMul(py::module &m) {
py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Operator, PyAbstractParametrizable>(m, "MatMul_Op", py::multiple_inheritance());
m.def("Matmul", &Matmul, py::arg("out_channels"), py::arg("name") = "");
m.def("MatMul", &MatMul, py::arg("out_channels"), py::arg("name") = "");
}
void init_Matmul(py::module &m) {
declare_Matmul(m);
void init_MatMul(py::module &m) {
declare_MatMul(m);
}
} // namespace Aidge
......@@ -28,7 +28,7 @@ void init_ConvDepthWise(py::module&);
void init_FC(py::module&);
void init_GenericOperator(py::module&);
void init_LeakyReLU(py::module&);
void init_Matmul(py::module&);
void init_MatMul(py::module&);
void init_MaxPooling(py::module&);
void init_Producer(py::module&);
void init_ReLU(py::module&);
......@@ -75,7 +75,7 @@ void init_Aidge(py::module& m){
init_FC(m);
init_GenericOperator(m);
init_LeakyReLU(m);
init_Matmul(m);
init_MatMul(m);
init_MaxPooling(m);
init_ReLU(m);
init_Softmax(m);
......
0% — Loading, or an error occurred while loading.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment