diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
index cd56a55fa7e9cbcefba4715188fd270462e81976..62878a57d7f00d36eff92f95e5a2efff484e42df 100644
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ b/.gitlab/ci/build.gitlab-ci.yml
@@ -17,6 +17,66 @@ build:ubuntu_cpp:
       - build_cpp/
       - install_cpp/
 
+build:ubuntu_cpp_g++10:
+  stage: build
+  needs: []
+  tags:
+    - docker
+
+  script:
+    - apt install -y g++-10
+    - mkdir -p build_cpp
+    - mkdir -p install_cpp
+    - cd build_cpp
+    - export CXX=/usr/bin/g++-10
+    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
+    - make -j4 all install
+
+build:ubuntu_cpp_g++12:
+  stage: build
+  needs: []
+  tags:
+    - docker
+
+  script:
+    - apt install -y g++-12
+    - mkdir -p build_cpp
+    - mkdir -p install_cpp
+    - cd build_cpp
+    - export CXX=/usr/bin/g++-12
+    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
+    - make -j4 all install
+
+build:ubuntu_cpp_clang12:
+  stage: build
+  needs: []
+  tags:
+    - docker
+
+  script:
+    - apt install -y clang-12
+    - mkdir -p build_cpp
+    - mkdir -p install_cpp
+    - cd build_cpp
+    - export CXX=/usr/bin/clang++-12
+    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
+    - make -j4 all install
+
+build:ubuntu_cpp_clang15:
+  stage: build
+  needs: []
+  tags:
+    - docker
+
+  script:
+    - apt install -y clang-15
+    - mkdir -p build_cpp
+    - mkdir -p install_cpp
+    - cd build_cpp
+    - export CXX=/usr/bin/clang++-15
+    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
+    - make -j4 all install
+
 build:ubuntu_python:
   stage: build
   needs: []
@@ -65,3 +125,22 @@ build:windows_cpp:
     paths:
       - build_cpp/
       - install_cpp/
+
+build:windows_python:
+  stage: build
+  needs: []
+  tags:
+    - docker
+
+  script:
+    - python3 -m pip install virtualenv
+    - virtualenv venv
+    - source venv/bin/activate
+    # NumPy dependency for unit tests
+    - python3 -m pip install numpy
+    - export AIDGE_INSTALL=`pwd`/install
+    - python3 -m pip install .
+  artifacts:
+    expire_in: 1 week
+    paths:
+      - venv/
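
Note on the compiler-matrix jobs added above: each one only swaps the `CXX` seen by CMake, while flags and build steps stay identical, so the same tree is exercised under g++-10/12 and clang-12/15 (depending on the base image, an `apt update` before `apt install` may also be needed). A minimal standalone check, for illustration only and not part of this diff, that the exported `CXX` actually took effect:

```cpp
// Print which compiler built this translation unit; __clang__ must be
// tested first, since clang also defines __GNUC__.
#include <cstdio>

int main() {
#if defined(__clang__)
    std::printf("built with clang %d.%d\n", __clang_major__, __clang_minor__);
#elif defined(__GNUC__)
    std::printf("built with g++ %d.%d\n", __GNUC__, __GNUC_MINOR__);
#endif
    return 0;
}
```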
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 1e0f17e6db9278e7edf2a11918472c084561a308..57403270d44d66e87675a3cadb227342c0cacd91 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -81,14 +81,14 @@ public:
     //     return *in;
     // }
 
-    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
 
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    constexpr void computeOutputDims() override final {
+    void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             const auto expectedDims =  mInputs[0]->dims();
             std::size_t nonEmptyInputTensor = 1;
@@ -140,7 +140,7 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<Add_Op<NUM>>::create(name)(*this);
         mOutput->setBackend(name);
 
@@ -150,7 +150,7 @@ public:
         }
     }
 
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
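
The `constexpr` removals in this file (and the ones that follow) are required rather than stylistic: before C++20, a `virtual` member function may not be declared `constexpr`, so `constexpr ... override final` is ill-formed under `-std=c++14/17` — presumably what the stricter compiler jobs above surfaced. A standalone sketch, not Aidge code:

```cpp
// Pre-C++20, `virtual` and `constexpr` cannot be combined on the same
// member function.
struct Base {
    virtual void associateInput(int idx) = 0;
    virtual ~Base() = default;
};

struct Derived final : Base {
    // constexpr void associateInput(int idx) override {}
    //   ^ error in C++14/17: virtual function cannot be constexpr
    void associateInput(int idx) override { (void)idx; }  // OK
};
```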
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index b29463c675eb8516e02b83ad47816e9e9aa5d147..b534f1f68c96ea0252e1b2506bd29ea4c07d2985 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -84,7 +84,7 @@ public:
         return std::make_shared<AvgPooling_Op<DIM>>(*this);
     }
 
-    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 1 && "operator supports only 1 input");
         (void) inputIdx; // avoid unused warning
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
@@ -92,7 +92,7 @@ public:
         mInput = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    constexpr void computeOutputDims() override final {
+    void computeOutputDims() override final {
         if (!mInput->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
@@ -145,7 +145,7 @@ public:
     }
 
 
-    void setBackend(const std::string &name) {
+    void setBackend(const std::string &name) override {
         mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
         mOutput->setBackend(name);
 
@@ -153,7 +153,7 @@ public:
         mInput->setBackend(name);
     }
 
-    void setDatatype(const DataType &datatype) {
+    void setDatatype(const DataType &datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 90a6be7222ee1b3e377520f2bc612a72c2ba4ab3..75c901a1f00d26cc8b65192815c6fe93575723f0 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -87,14 +87,14 @@ public:
     //     return *in;
     // }
 
-    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 5 && "operator supports only 5 inputs");
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
 
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    constexpr void computeOutputDims() override final {
+    void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             for (std::size_t i = nbDataInputs(); i < nbInputs(); ++i) {
                 if(mInputs[i]->size() != mInputs[0]->dims()[1]) {
@@ -136,7 +136,7 @@ public:
     }
 
 
-    void setBackend(const std::string &name) {
+    void setBackend(const std::string &name) override {
         mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
         mOutput->setBackend(name);
 
@@ -147,7 +147,7 @@ public:
         mInputs[4]->setBackend(name);
     }
 
-    void setDatatype(const DataType &datatype) {
+    void setDatatype(const DataType &datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 22553080c6d4d8359149b3b34c5d040e5e900c4d..88b317331e4b78d1d853ccfdbfc4866badd280c4 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -100,14 +100,14 @@ public:
 
     // }
 
-    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 3 && "operator supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
 
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    constexpr void computeOutputDims() override final {
+    void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
@@ -160,7 +160,7 @@ public:
     }
 
 
-    void setBackend(const std::string &name) {
+    void setBackend(const std::string &name) override {
         mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
         mOutput->setBackend(name);
 
@@ -169,7 +169,7 @@ public:
         mInputs[2]->setBackend(name);
     }
 
-    void setDatatype(const DataType &datatype) {
+    void setDatatype(const DataType &datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 7a4db68bae2f42eb892dd7240463e7363753b5a7..2b80278994bc462b2b2c98b7aae68aa60f1e1e9b 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -92,14 +92,14 @@ class ConvDepthWise_Op : public Operator,
         return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
     }
 
-    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 3 && "operator supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
 
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    constexpr void computeOutputDims() override final {
+    void computeOutputDims() override final {
         if (!mInputs[0]->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
@@ -161,7 +161,7 @@ class ConvDepthWise_Op : public Operator,
 
 
 
-    void setBackend(const std::string &name) {
+    void setBackend(const std::string &name) override {
         mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
         mOutput->setBackend(name);
 
@@ -170,7 +170,7 @@ class ConvDepthWise_Op : public Operator,
         mInputs[2]->setBackend(name);
     }
 
-    void setDatatype(const DataType &datatype) {
+    void setDatatype(const DataType &datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 127d39a8bdfdd233cdac9e1ca6cf0bf85f656d16..6f5b2a44f9a090fba599a3a92c4fc0a7d21e3ccb 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -135,7 +135,7 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<FC_Op>::create(name)(*this);
         mOutput->setBackend(name);
 
@@ -145,7 +145,7 @@ public:
         mInputs[2]->setBackend(name);
     }
 
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
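
Adding `override` to `setBackend`/`setDatatype` throughout these operator headers lets the compiler verify that each declaration really overrides a virtual from the base `Operator`, which matters in the `-DWERROR=ON` builds above. A standalone sketch of the bug class this rules out (hypothetical names, not the real Aidge interface):

```cpp
#include <string>

struct OperatorBase {
    virtual void setBackend(const std::string& name) { (void)name; }
    virtual ~OperatorBase() = default;
};

struct MyOp : OperatorBase {
    // Signature typo: pass-by-value. Without `override` this compiles and
    // silently *hides* the virtual, so calls through OperatorBase* keep
    // hitting the base no-op; with `override` it fails to compile.
    // void setBackend(std::string name) override;  // error: does not override

    void setBackend(const std::string& name) override { mBackend = name; }

    std::string mBackend;
};
```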
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 1e51866177acf80441f236070aea9dee6145bc19..b86286eda8afbb278dac06d75f40d5166872a799 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -165,8 +165,8 @@ class GenericOperator_Op
 
     ~GenericOperator_Op() = default;
 
-    void setBackend(const std::string & /*name*/) { printf("setBackend: not available yet.\n"); }
-    void setDatatype(const DataType & /*datatype*/) { printf("setDatatype: not available yet.\n"); }
+    void setBackend(const std::string & /*name*/) override { printf("setBackend: not available yet.\n"); }
+    void setDatatype(const DataType & /*datatype*/) override { printf("setDatatype: not available yet.\n"); }
     void forward() override final { printf("forward: not available yet.\n"); }
     void backward() override final { printf("backward: not available yet.\n"); }
 
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index c6ee01239e1ed065587276c1891d26ba3899fe89..7a6fc4cbb8648b04aa42158c34d022b11775b84c 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -120,14 +120,14 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
         mOutput->setBackend(name);
 
         // FIXME: temporary workaround
         mInput->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index d0dadd847a59c9d2a1c0dd97f2f200437da71863..4c15f8ce369dbbda6ae0268cf3fd6762ab642232 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -127,7 +127,7 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<MatMul_Op>::create(name)(*this);
         mOutput->setBackend(name);
 
@@ -136,7 +136,7 @@ public:
         mInputs[1]->setBackend(name);
     }
 
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index eae7e30df039c0514443e567032427f7a6556360..c23378eeee92d2b8958a23446b622cc98a79cf69 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -85,7 +85,7 @@ public:
         return std::make_shared<MaxPooling_Op<DIM>>(*this);
     }
 
-    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 1 && "operator supports only 1 input");
         (void) inputIdx; // avoid unused warning
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
@@ -93,7 +93,7 @@ public:
         mInput = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    constexpr void computeOutputDims() override final {
+    void computeOutputDims() override final {
         if (!mInput->empty()) {
             std::array<DimSize_t, DIM + 2> outputDims = {};
 
@@ -146,7 +146,7 @@ public:
     }
 
 
-    void setBackend(const std::string &name) {
+    void setBackend(const std::string &name) override {
         mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
         mOutput->setBackend(name);
 
@@ -154,7 +154,7 @@ public:
         mInput->setBackend(name);
     }
 
-    void setDatatype(const DataType &datatype) {
+    void setDatatype(const DataType &datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 593192c9f402e2646ac94cff68aa0c805f5aecd1..07d932bd0501832c78df2c3530f657b57251183f 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -121,11 +121,11 @@ public:
 
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutput->dims(); }
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<Producer_Op>::create(name)(*this);
         mOutput->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
     }
 
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 433e353f05f8b4ffc3cfc0e047464e7f9257da02..0a7ec3b4fd9b51dbdb7cc95cd111337dad8553c4 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -108,14 +108,14 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<ReLU_Op>::create(name)(*this);
         mOutput->setBackend(name);
 
         // FIXME: temporary workaround
         mInput->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 0ea6ba39b3e4def2011ae5c7b2b9c348df5e2929..f18abaf320620bbffec646d1bbb752b834487dd4 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -130,13 +130,13 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<Scaling_Op>::create(name)(*this);
         mOutput->setBackend(name);
         // FIXME: temporary workaround
         mInput->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 898bae4c31bb2c41947523a86bfb9cd5c7b732b4..095ea0aadb9b9684a472b8a437ace6f5151bc4cf 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -108,14 +108,14 @@ public:
     }
 
 
-    void setBackend(const std::string& name) {
+    void setBackend(const std::string& name) override {
         mImpl = Registrar<Softmax_Op>::create(name)(*this);
         mOutput->setBackend(name);
 
         // FIXME: temporary workaround
         mInput->setBackend(name);
     }
-    void setDatatype(const DataType& datatype) {
+    void setDatatype(const DataType& datatype) override {
         mOutput->setDatatype(datatype);
 
         // FIXME: temporary workaround
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 60f586edf947cef0e139049814263a29b4d01e24..af03ee2861e81d81171ccc2ea14289f2ce3aa9e3 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -194,7 +194,7 @@ public:
      * generic type caster for std::any is not feasible.
      * The strategy here is to keep a copy of each attribute in py::object that is updated every time.
     */
-    py::object getAttrPy(const std::string& name) const {
+    py::object getAttrPy(const std::string& name) const override final {
         return mAttrsPy.at(name);
     };
 #endif
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index fb800cffbcff5d4113961f8e62977417336f2cb8..a549948ba7b0625fab3e4bce04362bef4098a612 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -87,7 +87,7 @@ public:
 
     // Runtime access with name
     template <typename R>
-    constexpr R& getAttr(const char* name) {
+    R& getAttr(const char* name) {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (strcmp(EnumStrings<ATTRS_ENUM>::data[i], name) == 0) {
                 return getAttr<R>(i);
@@ -98,7 +98,7 @@ public:
     }
 
     template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
-    constexpr typename std::enable_if<(SIZE > 0), R&>::type getAttr(std::size_t i) {
+    typename std::enable_if<(SIZE > 0), R&>::type getAttr(std::size_t i) {
         if (i == SIZE-1) {
             if (std::is_same<R, typename std::tuple_element<SIZE-1,std::tuple<T...>>::type>::value) {
                 return reinterpret_cast<R&>(std::get<SIZE-1>(mAttrs));
@@ -113,7 +113,7 @@ public:
     }
 
     template <typename R, std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
-    [[noreturn]] constexpr typename std::enable_if<(SIZE == 0), R&>::type getAttr(std::size_t /*i*/) {
+    [[noreturn]] typename std::enable_if<(SIZE == 0), R&>::type getAttr(std::size_t /*i*/) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
     }
 
@@ -128,7 +128,7 @@ public:
     }
 
     template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
-    [[noreturn]] constexpr typename std::enable_if<(SIZE == 0), const std::type_info&>::type getAttrType(std::size_t /*i*/) const {
+    [[noreturn]] typename std::enable_if<(SIZE == 0), const std::type_info&>::type getAttrType(std::size_t /*i*/) const {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
     }
 
@@ -140,7 +140,7 @@ public:
     ///     Generic Attributes API
     //////////////////////////////////////
     // Runtime existance check with name
-    constexpr bool hasAttr(const std::string& name) const override final {
+    bool hasAttr(const std::string& name) const override final {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
                 return true;
@@ -151,7 +151,7 @@ public:
     }
 
     // Runtime type access with name
-    constexpr std::string getAttrType(const std::string& name) const override final {
+    std::string getAttrType(const std::string& name) const override final {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
                 return getAttrType(i).name();
@@ -170,7 +170,7 @@ public:
     }
 
     #ifdef PYBIND
-    py::object getAttrPy(const std::string& name) const {
+    py::object getAttrPy(const std::string& name) const override {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
                 // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
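
Dropping `constexpr` in `StaticAttributes` looks related to the new clang jobs as well: a `constexpr` function that can never be constant-evaluated — here because every path runs a `strcmp` loop, performs a `reinterpret_cast`, or unconditionally hits `AIDGE_THROW_OR_ABORT` — is ill-formed, no diagnostic required, and Clang rejects such functions by default (`-Winvalid-constexpr` is an error there). A standalone sketch of the diagnostic, not Aidge code:

```cpp
#include <stdexcept>

// No invocation of this function can ever be a constant expression, so
// Clang reports: "constexpr function never produces a constant expression".
[[noreturn]] constexpr int& attrNotFound() {
    throw std::runtime_error("attribute not found");
}
```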
diff --git a/setup.py b/setup.py
index 16305afdfdfa5de2e328460d9e96c77eb96a9d98..4611ac78aad0436f663b1348d012bb3c3bd0054a 100644
--- a/setup.py
+++ b/setup.py
@@ -70,7 +70,8 @@ class CMakeBuild(build_ext):
 
         self.spawn(['cmake', str(cwd), param_py, '-DTEST=OFF', f'-DCMAKE_INSTALL_PREFIX:PATH={install_path}'])
         if not self.dry_run:
-            self.spawn(['make', 'all', 'install', '-j', max_jobs])
+            self.spawn(['cmake', '--build', '.', '-j', max_jobs])
+            self.spawn(['cmake', '--install', '.'])
         os.chdir(str(cwd))
 
         aidge_package = build_lib / (get_project_name())
@@ -81,7 +82,7 @@ class CMakeBuild(build_ext):
         # Copy all shared object files from build_temp/lib to aidge_package
         for root, _, files in os.walk(build_temp.absolute()):
             for file in files:
-                if file.endswith('.so') and (root != str(aidge_package.absolute())):
+                if (file.endswith('.so') or file.endswith('.pyd')) and (root != str(aidge_package.absolute())):
                     currentFile=os.path.join(root, file)
                     shutil.copy(currentFile, str(aidge_package.absolute()))
 
@@ -100,7 +101,6 @@ if __name__ == '__main__':
         long_description_content_type="text/markdown",
         long_description="\n".join(DOCLINES[2:]),
         classifiers=[c for c in CLASSIFIERS.split('\n') if c],
-        platforms=["Linux"],
         packages=find_packages(where="."),
         include_package_data=True,
         ext_modules=[CMakeExtension(get_project_name())],
diff --git a/src/recipies/FuseBatchNorm.cpp b/src/recipies/FuseBatchNorm.cpp
index 3a50ec3e7f83517267ef4ad04cb2c855f8f9df7e..e5e59582af68f66e6c54d09fac4cb1cc028493dd 100644
--- a/src/recipies/FuseBatchNorm.cpp
+++ b/src/recipies/FuseBatchNorm.cpp
@@ -79,10 +79,10 @@ void Aidge::fuseBatchNorm(std::set<std::shared_ptr<Node>> nodes){
         printf("variance < 1e-12 for all outputs! Is the network correctly trained?\n");
     }
 
-    const DimSize_t channelsSize = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("InChannels");
+    const DimSize_t channelsSize = std::dynamic_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<DimSize_t>("InChannels");
 
     // TODO : suppose we have Conv2D ...
-    const std::array<DimSize_t, 2> kernelDims = std::static_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+    const std::array<DimSize_t, 2> kernelDims = std::dynamic_pointer_cast<Conv_Op<2>>(conv->getOperator())->getAttr<std::array<DimSize_t, 2>>("KernelDims");
 
     std::shared_ptr<Tensor> weight  = conv->input(1).first->getOperator()->getOutput(conv->input(1).second);
     std::shared_ptr<Tensor> bias  = conv->input(2).first->getOperator()->getOutput(conv->input(2).second);
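
The `static_pointer_cast` → `dynamic_pointer_cast` change in `fuseBatchNorm` trades silent undefined behaviour for a checkable failure: if the matched node's operator is ever not a `Conv_Op<2>`, the cast now yields `nullptr` instead of a bogus pointer (the result is still dereferenced unchecked here, so a follow-up assert would complete the guard). Standalone sketch with hypothetical types:

```cpp
#include <cassert>
#include <memory>

struct Operator { virtual ~Operator() = default; };
struct ConvOp : Operator { int inChannels = 64; };
struct ReluOp : Operator {};

int main() {
    std::shared_ptr<Operator> op = std::make_shared<ReluOp>();

    // static_pointer_cast compiles here too, but dereferencing its result
    // would be undefined behaviour, since *op is not a ConvOp.
    // dynamic_pointer_cast makes the mismatch observable instead:
    auto conv = std::dynamic_pointer_cast<ConvOp>(op);
    assert(conv == nullptr);  // wrong type -> null, not UB

    return 0;
}
```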