From fe58aba03030daa7175bbe98d10f866869c74e0a Mon Sep 17 00:00:00 2001
From: Wissam Boussella <wissam.boussella@cea.fr>
Date: Thu, 23 Jan 2025 17:14:34 +0100
Subject: [PATCH 1/7] Conv forwardDims: support both NHWC and NCHW formats for input and output

---
 include/aidge/operator/Conv.hpp |  4 ++
 src/operator/Conv.cpp           | 77 ++++++++++++++++++++-------------
 2 files changed, 50 insertions(+), 31 deletions(-)

diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 135ff8860..e2faeb6ac 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -172,6 +172,8 @@ public:
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of input channel imposed.");
         }
+        if(getInput(1)->dataFormat()==Aidge::DataFormat::NHWC) 
+            return getInput(1)->template dims<DIM+2>()[DIM+1];
         return getInput(1)->template dims<DIM+2>()[1];
     }
 
@@ -184,6 +186,8 @@ public:
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of output channel imposed.");
         }
+        if(getInput(1)->dataFormat()==Aidge::DataFormat::NHWC) 
+            return getInput(1)->template dims<DIM+2>()[DIM+1];
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 836c47645..746c32dd4 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -40,42 +40,57 @@ Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    if (inputsAssociated()) {
-        // first check weight since it defines inChannels and outChannels
-        AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
-                    "Wrong weight Tensor dimension: {} for Conv{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
-        // check data
+    if (!inputsAssociated()) 
+        return false;
+    // first check weight since it defines inChannels and outChannels
+    if(getInput(0)->dataFormat() == Aidge::DataFormat::NHWC){
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
-                    (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
-                    "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), inChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
-        // check optional bias
-        if(getInput(2))
-            AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
-                    (getInput(2)->template dims<1>()[0] == outChannels()),
-                    "Wrong bias size ({}) for Conv operator. Expected dims are [{}].", getInput(2)->dims(), outChannels());
-
-        std::array<DimSize_t, DIM + 2> outputDims{};
-        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-
-        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
-            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvAttr::DilationDims>()[dim] *
-                                                    (mAttributes->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
-                                            1;
-
-            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                    floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                            static_cast<float>(mAttributes->template getAttr<ConvAttr::StrideDims>()[dim])));
-        }
+                (getInput(0)->template dims<DIM+2>()[DIM+1] == inChannels()),
+                "Wrong input size ({}) for Conv operator. Expected dims are [{}, {}, x].", getInput(0)->dims(), inChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
+    }
+    else{ //For dataFormat in NCHW or Default Format
+        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
+                (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
+                "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), inChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
+    }
 
-        outputDims[1] = outChannels();
-        outputDims[0] = inputDims[0];
-        mOutputs[0]->resize(outputDims);
-        return true;
+    AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
+                "Wrong weight Tensor dimension: {} for Conv{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
+
+    if(getInput(2))
+        AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
+                (getInput(2)->template dims<1>()[0] == outChannels()),
+                "Wrong bias size ({}) for Conv operator. Expected dims are [{}].", getInput(2)->dims(), outChannels());
+
+    const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+    std::array<DimSize_t, DIM + 2> outputDims;
+
+    
+    unsigned int in_dims_index = (getInput(0)->dataFormat() == Aidge::DataFormat::NHWC) ? 1 : 2;
+    unsigned int out_dims_index = (getOutput(0)->dataFormat() == Aidge::DataFormat::NHWC) ? 1 : 2;
+
+    for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvAttr::KernelDims>().size(); ++dim) {
+        const DimSize_t kernelExtent = mAttributes->template getAttr<ConvAttr::DilationDims>()[dim] *
+                                    (mAttributes->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
+                                    1;
+        
+        outputDims[dim + out_dims_index] = 1 + static_cast<DimSize_t>(
+            floor(static_cast<float>(inputDims[dim + in_dims_index] - kernelExtent) /
+                static_cast<float>(mAttributes->template getAttr<ConvAttr::StrideDims>()[dim]))
+        );
     }
 
-    return false;
-}
+    if(getOutput(0)->dataFormat() == Aidge::DataFormat::NHWC) 
+        outputDims[DIM+1] = outChannels();
+    else 
+        outputDims[1] = outChannels();
 
+    outputDims[0] = inputDims[0];
+    mOutputs[0]->resize(outputDims);
+    return true;
+    
+    
+}
 
 template <Aidge::DimIdx_t DIM>
 std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>
-- 
GitLab


From 6c301e2f6ca3390b345cbe6abc4df8e308884bb6 Mon Sep 17 00:00:00 2001
From: Wissam Boussella <wissam.boussella@cea.fr>
Date: Tue, 28 Jan 2025 15:33:16 +0100
Subject: [PATCH 2/7] New unit tests for Conv forwardDims and fixes for the
 NHWC format

---
 include/aidge/operator/Conv.hpp      |  3 +-
 src/operator/Conv.cpp                |  2 +-
 unit_tests/operator/Test_Conv_Op.cpp | 84 ++++++++++++++++++++++++++++
 3 files changed, 86 insertions(+), 3 deletions(-)

diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index e2faeb6ac..c6bbd0e40 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -172,6 +172,7 @@ public:
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of input channel imposed.");
         }
+        
         if(getInput(1)->dataFormat()==Aidge::DataFormat::NHWC) 
             return getInput(1)->template dims<DIM+2>()[DIM+1];
         return getInput(1)->template dims<DIM+2>()[1];
@@ -186,8 +187,6 @@ public:
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of output channel imposed.");
         }
-        if(getInput(1)->dataFormat()==Aidge::DataFormat::NHWC) 
-            return getInput(1)->template dims<DIM+2>()[DIM+1];
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 746c32dd4..91aaad8ee 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -63,7 +63,7 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
                 "Wrong bias size ({}) for Conv operator. Expected dims are [{}].", getInput(2)->dims(), outChannels());
 
     const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-    std::array<DimSize_t, DIM + 2> outputDims;
+    std::array<DimSize_t, DIM + 2> outputDims{};
 
     
     unsigned int in_dims_index = (getInput(0)->dataFormat() == Aidge::DataFormat::NHWC) ? 1 : 2;
diff --git a/unit_tests/operator/Test_Conv_Op.cpp b/unit_tests/operator/Test_Conv_Op.cpp
index bc24fc808..103b8c624 100644
--- a/unit_tests/operator/Test_Conv_Op.cpp
+++ b/unit_tests/operator/Test_Conv_Op.cpp
@@ -22,6 +22,90 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+TEST_CASE("[core/operator] Conv_Op(ForwardDims) ", "[Operator][ForwardDims][Conv]") {
+    SECTION("I:NCHW O:NCHW W:NCHW"){
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,3,224,450})); 
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,3,3,4})); // Out_ch, In_ch_h,W,H
+
+        const std::vector<std::size_t> expectedOutputDims({16,4,222,447});
+        auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+
+        //Set DataFormat 
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NCHW);
+        input->setDataFormat(Aidge::DataFormat::NCHW);
+        weight->setDataFormat(Aidge::DataFormat::NCHW);
+
+        //Set inputs
+        conv1.setInput(1,weight);
+        conv1.setInput(0,input);
+
+        REQUIRE(conv1.forwardDims());
+        REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    }
+
+    SECTION("I:NCHW O:NHWC W:NCHW") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,3,224,450}));
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,3,3,4}));
+
+        const std::vector<std::size_t> expectedOutputDims({16,222,447,4});
+        auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+
+        // Set DataFormat
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
+        input->setDataFormat(Aidge::DataFormat::NCHW);
+        weight->setDataFormat(Aidge::DataFormat::NCHW);
+
+        // Set inputs
+        conv1.setInput(1, weight);
+        conv1.setInput(0, input);
+
+        REQUIRE(conv1.forwardDims());
+        REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    }
+
+    SECTION("I:NHWC O:NCHW W:NHWC") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,224,450,3}));
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,4,3,3})); // H, W, In_ch, Out_ch
+
+        const std::vector<std::size_t> expectedOutputDims({16,4,222,447});
+        auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+
+        // Set DataFormat
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NCHW);
+        input->setDataFormat(Aidge::DataFormat::NHWC);
+        weight->setDataFormat(Aidge::DataFormat::NHWC);
+
+        // Set inputs
+        conv1.setInput(1, weight);
+        conv1.setInput(0, input);
+
+        REQUIRE(conv1.forwardDims());
+        REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    }
+
+    SECTION("I:NHWC O:NHWC W:NCHW") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,224,450,3}));
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,3,3,4})); // Out_ch, In_ch, H, W
+
+        const std::vector<std::size_t> expectedOutputDims({16,222,447,4});
+        auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+
+        // Set DataFormat
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
+        input->setDataFormat(Aidge::DataFormat::NHWC);
+        weight->setDataFormat(Aidge::DataFormat::NCHW);
+
+        // Set inputs
+        conv1.setInput(1, weight);
+        conv1.setInput(0, input);
+
+        REQUIRE(conv1.forwardDims());
+        REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    }
+
+}
+
+
 TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeReceptiveField][Conv]") {
     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
     auto conv1 = Conv(3, 32, {5, 5}, "conv1");          // output dims: {16, 32, 220, 220}
-- 
GitLab


From a9acb2f86f949d74f7a660d1eddcbc6c4045a2a7 Mon Sep 17 00:00:00 2001
From: Wissam Boussella <wissam.boussella@cea.fr>
Date: Wed, 19 Feb 2025 16:06:52 +0100
Subject: [PATCH 3/7] new comments for Conv.hpp and Conv.cpp

---
 include/aidge/operator/Conv.hpp | 3 +++
 src/operator/Conv.cpp           | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index c6bbd0e40..f9c910928 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -173,8 +173,10 @@ public:
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of input channel imposed.");
         }
         
+        // check format
         if(getInput(1)->dataFormat()==Aidge::DataFormat::NHWC) 
             return getInput(1)->template dims<DIM+2>()[DIM+1];
+        // default format is NCHW
         return getInput(1)->template dims<DIM+2>()[1];
     }
 
@@ -187,6 +189,7 @@ public:
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of output channel imposed.");
         }
+        // first weight dimension for both NCHW (Cout,Cin,H,W) and NHWC (Cout,H,W,Cin) data format
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 91aaad8ee..2077cab52 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -46,7 +46,7 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
     if(getInput(0)->dataFormat() == Aidge::DataFormat::NHWC){
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
                 (getInput(0)->template dims<DIM+2>()[DIM+1] == inChannels()),
-                "Wrong input size ({}) for Conv operator. Expected dims are [{}, {}, x].", getInput(0)->dims(), inChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
+                "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), fmt::join(std::vector<std::string>(DIM, "x"), ", "), inChannels());
     }
     else{ //For dataFormat in NCHW or Default Format
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
-- 
GitLab


From d2689b32cd2c188b953c5c9be26f3ce0d6966569 Mon Sep 17 00:00:00 2001
From: Wissam Boussella <wissam.boussella@cea.fr>
Date: Thu, 20 Feb 2025 10:44:00 +0100
Subject: [PATCH 4/7] Refactor setDataFormat method to improve data type
 handling and transformation logic

---
 include/aidge/data/Tensor.hpp | 45 +++++++++++++++++++++++++++--------
 1 file changed, 35 insertions(+), 10 deletions(-)

diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 7aa2ed52b..5e4a817bb 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -452,19 +452,44 @@ public:
     }
 
     /**
-     * @brief Set the DataFormat of the Tensor and transpose data, only
-     * if the Tensor has already been initialized and copyTrans is true.
-     * In this case, a transposition occurs only if both previous format and
-     * new format are different from DataFormat::Default.
-     * @param df New DataFormat
-     * @param copyTrans If true (default), when both previous format and new
-     *                  format are different from DataFormat::Default, previous
-     *                  data is copy-transposed.
+     * @brief Set the DataType of the Tensor and converts data
+     * if the Tensor has already been initialized and copyCast is true.
+     * @param dt DataType
+     * @param copyCast If true (default), previous data is copy-casted. Otherwise
+     * previous data is lost.
      */
     void setDataFormat(const DataFormat df, bool copyTrans = true) {
-        if (mImpl && copyTrans && (dataFormat() != df) && df != DataFormat::Default && dataFormat() != DataFormat::Default) {
-            copyTranspose(*this, getDataFormatTranspose(dataFormat(), df));
+        if (!copyTrans || df == dataFormat()) {
+            mDataFormat = df;
+            return;
+        }
+        // Skip transformation if both formats are Default or NNCH
+        if ((df == DataFormat::Default && dataFormat() == DataFormat::Default) || df == DataFormat::NCHW && dataFormat() == DataFormat::NCHW) {
+            mDataFormat = df;
+            return;
+        }
+    
+        const auto transpose = getDataFormatTranspose(dataFormat(), df);
+        
+        if (mImpl) {
+            copyTranspose(*this, transpose);
+        } else {
+            std::vector<DimSize_t> newDims;
+            newDims.reserve(nbDims());
+            for (std::size_t i = 0; i < nbDims(); ++i) {
+                newDims.push_back(dims()[transpose[i]]);
+            }
+            mDims = std::move(newDims);
+
+            std::vector<std::size_t> newStrides(nbDims(), 1);
+            for (size_t i = 0; i < nbDims(); ++i) {
+                for (size_t j = i + 1; j < nbDims(); ++j) {
+                    newStrides[i] *= newDims[j];
+                }
+            }
+            mStrides = std::move(newStrides);
         }
+    
         mDataFormat = df;
     }
 
-- 
GitLab


From a9fbc969e6af0796e844c8ecd4525b751791334b Mon Sep 17 00:00:00 2001
From: Wissam Boussella <wissam.boussella@cea.fr>
Date: Thu, 20 Feb 2025 11:56:56 +0100
Subject: [PATCH 5/7] Fix setDataFormat so it works when no implementation (impl) is set

---
 include/aidge/data/Tensor.hpp | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 5e4a817bb..5c184e961 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -452,19 +452,23 @@ public:
     }
 
     /**
-     * @brief Set the DataType of the Tensor and converts data
-     * if the Tensor has already been initialized and copyCast is true.
-     * @param dt DataType
-     * @param copyCast If true (default), previous data is copy-casted. Otherwise
-     * previous data is lost.
+     * @brief Set the DataFormat of the Tensor and transpose data, only
+     * if the Tensor has already been initialized and copyTrans is true.
+     * In this case, a transposition occurs only if both previous format and
+     * new format are different from DataFormat::Default.
+     * @param df New DataFormat
+     * @param copyTrans If true (default), when both previous format and new
+     *                  format are different from DataFormat::Default, previous
+     *                  data is copy-transposed.
      */
     void setDataFormat(const DataFormat df, bool copyTrans = true) {
         if (!copyTrans || df == dataFormat()) {
             mDataFormat = df;
             return;
         }
-        // Skip transformation if both formats are Default or NNCH
-        if ((df == DataFormat::Default && dataFormat() == DataFormat::Default) || df == DataFormat::NCHW && dataFormat() == DataFormat::NCHW) {
+    
+        if ((df == DataFormat::Default && dataFormat() == DataFormat::Default) || 
+            (df == DataFormat::NCHW && dataFormat() == DataFormat::NCHW)) {
             mDataFormat = df;
             return;
         }
@@ -475,24 +479,22 @@ public:
             copyTranspose(*this, transpose);
         } else {
             std::vector<DimSize_t> newDims;
-            newDims.reserve(nbDims());
-            for (std::size_t i = 0; i < nbDims(); ++i) {
+            for (std::size_t i = 0; i < dims().size(); ++i) {
                 newDims.push_back(dims()[transpose[i]]);
             }
-            mDims = std::move(newDims);
-
-            std::vector<std::size_t> newStrides(nbDims(), 1);
-            for (size_t i = 0; i < nbDims(); ++i) {
-                for (size_t j = i + 1; j < nbDims(); ++j) {
+    
+            std::vector<std::size_t> newStrides(dims().size(), 1);
+            for (size_t i = 0; i < dims().size(); ++i) {
+                for (size_t j = i + 1; j < dims().size(); ++j) {
                     newStrides[i] *= newDims[j];
                 }
             }
+            mDims = std::move(newDims);
             mStrides = std::move(newStrides);
         }
     
         mDataFormat = df;
     }
-
     /**
      * @brief Get the Impl object
      * @return constexpr const std::shared_ptr<TensorImpl>&
-- 
GitLab


From a442b39c0cc45b83a4880c5be66742695aa3ffc1 Mon Sep 17 00:00:00 2001
From: Wissam Boussella <wissam.boussella@cea.fr>
Date: Thu, 20 Feb 2025 15:37:16 +0100
Subject: [PATCH 6/7] New tests

---
 unit_tests/operator/Test_Conv_Op.cpp | 130 +++++++++++++++++++--------
 1 file changed, 95 insertions(+), 35 deletions(-)

diff --git a/unit_tests/operator/Test_Conv_Op.cpp b/unit_tests/operator/Test_Conv_Op.cpp
index 103b8c624..002e52522 100644
--- a/unit_tests/operator/Test_Conv_Op.cpp
+++ b/unit_tests/operator/Test_Conv_Op.cpp
@@ -42,66 +42,126 @@ TEST_CASE("[core/operator] Conv_Op(ForwardDims) ", "[Operator][ForwardDims][Conv
         REQUIRE(conv1.forwardDims());
         REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
     }
-
-    SECTION("I:NCHW O:NHWC W:NCHW") {
-        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,3,224,450}));
-        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,3,3,4}));
-
-        const std::vector<std::size_t> expectedOutputDims({16,222,447,4});
+    SECTION("I:NCHW O:NCHW W:NHWC") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 3, 224, 450})); 
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 3, 4})); // Out_ch, H, W, In_ch
+    
+        const std::vector<std::size_t> expectedOutputDims({16, 4, 222, 447});
         auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
-
-        // Set DataFormat
-        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
+    
+        // Set DataFormat 
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NCHW);
         input->setDataFormat(Aidge::DataFormat::NCHW);
-        weight->setDataFormat(Aidge::DataFormat::NCHW);
-
+        weight->setDataFormat(Aidge::DataFormat::NHWC); // NHWC weight format
+    
         // Set inputs
         conv1.setInput(1, weight);
         conv1.setInput(0, input);
-
+    
         REQUIRE(conv1.forwardDims());
         REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
     }
-
-    SECTION("I:NHWC O:NCHW W:NHWC") {
-        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,224,450,3}));
-        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,4,3,3})); // H, W, In_ch, Out_ch
-
-        const std::vector<std::size_t> expectedOutputDims({16,4,222,447});
+    
+    SECTION("I:NHWC O:NHWC W:NCHW") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 3, 224, 450})); 
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 3, 4})); // Out_ch, In_ch, H, W
+    
+        const std::vector<std::size_t> expectedOutputDims({16, 222, 447, 4});
         auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
-
-        // Set DataFormat
-        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NCHW);
+    
+        // Set DataFormat 
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
         input->setDataFormat(Aidge::DataFormat::NHWC);
-        weight->setDataFormat(Aidge::DataFormat::NHWC);
-
+        weight->setDataFormat(Aidge::DataFormat::NCHW); // NCHW weight format
+    
         // Set inputs
         conv1.setInput(1, weight);
         conv1.setInput(0, input);
-
+    
         REQUIRE(conv1.forwardDims());
         REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
     }
-
-    SECTION("I:NHWC O:NHWC W:NCHW") {
-        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,224,450,3}));
-        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,3,3,4})); // Out_ch, In_ch, H, W
-
-        const std::vector<std::size_t> expectedOutputDims({16,222,447,4});
+    
+    SECTION("I:NHWC O:NHWC W:NHWC") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 3,224, 450})); 
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 3, 4})); // (Out_ch, H, W, In_ch)
+    
+        const std::vector<std::size_t> expectedOutputDims({16, 222, 447, 4});
         auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
-
-        // Set DataFormat
+    
+        // Set DataFormat 
         conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
         input->setDataFormat(Aidge::DataFormat::NHWC);
-        weight->setDataFormat(Aidge::DataFormat::NCHW);
-
+        weight->setDataFormat(Aidge::DataFormat::NHWC);
+    
         // Set inputs
         conv1.setInput(1, weight);
         conv1.setInput(0, input);
-
+    
         REQUIRE(conv1.forwardDims());
         REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
     }
+    
+
+    // SECTION("I:NCHW O:NHWC W:NCHW") {
+    //     std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,3,224,450}));
+    //     std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,3,3,4}));
+
+    //     const std::vector<std::size_t> expectedOutputDims({16,222,447,4});
+    //     auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+
+    //     // Set DataFormat
+    //     conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
+    //     input->setDataFormat(Aidge::DataFormat::NCHW);
+    //     weight->setDataFormat(Aidge::DataFormat::NCHW);
+
+    //     // Set inputs
+    //     conv1.setInput(1, weight);
+    //     conv1.setInput(0, input);
+
+    //     REQUIRE(conv1.forwardDims());
+    //     REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    // }
+
+    // SECTION("I:NHWC O:NCHW W:NHWC") {
+    //     std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,224,450,3}));
+    //     std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,4,3,3})); // H, W, In_ch, Out_ch
+
+    //     const std::vector<std::size_t> expectedOutputDims({16,4,222,447});
+    //     auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+
+    //     // Set DataFormat
+    //     conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NCHW);
+    //     input->setDataFormat(Aidge::DataFormat::NHWC);
+    //     weight->setDataFormat(Aidge::DataFormat::NHWC);
+
+    //     // Set inputs
+    //     conv1.setInput(1, weight);
+    //     conv1.setInput(0, input);
+
+    //     REQUIRE(conv1.forwardDims());
+    //     REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    // }
+
+    // SECTION("I:NHWC O:NHWC W:NCHW") {
+    //     std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,224,450,3}));
+    //     std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,3,3,4})); // Out_ch, In_ch, H, W
+
+    //     const std::vector<std::size_t> expectedOutputDims({16,222,447,4});
+    //     auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+
+    //     // Set DataFormat
+    //     conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
+    //     input->setDataFormat(Aidge::DataFormat::NHWC);
+    //     weight->setDataFormat(Aidge::DataFormat::NCHW);
+
+    //     // Set inputs
+    //     conv1.setInput(1, weight);
+    //     conv1.setInput(0, input);
+
+    //     REQUIRE(conv1.forwardDims());
+    //     REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    // }
 
 }
 
-- 
GitLab


From 7c87a79d712a4b15c8253c2b5e71fef377f95789 Mon Sep 17 00:00:00 2001
From: Wissam Boussella <wissam.boussella@cea.fr>
Date: Thu, 20 Feb 2025 16:18:10 +0100
Subject: [PATCH 7/7] Remove redundancy in setDataFormat and delete old tests

---
 include/aidge/data/Tensor.hpp        |  6 ---
 unit_tests/operator/Test_Conv_Op.cpp | 61 ----------------------------
 2 files changed, 67 deletions(-)

diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 5c184e961..785caaa0e 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -467,12 +467,6 @@ public:
             return;
         }
     
-        if ((df == DataFormat::Default && dataFormat() == DataFormat::Default) || 
-            (df == DataFormat::NCHW && dataFormat() == DataFormat::NCHW)) {
-            mDataFormat = df;
-            return;
-        }
-    
         const auto transpose = getDataFormatTranspose(dataFormat(), df);
         
         if (mImpl) {
diff --git a/unit_tests/operator/Test_Conv_Op.cpp b/unit_tests/operator/Test_Conv_Op.cpp
index 002e52522..de33ddd5a 100644
--- a/unit_tests/operator/Test_Conv_Op.cpp
+++ b/unit_tests/operator/Test_Conv_Op.cpp
@@ -101,67 +101,6 @@ TEST_CASE("[core/operator] Conv_Op(ForwardDims) ", "[Operator][ForwardDims][Conv
         REQUIRE(conv1.forwardDims());
         REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
     }
-    
-
-    // SECTION("I:NCHW O:NHWC W:NCHW") {
-    //     std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,3,224,450}));
-    //     std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,3,3,4}));
-
-    //     const std::vector<std::size_t> expectedOutputDims({16,222,447,4});
-    //     auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
-
-    //     // Set DataFormat
-    //     conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
-    //     input->setDataFormat(Aidge::DataFormat::NCHW);
-    //     weight->setDataFormat(Aidge::DataFormat::NCHW);
-
-    //     // Set inputs
-    //     conv1.setInput(1, weight);
-    //     conv1.setInput(0, input);
-
-    //     REQUIRE(conv1.forwardDims());
-    //     REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
-    // }
-
-    // SECTION("I:NHWC O:NCHW W:NHWC") {
-    //     std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,224,450,3}));
-    //     std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,4,3,3})); // H, W, In_ch, Out_ch
-
-    //     const std::vector<std::size_t> expectedOutputDims({16,4,222,447});
-    //     auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
-
-    //     // Set DataFormat
-    //     conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NCHW);
-    //     input->setDataFormat(Aidge::DataFormat::NHWC);
-    //     weight->setDataFormat(Aidge::DataFormat::NHWC);
-
-    //     // Set inputs
-    //     conv1.setInput(1, weight);
-    //     conv1.setInput(0, input);
-
-    //     REQUIRE(conv1.forwardDims());
-    //     REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
-    // }
-
-    // SECTION("I:NHWC O:NHWC W:NCHW") {
-    //     std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,224,450,3}));
-    //     std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,3,3,4})); // Out_ch, In_ch, H, W
-
-    //     const std::vector<std::size_t> expectedOutputDims({16,222,447,4});
-    //     auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
-
-    //     // Set DataFormat
-    //     conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
-    //     input->setDataFormat(Aidge::DataFormat::NHWC);
-    //     weight->setDataFormat(Aidge::DataFormat::NCHW);
-
-    //     // Set inputs
-    //     conv1.setInput(1, weight);
-    //     conv1.setInput(0, input);
-
-    //     REQUIRE(conv1.forwardDims());
-    //     REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
-    // }
 
 }
 
-- 
GitLab