diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 7aa2ed52b95e11598a2975558212b00a85dac598..785caaa0e8959ba34d438913a4c0e5bad3df0f86 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -462,12 +462,33 @@ public:
      *                  data is copy-transposed.
      */
     void setDataFormat(const DataFormat df, bool copyTrans = true) {
-        if (mImpl && copyTrans && (dataFormat() != df) && df != DataFormat::Default && dataFormat() != DataFormat::Default) {
-            copyTranspose(*this, getDataFormatTranspose(dataFormat(), df));
+        if (!copyTrans || df == dataFormat() || df == DataFormat::Default || dataFormat() == DataFormat::Default) {
+            mDataFormat = df;
+            return;
         }
+
+        const auto transpose = getDataFormatTranspose(dataFormat(), df);
+
+        if (mImpl) {
+            copyTranspose(*this, transpose);
+        } else {
+            std::vector<DimSize_t> newDims;
+            for (std::size_t i = 0; i < dims().size(); ++i) {
+                newDims.push_back(dims()[transpose[i]]);
+            }
+
+            std::vector<std::size_t> newStrides(dims().size(), 1);
+            for (size_t i = 0; i < dims().size(); ++i) {
+                for (size_t j = i + 1; j < dims().size(); ++j) {
+                    newStrides[i] *= newDims[j];
+                }
+            }
+            mDims = std::move(newDims);
+            mStrides = std::move(newStrides);
+        }
+
         mDataFormat = df;
     }
-
     /**
      * @brief Get the Impl object
      * @return constexpr const std::shared_ptr<TensorImpl>&
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 135ff8860706d245ae6095322d6cf017456cc2e1..f9c9109282cb90dadfa9b26d6f830faf9fdecd7c 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -172,6 +172,11 @@ public:
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of input channel imposed.");
         }
+
+        // check format
+        if(getInput(1)->dataFormat()==Aidge::DataFormat::NHWC) 
+            return getInput(1)->template dims<DIM+2>()[DIM+1];
+        // default format is NCHW
         return getInput(1)->template dims<DIM+2>()[1];
     }
 
@@ -184,6 +189,7 @@ public:
         if (!getInput(1)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Convolution operator has no weight Tensor associated so no specific number of output channel imposed.");
         }
+        // first weight dimension for both NCHW (Cout,Cin,H,W) and NHWC (Cout,H,W,Cin) data format
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 836c47645c20ff23539b836af8593cddfbb48498..2077cab52f613780e77bba80efacb41d06a7f3cf 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -40,42 +40,57 @@ Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    if (inputsAssociated()) {
-        // first check weight since it defines inChannels and outChannels
-        AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
-                    "Wrong weight Tensor dimension: {} for Conv{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
-        // check data
+    if (!inputsAssociated()) 
+        return false;
+    // check the data input's channel count against inChannels(), which is derived from the weight tensor
+    if(getInput(0)->dataFormat() == Aidge::DataFormat::NHWC){
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
-                    (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
-                    "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), inChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
-        // check optional bias
-        if(getInput(2))
-            AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
-                    (getInput(2)->template dims<1>()[0] == outChannels()),
-                    "Wrong bias size ({}) for Conv operator. Expected dims are [{}].", getInput(2)->dims(), outChannels());
-
-        std::array<DimSize_t, DIM + 2> outputDims{};
-        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-
-        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
-            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvAttr::DilationDims>()[dim] *
-                                                    (mAttributes->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
-                                            1;
-
-            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                    floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                            static_cast<float>(mAttributes->template getAttr<ConvAttr::StrideDims>()[dim])));
-        }
+                (getInput(0)->template dims<DIM+2>()[DIM+1] == inChannels()),
+                "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), fmt::join(std::vector<std::string>(DIM, "x"), ", "), inChannels());
+    }
+    else{ //For dataFormat in NCHW or Default Format
+        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
+                (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
+                "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), inChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
+    }
 
-        outputDims[1] = outChannels();
-        outputDims[0] = inputDims[0];
-        mOutputs[0]->resize(outputDims);
-        return true;
+    AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
+                "Wrong weight Tensor dimension: {} for Conv{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
+
+    if(getInput(2))
+        AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
+                (getInput(2)->template dims<1>()[0] == outChannels()),
+                "Wrong bias size ({}) for Conv operator. Expected dims are [{}].", getInput(2)->dims(), outChannels());
+
+    const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+    std::array<DimSize_t, DIM + 2> outputDims{};
+
+
+    unsigned int in_dims_index = (getInput(0)->dataFormat() == Aidge::DataFormat::NHWC) ? 1 : 2;
+    unsigned int out_dims_index = (getOutput(0)->dataFormat() == Aidge::DataFormat::NHWC) ? 1 : 2;
+
+    for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvAttr::KernelDims>().size(); ++dim) {
+        const DimSize_t kernelExtent = mAttributes->template getAttr<ConvAttr::DilationDims>()[dim] *
+                                    (mAttributes->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
+                                    1;
+
+        outputDims[dim + out_dims_index] = 1 + static_cast<DimSize_t>(
+            floor(static_cast<float>(inputDims[dim + in_dims_index] - kernelExtent) /
+                static_cast<float>(mAttributes->template getAttr<ConvAttr::StrideDims>()[dim]))
+        );
     }
 
-    return false;
-}
+    if(getOutput(0)->dataFormat() == Aidge::DataFormat::NHWC) 
+        outputDims[DIM+1] = outChannels();
+    else 
+        outputDims[1] = outChannels();
 
+    outputDims[0] = inputDims[0];
+    mOutputs[0]->resize(outputDims);
+    return true;
+
+
+}
 
 template <Aidge::DimIdx_t DIM>
 std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>
diff --git a/unit_tests/operator/Test_Conv_Op.cpp b/unit_tests/operator/Test_Conv_Op.cpp
index bc24fc8081d78dedf853450ff648b6d91b47c1dc..de33ddd5a7613cde16b96b23722f6d2ab412f373 100644
--- a/unit_tests/operator/Test_Conv_Op.cpp
+++ b/unit_tests/operator/Test_Conv_Op.cpp
@@ -22,6 +22,89 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+TEST_CASE("[core/operator] Conv_Op(ForwardDims) ", "[Operator][ForwardDims][Conv]") {
+    SECTION("I:NCHW O:NCHW W:NCHW"){
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16,3,224,450})); 
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4,3,3,4})); // Out_ch, In_ch, H, W
+
+        const std::vector<std::size_t> expectedOutputDims({16,4,222,447});
+        auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+
+        //Set DataFormat 
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NCHW);
+        input->setDataFormat(Aidge::DataFormat::NCHW);
+        weight->setDataFormat(Aidge::DataFormat::NCHW);
+
+        //Set inputs
+        conv1.setInput(1,weight);
+        conv1.setInput(0,input);
+
+        REQUIRE(conv1.forwardDims());
+        REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    }
+    SECTION("I:NCHW O:NCHW W:NHWC") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 3, 224, 450})); 
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 4, 3})); // Out_ch, H, W, In_ch
+    
+        const std::vector<std::size_t> expectedOutputDims({16, 4, 222, 447});
+        auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+    
+        // Set DataFormat 
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NCHW);
+        input->setDataFormat(Aidge::DataFormat::NCHW);
+        weight->setDataFormat(Aidge::DataFormat::NHWC); // NHWC weight format
+    
+        // Set inputs
+        conv1.setInput(1, weight);
+        conv1.setInput(0, input);
+    
+        REQUIRE(conv1.forwardDims());
+        REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    }
+    
+    SECTION("I:NHWC O:NHWC W:NCHW") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 224, 450, 3})); // N, H, W, C
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 3, 4})); // Out_ch, In_ch, H, W
+    
+        const std::vector<std::size_t> expectedOutputDims({16, 222, 447, 4});
+        auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+    
+        // Set DataFormat 
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
+        input->setDataFormat(Aidge::DataFormat::NHWC);
+        weight->setDataFormat(Aidge::DataFormat::NCHW); // NCHW weight format
+    
+        // Set inputs
+        conv1.setInput(1, weight);
+        conv1.setInput(0, input);
+    
+        REQUIRE(conv1.forwardDims());
+        REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    }
+    
+    SECTION("I:NHWC O:NHWC W:NHWC") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(std::vector<std::size_t>({16, 224, 450, 3})); // N, H, W, C
+        std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(std::vector<std::size_t>({4, 3, 4, 3})); // (Out_ch, H, W, In_ch)
+    
+        const std::vector<std::size_t> expectedOutputDims({16, 222, 447, 4});
+        auto conv1 = Conv_Op<2>(std::array<size_t, 2>{3, 4});
+    
+        // Set DataFormat 
+        conv1.getOutput(0)->setDataFormat(Aidge::DataFormat::NHWC);
+        input->setDataFormat(Aidge::DataFormat::NHWC);
+        weight->setDataFormat(Aidge::DataFormat::NHWC);
+    
+        // Set inputs
+        conv1.setInput(1, weight);
+        conv1.setInput(0, input);
+    
+        REQUIRE(conv1.forwardDims());
+        REQUIRE(conv1.getOutput(0)->dims() == expectedOutputDims);
+    }
+
+}
+
+
 TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeReceptiveField][Conv]") {
     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
     auto conv1 = Conv(3, 32, {5, 5}, "conv1");          // output dims: {16, 32, 220, 220}