diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index a6ff03d36b662f4420424f930401844de25036d2..2752ec484b2112d5847bd8754dbe8c3be71fd608 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -12,6 +12,11 @@
 #ifndef AIDGE_DATA_H_
 #define AIDGE_DATA_H_
 
+#include <cstdint>
+#include <fmt/format.h>
+#include <string>
+#include <tuple>
+
 #include "aidge/data/half.hpp"
 #include "aidge/utils/Attributes.hpp"
 
@@ -64,14 +69,14 @@ template <typename T> struct NativeType { static const Aidge::DataType type; };
 template <> const Aidge::DataType NativeType<double>::type = Aidge::DataType::Float64;
 template <> const Aidge::DataType NativeType<float>::type = Aidge::DataType::Float32;
 template <> const Aidge::DataType NativeType<half_float::half>::type = Aidge::DataType::Float16;
-template <> const Aidge::DataType NativeType<int8_t>::type = Aidge::DataType::Int8;
-template <> const Aidge::DataType NativeType<int16_t>::type = Aidge::DataType::Int16;
-template <> const Aidge::DataType NativeType<int32_t>::type = Aidge::DataType::Int32;
-template <> const Aidge::DataType NativeType<int64_t>::type = Aidge::DataType::Int64;
-template <> const Aidge::DataType NativeType<uint8_t>::type = Aidge::DataType::UInt8;
-template <> const Aidge::DataType NativeType<uint16_t>::type = Aidge::DataType::UInt16;
-template <> const Aidge::DataType NativeType<uint32_t>::type = Aidge::DataType::UInt32;
-template <> const Aidge::DataType NativeType<uint64_t>::type = Aidge::DataType::UInt64;
+template <> const Aidge::DataType NativeType<std::int8_t>::type = Aidge::DataType::Int8;
+template <> const Aidge::DataType NativeType<std::int16_t>::type = Aidge::DataType::Int16;
+template <> const Aidge::DataType NativeType<std::int32_t>::type = Aidge::DataType::Int32;
+template <> const Aidge::DataType NativeType<std::int64_t>::type = Aidge::DataType::Int64;
+template <> const Aidge::DataType NativeType<std::uint8_t>::type = Aidge::DataType::UInt8;
+template <> const Aidge::DataType NativeType<std::uint16_t>::type = Aidge::DataType::UInt16;
+template <> const Aidge::DataType NativeType<std::uint32_t>::type = Aidge::DataType::UInt32;
+template <> const Aidge::DataType NativeType<std::uint64_t>::type = Aidge::DataType::UInt64;
 
 template <>
 const char* const EnumStrings<Aidge::DataType>::data[]
@@ -79,8 +84,26 @@ const char* const EnumStrings<Aidge::DataType>::data[]
        "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16",
        "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6",
        "UInt7", "UInt8", "UInt16", "UInt32", "UInt64"};
+
+template <Aidge::DataType D> struct cpptype {
+    using type = void; // Placeholder
+};
+template <> struct cpptype<Aidge::DataType::Float16> { using type = half_float::half; };
+template <> struct cpptype<Aidge::DataType::Float32> { using type = float; };
+template <> struct cpptype<Aidge::DataType::Float64> { using type = double; };
+template <> struct cpptype<Aidge::DataType::Int8> { using type = std::int8_t; };
+template <> struct cpptype<Aidge::DataType::Int16> { using type = std::int16_t; };
+template <> struct cpptype<Aidge::DataType::Int32> { using type = std::int32_t; };
+template <> struct cpptype<Aidge::DataType::Int64> { using type = std::int64_t; };
+template <> struct cpptype<Aidge::DataType::UInt8> { using type = std::uint8_t; };
+template <> struct cpptype<Aidge::DataType::UInt16> { using type = std::uint16_t; };
+template <> struct cpptype<Aidge::DataType::UInt32> { using type = std::uint32_t; };
+template <> struct cpptype<Aidge::DataType::UInt64> { using type = std::uint64_t; };
+
+template <Aidge::DataType D> using cpptype_t = typename cpptype<D>::type;
 }
 
+
 namespace Aidge {
 inline auto format_as(DataType dt) { return EnumStrings<Aidge::DataType>::data[static_cast<int>(dt)]; }
 }
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 7534b66951cc9d8074d0af7742ba5165013431f5..6680f2e1d6de5157024f9e7ca65b14256e53eae2 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -31,27 +31,26 @@ public:
     void forward() override;
 };
 
-enum class GatherAttr { Indices, GatheredShape, Axis };
+enum class GatherAttr { Axis, Indices, GatheredShape };
 
 class Gather_Op : public OperatorTensor,
                 public Registrable<Gather_Op,
                                    std::string,
                                    std::shared_ptr<OperatorImpl>(const Gather_Op&)>,
-                public StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t> {
+                public StaticAttributes<GatherAttr, std::int8_t, std::vector<std::int64_t>, std::vector<DimSize_t>> {
 
 public:
     static const std::string Type;
 
     Gather_Op() = delete;
 
-    using Attributes_ = StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t>;
+    using Attributes_ = StaticAttributes<GatherAttr, std::int8_t, std::vector<std::int64_t>, std::vector<DimSize_t>>;
     template <GatherAttr e> using attr = typename Attributes_::template attr<e>;
-    Gather_Op(const std::vector<std::int64_t>& indices, const std::vector<DimSize_t>& gatheredShape, std::int64_t axis)
-            : OperatorTensor(Type, 1, 0, 1),
-            Attributes_(
-                attr<GatherAttr::Indices>(indices),
-                attr<GatherAttr::GatheredShape>(gatheredShape),
-                attr<GatherAttr::Axis>(axis))
+    Gather_Op(std::int8_t axis, const std::vector<std::int64_t>& indices, const std::vector<DimSize_t>& gatheredShape)
+            : OperatorTensor(Type, 2, 0, 1),
+            Attributes_(attr<GatherAttr::Axis>(axis),
+                        attr<GatherAttr::Indices>(indices),
+                        attr<GatherAttr::GatheredShape>(gatheredShape))
     {
         mImpl = std::make_shared<Gather_OpImpl>(*this);
     }
@@ -85,21 +84,21 @@ public:
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input", "indices"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
 };
 
-inline std::shared_ptr<Node> Gather( const std::vector<std::int64_t>& indices, const std::vector<DimSize_t>& gatheredShape, std::int64_t axis = 0, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Gather_Op>(indices, gatheredShape, axis), name);
+inline std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<std::int64_t>& indices = {}, const std::vector<DimSize_t>& gatheredShape = {}, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Gather_Op>(axis, indices, gatheredShape), name);
 }
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Indices", "GatheredShape", "Axis"};
+const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Axis", "Indices", "GatheredShape"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 757e08fe97dd1cc572c08ac7c2b454daa234bdc1..3e46ca6c615e7db52b9c1705a9c639c6d7b64d7a 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -35,18 +35,17 @@ enum class SliceAttr { Starts, Ends, Axes };
 class Slice_Op
     : public OperatorTensor,
       public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)>,
-      public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>> {
+      public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int8_t>> {
+
 public:
     static const std::string Type;
 
     Slice_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>>;
-    template <SliceAttr e>
-    using attr = typename Attributes_::template attr<e>;
-
-    Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>&  ends, const std::vector<std::int64_t>& axes)
-        : OperatorTensor(Type, 1, 0, 1),
+    using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int8_t>>;
+    template <SliceAttr e> using attr = typename Attributes_::template attr<e>;
+    Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>& ends, const std::vector<std::int8_t>& axes)
+        : OperatorTensor(Type, 4, 0, 1),
           Attributes_(attr<SliceAttr::Starts>(starts),
                       attr<SliceAttr::Ends>(ends),
                       attr<SliceAttr::Axes>(axes))
@@ -54,6 +53,7 @@ public:
         mImpl = std::make_shared<Slice_OpImpl>(*this);
     }
 
+
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
      * input tensors (the new operator has no input associated).
@@ -83,7 +83,7 @@ public:
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input", "starts", "ends", "axes"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
@@ -92,22 +92,13 @@ public:
 
 /**
  * @brief Exract a sub-Tensor from a bigger original Tensor.
- * @param starts Indexes for each dimension of the first element.
- * Can be a negative value. Negative values start their reference from the last index.
- * ``-1`` referes to the last index of a dimension.
- * @param ends Indexes for each dimension of the last element.
- * Can be a negative value. Negative values start their reference from the last index.
- * ``-1`` referes to the last index of a dimension.
- * @param axes Dimensions for which start/end indexes apply. Not specifying a dimensions
- * means the whole dimensions is extracted.
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
-inline std::shared_ptr<Node> Slice(const std::vector<std::int64_t> starts,
-                                   const std::vector<std::int64_t> ends,
-                                   const std::vector<std::int64_t> axes,
+inline std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
+                                   const std::vector<std::int64_t>& ends = {},
+                                   const std::vector<std::int8_t>& axes = {},
                                    const std::string &name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
     return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes), name);
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 8c32acfe2bd7e0118c186be8fa1297ee16fe6f6c..e5507e670c1ec0bf4758169a9ea9864ff3fe29be 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -11,6 +11,7 @@
 
 #include <pybind11/pybind11.h>
 #include <string>
+#include <vector>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Gather.hpp"
@@ -25,6 +26,7 @@ void init_Gather(py::module& m) {
     .def("get_outputs_name", &Gather_Op::getOutputsName)
     .def("attributes_name", &Gather_Op::staticGetAttrsName);
     declare_registrable<Gather_Op>(m, "GatherOp");
-    m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis")= 0, py::arg("name") = "");
+
+    m.def("Gather", &Gather, py::arg("axis") = 0, py::arg("indices") = std::vector<std::int64_t>(), py::arg("gathered_shape") = std::vector<std::size_t>(), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index 558fc98c172ea1a264ee8ac3ebbc70e09eba826d..68124262cbf8de062653e530a85147e0944ebad4 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -10,6 +10,7 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+#include <vector>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Slice.hpp"
@@ -23,6 +24,12 @@ void init_Slice(py::module& m) {
     .def("get_inputs_name", &Slice_Op::getInputsName)
     .def("get_outputs_name", &Slice_Op::getOutputsName);
     declare_registrable<Slice_Op>(m, "SliceOp");
-    m.def("Slice", &Slice, py::arg("starts"), py::arg("ends"), py::arg("axes"), py::arg("name") = "");
+
+    m.def("Slice",
+          &Slice,
+          py::arg("starts") = std::vector<std::int64_t>(),
+          py::arg("ends") = std::vector<std::int64_t>(),
+          py::arg("axes") = std::vector<std::int8_t>(),
+          py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 7b0945271660be8f309024f46c258e6a7e2193e5..4e5bd2573a0e1b0cc78256a68dad88332877067b 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -9,24 +9,22 @@
  *
  ********************************************************************************/
 
-#include "aidge/operator/Gather.hpp"
-
 #include <cstddef>  // std::size_t
 #include <cstdint>  // std::int64_t
 #include <string>
 #include <vector>
 
+#include "aidge/operator/Gather.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include <algorithm>  // std::copy_n (note: std::back_inserter additionally needs <iterator>)
 
 void Aidge::Gather_OpImpl::forward() {
     const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
-    const auto axis = op.template getAttr<std::int64_t>("Axis");
+    const auto axis = op.template getAttr<std::int8_t>("Axis");
 
-    const std::size_t axisIdx = axis>=0 ?
-                                axis :
-                                static_cast<std::size_t>(axis) + op.getInput(0)->dims().size();
+    const std::size_t axisIdx = (axis >= 0) ? static_cast<std::size_t>(axis) : static_cast<std::size_t>(static_cast<int>(axis) + static_cast<int>(op.getInput(0)->dims().size()));
 
     std::size_t postAxisElems = 1;
     for (std::size_t i = axisIdx + 1; i < op.getInput(0)->dims().size(); ++i) {
@@ -37,13 +35,14 @@ void Aidge::Gather_OpImpl::forward() {
         preAxisElems *= op.getInput(0)->dims()[i];
     }
 
-    const auto indices = op.template getAttr<std::vector<std::int64_t>>("Indices");
     std::size_t outputOffset = 0;
     for (std::size_t i=0; i<preAxisElems; ++i)
     {
-        for(std::size_t j=0; j<indices.size(); ++j)
+        for (std::size_t j = 0; j < op.template getAttr<std::vector<std::int64_t>>("Indices").size(); ++j)
         {
-            const std::size_t idx = indices[j] >= 0 ? indices[j] : static_cast<std::size_t>(indices[j]) + op.getInput(0)->dims()[axisIdx];
+            const std::int64_t index = op.template getAttr<std::vector<std::int64_t>>("Indices")[j];
+            const std::size_t idx = (index >= 0) ? static_cast<std::size_t>(index) :
+                                    static_cast<std::size_t>(index + static_cast<std::int64_t>(op.getInput(0)->dims()[axisIdx]));
             op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i * postAxisElems * op.getInput(0)->dims()[axisIdx] + idx * postAxisElems), postAxisElems, outputOffset);
             outputOffset += postAxisElems;
         }
@@ -53,27 +52,58 @@ void Aidge::Gather_OpImpl::forward() {
 const std::string Aidge::Gather_Op::Type = "Gather";
 
 bool Aidge::Gather_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
+    // check data input has been associated
     if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
     }
 
     if (!getInput(0)->empty()) {
+        if (this->template getAttr<GatherAttr::Indices>().empty())
+        {
+            if (!getInput(1) || getInput(1)->empty()) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Either indices input or attribute must be provided", type());
+            }
+            this->template getAttr<GatherAttr::GatheredShape>() = getInput(1)->dims();
+            this->template getAttr<GatherAttr::Indices>().clear(); // no-op here (attribute checked empty above); kept as a safeguard
+            this->template getAttr<GatherAttr::Indices>().reserve(getInput(1)->size());
+            switch (mInputs[1]->dataType()) {
+                case DataType::Float64:
+                    std::copy_n(static_cast<double*>(mInputs[1]->getImpl()->rawPtr()),
+                                getInput(1)->size(),
+                                std::back_inserter(this->template getAttr<GatherAttr::Indices>()));
+                    break;
+                case DataType::Float32:
+                    std::copy_n(static_cast<float*>(mInputs[1]->getImpl()->rawPtr()),
+                                getInput(1)->size(),
+                                std::back_inserter(this->template getAttr<GatherAttr::Indices>()));
+                    break;
+                case DataType::Int64:
+                    std::copy_n(static_cast<std::int64_t*>(mInputs[1]->getImpl()->rawPtr()),
+                                getInput(1)->size(),
+                                std::back_inserter(this->template getAttr<GatherAttr::Indices>()));
+                    break;
+                case DataType::Int32:
+                    std::copy_n(static_cast<std::int32_t*>(mInputs[1]->getImpl()->rawPtr()),
+                                getInput(1)->size(),
+                                std::back_inserter(this->template getAttr<GatherAttr::Indices>()));
+                    break;
+                default:
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Indices input DataType is not supported.", type());
+                    break;
+            }
+        }
         std::vector<DimSize_t> outDims = getInput(0)->dims();
-        const std::vector<DimSize_t> gatheredShape = this->template getAttr<GatherAttr::GatheredShape>();
-        // TODO: check indices and gatheredShape
 
-        const std::int64_t axisIdx = this->template getAttr<GatherAttr::Axis>() >= 0 ?
-                                        this->template getAttr<GatherAttr::Axis>() :
-                                        this->template getAttr<GatherAttr::Axis>() + outDims.size();
+        const std::size_t axisIdx = (this->template getAttr<GatherAttr::Axis>() >= 0) ?
+                static_cast<std::size_t>(this->template getAttr<GatherAttr::Axis>()) :
+                static_cast<std::size_t>(this->template getAttr<GatherAttr::Axis>() + static_cast<std::int8_t>(outDims.size()));
         outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
-        if (!gatheredShape.empty())
+        if( !this->template getAttr<GatherAttr::GatheredShape>().empty())
         {
-            outDims.insert(outDims.cbegin() + static_cast<std::size_t>(axisIdx),
-                            gatheredShape.cbegin(),
-                            gatheredShape.cend());
+            outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx),
+                           this->template getAttr<GatherAttr::GatheredShape>().begin(),
+                           this->template getAttr<GatherAttr::GatheredShape>().end());
         }
-
         mOutputs[0]->resize(outDims);
         return true;
     }
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 97ec0a5171a8f13fee0a93557b6831443f10713a..76cf641199ce6236840de53eb18c08b860c8eaf1 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -8,71 +8,89 @@
  * SPDX-License-Identifier: EPL-2.0
  *
  ********************************************************************************/
+
 #include "aidge/operator/Slice.hpp"
-#include "aidge/utils/Types.h"
-#include "aidge/utils/ErrorHandling.hpp"
 
 #include <cassert>
 #include <cstddef>
+#include <cstdint>
 #include <string>
 #include <utility>
 #include <vector>
 
+#include <algorithm>  // std::copy_n; fmt is presumably provided via ErrorHandling.hpp — TODO confirm
+#include <iterator>   // std::back_inserter
 #include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
 void Aidge::Slice_OpImpl::forward() {
     const Slice_Op& op = dynamic_cast<const Slice_Op&>(mOp);
-    const auto inputDims = op.getInput(0)->dims();
-    auto slicedDims = op.getInput(0)->dims();
 
+    if (!op.getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", op.Type);
+    }
+    AIDGE_ASSERT((op.template getAttr<SliceAttr::Starts>().size() == op.template getAttr<SliceAttr::Ends>().size()) &&
+                 (op.template getAttr<SliceAttr::Starts>().size() == op.template getAttr<SliceAttr::Axes>().size()),
+                 "start, end and axes arguments should be the same size.");
+
+    const std::size_t nbDims = op.getInput(0)->nbDims();
+
+    const std::vector<std::size_t>& inputDims = op.getInput(0)->dims();
+    auto outputDims = op.getInput(0)->dims();
+
+    // compute index of the output's first element
+    // compute output dimension at the same time (may change between two forward calls)
     std::size_t beginning = 0;
-    DimSize_t nbAxes = op.getAttr<SliceAttr::Axes>().size();
+    const std::size_t nbAxes = op.template getAttr<SliceAttr::Axes>().size();
     for (std::size_t i = 0; i < nbAxes; ++i) {
         // For each slice operation get the params and cast them to size_t
-        const std::int64_t axis_ = op.getAttr<SliceAttr::Axes>()[i];
-        const std::int64_t start_ = op.getAttr<SliceAttr::Starts>()[i];
-        const std::int64_t end_ = op.getAttr<SliceAttr::Ends>()[i];
-        const std::size_t axis = axis_ >= 0 ? axis_ : static_cast<std::size_t>(axis_) + inputDims.size();
-        const std::size_t start = start_ >= 0 ? start_ : start_ + inputDims[axis];
-        const std::size_t end = end_ >= 0 ? end_ : end_ + inputDims[axis];
-        std::size_t stride = 1;
-        for (std::size_t j = inputDims.size() - 1; j > axis; --j) stride *= inputDims[j];
-        beginning += start * stride;
-        const std::size_t sliceLength = end - start + 1;
-        slicedDims[axis] = sliceLength;
+        DimIdx_t axis = op.template getAttr<SliceAttr::Axes>()[i] >= 0 ?
+                            static_cast<DimIdx_t>(op.template getAttr<SliceAttr::Axes>()[i]) :
+                            static_cast<DimIdx_t>(op.template getAttr<SliceAttr::Axes>()[i] + static_cast<DimIdx_t>(inputDims.size()));
+        DimSize_t start = op.template getAttr<SliceAttr::Starts>()[i] >= 0 ?
+                            static_cast<DimSize_t>(op.template getAttr<SliceAttr::Starts>()[i]) :
+                            static_cast<DimSize_t>(op.template getAttr<SliceAttr::Starts>()[i] + static_cast<DimSize_t>(inputDims[axis]));
+        DimSize_t end = op.template getAttr<SliceAttr::Ends>()[i] >= 0 ?
+                        static_cast<DimSize_t>(op.template getAttr<SliceAttr::Ends>()[i]) :
+                        static_cast<DimSize_t>(op.template getAttr<SliceAttr::Ends>()[i] + static_cast<DimSize_t>(inputDims[axis]));
+        std::size_t stridePostAxis = 1; for (std::size_t d = static_cast<std::size_t>(axis) + 1; d < nbDims; ++d) { stridePostAxis *= inputDims[d]; } // product of dims after axis (avoids <numeric>/<functional> dependency)
+        beginning += start * stridePostAxis;
+        const std::size_t sliceLength = end - start;
+        outputDims[axis] = sliceLength;
     }
+    op.getOutput(0)->resize(outputDims);
 
-    const std::size_t nbDims = slicedDims.size();
 
-    // for inputDims = {4,5,5,3} & slicedDims = {3,2,2,1}, substractDims = {1,5,5,3}
+    // for inputDims = {4,5,5,3} & outputDims = {3,2,2,1}: substractDims = {1,5,5,3}
     std::vector<std::size_t> substractedDims = std::vector<std::size_t>(nbDims);
     for (std::size_t i = 0; i < nbDims; ++i) {
-        substractedDims[i] = inputDims[i] - slicedDims[i];
+        substractedDims[i] = inputDims[i] - outputDims[i];
     }
 
-    // for slicedDims = {3,2,2,1}, prodSlicedDims = {12,4,2,1}
-    std::vector<std::size_t> prodSlicedDims = std::vector<std::size_t>(nbDims);
+    // for outputDims = {3,2,2,1}: prodOutputDims = {12,4,2,1}
+    std::vector<std::size_t> prodOutputDims = std::vector<std::size_t>(nbDims);
     std::vector<std::size_t> prodInputDims = std::vector<std::size_t>(nbDims + 1);
-    prodSlicedDims[nbDims - 1] = slicedDims[nbDims - 1];
+    prodOutputDims[nbDims - 1] = outputDims[nbDims - 1];
     prodInputDims[nbDims - 1] = inputDims[nbDims - 1];
     prodInputDims[nbDims] = 1;
     for (std::size_t i = 2; i <= nbDims; ++i) {
-        prodSlicedDims[nbDims - i] = prodSlicedDims[nbDims - i + 1] * slicedDims[nbDims - i];
+        prodOutputDims[nbDims - i] = prodOutputDims[nbDims - i + 1] * outputDims[nbDims - i];
         prodInputDims[nbDims - i] = prodInputDims[nbDims - i + 1] * inputDims[nbDims - i];
     }
 
     std::size_t i = beginning;
-    std::size_t size = 0;
+    std::size_t size = 0; // number of elements to copy
     std::size_t offset = 0;
-    for (std::size_t j = 0; j < prodSlicedDims[0];) {
+    for (std::size_t j = 0; j < prodOutputDims[0];) {
         ++size;
         ++i;
         ++j;
         bool newChunk = false;
         for (std::size_t idx = nbDims - 1; idx > 0; --idx) {
-            if (j % prodSlicedDims[idx] == 0) {
+            if (j % prodOutputDims[idx] == 0) {
                 i += substractedDims[idx] * prodInputDims[idx + 1];
                 newChunk = true;
             }
@@ -94,32 +112,103 @@ void Aidge::Slice_OpImpl::forward() {
 const std::string Aidge::Slice_Op::Type = "Slice";
 
 bool Aidge::Slice_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check input have been associated
-    if (!getInput(0) || (getInput(0)->empty())) {
+    // check inputs have been associated
+    if (!getInput(0)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
     }
 
-    const DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
-    for (std::size_t i = 0; i < nbAxes; ++i) {
-        // For each slice operation get the params and cast them to size_t
-        const std::int64_t axis_ = this->template getAttr<SliceAttr::Axes>()[i];
-        const std::int64_t start_ = this->template getAttr<SliceAttr::Starts>()[i];
-        const std::int64_t end_ = this->template getAttr<SliceAttr::Ends>()[i];
-        const std::size_t axis = axis_ >= 0 ? static_cast<std::size_t>(axis_) : static_cast<std::size_t>(axis_) + getInput(0)->nbDims();
-        const std::size_t start = start_ >= 0 ? static_cast<std::size_t>(start_) : static_cast<std::size_t>(start_) + getInput(0)->dims()[axis];
-        const std::size_t end = end_ >= 0 ? static_cast<std::size_t>(end_) : static_cast<std::size_t>(end_) + getInput(0)->dims()[axis];
-
-        const std::size_t sliceLength = end - start + 1;
-        // Check if slice length is valid
-        if (sliceLength > getInput(0)->dims()[axis])
+    if(!getInput(0)->empty())
+    {
+        if(this->template getAttr<SliceAttr::Starts>().empty() || this->template getAttr<SliceAttr::Ends>().empty() || this->template getAttr<SliceAttr::Axes>().empty())
         {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds");
+            if (!getInput(1) || !getInput(2) || !getInput(3) || getInput(1)->empty() || getInput(2)->empty() || getInput(3)->empty()) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Starts, Ends and Axes must be provided either as input or attributes", type());
+            }
+
+            AIDGE_ASSERT((mInputs[1]->dataType() == mInputs[2]->dataType()) && (mInputs[1]->dataType() == mInputs[3]->dataType()), "Slice inputs must have the same dataType.");
+
+            this->template getAttr<SliceAttr::Starts>().clear(); // If both are provided input would override attrs
+            this->template getAttr<SliceAttr::Starts>().reserve(getInput(1)->size());
+            this->template getAttr<SliceAttr::Ends>().clear();
+            this->template getAttr<SliceAttr::Ends>().reserve(getInput(1)->size());
+            this->template getAttr<SliceAttr::Axes>().clear();
+            this->template getAttr<SliceAttr::Axes>().reserve(getInput(1)->size());
+            switch (mInputs[1]->dataType()) {
+                case DataType::Float64:
+                    std::copy_n(static_cast<double*>(mInputs[1]->getImpl()->rawPtr()),
+                                getInput(1)->size(),
+                                std::back_inserter(this->template getAttr<SliceAttr::Starts>()));
+                    std::copy_n(static_cast<double*>(mInputs[2]->getImpl()->rawPtr()),
+                                getInput(2)->size(),
+                                std::back_inserter(this->template getAttr<SliceAttr::Ends>()));
+                    std::copy_n(static_cast<double*>(mInputs[3]->getImpl()->rawPtr()),
+                                getInput(3)->size(),
+                                std::back_inserter(this->template getAttr<SliceAttr::Axes>()));
+                    break;
+                case DataType::Float32:
+                    std::copy_n(static_cast<float*>(mInputs[1]->getImpl()->rawPtr()),
+                                getInput(1)->size(),
+                                std::back_inserter(this->template getAttr<SliceAttr::Starts>()));
+                    std::copy_n(static_cast<float*>(mInputs[2]->getImpl()->rawPtr()),
+                                getInput(2)->size(),
+                                std::back_inserter(this->template getAttr<SliceAttr::Ends>()));
+                    std::copy_n(static_cast<float*>(mInputs[3]->getImpl()->rawPtr()),
+                                getInput(3)->size(),
+                                std::back_inserter(this->template getAttr<SliceAttr::Axes>()));
+                    break;
+                case DataType::Int64:
+                    std::copy_n(static_cast<std::int64_t*>(mInputs[1]->getImpl()->rawPtr()),
+                                getInput(1)->size(),
+                                std::back_inserter(this->template getAttr<SliceAttr::Starts>()));
+                    std::copy_n(static_cast<std::int64_t*>(mInputs[2]->getImpl()->rawPtr()),
+                                getInput(2)->size(),
+                                std::back_inserter(this->template getAttr<SliceAttr::Ends>()));
+                    std::copy_n(static_cast<std::int64_t*>(mInputs[3]->getImpl()->rawPtr()),
+                                getInput(3)->size(),
+                                std::back_inserter(this->template getAttr<SliceAttr::Axes>()));
+                    break;
+                case DataType::Int32:
+                    std::copy_n(static_cast<std::int32_t*>(mInputs[1]->getImpl()->rawPtr()),
+                                getInput(1)->size(),
+                                std::back_inserter(this->template getAttr<SliceAttr::Starts>()));
+                    std::copy_n(static_cast<std::int32_t*>(mInputs[2]->getImpl()->rawPtr()),
+                                getInput(2)->size(),
+                                std::back_inserter(this->template getAttr<SliceAttr::Ends>()));
+                    std::copy_n(static_cast<std::int32_t*>(mInputs[3]->getImpl()->rawPtr()),
+                                getInput(3)->size(),
+                                std::back_inserter(this->template getAttr<SliceAttr::Axes>()));
+                    break;
+                default:
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Starts, Ends and Axes input DataType is not supported.", type());
+                    break;
+            }
+        }
+
+        DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        for (std::size_t i = 0; i < nbAxes; ++i) {
+            DimIdx_t axis = this->template getAttr<SliceAttr::Axes>()[i] >= 0 ?
+                            static_cast<DimIdx_t>(this->template getAttr<SliceAttr::Axes>()[i]) :
+                            static_cast<DimIdx_t>(this->template getAttr<SliceAttr::Axes>()[i] + static_cast<DimIdx_t>(getInput(0)->nbDims()));
+            DimSize_t start = this->template getAttr<SliceAttr::Starts>()[i] >= 0 ?
+                              static_cast<DimSize_t>(this->template getAttr<SliceAttr::Starts>()[i]) :
+                              static_cast<DimSize_t>(this->template getAttr<SliceAttr::Starts>()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
+            DimSize_t end = this->template getAttr<SliceAttr::Ends>()[i] >= 0 ?
+                            static_cast<DimSize_t>(this->template getAttr<SliceAttr::Ends>()[i]) :
+                            static_cast<DimSize_t>(this->template getAttr<SliceAttr::Ends>()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
+
+            const std::size_t sliceLength = end - start;
+            // Check if slice length is valid
+            if (sliceLength > getInput(0)->dims()[axis])
+            {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds");
+            }
+            outDims[axis] = sliceLength;
         }
-        outDims[axis] = sliceLength;
+        mOutputs[0]->resize(outDims);
+        return true;
     }
-    mOutputs[0]->resize(outDims);
-    return true;
+    return false;
 }
 
 void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
index 7959e1b70acab617b9c6f92160c6d501712f5945..dbd954d1b39adb298a34917f41cfac09177adae7 100644
--- a/src/recipes/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -23,6 +23,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/operator/Producer.hpp"
 
 #include "aidge/operator/Add.hpp"
 #include "aidge/operator/Concat.hpp"
@@ -33,32 +34,35 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
                                                             const Aidge::DimIdx_t axis,
                                                             const std::size_t nbSlices)
 {
+    // For now, tiling only supports Conv operators.
     if (node->getOperator()->type() != "Conv") {
         AIDGE_INTERNAL_ASSERT("Operator should be a Convolution.");
     }
-    AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+    // TODO: restore this check once tiling supports other operator types
+    // AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
     const auto& op = std::static_pointer_cast<OperatorTensor>(node->getOperator());
-    if (op->nbOutputs() != 1 || op->nbData() > 1) {
-        AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now.");
-    }
+    // TODO: restore this check once tiling supports other operator types
+    // if (op->nbOutputs() != 1 || op->nbData() > 1) {
+    //     AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now.");
+    // }
     if (!op->dimsForwarded()) {
         AIDGE_INTERNAL_ASSERT("Dimensions must be forwarded before any tiling");
     }
+
+    const std::shared_ptr<Tensor>& outTensor = op->getOutput(0);
+    std::vector<DimSize_t> outputDims = outTensor->dims();
+
     // start by doing a tiling with strict dimensions division
-    const auto& outTensor = op->getOutput(0);
-    if (op->getOutput(0)->dims()[axis] % nbSlices != 0) {
+    if (outputDims[axis] % nbSlices != 0) {
         AIDGE_INTERNAL_ASSERT("axis should be a multiple of nbSlices");
     }
 
     // dimensions of a Slice
-    std::vector<DimSize_t> outputDims = outTensor->dims();
     outputDims[axis] /= nbSlices;
 
-    std::vector<DimSize_t> currentFirstDims = std::vector<DimSize_t>(outTensor->nbDims(), 0);
 
-    std::set<std::shared_ptr<Aidge::Node>> res;
     auto concat = Concat(nbSlices, axis);
-    res.insert(concat);
+    std::set<std::shared_ptr<Aidge::Node>> tiledOperator{concat};
 
     // check slice sizes
     // const auto inputDims = op->computeReceptiveField(currentFirstDims[axis], outputDims, 0);
@@ -72,34 +76,42 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
     std::vector<std::shared_ptr<Node>> clonedInputs = std::vector<std::shared_ptr<Node>>(node->nbInputs(), nullptr);
     for (std::size_t i = node->nbData(); i < node ->nbInputs(); ++i) {
         clonedInputs[i] = node -> getParent(i) -> cloneSharedOperators();
-        clonedInputs[i] -> setName(node -> name() + "_0");
-        res.insert(clonedInputs[i]);
+        clonedInputs[i] -> setName(node -> getParent(i) -> name() + "_0");
+        tiledOperator.insert(clonedInputs[i]);
     }
 
+    const std::vector<std::string> sliceInputsNames = Slice_Op::getInputsName();
+    // coordinates of the first value of the current output slice
+    std::vector<DimSize_t> currentFirstDims = std::vector<DimSize_t>(outTensor->nbDims(), 0);
     for (IOIndex_t i = 0; currentFirstDims[axis] < outTensor->dims()[axis]; currentFirstDims[axis] += outputDims[axis], ++i) {
         const auto inputDims = op->computeReceptiveField(currentFirstDims, outputDims, 0);
         auto newNode = node -> clone(); // no input associated to clones
         newNode -> setName(node->name() + "_" + std::to_string(currentFirstDims[axis]));
         clonedInputs[1] -> addChild(newNode, 0, 1);
         clonedInputs[2] -> addChild(newNode, 0, 2);
-        // Slice for input and each parameter
-        std::vector<std::int64_t> inputDimsEnd(inputDims[0].first.size());
-        for (std::size_t dim = 0; dim < inputDimsEnd.size(); ++dim) {
-            inputDimsEnd[dim] = static_cast<std::int64_t>(inputDims[0].first[dim] + inputDims[0].second[dim]) - 1;
-        }
+
+        auto backend = outTensor->getImpl()->backend();
+        // Create Slice's Starts attribute
         std::vector<std::int64_t> inputDimsStart(inputDims[0].first.size());
         for (std::size_t dim = 0; dim < inputDimsStart.size(); ++dim) {
             inputDimsStart[dim] = static_cast<std::int64_t>(inputDims[0].first[dim]);
         }
-        std::vector<std::int64_t> usedDims(inputDimsEnd.size());
-        std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int64_t>(0));
+        // Create Slice's Ends attribute
+        std::vector<std::int64_t> inputDimsEnd(inputDims[0].first.size());
+        for (std::size_t dim = 0; dim < inputDimsEnd.size(); ++dim) {
+            inputDimsEnd[dim] = static_cast<std::int64_t>(inputDims[0].first[dim] + inputDims[0].second[dim]);
+        }
+
+        // Create Slice's Axes attribute
+        std::vector<std::int8_t> usedDims(inputDimsEnd.size());
+        std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int8_t>(0));
+
         auto slice = Slice(inputDimsStart, inputDimsEnd, usedDims, "Slice_" + std::to_string(currentFirstDims[axis]));
         slice -> addChild(newNode, 0, 0);
         newNode -> addChild(concat, 0, i);
 
-        res.insert(slice);
-        res.insert(newNode);
+        tiledOperator.insert({slice, newNode});
     }
 
-    return res;
+    return tiledOperator;
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_GatherImpl.cpp b/unit_tests/operator/Test_GatherImpl.cpp
index 2995963a35cda5b0c5794b1d15e4064438b58ece..02e8e74890918726212e09fdd9f969ce0863af83 100644
--- a/unit_tests/operator/Test_GatherImpl.cpp
+++ b/unit_tests/operator/Test_GatherImpl.cpp
@@ -14,12 +14,13 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Gather.hpp"
 
+#include <cstdint>
 #include <memory>
 
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Gather(forward)") {
+TEST_CASE("[cpu/operator] Gather(forward)", "[Gather][CPU]") {
     SECTION("2D Tensor axis 0") {
         std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> {
             {
@@ -42,10 +43,10 @@ TEST_CASE("[cpu/operator] Gather(forward)") {
             }
         });
 
-        std::shared_ptr<Node> myGather = Gather({1, 2}, {1, 2}, 0);
+        std::shared_ptr<Node> myGather = Gather(std::int8_t(0));
         auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
         op->associateInput(0,input);
-        // op->associateInput(1,indexes);
+        op->associateInput(1,indexes);
         op->setDataType(DataType::Int32);
         op->setBackend("cpu");
         myGather->forward();
@@ -82,10 +83,42 @@ TEST_CASE("[cpu/operator] Gather(forward)") {
             }
         });
 
-        std::shared_ptr<Node> myGather = Gather({0, 2}, {1, 2}, 1);
+        std::shared_ptr<Node> myGather = Gather(1);
+        auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
+        op->associateInput(0,input);
+        op->associateInput(1,indexes);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        myGather->forward();
+
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+
+    }
+    SECTION("Init with attributes") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> {
+            {
+                {1, 2, 3},
+                {4, 5, 6},
+                {7, 8, 9}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,3,1,2> {
+            {
+                {
+                    {1, 3}
+                },
+                {
+                    {4, 6}
+                },
+                {
+                    {7, 9}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> myGather = Gather(1, {0, 2}, {1, 2});
         auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
         op->associateInput(0,input);
-        // op->associateInput(1,indexes);
         op->setDataType(DataType::Int32);
         op->setBackend("cpu");
         myGather->forward();
diff --git a/unit_tests/operator/Test_SliceImpl.cpp b/unit_tests/operator/Test_SliceImpl.cpp
index 91ae92848b552a6038a4cb5f8dd3848b20ac2168..b0fc2bc9b86445de1e770223a18eb0d03e21d337 100644
--- a/unit_tests/operator/Test_SliceImpl.cpp
+++ b/unit_tests/operator/Test_SliceImpl.cpp
@@ -19,15 +19,21 @@ using namespace Aidge;
 TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
     SECTION("1D Tensor") {
         std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
-            {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
+            {0, 1, -2,-3, 4,-5,-6, 7, 8, 9}
         });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,4> {
-            {0, 1, 2,-3}
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,3> {
+            {0, 1, -2}
         });
+        std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,1>{{0}});
+        std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,1>{{3}});
+        std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,1>{{0}});
 
-        std::shared_ptr<Node> mySlice = Slice({0}, {3}, {0});
+        std::shared_ptr<Node> mySlice = Slice();
         auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
         mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->associateInput(1,starts);
+        mySlice->getOperator()->associateInput(2,ends);
+        mySlice->getOperator()->associateInput(3,axes);
         mySlice->getOperator()->setDataType(DataType::Int32);
         mySlice->getOperator()->setBackend("cpu");
         mySlice->forward();
@@ -50,10 +56,16 @@ TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
                 {-5,-6, 7}
             }
         });
+        std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,2>{{0,5}});
+        std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,2>{{2,8}});
+        std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,2>{{0,1}});
 
-        std::shared_ptr<Node> mySlice = Slice({0,5}, {1,7}, {0,1});
+        std::shared_ptr<Node> mySlice = Slice();
         auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
         mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->associateInput(1,starts);
+        mySlice->getOperator()->associateInput(2,ends);
+        mySlice->getOperator()->associateInput(3,axes);
         mySlice->getOperator()->setDataType(DataType::Int32);
         mySlice->getOperator()->setBackend("cpu");
         mySlice->forward();
@@ -83,10 +95,16 @@ TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
                 }
             }
         });
+        std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,3>{{0,1,4}});
+        std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,3>{{1,2,7}});
+        std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,3>{{0,1,2}});
 
-        std::shared_ptr<Node> mySlice = Slice({0,1,4}, {0,1,6}, {0,1,2});
+        std::shared_ptr<Node> mySlice = Slice();
         auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
         mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->associateInput(1,starts);
+        mySlice->getOperator()->associateInput(2,ends);
+        mySlice->getOperator()->associateInput(3,axes);
         mySlice->getOperator()->setDataType(DataType::Int32);
         mySlice->getOperator()->setBackend("cpu");
         mySlice->forward();
@@ -145,8 +163,61 @@ TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
                 }
             }
         });
+        std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,4>{{0,0,0,0}});
+        std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,4>{{2,2,2,10}});
+        std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,4>{{0,1,2,3}});
 
-        std::shared_ptr<Node> mySlice = Slice({0,0,0,0}, {1,1,1,9}, {0,1,2,3});
+        std::shared_ptr<Node> mySlice = Slice();
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
+        mySlice->getOperator()->associateInput(0,input0);
+        mySlice->getOperator()->associateInput(1,starts);
+        mySlice->getOperator()->associateInput(2,ends);
+        mySlice->getOperator()->associateInput(3,axes);
+        mySlice->getOperator()->setDataType(DataType::Int32);
+        mySlice->getOperator()->setBackend("cpu");
+        mySlice->forward();
+        // mySlice->getOperator()->output(0).print();
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
+        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
+    }
+
+    SECTION("Attributes instead of inputs") {
+        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
+            {
+                {
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    }
+                },
+                {
+                    {
+                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
+                    },
+                    {
+                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
+                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,1,1,1,5> {
+            {
+                {
+                    {
+                        { 0, 1, 2,-3, 4}
+                    }
+                }
+            }
+        });
+
+        std::shared_ptr<Node> mySlice = Slice({0,0,0,0}, {1,1,1,5}, {0,1,2,3});
         auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
         mySlice->getOperator()->associateInput(0,input0);
         mySlice->getOperator()->setDataType(DataType::Int32);