diff --git a/.gitignore b/.gitignore
index f37378e300efeb5362882eb8d6eb59f028563a0e..306c9d2f5409bdf2a003e63b795885f268af391a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,16 +1,26 @@
+# General
+.cache
+
 # C++ Build
 build*/
 install*/
+cppcheck-result.xml
 
 # VSCode
 .vscode
 
-# Python
+## Python
+# build/packaging artifacts
 *.so
 __pycache__
 *.pyc
-*.egg-info
 dist*/
+*.egg-info
+wheelhouse/*
+aidge_core/_version.py
+# test artifact
+aidge_core/dummy_export/*
+*xmlrunner-results.xml
 
 # Mermaid
 *.mmd
@@ -19,4 +29,4 @@ dist*/
 xml*/
 
 # ONNX
-*.onnx
\ No newline at end of file
+*.onnx
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 3efb308fa0f78dce35973ccb47d1303d7c8634af..62219b39769d32978f44b983632fd8a117d04205 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,21 +1,34 @@
 ###############################################################################
-#            Aidge Continious Integration and Continious Deployment            #
+#                 Aidge Continuous Integration and Deployment                 #
 #                                                                             #
 ###############################################################################
 
 stages:
-  # Analyse code
   - static_analysis
-  # Build Aidge
   - build
-  # Unit test stage
   - test
-  # Code coverage
   - coverage
+  - release
+  - deploy
 
 include:
-  - local: '/.gitlab/ci/_global.gitlab-ci.yml'
-  - local: '/.gitlab/ci/static_analysis.gitlab-ci.yml'
-  - local: '/.gitlab/ci/build.gitlab-ci.yml'
-  - local: '/.gitlab/ci/test.gitlab-ci.yml'
-  - local: '/.gitlab/ci/coverage.gitlab-ci.yml'
+  - project: 'eclipse/aidge/gitlab_shared_files'
+    ref: 'main'
+    file: 
+      # choose which jobs to run by including the corresponding files.
+      - '.gitlab/ci/ubuntu_cpp.gitlab-ci.yml'
+
+      - '.gitlab/ci/ubuntu_python.gitlab-ci.yml'
+      - '.gitlab/ci/release/cibuildwheel_ubuntu.gitlab-ci.yml'   
+
+      - '.gitlab/ci/windows_cpp.gitlab-ci.yml'
+
+      - '.gitlab/ci/windows_python.gitlab-ci.yml'   
+      - '.gitlab/ci/release/cibuildwheel_windows.gitlab-ci.yml'
+
+# Required because test_export cannot run in parallel in the test and coverage jobs
+coverage:ubuntu_python:
+  needs:
+    - build:ubuntu_python
+    - test:ubuntu_python
diff --git a/.gitlab/ci/_global.gitlab-ci.yml b/.gitlab/ci/_global.gitlab-ci.yml
deleted file mode 100644
index 94e5658ff6adc8e07036d3d59ea39a68fbddc4bf..0000000000000000000000000000000000000000
--- a/.gitlab/ci/_global.gitlab-ci.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-################################################################################
-# Centralized definitions of common job parameter values.                      #
-# Parameters with many optional configurations may be in separate files.       #
-#                                                                              #
-################################################################################
-variables:
-  GIT_SUBMODULE_STRATEGY: recursive
-  OMP_NUM_THREADS: 4
-  GIT_SSL_NO_VERIFY: 1
-  DEBIAN_FRONTEND: noninteractive
-
-# See https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
-workflow:
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-    - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
-      when: never
-    - if: $CI_COMMIT_BRANCH
-
-default:
-  image: nvidia/cuda:12.2.0-devel-ubuntu22.04
-  before_script:
-    - apt update
-    - apt install -y cmake cppcheck python-is-python3 pip git gcovr
diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
deleted file mode 100644
index a4579e2951ccbafc4335ae428c62eba94c0757e5..0000000000000000000000000000000000000000
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ /dev/null
@@ -1,154 +0,0 @@
-build:ubuntu_cpp:
-  stage: build
-  needs: []
-  tags:
-    - docker
-
-  script:
-    - mkdir -p build_cpp
-    - mkdir -p install_cpp
-    - cd build_cpp
-    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
-    - make -j4 all install
-
-  artifacts:
-    expire_in: 1 week
-    paths:
-      - build_cpp/
-      - install_cpp/
-
-build:ubuntu_cpp_g++10:
-  stage: build
-  needs: []
-  tags:
-    - docker
-
-  script:
-    - apt install -y g++-10
-    - mkdir -p build_cpp
-    - mkdir -p install_cpp
-    - cd build_cpp
-    - export CXX=/usr/bin/g++-10
-    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
-    - make -j4 all install
-
-build:ubuntu_cpp_g++12:
-  stage: build
-  needs: []
-  tags:
-    - docker
-
-  script:
-    - apt install -y g++-12
-    - mkdir -p build_cpp
-    - mkdir -p install_cpp
-    - cd build_cpp
-    - export CXX=/usr/bin/g++-12
-    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
-    - make -j4 all install
-
-build:ubuntu_cpp_clang12:
-  stage: build
-  needs: []
-  tags:
-    - docker
-
-  script:
-    - apt install -y clang-12
-    - mkdir -p build_cpp
-    - mkdir -p install_cpp
-    - cd build_cpp
-    - export CXX=/usr/bin/clang++-12
-    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
-    - make -j4 all install
-
-build:ubuntu_cpp_clang15:
-  stage: build
-  needs: []
-  tags:
-    - docker
-
-  script:
-    - apt install -y clang-15
-    - mkdir -p build_cpp
-    - mkdir -p install_cpp
-    - cd build_cpp
-    - export CXX=/usr/bin/clang++-15
-    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug -DWERROR=ON -DCOVERAGE=ON ..
-    - make -j4 all install
-
-build:ubuntu_python:
-  stage: build
-  needs: []
-  tags:
-    - docker
-
-  script:
-    - python3 -m pip install virtualenv
-    - virtualenv venv
-    - source venv/bin/activate
-    # Numpy dependancy for unit test
-    - python3 -m pip install -r requirements.txt
-    - python3 -m pip install .
-  artifacts:
-    expire_in: 1 week
-    paths:
-      - venv/
-
-build:windows_cpp:
-  stage: build
-  needs: []
-  tags:
-    - windows
-
-  image: buildtools
-  before_script:
-    # Install Chocolatey
-    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-    # Install dependencies
-    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-    - choco install git -Y
-    - choco install python -Y
-    # Update PATH
-    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-  script:
-    - mkdir -p build_cpp
-    - mkdir -p install_cpp
-    - cd build_cpp
-    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
-    - cmake --build . -j2
-    - cmake --install . --config Debug
-
-  artifacts:
-    expire_in: 1 week
-    paths:
-      - build_cpp/
-      - install_cpp/
-
-build:windows_python:
-  stage: build
-  needs: []
-  tags:
-    - windows
-
-  image: buildtools
-  before_script:
-    # Install Chocolatey
-    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-    # Install dependencies
-    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-    - choco install git -Y
-    - choco install python -Y
-    # Update PATH
-    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-  script:
-    - python -m pip install virtualenv
-    - virtualenv venv
-    - venv\Scripts\Activate.ps1
-    # Numpy dependancy for unit test
-    - python -m pip install -r requirements.txt
-    - python -m pip install .
-  artifacts:
-    expire_in: 1 week
-    paths:
-      - venv/
diff --git a/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1 b/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1
new file mode 100644
index 0000000000000000000000000000000000000000..c2715ea5550432838d3cc8692e97204b278d2c85
--- /dev/null
+++ b/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1
@@ -0,0 +1,23 @@
+$ErrorActionPreference = "Stop"
+
+# Retrieve and clean the dependencies string from the environment variable
+# (filter out empty entries so an unset variable yields an empty list)
+$AIDGE_DEPENDENCIES = @($env:AIDGE_DEPENDENCIES -split ' ' | Where-Object { $_ })
+Write-Host "Aidge dependencies : $AIDGE_DEPENDENCIES"
+if ($AIDGE_DEPENDENCIES.Length -eq 0) {
+    Write-Host "- No dependencies provided for current repository"
+    New-Item -ItemType Directory -Force -Path ".\build" | Out-Null
+    Remove-Item -Path ".\build\*" -Recurse -Force
+} else {
+    Write-Host "Retrieving given dependencies to build current package : $AIDGE_DEPENDENCIES"
+    foreach ($dep in $AIDGE_DEPENDENCIES) {
+        Write-Host "Retrieving : $dep"
+        $curr_loc = Get-Location
+        Set-Location ../$dep
+        Get-Location
+        Get-ChildItem .
+        # Recreate an empty build directory so the build starts from scratch
+        New-Item -Path ".\build" -ItemType Directory -Force | Out-Null
+        Get-ChildItem -Path ".\build" -File | Remove-Item -Force
+        python -m pip install . -v
+        Set-Location $curr_loc
+    }
+}
diff --git a/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.sh b/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0303db5f056772d9f6227bf7a8b7910c2572ea1b
--- /dev/null
+++ b/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+set -e
+if [[ "$1" == "" ]]; then 
+  echo "build aidge deps in cibuildwheel container before building wheel."
+  echo "search path defines where the dependencies will be searched."
+  echo "Hint : In wheel containers, files are mounted on /host by default."
+  echo "\nusage : ./cibuildwheel_build_deps_before_build_wheel.sh $search_path"
+fi
+set -x
+if [[ "$AIDGE_DEPENDENCIES" == "" ]]; then # case for aidge_core itself
+  mkdir -p build # create build/ if it does not already exist, to hold the C++ build
+  rm -rf build/* # build from scratch
+else 
+  for repo in $AIDGE_DEPENDENCIES ; do # case for other projects
+    search_path=$1
+    REPO_PATH=$(find "$search_path" ! -writable -prune -o  -type d \
+                                    -name "$repo"                    \
+                                    -not -path "*/install/*"         \
+                                    -not -path "*/.git/*"            \
+                                    -not -path "*/miniconda/*"       \
+                                    -not -path "*/conda/*"           \
+                                    -not -path "*/.local/*"          \
+                                    -not -path "*/lib/*"             \
+                                    -not -path "*/$repo/$repo/*"     \
+                                    -not -path "*/proc/*"           \
+                                    -print -quit)
+    if [[ -z "$REPO_PATH" ]]; then 
+      echo "ERROR : dependency $repo not found in search_path \"$search_path\". ABORTING."
+      exit -1
+    fi
+
+    cd "$REPO_PATH"
+    mkdir -p build # create build/ if it does not already exist, to hold the C++ build
+    rm -rf build/* # build from scratch
+    pip install . -v
+    cd -
+  done
+fi
+set +x
+set +e
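
For readers more at home in Python, here is an illustrative sketch (not part of the repository) of the same dependency lookup the shell script performs, using `pathlib`; the excluded path fragments mirror the `find` invocation above:

```python
from pathlib import Path

# Path fragments pruned by the find command above
EXCLUDED_PARTS = {"install", ".git", "miniconda", "conda", ".local", "lib", "proc"}

def find_repo(search_path: str, repo: str) -> Path:
    """Return the first directory named `repo` below `search_path`,
    skipping the locations the shell script prunes."""
    for candidate in sorted(Path(search_path).rglob(repo)):
        if not candidate.is_dir():
            continue
        parts = candidate.parts
        if EXCLUDED_PARTS.intersection(parts[:-1]):
            continue  # lives inside an excluded directory
        if parts.count(repo) > 1:
            continue  # nested <repo>/<repo> source layout
        return candidate
    raise FileNotFoundError(f"dependency {repo} not found in {search_path}")

# e.g. find_repo("/host", "aidge_core")
```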
diff --git a/.gitlab/ci/coverage.gitlab-ci.yml b/.gitlab/ci/coverage.gitlab-ci.yml
deleted file mode 100644
index 3c7b7654190e0768adc6a904f1cb548f020b0c92..0000000000000000000000000000000000000000
--- a/.gitlab/ci/coverage.gitlab-ci.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-coverage:ubuntu_cpp:
-  stage: coverage
-  needs: ["build:ubuntu_cpp"]
-  tags:
-    - docker
-  script:
-    - cd build_cpp
-    - ctest --output-on-failure
-    - gcovr --xml-pretty --exclude-unreachable-branches --print-summary -o coverage.xml --root ${CI_PROJECT_DIR} --filter '\.\./include/' --filter '\.\./src/'
-  coverage: /^\s*lines:\s*\d+.\d+\%/
-  artifacts:
-    name: ${CI_JOB_NAME}-${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA}
-    expire_in: 2 days
-    reports:
-      coverage_report:
-        coverage_format: cobertura
-        path: build_cpp/coverage.xml
-
-coverage:ubuntu_python:
-  stage: coverage
-  needs: ["build:ubuntu_python"]
-  tags:
-    - docker
-  script:
-    - source venv/bin/activate
-    - python3 -m pip install numpy coverage
-    - cd ${CI_PROJECT_NAME}
-    # Retrieve the installation path of the module, since it is installed with pip.
-    - export MODULE_LOCATION=`python -c "import ${CI_PROJECT_NAME} as _; print(_.__path__[0])"`
-    - python3 -m coverage run --source=$MODULE_LOCATION -m unittest discover -s unit_tests/ -v -b
-    - python3 -m coverage report
-    - python3 -m coverage xml
-  coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
-  artifacts:
-    reports:
-      coverage_report:
-        coverage_format: cobertura
-        path: ${CI_PROJECT_NAME}/coverage.xml
diff --git a/.gitlab/ci/static_analysis.gitlab-ci.yml b/.gitlab/ci/static_analysis.gitlab-ci.yml
deleted file mode 100644
index 3955b87d4efdd9b3610b661779ab9709320754f2..0000000000000000000000000000000000000000
--- a/.gitlab/ci/static_analysis.gitlab-ci.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-static_analysis:cpp:
-  stage: static_analysis
-  tags:
-    - static_analysis
-  allow_failure: true
-  script:
-    - mkdir -p $CI_COMMIT_REF_NAME
-    - cppcheck -j 4 --enable=all --inconclusive --force --xml --xml-version=2 . 2> cppcheck-result.xml
-    - python -m pip install Pygments
-    - cppcheck-htmlreport --file=cppcheck-result.xml --report-dir=$CI_COMMIT_REF_NAME --source-dir=.
-    - python3 -m pip install -U cppcheck_codequality
-    - cppcheck-codequality --input-file=cppcheck-result.xml --output-file=cppcheck.json
-    - mkdir -p public/cpp
-    - mv $CI_COMMIT_REF_NAME public/cpp/
-  artifacts:
-    paths: 
-      - public
-    reports:
-      codequality: cppcheck.json
-
-static_analysis:python:
-  stage: static_analysis
-  tags:
-    - static_analysis
-  allow_failure: true
-  script:
-    - pip install pylint
-    - pip install pylint-gitlab
-    - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabCodeClimateReporter ${CI_PROJECT_NAME}/ > codeclimate.json
-    - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabPagesHtmlReporter ${CI_PROJECT_NAME}/ > pylint.html
-    - mkdir -p public/python/$CI_COMMIT_REF_NAME
-    - mv pylint.html public/python/$CI_COMMIT_REF_NAME/
-  artifacts:
-    paths:
-      - public
-    reports:
-      codequality: codeclimate.json
\ No newline at end of file
diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml
deleted file mode 100644
index 81e6ca9ac5b868287aa0ef27040c0ead785d3639..0000000000000000000000000000000000000000
--- a/.gitlab/ci/test.gitlab-ci.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-test:ubuntu_cpp:
-  stage: test
-  needs: ["build:ubuntu_cpp"]
-  tags:
-    - docker
-  script:
-    - cd build_cpp
-    - ctest --output-junit ctest-results.xml --output-on-failure
-  artifacts:
-    reports:
-      junit: build_cpp/ctest-results.xml
-
-test:ubuntu_python:
-  stage: test
-  needs: ["build:ubuntu_python"]
-  tags:
-    - docker
-  script:
-    - source venv/bin/activate
-    - cd ${CI_PROJECT_NAME}
-    - python3 -m pip install unittest-xml-reporting
-    - python3 -m pip list
-    # Run on discovery all tests located in core/unit_tests/python
-    - python3 -m xmlrunner discover -s unit_tests/ -v -b --output-file xmlrunner-results.xml
-  artifacts:
-    reports:
-      junit: ${CI_PROJECT_NAME}/xmlrunner-results.xml
-
-test:windows_cpp:
-  stage: test
-  needs: ["build:windows_cpp"]
-  tags:
-    - windows
-  image: buildtools
-  before_script:
-    # Install Chocolatey
-    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-    # Install dependencies
-    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-    - choco install python -Y
-    # Update PATH
-    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-  script:
-    - cd build_cpp
-    - ctest --output-junit ctest-results.xml --output-on-failure
-  artifacts:
-    reports:
-      junit: build_cpp/ctest-results.xml
diff --git a/.pylintrc b/.pylintrc
deleted file mode 100644
index 03c0cf31f3e63bcae09a45e9a8e6694a78d2f4b1..0000000000000000000000000000000000000000
--- a/.pylintrc
+++ /dev/null
@@ -1,644 +0,0 @@
-[MASTER]
-
-# A comma-separated list of package or module names from where C extensions may
-# be loaded. Extensions are loading into the active Python interpreter and may
-# run arbitrary code.
-extension-pkg-allow-list= aidge_core, torch, tensorflow
-
-# A comma-separated list of package or module names from where C extensions may
-# be loaded. Extensions are loading into the active Python interpreter and may
-# run arbitrary code. (This is an alternative name to extension-pkg-allow-list
-# for backward compatibility.)
-extension-pkg-whitelist=
-
-# Return non-zero exit code if any of these messages/categories are detected,
-# even if score is above --fail-under value. Syntax same as enable. Messages
-# specified are enabled, while categories only check already-enabled messages.
-fail-on=
-
-# Specify a score threshold to be exceeded before program exits with error.
-fail-under=0.0
-
-# Files or directories to be skipped. They should be base names, not paths.
-ignore=CVS
-
-# Add files or directories matching the regex patterns to the ignore-list. The
-# regex matches against paths.
-ignore-paths=
-
-# Files or directories matching the regex patterns are skipped. The regex
-# matches against base names, not paths.
-ignore-patterns=
-
-# Python code to execute, usually for sys.path manipulation such as
-# pygtk.require().
-#init-hook=
-
-# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
-# number of processors available to use.
-jobs=1
-
-# Control the amount of potential inferred values when inferring a single
-# object. This can help the performance when dealing with large functions or
-# complex, nested conditions.
-limit-inference-results=100
-
-# List of plugins (as comma separated values of python module names) to load,
-# usually to register additional checkers.
-load-plugins=
-
-# Pickle collected data for later comparisons.
-persistent=yes
-
-# When enabled, pylint would attempt to guess common misconfiguration and emit
-# user-friendly hints instead of false-positive error messages.
-suggestion-mode=yes
-
-# Allow loading of arbitrary C extensions. Extensions are imported into the
-# active Python interpreter and may run arbitrary code.
-unsafe-load-any-extension=no
-
-
-[MESSAGES CONTROL]
-
-# Only show warnings with the listed confidence levels. Leave empty to show
-# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
-confidence=
-
-# Disable the message, report, category or checker with the given id(s). You
-# can either give multiple identifiers separated by comma (,) or put this
-# option multiple times (only on the command line, not in the configuration
-# file where it should appear only once). You can also use "--disable=all" to
-# disable everything first and then reenable specific checks. For example, if
-# you want to run only the similarities checker, you can use "--disable=all
-# --enable=similarities". If you want to run only the classes checker, but have
-# no Warning level messages displayed, use "--disable=all --enable=classes
-# --disable=W".
-disable=print-statement,
-        parameter-unpacking,
-        unpacking-in-except,
-        old-raise-syntax,
-        backtick,
-        long-suffix,
-        old-ne-operator,
-        old-octal-literal,
-        import-star-module-level,
-        non-ascii-bytes-literal,
-        raw-checker-failed,
-        bad-inline-option,
-        locally-disabled,
-        file-ignored,
-        suppressed-message,
-        useless-suppression,
-        deprecated-pragma,
-        use-symbolic-message-instead,
-        apply-builtin,
-        basestring-builtin,
-        buffer-builtin,
-        cmp-builtin,
-        coerce-builtin,
-        execfile-builtin,
-        file-builtin,
-        long-builtin,
-        raw_input-builtin,
-        reduce-builtin,
-        standarderror-builtin,
-        unicode-builtin,
-        xrange-builtin,
-        coerce-method,
-        delslice-method,
-        getslice-method,
-        setslice-method,
-        no-absolute-import,
-        old-division,
-        dict-iter-method,
-        dict-view-method,
-        next-method-called,
-        metaclass-assignment,
-        indexing-exception,
-        raising-string,
-        reload-builtin,
-        oct-method,
-        hex-method,
-        nonzero-method,
-        cmp-method,
-        input-builtin,
-        round-builtin,
-        intern-builtin,
-        unichr-builtin,
-        map-builtin-not-iterating,
-        zip-builtin-not-iterating,
-        range-builtin-not-iterating,
-        filter-builtin-not-iterating,
-        using-cmp-argument,
-        eq-without-hash,
-        div-method,
-        idiv-method,
-        rdiv-method,
-        exception-message-attribute,
-        invalid-str-codec,
-        sys-max-int,
-        bad-python3-import,
-        deprecated-string-function,
-        deprecated-str-translate-call,
-        deprecated-itertools-function,
-        deprecated-types-field,
-        next-method-defined,
-        dict-items-not-iterating,
-        dict-keys-not-iterating,
-        dict-values-not-iterating,
-        deprecated-operator-function,
-        deprecated-urllib-function,
-        xreadlines-attribute,
-        deprecated-sys-function,
-        exception-escape,
-        comprehension-escape,
-        c-extension-no-member,
-        too-many-locals,
-        missing-class-docstring,
-        missing-function-docstring,
-        too-many-ancestor,
-        too-many-arguments,
-        protected-access,
-        too-many-branches,
-        too-many-ancestors,
-        wrong-import-order,
-        wrong-import-position,
-
-# Enable the message, report, category or checker with the given id(s). You can
-# either give multiple identifier separated by comma (,) or put this option
-# multiple time (only on the command line, not in the configuration file where
-# it should appear only once). See also the "--disable" option for examples.
-enable=c-extension-no-member
-
-
-[REPORTS]
-
-# Python expression which should return a score less than or equal to 10. You
-# have access to the variables 'error', 'warning', 'refactor', and 'convention'
-# which contain the number of messages in each category, as well as 'statement'
-# which is the total number of statements analyzed. This score is used by the
-# global evaluation report (RP0004).
-evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
-
-# Template used to display messages. This is a python new-style format string
-# used to format the message information. See doc for all details.
-#msg-template=
-
-# Set the output format. Available formats are text, parseable, colorized, json
-# and msvs (visual studio). You can also give a reporter class, e.g.
-# mypackage.mymodule.MyReporterClass.
-output-format=text
-
-# Tells whether to display a full report or only the messages.
-reports=no
-
-# Activate the evaluation score.
-score=yes
-
-
-[REFACTORING]
-
-# Maximum number of nested blocks for function / method body
-max-nested-blocks=5
-
-# Complete name of functions that never returns. When checking for
-# inconsistent-return-statements if a never returning function is called then
-# it will be considered as an explicit return statement and no message will be
-# printed.
-never-returning-functions=sys.exit,argparse.parse_error
-
-
-[BASIC]
-
-# Naming style matching correct argument names.
-argument-naming-style=snake_case
-
-# Regular expression matching correct argument names. Overrides argument-
-# naming-style.
-#argument-rgx=
-
-# Naming style matching correct attribute names.
-attr-naming-style=snake_case
-
-# Regular expression matching correct attribute names. Overrides attr-naming-
-# style.
-#attr-rgx=
-
-# Bad variable names which should always be refused, separated by a comma.
-bad-names=foo,
-          bar,
-          baz,
-          toto,
-          tutu,
-          tata
-
-# Bad variable names regexes, separated by a comma. If names match any regex,
-# they will always be refused
-bad-names-rgxs=
-
-# Naming style matching correct class attribute names.
-class-attribute-naming-style=any
-
-# Regular expression matching correct class attribute names. Overrides class-
-# attribute-naming-style.
-#class-attribute-rgx=
-
-# Naming style matching correct class constant names.
-class-const-naming-style=UPPER_CASE
-
-# Regular expression matching correct class constant names. Overrides class-
-# const-naming-style.
-#class-const-rgx=
-
-# Naming style matching correct class names.
-class-naming-style=PascalCase
-
-# Regular expression matching correct class names. Overrides class-naming-
-# style.
-#class-rgx=
-
-# Naming style matching correct constant names.
-const-naming-style=UPPER_CASE
-
-# Regular expression matching correct constant names. Overrides const-naming-
-# style.
-#const-rgx=
-
-# Minimum line length for functions/classes that require docstrings, shorter
-# ones are exempt.
-docstring-min-length=-1
-
-# Naming style matching correct function names.
-function-naming-style=snake_case
-
-# Regular expression matching correct function names. Overrides function-
-# naming-style.
-#function-rgx=
-
-# Good variable names which should always be accepted, separated by a comma.
-good-names=i,
-           j,
-           k,
-           ex,
-           Run,
-           _,
-
-# Good variable names regexes, separated by a comma. If names match any regex,
-# they will always be accepted
-good-names-rgxs=
-
-# Include a hint for the correct naming format with invalid-name.
-include-naming-hint=no
-
-# Naming style matching correct inline iteration names.
-inlinevar-naming-style=any
-
-# Regular expression matching correct inline iteration names. Overrides
-# inlinevar-naming-style.
-#inlinevar-rgx=
-
-# Naming style matching correct method names.
-method-naming-style=snake_case
-
-# Regular expression matching correct method names. Overrides method-naming-
-# style.
-#method-rgx=
-
-# Naming style matching correct module names.
-module-naming-style=snake_case
-
-# Regular expression matching correct module names. Overrides module-naming-
-# style.
-#module-rgx=
-
-# Colon-delimited sets of names that determine each other's naming style when
-# the name regexes allow several styles.
-name-group=
-
-# Regular expression which should only match function or class names that do
-# not require a docstring.
-no-docstring-rgx=^_
-
-# List of decorators that produce properties, such as abc.abstractproperty. Add
-# to this list to register other decorators that produce valid properties.
-# These decorators are taken in consideration only for invalid-name.
-property-classes=abc.abstractproperty
-
-# Naming style matching correct variable names.
-variable-naming-style=snake_case
-
-# Regular expression matching correct variable names. Overrides variable-
-# naming-style.
-#variable-rgx=
-
-
-[FORMAT]
-
-# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
-expected-line-ending-format=
-
-# Regexp for a line that is allowed to be longer than the limit.
-ignore-long-lines=^\s*(# )?<?https?://\S+>?$
-
-# Number of spaces of indent required inside a hanging or continued line.
-indent-after-paren=4
-
-# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
-# tab).
-indent-string='    '
-
-# Maximum number of characters on a single line.
-max-line-length=200
-
-# Maximum number of lines in a module.
-max-module-lines=1000
-
-# Allow the body of a class to be on the same line as the declaration if body
-# contains single statement.
-single-line-class-stmt=no
-
-# Allow the body of an if to be on the same line as the test if there is no
-# else.
-single-line-if-stmt=no
-
-
-[LOGGING]
-
-# The type of string formatting that logging methods do. `old` means using %
-# formatting, `new` is for `{}` formatting.
-logging-format-style=old
-
-# Logging modules to check that the string format arguments are in logging
-# function parameter format.
-logging-modules=logging
-
-
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,
-      XXX,
-      TODO
-
-# Regular expression of note tags to take in consideration.
-#notes-rgx=
-
-
-[SIMILARITIES]
-
-# Comments are removed from the similarity computation
-ignore-comments=yes
-
-# Docstrings are removed from the similarity computation
-ignore-docstrings=yes
-
-# Imports are removed from the similarity computation
-ignore-imports=no
-
-# Signatures are removed from the similarity computation
-ignore-signatures=no
-
-# Minimum lines number of a similarity.
-min-similarity-lines=4
-
-
-[SPELLING]
-
-# Limits count of emitted suggestions for spelling mistakes.
-max-spelling-suggestions=4
-
-# Spelling dictionary name. Available dictionaries: none. To make it work,
-# install the 'python-enchant' package.
-spelling-dict=
-
-# List of comma separated words that should be considered directives if they
-# appear and the beginning of a comment and should not be checked.
-spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:
-
-# List of comma separated words that should not be checked.
-spelling-ignore-words=
-
-# A path to a file that contains the private dictionary; one word per line.
-spelling-private-dict-file=
-
-# Tells whether to store unknown words to the private dictionary (see the
-# --spelling-private-dict-file option) instead of raising a message.
-spelling-store-unknown-words=no
-
-
-[STRING]
-
-# This flag controls whether inconsistent-quotes generates a warning when the
-# character used as a quote delimiter is used inconsistently within a module.
-check-quote-consistency=no
-
-# This flag controls whether the implicit-str-concat should generate a warning
-# on implicit string concatenation in sequences defined over several lines.
-check-str-concat-over-line-jumps=no
-
-
-[TYPECHECK]
-
-# List of decorators that produce context managers, such as
-# contextlib.contextmanager. Add to this list to register other decorators that
-# produce valid context managers.
-contextmanager-decorators=contextlib.contextmanager
-
-# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E1101 when accessed. Python regular
-# expressions are accepted.
-generated-members=
-
-# Tells whether missing members accessed in mixin class should be ignored. A
-# mixin class is detected if its name ends with "mixin" (case insensitive).
-ignore-mixin-members=yes
-
-# Tells whether to warn about missing members when the owner of the attribute
-# is inferred to be None.
-ignore-none=yes
-
-# This flag controls whether pylint should warn about no-member and similar
-# checks whenever an opaque object is returned when inferring. The inference
-# can return multiple potential results while evaluating a Python object, but
-# some branches might not be evaluated, which results in partial inference. In
-# that case, it might be useful to still emit no-member and other checks for
-# the rest of the inferred objects.
-ignore-on-opaque-inference=yes
-
-# List of class names for which member attributes should not be checked (useful
-# for classes with dynamically set attributes). This supports the use of
-# qualified names.
-ignored-classes=optparse.Values,
-                thread._local,
-                _thread._local,
-                aidge.global_variables,
-                aidge.cells.abstract_cell.Trainable,
-                torch,
-                tensorflow,
-
-# List of module names for which member attributes should not be checked
-# (useful for modules/projects where namespaces are manipulated during runtime
-# and thus existing member attributes cannot be deduced by static analysis). It
-# supports qualified module names, as well as Unix pattern matching.
-ignored-modules= aidge_core
-
-# Show a hint with possible names when a member name was not found. The aspect
-# of finding the hint is based on edit distance.
-missing-member-hint=yes
-
-# The minimum edit distance a name should have in order to be considered a
-# similar match for a missing member name.
-missing-member-hint-distance=1
-
-# The total number of similar names that should be taken in consideration when
-# showing a hint for a missing member.
-missing-member-max-choices=1
-
-# List of decorators that change the signature of a decorated function.
-signature-mutators=
-
-
-[VARIABLES]
-
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid defining new builtins when possible.
-additional-builtins=
-
-# Tells whether unused global variables should be treated as a violation.
-allow-global-unused-variables=yes
-
-# List of names allowed to shadow builtins
-allowed-redefined-builtins=
-
-# List of strings which can identify a callback function by name. A callback
-# name must start or end with one of those strings.
-callbacks=cb_,
-          _cb
-
-# A regular expression matching the name of dummy variables (i.e. expected to
-# not be used).
-dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
-
-# Argument names that match this expression will be ignored. Default to name
-# with leading underscore.
-ignored-argument-names=_.*|^ignored_|^unused_
-
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
-
-# List of qualified module names which can have objects that can redefine
-# builtins.
-redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
-
-
-[CLASSES]
-
-# Warn about protected attribute access inside special methods
-check-protected-access-in-special-methods=no
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,
-                      __new__,
-                      setUp,
-                      __post_init__
-
-# List of member names, which should be excluded from the protected access
-# warning.
-exclude-protected=_asdict,
-                  _fields,
-                  _replace,
-                  _source,
-                  _make
-
-# List of valid names for the first argument in a class method.
-valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=cls
-
-
-[DESIGN]
-
-# List of qualified class names to ignore when countint class parents (see
-# R0901)
-ignored-parents=
-
-# Maximum number of arguments for function / method.
-max-args=5
-
-# Maximum number of attributes for a class (see R0902).
-max-attributes=7
-
-# Maximum number of boolean expressions in an if statement (see R0916).
-max-bool-expr=5
-
-# Maximum number of branch for function / method body.
-max-branches=12
-
-# Maximum number of locals for function / method body.
-max-locals=15
-
-# Maximum number of parents for a class (see R0901).
-max-parents=7
-
-# Maximum number of public methods for a class (see R0904).
-max-public-methods=20
-
-# Maximum number of return / yield for function / method body.
-max-returns=6
-
-# Maximum number of statements in function / method body.
-max-statements=50
-
-# Minimum number of public methods for a class (see R0903).
-min-public-methods=2
-
-
-[IMPORTS]
-
-# List of modules that can be imported at any level, not just the top level
-# one.
-allow-any-import-level=
-
-# Allow wildcard imports from modules that define __all__.
-allow-wildcard-with-all=no
-
-# Analyse import fallback blocks. This can be used to support both Python 2 and
-# 3 compatible code, which means that the block might have code that exists
-# only in one or another interpreter, leading to false positives when analysed.
-analyse-fallback-blocks=no
-
-# Deprecated modules which should not be used, separated by a comma.
-deprecated-modules=
-
-# Output a graph (.gv or any supported image format) of external dependencies
-# to the given file (report RP0402 must not be disabled).
-ext-import-graph=
-
-# Output a graph (.gv or any supported image format) of all (i.e. internal and
-# external) dependencies to the given file (report RP0402 must not be
-# disabled).
-import-graph=
-
-# Output a graph (.gv or any supported image format) of internal dependencies
-# to the given file (report RP0402 must not be disabled).
-int-import-graph=
-
-# Force import order to recognize a module as part of the standard
-# compatibility libraries.
-known-standard-library=
-
-# Force import order to recognize a module as part of a third party library.
-known-third-party=enchant
-
-# Couples of modules and preferred modules, separated by a comma.
-preferred-modules=
-
-
-[EXCEPTIONS]
-
-# Exceptions that will emit a warning when being caught. Defaults to
-# "BaseException, Exception".
-overgeneral-exceptions=BaseException,
-                       Exception
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ec6aacd723a50eba2bfed0184941410340c6a7aa..499c2971cb60f979e72419cf65b9897d0613bf0a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,21 +1,26 @@
-cmake_minimum_required(VERSION 3.15)
+cmake_minimum_required(VERSION 3.18)
+set(CXX_STANDARD 14)
 
-file(READ "${CMAKE_SOURCE_DIR}/version.txt" version)
-file(READ "${CMAKE_SOURCE_DIR}/project_name.txt" project)
+file(STRINGS "${CMAKE_SOURCE_DIR}/version.txt" version)
 
-message(STATUS "Project name: ${project}")
+project(aidge_core
+        VERSION ${version}
+        DESCRIPTION "Core algorithms for operators and graph of the AIDGE framework" 
+        LANGUAGES CXX)
+message(STATUS "Project name: ${CMAKE_PROJECT_NAME}")
 message(STATUS "Project version: ${version}")
+add_definitions(-DPROJECT_VERSION="${version}")
 
-# Note : project name is {project} and python module name is also {project}
-set(module_name _${project}) # target name
-
+message(STATUS "Project name: ${CMAKE_PROJECT_NAME}")
+message(STATUS "Project version: ${version}")
 
-project(${project})
-set(CXX_STANDARD 14)
+# Note : project name is {project} and python module name is also {project}
+set(module_name _${CMAKE_PROJECT_NAME}) # target name
+set(pybind_module_name ${CMAKE_PROJECT_NAME}) # name of submodule for python bindings
 
 ##############################################
 # Define options
-option(PYBIND "python binding" ON)
+option(PYBIND "python binding" OFF)
 option(WERROR "Warning as error" OFF)
 option(TEST "Enable tests" ON)
 option(COVERAGE "Enable coverage" OFF)
@@ -24,7 +29,6 @@ option(ENABLE_ASAN "Enable ASan (AddressSanitizer) for runtime analysis of memor
 ##############################################
 # Import utils CMakeLists
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
-include(PybindModuleCreation)
 
 if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
     Include(CodeCoverage)
@@ -34,10 +38,12 @@ endif()
 # Find system dependencies
 Include(FetchContent)
 
+set(FMT_VERSION 10.2.1)
+message(STATUS "Retrieving fmt ${FMT_VERSION} from git")
 FetchContent_Declare(
     fmt
     GIT_REPOSITORY https://github.com/fmtlib/fmt.git
-    GIT_TAG        10.2.1 # or a later release
+    GIT_TAG        ${FMT_VERSION} # or a later release
 )
 
 set(FMT_SYSTEM_HEADERS ON)
@@ -78,16 +84,27 @@ if( ${ENABLE_ASAN} )
 endif()
 
 # PYTHON BINDING
+set(AIDGE_REQUIRES_PYTHON FALSE) # Will be set if aidge_core lib depends upon python interpreter
+set(AIDGE_PYTHON_HAS_EMBED FALSE)  # Will be set if python interpreter is found on the system
 if (PYBIND)
-    generate_python_binding(${project} ${module_name})
+    # Python binding lib is by default installed in <prefix>/python_packages/<package>/
+    # When installed from python, setup.py should set it to the python package dir
+    set(PYBIND_INSTALL_PREFIX python_packages/${pybind_module_name} CACHE PATH "Python package install prefix")
 
-    # Handles Python + pybind11 headers dependencies
-    target_link_libraries(${module_name}
-        PUBLIC
-            pybind11::pybind11
-        PRIVATE
-            Python::Python
-        )
+    include(PybindModuleCreation)
+    generate_python_binding(${pybind_module_name} ${module_name})
+
+    ##
+    # As of now, when PYBIND is set, the core archive itself depends upon pybind/python,
+    # hence we define -DPYBIND and the dependencies on the pybind/python runtime where necessary.
+
+    # Add -DPYBIND to compilation and interface
+    target_compile_definitions(${module_name} PUBLIC PYBIND)
+
+    # Add dependencies on pybind/python. See details in add_pybind_dependency()
+    include(PybindDependency)
+    add_pybind_dependency(${module_name})
+    ##
 endif()
 
 target_link_libraries(${module_name} PUBLIC Threads::Threads fmt::fmt)
@@ -133,11 +150,15 @@ endif()
 
 ##############################################
 # Installation instructions
+if(NOT "$ENV{AIDGE_INSTALL}" STREQUAL "")
+    set(CMAKE_INSTALL_PREFIX $ENV{AIDGE_INSTALL})
+    message(WARNING "CMAKE_INSTALL_PREFIX set from the AIDGE_INSTALL environment variable: ${CMAKE_INSTALL_PREFIX}")
+endif()
 
 include(GNUInstallDirs)
-set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR}/cmake/${project})
+set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR}/cmake/${CMAKE_PROJECT_NAME})
 
-install(TARGETS ${module_name} EXPORT ${project}-targets
+install(TARGETS ${module_name} EXPORT ${CMAKE_PROJECT_NAME}-targets
   LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
   ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
   RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
@@ -146,10 +167,16 @@ install(TARGETS ${module_name} EXPORT ${project}-targets
 
 install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
 
+if (PYBIND)
+    install(TARGETS ${pybind_module_name}
+        DESTINATION ${PYBIND_INSTALL_PREFIX}
+    )
+endif()
+
 #Export the targets to a script
 
-install(EXPORT ${project}-targets
- FILE "${project}-targets.cmake"
+install(EXPORT ${CMAKE_PROJECT_NAME}-targets
+ FILE "${CMAKE_PROJECT_NAME}-targets.cmake"
  DESTINATION ${INSTALL_CONFIGDIR}
 #  COMPONENT ${module_name}
 )
@@ -158,32 +185,37 @@ install(EXPORT ${project}-targets
 include(CMakePackageConfigHelpers)
 
 write_basic_package_version_file(
-    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config-version.cmake"
+    "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}-config-version.cmake"
     VERSION ${version}
     COMPATIBILITY AnyNewerVersion
 )
 
-configure_package_config_file("${project}-config.cmake.in"
-    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config.cmake"
+configure_package_config_file("${CMAKE_PROJECT_NAME}-config.cmake.in"
+    "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}-config.cmake"
     INSTALL_DESTINATION ${INSTALL_CONFIGDIR}
 )
 
 #Install the config, configversion and custom find modules
 install(FILES
-    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config.cmake"
-    "${CMAKE_CURRENT_BINARY_DIR}/${project}-config-version.cmake"
+    "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}-config.cmake"
+    "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}-config-version.cmake"
     DESTINATION ${INSTALL_CONFIGDIR}
 )
 
 ##############################################
 ## Exporting from the build tree
-export(EXPORT ${project}-targets
-    FILE "${CMAKE_CURRENT_BINARY_DIR}/${project}-targets.cmake")
+message(STATUS "Exporting created targets to use them in another build")
+export(EXPORT ${CMAKE_PROJECT_NAME}-targets
+    FILE "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}-targets.cmake")
 
 
 ##############################################
 ## Add test
 if(TEST)
-    enable_testing()
-    add_subdirectory(unit_tests)
+    if (AIDGE_REQUIRES_PYTHON AND NOT AIDGE_PYTHON_HAS_EMBED)
+        message(WARNING "Skipping compilation of tests: missing Python embedded interpreter")
+    else()
+        enable_testing()
+        add_subdirectory(unit_tests)
+    endif()
 endif()
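
The new `PYBIND_INSTALL_PREFIX` cache option is meant to be driven from the python build. As a hedged sketch (only the `PYBIND` and `PYBIND_INSTALL_PREFIX` option names come from this diff; the function and paths are hypothetical), a `setup.py` build step could forward it like this:

```python
import subprocess
from pathlib import Path

def configure_cmake(source_dir: Path, build_dir: Path, package_dir: Path) -> None:
    """Hypothetical configure step: install the pybind module straight
    into the python package tree via PYBIND_INSTALL_PREFIX."""
    subprocess.check_call([
        "cmake",
        "-S", str(source_dir),
        "-B", str(build_dir),
        "-DPYBIND=ON",
        f"-DPYBIND_INSTALL_PREFIX:PATH={package_dir}",
    ])
```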
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000000000000000000000000000000000000..ae5b7c7c2e07eef97ef72bdb79cca94f8124981b
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,9 @@
+include README.md LICENCE
+recursive-include aidge_core *.py 
+recursive-exclude aidge_core/unit_tests *.py
+
+recursive-include aidge_core/aidge_export_aidge *
+recursive-include include *.hpp
+recursive-include src *.cpp
+recursive-include python_binding *.cpp
+include CMakeLists.txt
diff --git a/README.md b/README.md
index 5b07e147cb05c2fa1a6d275d567dda218b131996..fe8fd5a4252054c730be8e948d0d2e415c009d47 100644
--- a/README.md
+++ b/README.md
@@ -4,21 +4,66 @@
 
 You can find here the C++ code of the Core library of Aidge.
 
-## Pip installation
-
+[TOC]
 
+## Pip installation
 
 To install aidge_core using pip, run the following command in your python environment:
 ``` bash
 pip install . -v
 ```
+> **TIP:** Use environment variables to change compilation options:
+> - `AIDGE_INSTALL`: sets the installation folder. Defaults to `<python_prefix>/lib/libAidge`.
+> - `AIDGE_PYTHON_BUILD_TYPE`: sets the compilation mode to **Debug**, **Release**, or "" (for default flags). Defaults to **Release**.
+> - `AIDGE_BUILD_GEN`: sets the build backend (for development mode), or "" for the cmake default. Defaults to "".
+> - `AIDGE_BUILD_TEST`: builds the C++ unit tests when set to "ON". Defaults to "OFF".
+
+
+## Pip installation for development
+
+To set up aidge_core using pip in development (or editable) mode, pass the `--no-build-isolation -e` options to pip.
 
-**Note:** you can specify a custom install folder by setting an environment variable:
+For instance, run the following commands in your python environment for a typical setup:
+``` bash
+export AIDGE_BUILD_TEST=ON              # enable C++ unit tests
+export AIDGE_PYTHON_BUILD_TYPE=         # default flags (no debug info but fastest build time)
+export AIDGE_PYTHON_BUILD_TYPE=Debug    # or if one really needs to debug the C++ code
+pip install -U pip setuptools setuptools_scm[toml] cmake   # Pre-install build requirements (refer to the pyproject.toml [build-system] section)
+pip install -v --no-build-isolation -e .
+```
+
+In this configuration, python files can be modified directly without re-installation.
+
+The C++ build directory is created in `build/`, and the python bindings can be recompiled and reinstalled directly with:
+```bash
+make -C build install -j $(nproc)
+# or with cmake
+cmake --build build -j $(nproc) && cmake --install build
+```
 
+One can also use an alternate cmake build backend such as ninja, which can be installed easily through pip, for instance:
 ``` bash
-export AIDGE_INSTALL='<path_to_aidge>/install'
+pip install -U ninja
+export AIDGE_BUILD_GEN=Ninja
+pip install -v --no-build-isolation -e .
+```
+
+In this case ninja is used instead of make as the build backend, and recompilation, when needed, is done with:
+```bash
+ninja -C build install  # note that by default ninja uses the available parallelism, no need for the -j option
+# or with cmake
+cmake --build build && cmake --install build
 ```
 
+Note that the python development (or editable mode) install is not always robust to changes in the python package setup,
+or to changing the build backend with `AIDGE_BUILD_GEN`.
+To re-install when the build breaks, re-execute the commands:
+```bash
+rm -rf *.egg-info build/
+pip install -v --no-build-isolation -e .
+```
+
+
 ## Standard C++ Compilation
 
 Create two directories ``build`` and ``install``.
@@ -40,22 +85,22 @@ make all install
 |   Option   | Value type | Description |
 |:----------:|:----------:|:-----------:|
 | *-DCMAKE_INSTALL_PREFIX:PATH* | ``str``  | Path to the install folder |
-| *-DCMAKE_BUILD_TYPE*          | ``str``  | If ``Debug``, compile in debug mode, ``Release`` compile with highest optimisations, default= ``Release`` |
+| *-DCMAKE_BUILD_TYPE*          | ``str``  | If ``Debug``, compile in debug mode; if ``Release``, compile with highest optimisations; or "" (empty) for default flags. default=``Release`` |
 | *-DWERROR*                    | ``bool`` | If ``ON`` show warning as error during compilation phase, default=``OFF`` |
-| *-DPYBIND*                    | ``bool`` | If ``ON`` activate python binding, default=``ON`` |
+| *-DTEST*                      | ``bool`` | If ``ON`` build C++ unit tests, default=``ON`` |
+| *-DPYBIND*                    | ``bool`` | If ``ON`` activate python binding, default=``OFF`` |
+| *-DPYBIND_INSTALL_PREFIX:PATH*| ``str`` | Path to the python module install folder when ``-DPYBIND=ON``, defaults to ``$CMAKE_INSTALL_PREFIX/python_packages/<module>`` |
 
-If you have compiled with PyBind you can find at the root of the ``build`` file the python lib ``aidge_core.cpython*.so``
+If one compiles with ``-DPYBIND=ON``, ``-DPYBIND_INSTALL_PREFIX:PATH`` can be used to install the python module directly in the
+python source tree (for instance ``$PWD/aidge_core``). ``setup.py`` takes care of this and installs the module at the right place.
 
 ## Run tests
-
 ### CPP
 
 Inside the build directory, run:
 
 ```bash
-
 ctest --output-on-failure
-
 ```
 
 ### Python
diff --git a/aidge_core-config.cmake.in b/aidge_core-config.cmake.in
index d97afe8a2a1ca98eb862d66c388081bca7b72edc..abe55b6faef64aa61d4df4076c035ac0c5f998b4 100644
--- a/aidge_core-config.cmake.in
+++ b/aidge_core-config.cmake.in
@@ -3,6 +3,11 @@
 include(CMakeFindDependencyMacro)
 find_dependency(fmt)
 find_dependency(Threads)
+set(AIDGE_REQUIRES_PYTHON @AIDGE_REQUIRES_PYTHON@)
+set(AIDGE_PYTHON_HAS_EMBED @AIDGE_PYTHON_HAS_EMBED@)
+if (AIDGE_REQUIRES_PYTHON AND AIDGE_PYTHON_HAS_EMBED)
+    find_dependency(Python COMPONENTS Interpreter Development)
+endif()
 
 include(${CMAKE_CURRENT_LIST_DIR}/aidge_core-config-version.cmake)
 
diff --git a/aidge_core/__init__.py b/aidge_core/__init__.py
index 4b5c448355a17fd4274ba45f5cd98afa70b1ae53..652f485a9d3de6869b55613549172d49913e8509 100644
--- a/aidge_core/__init__.py
+++ b/aidge_core/__init__.py
@@ -7,6 +7,8 @@ http://www.eclipse.org/legal/epl-2.0.
 
 SPDX-License-Identifier: EPL-2.0
 """
-from aidge_core.aidge_core import * # import so generated by PyBind
-from aidge_core.export import ExportNode, generate_file, generate_str
-import aidge_core.utils
+from .aidge_core import * # import so generated by PyBind
+from .export_utils import ExportNode, generate_file, generate_str
+from .aidge_export_aidge import *
+from . import utils
+from ._version import *
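
The `_version` module imported here is generated at build time by setuptools_scm (it is also git-ignored above). As a hedged illustration, the generated file typically looks like this; the exact layout and version string depend on the setuptools_scm version and the git state:

```python
# aidge_core/_version.py -- generated by setuptools_scm, do not edit
__version__ = version = "0.1.1.dev0"                   # illustrative value
__version_tuple__ = version_tuple = (0, 1, 1, "dev0")  # illustrative value
```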
diff --git a/aidge_core/aidge_export_aidge/__init__.py b/aidge_core/aidge_export_aidge/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5d6f96b3300c0e86147baac30d7cae8a3a0b798
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/__init__.py
@@ -0,0 +1,8 @@
+from pathlib import Path
+
+# Constants
+FILE = Path(__file__).resolve()
+ROOT_EXPORT = FILE.parents[0]
+
+from .operator_export import *
+from .export import export
diff --git a/aidge_core/aidge_export_aidge/export.py b/aidge_core/aidge_export_aidge/export.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0e859b71e77bb03b95acb6ca75dcf09a8af0722
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/export.py
@@ -0,0 +1,104 @@
+import aidge_core
+import shutil
+import os
+from pathlib import Path
+from .utils import supported_operators, OPERATORS_REGISTRY
+from . import ROOT_EXPORT
+
+from aidge_core import ExportNode, generate_file
+
+
+def export(export_folder: str,
+           graph_view: aidge_core.GraphView,
+           enable_python_binding: bool = True,
+           ):
+    export_folder_path = Path(export_folder)
+    export_name = export_folder_path.name
+
+    ### Create the export folder ###
+    os.makedirs(export_folder, exist_ok=True)
+
+    ### Copy static files ###
+    shutil.copytree(ROOT_EXPORT / "static/include",
+                    export_folder_path / "include", dirs_exist_ok=True)
+    shutil.copytree(ROOT_EXPORT / "static/cmake",
+                    export_folder_path / "cmake", dirs_exist_ok=True)
+    shutil.copyfile(ROOT_EXPORT / "static/CMakeLists.txt",
+                    export_folder_path / "CMakeLists.txt")
+    shutil.copyfile(ROOT_EXPORT / "static/version.txt",
+                    export_folder_path / "version.txt")
+    shutil.copyfile(ROOT_EXPORT / "static/README.md",
+                    export_folder_path / "README.md")
+    shutil.copyfile(ROOT_EXPORT / "static/main.cpp",
+                    export_folder_path / "main.cpp")
+    shutil.copyfile(ROOT_EXPORT / "static/export-config.cmake.in",
+                    export_folder_path / f"{export_name}-config.cmake.in")
+
+    # Create project_name file
+    with open(export_folder_path / "project_name.txt", "w") as f:
+        f.write(export_name)
+
+    # Add files related to python binding if enabled
+    if enable_python_binding:
+        os.makedirs(export_folder_path / "python_binding", exist_ok=True)
+        generate_file(
+            export_folder_path / "python_binding/pybind.cpp",
+            ROOT_EXPORT / "templates/pybind.jinja",
+            name=export_name,
+        )
+        # TODO: Add a main.py file ?
+
+    ### Generate an export for each node and the dnn file ###
+    list_configs = []  # List of headers to include in dnn.cpp to access attributes and parameters
+    list_actions = []  # List of strings used to construct the graph
+    set_operator = set()
+    # Queue of Aidge nodes to explore; guarantees a topological exploration of the graph
+    open_nodes = list(graph_view.get_input_nodes())
+    # List of Aidge nodes already explored
+    closed_nodes = []
+
+    while open_nodes:
+        node = open_nodes.pop(0)
+        if node in closed_nodes:
+            continue  # Node already converted, moving on ...
+        parents_not_converted = False
+        # Check all parents have been converted
+        for parent in node.get_parents():
+            if parent is not None and \
+                    parent not in closed_nodes:
+                # If parents have not been converted, push back current node
+                if not parents_not_converted:
+                    open_nodes.insert(0, node)
+                    parents_not_converted = True
+                # Push the unconverted parent onto the stack as the next node to convert
+                open_nodes.insert(0, parent)
+        if parents_not_converted:
+            continue
+        # Next nodes to treat are children of current node
+        open_nodes += list(node.get_children())
+
+        if node.type() in supported_operators():
+            set_operator.add(node.type())
+            op = OPERATORS_REGISTRY[node.type()](node)
+
+            # TODO: list_configs and list_actions don't need to be passed by argument
+            # Export the configuration
+            list_configs = op.export(export_folder_path, list_configs)
+
+            # Add forward kernel
+            list_actions = op.forward(list_actions)
+        else:
+            raise RuntimeError(f"Operator: {node.type()} is not supported")
+        closed_nodes.append(node)
+    # Generate full dnn.cpp
+    aidge_core.generate_file(
+        export_folder_path / "src/dnn.cpp",
+        ROOT_EXPORT / "templates/dnn.jinja",
+        headers=list_configs,
+        operators=set_operator,
+        actions=list_actions,
+    )
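
The while-loop above is a parent-first (topological) traversal: a node is emitted only once all of its parents have been converted; otherwise it is pushed back behind them. A condensed sketch of the same idea, assuming a hypothetical minimal node type that mirrors the `get_parents`/`get_children` calls used above:

```python
def topological_order(input_nodes):
    """Condensed sketch of the traversal above (illustrative only)."""
    open_nodes, closed = list(input_nodes), []
    while open_nodes:
        node = open_nodes.pop(0)
        if node in closed:
            continue
        pending = [p for p in node.get_parents()
                   if p is not None and p not in closed]
        if pending:
            # Revisit this node only after its unconverted parents
            open_nodes[:0] = pending + [node]
            continue
        closed.append(node)
        open_nodes += list(node.get_children())
    return closed
```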
diff --git a/aidge_core/aidge_export_aidge/operator_export/__init__.py b/aidge_core/aidge_export_aidge/operator_export/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..37d674ac84f72d643ba1a628a86fbcde9780f4a4
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/__init__.py
@@ -0,0 +1,14 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+from pathlib import Path
+
+DIR_PATH = Path(__file__).parent
+modules = [Path(module).stem for module in DIR_PATH.glob("*.py")]
+__all__ = [ f for f in modules if f != "__init__"]
diff --git a/aidge_core/aidge_export_aidge/operator_export/conv.py b/aidge_core/aidge_export_aidge/operator_export/conv.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb7092fb18982a3cc3f11a1ca47394ce2f77d0b6
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/conv.py
@@ -0,0 +1,31 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import ExportNode, generate_file, generate_str
+from pathlib import Path
+
+@operator_register("Conv")
+class Conv(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        include_path = f"attributes/{self.name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/attributes/conv.jinja",
+            name=self.name,
+            **self.attributes
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/conv.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/fc.py b/aidge_core/aidge_export_aidge/operator_export/fc.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcd528528707dc6eec917790b46e509c2984fa66
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/fc.py
@@ -0,0 +1,37 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import ExportNode, generate_file, generate_str
+from pathlib import Path
+
+@operator_register("FC")
+class FC(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        include_path = f"attributes/{self.name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/attributes/fc.jinja",
+            name=self.name,
+            InChannels=self.inputs_dims[1][1],
+            OutChannels=self.operator.out_channels(),
+            **self.attributes
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/fc.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/maxpooling.py b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c63e71b423b90f62536cafd25c61101e76e0562
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/maxpooling.py
@@ -0,0 +1,32 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import ExportNode, generate_file, generate_str
+from pathlib import Path
+
+@operator_register("MaxPooling")
+class MaxPooling(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        include_path = f"attributes/{self.name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/attributes/maxpooling.jinja",
+            name=self.name,
+            **self.attributes
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/maxpooling.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/producer.py b/aidge_core/aidge_export_aidge/operator_export/producer.py
new file mode 100644
index 0000000000000000000000000000000000000000..d082e9726b7ca33fbe6f4692bf7b55930b69cb9d
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/producer.py
@@ -0,0 +1,65 @@
+from aidge_core.aidge_export_aidge.utils import operator_register
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from aidge_core import dtype, ExportNode, generate_file, generate_str
+import numpy as np
+from pathlib import Path
+
+# Convert aidge datatype to C++ type
+datatype_converter = {
+    dtype.float64 : "double",
+    dtype.float32 : "float",
+    dtype.float16 : "half_float::half",
+    dtype.int8    : "int8_t",
+    dtype.int16   : "int16_t",
+    dtype.int32   : "int32_t",
+    dtype.int64   : "int64_t",
+    dtype.uint8   : "uint8_t",
+    dtype.uint16  : "uint16_t",
+    dtype.uint32  : "uint32_t",
+    dtype.uint64  : "uint64_t"
+}
+
+
+@operator_register("Producer")
+class Producer(ExportNode):
+    """
+    If there is a standardization of the export operators
+    then this class should simply inherit from ProducerCPP
+    """
+    def __init__(self, node):
+        super().__init__(node)
+        child, in_idx = self.node.output(0)[0]
+        self.tensor_name = f"{child.name()}_{in_idx}"
+        self.values = np.array(self.operator.get_output(0))
+
+    def export(self, export_folder:Path, list_configs:list):
+        assert len(self.node.output(0)) == 1
+
+        include_path = f"parameters/{self.tensor_name}.hpp"
+        filepath = export_folder / f"include/{include_path}"
+
+        aidge_tensor = self.operator.get_output(0)
+        aidge_type = aidge_tensor.dtype()
+        if aidge_type in datatype_converter:
+            datatype = datatype_converter[aidge_type]
+        else:
+            raise RuntimeError(f"No conversion found for data type {aidge_type}.")
+        generate_file(
+            filepath,
+            ROOT_EXPORT / "templates/parameter.jinja",
+            dims=aidge_tensor.dims(),
+            data_t=datatype,  # TODO: get data from producer
+            name=self.tensor_name,
+            values=str(aidge_tensor)
+        )
+        list_configs.append(include_path)
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/producer.jinja",
+            name=self.name,
+            tensor_name=self.tensor_name,
+            **self.attributes
+        ))
+        return list_actions
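A quick illustration of the dtype lookup defined at the top of this file; any aidge data type missing from `datatype_converter` makes `export()` raise a `RuntimeError` (sketch, assuming the module is importable at its file path):

    # Sketch: map an aidge dtype to the C++ type emitted in parameter headers.
    from aidge_core import dtype
    from aidge_core.aidge_export_aidge.operator_export.producer import datatype_converter

    print(datatype_converter[dtype.float16])  # half_float::half
    print(datatype_converter[dtype.uint8])    # uint8_t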
diff --git a/aidge_core/aidge_export_aidge/operator_export/relu.py b/aidge_core/aidge_export_aidge/operator_export/relu.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0f4f6afdc35737a8967f51c1859bda0c9773f88
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/relu.py
@@ -0,0 +1,21 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core import ExportNode, generate_str
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from pathlib import Path
+
+@operator_register("ReLU")
+class ReLU(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/relu.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/operator_export/sub.py b/aidge_core/aidge_export_aidge/operator_export/sub.py
new file mode 100644
index 0000000000000000000000000000000000000000..efcdd0924fbcf6944b0fb95a967e1a3e16ccc3c5
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/operator_export/sub.py
@@ -0,0 +1,21 @@
+from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
+from aidge_core import ExportNode, generate_str
+from aidge_core.aidge_export_aidge import ROOT_EXPORT
+from pathlib import Path
+
+@operator_register("Sub")
+class Sub(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+    def export(self, export_folder:Path, list_configs:list):
+        return list_configs
+
+    def forward(self, list_actions:list):
+        list_actions.append(generate_str(
+            ROOT_EXPORT / "templates/graph_ctor/sub.jinja",
+            name=self.name,
+            inputs=parse_node_input(self.node.inputs()),
+            **self.attributes
+        ))
+        return list_actions
diff --git a/aidge_core/aidge_export_aidge/static/CMakeLists.txt b/aidge_core/aidge_export_aidge/static/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d7fe26d9c286f72d898a21d07baae2c91d08b71a
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/CMakeLists.txt
@@ -0,0 +1,155 @@
+cmake_minimum_required(VERSION 3.18)
+set(CXX_STANDARD 14)
+
+file(STRINGS "${CMAKE_SOURCE_DIR}/project_name.txt" project_name)
+file(STRINGS "${CMAKE_SOURCE_DIR}/version.txt" version)
+
+project(${project_name}
+        VERSION ${version}
+        DESCRIPTION "Export of aidge"
+        LANGUAGES CXX)
+
+message(STATUS "Project name: ${CMAKE_PROJECT_NAME}")
+message(STATUS "Project version: ${version}")
+
+# Note: both the project name and the Python module name are ${CMAKE_PROJECT_NAME}
+set(module_name _${CMAKE_PROJECT_NAME}) # target name
+
+##############################################
+# Define options
+option(PYBIND "python binding" ON)
+option(STANDALONE "Build standalone executable" ON)
+option(WERROR "Warning as error" OFF)
+option(TEST "Enable tests" OFF)
+option(COVERAGE "Enable coverage" OFF)
+option(ENABLE_ASAN "Enable ASan (AddressSanitizer) for runtime analysis of memory use (over/underflow, memory leak, ...)" OFF)
+
+##############################################
+# Import utils CMakeLists
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake")
+
+if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
+    Include(CodeCoverage)
+endif()
+
+##############################################
+# FIND Dependencies
+if(NOT $ENV{AIDGE_INSTALL} STREQUAL "")
+    set(CMAKE_INSTALL_PREFIX $ENV{AIDGE_INSTALL})
+    list(APPEND CMAKE_PREFIX_PATH $ENV{AIDGE_INSTALL})
+    message(WARNING "Env var AIDGE_INSTALL detected: $ENV{AIDGE_INSTALL}. Setting CMAKE_INSTALL_PREFIX to AIDGE_INSTALL and appending it to CMAKE_PREFIX_PATH"
+                    "\n\tCMAKE_INSTALL_PREFIX = ${CMAKE_INSTALL_PREFIX}"
+                    "\n\tCMAKE_PREFIX_PATH = ${CMAKE_PREFIX_PATH}")
+endif()
+find_package(aidge_core REQUIRED)
+# find_package(aidge_backend_cpu REQUIRED) # example if you want to add aidge_backend_cpu as dependency to your export
+
+##############################################
+# Create target and set properties
+file(GLOB_RECURSE src_files "src/*.cpp")
+file(GLOB_RECURSE inc_files "include/*.hpp")
+
+add_library(${module_name} ${src_files} ${inc_files})
+
+target_link_libraries(${module_name}
+    PUBLIC
+        _aidge_core # _ is added because we link the exported target and not the project
+        # _aidge_backend_cpu # example if you want to add aidge_backend_cpu as dependency to your export
+)
+
+# Set target properties
+set_property(TARGET ${module_name} PROPERTY POSITION_INDEPENDENT_CODE ON)
+
+# PYTHON BINDING
+if (PYBIND)
+    include(PybindModuleCreation)
+    generate_python_binding(${CMAKE_PROJECT_NAME} ${module_name})
+endif()
+
+if(ENABLE_ASAN)
+    message("Building ${module_name} with ASAN.")
+    set(SANITIZE_FLAGS -fsanitize=address -fno-omit-frame-pointer)
+    target_link_libraries(${module_name}
+        PUBLIC
+            -fsanitize=address
+    )
+    target_compile_options(${module_name}
+        PRIVATE
+            ${SANITIZE_FLAGS}
+    )
+endif()
+
+target_include_directories(${module_name}
+    PUBLIC
+        $<INSTALL_INTERFACE:include>
+        $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+    PRIVATE
+        ${CMAKE_CURRENT_SOURCE_DIR}/src
+)
+
+target_compile_features(${module_name} PRIVATE cxx_std_14)
+
+target_compile_options(${module_name} PRIVATE
+    $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
+    -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror>>)
+target_compile_options(${module_name} PRIVATE
+    $<$<CXX_COMPILER_ID:MSVC>:
+    /W4>)
+
+if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
+    append_coverage_compiler_flags()
+endif()
+
+##############################################
+# Installation instructions
+include(GNUInstallDirs)
+set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR}/cmake/${CMAKE_PROJECT_NAME})
+
+install(TARGETS ${module_name} EXPORT ${CMAKE_PROJECT_NAME}-targets
+  LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+  ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+  RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+)
+install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+
+# Export the targets to a script
+install(EXPORT ${CMAKE_PROJECT_NAME}-targets
+ FILE "${CMAKE_PROJECT_NAME}-targets.cmake"
+ DESTINATION ${INSTALL_CONFIGDIR}
+ COMPONENT ${module_name}
+)
+
+# Create a ConfigVersion.cmake file
+include(CMakePackageConfigHelpers)
+write_basic_package_version_file(
+    "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}-config-version.cmake"
+    VERSION ${version}
+    COMPATIBILITY AnyNewerVersion
+)
+
+configure_package_config_file("${CMAKE_PROJECT_NAME}-config.cmake.in"
+    "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}-config.cmake"
+    INSTALL_DESTINATION ${INSTALL_CONFIGDIR}
+)
+
+# Install the config, config-version and custom find modules
+install(FILES
+    "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}-config.cmake"
+    "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}-config-version.cmake"
+    DESTINATION ${INSTALL_CONFIGDIR}
+)
+
+##############################################
+## Exporting from the build tree
+message(STATUS "Exporting created targets to use them in another build")
+export(EXPORT ${CMAKE_PROJECT_NAME}-targets
+    FILE "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_PROJECT_NAME}-targets.cmake")
+
+if(STANDALONE)
+    if(AIDGE_REQUIRES_PYTHON AND NOT AIDGE_PYTHON_HAS_EMBED)
+        message(WARNING "Skipping compilation of standalone executable: missing Python embedded interpreter")
+    else()
+        add_executable(main main.cpp)
+        target_link_libraries(main PRIVATE ${module_name})
+    endif()
+endif()
diff --git a/aidge_core/aidge_export_aidge/static/README.md b/aidge_core/aidge_export_aidge/static/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1ce48d5274cbc2f007bde7c7be01e41e617cb19a
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/README.md
@@ -0,0 +1,5 @@
+To compile:
+
+> mkdir build && cd build
+> cmake -DCMAKE_INSTALL_PREFIX:PATH=<path/to/aidge/install> ..
+> make all install
diff --git a/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake b/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..217a48351def531cf7da39c9e78e0627fdba87f4
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/cmake/PybindModuleCreation.cmake
@@ -0,0 +1,22 @@
+function(generate_python_binding name target_to_bind)
+
+    find_package(Python COMPONENTS Interpreter Development.Module)
+
+    Include(FetchContent)
+    FetchContent_Declare(
+        PyBind11
+        GIT_REPOSITORY https://github.com/pybind/pybind11.git
+        GIT_TAG        v2.10.4 # or a later release
+    )
+    FetchContent_MakeAvailable(PyBind11)
+
+    message(STATUS "Creating binding for module ${name}")
+    file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
+
+    pybind11_add_module(${name} MODULE ${pybind_src_files} NO_EXTRAS) # NO_EXTRAS required for pip install
+    target_include_directories(${name} PRIVATE "python_binding")
+
+    # Link target library to bind
+    target_link_libraries(${name} PRIVATE ${target_to_bind})
+
+endfunction()
diff --git a/aidge_core/aidge_export_aidge/static/export-config.cmake.in b/aidge_core/aidge_export_aidge/static/export-config.cmake.in
new file mode 100644
index 0000000000000000000000000000000000000000..f0be5e076dbdfef359fc00fd41c25c0bba815839
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/export-config.cmake.in
@@ -0,0 +1,8 @@
+@PACKAGE_INIT@
+
+include(CMakeFindDependencyMacro)
+find_dependency(aidge_core)
+
+include(${CMAKE_CURRENT_LIST_DIR}/@CMAKE_PROJECT_NAME@-config-version.cmake)
+
+include(${CMAKE_CURRENT_LIST_DIR}/@CMAKE_PROJECT_NAME@-targets.cmake)
diff --git a/aidge_core/aidge_export_aidge/static/include/dnn.hpp b/aidge_core/aidge_export_aidge/static/include/dnn.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3a4d5c02eceee1054c93b8ad635a71d3d04ac2fc
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/include/dnn.hpp
@@ -0,0 +1,17 @@
+#ifndef DNN_HPP
+#define DNN_HPP
+#include <aidge/graph/GraphView.hpp>
+/**
+ * @brief This file contains everything related to the construction of the
+ * neural network.
+ *
+ */
+
+/**
+ * @brief This function generates the exported Aidge::GraphView.
+ *
+ * @return std::shared_ptr<Aidge::GraphView>
+ */
+std::shared_ptr<Aidge::GraphView> generateModel();
+
+#endif /* DNN_HPP */
diff --git a/aidge_core/aidge_export_aidge/static/main.cpp b/aidge_core/aidge_export_aidge/static/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..61bc3ebeb915be12570c6300965e3b64ac2870dd
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/main.cpp
@@ -0,0 +1,20 @@
+#include <iostream>
+#include <aidge/backend/cpu.hpp>
+
+/* Register default cpu Tensor implementation */
+#include <aidge/backend/cpu/data/TensorImpl.hpp>
+
+/* Include model generator */
+#include "include/dnn.hpp"
+
+int main()
+{
+
+    std::cout << "BEGIN" << std::endl;
+
+    std::shared_ptr<Aidge::GraphView> graph = generateModel();
+
+    std::cout << "END" << std::endl;
+
+    return 0;
+}
diff --git a/aidge_core/aidge_export_aidge/static/project_name.txt b/aidge_core/aidge_export_aidge/static/project_name.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d5637593fe045bb602bd181bf0f242f0943fb9fd
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/project_name.txt
@@ -0,0 +1 @@
+export
diff --git a/aidge_core/aidge_export_aidge/static/python_binding/pybind.cpp b/aidge_core/aidge_export_aidge/static/python_binding/pybind.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..072fc4cd6012996a4fcda1d18b8244209c69797a
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/python_binding/pybind.cpp
@@ -0,0 +1,14 @@
+#include <pybind11/pybind11.h>
+
+#include "dnn.hpp"
+
+namespace py = pybind11;
+
+
+void init_export(py::module& m){
+    m.def("generate_model", generateModel);
+}
+
+PYBIND11_MODULE(export, m) {
+    init_export(m);
+}
diff --git a/aidge_core/aidge_export_aidge/static/version.txt b/aidge_core/aidge_export_aidge/static/version.txt
new file mode 100644
index 0000000000000000000000000000000000000000..77d6f4ca23711533e724789a0a0045eab28c5ea6
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/static/version.txt
@@ -0,0 +1 @@
+0.0.0
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..48d07e8db8d5fb116148e9d41100fffa01fcf622
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/conv.jinja
@@ -0,0 +1,17 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+#define _{{name|upper}}_IN_CHANNELS  {{InChannels}}
+#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}}
+
+{% for i in range(KernelDims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}}
+{%- endfor %}
+{% for i in range(StrideDims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}}
+{%- endfor %}
+{% for i in range(DilationDims|length) %}
+#define _{{name|upper}}_DILATION_{{i}} {{DilationDims[i]}}
+{%- endfor %}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..e292f9b611978877c47b15e91f926f30d27a1cc5
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/fc.jinja
@@ -0,0 +1,7 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+#define _{{name|upper}}_IN_CHANNELS  {{InChannels}}
+#define _{{name|upper}}_OUT_CHANNELS {{OutChannels}}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..d258f580e6ff9c523a87b834fdccf2f3b14fb133
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/attributes/maxpooling.jinja
@@ -0,0 +1,13 @@
+#ifndef EXPORT_ATTRIBUTES_{{name|upper}}_H
+#define EXPORT_ATTRIBUTES_{{name|upper}}_H
+
+{% for i in range(KernelDims|length) %}
+#define _{{name|upper}}_KERNEL_{{i}} {{KernelDims[i]}}
+{%- endfor %}
+{% for i in range(StrideDims|length) %}
+#define _{{name|upper}}_STRIDE_{{i}} {{StrideDims[i]}}
+{%- endfor %}
+
+#define _{{name|upper}}_CEIL_MODE {{CeilMode|int}}
+
+#endif /* EXPORT_ATTRIBUTES_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/dnn.jinja b/aidge_core/aidge_export_aidge/templates/dnn.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..5da46b2d8a439a359dfb1c7ec8ebc18e8d516767
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/dnn.jinja
@@ -0,0 +1,36 @@
+/********************************************************************************
+ * This file has been generated by the Aidge export.
+ ********************************************************************************/
+
+/*** STD INCLUDES ***/
+#include <memory>  // std::shared_ptr
+
+/*** AIDGE INCLUDES ***/
+#include <aidge/graph/GraphView.hpp>  // Aidge::GraphView
+#include <aidge/graph/Node.hpp>       // Aidge::Node
+#include <aidge/graph/OpArgs.hpp>     // Aidge::Sequential
+
+/*** AIDGE OPERATORS ***/
+{%- for operator in operators %}
+#include <aidge/operator/{{operator}}.hpp>
+{%- endfor %}
+
+/*** OPERATOR ATTRIBUTES & PARAMETERS ***/
+{%- for header in headers %}
+#include "{{ header }}"
+{%- endfor %}
+
+/*** HEADER ***/
+#include "dnn.hpp"
+
+
+std::shared_ptr<Aidge::GraphView> generateModel() {
+    /*** BUILDING GRAPH ***/
+    std::shared_ptr<Aidge::GraphView> graph = std::make_shared<Aidge::GraphView>();
+
+    {%- for action in actions %}
+    {{ action }}
+    {%- endfor %}
+
+    return graph;
+}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8e841ea2a10c71b884736dcbd7cfd03b52c5ad4f
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/_set_input.jinja
@@ -0,0 +1,7 @@
+{# NOTE: A shorter notation like {%- for input in inputs if input[0] %}
+would mess up loop.index, since inputs set to None would no longer increment it! #}
+{%- for input in inputs %}
+{%- if input[0] %}
+{{input[0]}}->addChild({{name}}, {{input[1]}}, {{loop.index - 1}}); {# NOTE: loop.index begins at 1 #}
+{%- endif %}
+{%- endfor %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..a805f8065e87244bf0546ca42d294b86f144a26d
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/conv.jinja
@@ -0,0 +1,26 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Conv(
+            _{{name|upper}}_IN_CHANNELS,
+            _{{name|upper}}_OUT_CHANNELS,
+            {
+            {%- for i in range(KernelDims|length) -%}
+                _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            "{{name}}",
+            {
+            {%- for i in range(StrideDims|length) -%}
+                _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            {
+            {%- for i in range(DilationDims|length) -%}
+                _{{name|upper}}_DILATION_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            }
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..df6dbc83492174fc49348b8073deb47a5deca313
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/fc.jinja
@@ -0,0 +1,11 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::FC(
+            _{{name|upper}}_IN_CHANNELS,
+            _{{name|upper}}_OUT_CHANNELS,
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..c6587c128509712e1a8e903e7484476548e9347d
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/maxpooling.jinja
@@ -0,0 +1,20 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::MaxPooling(
+            {
+            {%- for i in range(KernelDims|length) -%}
+                _{{name|upper}}_KERNEL_{{i}}{%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            "{{name}}",
+            {
+            {%- for i in range(StrideDims|length) -%}
+                _{{name|upper}}_STRIDE_{{i}} {%- if not loop.last %}, {% endif -%}
+            {%- endfor -%}
+            },
+            _{{name|upper}}_CEIL_MODE
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/producer.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/producer.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8e0a465a044ebfcc249206f9f5886c7a38fc3252
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/producer.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Producer(
+            {{tensor_name}},
+            "{{name}}"
+        );
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/relu.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/relu.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..8fd58a30bddd39647dd3b25b2982e67174220381
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/relu.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::ReLU(
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/graph_ctor/sub.jinja b/aidge_core/aidge_export_aidge/templates/graph_ctor/sub.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..d9417751e4117cb296ec874d946b93c2c64df614
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/graph_ctor/sub.jinja
@@ -0,0 +1,9 @@
+{% filter indent(width=4, first=False) %}
+/*** {{name|upper}} ***/
+std::shared_ptr<Aidge::Node> {{name}} =
+        Aidge::Sub(
+            "{{name}}"
+        );
+{% include "./_set_input.jinja" %}
+graph->add({{name}});
+{% endfilter %}
diff --git a/aidge_core/aidge_export_aidge/templates/parameter.jinja b/aidge_core/aidge_export_aidge/templates/parameter.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..11a407cc89f72f24167871a594decc6d90ab489d
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/parameter.jinja
@@ -0,0 +1,11 @@
+#ifndef EXPORT_PARAMETERS_{{name|upper}}_H
+#define EXPORT_PARAMETERS_{{name|upper}}_H
+
+#include <aidge/data/Tensor.hpp>
+#include <memory>
+
+std::shared_ptr<Aidge::Tensor> {{name}} = std::make_shared<Aidge::Tensor>(Aidge::Array{{dims|length}}D<{{data_t}}, {{ dims|join(", ") }}> {
+{{ values }}
+});
+
+#endif /* EXPORT_PARAMETERS_{{name|upper}}_H */
diff --git a/aidge_core/aidge_export_aidge/templates/pybind.jinja b/aidge_core/aidge_export_aidge/templates/pybind.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..9b48cc97f242813cc33b77bfc028e85c08b0cec7
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/templates/pybind.jinja
@@ -0,0 +1,14 @@
+#include <pybind11/pybind11.h>
+
+#include "dnn.hpp"
+
+namespace py = pybind11;
+
+
+void init_{{name}}(py::module& m){
+    m.def("generate_model", generateModel);
+}
+
+PYBIND11_MODULE({{name}}, m) {
+    init_{{name}}(m);
+}
diff --git a/aidge_core/aidge_export_aidge/utils/__init__.py b/aidge_core/aidge_export_aidge/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecdf2aec2692a48e108d5f4ad05ed05803319525
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/utils/__init__.py
@@ -0,0 +1,11 @@
+from .operator_registry import *
+
+def parse_node_input(node_inputs: list) -> list:
+    """Parse node intputs in order to adapt the list for Jinja.
+
+    :param node_inputs: return of node.inputs()
+    :type node_inputs: list of tuple of aidge_core.Node, output idx.
+    :return: list of tuple of node name, output idx.
+    :rtype: list
+    """
+    return [None if parent_node is None else (parent_node.name(), outId) for parent_node, outId in node_inputs]
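A self-contained illustration of `parse_node_input`; the `_FakeNode` class below is a stand-in for an aidge Node, used only for demonstration:

    from aidge_core.aidge_export_aidge.utils import parse_node_input

    class _FakeNode:
        def __init__(self, name):
            self._name = name
        def name(self):
            return self._name

    # node.inputs() yields (parent node or None, output index) tuples
    inputs = [(_FakeNode("conv0"), 0), (None, 0)]
    print(parse_node_input(inputs))  # [('conv0', 0), None]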
diff --git a/aidge_core/aidge_export_aidge/utils/operator_registry.py b/aidge_core/aidge_export_aidge/utils/operator_registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd6fbaaceeba9c2125b38354eca9cc116acd29b1
--- /dev/null
+++ b/aidge_core/aidge_export_aidge/utils/operator_registry.py
@@ -0,0 +1,18 @@
+OPERATORS_REGISTRY = {}
+
+def operator_register(*args):
+    key_list = list(args)
+
+    def decorator(operator):
+        def wrapper(*args, **kwargs):
+            return operator(*args, **kwargs)
+
+        for key in key_list:
+            OPERATORS_REGISTRY[key] = operator
+
+        return wrapper
+    return decorator
+
+def supported_operators():
+    return list(OPERATORS_REGISTRY.keys())
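As a usage sketch, adding support for a new operator only requires decorating an `ExportNode` subclass with `operator_register`; the `Add` operator and its `add.jinja` template below are hypothetical, shown to illustrate the pattern used by conv.py, relu.py and the other files above:

    from aidge_core.aidge_export_aidge.utils import operator_register, parse_node_input
    from aidge_core.aidge_export_aidge import ROOT_EXPORT
    from aidge_core import ExportNode, generate_str

    @operator_register("Add")  # hypothetical operator, for illustration only
    class Add(ExportNode):
        def export(self, export_folder, list_configs):
            return list_configs  # no attribute header needed here

        def forward(self, list_actions):
            list_actions.append(generate_str(
                ROOT_EXPORT / "templates/graph_ctor/add.jinja",  # hypothetical template
                name=self.name,
                inputs=parse_node_input(self.node.inputs()),
                **self.attributes
            ))
            return list_actions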
diff --git a/aidge_core/export/code_generation.py b/aidge_core/export/code_generation.py
deleted file mode 100644
index b18b5476f8e083bcbe3d4f6c4a57132ebe7b780f..0000000000000000000000000000000000000000
--- a/aidge_core/export/code_generation.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-from jinja2 import Environment, FileSystemLoader
-
-
-def generate_file(file_path: str, template_path: str, **kwargs) -> None:
-    """Generate a file at `file_path` using the jinja template located at `file_path`.
-
-    kwargs are used to fill the template.
-
-    :param file_path: path where to generate the file
-    :type file_path: str
-    :param template_path: Path to the template to use for code generation
-    :type template_path: str
-    """
-    # Get directory name of the file
-    dirname = os.path.dirname(file_path)
-
-    # If directory doesn't exist, create it
-    if not os.path.exists(dirname):
-        os.makedirs(dirname)
-
-    # Get directory name and name of the template
-    template_dir = os.path.dirname(template_path)
-    template_name = os.path.basename(template_path)
-
-    # Select template
-    template = Environment(loader=FileSystemLoader(
-        template_dir)).get_template(template_name)
-
-    # Generate file
-    content = template.render(kwargs)
-    with open(file_path, mode="w", encoding="utf-8") as message:
-        message.write(content)
-
-def generate_str(template_path:str, **kwargs) -> str:
-    """Generate a string using the jinja template located at `file_path`.
-    kwargs are used to fill the template.
-
-    :param template_path: Path to the template to use for code generation
-    :type template_path: str
-    :return: A string of the interpreted template
-    :rtype: str
-    """
-    dirname = os.path.dirname(template_path)
-    filename = os.path.basename(template_path)
-    template = Environment(loader=FileSystemLoader(dirname)).get_template(filename)
-    return template.render(kwargs)
diff --git a/aidge_core/export/__init__.py b/aidge_core/export_utils/__init__.py
similarity index 100%
rename from aidge_core/export/__init__.py
rename to aidge_core/export_utils/__init__.py
diff --git a/aidge_core/export_utils/code_generation.py b/aidge_core/export_utils/code_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..a02fc0966702cec7a2cbe33f8411bb71e3035e90
--- /dev/null
+++ b/aidge_core/export_utils/code_generation.py
@@ -0,0 +1,46 @@
+from pathlib import Path
+from jinja2 import Environment, FileSystemLoader
+from typing import Union
+
+
+def generate_file(file_path: Union[Path, str], template_path: Union[Path, str], **kwargs) -> None:
+    """Generate a file at `file_path` using the jinja template located at `file_path`.
+
+    kwargs are used to fill the template.
+
+    :param file_path: path where to generate the file
+    :type file_path: pathlib.Path or str
+    :param template_path: Path to the template to use for code generation
+    :type template_path: pathlib.Path or str
+    """
+    # Convert str -> Path for compatibility !
+    if isinstance(file_path, str):
+        file_path = Path(file_path)
+    if isinstance(template_path, str):
+        template_path = Path(template_path)
+    # Make dir
+    file_path.parent.mkdir(parents=True, exist_ok=True)
+
+    # Select template
+    template = Environment(loader=FileSystemLoader(
+        template_path.parent)).get_template(template_path.name)
+
+    # Generate file
+    with open(file_path, mode="w", encoding="utf-8") as file:
+        file.write(template.render(kwargs))
+
+
+def generate_str(template_path: Union[Path, str], **kwargs) -> str:
+    """Generate a string using the jinja template located at `file_path`.
+    kwargs are used to fill the template.
+
+    :param template_path: Path to the template to use for code generation
+    :type template_path: pathlib.Path or str
+    :return: A string of the interpreted template
+    :rtype: str
+    """
+    # Convert str -> Path for compatibility !
+    if isinstance(template_path, str):
+        template_path = Path(template_path)
+    return Environment(loader=FileSystemLoader(
+        template_path.parent)).get_template(template_path.name).render(kwargs)
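A minimal usage sketch of the two helpers, assuming a throw-away `hello.jinja` template written on the fly (these functions are re-exported at the `aidge_core` top level, as the operator files above import them from there):

    from pathlib import Path
    from aidge_core import generate_file, generate_str

    tpl = Path("hello.jinja")
    tpl.write_text("Hello {{ who }}!")

    print(generate_str(tpl, who="Aidge"))             # Hello Aidge!
    generate_file("out/hello.txt", tpl, who="Aidge")  # creates out/ and renders the file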
diff --git a/aidge_core/export/node_export.py b/aidge_core/export_utils/node_export.py
similarity index 83%
rename from aidge_core/export/node_export.py
rename to aidge_core/export_utils/node_export.py
index 7262e9a837424158b8896f305894dcc57769520c..7aceaa0ccc1f07674241d6f35bbeff90330f2596 100644
--- a/aidge_core/export/node_export.py
+++ b/aidge_core/export_utils/node_export.py
@@ -1,4 +1,4 @@
-import aidge_core
+from aidge_core import Node, Attributes
 
 from abc import ABC, abstractmethod
 
@@ -8,7 +8,7 @@ class ExportNode(ABC):
     """
 
     @abstractmethod
-    def __init__(self, aidge_node: aidge_core.Node) -> None:
+    def __init__(self, aidge_node: Node) -> None:
         """Create ExportNode and retieve attirubtes from ``aidge_node``:
 
         - name: aidge Node name
@@ -20,10 +20,7 @@ class ExportNode(ABC):
         self.node = aidge_node
         self.operator = aidge_node.get_operator()
         self.name = self.node.name()
-        self.attributes = {} # Attributes are auto fetched from aidge operators
-        if isinstance(self.operator, aidge_core.Attributes):
-            for attr_name in self.operator.get_attrs_name():
-                self.attributes[attr_name] = self.operator.get_attr(attr_name)
+        self.attributes = self.operator.attr.dict() if self.operator.attr is not None else {} # Attributes are auto fetched from aidge operators
 
         # rename is_leaf ?
         self.is_last = len(self.node.get_children()) == 0
diff --git a/aidge_core/unit_tests/static/main.cpp b/aidge_core/unit_tests/static/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..640fc1fe60b55070de41ca4ce35ccd08084498b9
--- /dev/null
+++ b/aidge_core/unit_tests/static/main.cpp
@@ -0,0 +1,23 @@
+/*
+Example main.cpp used to test aidge export.
+This file is copied in the test export.
+*/
+#include <iostream>
+
+/* Register default cpu Tensor implementation */
+#include <aidge/backend/cpu/data/TensorImpl.hpp>
+
+/* Include model generator */
+#include "include/dnn.hpp"
+
+int main()
+{
+
+    std::cout << "BEGIN" << std::endl;
+
+    std::shared_ptr<Aidge::GraphView> graph = generateModel();
+
+    std::cout << "END" << std::endl;
+
+    return 0;
+}
diff --git a/aidge_core/unit_tests/test_export.py b/aidge_core/unit_tests/test_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d2e700a86925d1455cdee83e7d40cd891e72ba6
--- /dev/null
+++ b/aidge_core/unit_tests/test_export.py
@@ -0,0 +1,158 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import aidge_core
+from aidge_core.utils import run_command
+import unittest
+import os
+import pathlib
+import shutil
+import subprocess
+import sys
+
+
+def initFiller(model):
+    # Initialize parameters (weights and biases)
+    for node in model.get_nodes():
+        if node.type() == "Producer":
+            prod_op = node.get_operator()
+            value = prod_op.get_output(0)
+            value.set_backend("cpu")
+            tuple_out = node.output(0)[0]
+            # Note: there is no Conv in the current test network; kept for completeness
+            if tuple_out[0].type() == "Conv" and tuple_out[1] == 1:
+                # Conv weight
+                aidge_core.xavier_uniform_filler(value)
+            elif tuple_out[0].type() == "Conv" and tuple_out[1] == 2:
+                # Conv bias
+                aidge_core.constant_filler(value, 0.01)
+            elif tuple_out[0].type() == "FC" and tuple_out[1] == 1:
+                # FC weight
+                aidge_core.normal_filler(value)
+            elif tuple_out[0].type() == "FC" and tuple_out[1] == 2:
+                # FC bias
+                aidge_core.constant_filler(value, 0.01)
+            else:
+                pass
+
+
+def clean_dir(dir: pathlib.Path) -> None:
+    if not dir.is_dir():
+        print(f"Error : directory {dir} doesn't exist. Exiting clean_dir().")
+        return
+    for filename in os.listdir(dir):
+        file_path = os.path.join(dir, filename)
+        try:
+            if os.path.isfile(file_path) or os.path.islink(file_path):
+                os.unlink(file_path)
+            elif os.path.isdir(file_path):
+                shutil.rmtree(file_path)
+        except Exception as e:
+            print(f"Failed to delete {file_path}. Reason: {e}")
+    return
+
+
+class test_export(unittest.TestCase):
+    """Test aidge export"""
+
+    def setUp(self):
+        self.EXPORT_PATH: pathlib.Path = pathlib.Path("dummy_export")
+        self.BUILD_DIR: pathlib.Path = self.EXPORT_PATH / "build"
+        self.INSTALL_DIR: pathlib.Path = (self.EXPORT_PATH / "install").absolute()
+
+    def tearDown(self):
+        pass
+
+    def test_generate_export(self):
+        # Create model
+
+        model = aidge_core.sequential(
+            [
+                aidge_core.FC(
+                    in_channels=32 * 32 * 3, out_channels=512, name="InputNode"
+                ),
+                aidge_core.ReLU(name="Relu0"),
+                aidge_core.FC(in_channels=512, out_channels=256, name="FC1"),
+                aidge_core.ReLU(name="Relu1"),
+                aidge_core.FC(in_channels=256, out_channels=128, name="FC2"),
+                aidge_core.ReLU(name="Relu2"),
+                aidge_core.FC(in_channels=128, out_channels=10, name="OutputNode"),
+            ]
+        )
+
+        initFiller(model)
+
+        # Export model
+        aidge_core.export(self.EXPORT_PATH, model)
+
+        self.assertTrue(
+            self.EXPORT_PATH.is_dir(), "Export folder has not been generated"
+        )
+        os.makedirs(self.BUILD_DIR, exist_ok=True)
+        clean_dir(self.BUILD_DIR)  # if the build dir already existed, ensure it is empty
+        clean_dir(self.INSTALL_DIR)
+
+        # Test compilation of export
+        search_path = (
+            os.path.join(sys.prefix, "lib", "libAidge")
+            if "AIDGE_INSTALL" not in os.environ
+            else os.environ["AIDGE_INSTALL"]
+        )
+
+        shutil.copyfile(
+            pathlib.Path(__file__).parent / "static/main.cpp",
+            self.EXPORT_PATH / "main.cpp",
+        )
+
+        ##########################
+        # CMAKE EXPORT
+        try:
+            for std_line in run_command(
+                [
+                    "cmake",
+                    str(self.EXPORT_PATH.absolute()),
+                    "-DPYBIND=ON",
+                    f"-DCMAKE_PREFIX_PATH={search_path}", # search dependencies
+                    f"-DCMAKE_INSTALL_PREFIX:PATH={self.INSTALL_DIR}", # local install
+                ],
+                cwd=str(self.BUILD_DIR),
+            ):
+                print(std_line, end="")
+        except subprocess.CalledProcessError as e:
+            print(f"An error occurred: {e}\nFailed to configure export.")
+            raise SystemExit(1)
+
+        ##########################
+        # BUILD EXPORT
+        try:
+            for std_line in run_command(
+                ["cmake", "--build", "."],
+                cwd=str(self.BUILD_DIR),
+            ):
+                print(std_line, end="")
+        except subprocess.CalledProcessError as e:
+            print(f"An error occurred: {e}\nFailed to build export.")
+            raise SystemExit(1)
+
+        ##########################
+        # INSTALL EXPORT
+        try:
+            for std_line in run_command(
+                ["cmake", "--install", "."],
+                cwd=str(self.BUILD_DIR),
+            ):
+                print(std_line, end="")
+        except subprocess.CalledProcessError as e:
+            print(f"An error occurred: {e}\nFailed to install export.")
+            raise SystemExit(1)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py
index 6e0c1f9b9a0828e266ef3bf19ee75df3e275b282..26d60f2fbaf0f3903baf191cf0a2ad5550fb3275 100644
--- a/aidge_core/unit_tests/test_impl.py
+++ b/aidge_core/unit_tests/test_impl.py
@@ -41,6 +41,7 @@ class test_OperatorImpl(unittest.TestCase):
         generic_matmul_op = matmul.get_operator()
         generic_matmul_op.set_forward_dims(lambda x: x)
         generic_matmul_op.set_impl(testImpl(generic_matmul_op))
+        generic_matmul_op.set_input(0, aidge_core.Tensor(np.arange(18).reshape(1,2,3,3)))
         generic_matmul_op.forward()
         self.assertEqual(GLOBAL_CPT, 1)
 
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index 164aee726255e0478b629ee853d9a1f619945f3a..8d6f2686d9010ac4ebed80cd04f74effe763e977 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -30,42 +30,39 @@ class test_operator_binding(unittest.TestCase):
         self.assertNotEqual(gop.name(), "")
 
     def test_param_bool(self):
-        self.generic_operator.add_attr("bool", True)
-        self.assertEqual(self.generic_operator.has_attr("bool"), True)
-        self.assertEqual(self.generic_operator.get_attr("bool"), True)
-        self.assertEqual(self.generic_operator.get_attr_type("bool"), "bool")
-        self.assertEqual(self.generic_operator.get_attrs_name(), {"bool"})
-        self.generic_operator.del_attr("bool")
-        self.assertEqual(self.generic_operator.has_attr("bool"), False)
-        self.assertEqual(len(self.generic_operator.get_attrs_name()), 0)
+        self.generic_operator.attr.add_attr("bool", True)
+        self.assertEqual(self.generic_operator.attr.has_attr("bool"), True)
+        self.assertEqual(self.generic_operator.attr.get_attr("bool"), True)
+        self.generic_operator.attr.del_attr("bool")
+        self.assertEqual(self.generic_operator.attr.has_attr("bool"), False)
 
     def test_param_int(self):
-        self.generic_operator.add_attr("int", 1)
-        self.assertEqual(self.generic_operator.get_attr("int"), 1)
+        self.generic_operator.attr.add_attr("int", 1)
+        self.assertEqual(self.generic_operator.attr.get_attr("int"), 1)
 
     def test_param_float(self):
-        self.generic_operator.add_attr("float", 2.0)
-        self.assertEqual(self.generic_operator.get_attr("float"), 2.0)
+        self.generic_operator.attr.add_attr("float", 2.0)
+        self.assertEqual(self.generic_operator.attr.get_attr("float"), 2.0)
 
     def test_param_str(self):
-        self.generic_operator.add_attr("str", "value")
-        self.assertEqual(self.generic_operator.get_attr("str"), "value")
+        self.generic_operator.attr.add_attr("str", "value")
+        self.assertEqual(self.generic_operator.attr.get_attr("str"), "value")
 
     def test_param_l_int(self):
-        self.generic_operator.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
-        self.assertEqual(self.generic_operator.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.generic_operator.attr.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
 
     def test_param_l_bool(self):
-        self.generic_operator.add_attr("l_bool", [True, False, False, True])
-        self.assertEqual(self.generic_operator.get_attr("l_bool"), [True, False, False, True])
+        self.generic_operator.attr.add_attr("l_bool", [True, False, False, True])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_bool"), [True, False, False, True])
 
     def test_param_l_float(self):
-        self.generic_operator.add_attr("l_float", [2.0, 1.0])
-        self.assertEqual(self.generic_operator.get_attr("l_float"), [2.0, 1.0])
+        self.generic_operator.attr.add_attr("l_float", [2.0, 1.0])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_float"), [2.0, 1.0])
 
     def test_param_l_str(self):
-        self.generic_operator.add_attr("l_str", ["ok"])
-        self.assertEqual(self.generic_operator.get_attr("l_str"), ["ok"])
+        self.generic_operator.attr.add_attr("l_str", ["ok"])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_str"), ["ok"])
 
     def test_dynamicattribute_binding(self):
         # Check original C++ attributes are binded
@@ -76,22 +73,50 @@ class test_operator_binding(unittest.TestCase):
         self.assertEqual(attrs.get_attr("b"), "test")
         self.assertEqual(attrs.has_attr("c"), True)
         self.assertEqual(attrs.get_attr("c"), [True, False, True])
-        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c"})
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "mem", "impl"})
         self.assertEqual(attrs.has_attr("d"), False)
+        self.assertEqual(attrs.has_attr("mem.a"), True)
+        self.assertEqual(attrs.get_attr("mem.a"), 1)
+        self.assertEqual(attrs.has_attr("mem.data.b"), True)
+        self.assertEqual(attrs.get_attr("mem.data.b"), 1.0)
+        self.assertEqual(attrs.get_attr("mem").get_attr("data").get_attr("b"), 1.0)
+        self.assertEqual(attrs.has_attr("impl.c"), True)
+        self.assertEqual(attrs.get_attr("impl.c"), "test")
 
         # Add Python attributes
         attrs.add_attr("d", 18.56)
         self.assertEqual(attrs.get_attr("d"), 18.56)
         self.assertEqual(attrs.has_attr("d"), True)
-        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c", "d"})
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "d", "mem", "impl"})
         self.assertEqual(attrs.has_attr("e"), False)
+        attrs.add_attr("mem.data.c", 19.36)
+        self.assertEqual(attrs.get_attr("mem.data.c"), 19.36)
+        self.assertEqual(attrs.has_attr("mem.data.c"), True)
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "d", "mem", "impl"})
 
         # Check that added Python attribute is accessible in C++
         # Return the value of an attribute named "d" of type float64 (double in C++)
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 18.56)
-        attrs.set_attr("d", 23.89)
+        attrs.d = 23.89
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
 
+        op = aidge_core.GenericOperatorOp("any_type", 1,0,1)
+        with self.assertRaises(RuntimeError):
+            op.attr.something
+
+        op.attr.something = aidge_core.DynamicAttributes()
+        try:
+            self.assertEqual(str(op.attr), "AttrDict({'something': AttrDict({})})")
+        except Exception:
+            self.fail("op.attr.something raised Exception unexpectedly!")
+
+        op.attr.something.arg1 = 4
+        self.assertEqual(op.attr.something.arg1, 4)
+
+        # auto-creating the namespace another_thing is not enabled:
+        #op.attr.another_thing.arg = 44
+        #self.assertEqual(op.attr.another_thing.arg, 44)
+
     def test_forward_dims(self):
         in_dims=[25, 25]
         input = aidge_core.Producer(in_dims, name="In")
@@ -129,18 +154,18 @@ class test_operator_binding(unittest.TestCase):
         myVar = 2
         myBool = True
         # Test dynamic attribute set
-        gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", myVar=myVar).get_operator()
-        gop.myBool = myBool
+        gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", my_var=myVar).get_operator()
+        gop.attr.my_bool = myBool
         # Test variable set by kwargs
-        self.assertEqual(gop.myVar, myVar)
+        self.assertEqual(gop.attr.my_var, myVar)
         # Test set attr
-        self.assertEqual(gop.myBool, myBool)
+        self.assertEqual(gop.attr.my_bool, myBool)
 
         # Test static attribute set !
         prod = aidge_core.Producer([1]).get_operator()
-        self.assertEqual(prod.Constant, False)
-        prod.Constant = True # By default Constant is False
-        self.assertEqual(prod.Constant, True)
+        self.assertEqual(prod.attr.constant, False)
+        prod.attr.constant = True # By default Constant is False
+        self.assertEqual(prod.attr.constant, True)
 
 
 
diff --git a/aidge_core/unit_tests/test_operator_squeeze.py b/aidge_core/unit_tests/test_operator_squeeze.py
new file mode 100644
index 0000000000000000000000000000000000000000..b43605893f32f17e7b544b2fea09b16bdd982050
--- /dev/null
+++ b/aidge_core/unit_tests/test_operator_squeeze.py
@@ -0,0 +1,194 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+from aidge_core import Log
+import numpy as np
+from numpy import testing as npt
+
+
+class TestSqueeze(unittest.TestCase):
+    """
+    Test squeeze operator
+    """
+
+    def setUp(self):
+        ############ DEFINING INPUTS AND OUTPUTS FOR TESTS
+        axes_to_squeeze_0 = [0]
+        axes_to_squeeze_many = [0, 1, 4]
+        axes_to_squeeze_all = []
+        axes_to_squeeze_error = [1, 2, 4, 5, 10, 3, 42, 127, 12, 3, 4, 1, 4, 50]
+
+        squeeze_dim_0 = aidge_core.Squeeze(axes_to_squeeze_0, name="squeeze_dim_0")
+        squeeze_many = aidge_core.Squeeze(axes_to_squeeze_many, name="squeeze_many")
+        squeeze_all = aidge_core.Squeeze(axes_to_squeeze_all, name="squeeze_all")
+        squeeze_error = aidge_core.Squeeze(axes_to_squeeze_error, name="squeeze_error")
+
+        input_1_data_shape = np.array([1, 2, 3])
+        input_2_data_shape = np.array([1, 1, 3, 3, 1, 9])
+        input_3_data_shape = np.array([1])
+        input_4_data_shape = np.array([1, 1, 4])
+
+        input_axes_0 = axes_to_squeeze_0
+        input_axes_many = axes_to_squeeze_many
+        input_axes_all = axes_to_squeeze_all
+        # input_axes_error = aidge_core.Tensor(axes_to_squeeze_error)
+
+        ####################### DEFINING TEST RUNS
+        self.tests_axes_defined_by_attribute = [
+            (input_1_data_shape, squeeze_dim_0, np.array([2, 3])),
+            (input_1_data_shape, squeeze_all, np.array([2, 3])),
+            (input_2_data_shape, squeeze_dim_0, np.array([1, 3, 3, 1, 9])),
+            (input_2_data_shape, squeeze_many, np.array([3, 3, 9])),
+            (input_2_data_shape, squeeze_all, np.array([3, 3, 9])),
+            (input_3_data_shape, squeeze_dim_0, np.array([])),
+            (input_3_data_shape, squeeze_all, np.array([])),
+            (input_4_data_shape, squeeze_dim_0, np.array([1, 4])),
+            (input_4_data_shape, squeeze_all, np.array([4])),
+        ]
+
+        # operators are purposefully chosen with predefined attributes different from the input_axes tensor
+        self.tests_axes_defined_by_input = [
+            (input_1_data_shape, input_axes_0, squeeze_error, np.array([2, 3])),
+            (input_1_data_shape, input_axes_all, squeeze_error, np.array([2, 3])),
+            (input_2_data_shape, input_axes_0, squeeze_error, np.array([1, 3, 3, 1, 9])),
+            (input_2_data_shape, input_axes_many, squeeze_error, np.array([3, 3, 9])),
+            (input_2_data_shape, input_axes_all, squeeze_error, np.array([3, 3, 9])),
+            (input_3_data_shape, input_axes_0, squeeze_error, np.array([])),
+            (input_3_data_shape, input_axes_all, squeeze_error, np.array([])),
+            (input_4_data_shape, input_axes_0, squeeze_error, np.array([1, 4])),
+            (input_4_data_shape, input_axes_all, squeeze_error, np.array([4])),
+        ]
+        self.test_error = [
+            (input_1_data_shape, squeeze_error),
+            (input_1_data_shape, squeeze_many),
+            (input_3_data_shape, squeeze_many),
+            (input_4_data_shape, squeeze_many),
+        ]
+        return
+
+    def tearDown(self):
+        pass
+
+    def test_axes_defined_via_tensor_input(self):
+        Log.notice("\ntest_axes_defined_via_tensor_input")
+        for index, (
+            input_shape,
+            input_axes_to_squeeze,
+            squeeze_node_template,
+            output_shape,
+        ) in enumerate(self.tests_axes_defined_by_input):
+            test_squeeze_node = squeeze_node_template
+            test_squeeze_op = test_squeeze_node.get_operator()
+
+            print(f"\nTest {index}")
+            print(f"input shape : {input_shape}")
+            print(f"input axes: {np.array(input_axes_to_squeeze)}")
+            print(f"operator : {test_squeeze_node}")
+            print(f"expected output_shape : {output_shape}")
+
+            test_squeeze_op.set_backend("cpu")
+            test_squeeze_op.set_datatype(aidge_core.dtype.float32)
+
+            input_values = np.ones(shape=input_shape, dtype=np.float32)
+            output_values = np.ones(shape=output_shape, dtype=np.float32)
+
+            input_data = aidge_core.Tensor(input_values)
+            input_data.set_datatype(aidge_core.dtype.float32)
+            input_data.set_backend("cpu")
+
+            input_axes = aidge_core.Tensor(
+                np.array(input_axes_to_squeeze, dtype=np.float32)
+            )
+            input_axes.set_datatype(aidge_core.dtype.int8)
+            input_axes.set_backend("cpu")
+
+            test_squeeze_op.set_input(0, input_data)
+            test_squeeze_op.set_input(1, input_axes)
+
+            self.assertEqual(test_squeeze_op.forward_dims(True), True)
+            test_squeeze_op.forward()
+
+            squeeze_output = test_squeeze_op.get_output(0)
+
+            npt.assert_array_equal(
+                squeeze_output.dims(),
+                output_shape,
+                err_msg=f"SQUEEZE FAILURE : expected result differs from output size\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+            npt.assert_array_almost_equal(
+                np.array(squeeze_output, dtype=np.float32),
+                output_values,
+                7,
+                err_msg=f"SQUEEZE FAILURE : output tensor values differs from expected values\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+            # self.assertEqual(test_squeeze_op.dims_forwarded(), True, "SQUEEZE_FAILURE : dims_forwarded failed.")
+        return
+
+    def test_axes_defined_via_attribute(self):
+        Log.notice("\ntest_axes_defined_via_attribute")
+        for index, (input_shape, squeeze_node_template, output_shape) in enumerate(
+            self.tests_axes_defined_by_attribute
+        ):
+            test_squeeze_node = squeeze_node_template
+            test_squeeze_op = test_squeeze_node.get_operator()
+
+            print(f"\nTest {index}")
+            print(f"input size : {input_shape.shape}")
+            print(f"operator : {test_squeeze_node}")
+            print(f"expected output_shape : {output_shape}")
+
+            test_squeeze_op.set_backend("cpu")
+
+            input_values = np.ones(shape=input_shape, dtype=np.float32)
+            output_values = np.ones(shape=output_shape, dtype=np.float32)
+            input_data = aidge_core.Tensor(input_values)
+            input_data.set_datatype(aidge_core.dtype.float32)
+            input_data.set_backend("cpu")
+            test_squeeze_op.set_input(0, input_data)
+
+            test_squeeze_op.forward_dims()
+            test_squeeze_op.forward()
+
+            squeeze_output = test_squeeze_op.get_output(0)
+
+            npt.assert_array_equal(
+                squeeze_output.dims(),
+                output_shape,
+                err_msg=f"SQUEEZE FAILURE : expected result differs from output size\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+            npt.assert_array_almost_equal(
+                np.array(squeeze_output, dtype=np.float32),
+                output_values,
+                7,
+                err_msg=f"SQUEEZE FAILURE : output tensor values differs from expected values\n\toperator : {test_squeeze_node}\n\tinput.shape : {input_shape.shape}",
+            )
+        return
+
+    def test_error(self):
+        for input_shape, squeeze_node_template in self.test_error:
+            test_squeeze_node = squeeze_node_template
+            test_squeeze_op = test_squeeze_node.get_operator()
+
+            input_values = np.ones(shape=input_shape)
+            input_data = aidge_core.Tensor(input_values)
+            input_data.set_datatype(aidge_core.dtype.float32)
+            input_data.set_backend("cpu")
+            test_squeeze_op.set_input(0, input_data)
+
+            with self.assertRaises((RuntimeError, AssertionError)):
+                test_squeeze_op.forward_dims()
+                test_squeeze_op.forward()
+        return
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/aidge_core/unit_tests/test_operator_unsqueeze.py b/aidge_core/unit_tests/test_operator_unsqueeze.py
new file mode 100644
index 0000000000000000000000000000000000000000..12f55fa30bc027fa5a3cea6ccb6a8d2970cad018
--- /dev/null
+++ b/aidge_core/unit_tests/test_operator_unsqueeze.py
@@ -0,0 +1,211 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+import numpy as np
+from numpy import testing as npt
+
+
+class TestUnsqueeze(unittest.TestCase):
+    """
+    Test unsqueeze operator
+    """
+
+    def setUp(self):
+        axis_to_unsqueeze_dim_0 = [0]
+        axis_to_unsqueeze_many = [1, 4, 5]
+        axis_to_unsqueeze_error_identical_index = [0, 0, 0]
+        axis_to_unsqueeze_error_too_high_index = [50]
+        axis_to_unsqueeze_onnx_test = [0, 4]
+        unsqueeze_dim_0 = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_dim_0, name="unsqueeze_dim_0"
+        )
+        unsqueeze_many = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_many, name="unsqueeze_many"
+        )
+        unsqueeze_error_identical_index = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_error_identical_index,
+            name="unsqueeze_error_identical_index",
+        )
+        unsqueeze_error_node = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_error_too_high_index,
+            name="unsqueeze_error_index_too_high",
+        )
+        unsqueeze_onnx_test = aidge_core.Unsqueeze(
+            axis_to_unsqueeze_onnx_test, name="unsqueeze taken from onnx documentation"
+        )
+
+        input_1_data_shape = np.array([1, 2, 3])
+        input_2_data_shape = np.array([2, 1, 3, 3])
+        input_3_data_shape = np.array([1, 1, 4])
+        input_onnx_data_shape = np.array([3, 4, 5])
+
+        input_axes_dim_0 = axis_to_unsqueeze_dim_0
+        input_axes_many = axis_to_unsqueeze_many
+        input_axes_onnx_test = axis_to_unsqueeze_onnx_test
+
+        self.tests_axes_defined_by_attribute = [
+            (input_1_data_shape, unsqueeze_dim_0, np.array([1, 1, 2, 3])),
+            (input_2_data_shape, unsqueeze_dim_0, np.array([1, 2, 1, 3, 3])),
+            (input_2_data_shape, unsqueeze_many, np.array([2, 1, 1, 3, 1, 1, 3])),
+            (input_3_data_shape, unsqueeze_dim_0, np.array([1, 1, 1, 4])),
+            (input_3_data_shape, unsqueeze_many, np.array([1, 1, 1, 4, 1, 1])),
+            (input_onnx_data_shape, unsqueeze_onnx_test, np.array([1, 3, 4, 5, 1])),
+        ]
+
+        self.tests_axes_defined_by_tensor = [
+            (
+                input_1_data_shape,
+                input_axes_dim_0,
+                unsqueeze_error_node,
+                np.array([1, 1, 2, 3]),
+            ),
+            (
+                input_2_data_shape,
+                input_axes_dim_0,
+                unsqueeze_error_node,
+                np.array([1, 2, 1, 3, 3]),
+            ),
+            (
+                input_2_data_shape,
+                input_axes_many,
+                unsqueeze_error_node,
+                np.array([2, 1, 1, 3, 1, 1, 3]),
+            ),
+            (
+                input_3_data_shape,
+                input_axes_dim_0,
+                unsqueeze_error_node,
+                np.array([1, 1, 1, 4]),
+            ),
+            (
+                input_3_data_shape,
+                input_axes_many,
+                unsqueeze_error_node,
+                np.array([1, 1, 1, 4, 1, 1]),
+            ),
+            (
+                input_onnx_data_shape,
+                input_axes_onnx_test,
+                unsqueeze_error_node,
+                np.array([1, 3, 4, 5, 1]),
+            ),
+        ]
+
+        self.test_error = [
+            (input_1_data_shape, unsqueeze_error_identical_index),
+            (input_1_data_shape, unsqueeze_error_node),
+            (input_1_data_shape, unsqueeze_many),  # dims too high
+        ]
+        return
+
+    def tearDown(self):
+        pass
+
+    def test_axes_defined_by_attribute(self):
+        for index, (
+            input_shape,
+            unsqueeze_template,
+            expected_output_shape,
+        ) in enumerate(self.tests_axes_defined_by_attribute):
+            test_unsqueeze = unsqueeze_template
+            test_unsqueeze_op = test_unsqueeze.get_operator()
+
+            print(f"\nTest {index}")
+            print(f"input size : {input_shape}")
+            print(f"operator : {test_unsqueeze}")
+            print(f"expected output_shape : {expected_output_shape}")
+
+            test_unsqueeze_op.set_backend("cpu")
+
+            input_values = np.ones(shape=input_shape, dtype=np.float32)
+            expected_output_values = np.ones(
+                shape=expected_output_shape, dtype=np.float32
+            )
+            input_tensor = aidge_core.Tensor(input_values)
+            test_unsqueeze_op.set_input(0, input_tensor)
+
+            test_unsqueeze_op.forward_dims()
+            test_unsqueeze_op.forward()
+
+            unsqueeze_output = test_unsqueeze_op.get_output(0)
+
+            npt.assert_array_equal(
+                unsqueeze_output.dims(),
+                expected_output_shape,
+                err_msg=f"UNSQUEEZE FAILURE : expected result dimensions differs from output's\n\toperator : {test_unsqueeze}\n\tinput.shape : {input_shape.shape}",
+            )
+            npt.assert_array_almost_equal(
+                np.array(unsqueeze_output),
+                expected_output_values,
+                7,
+                err_msg=f"UNSQUEEZE FAILURE : output tensor values differs from expected values\n\toperator : {test_unsqueeze}\n\tinput.shape : {input_shape.shape}",
+            )
+        return
+
+    def test_axes_defined_via_tensor_input(self):
+        for index, (
+            input_shape,
+            input_axes_to_unsqueeze,
+            unsqueeze_node_template,
+            output_shape,
+        ) in enumerate(self.tests_axes_defined_by_tensor):
+            test_unsqueeze_node = unsqueeze_node_template
+            test_unsqueeze_op = test_unsqueeze_node.get_operator()
+
+            print(f"\nTest {index}")
+            print(f"input shape : {input_shape}")
+            print(f"input axes: {np.array(input_axes_to_unsqueeze)}")
+            print(f"operator : {test_unsqueeze_node}")
+            print(f"expected output_shape : {output_shape}")
+
+            test_unsqueeze_op.set_backend("cpu")
+            test_unsqueeze_op.set_datatype(aidge_core.dtype.float32)
+
+            input_values = np.ones(shape=input_shape, dtype=np.float32)
+            output_values = np.ones(shape=output_shape, dtype=np.float32)
+
+            input_data = aidge_core.Tensor(input_values)
+            input_data.set_datatype(aidge_core.dtype.float32)
+            input_data.set_backend("cpu")
+
+            input_axes = aidge_core.Tensor(
+                np.array(input_axes_to_unsqueeze, dtype=np.float32)
+            )
+            input_axes.set_datatype(aidge_core.dtype.int8)
+            input_axes.set_backend("cpu")
+
+            test_unsqueeze_op.set_input(0, input_data)
+            test_unsqueeze_op.set_input(1, input_axes)
+
+            self.assertEqual(test_unsqueeze_op.forward_dims(True), True)
+            test_unsqueeze_op.forward()
+
+            unsqueeze_output = test_unsqueeze_op.get_output(0)
+
+            npt.assert_array_equal(
+                unsqueeze_output.dims(),
+                output_shape,
+                err_msg=f"UNSQUEEZE FAILURE : expected result dimensions differ from the output's\n\toperator : {test_unsqueeze_node}\n\tinput shape : {input_shape}",
+            )
+            npt.assert_array_almost_equal(
+                np.array(unsqueeze_output, dtype=np.float32),
+                output_values,
+                7,
+                err_msg=f"UNSQUEEZE FAILURE : output tensor values differ from expected values\n\toperator : {test_unsqueeze_node}\n\tinput shape : {input_shape}",
+            )
+            # self.assertEqual(test_unsqueeze_op.dims_forwarded(), True, "UNSQUEEZE_FAILURE : dims_forwarded failed.")
+        return
+
+
+if __name__ == "__main__":
+    unittest.main()
+
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index e7b16963f4c26e5d014ce90fa289c043e2eb0be4..7c3bc0f6f68506c02af1723b263455a9c72b1f3a 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -29,15 +29,13 @@ class test_attributes(unittest.TestCase):
         conv_op = aidge_core.Conv2D(in_channels , out_channels, k_dims).get_operator()
         self.assertEqual(conv_op.in_channels(), in_channels)
         self.assertEqual(conv_op.out_channels(), out_channels)
-        self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
+        self.assertEqual(conv_op.attr.get_attr("kernel_dims"), k_dims)
 
     def test_fc(self):
         in_channels = 4
         out_channels = 8
-        nb_bias = True
-        fc_op = aidge_core.FC(in_channels, out_channels, nb_bias).get_operator()
+        fc_op = aidge_core.FC(in_channels, out_channels).get_operator()
         self.assertEqual(fc_op.out_channels(), out_channels)
-        self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
 
     def test_producer_1D(self):
         dims = [5]
@@ -67,7 +65,7 @@ class test_attributes(unittest.TestCase):
     def test_leaky_relu(self):
         negative_slope = 0.25
         leakyrelu_op = aidge_core.LeakyReLU(negative_slope).get_operator()
-        self.assertEqual(leakyrelu_op.get_attr("NegativeSlope"), negative_slope)
+        self.assertEqual(leakyrelu_op.attr.get_attr("negative_slope"), negative_slope)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/aidge_core/unit_tests/test_recipes.py b/aidge_core/unit_tests/test_recipes.py
index 240bcd9501aa1fd64985fa59c87f01dfdf9343aa..c8dd4c727fbaf8224e8d04111a5054caeb5e5c99 100644
--- a/aidge_core/unit_tests/test_recipes.py
+++ b/aidge_core/unit_tests/test_recipes.py
@@ -49,27 +49,23 @@ class test_recipes(unittest.TestCase):
         add0 = aidge_core.Add(2, name="Add0")
         matmul1 = aidge_core.MatMul(name="MatMul1")
         add1 = aidge_core.Add(2, name="Add1")
-
-        graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
-
         w0 = aidge_core.Producer([1, 1], name="W0")
-        w0.add_child(matmul0, 0, 1)
-        graph_view.add(w0)
-
+        w0.add_child(matmul0, 0, 0)
         b0 = aidge_core.Producer([1], name="B0")
         b0.add_child(add0, 0, 1)
-        graph_view.add(b0)
-
         w1 = aidge_core.Producer([1, 1], name="W1")
-        w1.add_child(matmul1, 0, 1)
-        graph_view.add(w1)
-
+        w1.add_child(matmul1, 0, 0)
         b1 = aidge_core.Producer([1], name="B1")
         b1.add_child(add1, 0, 1)
+
+        graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
+        graph_view.add(w0)
+        graph_view.add(b0)
+        graph_view.add(w1)
         graph_view.add(b1)
 
         old_nodes = graph_view.get_nodes()
-        aidge_core.fuse_mul_add(graph_view)
+        aidge_core.matmul_to_fc(graph_view)
 
         self.assertTrue(len(graph_view.get_nodes()) == len(old_nodes) - 2)
         self.assertTrue("MatMul0" not in [i.name() for i in graph_view.get_nodes()])
diff --git a/aidge_core/unit_tests/test_tensor.py b/aidge_core/unit_tests/test_tensor.py
index d479c98b20534daa804f6019b63d528883c2b568..6348ba8dd1a635ce0299760b6fd31dcef58716cf 100644
--- a/aidge_core/unit_tests/test_tensor.py
+++ b/aidge_core/unit_tests/test_tensor.py
@@ -42,7 +42,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Int32)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int32)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n)
         for i,j in zip(t.dims(), np_array.shape):
@@ -62,7 +62,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Int64)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int64)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n)
         for i,j in zip(t.dims(), np_array.shape):
@@ -73,7 +73,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Float32)
+        self.assertEqual(t.dtype(), aidge_core.dtype.float32)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference
         for i,j in zip(t.dims(), np_array.shape):
diff --git a/aidge_core/unit_tests/test_tensor_scalar.py b/aidge_core/unit_tests/test_tensor_scalar.py
new file mode 100644
index 0000000000000000000000000000000000000000..c054d3b877877c01b84b20983699758087bf05d8
--- /dev/null
+++ b/aidge_core/unit_tests/test_tensor_scalar.py
@@ -0,0 +1,136 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import numpy as np
+
+import aidge_core
+
+class test_tensor_scalar(unittest.TestCase):
+    """Test tensor binding for scalar (0-rank) tensors
+    """
+    def setUp(self):
+        pass
+    def tearDown(self):
+        pass
+
+    def _scalar_np_array(self, dtype=None):
+        return np.array(1, dtype=dtype)
+
+    def _scalar_np(self, dtype=None):
+        return np.int32(1).astype(dtype)
+
+    def test_np_array_int_to_tensor(self):
+        np_array = self._scalar_np_array(dtype="int8")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int8)
+
+        np_array = self._scalar_np_array(dtype="int16")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int16)
+
+        np_array = self._scalar_np_array(dtype="int32")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int32)
+
+        np_array = self._scalar_np_array(dtype="int64")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int64)
+
+    def test_np_array_uint_to_tensor(self):
+        np_array = self._scalar_np_array(dtype="uint8")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.uint8)
+
+        np_array = self._scalar_np_array(dtype="uint16")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.uint16)
+
+        np_array = self._scalar_np_array(dtype="uint32")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.uint32)
+
+        np_array = self._scalar_np_array(dtype="uint64")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.uint64)
+
+    def test_np_scalar_int_to_tensor(self):
+        np_array = self._scalar_np(dtype="int8")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int8)
+
+        np_array = self._scalar_np(dtype="int16")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int16)
+
+        np_array = self._scalar_np(dtype="int32")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int32)
+
+        np_array = self._scalar_np(dtype="int64")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.int64)
+
+    def test_np_scalar_uint_to_tensor(self):
+        np_array = self._scalar_np(dtype="uint8")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.uint8)
+
+        np_array = self._scalar_np(dtype="uint16")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.uint16)
+
+        np_array = self._scalar_np(dtype="uint32")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.uint32)
+
+        np_array = self._scalar_np(dtype="uint64")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.uint64)
+
+    def test_np_array_float_to_tensor(self):
+        np_array = self._scalar_np_array(dtype="float32")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.float32)
+        np_array = self._scalar_np_array(dtype="float64")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.float64)
+
+    def test_np_scalar_float_to_tensor(self):
+        np_array = self._scalar_np(dtype="float32")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.float32)
+        np_array = self._scalar_np(dtype="float64")
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.dtype.float64)
+
+    def test_getcoord_getidx_scalar(self):
+        np_array = self._scalar_np_array()
+        t = aidge_core.Tensor(np_array)
+        coord = t.get_coord(0)
+        self.assertEqual(tuple(coord), ())
+        idx = t.get_idx(coord)
+        self.assertEqual(idx, 0)
+
+    def test_indexing_scalar(self):
+        np_array = self._scalar_np_array()
+        t = aidge_core.Tensor(np_array)
+        val = t[0]
+        self.assertEqual(val, np_array[()])
+
+    def test_coord_indexing_scalar(self):
+        np_array = self._scalar_np_array()
+        t = aidge_core.Tensor(np_array)
+        val = t[()]
+        self.assertEqual(val, np_array[()])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/aidge_core/unit_tests/test_topological_order.py b/aidge_core/unit_tests/test_topological_order.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e7f2e2d9b9770c2fae1e5c2812ba33113589134
--- /dev/null
+++ b/aidge_core/unit_tests/test_topological_order.py
@@ -0,0 +1,67 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+
+class test_topological_order(unittest.TestCase):
+    """Test python binding for nodes ordering"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_generic_loop_order_0(self):
+        # Define a generic recurrent loop header operator with
+        # inputs (init, back) and outputs (loop, last).
+        # Note that the back edge must be specified, otherwise the
+        # generated order may not schedule the loop header before the add.
+        loop0 = aidge_core.GenericOperator("Loop", 2, 0, 2, "Loop#0")
+        loop0.get_operator().set_back_edges({1})
+        assert not loop0.get_operator().is_back_edge(0)
+        assert loop0.get_operator().is_back_edge(1)
+        add0 = aidge_core.Add(2, "add0")
+
+        loop0.add_child(add0, 0, 1)
+        add0.add_child(loop0, 0, 1)
+        graph = aidge_core.GraphView()
+        graph.add(loop0)
+        graph.add(add0)
+
+        nodes = graph.get_ordered_nodes()
+        assert len(nodes) == 2
+        assert nodes == [loop0, add0]
+
+    def test_generic_loop_order_1(self):
+        # Define a generic recurrent loop header operator with
+        # inputs (back, init) and outputs (loop, last).
+        # Note that the back edge must be specified, otherwise the
+        # generated order may not schedule the loop header before the add.
+        loop0 = aidge_core.GenericOperator("Loop", 2, 0, 2, "Loop#0")
+        loop0.get_operator().set_back_edges({0})
+        assert not loop0.get_operator().is_back_edge(1)
+        assert loop0.get_operator().is_back_edge(0)
+        add0 = aidge_core.Add(2, "add0")
+
+        loop0.add_child(add0, 0, 1)
+        add0.add_child(loop0, 0, 0)
+        graph = aidge_core.GraphView()
+        graph.add(loop0)
+        graph.add(add0)
+
+        nodes = graph.get_ordered_nodes()
+        assert len(nodes) == 2
+        assert nodes == [loop0, add0]
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/aidge_core/utils.py b/aidge_core/utils.py
index d82d524b7e886ed396507376a5934a748a89e44c..b6890bc2432b29499d1b06e7229c8c524a36cb06 100644
--- a/aidge_core/utils.py
+++ b/aidge_core/utils.py
@@ -1,3 +1,20 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import queue
+import threading
+import subprocess
+import pathlib
+from typing import List
+
+
 def template_docstring(template_keyword, text_to_replace):
     """Method to template docstring
 
@@ -6,11 +23,88 @@ def template_docstring(template_keyword, text_to_replace):
     :param text_to_replace: Text to replace your template with.
     :type text_to_replace: str
     """
+
     def dec(func):
-        if "{"+template_keyword+"}" not in func.__doc__:
+        if "{" + template_keyword + "}" not in func.__doc__:
             raise RuntimeError(
-                f"The function {function.__name__} docstring does not contain the template keyword: {template_keyword}.")
+                f"The function {func.__name__} docstring does not contain the template keyword: {template_keyword}."
+            )
         func.__doc__ = func.__doc__.replace(
-            "{"+template_keyword+"}", text_to_replace)
+            "{" + template_keyword + "}", text_to_replace
+        )
         return func
+
     return dec
+
+
+def run_command(command: List[str], cwd: pathlib.Path = None):
+    """
+    Run a command and yield its stdout and stderr lines, which subprocess.check_call
+    / subprocess.call do not expose.
+    If the subprocess exits with a non-zero return code, subprocess.CalledProcessError is raised.
+    Args:
+        command : command to run, with the same syntax as subprocess.call
+        cwd : path from which the command must be called
+
+    Call example:
+    ```python
+        try:
+            for std_line in run_command(
+                [
+                    "cmake",
+                    str(self.EXPORT_PATH.absolute()),
+                    "-DPYBIND=1",
+                    f"-DCMAKE_INSTALL_PREFIX:PATH={install_path}",
+                ],
+                cwd=str(self.BUILD_DIR),
+            ):
+                print(std_line, end="")
+        except subprocess.CalledProcessError as e:
+            print(f"An error occurred: {e}\nFailed to configure export.")
+    ```
+    """
+    process = subprocess.Popen(
+        command, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
+    )
+
+    stdout_queue = queue.Queue()
+    stderr_queue = queue.Queue()
+
+    def enqueue_output(stream, queue_to_append):
+        for line in iter(stream.readline, ""):
+            queue_to_append.put(line)
+        stream.close()
+
+    stdout_thread = threading.Thread(
+        target=enqueue_output, args=(process.stdout, stdout_queue)
+    )
+    stderr_thread = threading.Thread(
+        target=enqueue_output, args=(process.stderr, stderr_queue)
+    )
+    stdout_thread.start()
+    stderr_thread.start()
+
+    while (
+        stdout_thread.is_alive()
+        or stderr_thread.is_alive()
+        or not stdout_queue.empty()
+        or not stderr_queue.empty()
+    ):
+        try:
+            stdout_line = stdout_queue.get_nowait()
+            yield stdout_line
+        except queue.Empty:
+            pass
+
+        try:
+            stderr_line = stderr_queue.get_nowait()
+            yield stderr_line
+        except queue.Empty:
+            pass
+
+    return_code = process.wait()
+    if return_code != 0:
+        raise subprocess.CalledProcessError(return_code, command)
diff --git a/cmake/PybindDependency.cmake b/cmake/PybindDependency.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..1f4e7d426fa8d78a98d6bcce44d9d7dfab17ec1e
--- /dev/null
+++ b/cmake/PybindDependency.cmake
@@ -0,0 +1,65 @@
+function(add_pybind_dependency target_name)
+
+    # This function adds dependencies on pybind/python for targets
+    # that depend on them. This is orthogonal to the creation of a
+    # pybind python module.
+
+    # In this case we need to add additional dependencies and distinguish the two link-time usages of the archive:
+
+    #### 1. link for producing a python binding module, which must not include the python interpreter
+
+    # In case 1, the archive is bound to a python module which will provide the runtime,
+    # hence we only add dependencies on the pybind and python headers. We also install the pybind
+    # headers for backward compatibility with dependent build systems which may not depend on pybind.
+
+    #### 2. link for producing an executable (tests for instance) which must include the python interpreter
+
+    # In case 2, a library or executable must also depend on the embedded python libraries,
+    # hence we add a dependency on Python::Python when the target is not a module. We also account
+    # for the case where the python libraries are not present (such as on cibuildwheel). In that
+    # case only python modules can be built, not standalone executables.
+
+    # Make detection of Development.Embed optional; we need to separate the component detections,
+    # otherwise the variables set by the Interpreter component may be left undefined.
+    find_package(Python COMPONENTS Interpreter)
+    find_package(Python COMPONENTS Development)
+    if(NOT Python_Development.Embed_FOUND)
+        message(WARNING "Could not find Python embed libraries, falling back to Python Module only mode. If you are running this from cibuildwheel, this warning is expected.")
+        find_package(Python COMPONENTS Development.Module)
+    endif()
+
+    # Set these variables which are used in the package config (aidge_core-config.cmake.in)
+    # and for conditional builds depending on the presence of the python interpreter library.
+    set(AIDGE_REQUIRES_PYTHON TRUE PARENT_SCOPE)
+    set(AIDGE_PYTHON_HAS_EMBED ${Python_Development.Embed_FOUND} PARENT_SCOPE)
+
+    # Add pybind11 header dependencies; the headers for the package interface are installed below.
+    target_include_directories(${target_name} SYSTEM PUBLIC
+        $<INSTALL_INTERFACE:include/_packages_deps/${target_name}>
+        $<BUILD_INTERFACE:${pybind11_INCLUDE_DIR}>)
+
+    # Add include dirs for Python.h
+    target_include_directories(${target_name} SYSTEM PUBLIC ${Python_INCLUDE_DIRS})
+
+    # Add the Python embedded interpreter when the target is not a module (test executables for instance).
+    # This also requires Development.Embed to be installed on the system.
+    if (Python_Development.Embed_FOUND)
+    set(target_is_module $<STREQUAL:$<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>)
+    target_link_libraries(${target_name} INTERFACE $<$<NOT:${target_is_module}>:Python::Python>)
+    endif()
+
+    # Install pybind headers such that dependent modules can find them
+    install(DIRECTORY ${pybind11_INCLUDE_DIR}/pybind11
+        DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/_packages_deps/${target_name}
+    )
+
+endfunction()
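+
+# Hypothetical usage sketch (names are illustrative): a library whose public
+# headers use pybind could declare the dependency like this:
+#
+#   add_library(my_backend STATIC ${my_backend_sources})
+#   add_pybind_dependency(my_backend)
+#
+# Test executables that link my_backend then pick up Python::Python
+# automatically whenever Development.Embed is available.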
diff --git a/cmake/PybindModuleCreation.cmake b/cmake/PybindModuleCreation.cmake
index 8030c1a8639e4b7ae0c5fb865e928a4260c6ae7d..853810e24b40eadb0830645a4373c238177ad649 100644
--- a/cmake/PybindModuleCreation.cmake
+++ b/cmake/PybindModuleCreation.cmake
@@ -1,21 +1,21 @@
 function(generate_python_binding name target_to_bind)
-    add_definitions(-DPYBIND)
-    Include(FetchContent)
 
+    find_package(Python COMPONENTS Interpreter Development.Module)
+
+    Include(FetchContent)
     FetchContent_Declare(
     PyBind11
     GIT_REPOSITORY https://github.com/pybind/pybind11.git
     GIT_TAG        v2.10.4 # or a later release
     )
-
-    # Use the New FindPython mode, recommanded. Requires CMake 3.15+
-    find_package(Python COMPONENTS Interpreter Development)
     FetchContent_MakeAvailable(PyBind11)
 
     message(STATUS "Creating binding for module ${name}")
     file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
 
     pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO_EXTRAS required for pip install
-    target_include_directories(${name} PUBLIC "python_binding")
-    target_link_libraries(${name} PUBLIC ${target_to_bind})
+    target_include_directories(${name} PRIVATE "python_binding")
+
+    # Link specified target to bind
+    target_link_libraries(${name} PRIVATE ${target_to_bind})
 endfunction()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 940440bad52e367fe04872a308c99e4c802fa242..cadd8c85ca541862cc6f298fa055713a6f65e3ed 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -36,8 +36,11 @@
 #include "aidge/nodeTester/ConditionalInterpreter.hpp"
 
 #include "aidge/operator/Add.hpp"
+#include "aidge/operator/And.hpp"
+#include "aidge/operator/ArgMax.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/BitShift.hpp"
 #include "aidge/operator/Concat.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
@@ -47,6 +50,7 @@
 #include "aidge/operator/Gather.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/GlobalAveragePooling.hpp"
+#include "aidge/operator/GridSample.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/operator/MetaOperator.hpp"
@@ -57,8 +61,10 @@
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/Pow.hpp"
 #include "aidge/operator/ReduceMean.hpp"
+#include "aidge/operator/ReduceSum.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/operator/Reshape.hpp"
+#include "aidge/operator/Resize.hpp"
 #include "aidge/operator/Shape.hpp"
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/operator/Slice.hpp"
@@ -70,6 +76,10 @@
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/stimuli/Stimulus.hpp"
 
+#include "aidge/operator/ShiftMax.hpp"
+#include "aidge/scheduler/ShiftGELU.hpp"
+#include "aidge/stimuli/ILayerNorm.hpp"
+
 #include "aidge/recipes/Recipes.hpp"
 
 #include "aidge/utils/Attributes.hpp"
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 1fc9168da120ba87c916b1a6a346997be69184b4..4af7da64ebca3c02eb9aabca1f2dad88fd8b9829 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -14,73 +14,181 @@
 
 #include <string>
 #include <vector>
+#include <functional>
 
 #include "aidge/utils/Types.h"
+#include "aidge/utils/DynamicAttributes.hpp"
+#include "aidge/data/Data.hpp"
 #include "aidge/data/Elts.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
 
 namespace Aidge {
+class Node;
 class Operator;
 
+/**
+ * @brief ImplSpec stores the requirements or the specifications of an implementation.
+ * 
+ */
+struct ImplSpec {
+    struct IOSpec {
+        IOSpec(DataType type_, DataFormat format_ = DataFormat::Any, const std::vector<std::pair<int, int>>& dims_ = {}):
+            type(type_),
+            format(format_),
+            dims(dims_)
+        {}
+
+        DataType type;
+        DataFormat format;
+        std::vector<std::pair<int, int>> dims;
+    };
+
+    ImplSpec(const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const IOSpec& io, const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const IOSpec& i, const IOSpec& o, const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const std::vector<IOSpec>& i, const std::vector<IOSpec>& o, const DynamicAttributes& attrs_ = DynamicAttributes());
+    ImplSpec(const Aidge::ImplSpec&);
+    ~ImplSpec() noexcept;
+
+    std::vector<IOSpec> inputs;
+    std::vector<IOSpec> outputs;
+    DynamicAttributes attrs;
+};
+
+inline bool operator==(const ImplSpec::IOSpec& lhs, const ImplSpec::IOSpec& rhs) {
+    return (lhs.type == rhs.type)
+        && (lhs.format == rhs.format)
+        && (lhs.dims == rhs.dims);
+}
+
+inline bool operator<(const ImplSpec::IOSpec& lhs, const ImplSpec::IOSpec& rhs) {
+    return (lhs.type < rhs.type)
+        || (lhs.type == rhs.type && lhs.format < rhs.format)
+        || (lhs.type == rhs.type && lhs.format == rhs.format && lhs.dims < rhs.dims);
+}
+
+inline bool operator<(const ImplSpec& lhs, const ImplSpec& rhs) {
+    return (lhs.inputs < rhs.inputs)
+        || (lhs.inputs == rhs.inputs && lhs.outputs < rhs.outputs)
+        || (lhs.inputs == rhs.inputs && lhs.outputs == rhs.outputs && lhs.attrs < rhs.attrs);
+}
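+
+// Illustrative sketch only: a backend implementation accepting and producing
+// float32 tensors in any data format could advertise itself with a spec like
+//   ImplSpec spec(ImplSpec::IOSpec(DataType::Float32),
+//                 ImplSpec::IOSpec(DataType::Float32));
+// using the (input, output) constructor declared above.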
+
+/**
+ * @brief Impl stores the details of a specific implementation.
+ * It is associated to a ImplSpec in a registry.
+ * 
+ */
+template <class FwdFunc, class BwdFunc>
+struct Impl {
+    Impl(std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso_,
+      std::function<FwdFunc> forward_,
+      std::function<BwdFunc> backward_ = nullptr):
+        prodConso(prodConso_), forward(forward_), backward(backward_) {}
+
+    std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso;
+    std::function<FwdFunc> forward;
+    std::function<BwdFunc> backward;
+};
+
 class OperatorImpl {
 public:
     OperatorImpl(const Operator& op, const std::string& backend = "");
     virtual void forward();
     virtual void backward();
+    virtual std::shared_ptr<ProdConso> prodConso();
 
     const std::string& backend() const noexcept {
         return mBackend;
     }
-    /**
-     * @brief Minimum amount of data from a specific input required by the
-     * implementation to be run.
-     *
-     * @param inputIdx Index of the input analysed.
-     * @return std::size_t
-     */
-    virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
-    // Amount of input data that cannot be overwritten during the execution.
-    virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
-
-    // Memory required at an output for a given input size.
-    virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
+    const Operator& getOperator() const noexcept {
+        return mOp;
+    }
 
     /**
-     * @brief Total amount of consumed data from a specific input.
-     *
-     * @param inputIdx Index of the input analysed.
-     * @return DimSize_t
+     * @brief Get the operator required implementation specification, according
+     * to the current operator configuration.
+     * 
      */
-    virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
+    ImplSpec getRequiredSpec() const;
 
     /**
-     * @brief Total amount of produced data ready to be used on a specific output.
-     *
-     * @param outputIdx Index of the output analysed.
-     * @return DimSize_t
+     * @brief Get the best implementation that matches \p requiredSpecs.
+     * If no implementation matches \p requiredSpecs, \p requiredSpecs is
+     * returned.
+     * 
      */
-    virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
+    ImplSpec getBestMatch(const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Update the Consummer Producer system by simulating the consumption and production of i/o
-     *
+     * @brief Get an adapted meta operator corresponding to the required 
+     * specifications \p requiredSpecs from the implementation specifications
+     * \p spec.
+     * 
+     * @param spec Implementation specification
+     * @param requiredSpecs Required specifications
+     * @return std::shared_ptr<Node> Adapted meta op or nullptr
      */
-    virtual void updateConsummerProducer();
+    std::shared_ptr<Node> getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Reset the Consummer Producer system.
-     *
+     * @brief Get the best adapted meta operator corresponding to the required 
+     * specifications \p requiredSpecs.
+     * The best adaptation is the one with the lowest overhead cost.
+     * Currently, it is the one requiring the least number of additional
+     * operators to match the available implementations.
+     * 
+     * @param requiredSpecs Required specifications
+     * @return std::shared_ptr<Node> Adapted meta op or nullptr
      */
-    virtual void resetConsummerProducer();
+    std::shared_ptr<Node> getBestAdaptation(const ImplSpec& requiredSpecs) const;
 
     virtual ~OperatorImpl() = default;
 
 protected:
+    virtual std::shared_ptr<ProdConso> getProdConso() const;
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const;
+    bool checkIOSpec(const ImplSpec::IOSpec& required, const ImplSpec::IOSpec& spec) const;
+
     const Operator &mOp;
     const std::string mBackend;
-    std::vector<Elts_t> mNbConsumedData;
-    std::vector<Elts_t> mNbProducedData;
+    std::shared_ptr<ProdConso> mProdConso;
 };
 } // namespace Aidge
 
+template<>
+struct fmt::formatter<Aidge::ImplSpec::IOSpec> {
+    template<typename ParseContext>
+    inline constexpr auto parse(ParseContext& ctx) {
+        return ctx.begin();
+    }
+
+    template<typename FormatContext>
+    inline auto format(Aidge::ImplSpec::IOSpec const& ioSpec, FormatContext& ctx) const {
+        return fmt::format_to(ctx.out(), "{}, {}, {}", ioSpec.type, ioSpec.format, ioSpec.dims);
+    }
+};
+
+template<>
+struct fmt::formatter<Aidge::ImplSpec> {
+    template<typename ParseContext>
+    inline constexpr auto parse(ParseContext& ctx) {
+        return ctx.begin();
+    }
+
+    template<typename FormatContext>
+    inline auto format(Aidge::ImplSpec const& implSpec, FormatContext& ctx) const {
+        return fmt::format_to(ctx.out(), "{}, {}", implSpec.inputs, implSpec.outputs);
+    }
+};
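+
+// These specializations let specs appear directly in fmt-based log messages,
+// e.g. fmt::format("required spec: {}", requiredSpecs).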
+
 #endif /* AIDGE_BACKEND_OPERATORIMPL_H_ */
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index e11a6d26fd8d2977cbee39719ce32c8bf98cb057..57c6c385d5fdcc9f2439983bd04cc8ece0d8d8f5 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -201,9 +201,7 @@ public:
     /**
      * @brief Set every element of the implementation to zero.
      */
-    virtual void zeros() {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Function not implented");
-    }
+    virtual void zeros() = 0;
 
     const std::string backend() const { return mBackend; }
 
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 526a2dd2eec5e7ed1d2736a5c0ab9c9065622ad7..234bd0ab78557294cbf4f2393bb9ca9471467779 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -136,10 +136,14 @@ static Registrar<Tensor> registrarTensorImpl_cpu_UInt32(
         {"cpu", DataType::UInt32}, Aidge::TensorImpl_cpu<uint32_t>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
         {"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
-        {"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Int8(
         {"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt64(
+        {"cpu", DataType::UInt64}, Aidge::TensorImpl_cpu<uint64_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
+        {"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_UInt8(
         {"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
 }  // namespace
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 2752ec484b2112d5847bd8754dbe8c3be71fd608..23221e653ba725e4463b06cfabb5483a20756701 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -16,6 +16,7 @@
 #include <fmt/format.h>
 #include <string>
 #include <tuple>
+#include <array>
 
 #include "aidge/data/half.hpp"
 #include "aidge/utils/Attributes.hpp"
@@ -47,9 +48,47 @@
     UInt8,
     UInt16,
     UInt32,
-    UInt64
+    UInt64,
+    Any
 };
 
+enum class DataFormat {
+    Default,
+    NCHW,
+    NHWC,
+    CHWN,
+    NCDHW,
+    NDHWC,
+    CDHWN,
+    Any
+};
+
+using DataFormatTranspose = std::array<size_t, 5>;
+// Permutation arrays dict to obtain DataFormat (same order as DataFormat enum)
+constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
+    // Important: in this array only, dimension index must start at 1, not 0!
+    // (0 is the default value)
+    {},
+    {1, 2, 3, 4},
+    {1, 3, 4, 2},
+    {2, 3, 4, 1},
+    {1, 2, 3, 4, 5},
+    {1, 3, 4, 5, 2},
+    {2, 3, 4, 5, 1}
+}};
+
+/**
+ * Get the DataFormatTranspose array to transpose data from src to dst DataFormat.
+ * @param src Source DataFormat
+ * @param dst Destination DataFormat
+ * @return DataFormatTranspose Permutation array to achieve a transposition
+ *         from src to dst DataFormat.
+*/
+DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataFormat& dst);
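+// For example, getDataFormatTranspose(DataFormat::NCHW, DataFormat::NHWC) is
+// expected to return the permutation {0, 2, 3, 1} (0-based): output dim i is
+// read from source dim perm[i], gathering N,H,W,C out of an NCHW layout.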
+
 class Data {
 public:
     Data(const std::string& type): mType(type) {};
@@ -83,7 +119,11 @@ const char* const EnumStrings<Aidge::DataType>::data[]
     = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Ternary",
        "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16",
        "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6",
-       "UInt7", "UInt8", "UInt16", "UInt32", "UInt64"};
+       "UInt7", "UInt8", "UInt16", "UInt32", "UInt64", "Any"};
+
+template <>
+const char* const EnumStrings<Aidge::DataFormat>::data[]
+    = {"Default", "NCHW", "NHWC", "CHWN", "NCDHW", "NDHWC", "CDHWN", "Any"};
 
 template <Aidge::DataType D> struct cpptype {
     using type = void; // Placeholder
@@ -106,6 +146,7 @@ template <Aidge::DataType D> using cpptype_t = typename cpptype<D>::type;
 
 namespace Aidge {
 inline auto format_as(DataType dt) { return EnumStrings<Aidge::DataType>::data[static_cast<int>(dt)]; }
+inline auto format_as(DataFormat df) { return EnumStrings<Aidge::DataFormat>::data[static_cast<int>(df)]; }
 }
 
 #endif /* AIDGE_DATA_H_ */
diff --git a/include/aidge/data/DataProvider.hpp b/include/aidge/data/DataProvider.hpp
index 62d10a6983e8cf5fd8e2730d3203bed97284e336..6c19b5355e406454a2e20bc8994d0ab04d53576a 100644
--- a/include/aidge/data/DataProvider.hpp
+++ b/include/aidge/data/DataProvider.hpp
@@ -35,6 +35,9 @@ private:
     // Desired size of the produced batches
     const std::size_t mBatchSize;
 
+    // The backend for data tensors
+    std::string mBackend;
+
     // Enable random shuffling for learning
     const bool mShuffle;
 
@@ -67,7 +70,7 @@ public:
      * @param database database from which to load the data.
      * @param batchSize number of data samples per batch.
      */
-    DataProvider(const Database& database, const std::size_t batchSize, const bool shuffle = false, const bool dropLast = false);
+    DataProvider(const Database& database, const std::size_t batchSize, const std::string& backend = "cpu", const bool shuffle = false, const bool dropLast = false);
 
 public:
     /**
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 3737eb07e38d7651ca496c99be43d387ab5c4fa0..58e893ca5d5339d93799415f076dd69d54db69ca 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -24,10 +24,7 @@
 
 #include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
-#include "aidge/operator/Add.hpp"
-#include "aidge/operator/Div.hpp"
-#include "aidge/operator/Mul.hpp"
-#include "aidge/operator/Sub.hpp"
+
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ArrayHelpers.hpp"
@@ -39,9 +36,10 @@ namespace Aidge {
  * Contains a pointer to an actual contiguous implementation of data.
  */
 class Tensor : public Data,
-               public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
+               public Registrable<Tensor, std::tuple<std::string, DataType>, std::function<std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>> {
    private:
     DataType mDataType = DataType::Float32; /** enum to specify data type. */
+    DataFormat mDataFormat = DataFormat::Default; /** enum to specify data format. */
     std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
     std::vector<DimSize_t> mStrides; /** Stride dimensions of the tensor. */
     std::shared_ptr<TensorImpl> mImpl = nullptr; /** Pointer to the actual data implementation. */
@@ -59,14 +57,16 @@ class Tensor : public Data,
 
     /**
      * @brief Construct a new empty Tensor object.
-     * It has the features of an undefined scalar.
+     * It is considered undefined, i.e. dims can't be forwarded from such a Tensor.
+     * @ref undefined() method for details
      */
-    Tensor(DataType dtype = DataType::Float32)
+    Tensor(DataType dtype = DataType::Float32, DataFormat dformat = DataFormat::Default)
         : Data(Type),
           mDataType(dtype),
+          mDataFormat(dformat),
           mDims(std::vector<DimSize_t>({})),
           mStrides({1}),
-          mSize(1)
+          mSize(0)
     {
         // ctor
     }
@@ -83,6 +83,7 @@ class Tensor : public Data,
     Tensor(T val)
         : Data(Type),
           mDataType(NativeType<VT>::type),
+          mDataFormat(DataFormat::Default),
           mDims({}),
           mStrides({1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
@@ -128,6 +129,7 @@ class Tensor : public Data,
     constexpr Tensor(Array1D<T, SIZE_0> &&arr)
         : Data(Type),
           mDataType(NativeType<T>::type),
+          mDataFormat(DataFormat::Default),
           mDims({SIZE_0}),
           mStrides({1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
@@ -146,6 +148,7 @@ class Tensor : public Data,
     constexpr Tensor(Array2D<T, SIZE_0, SIZE_1> &&arr)
         : Data(Type),
           mDataType(NativeType<T>::type),
+          mDataFormat(DataFormat::Default),
           mDims({SIZE_0, SIZE_1}),
           mStrides({SIZE_1, 1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1})),
@@ -164,6 +167,7 @@ class Tensor : public Data,
     constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr)
         : Data(Type),
           mDataType(NativeType<T>::type),
+          mDataFormat(DataFormat::Default),
           mDims({SIZE_0, SIZE_1, SIZE_2}),
           mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2})),
@@ -183,6 +187,7 @@ class Tensor : public Data,
     constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr)
         : Data(Type),
           mDataType(NativeType<T>::type),
+          mDataFormat(DataFormat::Default),
           mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
           mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
@@ -266,19 +271,7 @@ class Tensor : public Data,
      * @param other
      * @return Tensor
      */
-    Tensor operator+(const Tensor& other) const {
-        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
-        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
-        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend");
-        auto add_ = Add_Op(2);
-        add_.associateInput(0, std::make_shared<Tensor>(*this));
-        add_.associateInput(1, std::make_shared<Tensor>(other));
-        add_.setDataType(dataType());
-        add_.setBackend(mImpl->backend());
-        add_.forward();
-        // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-        return add_.getOutput(0)->clone();
-    }
+    Tensor operator+(const Tensor& other) const;
 
     /**
      * @brief Element-wise subtraction operation for two ``Tensor``s.
@@ -289,19 +282,7 @@ class Tensor : public Data,
      * @param other
      * @return Tensor
      */
-    Tensor operator-(const Tensor& other) const {
-        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
-        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
-        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend");
-        auto sub_ = Sub_Op();
-        sub_.associateInput(0, std::make_shared<Tensor>(*this));
-        sub_.associateInput(1, std::make_shared<Tensor>(other));
-        sub_.setDataType(dataType());
-        sub_.setBackend(mImpl->backend());
-        sub_.forward();
-        // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-        return sub_.getOutput(0)->clone();
-    }
+    Tensor operator-(const Tensor& other) const;
 
     /**
      * @brief Element-wise multiplication operation for two ``Tensor``s.
@@ -312,19 +293,7 @@ class Tensor : public Data,
      * @param other
      * @return Tensor
      */
-    Tensor operator*(const Tensor& other) const {
-        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
-        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
-        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend");
-        auto mul_ = Mul_Op();
-        mul_.associateInput(0, std::make_shared<Tensor>(*this));
-        mul_.associateInput(1, std::make_shared<Tensor>(other));
-        mul_.setDataType(dataType());
-        mul_.setBackend(mImpl->backend());
-        mul_.forward();
-        // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-        return mul_.getOutput(0)->clone();
-    }
+    Tensor operator*(const Tensor& other) const;
 
     /**
      * @brief Element-wise division operation for two ``Tensor``s.
@@ -335,19 +304,25 @@ class Tensor : public Data,
      * @param other
      * @return Tensor
      */
-    Tensor operator/(const Tensor& other) const {
-        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
-        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
-        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend");
-        auto div_ = Div_Op();
-        div_.associateInput(0, std::make_shared<Tensor>(*this));
-        div_.associateInput(1, std::make_shared<Tensor>(other));
-        div_.setDataType(dataType());
-        div_.setBackend(mImpl->backend());
-        div_.forward();
-        // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-        return div_.getOutput(0)->clone();
-    }
+    Tensor operator/(const Tensor& other) const;
+
+    /**
+     * @brief Element-wise sqrt operation for Tensor.
+     * @return Tensor
+     */
+    Tensor sqrt() const;
+
+    /**
+     * @brief Element-wise abs operation for Tensor.
+     * @return Tensor
+     */
+    Tensor abs() const;
+
+    /**
+     * @brief Mean operation for Tensor.
+     * @return Tensor
+     */
+    Tensor mean() const;
 
     ~Tensor() noexcept;
 
@@ -383,22 +358,7 @@ public:
      * @param copyFrom If true (default), move data from previous backend/device
      * to the new one. Previous data is lost otherwise.
      */
-    inline void setBackend(const std::string &name, DeviceIdx_t device = 0, bool copyFrom = true) {
-        if (mImpl) {
-            if (mImpl->device() != std::make_pair(name, device)) {
-                // Backend change: create new impl, copy from old to new and replace
-                // impl
-                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
-                if (copyFrom) {
-                    newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0);
-                }
-                setImpl(newImpl);
-            }
-        }
-        else {
-            mImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
-        }
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0, bool copyFrom = true);
 
     /**
      * @brief Get a list of available backends.
@@ -412,6 +372,12 @@ public:
      */
     constexpr DataType dataType() const noexcept { return mDataType; }
 
+    /**
+     * @brief Get the data format enum.
+     * @return constexpr DataFormat
+     */
+    constexpr DataFormat dataFormat() const noexcept { return mDataFormat; }
+
     /**
      * @brief Set the DataType of the Tensor and converts data
      * if the Tensor has already been initialized and copyCast is true.
@@ -430,6 +396,23 @@ public:
         mDataType = dt;
     }
 
+    /**
+     * @brief Set the DataFormat of the Tensor and transpose data, only
+     * if the Tensor has already been initialized and copyTrans is true.
+     * In this case, a transposition occurs only if both previous format and
+     * new format are different from DataFormat::Default.
+     * @param df New DataFormat
+     * @param copyTrans If true (default), when both previous format and new
+     *                  format are different from DataFormat::Default, previous
+     *                  data is copy-transposed.
+     */
+    void setDataFormat(const DataFormat df, bool copyTrans = true) {
+        if (mImpl && copyTrans && (dataFormat() != df) && df != DataFormat::Default && dataFormat() != DataFormat::Default) {
+            copyTranspose(*this, getDataFormatTranspose(dataFormat(), df));
+        }
+        mDataFormat = df;
+    }
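+
+    // Illustrative sketch (editor's note): converting an initialized Tensor
+    // from one explicit format to another triggers a copy-transpose, whereas
+    // setting a format on a DataFormat::Default Tensor only tags it:
+    //
+    //     t.setDataFormat(DataFormat::NCHW);  // from Default: attribute only
+    //     t.setDataFormat(DataFormat::NHWC);  // NCHW -> NHWC: data transposed
+    //
+    // NCHW/NHWC are assumed here to be valid DataFormat enumerators.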
+
     /**
      * @brief Get the Impl object
      * @return constexpr const std::shared_ptr<TensorImpl>&
@@ -478,12 +461,16 @@ public:
      */
     constexpr inline const std::vector<DimSize_t>& dims() const noexcept { return mDims; }
 
+    inline DimSize_t dim(DimIdx_t idx) const { return mDims[idx]; }
+
     /**
      * @brief Get strides of the Tensor object.
      * @return constexpr const std::vector<DimSize_t>&
      */
     constexpr inline const std::vector<DimSize_t>& strides() const noexcept { return mStrides; }
 
+    inline DimSize_t stride(DimIdx_t idx) const { return mStrides[idx]; }
+
     /**
      * @brief Return true if Tensor is contiguous in memory.
      * @return bool
@@ -496,6 +483,18 @@ public:
      */
     constexpr std::size_t size() const noexcept { return mSize; }
 
+    /**
+     * @brief Return the current capacity of the tensor, i.e. the actual memory
+     * currently being allocated. It can be different from the size:
+     * - Capacity can be 0 if the tensor memory was not yet initialized (because
+     *   of lazy initialization, memory is allocated only when it needs to be
+     *   accessed the first time).
+     * - Capacity can be > size if the tensor was downsized but memory was not
+     *   reallocated.
+    */
+    inline std::size_t capacity() const noexcept { return mImpl ? mImpl->capacity() : 0; }
+
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
      * If the overall size is not changed (meaning we actually only performed a
@@ -526,14 +525,30 @@ public:
     void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>());
 
     /**
-     * @brief Return if the Tensor object has at leastone element.
-     * @return true
-     * @return false
+     * @brief Return whether the Tensor object has a rank of 0, i.e. dimensions == {}.
+     * For defined Tensors, this implies that the Tensor is a scalar.
+     * For backward compatibility reasons, it is valid to call this predicate
+     * even on undefined Tensors, in which case it returns true.
+     * Hence, before testing the rank with this method, always check that the
+     * Tensor is not undefined().
+     * In particular, for operations such as forwardDims(), one should always
+     * use undefined() to test whether the Tensor dimensions have been defined.
+     * In that case, empty() can be used to distinguish scalars from N-D Tensors.
+     * @return true if rank is 0 or the tensor is undefined
      */
     bool empty() const { return mDims.empty(); }
-    // bool newempty() const noexcept {
-    //     return mSize == 0;
-    // }
+
+    /**
+     * @brief Returns whether the Tensor object is undefined.
+     * An undefined Tensor is a tensor for which dimensions have not been
+     * defined yet; hence, dimension forwarding cannot be done from undefined tensors.
+     * The only case where a tensor is undefined is after the default constructor
+     * and before any call to resize().
+     * As soon as the resize() method has been called, the Tensor is irreversibly defined.
+     * @ref empty() method for distinguishing an undefined Tensor from a scalar.
+     * @return true if undefined
+     */
+    bool undefined() const { return mSize == 0; }
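+
+    // Illustrative sketch (editor's note) of the undefined()/empty() contract:
+    //
+    //     Tensor t;          // default constructed
+    //     t.undefined();     // true: dimensions not set yet
+    //     t.empty();         // also true, hence check undefined() first
+    //     t.resize({});      // irreversibly defined, rank 0
+    //     t.undefined();     // false
+    //     t.empty();         // true: a defined scalar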
 
     /**
      * @brief Set each element of the tensor to zero.
@@ -575,31 +590,30 @@ public:
 
     inline void print() const { fmt::print("{}\n", toString()); }
 
-    std::shared_ptr<Tensor> grad() {
-        return mGrad;
-    }
-    void setGrad(std::shared_ptr<Tensor> newGrad) {
-        mGrad = newGrad;
-    }
-
     /**
-     * @brief Associate the gradient with a Tensor instance and set its implementation
-     * if none was previously set.
+     * @brief Get the gradient Tensor. If not initialized, create a Tensor instance
+     * and set its implementation if none was previously set.
      * @note Dimensions for the Tensor instance are copied from the original current Tensor.
      * @note If a Tensor instance was already associated, only the implementation is created
      * with values set to 0.
      * @note If Tensor instance and implementation already existed for the gradient
      * nothing is done.
      */
-    void initGrad() {
+    std::shared_ptr<Tensor> grad() {
         if (!mGrad) {
             mGrad = std::make_shared<Tensor>(mDims);
         }
         if (!mGrad->hasImpl()) {
             mGrad->setDataType(dataType());
+            mGrad->setDataFormat(dataFormat());
             mGrad->setBackend(hasImpl() ? mImpl->backend() : "cpu");
             mGrad->zeros();
         }
+        return mGrad;
+    }
+
+    void setGrad(std::shared_ptr<Tensor> newGrad) {
+        mGrad = newGrad;
     }
 
     /**
@@ -626,6 +640,7 @@ public:
      * the remaining coordinates are assumed to be 0.
      * Beware: the contiguous index will only correspond to the storage index
      * if the tensor is contiguous!
+     * Note that the coordIdx may be an empty vector.
      *
      * @param coordIdx Coordinate to an element in the tensor
      * @return DimSize_t Contiguous index
@@ -633,12 +648,13 @@ public:
     std::size_t getIdx(const std::vector<std::size_t>& coordIdx) const {
         AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
         std::size_t flatIdx = 0;
-        std::size_t i = 0;
-        for(; i < coordIdx.size() - 1; ++i) {
-            AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
-            flatIdx = (flatIdx + coordIdx[i]) * mDims[i + 1];
+        for(std::size_t i = 0; i < mDims.size(); ++i) {
+            const auto coord = i < coordIdx.size() ? coordIdx[i] : 0;
+            AIDGE_ASSERT(coord < mDims[i], "Coordinate does not fit the dimensions of the tensor");
+            const auto nextDimSize = i + 1 < mDims.size() ? mDims[i + 1] : 1;
+            flatIdx = (flatIdx + coord) * nextDimSize;
         }
-        return flatIdx + coordIdx[i];
+        return flatIdx;
     }
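+
+    // Illustrative sketch (editor's note): for a Tensor with dims {2, 3, 4},
+    // the loop above computes the row-major contiguous index, padding missing
+    // trailing coordinates with 0:
+    //
+    //     getIdx({1, 2, 3}) == ((0 + 1)*3 + 2)*4 + 3 == 23
+    //     getIdx({1})       == ((0 + 1)*3 + 0)*4 + 0 == 12
+    //     getIdx({})        == 0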
 
     /**
@@ -650,10 +666,10 @@ public:
      * @return DimSize_t Storage index
      */
     std::size_t getStorageIdx(const std::vector<std::size_t>& coordIdx) const {
+        AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
         for(std::size_t i = 0; i < coordIdx.size(); ++i) {
             AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
         }
-        AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
         return std::inner_product(coordIdx.cbegin(), coordIdx.cend(), mStrides.cbegin(), DimSize_t(0));
     }
 
@@ -708,6 +724,13 @@ public:
     */
     void copyFrom(const Tensor& src);
 
+    /**
+     * Transpose data from another Tensor (which can be itself).
+     * @param src Source tensor to copy from.
+    */
+    void copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose);
+    void copyTranspose(const Tensor& src, const DataFormatTranspose& transpose);
+
     /**
      * Copy-cast data from a Tensor.
      * @param src Source tensor to copy-cast from.
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 627e78790020c04d50f839f01de2130ba8d8d774..efdb06c4ac6d0e6898d899cc639a88d1da301000 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -20,9 +20,18 @@
 #include <utility>
 #include <vector>
 
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <fmt/format.h>
+#endif
+
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
 namespace Aidge {
 enum class DataType;
 
@@ -87,6 +96,12 @@ public:
      */
     inline void setName(const std::string &name) { mName = name; }
 
+    /**
+     * @brief Set the name of every Node based on the current GraphView name,
+     * in the following form: "name_type#type-id"
+     */
+    void setNodesName() const;
+
     /**
      * @brief Save the GraphView as a Mermaid graph in a .md file at the
      * specified location.
@@ -103,6 +118,13 @@ public:
     */
     bool inView(const NodePtr& nodePtr) const;
 
+    /**
+     * Check whether a node with the given name is in the current GraphView.
+     * @param nodeName Name of the node to test the existence of.
+     * @return true if the GraphView contains a Node with the name ``nodeName``.
+     */
+    bool inView(const std::string& nodeName) const;
+
     inline NodePtr rootNode() const noexcept {
         return mRootNode;
     }
@@ -128,6 +150,24 @@ public:
     void setOrderedInputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& inputs);
     void setOrderedOutputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& outputs);
 
+    /**
+     * @brief Get a topological node order for an acyclic walk of the graph.
+     * Graph cycles are broken on operator back edges, so that resolution on a
+     * single-level lattice can be done in a single pass, as is generally
+     * the case for static resolution of Tensor shapes/datatypes.
+     * When reversed is true, gets a topological order on the reversed graph,
+     * which is equivalent to a post-DFS order of the graph.
+     * The returned order is deterministic given the graph node set and the
+     * graph ordered output nodes.
+     * The output nodes' connectivity must cover all nodes of the graph,
+     * otherwise a runtime exception is thrown.
+     * The returned order is biased toward left-to-right child order, both
+     * for the topological and the post-DFS order.
+     * @param reversed returns a topological order of the reversed graph
+     * @return the ordered list of nodes
+     */
+    std::vector<Aidge::NodePtr> getOrderedNodes(bool reversed = false) const;
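+
+    // Illustrative usage sketch (editor's note): a typical single-pass static
+    // resolution walks the order returned here, e.g.
+    //
+    //     for (const auto& node : graph->getOrderedNodes()) { /* forwardDims, ... */ }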
+
     /**
      * @brief Get inputs of the current GraphView with their associated id.
      * The rank of the nodes are their rank in the vector.
@@ -211,7 +251,7 @@ public:
      * GraphView object's Nodes, by calling Node::forwardDims().
      * This function verifies the following conditions:
      * - Every node will forwardDims() regardless of if dims were previously forwarded or not;
-     * - forwadDims() calls are made in node dependencies order, because if dims have changed 
+     * - forwardDims() calls are made in node dependency order, because if dims have changed
      *   at any point in the graph, it must be propagated correctly to all succeeding nodes;
      * - It handles cyclic dependencies correctly (currently only induced by the Memorize_Op).
      */
@@ -219,8 +259,10 @@ public:
 
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
-    /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
+    /** @brief Set the same data type for each Operator of the GraphView object's Nodes. */
     void setDataType(const DataType& datatype) const;
+    /** @brief Set the same data format for each Operator of the GraphView object's Nodes. */
+    void setDataFormat(const DataFormat& dataformat) const;
 
 ///////////////////////////////////////////////////////
 //        TOPOLOGY
@@ -257,7 +299,7 @@ public:
      * @brief Get the Nodes pointed to by the GraphView object.
      * @return std::set<NodePtr>
      */
-    inline const std::set<NodePtr>& getNodes() const { return mNodes; }
+    inline const std::set<NodePtr>& getNodes() const noexcept { return mNodes; }
 
     /**
      * @brief Get the operator with the corresponding name if it is in the
@@ -341,7 +383,8 @@ public:
      * @param other_graph GraphView containing the Nodes to include.
      * @return true if graph ordering is unique (meaning inputs/outputs order is well defined).
      */
-    bool add(std::shared_ptr<GraphView> otherGraph);
+    bool add(std::shared_ptr<GraphView> otherGraph,
+             bool includeLearnableParam = true);
 
     /**
      * @brief Include a Node in the current GraphView and link it to another
@@ -381,10 +424,17 @@ public:
         addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor);
     }
 
-    inline void updateNodeName(const std::string& oldName, const std::string& newName){
-        AIDGE_ASSERT(mNodeRegistry.find(oldName) != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted !", oldName, name());
-        mNodeRegistry[newName] = mNodeRegistry[oldName];
-        mNodeRegistry.erase(oldName);
+    inline void updateNodeName(NodePtr nodeToRename, const std::string& newName) {
+        const std::string& oldName = nodeToRename->name();
+        // The new name must not already be registered in this graph.
+        AIDGE_ASSERT(mNodeRegistry.find(newName) == mNodeRegistry.end(), "Name {} is already used in graph {}.", newName, name());
+
+        if (!oldName.empty()) { // the node already had a name: move its registry entry
+            AIDGE_ASSERT(mNodeRegistry.find(oldName) != mNodeRegistry.end(), "No node named {} in graph {}, the graph may be corrupted!", oldName, name());
+            mNodeRegistry[newName] = mNodeRegistry[oldName];
+            mNodeRegistry.erase(oldName);
+        } else { // the node did not have a name yet: just register it
+            mNodeRegistry[newName] = nodeToRename;
+        }
     }
 
     /**
@@ -450,8 +500,8 @@ public:
      * @return true replacement has been performed
      * @return false no replacement has been performed
      */
-    static bool replace(const std::shared_ptr<GraphView>& oldG, const std::shared_ptr<GraphView>& newG);
     static bool replace(const std::set<NodePtr>& oldNodes, const std::set<NodePtr>& newNodes);
+    static bool replace(const std::shared_ptr<GraphView>& oldG, const std::shared_ptr<GraphView>& newG);
 
     /**
      * @brief Clone the GraphView with shared Operators. It is a new GraphView, with cloned Nodes, but the new Nodes refer to the same Operators as the original ones.
@@ -499,6 +549,11 @@ public:
      */
     void updateInputsOutputs();
 
+#ifdef PYBIND
+    std::string repr() const {
+        return fmt::format("GraphView(name='{}', Nodes: {} (inputs: {}, outputs: {}))", name(), mNodes.size(), mInputNodes.size(), mOutputNodes.size());
+    }
+#endif
 private:
 ///////////////////////////////////////////////////////
 //        TENSOR MANAGEMENT
diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..951aa6b29d73d9055cf9f13c8ddc6313cb506879
--- /dev/null
+++ b/include/aidge/graph/Matching.hpp
@@ -0,0 +1,234 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_GRAPH_MATCHING_H_
+#define AIDGE_CORE_GRAPH_MATCHING_H_
+
+#include <algorithm>  // std::find_if (used by removeWhiteSpace)
+#include <cctype>     // std::isspace
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+
+namespace Aidge {
+/**
+ * A simple experimental graph matching class which works by direct, single pass
+ * parse and match, without constructing any intermediate representation.
+ * Due to its single-pass nature, it has some constraints on how the queries must
+ * be formulated.
+*/
+class SinglePassGraphMatching {
+public:
+    struct Context {
+        Context();
+        Context(const Context&); // explicitly define Context copy constructor
+                                 // to avoid automatic inlining
+        Context& operator=(const Context&);
+        ~Context() noexcept;
+
+        std::string query;
+        bool firstSequence = true;
+        bool firstNode = true;
+        bool inSequence = false;
+        bool lookForChild = true;
+        bool singleOutput = true;
+        IOIndex_t edgeLeftIdx = 0;
+        IOIndex_t edgeRightIdx = 0;
+        NodePtr startNode;
+
+        // For check & debug purpose:
+        size_t depth = 0;
+        std::set<std::string> anchors;
+    };
+
+    struct MatchingResult {
+        // Mutable is required to allow modifying MatchingResult members with a std::set
+        // iterator. Any change should not modify the set ordering.
+        // We use graph->rootNode() as the std::set key, which is guaranteed
+        // to never change after insertion!
+        mutable std::shared_ptr<GraphView> graph;
+        mutable std::map<std::string, std::map<std::string, NodePtr>> anchors;
+        mutable NodePtr startNode;
+
+        MatchingResult();
+
+        MatchingResult(const MatchingResult& other);
+        MatchingResult& operator=(const MatchingResult& other);
+        ~MatchingResult() noexcept;
+    };
+
+    SinglePassGraphMatching(std::shared_ptr<GraphView> graph) : mGraph(graph) {}
+    SinglePassGraphMatching(const SinglePassGraphMatching& other);
+    SinglePassGraphMatching& operator=(const SinglePassGraphMatching& other);
+    ~SinglePassGraphMatching() noexcept;
+
+    /**
+     * Matches a query by direct, single pass parse and match.
+     * The returned matches are non-ordered and therefore stored in a std::set.
+     *
+     * Some rules:
+     * - The first node of the first sequence is the root node and cannot be optional
+     *   WRONG: Conv?->ReLU (will throw an error)
+     *   GOOD: ReLU<-Conv?
+     *
+     * - The first node of any further sequence must be an existing anchor
+     *   (the anchor cannot be in the middle of the sequence)
+     *   WRONG: Conv->ReLU;Pad->Conv (will throw an error)
+     *          Pad->Conv;Conv->ReLU (will throw an error)
+     *   GOOD: Conv#->ReLU;Conv#<-Pad
+     *         Pad->Conv#;Conv#->ReLU
+     *
+     * - Any node already matched cannot be matched again (except for anchors)
+     *
+     * - By default, an edge matches the first output to the first input.
+     *   EXAMPLE: ReLU->Conv is equivalent to ReLU-0-0>Conv
+     *            To match the second input, use ReLU-0-1>Conv (or ReLU-1>Conv)
+     *            To match the second output, use ReLU-1-0>Conv
+     *            To match any input and/or any output, use *, like ReLU-1-*>Conv
+     *            or ReLU-*-0>Conv or ReLU-*-*>Conv
+     *            The same is true for the "<-" edge syntax.
+     *
+     * - When several nodes could match for a given node query, the first one
+     *   not already in the matching result is matched, following the
+     *   ordered list of children/parents
+     *   EXAMPLE: Producer in "Conv<*-Producer" will match the weights Producer first
+     *   EXAMPLE: Producer in "Conv#<1-.;Conv#<*-Producer" will match the bias Producer
+     *            because the weights Producer has already been matched
+     *
+     * - One always matches a sub-graph: additional connections can exist anywhere
+     *   in the matched sub-graph
+     *   EXAMPLE: "Add<*-." will match the Add operator and its first input, any
+     *            additional inputs will not be included in the result
+     *   EXAMPLE: "(Add#<*-.)+" will match the Add operator and all of its inputs
+     *            Note that the anchor is required since we intend to match several
+     *            inputs of the same node!
+     *
+     * - In Aidge, a node output can be connected to multiple other nodes. In
+     *   your query, you can allow it or not, with the "~" or "-" modifier.
+     *   EXAMPLE: "Conv->ReLU" will match the Conv that are **only** connected
+     *            to a ReLU node at their output #0.
+     *            "Conv~>ReLU" will match all the Conv connected to a ReLU even
+     *            if they are also connected to other nodes at the same output #0.
+     *   When implementing a match & replace recipe, beware that you don't break
+     *   branches in the middle of your matching result if you use "~"!
+     *
+     * - The matching results can be overlapping, meaning that some nodes may be
+     *   found in multiple results. Some results may be subsets of other results.
+     *   EXAMPLE: assume graph Conv#1->ReLU#1->Conv#2->ReLU#2
+     *            "Conv->ReLU?->Conv?->ReLU?" will return both
+     *            Conv#1->ReLU#1->Conv#2->ReLU#2 and Conv#2->ReLU#2
+     *   To avoid this behavior, set the disjoint argument to true. In this case,
+     *   only Conv#1->ReLU#1->Conv#2->ReLU#2 will be kept in the example above.
+     *
+     * - Whitespaces are allowed anywhere in the query
+     *
+     * QUERY = SEQ | NODE_OR_BLOCK (';' (SEQ | NODE_OR_BLOCK))*
+     *
+     * @param query The query to search.
+     * @param disjoint If true, only keep the longest disjoint (non-overlapping) matches.
+     * @return std::set<MatchingResult> Set of matches, each stored in a MatchingResult struct.
+    */
+    std::set<MatchingResult> match(const std::string& query, bool disjoint = false);
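+
+    // Illustrative usage sketch (editor's note), assuming `graph` contains
+    // Conv -> ReLU chains:
+    //
+    //     auto gm = SinglePassGraphMatching(graph);
+    //     const auto results = gm.match("Conv->ReLU");
+    //     for (const auto& res : results) {
+    //         // res.graph is the matched sub-graph, res.anchors its anchors
+    //     }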
+
+    /**
+     * @brief Same as match() but with a mandatory start node.
+     * 
+     * @param startNode Mandatory start node for the query.
+     * @param query The query to search.
+     * @return MatchingResult MatchingResult struct, with empty graph if query
+     * is not found, or the graph corresponding to the query.
+     */
+    MatchingResult matchFrom(NodePtr startNode, const std::string& query);
+
+    /**
+     * Filter to keep only the longest disjoint (non-overlapping) matches.
+    */
+    std::set<MatchingResult> filterLonguestDisjoint(const std::set<MatchingResult>& matches);
+
+    inline void addNodeLambda(const std::string& name, bool(func)(const NodePtr&)) {
+        mLambda[name] = func;
+    }
+
+private:
+    std::shared_ptr<GraphView> mGraph;
+    std::map<std::string, bool(*)(const NodePtr&)> mLambda;
+
+    /**
+     * QUANTIFIER = '?' | '*' | '+' | ('{' [0-9]+ '}')
+     * NODE_OR_BLOCK = (BLOCK | NODE) QUANTIFIER?
+    */
+    bool matchNodeOrBlock(Context& ctx, std::set<MatchingResult>& matches);
+
+    /**
+     * BLOCK = '(' SEQ | PAR | ALT | BLOCK | NODE ')'
+    */
+    bool matchBlock(Context& ctx, std::set<MatchingResult>& matches);
+
+    /**
+     * SEQ = NODE_OR_BLOCK (EDGE NODE_OR_BLOCK)+
+    */
+    bool matchSequence(Context& ctx, std::set<MatchingResult>& matches);
+
+    /**
+     * PAR = NODE_OR_BLOCK ('&' NODE_OR_BLOCK)+
+    */
+    bool matchParallel(Context& ctx, std::set<MatchingResult>& matches);
+
+    /**
+     * ALT = NODE_OR_BLOCK ('|' NODE_OR_BLOCK)+
+    */
+    bool matchAlternative(Context& ctx, std::set<MatchingResult>& matches);
+
+    /**
+     * IO_INDEX_ANY = '*'
+     * IO_INDEX = IO_INDEX_ANY | [0-9]+
+     * CHILD_EDGE = ('-' | '~') (IO_INDEX '-')? IO_INDEX? '>'
+     * PARENT_EDGE = '<' (IO_INDEX '-')? IO_INDEX? ('-' | '~')
+     * EDGE = CHILD_EDGE | PARENT_EDGE
+    */
+    bool matchEdge(Context& ctx, std::set<MatchingResult>& matches);
+
+    /**
+     * TYPE = [A-Za-z0-9_]+
+     * ANCHOR = [A-Za-z0-9_]+
+     * LAMBDA = [A-Za-z0-9_]+
+     * NODE = ((TYPE | '.') ('#' ANCHOR)? ('[' LAMBDA ']')?) | '$'
+    */
+    bool matchNode(Context& ctx, std::set<MatchingResult>& matches);
+
+    inline void removeWhiteSpace(std::string& str) {
+        str.erase(str.begin(),
+            std::find_if(str.begin(),
+                        str.end(),
+                        [](char c) { return !std::isspace(c); }));
+    }
+
+    struct CompareMatchingResultSize {
+        bool operator()(const MatchingResult& lhs, const MatchingResult& rhs) const {
+            // Some matches size could be the same
+            if (lhs.graph->getNodes().size() == rhs.graph->getNodes().size()) {
+                // In this case, use rootNode which is guaranteed to be different!
+                return lhs.graph->rootNode() < rhs.graph->rootNode();
+            }
+
+            return lhs.graph->getNodes().size() > rhs.graph->getNodes().size();
+        }
+    };
+};
+
+inline bool operator<(const Aidge::SinglePassGraphMatching::MatchingResult& lhs, const Aidge::SinglePassGraphMatching::MatchingResult& rhs) {
+    // Matches' rootNode values are guaranteed to be different!
+    return lhs.graph->rootNode() < rhs.graph->rootNode();
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CORE_GRAPH_MATCHING_H_ */
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 2a0a4a3b703670c8ace05e03fc5c797fe861a423..32932fa6f598737644f74d4e2ce5da89557b5d3d 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -17,12 +17,22 @@
 #include <set>
 #include <string>
 #include <vector>
+#include <deque>
 #include <utility>
 
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <fmt/format.h>
+#endif
+
 #include "aidge/graph/Connector.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/utils/Types.h"
 
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
+
 namespace Aidge {
 
 using NodePtr = std::shared_ptr<Node>;
@@ -54,6 +64,9 @@ private:
   std::vector<std::vector<IOIndex_t>> mIdInChildren; /** List of input index for each Node linked to each output of the Node. */
   std::vector<IOIndex_t> mIdOutParents; /** index of the output linked to each input of the Node. Default: gk_IODefaultIndex. */
 
+  std::deque<std::function<bool()>> mForward;
+  std::deque<std::function<bool()>> mBackward;
+
 public:
   Node() = delete;
 
@@ -64,12 +77,28 @@ public:
    */
   Node(std::shared_ptr<Operator> op, const std::string& name = "");
 
-  virtual ~Node() = default;
+  virtual ~Node();
 
   friend bool operator==(const Node &lhs, const Node &rhs) {
     return lhs.shared_from_this() == rhs.shared_from_this();
   }
 
+  void addBeforeForward(std::function<bool()> func) {
+    mForward.push_front(func);
+  }
+
+  void addAfterForward(std::function<bool()> func) {
+    mForward.push_back(func);
+  }
+
+  void addBeforeBackward(std::function<bool()> func) {
+    mBackward.push_front(func);
+  }
+
+  void addAfterBackward(std::function<bool()> func) {
+    mBackward.push_back(func);
+  }
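+
+  // Illustrative sketch (editor's note): the callbacks registered above are
+  // queued around the node's forward()/backward() passes; their exact
+  // invocation semantics live in the .cpp file, which is not shown in this
+  // diff. A hypothetical use:
+  //
+  //     node->addBeforeForward([]() { /* e.g. log or validate inputs */ return true; });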
+
 public:
   ///////////////////////////////////////////////////////
   //        FUNCTIONAL DESCRIPTION
@@ -100,6 +129,17 @@ public:
    */
   void setName(const std::string &name);
 
+  /**
+   * @brief Given a base name, generate a new name which is unique
+   * in all the GraphViews containing this node.
+   * The method is called recursively, appending the character ``_``
+   * until no duplicate is found; the name is then returned.
+   * @param name Base name to make unique.
+   * @return A name unique in all the GraphViews containing this node.
+  */
+  std::string createUniqueName(std::string name);
+
   /**
    * @brief Type of the node.
    * @return std::string
@@ -173,9 +213,14 @@ public:
    */
   inline IOIndex_t getFirstFreeDataInput() const {
     IOIndex_t i = 0;
-    for (; (i < nbData()) && (input(i).second != gk_IODefaultIndex); ++i) {}
-    // assert((i<nbData()) && "No free data input for Node");
-    return (i < nbData()) ? i : gk_IODefaultIndex;
+    for (; i < nbInputs(); ++i) {
+      if ((inputCategory(i) == InputCategory::Data || inputCategory(i) == InputCategory::OptionalData)
+        && input(i).second == gk_IODefaultIndex)
+      {
+        break;
+      }
+    }
+    return (i < nbInputs()) ? i : gk_IODefaultIndex;
   }
 
 
@@ -207,13 +252,22 @@ public:
   inline IOIndex_t nbInputs() const noexcept { return getOperator()->nbInputs(); }
 
   /**
-   * @brief Number of input specifically for data.
+   * @brief Category of a specific input (Data or Param, optional or not).
    * Data inputs exclude inputs expecting parameters (weights or bias).
-   * @details [data, data, weight, bias] => 2
-   * @return IOIndex_t
+   * @return InputCategory
    */
-  inline IOIndex_t nbData() const noexcept {
-    return getOperator()->nbData();
+  inline InputCategory inputCategory(IOIndex_t idx) const {
+    return getOperator()->inputCategory(idx);
+  }
+
+  /**
+   * @brief Returns whether the given node parent index is a back edge.
+   * Back edges are defined by the operator; the node parent index
+   * corresponds to the operator input index.
+   * @return true if the operator defines it as a back edge
+   */
+  inline bool parentIsBackEdge(IOIndex_t idx) const {
+    return getOperator()->isBackEdge(idx);
   }
 
   /**
@@ -241,7 +295,9 @@ public:
   inline std::set<std::shared_ptr<GraphView>> views() const noexcept {
     std::set<std::shared_ptr<GraphView>> res;
     for (const auto &v : mViews) {
-      res.insert(v.lock());
+      if (auto p = v.lock()) {
+        res.insert(p);
+      }
     }
     return res;
   }
@@ -406,6 +462,27 @@ public:
 
   std::set<NodePtr> getNodeDelta(int delta,std::set<Aidge::NodePtr> nodeSee);
 
+#ifdef PYBIND
+    std::string repr() const {
+        std::string nodeString{fmt::format("Node(name='{}', optype='{}'", name(), type())};
+        if (mParents.size() > 0) {
+            std::vector<std::int8_t> connectedParents(mParents.size(), 0);
+            for (std::size_t i = 0; i < nbInputs(); ++i) {
+                if (mParents[i])
+                    connectedParents[i] = std::int8_t(1);
+            }
+            nodeString = fmt::format("{}, parents: {}", nodeString, connectedParents);
+        }
+        if (mChildren.size() > 0) {
+            std::vector<std::vector<std::int8_t>> connectedChildren{};
+            for (std::size_t i = 0; i < nbOutputs(); ++i) {
+                connectedChildren.push_back(std::vector<std::int8_t>(mChildren[i].size(), std::int8_t(1)));
+            }
+            nodeString = fmt::format("{}, children: {}", nodeString, connectedChildren);
+        }
+        return fmt::format("{})", nodeString);
+    }
+#endif
 
 private:
   ///////////////////////////////////////////////////////
diff --git a/include/aidge/graph/OpArgs.hpp b/include/aidge/graph/OpArgs.hpp
index 9d1ba6fd1e1df594634bfd93a24663ff178b7ee6..70a431b5621270a6b6083a436aba145ce9dafbf3 100644
--- a/include/aidge/graph/OpArgs.hpp
+++ b/include/aidge/graph/OpArgs.hpp
@@ -12,8 +12,10 @@
 #ifndef AIDGE_CORE_GRAPH_OPARGS_H_
 #define AIDGE_CORE_GRAPH_OPARGS_H_
 
-#include <memory>
 #include <cassert>
+#include <memory>
+#include <string>
+#include <vector>
 
 namespace Aidge {
 class Node;
@@ -34,6 +36,10 @@ public:
     OpArgs(const std::shared_ptr<Node>& node_)
      : mNode(node_) {assert(mNode && "The Node provided should not be a nullptr.");}
 
+    OpArgs(const OpArgs&);
+    OpArgs& operator=(const OpArgs&);
+    ~OpArgs() noexcept;
+
     inline std::shared_ptr<Node> node() const noexcept {
         return mNode;
     }
@@ -52,20 +58,22 @@ public:
  * one in a sequential way. Nodes linked with the Sequential graph
  * generation instructions must have a single output.
  * Sequential(A, B, C) returns A-->B-->C.
- * @param inputs List of Node and GraphView to link sequentially.
+ * @param[in] inputs List of Node and GraphView to link sequentially.
+ * @param[in] name Name of the GraphView to return.
  * @return std::shared_ptr<GraphView> Pointer to the generated view.
  */
-std::shared_ptr<GraphView> Sequential(std::vector<OpArgs> inputs);
+std::shared_ptr<GraphView> Sequential(std::vector<OpArgs> inputs, std::string name = "");
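+
+// Illustrative usage sketch (editor's note), assuming a, b and c are
+// single-output Nodes:
+//
+//     auto g = Sequential({a, b, c}, "my_graph");  // yields a-->b-->c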
 
 /////////////////////////////
 // Parallel
 
 /**
  * @brief Creates a GraphView with provided Nodes without linking them.
- * @param inputs List of Node and GraphView to link sequentially.
+ * @param[in] inputs List of Node and GraphView to add without linking them.
+ * @param[in] name Name of the GraphView to return.
  * @return std::shared_ptr<GraphView> pointer to the generated view.
  */
-std::shared_ptr<GraphView> Parallel(std::vector<OpArgs> inputs);
+std::shared_ptr<GraphView> Parallel(std::vector<OpArgs> inputs, std::string name = "");
 
 /////////////////////////////
 // Residual
@@ -77,9 +85,10 @@ std::shared_ptr<GraphView> Parallel(std::vector<OpArgs> inputs);
  * generation instructions must have a single output.
  * Recursive(A, B, C) returns A-->B-->C , A-->C.
  * @param inputs List of Node and GraphView to link sequentially.
+ * @param[in] name Name of the GraphView to return.
  * @return std::shared_ptr<GraphView> pointer to the generated view.
  */
-std::shared_ptr<GraphView> Residual(std::vector<OpArgs> inputs);
+std::shared_ptr<GraphView> Residual(std::vector<OpArgs> inputs, std::string name = "");
 
 }
 
diff --git a/include/aidge/graphRegex/GraphRegex.hpp b/include/aidge/graphRegex/GraphRegex.hpp
index b62a42fcfeb258e5c659eaeb6681190482f37aa4..573447cf934b196e8b0c32d7a58e1977f5aa5f9a 100644
--- a/include/aidge/graphRegex/GraphRegex.hpp
+++ b/include/aidge/graphRegex/GraphRegex.hpp
@@ -12,13 +12,12 @@
 namespace Aidge{
 
 /**
- * type for recipes function use in query and resolve  
-*/
+ * @brief Type of the recipe functions used in query and resolve.
+ */
 using RecipesFunctionType = std::function<void(std::shared_ptr<MatchSolution>)>;
 
 /**
- * @brief class which is the hight level interface for graph matching, used to simplify match definition  
- * 
+ * @brief High-level interface for graph matching, used to simplify match definitions.
  */
 class GraphRegex{
 
diff --git a/include/aidge/hook/Hook.hpp b/include/aidge/hook/Hook.hpp
index 5e00db5d68f11aadd4f3b6eb8174ba61b33e4a49..5edf231d51f913f58351b4817e145b5f48953ddd 100644
--- a/include/aidge/hook/Hook.hpp
+++ b/include/aidge/hook/Hook.hpp
@@ -24,8 +24,8 @@
 namespace Aidge {
 
 class Operator;
-class Hook : public Registrable<Hook, std::tuple<std::string>, std::shared_ptr<Hook>(const std::shared_ptr<Operator>)> {
-//class Hook : public Registrable<Hook, std::tuple<std::string>, std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>{
+class Hook : public Registrable<Hook, std::tuple<std::string>, std::function<std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>> {
+//class Hook : public Registrable<Hook, std::tuple<std::string>, std::function<std::shared_ptr<Hook>(const std::shared_ptr<Operator>)>>{
 protected:
     const std::shared_ptr<Operator> mOperator;
 
diff --git a/include/aidge/operator/Abs.hpp b/include/aidge/operator/Abs.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f1dc37003fbff9463d041030818ec0534c5ac1fd
--- /dev/null
+++ b/include/aidge/operator/Abs.hpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ABS_H_
+#define AIDGE_CORE_OPERATOR_ABS_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Abs_Op : public OperatorTensor,
+    public Registrable<Abs_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Abs_Op&)>> {
+public:
+    static const std::string Type;
+
+    Abs_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Abs_Op(const Abs_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Abs_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Abs_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Abs_Op>(*this);
+    }
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Abs(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Abs_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_ABS_H_ */
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 4ac14bdaecd16e90586d14699f3b6f1bd6d88cab..daf50771703d6608dbbe90364aac8667aefbdd1d 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -24,17 +24,11 @@
 namespace Aidge {
 
 class Add_Op : public OperatorTensor,
-    public Registrable<Add_Op, std::string, std::shared_ptr<OperatorImpl>(const Add_Op&)> {
+    public Registrable<Add_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Add_Op&)>> {
 public:
     static const std::string Type;
 
-    Add_Op(const IOIndex_t nbIn)
-        : OperatorTensor(Type, nbIn, 0, 1)
-    {
-        if (nbIn == 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
-        }
-    }
+    Add_Op(const IOIndex_t nbIn);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -46,9 +40,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Add_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Add_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
@@ -63,6 +55,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input_0", "data_input_n"};
@@ -72,9 +65,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
-}
+std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_ADD_H_ */
diff --git a/include/aidge/operator/And.hpp b/include/aidge/operator/And.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e4f04e2fa3ec2a4a01f023b9ab203e6b2ab36e76
--- /dev/null
+++ b/include/aidge/operator/And.hpp
@@ -0,0 +1,82 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_AND_H_
+#define AIDGE_CORE_OPERATOR_AND_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+/**
+ * @brief Tensor element-wise logical and operation.
+ */
+class And_Op : public OperatorTensor,
+    public Registrable<And_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const And_Op&)>> {
+public:
+    static const std::string Type;
+
+    /**
+     * @brief Compute element-wise and operation on two given inputs.
+     * @details Supports broadcasting of both operands.
+     */
+    And_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    And_Op(const And_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(And_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::And_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<And_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input_1", "data_input_2"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> And(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<And_Op>(), name);
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_AND_H_ */
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..13f63ce98c526f0c57a363ada4e7f50ccdbfb83b
--- /dev/null
+++ b/include/aidge/operator/ArgMax.hpp
@@ -0,0 +1,136 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ARGMAX_H_
+#define AIDGE_CORE_OPERATOR_ARGMAX_H_
+
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ArgMaxAttr { Axis, KeepDims, SelectLastIndex };
+
+/**
+ * @brief Reduces the given dimension by replacing it with the index of its max value.
+*/
+class ArgMax_Op : public OperatorTensor,
+                public Registrable<ArgMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ArgMax_Op &)>> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<ArgMaxAttr,
+                                        std::int32_t,
+                                        bool,
+                                        bool>;
+    template <ArgMaxAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    ArgMax_Op() = delete;
+
+    /**
+     * @brief Constructor for the ArgMax operator.
+     * @param[in] axis Axis over which to perform the operation.
+     * @param[in] keep_dims If true, a dimension of size 1 is kept in place of the
+     * reduced axis; if false, the dimension is removed completely.
+     * @param[in] select_last_index In case of multiple maxima, if true the last
+     * index is returned; if false, the first index is returned.
+     */
+    ArgMax_Op(std::int32_t axis, bool keep_dims, bool select_last_index)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ArgMaxAttr::Axis>(axis),
+            attr<ArgMaxAttr::KeepDims>(keep_dims),
+            attr<ArgMaxAttr::SelectLastIndex>(select_last_index)))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ArgMax_Op(const ArgMax_Op& op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ArgMax_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ArgMax_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ArgMax_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::Axis>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::KeepDims>(); }
+    inline bool& selectLastIndex() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::SelectLastIndex>(); }
+
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+/**
+ * @brief Compute the index of the max value of a Tensor over the provided axis.
+ * The reduced dimension may be kept (with size 1) or erased.
+ *
+ * @param axis Dimension over which the max should be computed.
+ * @param keep_dims Whether or not the reduced dimension is to be kept.
+ * @param select_last_index Whether to select the last index of max elements in case there are many maximums.
+ * By default, the first max element index is returned.
+ * @param name Name of the Operator.
+ * @return std::shared_ptr<Node> Node containing the Operator.
+ */
+inline std::shared_ptr<Node> ArgMax(std::int32_t axis=0,
+                                    bool keep_dims=true,
+                                    bool select_last_index=false,
+                                    const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<ArgMax_Op>(axis, keep_dims, select_last_index), name);
+
+}
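+
+// Illustrative usage sketch (editor's note): reduce axis 1 of the input,
+// keeping it as a dimension of size 1 in the output:
+//
+//     auto node = ArgMax(/*axis=*/1, /*keep_dims=*/true);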
+
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_ARGMAX_H_ */
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 9a9fced142ebc345c095c1eeca6b9a6c4270cf36..54b40907e8b4127b7b96b95b229440d782149c3d 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -28,27 +28,31 @@ enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
-                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
-                public StaticAttributes<AvgPoolingAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>> {
+                public Registrable<AvgPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>> {
 
 public:
     static const std::string Type;
 
-    AvgPooling_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<AvgPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>>;
     template <AvgPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    AvgPooling_Op() = delete;
+
 
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
-                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {}
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+                        attr<AvgPoolingAttr::StrideDims>(stride_dims),
+                        attr<AvgPoolingAttr::KernelDims>(kernel_dims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -60,9 +64,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::AvgPooling_Op
      */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<AvgPooling_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override final;
 
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
@@ -75,6 +77,11 @@ public:
 
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<AvgPoolingAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<AvgPoolingAttr::KernelDims>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
@@ -85,12 +92,9 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                            const std::string& name = "",
-                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
-    return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
-}
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
@@ -101,8 +105,6 @@ inline std::shared_ptr<Node> AvgPooling(
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
     return AvgPooling(to_array(kernel_dims), name, stride_dims);
 }
-
-
 }  // namespace Aidge
 
 extern template class Aidge::AvgPooling_Op<1>;
@@ -112,8 +114,10 @@ extern template class Aidge::AvgPooling_Op<4>;
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
-                                                          "KernelDims"};
+const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
+    "stride_dims",
+    "kernel_dims"
+};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index aa53f8c43f0be2a0e094946d66fd263bc19e39f5..cdac7935f6ded752201c04b2dda6cfb9e06438ec 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -28,21 +28,31 @@ enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
-                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
-                public StaticAttributes<BatchNormAttr, float, float> {
+                public Registrable<BatchNorm_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>> {
 public:
     static const std::string Type;
 
-    BatchNorm_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<BatchNormAttr, float, float>;
     template <BatchNormAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    BatchNorm_Op() = delete;
 
     constexpr BatchNorm_Op(float epsilon, float momentum)
-        : OperatorTensor(Type, 1, 4, 1),
-          Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
-                           attr<BatchNormAttr::Momentum>(momentum)) {}
+        : OperatorTensor(Type,
+                            {InputCategory::Data,
+                                InputCategory::Param,
+                                InputCategory::Param,
+                                InputCategory::Param,
+                                InputCategory::Param},
+                            1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<BatchNormAttr::Epsilon>(epsilon),
+            attr<BatchNormAttr::Momentum>(momentum))) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -54,9 +64,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::BatchNorm_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<BatchNorm_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
@@ -71,6 +79,11 @@ public:
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& epsilon() const { return mAttributes->template getAttr<BatchNormAttr::Epsilon>(); }
+    inline float& momentum() const { return mAttributes->template getAttr<BatchNormAttr::Momentum>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "scale", "shift", "mean", "variance"};
@@ -89,15 +102,15 @@ std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures,
                                        const float epsilon = 1.0e-5F,
                                        const float momentum = 0.1F,
                                        const std::string& name = "");
+}  // namespace Aidge
 
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&);
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&);
 extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&);
-}  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
+const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum" };
 }
 
 #endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
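Since the attributes now live behind a shared Attributes_ object instead of being inherited, they are read and written through the new accessors. A brief sketch (node name and values are illustrative):

    auto bn = Aidge::BatchNorm<2>(64, 1.0e-5f, 0.1f, "bn1");
    auto bnOp = std::static_pointer_cast<Aidge::BatchNorm_Op<2>>(bn->getOperator());
    bnOp->epsilon() = 1.0e-4f;  // accessors return mutable references into mAttributes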
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..bd14bea76937fbfc42cbafa9636df9b55832fa9d
--- /dev/null
+++ b/include/aidge/operator/BitShift.hpp
@@ -0,0 +1,125 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_BITSHIFT_H_
+#define AIDGE_CORE_OPERATOR_BITSHIFT_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/StaticAttributes.hpp"
+
+
+namespace Aidge {
+    enum class BitShiftAttr { BitShiftdirection };
+
+/**
+ * @brief Tensor BitShift Operator
+ */
+class BitShift_Op : public OperatorTensor,
+    public Registrable<BitShift_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const BitShift_Op&)>> {
+public:
+    enum BitShiftDirection { left, right };
+    static const std::string Type;
+private:
+
+    using Attributes_ = StaticAttributes<BitShiftAttr,BitShiftDirection>;
+    template <BitShiftAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+public:
+
+    BitShift_Op(BitShiftDirection direction)
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+              attr<BitShiftAttr::BitShiftdirection>(direction)))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    BitShift_Op(const BitShift_Op& op)
+        : OperatorTensor(op),mAttributes(op.mAttributes)
+    {
+        if (op.mImpl) {
+            SET_IMPL_MACRO(BitShift_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::BitShift_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<BitShift_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    /**
+     * @brief Specify which backend implementation to use.
+     * @param name Name of the backend.
+     * @param device Index of the target device.
+     */
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    /**
+     * @brief Access the attributes of the operator.
+     *
+     * @return Shared pointer to the attribute storage.
+     */
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    /**
+     * @brief Retrieve the direction in which the shift is applied (left or right).
+     *
+     * @return BitShiftDirection
+     */
+    inline BitShiftDirection& direction() const noexcept { return mAttributes->template getAttr<BitShiftAttr::BitShiftdirection>(); }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"InputTensor", "ShiftAmount"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"OutputTensor"};
+    }
+};
+/**
+ * @brief The bitwise shift operator performs an element-wise shift between the input
+ * tensor and the shift tensor, in the direction specified by "direction".
+ * @param[in] direction Direction of the bitshift (left or right)
+ * @param[in] name Name of the node
+ * @return std::shared_ptr<Node>
+ */
+inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direction, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<BitShift_Op>(direction), name);
+}
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::BitShiftAttr>::data[] = {"BitShiftdirection"};
+
+}
+
+#endif /* AIDGE_CORE_OPERATOR_BITSHIFT_H_ */
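A usage sketch for the new operator (tensor wiring omitted, names illustrative):

    // Element-wise left shift: OutputTensor = InputTensor << ShiftAmount.
    auto shl = Aidge::BitShift(Aidge::BitShift_Op::BitShiftDirection::left, "shl");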
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 6efbc0a214dde3ca969226f734b5ee903fe5ab50..3fa1bb22a0dd9def11e0621b67cbd8395b5344fa 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -19,8 +19,8 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -30,21 +30,31 @@ public:
     void forward() override;
 };
 
+enum class CastAttr { TargetType };
+
 class Cast_Op : public OperatorTensor,
-    public Registrable<Cast_Op, std::string, std::unique_ptr<OperatorImpl>(const Cast_Op&)> {
+    public Registrable<Cast_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Cast_Op&)>> {
 public:
     static const std::string Type;
 
-    Cast_Op() : OperatorTensor(Type, 1, 0, 1) {
-        mImpl = std::make_shared<Cast_OpImpl>(*this);
-    }
+private:
+    using Attributes_ = StaticAttributes<CastAttr, DataType>;
+    template <CastAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Cast_Op() = delete;
+
+    Cast_Op(const DataType targetType);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Cast_Op(const Cast_Op& op)
-        : OperatorTensor(op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Cast_Op, *this, op.backend());
@@ -63,6 +73,10 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline DataType& targetType() const { return mAttributes->template getAttr<CastAttr::TargetType>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -72,9 +86,14 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Cast(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Cast_Op>(), name);
-}
+
+std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = "");
+
+} // namespace Aidge
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" };
 }
 
-#endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
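Cast now takes its target type at construction instead of being created type-less. A sketch, assuming DataType::Float32 from aidge/data/Data.hpp:

    auto toF32 = Aidge::Cast(Aidge::DataType::Float32, "to_f32");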
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index a9a4c9253f3af9f9cd82390256ec70d066017cc5..98835dd2a4b02e51b50636ee8606382a50ba7b89 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -28,59 +28,49 @@
 namespace Aidge {
 class Concat_OpImpl : public OperatorImpl {
 public:
-    Concat_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Concat_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend)
+    {}
     void forward() override;
 };
 
 enum class ConcatAttr { Axis };
 
 class Concat_Op : public OperatorTensor,
-    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)>,
-    public StaticAttributes<ConcatAttr, DimSize_t> {
+    public Registrable<Concat_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Concat_Op&)>> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<ConcatAttr, DimSize_t>;
+private:
+    using Attributes_ = StaticAttributes<ConcatAttr, std::int32_t>;
     template <ConcatAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    Concat_Op(const IOIndex_t nbIn, const DimSize_t axis)
-        : OperatorTensor(Type, nbIn, 0, 1),
-          Attributes_(attr<ConcatAttr::Axis>(axis))
-    {
-        if (nbIn == 0) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
-        }
-        mImpl = std::make_shared<Concat_OpImpl>(*this);
-    }
+public:
+    Concat_Op() = delete;
+
+    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Concat_Op(const Concat_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Concat_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Concat_OpImpl>(*this);
-        }
-    }
+    Concat_Op(const Concat_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Concat_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Concat_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int32_t& axis() const { return mAttributes->template getAttr<ConcatAttr::Axis>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_0", "data_input_n"};
@@ -90,15 +80,13 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const DimIdx_t axis = 0, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
-}
+std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "");
 }
 
 namespace {
     template <>
     const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
-        "Axis"
+        "axis"
     };
 }
 
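The axis attribute switches from DimSize_t to a signed std::int32_t, presumably to leave room for negative, end-relative axes. Sketch (names illustrative):

    // Concatenate two inputs along axis 1.
    auto cat = Aidge::Concat(2, 1, "concat1");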
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..18e626544606fd150b2843d2367aa8858669c2ba
--- /dev/null
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -0,0 +1,136 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
+#define AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
+
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <limits>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+enum class ConstantOfShapeAttr {
+  /**
+   * @brief Value to fill the output tensor with.
+   * It is a scalar tensor holding a value with a fixed data type.
+   */
+  Value,
+};
+
+/**
+ * @brief Generate a tensor whose shape is given by the first input and whose
+ * elements are filled with the value set via attribute.
+ */
+class ConstantOfShape_Op
+    : public OperatorTensor,
+      public Registrable<ConstantOfShape_Op, std::string,
+                         std::function<std::shared_ptr<OperatorImpl>(
+                             const ConstantOfShape_Op &)>> {
+
+public:
+  // name of the type of the operation
+  static const std::string Type;
+
+private:
+  using Attributes_ = StaticAttributes<ConstantOfShapeAttr, Tensor>;
+  template <ConstantOfShapeAttr e>
+  using attr = typename Attributes_::template attr<e>;
+  const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+  /**
+   * @brief Constructor for ConstantOfShape_Op.
+   * @param[in] value A scalar tensor holding the value used to fill
+   * the output tensor.
+   */
+  ConstantOfShape_Op(const Tensor &value = Tensor(0.f))
+      : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+            attr<ConstantOfShapeAttr::Value>(value))) {}
+
+  /**
+   * @brief Copy-constructor. Copy the operator attributes and its output
+   * tensor(s), but not its input tensors (the new operator has no input
+   * associated).
+   * @param op Operator to copy.
+   */
+  ConstantOfShape_Op(const ConstantOfShape_Op &op)
+      : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (op.mImpl) {
+      SET_IMPL_MACRO(ConstantOfShape_Op, *this, op.backend());
+    } else {
+      mImpl = nullptr;
+    }
+  }
+
+  /**
+   * @brief Clone the operator using its copy-constructor.
+   * @see Operator::ConstantOfShape_Op
+   */
+  std::shared_ptr<Operator> clone() const override final {
+    return std::make_shared<ConstantOfShape_Op>(*this);
+  }
+
+  /**
+   * @brief Compute dimensions for the output Tensor
+   * @param allowDataDependency specify if the output shape of this operator
+   * depends on its inputs.
+   */
+  bool forwardDims(bool allowDataDependency = false) override final;
+
+  void setBackend(const std::string &name,
+                  DeviceIdx_t device = 0) override final;
+  std::set<std::string> getAvailableBackends() const override;
+
+  inline std::shared_ptr<Attributes> attributes() const override {
+    return mAttributes;
+  }
+  inline Tensor &value() const noexcept {
+    return mAttributes->template getAttr<ConstantOfShapeAttr::Value>();
+  }
+
+  static const std::vector<std::string> getInputsName() { return {"input"}; }
+  static const std::vector<std::string> getOutputsName() {
+    return {"constant_of_shape"};
+  }
+};
+
+// Factory helper building a Node that holds a ConstantOfShape_Op with the
+// given fill value.
+inline std::shared_ptr<Node> ConstantOfShape(const Tensor value = Tensor(0.f),
+                                             const std::string &name = "") {
+  return std::make_shared<Node>(std::make_shared<ConstantOfShape_Op>(value),
+                                name);
+}
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ConstantOfShapeAttr>::data[] = {"Value"};
+}
+
+#endif // AIDGE_CORE_OPERATOR_CONSTANT_OF_SHAPE_H_
+
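A sketch of the factory (the upstream shape producer is omitted; names are illustrative):

    // Generates a tensor whose shape is read from the input, filled with 1.0f.
    auto ones = Aidge::ConstantOfShape(Aidge::Tensor(1.0f), "ones");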
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index c30282f3438889e233f3d9ed22ab7c7e795b2951..cd1a57dd9ac52d2f5cdff3b5ed54c6dd2aeeed34 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -30,41 +30,36 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, KernelDims, NoBias };
+enum class ConvAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
-                public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
-                public StaticAttributes<ConvAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        bool> {
+                public Registrable<Conv_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>> {
 
 public:
     static const std::string Type;
 
-    Conv_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ConvAttr,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        bool>;
+                                        std::array<DimSize_t, DIM>>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Conv_Op() = delete;
 
     constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
-                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                      bool noBias = false)
-        : OperatorTensor(Type, 1, 2, 1),
-          Attributes_(attr<ConvAttr::StrideDims>(strideDims),
-                      attr<ConvAttr::DilationDims>(dilationDims),
-                    //   attr<ConvAttr::InChannels>(inChannels),
-                    //   attr<ConvAttr::OutChannels>(outChannels),
-                      attr<ConvAttr::KernelDims>(kernelDims),
-                      attr<ConvAttr::NoBias>(noBias)) {}
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConvAttr::StrideDims>(strideDims),
+            attr<ConvAttr::DilationDims>(dilationDims),
+            attr<ConvAttr::KernelDims>(kernelDims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -102,6 +97,7 @@ public:
 
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     DimSize_t inChannels() const {
         if (!getInput(1)) {
@@ -117,6 +113,12 @@ public:
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvAttr::KernelDims>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
@@ -139,21 +141,13 @@ public:
  * @return std::shared_ptr<Node> A Node containing the operator.
  */
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Conv(DimSize_t inChannels,
+std::shared_ptr<Node> Conv(DimSize_t inChannels,
                                   DimSize_t outChannels,
                                   const std::array<DimSize_t, DIM> &kernelDims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                                  bool noBias = false) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
-    addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
-    addProducer(conv, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
-
-    return conv;
-}
+                                  bool noBias = false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
@@ -170,15 +164,15 @@ inline std::shared_ptr<Node> Conv(
 }
 }  // namespace Aidge
 
+extern template class Aidge::Conv_Op<1>;
 extern template class Aidge::Conv_Op<2>;
 
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
-    "StrideDims",
-    "DilationDims",
-    "KernelDims",
-    "NoBias"
+    "stride_dims",
+    "dilation_dims",
+    "kernel_dims"
 };
 }
 
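With the NoBias attribute removed, the bias input is declared InputCategory::OptionalParam and the noBias flag only controls producer creation in the now out-of-line factory. Sketch (names illustrative):

    // 3 -> 16 channels, 3x3 kernel; weight and bias producers are added by the factory.
    auto conv = Aidge::Conv(3, 16, {3, 3}, "conv1");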
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 7091421720aaf4291198823a6d7dcd732a8d9f99..f0a55a299094add58bd3938e9cca9bbb48e21da8 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -29,38 +29,36 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims, NoBias };
+enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
-                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
-                public StaticAttributes<ConvDepthWiseAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       bool> {
+                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>> {
 public:
     static const std::string Type;
 
-    ConvDepthWise_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
-                                             std::array<DimSize_t, DIM>,
-                                             bool>;
+                                             std::array<DimSize_t, DIM>>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    ConvDepthWise_Op() = delete;
 
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                               bool no_bias=false)
-        : OperatorTensor(Type, 1, 2, 1),
-          Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
-                      attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
-                      attr<ConvDepthWiseAttr::NoBias>(no_bias)) {}
+                               const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
+            attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
+            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -85,6 +83,7 @@ public:
                           const IOIndex_t outputIdx = 0) const override;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     DimSize_t nbChannels() const {
         if (!getInput(1)) {
@@ -93,6 +92,11 @@ public:
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
@@ -102,19 +106,12 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
+std::shared_ptr<Node> ConvDepthWise(const DimSize_t nbChannels,
                                            const std::array<DimSize_t, DIM> &kernelDims,
                                            const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                                            const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
-                                           bool noBias=false) {
-    // FIXME: properly handle default w&b initialization in every cases
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
-    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims, noBias), name);
-    addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
-    addProducer(convDW, 2, {(noBias ? 0 : nbChannels)}, "b");
-    return convDW;
-}
+                                           bool noBias=false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
@@ -130,12 +127,13 @@ inline std::shared_ptr<Node> ConvDepthWise(
 }
 }  // namespace Aidge
 
+extern template class Aidge::ConvDepthWise_Op<1>;
 extern template class Aidge::ConvDepthWise_Op<2>;
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims",
-                                                          "KernelDims", "NoBias"};
+const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"stride_dims", "dilation_dims",
+                                                          "kernel_dims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
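The same NoBias removal applies here. Sketch (names illustrative):

    // Depthwise 3x3 convolution over 16 channels.
    auto dw = Aidge::ConvDepthWise(16, {3, 3}, "dw1");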
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..856cd0e85d1abb47d3c163115bef6cbfb59bb66f
--- /dev/null
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -0,0 +1,96 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
+#define AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+class DepthToSpace_OpImpl : public OperatorImpl {
+public:
+    DepthToSpace_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
+enum class DepthToSpaceAttr { BlockSize, Mode };
+
+
+class DepthToSpace_Op : public OperatorTensor,
+                public Registrable<DepthToSpace_Op,
+                    std::string,
+                    std::function<std::shared_ptr<OperatorImpl>(const DepthToSpace_Op &)>> {
+public:
+    static const std::string Type;
+    enum class Mode { DCR, CRD };
+
+private:
+    using Attributes_ = StaticAttributes<DepthToSpaceAttr, std::uint32_t, Mode>;
+    template <DepthToSpaceAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    DepthToSpace_Op() = delete;
+
+    DepthToSpace_Op(const std::uint32_t blockSize, const Mode mode = Mode::CRD);
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    DepthToSpace_Op(const DepthToSpace_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::DepthToSpace_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& blockSize() const { return mAttributes->template getAttr<DepthToSpaceAttr::BlockSize>(); }
+    inline Mode& mode() const { return mAttributes->template getAttr<DepthToSpaceAttr::Mode>(); }
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> DepthToSpace(const std::uint32_t blockSize,
+                                    const DepthToSpace_Op::Mode mode = DepthToSpace_Op::Mode::CRD,
+                                    const std::string& name = "");
+
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
+}
+
+#endif //AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
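A sketch of the factory (names illustrative):

    // Rearranges depth into 2x2 spatial blocks using the default CRD ordering.
    auto d2s = Aidge::DepthToSpace(2, Aidge::DepthToSpace_Op::Mode::CRD, "d2s");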
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 566f4a6ae69b090b3a035b034406d463eeb77317..5ed9e789deab71b107a6071ab11452c3cf73fa9d 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -25,12 +25,12 @@
 namespace Aidge {
 
 class Div_Op : public OperatorTensor,
-    public Registrable<Div_Op, std::string, std::shared_ptr<OperatorImpl>(const Div_Op&)> {
+    public Registrable<Div_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Div_Op&)>> {
 
 public:
     static const std::string Type;
 
-    Div_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Div_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -57,6 +57,7 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_1", "data_input_2"};
@@ -66,9 +67,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Div(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Div_Op>(), name);
-}
-}
+std::shared_ptr<Node> Div(const std::string& name = "");
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_DIV_H_ */
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index 5ec10522e889bb1188b2304940fd892c0928b414..88a4bfd29e7d27e7eaea00d967e0ba631354d253 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -25,35 +25,26 @@
 namespace Aidge {
 
 class Erf_Op : public OperatorTensor,
-    public Registrable<Erf_Op, std::string, std::shared_ptr<OperatorImpl>(const Erf_Op&)> {
+    public Registrable<Erf_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Erf_Op&)>> {
 public:
     static const std::string Type;
 
-    Erf_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Erf_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Erf_Op(const Erf_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Erf_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Erf_Op(const Erf_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Erf_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Erf_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -63,9 +54,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Erf(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Erf_Op>(), name);
-}
+std::shared_ptr<Node> Erf(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_ERF_H_ */
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 9f10970c4fd5b21a1cb92b334167d353f066e05b..592ba4e2b796ba1aede24a737e296ddf1e285499 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -24,24 +24,15 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class FCAttr { NoBias };
-
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
-                                 std::shared_ptr<OperatorImpl>(const FC_Op &)>,
-              public StaticAttributes<FCAttr, bool> {
+                                 std::function<std::shared_ptr<OperatorImpl>(const FC_Op &)>> {
 public:
     static const std::string Type;
 
-    FC_Op() = delete;
-
-    using Attributes_ = StaticAttributes<FCAttr, bool>;
-    template <FCAttr e> using attr = typename Attributes_::template attr<e>;
-
-    FC_Op(bool noBias)
-    : OperatorTensor(Type, 1, 2, 1),
-      Attributes_(attr<FCAttr::NoBias>(noBias))
+    FC_Op()
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1)
     {}
 
     /**
@@ -49,12 +40,11 @@ public:
      * @param op Operator to copy.
      */
     FC_Op(const FC_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
+        : OperatorTensor(op)
     {
-        if (op.mImpl){
+        if (op.mImpl) {
             SET_IMPL_MACRO(FC_Op, *this, op.backend());
-        }else{
+        } else {
             mImpl = nullptr;
         }
     }
@@ -63,15 +53,21 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::FC_Op
      */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<FC_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override final;
 
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    DimSize_t inChannels() const {
+        if (!getInput(1)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Fully Connected (FC) operator has no weight Tensor associated, so no specific number of input channels is imposed.");
+        }
+        return getInput(1)->template dims<2>()[1];
+    }
 
     DimSize_t outChannels() const {
         if (!getInput(1)) {
@@ -88,18 +84,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), name);
-    addProducer(fc, 1, {outChannels, inChannels}, "w");
-    addProducer(fc, 2, {(noBias ? 0 : outChannels)}, "b"); // already sets bias dims
-    return fc;
-}
-} // namespace Aidge
+std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "");
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::FCAttr>::data[] = {"NoBias"};
-}
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
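FC follows the same pattern: noBias survives only as a factory argument, and the new inChannels() helper reads the weight tensor instead of a stored attribute. Sketch (names illustrative):

    auto fc = Aidge::FC(128, 10, /*noBias=*/false, "fc1");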
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..517d63adc59ed848c53852697ab9f8511dfc2a2a
--- /dev/null
+++ b/include/aidge/operator/Fold.hpp
@@ -0,0 +1,128 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_FOLD_H_
+#define AIDGE_CORE_OPERATOR_FOLD_H_
+
+#include <array>
+#include <cmath>    // std::floor
+#include <cstddef>  // std::size_t
+#include <string>
+#include <utility>  // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class FoldAttr { OutputDims, StrideDims, DilationDims, KernelDims };
+
+template <DimIdx_t DIM>
+class Fold_Op : public OperatorTensor,
+                public Registrable<Fold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)>> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<FoldAttr,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>>;
+    template <FoldAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Fold_Op() = delete;
+
+    constexpr Fold_Op(const std::array<DimSize_t, DIM> &outputDims,
+                      const std::array<DimSize_t, DIM> &kernelDims,
+                      const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<FoldAttr::OutputDims>(outputDims),
+            attr<FoldAttr::StrideDims>(strideDims),
+            attr<FoldAttr::DilationDims>(dilationDims),
+            attr<FoldAttr::KernelDims>(kernelDims))) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
+     * input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Fold_Op(const Fold_Op<DIM> &op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Fold_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& outputDims() const { return mAttributes->template getAttr<FoldAttr::OutputDims>(); }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<FoldAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<FoldAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<FoldAttr::KernelDims>(); }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Node> Fold(const std::array<DimSize_t, DIM> &outputDims,
+                                  const std::array<DimSize_t, DIM> &kernelDims,
+                                  const std::string& name = "",
+                                  const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> Fold(
+    DimSize_t const (&outputDims)[DIM],
+    DimSize_t const (&kernelDims)[DIM],
+    const std::string& name = "",
+    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Fold, not supported");
+    return Fold(to_array(outputDims), to_array(kernelDims), name, strideDims, dilationDims);
+}
+}  // namespace Aidge
+
+extern template class Aidge::Fold_Op<2>;
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::FoldAttr>::data[] = {
+    "output_dims",
+    "stride_dims",
+    "dilation_dims",
+    "kernel_dims"
+};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_FOLD_H_ */
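A sketch of the new Fold factory; DIM is deduced from the C-style array helper (values illustrative):

    // Fold sliding 3x3 blocks back into a 16x16 output map.
    auto fold = Aidge::Fold({16, 16}, {3, 3}, "fold1");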
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index a04e4be69c9fd1a6ed7753ed512c7f5e45b925d9..80dcdd67883529c710b142b6b547d4b02e85cd44 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_CORE_OPERATOR_GATHER_H_
 #define AIDGE_CORE_OPERATOR_GATHER_H_
 
-#include <cstdint>  // std::int64_t
+#include <cstdint>  // std::int8_t, std::int64_t
 #include <memory>
 #include <string>
 #include <vector>
@@ -36,53 +36,49 @@ enum class GatherAttr { Axis, Indices, GatheredShape };
 class Gather_Op : public OperatorTensor,
                 public Registrable<Gather_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Gather_Op&)>,
-                public StaticAttributes<GatherAttr, std::int8_t, std::vector<int64_t>, std::vector<DimSize_t>> {
-
+                                   std::function<std::shared_ptr<OperatorImpl>(const Gather_Op&)>> {
 public:
     static const std::string Type;
 
+    using Attributes_ = StaticAttributes<GatherAttr,
+                                            std::int8_t,
+                                            std::vector<int64_t>,
+                                            std::vector<DimSize_t>>;
+private:
+    template <GatherAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
     Gather_Op() = delete;
 
-    using Attributes_ = StaticAttributes<GatherAttr, std::int8_t, std::vector<int64_t>, std::vector<DimSize_t>>;
-    template <GatherAttr e> using attr = typename Attributes_::template attr<e>;
-    Gather_Op(std::int8_t axis, const std::vector<int64_t>& indices, const std::vector<DimSize_t>& gatheredShape)
-            : OperatorTensor(Type, 2, 0, 1),
-            Attributes_(attr<GatherAttr::Axis>(axis),
-                        attr<GatherAttr::Indices>(indices),
-                        attr<GatherAttr::GatheredShape>(gatheredShape))
-    {
-        mImpl = std::make_shared<Gather_OpImpl>(*this);
-    }
+    Gather_Op(std::int8_t axis,
+              const std::vector<int64_t>& indices,
+              const std::vector<DimSize_t>& gatheredShape);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Gather_Op(const Gather_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Gather_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Gather_OpImpl>(*this);
-        }
-    }
+    Gather_Op(const Gather_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Gather_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Gather_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int8_t& axis() const { return mAttributes->getAttr<GatherAttr::Axis>(); }
+    inline std::vector<int64_t>& indices() const { return mAttributes->getAttr<GatherAttr::Indices>(); }
+    inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes->getAttr<GatherAttr::GatheredShape>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "indices"};
@@ -92,14 +88,12 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int64_t>& indices = {}, const std::vector<DimSize_t>& gatheredShape = {}, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Gather_Op>(axis, indices, gatheredShape), name);
-}
+std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int64_t>& indices = {}, const std::vector<DimSize_t>& gatheredShape = {}, const std::string& name = "");
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Axis", "Indices", "GatheredShape"};
+const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
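A sketch of the out-of-line factory (values illustrative):

    // Gather entries 0 and 2 along axis 0; gatheredShape is left empty for forwardDims to infer.
    auto gather = Aidge::Gather(0, {0, 2}, {}, "gather1");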
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index f0b7e92d708dfef65eea0ec7649ccc8716533679..2812da066887d63133ede2d69b5804f0b8a8101e 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -26,47 +26,54 @@
 namespace Aidge {
 class GenericOperator_Op
     : public OperatorTensor,
-      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
-      public DynamicAttributes {
+      public Registrable<GenericOperator_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>> {
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
     ComputeDimsFunc mForwardDims;
 
+    const std::shared_ptr<DynamicAttributes> mAttributes;
+
 public:
-    GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
-        : OperatorTensor(type, nbData, nbParam, nbOut)
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+    GenericOperator_Op(const std::string& type, const std::vector<InputCategory>& inputsCategory, IOIndex_t nbOut);
+
+    GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    GenericOperator_Op(const GenericOperator_Op& op)
-        : OperatorTensor(op)
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
-    }
+    GenericOperator_Op(const GenericOperator_Op& op);
 
-    ~GenericOperator_Op() = default;
+    ~GenericOperator_Op() noexcept;
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::GenericOperator_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<GenericOperator_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 public:
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    bool dimsForwarded() const override final;
-
-    void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); }
-    void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override { return std::set<std::string>(); }
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    template <class T>
+    inline T& getAttr(const std::string& name)
+    { return mAttributes->template getAttr<T>(name); }
+    template <class T>
+    inline const T& getAttr(const std::string& name) const
+    { return mAttributes->template getAttr<T>(name); }
+
+    ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
+    ///\tparam T expected Attribute type
+    ///\param name Attribute name
+    ///\param value Attribute value
+    template <class T>
+    inline void addAttr(const std::string& name, const T& value) const
+    { mAttributes->template addAttr<T>(name, value); }
 
     // Helper functions that can be used with setForwardDims():
     static const ComputeDimsFunc Identity;
@@ -76,6 +83,18 @@ public:
     }
 };
 
+/**
+ * @brief Fictitious custom operator not associated with any implementation.
+ * Allows importing unknown operators and simulating new ones.
+ * @param type Type of the fictitious operator.
+ * @param inputCategory List of inputs with their category.
+ * @param nbOut Number of output data.
+ * @param name (optional) Name of the Operator.
+ * @return std::shared_ptr<Node> Node associated with the Generic Operator.
+ */
+std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector<InputCategory>& inputCategory, IOIndex_t nbOut,
+                                             const std::string& name = "");
+
 /**
  * @brief Fictive custom operator not associated with any implementation.
  * Allows to import unknown operators and simulate new ones.
@@ -86,10 +105,8 @@ public:
  * @param name (optional) name of the Operator.
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
  */
-inline std::shared_ptr<Node> GenericOperator(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
-                                             const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
-}
+std::shared_ptr<Node> GenericOperator(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
+                                             const std::string& name = "");
 }  // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ */
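The new InputCategory-based overload combines with the attribute forwarding as follows (operator type and attribute name are hypothetical):

    auto custom = Aidge::GenericOperator("MyOp",
        {Aidge::InputCategory::Data, Aidge::InputCategory::Param}, 1, "custom1");
    auto customOp = std::static_pointer_cast<Aidge::GenericOperator_Op>(custom->getOperator());
    customOp->addAttr<float>("alpha", 0.5f);  // stored in the shared DynamicAttributes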
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 74529a0ba9481bf6280df8d3ce496f67635a5aef..ef440e8c697ff221aa8df42e459de7ac697e8a0c 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -32,29 +32,21 @@ namespace Aidge {
 class GlobalAveragePooling_Op
     : public OperatorTensor,
       public Registrable<GlobalAveragePooling_Op, std::string,
-                         std::shared_ptr<OperatorImpl>(
-                             const GlobalAveragePooling_Op &)> {
+                         std::function<std::shared_ptr<OperatorImpl>(
+                             const GlobalAveragePooling_Op &)>> {
 public:
   static const std::string Type;
 
-  GlobalAveragePooling_Op() : OperatorTensor(Type, 1, 0, 1) {}
+  GlobalAveragePooling_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
-  GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op)
-      : OperatorTensor(op) {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-  }
+  GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op);
 
-  std::shared_ptr<Operator> clone() const override {
-    return std::make_shared<GlobalAveragePooling_Op>(*this);
-  }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
   void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
   static const std::vector<std::string> getInputsName() {
     return {"data_input"};
@@ -64,11 +56,8 @@ public:
   }
 };
 
-inline std::shared_ptr<Node>
-GlobalAveragePooling(const std::string &name = "") {
-  return std::make_shared<Node>(std::make_shared<GlobalAveragePooling_Op>(),
-                                name);
-}
+std::shared_ptr<Node> GlobalAveragePooling(const std::string &name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_GLOBAL_AVERAGE_POOLING_H_ */
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..dc2b2059e75711572e0f7fa94cc6ccb9f58c970b
--- /dev/null
+++ b/include/aidge/operator/GridSample.hpp
@@ -0,0 +1,94 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_GRIDSAMPLE_H_
+#define AIDGE_CORE_OPERATOR_GRIDSAMPLE_H_
+
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+
+namespace Aidge {
+
+enum class GridSampleAttr { Mode, PaddingMode, AlignCorners };
+
+class GridSample_Op : public OperatorTensor,
+	public Registrable<GridSample_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const GridSample_Op&)>> {
+
+public:
+	static const std::string Type;
+
+	enum class Mode { Linear, Nearest, Cubic };
+	enum class PaddingMode { Zeros, Border, Reflection };
+
+private:
+	using Attributes_ = StaticAttributes<GridSampleAttr, Mode, PaddingMode, bool>;
+	template <GridSampleAttr e>
+	using attr = typename Attributes_::template attr<e>;
+	const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+	GridSample_Op(Mode mode = Mode::Linear,
+			PaddingMode paddingMode = PaddingMode::Zeros,
+			bool alignCorners = false);
+
+	GridSample_Op(const GridSample_Op& other);
+	~GridSample_Op() noexcept;
+
+public:
+
+	std::shared_ptr<Operator> clone() const override;
+
+	bool forwardDims(bool /*allowDataDependencies*/ = false) override final;
+
+	void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+	inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+	inline Mode mode() const { return mAttributes->template getAttr<GridSampleAttr::Mode>(); }
+	inline PaddingMode paddingMode() const { return mAttributes->template getAttr<GridSampleAttr::PaddingMode>(); }
+	inline bool alignCorners() const { return mAttributes->template getAttr<GridSampleAttr::AlignCorners>(); }
+
+	static const std::vector<std::string> getInputsName() {
+		return {"data_input", "grid_field"};
+	}
+	static const std::vector<std::string> getOutputsName() {
+		return {"data_output"};
+	}
+};
+
+std::shared_ptr<Node> GridSample(
+                        typename GridSample_Op::Mode mode = GridSample_Op::Mode::Linear,
+                        typename GridSample_Op::PaddingMode paddingMode = GridSample_Op::PaddingMode::Zeros,
+                        bool alignCorners = false,
+                        const std::string& name = "");
+
+} // namespace Aidge
+
+
+namespace {
+template <>
+const char* const EnumStrings<Aidge::GridSampleAttr>::data[] = {
+    "mode",
+    "padding_mode",
+    "align_corners"
+};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_GRIDSAMPLE_H_ */
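
A short construction sketch for the new GridSample operator, using only the factory and attribute accessors declared above (the values are arbitrary, and Node::getOperator() is assumed as the usual access path):

    #include <memory>
    #include "aidge/operator/GridSample.hpp"

    int main() {
        auto node = Aidge::GridSample(Aidge::GridSample_Op::Mode::Nearest,
                                      Aidge::GridSample_Op::PaddingMode::Border,
                                      /*alignCorners=*/true, "gridsample");
        auto op = std::static_pointer_cast<Aidge::GridSample_Op>(node->getOperator());
        bool aligned = op->alignCorners();  // reads GridSampleAttr::AlignCorners
        (void)aligned;
        return 0;
    }
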
diff --git a/include/aidge/operator/ILayerNorm.hpp b/include/aidge/operator/ILayerNorm.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f660cc64eb65770cc6cf5335d9c070b155d03c0f
--- /dev/null
+++ b/include/aidge/operator/ILayerNorm.hpp
@@ -0,0 +1,81 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 10.09.2024
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ILAYERNORM_H_
+#define AIDGE_CORE_OPERATOR_ILAYERNORM_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class ILayerNorm_Op : public OperatorTensor,
+    public Registrable<ILayerNorm_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ILayerNorm_Op&)>> {
+public:
+    static const std::string Type;
+
+    ILayerNorm_Op()
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::Param}, 1)
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ILayerNorm_Op(const ILayerNorm_Op& op)
+        : OperatorTensor(op)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ILayerNorm_Op, *this, op.backend());
+        }else{
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ILayerNorm_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ILayerNorm_Op>(*this);
+    }
+
+    void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "weight", "bias"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> ILayerNorm(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<ILayerNorm_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_ILAYERNORM_H_ */
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index bcbe1c6c69e0a666d7a976558d558f101c5b8fca..24476f231806bf38ae48b9e2d5ec405e072afdb2 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -26,8 +26,11 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
-
-
+class Identity_OpImpl : public OperatorImpl {
+public:
+    Identity_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
 
 /**
 * @brief Identity_Op is a helper operator meant to ease the declaration of MetaNodes.
@@ -37,59 +40,27 @@ namespace Aidge {
  *
  */
 class Identity_Op : public OperatorTensor,
-    public Registrable<Identity_Op, std::string, std::unique_ptr<OperatorImpl>(const Identity_Op&)> {
+    public Registrable<Identity_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Identity_Op&)>> {
 public:
     static const std::string Type;
 
-    Identity_Op()
-        : OperatorTensor(Type, 1, 0, 1)
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+    Identity_Op();
 
     /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Identity_Op(const Identity_Op& op)
-        : OperatorTensor(op)
-    {
-        mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
-    }
+    Identity_Op(const Identity_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Identity_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Identity_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
-    // bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
-
-    /**
-     * @brief Check if output dimensions have been computed.
-     * @note Since Indentity has no output Tensor, this function checks if its
-     * only input's dimensions have been computed.
-     *
-     * @return true Input has dimensions.
-     * @return false Input has no dimensions or is a nullptr.
-     */
-    bool dimsForwarded() const override final {
-        return mInputs[0] ? (mInputs[0]->empty() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
-    }
-
-
-    void forward() override final;
-
-    void backward() override final { }
-
-    void setBackend(const std::string& /*name*/, DeviceIdx_t /*device*/ = 0) override final {
-        // setBackend do nothing, Identity node has no backend it just pass the same Tensor
-    }
-    void setDataType(const DataType& /*dataType*/) const override final {
-        // setDatatype do nothing, Identity node has no backend it just pass the same Tensor
-    }
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -99,9 +70,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Identity(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Identity_Op>(), name);
-}
+std::shared_ptr<Node> Identity(const std::string& name = "");
+
 }
 
 #endif /* AIDGE_CORE_OPERATOR_IDENTITY_H_ */
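
Identity's pass-through behaviour now lives in the dedicated Identity_OpImpl, whose forward() is defined out of line. One plausible shape for that definition, shown only as a sketch (the tensor copy-assignment is an assumption, not the confirmed implementation):

    // Sketch of Identity_OpImpl::forward(): propagate input #0 to output #0 unchanged.
    void Aidge::Identity_OpImpl::forward() {
        const Identity_Op& op = dynamic_cast<const Identity_Op&>(mOp);
        AIDGE_ASSERT(op.getInput(0), "{}: missing input #0", op.type());
        *op.getOutput(0) = *op.getInput(0);  // shallow pass-through of the tensor
    }
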
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 83a7c30fce7e0f68576f367d4b0bfe48edf4b3b6..179eb90b39bb5d527781289b9b233d3a29d14494 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -12,16 +12,15 @@
 #ifndef AIDGE_CORE_OPERATOR_LEAKYRELU_H_
 #define AIDGE_CORE_OPERATOR_LEAKYRELU_H_
 
-#include <vector>
 #include <memory>
+#include <vector>
 
-#include "aidge/utils/StaticAttributes.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -30,51 +29,43 @@ enum class LeakyReLUAttr {
 };
 
 class LeakyReLU_Op : public OperatorTensor,
-    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
-    public StaticAttributes<LeakyReLUAttr, float> {
+    public Registrable<LeakyReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>> {
 public:
     static const std::string Type;
 
-    LeakyReLU_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<LeakyReLUAttr, float>;
     template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    LeakyReLU_Op() = delete;
 
     LeakyReLU_Op(float negativeSlope)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(
-            attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(
+            std::make_shared<Attributes_>(
+                attr<LeakyReLUAttr::NegativeSlope>(negativeSlope)))
     {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    LeakyReLU_Op(const LeakyReLU_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    LeakyReLU_Op(const LeakyReLU_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::LeakyReLU_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<LeakyReLU_Op>(*this);
-    }
-
+    std::shared_ptr<Operator> clone() const override;
 
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& negativeSlope() const noexcept { return mAttributes->getAttr<LeakyReLUAttr::NegativeSlope>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -84,15 +75,13 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
-}
+std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "");
 }
 
 namespace {
 template <>
 const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
-    = {"NegativeSlope"};
+    = {"negative_slope"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_LEAKYRELU_H_ */
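
The attribute moved from an inherited StaticAttributes base into a shared mAttributes store, with a mutable typed accessor and a snake_case serialized name. A minimal sketch of the resulting API, assuming Node::getOperator():

    #include <memory>
    #include "aidge/operator/LeakyReLU.hpp"

    int main() {
        auto node = Aidge::LeakyReLU(0.01f, "lrelu");
        auto op = std::static_pointer_cast<Aidge::LeakyReLU_Op>(node->getOperator());
        op->negativeSlope() = 0.2f;  // accessor returns a mutable reference
        // The attribute is now exposed under the name "negative_slope".
        return 0;
    }
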
diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Ln.hpp
new file mode 100755
index 0000000000000000000000000000000000000000..22fc51664b89bcdeb5970b0cc92beafdde52e43f
--- /dev/null
+++ b/include/aidge/operator/Ln.hpp
@@ -0,0 +1,62 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_LN_H_
+#define AIDGE_CORE_OPERATOR_LN_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Ln_Op : public OperatorTensor,
+    public Registrable<Ln_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Ln_Op&)>> {
+public:
+    static const std::string Type;
+
+    Ln_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Ln_Op(const Ln_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Ln_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> Ln(const std::string& name = "");
+}
+
+#endif /* AIDGE_CORE_OPERATOR_LN_H_ */
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 580d720e617e5b20c0acc7ce5e7f200fe5b25606..bf6ab84c7373962e71434050427c9b6ecae3b034 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -26,32 +26,23 @@ namespace Aidge {
 class MatMul_Op : public OperatorTensor,
               public Registrable<MatMul_Op,
                                  std::string,
-                                 std::shared_ptr<OperatorImpl>(const MatMul_Op &)> {
+                                 std::function<std::shared_ptr<OperatorImpl>(const MatMul_Op &)>> {
 public:
     static const std::string Type;
 
-    MatMul_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    MatMul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    MatMul_Op(const MatMul_Op& op) : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(MatMul_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    MatMul_Op(const MatMul_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MatMul_Op
      */
-    std::shared_ptr<Operator> clone() const override final {
-        return std::make_shared<MatMul_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override final;
 
     /**
      * @brief Compute dimensions for the output Tensor following the same rules as
@@ -68,6 +59,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input1", "data_input2"};
@@ -77,9 +69,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> MatMul(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
-}
+std::shared_ptr<Node> MatMul(const std::string& name = "");
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MATMUL_H_ */
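
forwardDims() follows the numpy.matmul shape rules referenced in the doc comment above. A self-contained sketch of that rule, restricted to inputs of rank >= 2 (the 1D promotion cases are omitted):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <vector>

    // numpy.matmul-style output shape: broadcast the batch dims, then (M,K)x(K,N)->(M,N).
    std::vector<std::size_t> matmulDims(std::vector<std::size_t> a, std::vector<std::size_t> b) {
        assert(a.size() >= 2 && b.size() >= 2);
        assert(a.back() == b[b.size() - 2]);       // inner dimensions must agree
        const std::size_t rank = std::max(a.size(), b.size());
        while (a.size() < rank) a.insert(a.begin(), 1);
        while (b.size() < rank) b.insert(b.begin(), 1);
        std::vector<std::size_t> out;
        for (std::size_t i = 0; i + 2 < rank; ++i) {
            assert(a[i] == b[i] || a[i] == 1 || b[i] == 1);
            out.push_back(std::max(a[i], b[i]));   // broadcast batch dimension
        }
        out.push_back(a[rank - 2]);                // M
        out.push_back(b[rank - 1]);                // N
        return out;                                // e.g. {2,3,4} x {4,5} -> {2,3,5}
    }
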
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 8aff1582604a9e23e248e7c01521567483c793ad..0cc43a6fbe50849b169a59d048962668d3e4666c 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -20,7 +20,6 @@
 #include <stdexcept>   // std::runtime_error
 #include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
@@ -34,90 +33,48 @@ enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
-                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
-                public StaticAttributes<MaxPoolingAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       bool> {
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>> {
 public:
     static const std::string Type;
 
-    MaxPooling_Op() = delete;
-
     using Attributes_ = StaticAttributes<MaxPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              bool>;
+
+private:
     template <MaxPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+public:
+    MaxPooling_Op() = delete;
+
+    MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                            bool ceil_mode = false)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
-                      attr<MaxPoolingAttr::KernelDims>(kernel_dims),
-                      attr<MaxPoolingAttr::CeilMode>(ceil_mode))
-        {}
+                            bool ceil_mode = false);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    MaxPooling_Op(const MaxPooling_Op<DIM>& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    MaxPooling_Op(const MaxPooling_Op<DIM>& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MaxPooling_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<MaxPooling_Op<DIM>>(*this);
-    }
-
+    std::shared_ptr<Operator> clone() const override;
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        if (!getInput(0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-        }
-        if (!(getInput(0)->empty())) {
-            std::array<DimSize_t, DIM + 2> outputDims{};
-            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
-
-            std::function<float(float)> roundingFunction;
-            if (this->template getAttr<MaxPoolingAttr::CeilMode>()) {
-                roundingFunction = [](float x) { return std::ceil(x); };
-            } else {
-                roundingFunction = [](float x) { return std::floor(x); };
-            }
-
-            for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
-                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                            roundingFunction(static_cast<float>(inputDims[dim+2] -
-                                                                    this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
-                                            static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
-            }
-            outputDims[1] = inputDims[1];
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-            return true;
-        }
-        return false;
-    }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<MaxPoolingAttr::KernelDims>(); }
+    inline bool& ceilMode() const { return mAttributes->template getAttr<MaxPoolingAttr::CeilMode>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -127,17 +84,15 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string MaxPooling_Op<DIM>::Type = "MaxPooling";
+extern template class Aidge::MaxPooling_Op<1>;
+extern template class Aidge::MaxPooling_Op<2>;
+extern template class Aidge::MaxPooling_Op<3>;
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                            const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
-                                           bool ceil_mode=false) {
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
-    return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, ceil_mode), name);
-}
+                                           bool ceil_mode=false);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
@@ -153,7 +108,7 @@ inline std::shared_ptr<Node> MaxPooling(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"StrideDims", "KernelDims", "CeilMode"};
+const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
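
The inline forwardDims() removed above computed each spatial output extent as 1 + round((in - kernel) / stride), with ceil or floor rounding selected by CeilMode. A standalone check of that formula:

    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    // One pooled output dimension, as in the removed inline forwardDims().
    std::size_t pooledDim(std::size_t in, std::size_t kernel, std::size_t stride, bool ceilMode) {
        const float x = static_cast<float>(in - kernel) / static_cast<float>(stride);
        return 1 + static_cast<std::size_t>(ceilMode ? std::ceil(x) : std::floor(x));
    }

    int main() {
        std::printf("%zu\n", pooledDim(5, 2, 2, false));  // floor((5-2)/2) + 1 = 2
        std::printf("%zu\n", pooledDim(5, 2, 2, true));   // ceil((5-2)/2)  + 1 = 3
        return 0;
    }
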
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 6b0ace2eb09fde069f8b9b104f92fc33811c25aa..2b05b5fffed98a7df99a450a5f99c88efa2f7288 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -25,68 +25,65 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-class Memorize_OpImpl : public OperatorImpl {
+class Memorize_ProdConso : public ProdConso {
 public:
-    Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Memorize_ProdConso(const Operator& op): ProdConso(op) {}
     Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
     void updateConsummerProducer() override;
+};
+
+class Memorize_OpImpl : public OperatorImpl {
+public:
+    Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Memorize_ProdConso>(mOp); };
     void forward() override;
 };
 
 enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep };
 
 class Memorize_Op : public OperatorTensor,
-    public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)>,
-    public StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int> {
+    public Registrable<Memorize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Memorize_Op&)>> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int>;
+private:
+    using Attributes_ = StaticAttributes<MemorizeAttr, std::uint32_t, std::uint32_t, std::uint32_t>;
     template <MemorizeAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    Memorize_Op(const unsigned int endStep)
-        : OperatorTensor(Type, 1, 1, 2),
-          Attributes_(attr<MemorizeAttr::ScheduleStep>(0),
-                      attr<MemorizeAttr::ForwardStep>(0),
-                      attr<MemorizeAttr::EndStep>(endStep))
-    {
-        mOutputs[1] = mOutputs[0];
-    }
+public:
+    Memorize_Op() = delete;
+
+    Memorize_Op(const std::uint32_t endStep);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
      * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Memorize_Op(const Memorize_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-        mOutputs[1] = mOutputs[0];
-    }
+    Memorize_Op(const Memorize_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Memorize_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Memorize_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
     bool dimsForwarded() const override;
     void updateConsummerProducer() override;
     void forward() override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& scheduleStep() const { return mAttributes->template getAttr<MemorizeAttr::ScheduleStep>(); }
+    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<MemorizeAttr::ForwardStep>(); }
+    inline std::uint32_t& endStep() const { return mAttributes->template getAttr<MemorizeAttr::EndStep>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "data_input_init"};
     }
@@ -95,17 +92,15 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Memorize(const unsigned int endStep, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
-}
+std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
 template <>
 const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = {
-    "ScheduleStep",
-    "ForwardStep",
-    "EndStep"
+    "schedule_step",
+    "forward_step",
+    "end_step"
 };
 }
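
A usage sketch for the refactored Memorize operator, combining the factory above with the new typed attribute accessors; the cast through Node::getOperator() is the usual Aidge pattern:

    #include <cstdint>
    #include <memory>
    #include "aidge/operator/Memorize.hpp"

    int main() {
        // Memorize carries a tensor across scheduling steps until endStep is reached.
        auto node = Aidge::Memorize(/*endStep=*/3, "mem");
        auto op = std::static_pointer_cast<Aidge::Memorize_Op>(node->getOperator());
        std::uint32_t end = op->endStep();  // serialized as "end_step"
        (void)end;
        return 0;
    }
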
 
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index fb8c73af33dd081664c82427ea8aa6876117d695..ccff976cbb7cf8efc59223dfd658ca2a4d03a80b 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -21,13 +21,14 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
 #include "aidge/scheduler/SequentialScheduler.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 class MetaOperator_Op : public OperatorTensor,
-                public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)> {
+                public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::function<std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)>> {
 public:
     // outputs shared with micro-graph output Tensors
     // Micro-graph handling:
@@ -50,7 +51,7 @@ public:
     /**
      * Set the node that should be used for the scheduling.
     */
-    void setUpperNode(std::shared_ptr<Node> node) {
+    inline void setUpperNode(std::shared_ptr<Node> node) {
         mUpperNode = node;
     }
 
@@ -58,9 +59,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MetaOperator_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<MetaOperator_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     inline const std::shared_ptr<GraphView>& getMicroGraph() const noexcept {
         return mGraph;
@@ -74,32 +73,17 @@ public:
     void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
     bool forwardDims(bool allowDataDependency = false) override final {
-        // Check first that all required inputs are available, otherwise
-        // mGraph->forwardDims() will fail!
-        bool forwarded = true;
-        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-            forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
-        }
-
-        if (forwarded) {
+        if (inputsAssociated()) {
             // Forward dims of micro-graph
             return mGraph->forwardDims({}, allowDataDependency);
         }
         return false;
     }
 
+    std::string backend() const noexcept override;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        if (Registrar<MetaOperator_Op>::exists({name, type()})) {
-            // A custom implementation exists for this meta operator
-            mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
-        }
-
-        // The micro-graph should always be set to the right backend, since it
-        // shares input/output tensors.
-        // Input/output tensors backend are updated here.
-        mGraph->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     void setDataType(const DataType &datatype) const override {
         // The micro-graph should always be set to the right data type, since it
@@ -108,6 +92,8 @@ public:
         mGraph->setDataType(datatype);
     }
 
+    std::shared_ptr<Attributes> attributes() const override;
+
     Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
     Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override;
     Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override;
@@ -115,6 +101,7 @@ public:
     Elts_t getNbProducedData(IOIndex_t outputIdx) const override;
 
     void updateConsummerProducer() override;
+    void resetConsummerProducer() override;
     void forward() override;
     void backward() override {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented yet for a MetaOperator");
@@ -124,15 +111,9 @@ public:
 
 };
 
-inline std::shared_ptr<Node> MetaOperator(const char *type,
+std::shared_ptr<Node> MetaOperator(const char *type,
                                   const std::shared_ptr<GraphView>& graph,
-                                  const std::string& name = "")
-{
-    auto op = std::make_shared<MetaOperator_Op>(type, graph);
-    auto node = std::make_shared<Node>(op, name);
-    op->setUpperNode(node);
-    return node;
-}
+                                  const std::string& name = "");
 }  // namespace Aidge
 
 #endif /* MetaOperator_H_ */
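
The MetaOperator() factory is now declaration-only; its removed inline body above shows what it does (create the op, wrap it in a Node, register that Node as the upper node). A usage sketch built from operators declared elsewhere in this diff; the type string "DoubleLeakyReLU" is illustrative:

    #include "aidge/operator/LeakyReLU.hpp"
    #include "aidge/operator/MetaOperator.hpp"

    int main() {
        // Micro-graph of two chained nodes, wrapped into a single meta node.
        auto micro = Aidge::Sequential({Aidge::LeakyReLU(0.1f), Aidge::LeakyReLU(0.2f)});
        auto meta  = Aidge::MetaOperator("DoubleLeakyReLU", micro, "meta");
        (void)meta;
        return 0;
    }
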
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index eb57761cc5927cb4eedfb6cb12b1d49a0ee50b9c..bc3348377525cdd2e5b2c030c8fc6b7cb8177e7b 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -33,42 +33,25 @@ namespace Aidge {
 
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
+extern std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
                                   DimSize_t out_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
                                   const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
-{
-    // Construct micro-graph
-    auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
-
-    auto metaOp = MetaOperator("PaddedConv", Sequential({pad, conv}), name);
-    addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
-    addProducer(metaOp, 2, {out_channels}, "b");
-    return metaOp;
-}
+                                  bool no_bias = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
+extern std::shared_ptr<MetaOperator_Op> PaddedConv_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
-{
-    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), "");
-
-    return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
-}
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
-inline std::shared_ptr<Node> PaddedConv(
+extern std::shared_ptr<Node> PaddedConv(
     DimSize_t in_channels,
     DimSize_t out_channels,
     DimSize_t const (&kernel_dims)[DIM],
@@ -76,45 +59,25 @@ inline std::shared_ptr<Node> PaddedConv(
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-    bool no_bias = false)
-{
-    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
-}
+    bool no_bias = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
+std::shared_ptr<Node> PaddedConvDepthWise(const DimSize_t nb_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
                                   const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
-{
-    // Construct micro-graph
-    auto pad = Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), (!name.empty()) ? name + "_conv" : "");
-
-    auto metaOp = MetaOperator("PaddedConvDepthWise", Sequential({pad, conv}), name);
-    addProducer(metaOp, 1, append(nb_channels, append(DimSize_t(1), kernel_dims)), "w");
-    addProducer(metaOp, 2, {nb_channels}, "b");
-    return metaOp;
-}
+                                  bool no_bias = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
+std::shared_ptr<MetaOperator_Op> PaddedConvDepthWise_Op(
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
-                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-                                  bool no_bias = false)
-{
-    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
-    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims, no_bias), "");
-
-    return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
-}
+                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1));
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
@@ -125,10 +88,7 @@ inline std::shared_ptr<Node> PaddedConvDepthWise(
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, 2*DIM> &padding_dims = create_array<DimSize_t,2*DIM>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
-    bool no_bias = false)
-{
-    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
-}
+    bool no_bias = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 
@@ -203,8 +163,7 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels,
                            bool noBias = false,
                            const std::string& name = "");
 
-std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length,
-                                         bool noBias = false);
+std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length);
 
 }  // namespace Aidge
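
The PaddedConv helpers become extern templates, instantiated for the usual DIMs in the .cpp. Calling them is unchanged; a sketch for the 2D case (arbitrary channel counts):

    #include <array>
    #include "aidge/operator/MetaOperatorDefs.hpp"

    int main() {
        // 3x3 convolution with one pixel of constant padding per border,
        // expressed as the Pad -> Conv micro-graph.
        auto conv = Aidge::PaddedConv<2>(/*in_channels=*/3, /*out_channels=*/16,
                                         {3, 3}, "conv1",
                                         /*stride_dims=*/{1, 1},
                                         /*padding_dims=*/{1, 1, 1, 1});
        (void)conv;
        return 0;
    }
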
 
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index e9bcaa871619828a50dcd407d39744e7983fe2c4..49d92cd12f68a0b23530039c1df70ced9b2d2080 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -31,38 +31,26 @@ public:
 };
 
 class Move_Op : public OperatorTensor,
-    public Registrable<Move_Op, std::tuple<std::string, std::string>, std::unique_ptr<OperatorImpl>(const Move_Op&)> {
+    public Registrable<Move_Op, std::tuple<std::string, std::string>, std::function<std::unique_ptr<OperatorImpl>(const Move_Op&)>> {
 public:
     static const std::string Type;
 
-    Move_Op() : OperatorTensor(Type, 1, 0, 1) {
-        mImpl = std::make_shared<Move_OpImpl>(*this);
-    }
+    Move_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Move_Op(const Move_Op& op)
-        : OperatorTensor(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Move_Op, *this, {op.getInput(0)->getImpl()->backend(), op.backend()});
-        }
-        else {
-            mImpl = std::make_shared<Move_OpImpl>(*this);
-        }
-    }
+    Move_Op(const Move_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Move_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Move_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -72,9 +60,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Move(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Move_Op>(), name);
-}
-}
+std::shared_ptr<Node> Move(const std::string& name = "");
+
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index f53a38a82a6771e416435222137e72366f5f69f3..bfe4fcb0de1cb7dda4a0ea8fc7b99638bc813f47 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -28,38 +28,29 @@ namespace Aidge {
  * @brief Tensor element-wise multiplication.
  */
 class Mul_Op : public OperatorTensor,
-    public Registrable<Mul_Op, std::string, std::shared_ptr<OperatorImpl>(const Mul_Op&)> {
+    public Registrable<Mul_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Mul_Op&)>> {
 public:
     static const std::string Type;
 
-    Mul_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Mul_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
      * but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Mul_Op(const Mul_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl) {
-            SET_IMPL_MACRO(Mul_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Mul_Op(const Mul_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Mul_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Mul_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_1", "data_input_2"};
@@ -69,9 +60,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Mul(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
-}
+std::shared_ptr<Node> Mul(const std::string& name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 09172f9d59d417132da7577fdec148e882e3d613..87aa4080e57d14d0d8a738afed2e976521b42048 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -18,11 +18,21 @@
 #include <utility>
 #include <cstddef>
 
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <fmt/format.h>
+#endif
+
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/hook/Hook.hpp"
 
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
 namespace Aidge {
 
 enum class OperatorType {
@@ -30,6 +40,13 @@ enum class OperatorType {
     Tensor
 };
 
+enum class InputCategory {
+    Data,
+    Param,
+    OptionalData,
+    OptionalParam
+};
+
 class Operator : public std::enable_shared_from_this<Operator> {
 protected:
     std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
@@ -38,17 +55,16 @@ protected:
 private:
     std::string mType;
     const OperatorType mOperatorType;
-    const IOIndex_t mNbData;
-    const IOIndex_t mNbParam;
+    const std::vector<InputCategory> mInputsCategory;
     const IOIndex_t mNbOut;
+    std::set<IOIndex_t> mBackEdges;
 
 public:
     Operator() = delete;
-    Operator(const std::string& type, const IOIndex_t nbData, const IOIndex_t nbParam, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
+    Operator(const std::string& type, const std::vector<InputCategory>& inputsCategory, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
     : mType(type),
       mOperatorType(operatorType),
-      mNbData(nbData),
-      mNbParam(nbParam),
+      mInputsCategory(inputsCategory),
       mNbOut(nbOut)
     {
         // ctor
@@ -57,9 +73,9 @@ public:
     Operator(const Operator& op):
         std::enable_shared_from_this<Operator>(),
         mOperatorType(op.mOperatorType),
-        mNbData(op.mNbData),
-        mNbParam(op.mNbParam),
-        mNbOut(op.mNbOut)
+        mInputsCategory(op.mInputsCategory),
+        mNbOut(op.mNbOut),
+        mBackEdges(op.mBackEdges)
     {
         mType = op.mType;
         mImpl = nullptr;
@@ -73,12 +89,14 @@ public:
 public:
     virtual std::shared_ptr<Operator> clone() const = 0;
 
+    virtual std::shared_ptr<Attributes> attributes() const { return nullptr; };
     /**
      * @brief Set the specified input with a shallow copy.
      * @param inputIdx Index of the input to set.
      * @param data Data to copy.
      */
     virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
+    virtual void resetInput(const IOIndex_t inputIdx) = 0;
 
     /**
      * @brief Set the specified input value by performing a deep copy of the given data.
@@ -93,7 +111,7 @@ public:
      * The pointer itself is not changed, thus keeping the current connections.
      * @param inputIdx Index of the input to set.
      */
-    virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) = 0;
+    virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const = 0;
     virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
 
     std::shared_ptr<Hook> getHook(const std::string& hookName) {
@@ -108,12 +126,16 @@ public:
 ///////////////////////////////////////////////////////
 //        IMPLEMENTATION
 ///////////////////////////////////////////////////////
-    std::string backend() const noexcept {
+    virtual std::string backend() const noexcept {
         return mImpl ? mImpl->backend() : "";
     }
 
     virtual void setBackend(const std::string& name, DeviceIdx_t device = 0) = 0;
+    void setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends);
     virtual void setDataType(const DataType& dataType) const = 0;
+    virtual void setDataFormat(const DataFormat& dataFormat) const = 0;
+
+    virtual std::set<std::string> getAvailableBackends() const = 0;
 
     /**
      * @brief Set a new OperatorImpl to the Operator
@@ -178,19 +200,48 @@ public:
         return mOperatorType;
     }
 
+    inline InputCategory inputCategory(IOIndex_t idx) const {
+        // AIDGE_ASSERT(idx < mInputsCategory.size(), "Input #{} out of range (number of inputs is {})", idx, mInputsCategory.size());
+        return mInputsCategory.at(idx);
+    }
+
     virtual inline bool isAtomic() const noexcept { return true; }
 
-    inline IOIndex_t nbInputs() const noexcept { return mNbData+mNbParam; };
-    inline IOIndex_t nbData() const noexcept { return mNbData; };
-    inline IOIndex_t nbParam() const noexcept { return mNbParam; };
+    inline IOIndex_t nbInputs() const noexcept { return mInputsCategory.size(); };
     inline IOIndex_t nbOutputs() const noexcept { return mNbOut; };
 
+    /**
+     * @brief Set the back-edge input indexes for recurrent operators.
+     * Any recurrent operator should specify its back edges; otherwise,
+     * the interpretation of the data flow graph may not be possible.
+     */
+    inline void setBackEdges(const std::set<IOIndex_t>& backEdges) { mBackEdges = backEdges; }
+
+    /**
+     * @brief Returns whether the given input index is a back edge.
+     * @return true if the input index is in the back edge set
+     */
+    inline bool isBackEdge(IOIndex_t inputIdx) const {
+        return mBackEdges.find(inputIdx) != mBackEdges.end();
+    }
+
     static const std::vector<std::string> getInputsName() {
         return {};
     }
     static const std::vector<std::string> getOutputsName() {
         return {};
     }
+
+#ifdef PYBIND
+    std::string repr() const {
+        return fmt::format("Operator(type = '{}', nb_in = {}, nb_out = {}, attr = {}, backend = {})",
+                    type(),
+                    nbInputs(),
+                    nbOutputs(),
+                    (attributes() ? attributes()->repr() : "None"),
+                    (mImpl ? "'"+backend()+"'" : "None"));
+    }
+#endif
 };
 } // namespace Aidge
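
InputCategory replaces the nbData/nbParam pair, and the new back-edge API lets recurrent operators mark which inputs close a cycle. A sketch combining both through the GenericOperator factory from earlier in this diff; the "Accumulate" type is illustrative:

    #include <memory>
    #include "aidge/operator/GenericOperator.hpp"

    int main() {
        // One regular data input plus one optional, loop-carried input.
        auto node = Aidge::GenericOperator("Accumulate",
            {Aidge::InputCategory::Data, Aidge::InputCategory::OptionalData}, 1, "acc");
        auto op = node->getOperator();
        op->setBackEdges({1});              // input #1 closes the cycle
        bool back = op->isBackEdge(1);      // true
        auto cat  = op->inputCategory(0);   // InputCategory::Data
        (void)back; (void)cat;
        return 0;
    }
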
 
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index f2a59dda743af52647ad650aae516ef07ba89ac4..c8cdd93810e18bd3cdd0a2d080e54aae2d787c66 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -40,7 +40,15 @@ protected:
 public:
     OperatorTensor() = delete;
 
-    OperatorTensor(const std::string& type, const IOIndex_t nbData, const IOIndex_t nbParam,
+    /**
+     * @brief OperatorTensor constructor. This constructor is not meant to be called directly;
+     * it is invoked by the constructors of derived classes, since every operator class derives from this one.
+     *
+     * @param[in] type : type of the operator (e.g. "Add", "AveragePool", ...)
+     * @param[in] inputsCategory : category of each input.
+     * @param[in] nbOut : number of tensors this operator will output.
+     */
+    OperatorTensor(const std::string& type, const std::vector<InputCategory>& inputsCategory,
                    const IOIndex_t nbOut);
 
     OperatorTensor(const OperatorTensor& other);
@@ -51,6 +59,7 @@ public:
     ///////////////////////////////////////////////////
     virtual void associateInput(const IOIndex_t inputIdx,
                                 const std::shared_ptr<Data>& data) override;
+    void resetInput(const IOIndex_t inputIdx) override final;
     ///////////////////////////////////////////////////
 
     ///////////////////////////////////////////////////
@@ -61,7 +70,7 @@ public:
     std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final;
 
     // output management
-    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override;
+    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const override;
     virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const;
     std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final;
     ///////////////////////////////////////////////////
@@ -78,13 +87,26 @@ public:
      * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
      */
     virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
+
+    /**
+     * @brief Compute the dimensions of the operator's output tensor(s), given the input sizes.
+     *        If the output dimensions cannot be computed because they depend on undefined inputs,
+     *        forwardDims() returns false and the graph enters TOKEN mode for subsequent tensors:
+     *        in TOKEN mode, forwardDims() only checks that all inputs and outputs of the node's graph are connected.
+     * @param[in] allowDataDependency if true, the output dimensions may depend on the values of optional input tensors.
+     * @return true if the dims have been properly forwarded; false otherwise (TOKEN mode).
+     */
     virtual bool forwardDims(bool allowDataDependency = false);
     virtual bool dimsForwarded() const;
     ///////////////////////////////////////////////////
 
     virtual void setDataType(const DataType& dataType) const override;
-    
+    virtual void setDataFormat(const DataFormat& dataFormat) const override;
+
     virtual void forward() override;
+
+protected:
+    bool inputsAssociated(bool checkNonEmpty = true) const;
 };
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index a4e4ebdce801971de118ca8a263999046a13777d..2c670bf23d4703a5a9e8502c8b356fdde32e2561 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -13,16 +13,14 @@
 #define AIDGE_CORE_OPERATOR_PAD_H_
 
 #include <array>
-#include <numeric>
+#include <memory>
+#include <string>
 #include <vector>
-#include <cmath>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -31,30 +29,31 @@ enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
-                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
-                public StaticAttributes<PadAttr,
-                                       std::array<DimSize_t, 2*DIM>,
-                                       PadBorderType,
-                                       double> {
+                public Registrable<Pad_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>> {
 public:
     static const std::string Type;
 
-    Pad_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<PadAttr,
-                                             std::array<DimSize_t, 2*DIM>,
-                                             PadBorderType,
-                                             double>;
+                                            std::array<DimSize_t, 2*DIM>,
+                                            PadBorderType,
+                                            double>;
     template <PadAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    Pad_Op() = delete;
 
     constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                      const PadBorderType &borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
-                           attr<PadAttr::BorderType>(borderType),
-                           attr<PadAttr::BorderValue>(borderValue)) {}
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<PadAttr::BeginEndBorders>(beginEndTuples),
+            attr<PadAttr::BorderType>(borderType),
+            attr<PadAttr::BorderValue>(borderValue))) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -62,47 +61,25 @@ public:
      */
     Pad_Op(const Pad_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {}
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Pad_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Pad_Op<DIM>>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final {
-        bool associated = true;
-        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
-            std::array<DimSize_t, DIM + 2> outputDims{};
-            const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
-
-            for (std::size_t dim = 0; dim < DIM; ++dim) {
-                outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
-                                    + inputDims[dim+2]
-                                    + this->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
-            }
-            outputDims[1] = inputDims[1];
-            outputDims[0] = inputDims[0];
-            mOutputs[0]->resize(outputDims);
-        }
-
-        return associated;
-    }
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
-    void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
-        mOutputs[0]->setBackend(name, device);
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<PadAttr::BeginEndBorders>(); }
+    inline PadBorderType& borderType() const noexcept { return mAttributes->template getAttr<PadAttr::BorderType>(); }
+    inline double& borderValue() const noexcept { return mAttributes->template getAttr<PadAttr::BorderValue>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -112,18 +89,11 @@ public:
     }
 };
 
-template <DimIdx_t DIM>
-const std::string Pad_Op<DIM>::Type = "Pad";
-
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
-                                           const std::string& name = "",
-                                           const PadBorderType &borderType = PadBorderType::Constant,
-                                           double borderValue = 0.0)
-{
-    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Pad, not supported");
-    return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
-}
+std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
+                        const std::string& name = "",
+                        const PadBorderType &borderType = PadBorderType::Constant,
+                        double borderValue = 0.0);
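+// Illustrative usage (hypothetical name): pad a 2-D feature map by one pixel on
+// each border, filling with the constant value 0:
+//   auto pad = Pad<2>({1, 1, 1, 1}, "pad1", PadBorderType::Constant, 0.0);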
 
 // helper with C-style array instead of std::array for beginEndTuples to allow automatic template DIM deduction
 template <DimSize_t DIM>
@@ -137,9 +107,12 @@ inline std::shared_ptr<Node> Pad(
 }
 }  // namespace Aidge
 
+extern template class Aidge::Pad_Op<1>;
+extern template class Aidge::Pad_Op<2>;
+
 namespace {
 template <>
-const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndBorders", "BorderType", "BorderValue"};
+const char *const EnumStrings<Aidge::PadAttr>::data[] = {"begin_end_borders", "border_type", "border_value"};
 
 template <>
 const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Edge", "Reflect", "Wrap"};
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 2219f30ec9db7acf55491882a78e7a1ed2931cf0..d5898b3630721b036b3acb916e6dec87455009f7 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -24,62 +24,56 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+class Pop_ProdConso : public ProdConso {
+public:
+    Pop_ProdConso(const Operator& op): ProdConso(op) {}
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+};
+
 class Pop_OpImpl : public OperatorImpl {
 public:
     Pop_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Pop_ProdConso>(mOp); }
     void forward() override;
 };
 
 enum class PopAttr { ForwardStep };
 
 class Pop_Op : public OperatorTensor,
-    public Registrable<Pop_Op, std::string, std::unique_ptr<OperatorImpl>(const Pop_Op&)>,
-    public StaticAttributes<PopAttr, unsigned int> {
+    public Registrable<Pop_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Pop_Op&)>> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<PopAttr, unsigned int>;
-    template <PopAttr e>
-    using attr = typename Attributes_::template attr<e>;
+private:
+    using Attributes_ = StaticAttributes<PopAttr, std::uint32_t>;
+    template <PopAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    Pop_Op()
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<PopAttr::ForwardStep>(0))
-    {
-        mImpl = std::make_shared<Pop_OpImpl>(*this);
-    }
+public:
+    Pop_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Pop_Op(const Pop_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Pop_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Pop_OpImpl>(*this);
-        }
-    }
+    Pop_Op(const Pop_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Pop_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Pop_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
     void updateConsummerProducer() override;
     void forward() override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<PopAttr::ForwardStep>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
@@ -88,15 +82,13 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Pop(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Pop_Op>(), name);
-}
+std::shared_ptr<Node> Pop(const std::string& name = "");
 }  // namespace Aidge
 
 namespace {
 template <>
 const char *const EnumStrings<Aidge::PopAttr>::data[] = {
-    "ForwardStep"
+    "forward_step"
 };
 }
 
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index 08c4de2a254dd267eda4040b54108f93a0c2d922..f6762dd33088f486184bdfd0a5b8dbdbd0c641da 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -25,11 +25,11 @@
 namespace Aidge {
 
 class Pow_Op : public OperatorTensor,
-    public Registrable<Pow_Op, std::string, std::shared_ptr<OperatorImpl>(const Pow_Op&)> {
+    public Registrable<Pow_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pow_Op&)>> {
 public:
     static const std::string Type;
 
-    Pow_Op() : OperatorTensor(Type, 2, 0, 1) {}
+    Pow_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -57,6 +57,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input_1", "data_input_2"};
@@ -66,9 +67,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Pow(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
-}
+std::shared_ptr<Node> Pow(const std::string& name = "");
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_POW_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index c376bab3db22b6710a0915f7fcf2f749a60b7b61..115ddcb5549b1c0daa01b3ab67946655cda7287c 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -30,25 +30,22 @@ enum class ProdAttr { Constant };
 
 class Producer_Op
     : public OperatorTensor,
-      public Registrable<Producer_Op, std::string, std::shared_ptr<OperatorImpl>(
-                                          const Producer_Op &)>,
-      public StaticAttributes<ProdAttr, bool> {
+      public Registrable<Producer_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(
+                                          const Producer_Op &)>> {
 public:
     static const std::string Type;
 
+private:
     using Attributes_ = StaticAttributes<ProdAttr, bool>;
-    template <ProdAttr e>
-    using attr = typename Attributes_::template attr<e>;
+    template <ProdAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Producer_Op() = delete;
 
     template <std::size_t DIM>
     Producer_Op(const std::array<DimSize_t, DIM>& dims,
-                bool constant = false)
-        : OperatorTensor(Type, 0, 0, 1),
-          Attributes_(attr<ProdAttr::Constant>(constant))
-    {
-        mOutputs[0]->resize(dims);
-        mImpl = std::make_shared<OperatorImpl>(*this);
-    }
+                bool constant = false);
 
     /**
      * @brief Construct a new Producer_Op object from a Tensor.
@@ -78,15 +75,13 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Producer_Op(const Producer_Op&)
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Producer_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
     }
 
-    bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
+    inline bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; }
 
     inline bool dimsForwarded() const noexcept override final { return true; }
 
@@ -94,6 +89,10 @@ public:
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {};
@@ -105,22 +104,14 @@ public:
     void forward() override final;
 
     void backward() override final {
-        // fmt::print("Basic Producer backward() function.\n");
+        Log::debug("Basic Producer backward() function.");
     }
 
-    void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) override {
-        if (getAttr<ProdAttr::Constant>()) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
-        }
-        OperatorTensor::setOutput(outputIdx, data);
-    }
+    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const override;
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "", bool constant = false) {
-  static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
-  return std::make_shared<Node>(std::make_shared<Producer_Op>(dims, constant), name);
-}
+std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "", bool constant = false);
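+// Illustrative usage (hypothetical name): a constant 3x3 weight Producer:
+//   auto w = Producer(std::array<DimSize_t, 2>{3, 3}, "weight", /*constant=*/true);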
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <std::size_t DIM>
@@ -128,31 +119,25 @@ inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::s
   return Producer(to_array(dims), name, constant);
 }
 
-inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "", bool constant = false) {
-  return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor, constant), name);
-}
+std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "", bool constant = false);
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) {
-    assert(inputIdx != gk_IODefaultIndex);
-    static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
-    const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
-    auto prod = Producer(dims, prodName);
-    prod->addChild(otherNode, 0, inputIdx);
-    otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
-}
+std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode,
+            const IOIndex_t inputIdx,
+            const std::array<DimSize_t, DIM>& dims,
+            const std::string& extension);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <std::size_t DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
-    addProducer(otherNode, inputIdx, to_array(dims), extension);
+std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
+    return addProducer(otherNode, inputIdx, to_array(dims), extension);
 }
 } // namespace Aidge
 
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ProdAttr>::data[] = {
-    "Constant"
+    "constant"
 };
 }
 #endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 963de31c49f48784e92434b2b563d6c008e2d4fd..9b264c1d3d7955f71538dd90f105cfd7ee469d0a 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -26,36 +26,27 @@
 namespace Aidge {
 
 class ReLU_Op : public OperatorTensor,
-    public Registrable<ReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const ReLU_Op&)> {
+    public Registrable<ReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>> {
 public:
     static const std::string Type;
 
-    ReLU_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    ReLU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ReLU_Op(const ReLU_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ReLU_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    ReLU_Op(const ReLU_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ReLU_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ReLU_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -65,9 +56,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
-}
+std::shared_ptr<Node> ReLU(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index ff8d8b0696aafdab48cd37d049fa0473078d7ea6..5d5895a8fb279f1efa5c6321614199f44402b83a 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -26,52 +26,61 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ReduceMeanAttr { Axes, KeepDims };
+enum class ReduceMeanAttr { Axes, KeepDims, NoopWithEmptyAxes };
 
+/**
+ * @brief Reduces the given axes by replacing them with the mean of the values along those axes.
+ */
 class ReduceMean_Op : public OperatorTensor,
-                public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>,
-                public StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t> {
+                public Registrable<ReduceMean_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>> {
 
-   public:
+public:
     static const std::string Type;
 
-    ReduceMean_Op() = delete;
-
-    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t>;
+private:
+    using Attributes_ = StaticAttributes<ReduceMeanAttr,
+                                            std::vector<std::int32_t>,
+                                            bool,
+                                            bool>;
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<ReduceMeanAttr::Axes>(axes),
-                      attr<ReduceMeanAttr::KeepDims>(keep_dims)) {}
+public:
+    ReduceMean_Op() = delete;
+
+    /**
+     * @brief Constructor for the ReduceMean operator.
+     * @param[in] axes Axes over which the mean is computed.
+     * @param[in] keep_dims If true, reduced axes are kept with a dimension of 1;
+     * if false, they are removed entirely.
+     * @param[in] noop_with_empty_axes Behaviour when no axes are provided: if true,
+     * the operator does nothing; if false, it reduces over all axes.
+     */
+    ReduceMean_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    ReduceMean_Op(const ReduceMean_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    ReduceMean_Op(const ReduceMean_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ReduceMean_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<ReduceMean_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes->getAttr<ReduceMeanAttr::Axes>(); }
+    inline bool& keepDims() const noexcept { return mAttributes->getAttr<ReduceMeanAttr::KeepDims>(); }
+    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes->getAttr<ReduceMeanAttr::NoopWithEmptyAxes>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
@@ -79,6 +88,8 @@ class ReduceMean_Op : public OperatorTensor,
     static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
+
+    virtual ~ReduceMean_Op() noexcept;
 };
 
 /**
@@ -90,14 +101,7 @@ class ReduceMean_Op : public OperatorTensor,
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> Node containing the Operator.
  */
-inline std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
-                                        DimSize_t keep_dims=1,
-                                        const std::string& name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
-    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims), name);
 
-}
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 // template <DimSize_t DIM>
@@ -111,12 +115,16 @@ inline std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
 
 // template <DimIdx_t DIM>
 // const std::string ReduceMean_Op::Type = "ReduceMean";
+std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
+                                        bool keep_dims=true,
+                                        bool noop_with_empty_axes=false,
+                                        const std::string& name = "");
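+// Illustrative usage (hypothetical name): mean over axes 1 and 2, keeping the rank:
+//   auto rm = ReduceMean({1, 2}, /*keep_dims=*/true, /*noop_with_empty_axes=*/false, "rmean");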
 
 }  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"Axes", "KeepDims"};
+const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..bae03cb7d2e3ac855537eb22e54bf706ec0e0b4a
--- /dev/null
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -0,0 +1,136 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_REDUCESUM_H_
+#define AIDGE_CORE_OPERATOR_REDUCESUM_H_
+
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ReduceSumAttr { Axes, KeepDims, NoopWithEmptyAxes };
+
+
+/**
+ * @brief Reduces the given axes by replacing them with the sum of the values along those axes.
+ */
+class ReduceSum_Op : public OperatorTensor,
+                public Registrable<ReduceSum_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceSum_Op &)>> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<ReduceSumAttr,
+                                            std::vector<std::int32_t>,
+                                            bool,
+                                            bool>;
+    template <ReduceSumAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    ReduceSum_Op() = delete;
+
+    /**
+     * @brief Constructor for the ReduceSum operator.
+     * @param[in] axes Axes over which the sum is computed.
+     * @param[in] keep_dims If true, reduced axes are kept with a dimension of 1;
+     * if false, they are removed entirely.
+     * @param[in] noop_with_empty_axes Behaviour when no axes are provided: if true,
+     * the operator does nothing; if false, it reduces over all axes.
+     */
+    ReduceSum_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ReduceSumAttr::Axes>(axes),
+            attr<ReduceSumAttr::KeepDims>(keep_dims),
+            attr<ReduceSumAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ReduceSum_Op(const ReduceSum_Op& op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
+    {
+        if (op.mImpl){
+            SET_IMPL_MACRO(ReduceSum_Op, *this, op.backend());
+        } else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ReduceSum_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ReduceSum_Op>(*this);
+    }
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes->getAttr<ReduceSumAttr::Axes>(); }
+    inline bool& keepDims() const noexcept { return mAttributes->getAttr<ReduceSumAttr::KeepDims>(); }
+    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes->getAttr<ReduceSumAttr::NoopWithEmptyAxes>(); }
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+/**
+ * @brief Compute the sum of a Tensor over the provided axes. The reduced
+ * dimensions may either be erased or kept with size 1.
+ *
+ * @param axes Dimensions over which the sum should be computed.
+ * @param keep_dims Whether reduced dimensions are kept (true) or erased (false).
+ * @param noop_with_empty_axes If no axes are provided: do nothing when true, reduce over all axes when false.
+ * @param name Name of the Operator.
+ * @return std::shared_ptr<Node> Node containing the Operator.
+ */
+inline std::shared_ptr<Node> ReduceSum(const std::vector<std::int32_t> &axes={},
+                                        bool keep_dims=true,
+                                        bool noop_with_empty_axes=false,
+                                        const std::string& name = "") {
+    // FIXME: properly handle default w&b initialization in every case
+    AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceSum, not supported");
+    return std::make_shared<Node>(std::make_shared<ReduceSum_Op>(axes, keep_dims, noop_with_empty_axes), name);
+
+}
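+// Illustrative usage (hypothetical name): with an empty axes list and
+// noop_with_empty_axes left false, the sum is taken over all axes:
+//   auto rs = ReduceSum({}, /*keep_dims=*/false);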
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_REDUCESUM_H_ */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 12fbda88b0044f836b298e0cf818724f53f821a7..721b964d3ff4cd87121d43e8719a8fde1445761b 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -32,54 +32,44 @@ public:
 enum class ReshapeAttr { Shape, AllowZero };
 
 class Reshape_Op : public OperatorTensor,
-                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)>,
-                   public StaticAttributes<ReshapeAttr, std::vector<std::int64_t>, bool> {
+                   public Registrable<Reshape_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Reshape_Op&)>> {
 
 public:
     static const std::string Type;
 
-    Reshape_Op() = delete;
+private:
+    using Attributes_ = StaticAttributes<ReshapeAttr,
+                                            std::vector<std::int64_t>,
+                                            bool>;
+    template <ReshapeAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    using Attributes_ = StaticAttributes<ReshapeAttr, std::vector<std::int64_t>,  bool>;
-    template <ReshapeAttr e>
-    using attr = typename Attributes_::template attr<e>;
+public:
+    Reshape_Op() = delete;
 
-    Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
-        : OperatorTensor(Type, 2, 0, 1),
-          Attributes_(attr<ReshapeAttr::Shape>(shape),
-                      attr<ReshapeAttr::AllowZero>(allowzero))
-    {
-        mImpl = std::make_shared<Reshape_OpImpl>(*this);
-    }
+    Reshape_Op(const std::vector<std::int64_t>& shape = {}, bool allowzero = false);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Reshape_Op(const Reshape_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Reshape_OpImpl>(*this);
-        }
-    }
+    Reshape_Op(const Reshape_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Reshape_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Reshape_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int64_t>& shape() const { return mAttributes->template getAttr<ReshapeAttr::Shape>(); }
+    inline bool& allowZero() const { return mAttributes->template getAttr<ReshapeAttr::AllowZero>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -89,17 +79,14 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
-                                     bool allowzero = false,
-                                   	 const std::string &name = "") {
-    // FIXME: properly handle default w&b initialization in every cases
-    return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape, allowzero), name);
-}
+std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
+                            bool allowzero = false,
+                            const std::string &name = "");
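+// Illustrative usage (hypothetical name): flatten to a single row, inferring the
+// second dimension (-1 follows the usual ONNX Reshape convention):
+//   auto r = Reshape({1, -1}, /*allowzero=*/false, "flatten");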
 }  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = { "Shape", "AllowZero" };
+const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = { "shape", "allow_zero" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_RESHAPE_H_ */
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a48b95aff2a18750d83f12a62c408ad41b20afee
--- /dev/null
+++ b/include/aidge/operator/Resize.hpp
@@ -0,0 +1,68 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_RESIZE_H_
+#define AIDGE_CORE_OPERATOR_RESIZE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Resize_Op : public OperatorTensor,
+                  public Registrable<Resize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Resize_Op&)>>{
+
+public:
+    static const std::string Type;
+
+    Resize_Op();
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Resize_Op(const Resize_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Resize_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    bool dimsForwarded() const override final;
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        // roi, scales and sizes are inputs even though they act as constant parameters
+        return {"data_input", "roi", "scales", "sizes"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> Resize(const std::string &name = "");
+
+}  // namespace Aidge
+
+
+#endif /* AIDGE_CORE_OPERATOR_RESIZE_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index c864bd045d8a5a1fc5f4ee591d1d81fcaf241bac..4ef39f63a2f9af34cd3fe28b01cf2fc195bdfc6e 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -12,65 +12,57 @@
 #ifndef AIDGE_CORE_OPERATOR_SCALING_H_
 #define AIDGE_CORE_OPERATOR_SCALING_H_
 
+#include <cstddef>  // std::size_t
 #include <vector>
 #include <memory>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
 enum class ScalingAttr {
-    scalingFactor, quantizedNbBits, isOutputUnsigned
+    ScalingFactor, QuantizedNbBits, IsOutputUnsigned
 };
 
-class Scaling_Op 
+class Scaling_Op
     : public OperatorTensor,
-      public Registrable<Scaling_Op, std::string, std::shared_ptr<OperatorImpl>(const Scaling_Op&)>,
-      public StaticAttributes<ScalingAttr, float, size_t, bool> {
+      public Registrable<Scaling_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Scaling_Op&)>> {
 public:
     static const std::string Type;
 
-    Scaling_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ScalingAttr, float, std::size_t, bool>;
     template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(
-            attr<ScalingAttr::scalingFactor>(scalingFactor),
-            attr<ScalingAttr::quantizedNbBits>(nbBits),
-            attr<ScalingAttr::isOutputUnsigned>(isOutputUnsigned))
-    {}
+public:
+    Scaling_Op() = delete;
+
+    Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Scaling_Op(const Scaling_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Scaling_Op(const Scaling_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Scaling_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Scaling_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& scalingFactor() const noexcept { return mAttributes->getAttr<ScalingAttr::ScalingFactor>(); }
+    inline std::size_t& quantizedNbBits() const noexcept { return mAttributes->getAttr<ScalingAttr::QuantizedNbBits>(); }
+    inline bool& isOutputUnsigned() const noexcept { return mAttributes->getAttr<ScalingAttr::IsOutputUnsigned>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
@@ -85,19 +77,16 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri
     return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name);
 }
 */
-inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, 
-                                     std::size_t quantizedNbBits=8, 
-                                     bool isOutputUnsigned=true, 
-                                     const std::string& name = "") 
-{
-    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name);
-}
+std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
+                                     std::size_t quantizedNbBits=8,
+                                     bool isOutputUnsigned=true,
+                                     const std::string& name = "");
 } // namespace Aidge
 
 namespace {
 template <>
 const char* const EnumStrings<Aidge::ScalingAttr>::data[]
-    = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"};
+    = {"scaling_factor", "quantized_nb_bits", "is_output_unsigned"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 3132e4ab7adcc331772d627147cc31c25597570a..cfd43fa0dd5a064ee21eafc2d0f50c12ad6e3272 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -36,51 +36,41 @@ enum class ShapeAttr { Start, End };
 class Shape_Op : public OperatorTensor,
                 public Registrable<Shape_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Shape_Op&)>,
-                public StaticAttributes<ShapeAttr, std::int64_t, std::int64_t> {
+                                   std::function<std::shared_ptr<OperatorImpl>(const Shape_Op&)>> {
 
 public:
     static const std::string Type;
 
-    Shape_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ShapeAttr, std::int64_t, std::int64_t>;
     template <ShapeAttr e> using attr = typename Attributes_::template attr<e>;
-    Shape_Op(std::int64_t start, std::int64_t end)
-            : OperatorTensor(Type, 1, 0, 1),
-            Attributes_(attr<ShapeAttr::Start>(start),
-                        attr<ShapeAttr::End>(end))
-    {
-        mImpl = std::make_shared<Shape_OpImpl>(*this);
-    }
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Shape_Op() = delete;
+
+    Shape_Op(const std::int64_t start, const std::int64_t end);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Shape_Op(const Shape_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Shape_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Shape_OpImpl>(*this);
-        }
-    }
+    Shape_Op(const Shape_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Shape_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Shape_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int64_t& start() const noexcept { return mAttributes->getAttr<ShapeAttr::Start>(); }
+    inline std::int64_t& end() const noexcept { return mAttributes->getAttr<ShapeAttr::End>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -90,14 +80,12 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Shape(std::int64_t start = 0, std::int64_t end = -1, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
-}
+std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = "");
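+// Illustrative usage (hypothetical name), assuming the default start/end select
+// the whole shape:
+//   auto s = Shape(0, -1, "shape0");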
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"Start", "End"};
+const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"start", "end"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SHAPE_H_ */
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..30f1d71e0a56d92a70830a5def81040e0c5a186c
--- /dev/null
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 10.09.2024
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SHIFTGELU_H_
+#define AIDGE_CORE_OPERATOR_SHIFTGELU_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class ShiftGELU_Op : public OperatorTensor,
+    public Registrable<ShiftGELU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)>> {
+public:
+    static const std::string Type;
+
+    ShiftGELU_Op();
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ShiftGELU_Op(const ShiftGELU_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ShiftGELU_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> ShiftGELU(const std::string& name = "");
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SHIFTGELU_H_ */
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9fbd81aedef1eb640a7ce805d745297edb640560
--- /dev/null
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 10.09.2024
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SHIFTMAX_H_
+#define AIDGE_CORE_OPERATOR_SHIFTMAX_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class ShiftMax_Op : public OperatorTensor,
+    public Registrable<ShiftMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftMax_Op&)>> {
+public:
+    static const std::string Type;
+
+    ShiftMax_Op();
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ShiftMax_Op(const ShiftMax_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ShiftMax_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> ShiftMax(const std::string& name = "");
+}
+
+#endif /* AIDGE_CORE_OPERATOR_SHIFTMAX_H_ */
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index bea9fc45eaa7f17f71963106b5bd3e1340a48a92..24bc3321673f4dcffd3e3663f7e0a0e584389492 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -26,36 +26,18 @@
 namespace Aidge {
 
 class Sigmoid_Op : public OperatorTensor,
-    public Registrable<Sigmoid_Op, std::string, std::unique_ptr<OperatorImpl>(const Sigmoid_Op&)> {
+    public Registrable<Sigmoid_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Sigmoid_Op&)>> {
 public:
     static const std::string Type;
 
-    Sigmoid_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Sigmoid_Op();
 
-    /**
-     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
-     */
-    Sigmoid_Op(const Sigmoid_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
-
-    /**
-     * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Sigmoid_Op
-     */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Sigmoid_Op>(*this);
-    }
+    Sigmoid_Op(const Sigmoid_Op& op);
 
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -65,9 +47,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Sigmoid(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Sigmoid_Op>(), name);
-}
+std::shared_ptr<Node> Sigmoid(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SIGMOID_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index c8f16bb1ad769299a89d3f8a05e46960fe824711..811402420df170c011e478148cf646e6c585cc84 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -29,23 +29,26 @@ enum class SliceAttr { Starts, Ends, Axes, Steps };
 
 class Slice_Op
     : public OperatorTensor,
-      public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)>,
-      public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int8_t>, std::vector<std::int64_t>> {
-
+      public Registrable<Slice_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Slice_Op &)>> {
 public:
     static const std::string Type;
 
+private:
+    using Attributes_ = StaticAttributes<SliceAttr,
+                                            std::vector<std::int64_t>,
+                                            std::vector<std::int64_t>,
+                                            std::vector<std::int8_t>,
+                                            std::vector<std::int64_t>>;
+    template <SliceAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
     Slice_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int8_t>, std::vector<std::int64_t>>;
-    template <SliceAttr e> using attr = typename Attributes_::template attr<e>;
-    Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>& ends, const std::vector<std::int8_t>& axes, const std::vector<std::int64_t>& steps)
-        : OperatorTensor(Type, 5, 0, 1),
-          Attributes_(attr<SliceAttr::Starts>(starts),
-                      attr<SliceAttr::Ends>(ends),
-                      attr<SliceAttr::Axes>(axes),
-                      attr<SliceAttr::Steps>(steps))
-    {}
+    Slice_Op(const std::vector<std::int64_t>& starts,
+            const std::vector<std::int64_t>& ends,
+            const std::vector<std::int8_t>& axes,
+            const std::vector<std::int64_t>& steps);
 
 
     /**
@@ -53,29 +56,26 @@ public:
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Slice_Op(const Slice_Op &op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Slice_Op, *this, op.backend());
-        }
-        else {
-            mImpl = nullptr;
-        }
-    }
+    Slice_Op(const Slice_Op &op);
 
 public:
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Slice_Op
      */
-    std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
-    bool forwardDims(bool allowDataDependency = false) override final;
+    bool forwardDims(bool allowDataDependency = true) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int64_t>& starts() const noexcept { return mAttributes->getAttr<SliceAttr::Starts>(); }
+    inline std::vector<std::int64_t>& ends() const noexcept { return mAttributes->getAttr<SliceAttr::Ends>(); }
+    inline std::vector<std::int8_t>& axes() const noexcept { return mAttributes->getAttr<SliceAttr::Axes>(); }
+    inline std::vector<std::int64_t>& steps() const noexcept { return mAttributes->getAttr<SliceAttr::Steps>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "starts", "ends", "axes", "steps"};
@@ -91,18 +91,16 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
-inline std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
+std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
                                    const std::vector<std::int64_t>& ends = {},
                                    const std::vector<std::int8_t>& axes = {},
                                    const std::vector<std::int64_t>& steps = {},
-                                   const std::string &name = "") {
-    return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes, steps), name);
-}
+                                   const std::string &name = "");
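+
+// Illustrative usage (a sketch, not part of this patch): keep elements 1..4
+// (end excluded) along axis 0, with unit step:
+//   auto mySlice = Slice({1}, {4}, {0}, {1}, "slice0");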
 }  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "Starts", "Ends", "Axes", "Steps" };
+const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "starts", "ends", "axes", "steps" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SLICE_H_ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 1868dc6e3df48401ef3f8a126b07572e2f45144d..72ea56dd6293e416ddcca12ac38fd57d76071354 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -24,49 +24,44 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class SoftmaxAttr { AxisIdx };
+enum class SoftmaxAttr { Axis };
 
 class Softmax_Op : public OperatorTensor,
                 public Registrable<Softmax_Op,
                                    std::string,
-                                   std::shared_ptr<OperatorImpl>(const Softmax_Op&)>,
-                public StaticAttributes<SoftmaxAttr, std::size_t> {
+                                   std::function<std::shared_ptr<OperatorImpl>(const Softmax_Op&)>> {
 
 public:
     static const std::string Type;
 
+private:
+    using Attributes_ = StaticAttributes<SoftmaxAttr, std::int32_t>;
+    template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
     Softmax_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SoftmaxAttr, std::size_t>;
-    template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
-    Softmax_Op(std::size_t axis)
-            :  OperatorTensor(Type, 1, 0, 1),
-            Attributes_(attr<SoftmaxAttr::AxisIdx>(axis)) {}
+    Softmax_Op(std::int32_t axis);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Softmax_Op(const Softmax_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    Softmax_Op(const Softmax_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Softmax_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Softmax_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    inline std::int32_t& axis() const noexcept { return mAttributes->getAttr<SoftmaxAttr::Axis>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -76,14 +71,12 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Softmax(std::size_t axis, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
-}
+std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "");
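+
+// Illustrative usage (a sketch, not part of this patch): softmax over the
+// channel axis (axis 1) of an NCHW tensor:
+//   auto mySoftmax = Softmax(1, "softmax0");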
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"Axis"};
+const char *const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"axis"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index ff50a6aa7b8de971431515a09ca4e684dcc51865..8c3a111c42dfeb2b4e27269839e41f3b362bdda3 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -34,23 +34,20 @@ enum class SplitAttr { Axis, Split };
 
 class Split_Op
     : public OperatorTensor,
-      public Registrable<Split_Op, std::string, std::shared_ptr<OperatorImpl>(const Split_Op &)>,
-      public StaticAttributes<SplitAttr, std::int8_t, std::vector<DimSize_t>> {
+      public Registrable<Split_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Split_Op &)>> {
 
 public:
     static const std::string Type;
 
+private:
+    using Attributes_ = StaticAttributes<SplitAttr, std::int8_t, std::vector<DimSize_t>>;
+    template <SplitAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
     Split_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SplitAttr,  std::int8_t, std::vector<DimSize_t>>;
-    template <SplitAttr e> using attr = typename Attributes_::template attr<e>;
-    Split_Op( std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split)
-        : OperatorTensor(Type, 2, 0, nbOutputs),
-          Attributes_(attr<SplitAttr::Axis>(axis),
-                      attr<SplitAttr::Split>(split))
-    {
-        mImpl = std::make_shared<Split_OpImpl>(*this);
-    }
+    Split_Op(std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split);
 
 
     /**
@@ -58,28 +55,24 @@ public:
      * input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Split_Op(const Split_Op &op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Split_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<Split_OpImpl>(*this);
-        }
-    }
+    Split_Op(const Split_Op &op);
+
 public:
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Split_Op
      */
-    std::shared_ptr<Operator> clone() const override { return std::make_shared<Split_Op>(*this); }
+    std::shared_ptr<Operator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int8_t& axis() const { return mAttributes->template getAttr<SplitAttr::Axis>(); }
+    inline std::vector<DimSize_t>& split() const { return mAttributes->template getAttr<SplitAttr::Split>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "split"};
@@ -95,17 +88,15 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
-inline std::shared_ptr<Node> Split(DimSize_t nbOutput,
+std::shared_ptr<Node> Split(DimSize_t nbOutput,
                                    std::int8_t axis = 0,
                                    const std::vector<DimSize_t>& split = {},
-                                   const std::string &name = "") {
-    return std::make_shared<Node>(std::make_shared<Split_Op>(axis, nbOutput, split), name);
-}
+                                   const std::string &name = "");
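+
+// Illustrative usage (a sketch, not part of this patch): split a tensor into
+// 3 outputs along axis 0; an empty `split` vector is assumed to request
+// equally-sized chunks:
+//   auto mySplit = Split(3, 0, {}, "split0");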
 }  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::SplitAttr>::data[] = { "Axis", "Split" };
+const char *const EnumStrings<Aidge::SplitAttr>::data[] = { "axis", "split" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SPLIT_H_ */
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index f5ffa431192d73a703c1ce973cb485dadb31420d..4858cdcd164d6be0582ddabe67c780461a9667aa 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -14,8 +14,8 @@
 
 #include <memory>
 #include <vector>
+#include <functional>
+#include <string>
 
-#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -24,40 +24,28 @@
 namespace Aidge {
 
 class Sqrt_Op : public OperatorTensor,
-    public Registrable<Sqrt_Op, std::string, std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
+                public Registrable<Sqrt_Op,
+                                std::string,
+                                std::function<std::shared_ptr<OperatorImpl>(const Sqrt_Op&)>> {
 public:
     static const std::string Type;
 
-    Sqrt_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Sqrt_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Sqrt_Op(const Sqrt_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Sqrt_Op, *this, op.backend());
-        }else{
-            mImpl = nullptr;
-        }
-    }
+    Sqrt_Op(const Sqrt_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Sqrt_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Sqrt_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -67,9 +55,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Sqrt(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Sqrt_Op>(), name);
-}
+std::shared_ptr<Node> Sqrt(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SQRT_H_ */
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..64a775eb4209ecad0e29decd8336ebb77bbe652f
--- /dev/null
+++ b/include/aidge/operator/Squeeze.hpp
@@ -0,0 +1,160 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SQUEEZE_H_
+#define AIDGE_CORE_OPERATOR_SQUEEZE_H_
+
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <limits>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+/**
+ * @brief Implementation of the Squeeze operator.
+ * @note Since this operator's implementation is backend-agnostic, it is
+ * located here instead of in aidge_backend_cpu/cuda.
+ */
+class Squeeze_OpImpl : public OperatorImpl {
+public:
+  Squeeze_OpImpl(const Operator &op, const std::string &backend = "")
+      : OperatorImpl(op, backend) {}
+  void forward() override;
+};
+
+enum class SqueezeAttr {
+  /**
+   * @brief Axes to squeeze; if left empty, all size-1
+   * dimensions will be removed.
+   */
+  Axes
+};
+
+/**
+ * @brief The purpose of this operator is to remove dummy (size-1) dimensions
+ * at the given axes.
+ * input#0 : Tensor to squeeze
+ * input#1 (optional) : 1D tensor listing the axes to squeeze
+ * @note The axes to squeeze can be given either via attribute or via input#1;
+ * for the sake of simplicity, the examples below give them via attribute.
+ * @example Calling squeeze(1) on a tensor of dimensions (2,1,3,4) will result
+ * in a tensor of dim (2,3,4).
+ * @example Calling squeeze(1) on a tensor of dimensions (1,2,3,4) will result
+ * in a tensor of dim (1,2,3,4).
+ * @example Calling squeeze() with no argument will result in the removal of
+ * every 1-sized dimension in the tensor.
+ */
+class Squeeze_Op
+    : public OperatorTensor,
+      public Registrable<Squeeze_Op, std::string,
+                         std::function<std::shared_ptr<OperatorImpl>(const Squeeze_Op &)>> {
+
+public:
+  static const std::string
+      Type; // name of the type of the operation (Here "Squeeze")
+
+private:
+  using Attributes_ = StaticAttributes<SqueezeAttr, std::vector<int8_t>>;
+  template <SqueezeAttr e> using attr = typename Attributes_::template attr<e>;
+  const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+  /**
+   * @brief Constructor for the Squeeze operator.
+   * @param[in] axes axes to squeeze
+   */
+  Squeeze_Op(const std::vector<int8_t> &axes = {})
+      : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData},
+                       1),
+        mAttributes(
+            std::make_shared<Attributes_>(attr<SqueezeAttr::Axes>(axes))) {
+    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
+  }
+
+  /**
+   * @brief Copy-constructor. Copy the operator attributes and its output
+   * tensor(s), but not its input tensors (the new operator has no input
+   * associated).
+   * @param op Operator to copy.
+   */
+  Squeeze_Op(const Squeeze_Op &op)
+      : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (!op.backend().empty()) {
+      SET_IMPL_MACRO(Squeeze_Op, *this, op.backend());
+    } else {
+      mImpl = std::make_shared<Squeeze_OpImpl>(*this);
+    }
+  }
+
+  /**
+   * @brief Clone the operator using its copy-constructor.
+   * @see Operator::Squeeze_Op
+   */
+  std::shared_ptr<Operator> clone() const override final {
+    return std::make_shared<Squeeze_Op>(*this);
+  }
+
+  /**
+   * @brief Compute dimensions for the output Tensor
+   */
+  bool forwardDims(bool allowDataDependency = false) override final;
+  bool dimsForwarded() const override final;
+
+  void setBackend(const std::string &name,
+                  DeviceIdx_t device = 0) override final;
+  std::set<std::string> getAvailableBackends() const override;
+
+  inline std::shared_ptr<Attributes> attributes() const override {
+    return mAttributes;
+  }
+
+  /**
+   * @brief Axes to squeeze; if left empty, all size-1
+   * dimensions will be removed.
+   */
+  inline std::vector<int8_t> &axes() const noexcept {
+    return mAttributes->template getAttr<SqueezeAttr::Axes>();
+  }
+
+  static const std::vector<std::string> getInputsName() {
+    return {"data_input", "axes_to_squeeze"};
+  }
+  static const std::vector<std::string> getOutputsName() {
+    return {"squeezed"};
+  }
+};
+
+// helper to create a Squeeze node wrapping a Squeeze_Op
+inline std::shared_ptr<Node> Squeeze(const std::vector<int8_t> axes = {},
+                                     const std::string &name = "") {
+  return std::make_shared<Node>(std::make_shared<Squeeze_Op>(axes), name);
+}
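+
+// Illustrative usage (a sketch, not part of this patch), matching the
+// @example above: squeezing axis 1 of a (2,1,3,4) tensor yields (2,3,4):
+//   auto mySqueeze = Squeeze({1}, "squeeze0");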
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::SqueezeAttr>::data[] = {"Axes"};
+}
+
+#endif // AIDGE_CORE_OPERATOR_SQUEEZE_H_
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index e5d8442851c35e9232fdd77d862fb48b71c76f1f..170baf6fd0f38668f64cbd36044c856fae261737 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -12,6 +12,7 @@
 #ifndef AIDGE_CORE_OPERATOR_SUB_H_
 #define AIDGE_CORE_OPERATOR_SUB_H_
 
+#include <array>
 #include <memory>
 #include <vector>
 
@@ -24,43 +25,30 @@
 namespace Aidge {
 
 class Sub_Op : public OperatorTensor,
-    public Registrable<Sub_Op, std::string, std::shared_ptr<OperatorImpl>(const Sub_Op&)> {
-public:
-    // FIXME: change accessibility
-    std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
-    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
-
+    public Registrable<Sub_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Sub_Op&)>> {
 public:
     static const std::string Type;
 
-    Sub_Op() : OperatorTensor(Type, 2, 0, 1) {}
+public:
+    Sub_Op() : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Sub_Op(const Sub_Op& op)
-        : OperatorTensor(op)
-    {
-        if (op.mImpl){
-            SET_IMPL_MACRO(Sub_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Sub_Op(const Sub_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Sub_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Sub_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_1", "data_input_2"};
@@ -70,9 +58,8 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Sub(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
-}
+std::shared_ptr<Node> Sub(const std::string& name = "");
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index 3fd5377d30cfff864743dcab2da9e690e26e5263..f1a30e3f08ce3886cc1ca39a55a3b23979a47860 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -24,36 +24,27 @@
 namespace Aidge {
 
 class Tanh_Op : public OperatorTensor,
-    public Registrable<Tanh_Op, std::string, std::unique_ptr<OperatorImpl>(const Tanh_Op&)> {
+    public Registrable<Tanh_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Tanh_Op&)>> {
 public:
     static const std::string Type;
 
-    Tanh_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Tanh_Op();
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Tanh_Op(const Tanh_Op& op)
-        : OperatorTensor(op)
-    {
-       if (op.mImpl){
-            SET_IMPL_MACRO(Tanh_Op, *this, op.backend());
-        } else {
-            mImpl = nullptr;
-        }
-    }
+    Tanh_Op(const Tanh_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Tanh_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Tanh_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
+    std::set<std::string> getAvailableBackends() const override;
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -63,9 +54,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Tanh(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Tanh_Op>(), name);
-}
+std::shared_ptr<Node> Tanh(const std::string& name = "");
 }
 
 #endif /* AIDGE_CORE_OPERATOR_TANH_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 31420110f19761442b67e9701aeca566976aee1b..155627f2cfd3173ccfbbe2a1ce8c23784cd06d71 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -17,7 +17,6 @@
 #include <numeric>
 #include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
@@ -28,59 +27,50 @@
 namespace Aidge {
 class TransposeImpl : public OperatorImpl {
 public:
-    TransposeImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    TransposeImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend)
+    {}
     void forward() override;
 };
 
 enum class TransposeAttr { OutputDimsOrder };
 
 class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)>,
-                public StaticAttributes<TransposeAttr, std::vector<DimSize_t>> {
+                public Registrable<Transpose_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Transpose_Op&)>> {
 
-   public:
+public:
     static const std::string Type;
 
-    Transpose_Op() = delete;
 
+private:
     using Attributes_ = StaticAttributes<TransposeAttr, std::vector<DimSize_t>>;
-    template <TransposeAttr e>
-    using attr = typename Attributes_::template attr<e>;
-
-    Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder)
-        : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder))
-    {
-        mImpl = std::make_shared<TransposeImpl>(*this);
-    }
+    template <TransposeAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Transpose_Op() = delete;
+
+    Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder);
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Transpose_Op(const Transpose_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
-    {
-        if (!op.backend().empty()) {
-            SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
-        }
-        else {
-            mImpl = std::make_shared<TransposeImpl>(*this);
-        }
-    }
+    Transpose_Op(const Transpose_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Transpose_Op
      */
-    std::shared_ptr<Operator> clone() const override {
-        return std::make_shared<Transpose_Op>(*this);
-    }
+    std::shared_ptr<Operator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes->getAttr<TransposeAttr::OutputDimsOrder>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -90,15 +80,13 @@ class Transpose_Op : public OperatorTensor,
     }
 };
 
-inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
-                                           const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
-}
+std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
+                                           const std::string& name = "");
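+
+// Illustrative usage (a sketch, not part of this patch): NCHW -> NHWC
+// permutation of a 4D tensor:
+//   auto myTranspose = Transpose({0, 2, 3, 1}, "transpose0");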
 }  // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"OutputDimsOrder"};
+const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"output_dims_order"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_TRANSPOSE_H_ */
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..09a689528a6814eca6bb56ef326e2da527f14843
--- /dev/null
+++ b/include/aidge/operator/Unfold.hpp
@@ -0,0 +1,123 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_UNFOLD_H_
+#define AIDGE_CORE_OPERATOR_UNFOLD_H_
+
+#include <array>
+#include <cmath>    // std::floor
+#include <cstddef>  // std::size_t
+#include <functional>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>  // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+template <DimIdx_t DIM>
+class Unfold_OpImpl : public OperatorImpl {
+public:
+    Unfold_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
+enum class UnfoldAttr { StrideDims, DilationDims, KernelDims };
+
+template <DimIdx_t DIM>
+class Unfold_Op : public OperatorTensor,
+                public Registrable<Unfold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM> &)>> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<UnfoldAttr,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>>;
+    template <UnfoldAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Unfold_Op() = delete;
+
+    Unfold_Op(const std::array<DimSize_t, DIM> &kernelDims,
+            const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+            const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
+     * input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Unfold_Op(const Unfold_Op<DIM> &op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Unfold_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<UnfoldAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<UnfoldAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<UnfoldAttr::KernelDims>(); }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Node> Unfold(const std::array<DimSize_t, DIM> &kernelDims,
+                                  const std::string& name = "",
+                                  const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1));
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> Unfold(
+    DimSize_t const (&kernelDims)[DIM],
+    const std::string& name = "",
+    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
+    return Unfold(to_array(kernelDims), name, strideDims, dilationDims);
+}
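+
+// Illustrative usage (a sketch, not part of this patch): 2D unfold with a 3x3
+// kernel and default unit stride/dilation; DIM = 2 is deduced from the
+// C-style array overload above:
+//   auto myUnfold = Unfold({3, 3}, "unfold0");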
+}  // namespace Aidge
+
+extern template class Aidge::Unfold_Op<2>;
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::UnfoldAttr>::data[] = {
+    "stride_dims",
+    "dilation_dims",
+    "kernel_dims"
+};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_UNFOLD_H_ */
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c0710540576959b62bbdf235ff6ea15f9d18cacd
--- /dev/null
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -0,0 +1,158 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
+#define AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
+
+#include <cstdint>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+/**
+ * @brief Implementation of the Unsqueeze operator.
+ * @note Since this operator's implementation is backend-agnostic, it is
+ * located here instead of in aidge_backend_cpu/cuda.
+ */
+class Unsqueeze_OpImpl : public OperatorImpl {
+public:
+  Unsqueeze_OpImpl(const Operator &op, const std::string &backend = "")
+      : OperatorImpl(op, backend) {}
+  void forward() override;
+};
+
+enum class UnsqueezeAttr {
+  /**
+   * @brief vector of axes to unsqueeze.
+   * values must be comprised within
+   * [ -a ; a-1 ]
+   * with a = input_tensor.nbDim() + dims_to_unsqueeze.size()
+   */
+  Axes
+};
+
+/**
+ * @brief The purpose of this operator is to add a dummy (size-1) dimension at
+ * the given axis. Unsqueezing the 2nd dim of a tensor of dim (1,2,3,4) results
+ * in a tensor of dim (1,2,1,3,4).
+ * You can also unsqueeze dimensions whose index is higher than the number of
+ * input dimensions, as long as:
+ * dims_to_unsqueeze[i] < tensor.nbDim() +
+ * dims_to_unsqueeze.size()
+ */
+class Unsqueeze_Op
+    : public OperatorTensor,
+      public Registrable<Unsqueeze_Op, std::string,
+                         std::function<std::shared_ptr<OperatorImpl>(const Unsqueeze_Op &)>> {
+
+public:
+  static const std::string
+      Type; // name of the type of the operation (Here "Unsqueeze")
+
+private:
+  using Attributes_ = StaticAttributes<UnsqueezeAttr, std::vector<int8_t>>;
+  template <UnsqueezeAttr e>
+  using attr = typename Attributes_::template attr<e>;
+  const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+  Unsqueeze_Op() =
+      delete; // no default constructor since this class has attributes
+
+  /**
+   * @brief Constructor for the Unsqueeze operator.
+   * @param[in] axes axes to unsqueeze
+   */
+  Unsqueeze_Op(const std::vector<int8_t> &axes)
+      : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData},
+                       1),
+        mAttributes(
+            std::make_shared<Attributes_>(attr<UnsqueezeAttr::Axes>(axes))) {
+    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
+  }
+
+  /**
+   * @brief Copy-constructor. Copy the operator attributes and its output
+   * tensor(s), but not its input tensors (the new operator has no input
+   * associated).
+   * @param op Operator to copy.
+   */
+  Unsqueeze_Op(const Unsqueeze_Op &op)
+      : OperatorTensor(op), mAttributes(op.mAttributes) {
+    if (!op.backend().empty()) {
+      SET_IMPL_MACRO(Unsqueeze_Op, *this, op.backend());
+    } else {
+      mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
+    }
+  }
+
+  /**
+   * @brief Clone the operator using its copy-constructor.
+   * @see Operator::Unsqueeze_Op
+   */
+  std::shared_ptr<Operator> clone() const override final {
+    return std::make_shared<Unsqueeze_Op>(*this);
+  }
+
+  /**
+   * @brief Compute dimensions for the output Tensor
+   */
+  bool forwardDims(bool allowDataDependency = false) override final;
+  bool dimsForwarded() const override final;
+
+  void setBackend(const std::string &name,
+                  DeviceIdx_t device = 0) override final;
+  std::set<std::string> getAvailableBackends() const override;
+
+  inline std::shared_ptr<Attributes> attributes() const override {
+    return mAttributes;
+  }
+  /**
+   * @brief Vector of axes to unsqueeze.
+   * Values must lie within
+   * [ -a ; a-1 ]
+   * with a = input_tensor.nbDim() + dims_to_unsqueeze.size()
+   */
+  inline std::vector<int8_t> &axes() const noexcept {
+    return mAttributes->template getAttr<UnsqueezeAttr::Axes>();
+  }
+
+  static const std::vector<std::string> getInputsName() {
+    return {"data_input", "axes_to_unsqueeze"};
+  }
+  static const std::vector<std::string> getOutputsName() {
+    return {"unsqueezed"};
+  }
+};
+
+// helper to create an Unsqueeze node wrapping an Unsqueeze_Op
+inline std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
+                                       const std::string &name = "") {
+  return std::make_shared<Node>(std::make_shared<Unsqueeze_Op>(axes), name);
+}
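+
+// Illustrative usage (a sketch, not part of this patch): unsqueezing axis 2
+// of a (1,2,3,4) tensor yields (1,2,1,3,4):
+//   auto myUnsqueeze = Unsqueeze({2}, "unsqueeze0");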
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"Axes"};
+}
+
+#endif // AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
diff --git a/include/aidge/recipes/GraphViewHelper.hpp b/include/aidge/recipes/GraphViewHelper.hpp
index a2c571bf4ed164729f7c3416c814b913b4d07e6f..3b8ba7627362c945a6bfbe587ec952fdda013e98 100644
--- a/include/aidge/recipes/GraphViewHelper.hpp
+++ b/include/aidge/recipes/GraphViewHelper.hpp
@@ -39,8 +39,6 @@ std::set<std::shared_ptr<Tensor>> producers(std::shared_ptr<GraphView> graphview
  */
 std::set<std::shared_ptr<Tensor>> parameters(std::shared_ptr<GraphView> graphview);
 
-void compile_gradient(std::shared_ptr<Aidge::GraphView> gv);
-
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_GRAPHVIEWHELPER_H_ */
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index a56d914721081fd04fc782a7a8c9689371225b48..687698e205afc36c06578fb25d6a714eb3963a66 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -31,37 +31,46 @@ void constantFolding(std::shared_ptr<GraphView> graph);
  *
  * @param nodes Strict set of Node to merge.
  */
-//void fuseMulAdd(std::set<std::shared_ptr<Node>> nodes);
-
-void fuseMulAdd(std::shared_ptr<MatchSolution> solution);
-
-void fuseMulAdd(std::shared_ptr<Node> matmul,std::shared_ptr<Node> add);
+void matMulToFC(std::shared_ptr<Node> matmul, std::shared_ptr<Node> add = nullptr);
 
 /**
  * @brief Merge ``MatMul`` and :cpp:function:`Aidge::Add` Node into a :cpp:function:`Aidge::FC` Node.
  *
  * @param graphView Graph view to use graph matching on, in order to apply transformations.
  */
-void fuseMulAdd(std::shared_ptr<GraphView> graphView);
-
-// REMOVE Dropout
+void matMulToFC(std::shared_ptr<GraphView> graphView);
 
 /**
- * @brief Remove ``Dropout`` Node.
+ * @brief Remove a node type.
  *
- * @param nodes Node to remove.
+ * @param graphView Graph view to use graph matching on, in order to apply transformations.
+ * @param type Type of the nodes to remove
+ * @param incProducers If true, also remove the producers attached to the removed nodes
+ * @return size_t Number of nodes removed
  */
-void removeDropout(std::shared_ptr<Node> dropout);
-
+size_t removeNode(std::shared_ptr<GraphView> graphView, const std::string& type, bool incProducers = false);
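+
+// Illustrative usage (a sketch, not part of this patch): remove every
+// "Identity"-typed node from the graph, keeping attached producers:
+//   const size_t nbRemoved = removeNode(graphView, "Identity");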
 
-void removeDropout(std::shared_ptr<MatchSolution> solution);
+/**
+ * @brief Fuse each constant => Generic | constantOfShape pattern and transform it into a Producer
+ * @param graph_view Graph to manipulate
+ * @return size_t Number of replacements
+ */
+size_t removeConstantOfShape(std::shared_ptr<GraphView> graph_view);
 
 /**
  * @brief Remove ``Dropout`` Node.
  *
  * @param graphView Graph view to use graph matching on, in order to apply transformations.
+ * @return size_t Number of ``Dropout`` nodes removed
  */
-void removeDropout(std::shared_ptr<GraphView> graphView);
+size_t removeDropout(std::shared_ptr<GraphView> graphView);
+
+/**
+ * Remove all Identity nodes.
+ * @param graph Graph to manipulate
+ * @return size_t Number of Identity nodes removed
+ */
+size_t removeIdentity(std::shared_ptr<GraphView> graph);
 
 // REMOVE FLATTEN + FC -> FC
 
@@ -92,10 +101,6 @@ void removeFlatten(std::shared_ptr<GraphView> graphView);
  */
 void fuseBatchNorm(std::shared_ptr<Node> conv,std::shared_ptr<Node> batchnorm);
 
-
-
-void fuseBatchNorm(std::shared_ptr<MatchSolution> solution);
-
 /**
  * @brief Fuse :cpp:function:`Aidge::BatchNorm` with :cpp:function:`Aidge::Conv` or :cpp:function:`Aidge::FC` Nodes.
  * Ref: https://nenadmarkus.com/p/fusing-batchnorm-and-conv/
@@ -111,11 +116,17 @@ std::set<std::shared_ptr<Node>> getConvHorizontalTiling(const std::shared_ptr<No
 
 
 /**
- * Add Convert operators where needed to ensure no conversion needs to be done
+ * Add Cast and Move operators where needed to ensure no conversion needs to be done
  * at the Operator level.
 */
 void explicitCastMove(std::shared_ptr<GraphView> graphView);
 
+/**
+ * Add Transpose operators where needed to ensure no transposition needs to be done
+ * at the Operator level.
+ */
+void explicitTranspose(std::shared_ptr<GraphView> graphView);
+
 /**
  * Flatten the graph by replacing the meta operators by their micro graph.
  * @param recursive If true, recursively replace meta operators until there is
@@ -125,6 +136,29 @@ void expandMetaOps(std::shared_ptr<GraphView> graph, bool recursive = false);
 
 void matMulTiling(NodePtr matMul, const std::vector<DimSize_t>& maxDims);
 
+/**
+ * Fuse each sub-graph matching a query into a Meta Operator.
+ * @param graph Graph to manipulate
+ * @param query Sub-graph matching query
+ * @param type Type name of the resulting meta operators
+ * @return size_t Number of replacements
+ */
+size_t fuseToMetaOps(std::shared_ptr<GraphView> graph, const std::string& query, const std::string& type = "");
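+
+// Illustrative usage (a sketch; the query syntax is assumed here, it is not
+// defined in this header): fuse every MatMul->Add pair into an "FC"-typed
+// meta operator:
+//   const size_t nbFused = fuseToMetaOps(graph, "MatMul->Add", "FC");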
+
+/**
+ * Transform Conv layers into MatMul operations.
+ * @param graph Graph to manipulate
+ * @return size_t Number of replacements
+ */
+size_t convToMatMul(std::shared_ptr<GraphView> graph);
+
+/**
+ * @brief Adapt a graph to the available kernels of a backend.
+ * 
+ * @param graph Graph to manipulate
+ */
+void adaptToBackend(std::shared_ptr<GraphView> graph);
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_RECIPES_H_ */
diff --git a/include/aidge/scheduler/MemoryManager.hpp b/include/aidge/scheduler/MemoryManager.hpp
index 360b01f76e7a9b51f36b83d4d35286eced35016a..2e397d1dbaa1cc8d8f586d15363cbd2245963152 100644
--- a/include/aidge/scheduler/MemoryManager.hpp
+++ b/include/aidge/scheduler/MemoryManager.hpp
@@ -19,6 +19,25 @@
 #include "aidge/graph/Node.hpp"
 
 namespace Aidge {
+/**
+ * @brief The MemoryManager can be used to generate an optimized static memory 
+ * layout for a computing graph in a global memory space.
+ * There are some assumptions:
+ * - A MemoryManager represents a single global memory space, filled with 
+ *   contiguous, non-overlapping MemorySpace chunks.
+ * - A MemorySpace contains one or multiple MemoryPlane, each MemoryPlane
+ *   corresponding to the allocation of a specific Tensor. When a Tensor can re-
+ *   use the memory of the preceding one (for in-place or partially in-place
+ *   operators), multiple overlapping MemoryPlane can be created in the same 
+ *   MemorySpace (remember, MemorySpace **cannot** be overlapping!).
+ * - A MemoryPlane is tailored for handling (N)HWC data with two properties:
+ *   - Possibility of wrapping: on the H axis (each W*C block is contiguous).
+ *   - Possibility of concatenation: on the C axis (C1+C2+...+Cn).
+ * - All the sizes and offsets specified in a MemoryManager are expressed in
+ *   number of data elements, or **words**, meaning a uniform data precision
+ *   is currently expected in a MemoryManager (for instance, if the precision
+ *   is 16 bits, each data element will be 2 bytes, which will be the size of a word).
+ */
 class MemoryManager {
 public:
     typedef int Clock_T;
@@ -45,18 +64,45 @@ public:
             allocated(clock_),
             released(-1) {}
 
+        /// Offset of the MemorySpace in the MemoryManager global memory space (in words)
         unsigned int offset;
+        /// Size of the MemorySpace (in words)
         unsigned int size;
         std::set<std::shared_ptr<Node> > dependencies;
         Clock_T allocated;
         Clock_T released;
     };
 
-    // MemoryPlane belongs to a MemorySpace. Any number of potentially
-    // overlapping planes can be associated to a MemorySpace.
-    // MemoryPlane can be non-contiguous (in case of stride, or wrapping, when
-    // offset + size > memSpace.size).
-    // MemoryPlane cannot be re-arranged inside a MemorySpace.
+    /**
+     * @brief MemoryPlane belongs to a MemorySpace. Any number of potentially
+     * overlapping planes can be associated to a MemorySpace.
+     * MemoryPlane can be non-contiguous (in case of stride, or wrapping, when
+     * offset + size > memSpace.size).
+     * MemoryPlane cannot be re-arranged inside a MemorySpace.
+     * 
+     * A MemoryPlane is tailored for handling (N)HWC data with two properties:
+     * - Possibility of wrapping: on the H axis (each W*C block is contiguous).
+     * - Possibility of concatenation: on the C axis (C1+C2+...+Cn).
+     * 
+     * Detail of (N)HWC data handling:
+     * - \p length is the size of contiguous and non-breakable memory line (W in HWC);
+     * - \p count is the number of memory lines of size \p length constituting a memory block (H in HWC);
+     * - \p stride is the number of channels, or memory blocks, *in total*, 
+     *   of \p count lines of size \p length (C in NHWC);
+     * - \p size is the number of channels, or memory blocks, *in this MemoryPlane*,
+     *   of \p count lines of size \p length.
+     *   In the case of concatenation, there can be multiple overlapping MemoryPlane
+     *   with different size, like NHWC = NHW(C1+C2):
+     *   - MemoryPlane#1: \p size = C1 and \p stride = C=C1+C2
+     *   - MemoryPlane#2: \p size = C2 and \p stride = C=C1+C2
+     *                    (with an additional relative offset of +C1)
+     * In this mode, wrapping can only occur on the H (\p count) axis. W*C chunks
+     * are guaranteed to be contiguous (\p length * \p stride).
+     * 
+     * By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning
+     * there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**.
+     * In this case, \p size is the total size of the MemoryPlane (H*W*C, in words).
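+     * 
+     * Worked example (illustrative numbers): for HWC data with H=4, W=8 and
+     * concatenated channels C = C1+C2 = 3+5, MemoryPlane#1 has \p length = 8,
+     * \p count = 4, \p stride = 8 and \p size = 3, so
+     * getSize() = stride*length*count = 8*8*4 = 256 words, while
+     * getUsefulSize() = size*length*count = 3*8*4 = 96 words.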
+     */
     struct MemoryPlane {
         MemoryPlane(std::shared_ptr<MemorySpace> memSpace_,
                     Clock_T clock_,
@@ -92,36 +138,91 @@ public:
                 <= memSpace->offset + memSpace->size);
         }
 
+        /**
+         * @brief Get the total size of the MemoryPlane, including the stride.
+         * 
+         * @return unsigned int Total size in words
+         */
         inline unsigned int getSize() const {
             return stride * length * count;
         }
 
+        /**
+         * @brief Get the useful size of the MemoryPlane, as if its memory blocks
+         * were contiguous, without stride.
+         * 
+         * @return unsigned int Useful size in words
+         */
         inline unsigned int getUsefulSize() const {
             return size * length * count;
         }
 
+        /**
+         * @brief Get the absolute offset of the beginning of the memory plane.
+         * 
+         * @return unsigned int Contiguous offset in words
+         */
         inline unsigned int getContiguousOffset() const {
             return memSpace->offset + offset;
         }
 
+        /**
+         * @brief Get the size of the contiguous part of the memory plane, from
+         * its beginning to the limit of the MemorySpace size.
+         * If the MemoryPlane fill the MemorySpace without wrapping, the contiguous
+         * size will be the same as the total size of the MemoryPlane.
+         * 
+         * @return unsigned int Contiguous size in words
+         */
         inline unsigned int getContiguousSize() const {
             return std::min(getSize(), getLimit());
         }
 
+        /**
+         * @brief Get the absolute offset of the wrapped part of the memory plane.
+         * Since the wrapped part of the memory plane begins at the beginning of
+         * the MemorySpace, the returned offset is always the same as the MemorySpace
+         * offset.
+         * 
+         * @return unsigned int Wrapped offset in words
+         */
         inline unsigned int getWrappedOffset() const {
             return memSpace->offset;
         }
 
+        /**
+         * @brief Get the size of the wrapped part of the memory plane, from
+         * the beginning of the MemorySpace to the total size of the MemoryPlane,
+         * including the stride.
+         * If the MemoryPlane fills the MemorySpace without wrapping, the wrapped
+         * size will be 0.
+         * 
+         * @return unsigned int Wrapped size in words
+         */
         inline unsigned int getWrappedSize() const {
             return getSize() - getContiguousSize();
         }
 
+        /**
+         * @brief Get the absolute offset after the end of the memory plane (if it
+         * is wrapped, the offset will correspond to the end of the wrapped part).
+         * The word at the final offset is not included in the MemoryPlane.
+         * 
+         * @return unsigned int Final offset in words
+         */
         inline unsigned int getFinalOffset() const {
             return (getWrappedSize() > 0)
                 ? getWrappedOffset() + getWrappedSize()
                 : getContiguousOffset() + getContiguousSize();
         }
 
+        /**
+         * @brief Get the absolute offset after the end of the contiguous part
+         * of the memory plane.
+         * The word at the upper offset is not included in the MemoryPlane.
+         * 
+         * @return unsigned int Upper offset in words
+         */
         inline unsigned int getUpperOffset() const {
             return (getContiguousOffset() + getContiguousSize());
         }
@@ -146,10 +247,29 @@ public:
 
         std::shared_ptr<MemorySpace> memSpace;
         Clock_T allocated;
+        /// Relative offset of the MemoryPlane in the MemorySpace (in words)
         unsigned int offset;
+        /// Number of channels, or memory blocks, *in this MemoryPlane*,
+        /// of \p count lines of size \p length.
+        /// In the case of concatenation, there can be multiple overlapping MemoryPlane
+        /// with different size, like NHWC = NHW(C1+C2):
+        /// - MemoryPlane#1: \p size = C1 and \p stride = C=C1+C2
+        /// - MemoryPlane#2: \p size = C2 and \p stride = C=C1+C2
+        ///                  (with an additional relative offset of +C1)
+        /// By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning
+        /// there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**.
+        /// In this case, \p size is the total size of the MemoryPlane (H*W*C, in words).
         unsigned int size;
+        /// Number of channels, or memory blocks *in total*,
+        /// of \p count lines of size \p length (the C in NHWC).
+        /// There should be C blocks of H*W size.
         unsigned int stride;
+        /// Size of an elementary, contiguous and non-breakable, memory line 
+        /// (the W in NHWC), in words. A MemoryPlane wrapping cannot occur in
+        /// the middle of a memory line.
         unsigned int length;
+        /// Number of memory lines of size \p length constituting a memory block
+        /// (the H in NHWC). The size of a memory block is H*W.
         unsigned int count;
     };
 
@@ -182,16 +302,7 @@ public:
                         const std::shared_ptr<MemorySpace>& p1);
     };
 
-    struct CompByNodeName {
-        bool operator()(const std::shared_ptr<Node>& lhs,
-                        const std::shared_ptr<Node>& rhs) const
-        {
-            return lhs->name() < rhs->name();
-        }
-    };
-
-    typedef std::map<std::shared_ptr<Node>, std::vector<MemoryPlane>,
-        CompByNodeName> MemMap_T;
+    typedef std::map<std::shared_ptr<Node>, std::vector<MemoryPlane>> MemMap_T;
 
 public:
     MemoryManager(): mClock(0) {}
diff --git a/include/aidge/scheduler/ProdConso.hpp b/include/aidge/scheduler/ProdConso.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a7c0ed5ae73d1f891744e835f0da5ad14a37f850
--- /dev/null
+++ b/include/aidge/scheduler/ProdConso.hpp
@@ -0,0 +1,89 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_SCHEDULER_PRODCONSO_H_
+#define AIDGE_SCHEDULER_PRODCONSO_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/utils/Types.h"
+#include "aidge/data/Elts.hpp"
+
+namespace Aidge {
+class Operator;
+
+class ProdConso {
+public:
+    ProdConso(const Operator& op, bool inPlace = false);
+
+    static std::unique_ptr<ProdConso> defaultModel(const Operator& op) {
+        return std::make_unique<ProdConso>(op, false);
+    }
+
+    static std::unique_ptr<ProdConso> inPlaceModel(const Operator& op) {
+        return std::make_unique<ProdConso>(op, true);
+    }
+
+    /**
+     * @brief Minimum amount of data from a specific input required by the
+     * implementation to be run.
+     *
+     * @param inputIdx Index of the input analysed.
+     * @return std::size_t
+     */
+    virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
+
+    // Amount of input data that cannot be overwritten during the execution.
+    virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
+
+    // Memory required at an output for a given input size.
+    virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
+
+    /**
+     * @brief Total amount of consumed data from a specific input.
+     *
+     * @param inputIdx Index of the input analysed.
+     * @return DimSize_t
+     */
+    virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
+
+    /**
+     * @brief Total amount of produced data ready to be used on a specific output.
+     *
+     * @param outputIdx Index of the output analysed.
+     * @return DimSize_t
+     */
+    virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
+
+    /**
+     * @brief Update the Consumer-Producer system by simulating the consumption and production of I/O.
+     *
+     */
+    virtual void updateConsummerProducer();
+
+    /**
+     * @brief Reset the Consumer-Producer system.
+     *
+     */
+    virtual void resetConsummerProducer();
+
+    virtual ~ProdConso() = default;
+
+protected:
+    const Operator &mOp;
+    const bool mInPlace;
+    std::vector<Elts_t> mNbConsumedData;
+    std::vector<Elts_t> mNbProducedData;
+};
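+
+// Illustrative usage (a sketch, not part of this patch): an implementation
+// whose kernel can safely overwrite its input may opt into the in-place
+// producer-consumer model:
+//   std::unique_ptr<ProdConso> prodConso = ProdConso::inPlaceModel(op);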
+} // namespace Aidge
+
+#endif /* AIDGE_SCHEDULER_PRODCONSO_H_ */
diff --git a/include/aidge/scheduler/SequentialScheduler.hpp b/include/aidge/scheduler/SequentialScheduler.hpp
index a7929fde8a2affdd562d70d11a7c809aaf3357d0..35dafead6dc424550df7d83d54f5ec998c3b4d86 100644
--- a/include/aidge/scheduler/SequentialScheduler.hpp
+++ b/include/aidge/scheduler/SequentialScheduler.hpp
@@ -54,7 +54,7 @@ public:
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    void backward(bool instantiateGrad = true);
+    void backward();
 
 private:
     SchedulingPolicy mSchedulingPolicy;
diff --git a/include/aidge/stimuli/Stimulus.hpp b/include/aidge/stimuli/Stimulus.hpp
index 80e7c76d4857f577f30b90588f4c3998be80bdb8..3def790b65f441c567e5d43150f465233cb64557 100644
--- a/include/aidge/stimuli/Stimulus.hpp
+++ b/include/aidge/stimuli/Stimulus.hpp
@@ -26,7 +26,7 @@ namespace Aidge {
  * @brief Stimulus. A class wrapping a data sample. Stimulus has two functioning modes. The first mode enables to load data samples from a dataPath and optionally store the data in-memory. The second mode enables to store a data sample that was already loaded in memory.
  * @details When Stimulus is used in the first mode, the loading function is determined automatically based on the backend and the file extension.
  */
-class Stimulus : public Registrable<Stimulus, std::tuple<std::string, std::string>, std::unique_ptr<StimulusImpl>(const std::string&)> {
+class Stimulus : public Registrable<Stimulus, std::tuple<std::string, std::string>, std::function<std::unique_ptr<StimulusImpl>(const std::string&)>> {
 private:
     /// Stimulus data path
     const std::string mDataPath;
diff --git a/include/aidge/utils/ArrayHelpers.hpp b/include/aidge/utils/ArrayHelpers.hpp
index 4999ea53a11e0c2784ed4ae40243b18aabcda218..6648c654d28197dc018b94e8fa300366af52db4a 100644
--- a/include/aidge/utils/ArrayHelpers.hpp
+++ b/include/aidge/utils/ArrayHelpers.hpp
@@ -103,6 +103,8 @@ constexpr std::array<T, N + 1> append(T t, std::array<T, N> a) {
 // Generic helper for initializing a Tensor
 template <typename T>
 struct Vector {
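+    // Construct from a vector of T, or convert element-wise from a vector of
+    // any element type U convertible to T.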
+    Vector(const std::vector<T>& data_) : data(data_) {}
+    template <typename U> Vector(const std::vector<U>& data_) : data(data_.begin(), data_.end()) {}
     std::vector<T> data;
 };
 
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index 927686cfd5cca910c5ffb25364ae4bc971ad18bf..cf71ed0b5953fa1759e04c66311d3d829a603a01 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -12,15 +12,17 @@
 #ifndef AIDGE_CORE_UTILS_ATTRIBUTES_H_
 #define AIDGE_CORE_UTILS_ATTRIBUTES_H_
 
-#ifdef PYBIND
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#endif
-#include <vector>
 #include <string>
 #include <set>
+#include <map>
+
+#include "aidge/utils/future_std/any.hpp"
 
 #ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <fmt/format.h>
+
 namespace py = pybind11;
 #endif
 
@@ -36,6 +38,7 @@ namespace Aidge {
 template<class T, std::size_t N>
 constexpr std::size_t size(T (&)[N]) { return N; }
 
+
 /* This abstract class makes it possible to avoid binding Attributes directly.
 *  Otherwise we would need to bind every possible template instantiation of Attributes.
 *  Every operator can access the methods of this class by inheriting from
@@ -48,14 +51,14 @@ public:
      * @param name Name of the attribute to check.
      * @return bool True if the attribute exists, false otherwise.
     */
-    virtual bool hasAttr(const std::string& name) const = 0;
+    virtual bool hasAttr(const std::string& /*name*/) const = 0;
 
     /**
      * @brief Get the (implementation defined) name of the type of an attribute, returned by std::type_info::name.
      * @param name Name of the attribute.
      * @return std::string Name of the type as returned by std::type_info::name.
     */
-    virtual std::string getAttrType(const std::string& name) const = 0;
+    virtual std::string getAttrType(const std::string& /*name*/) const = 0;
 
     /**
      * @brief Get the attribute's name list.
@@ -63,18 +66,30 @@ public:
     */
     virtual std::set<std::string> getAttrsName() const = 0;
 
+    /// @brief Get all attributes as a name/value map.
+    virtual std::map<std::string, future_std::any> getAttrs() const = 0;
+
 #ifdef PYBIND
+    virtual bool hasAttrPy(const std::string& name) const = 0;
+
     /* Bindable get function, does not require any templating.
     *  This is thanks to py::object which allows the function to
     *  be agnostic of its return type.
     */
-    virtual py::object getAttrPy(const std::string& name) const = 0;
+    virtual py::object getAttrPy(const std::string& name) const = 0;
     /* Bindable set function, does not require any templating.
     *  This is thanks to py::object which allows the function to
     *  be agnostic of the ``value`` type.
     */
-    virtual void setAttrPy(const std::string& name, py::object&& value) = 0;
+    virtual void setAttrPy(const std::string& /*name*/, py::object&& /*value*/) = 0;
+
+    virtual std::string str() const = 0;
+
+    virtual std::string repr() const = 0;
+
+    virtual py::dict dict() const = 0;
+
 #endif
+
     virtual ~Attributes() {}
 };
 }
diff --git a/include/aidge/utils/Directories.hpp b/include/aidge/utils/Directories.hpp
index 3bc07b9dd58e472096102c1b0c66971164d632a3..ca49e1b57cc5d01f9f0ff7fe8dc85520697c6821 100644
--- a/include/aidge/utils/Directories.hpp
+++ b/include/aidge/utils/Directories.hpp
@@ -14,11 +14,22 @@
 #define AIDGE_DIRECTORIES_H_
 
 
-#include <string>  // std::string
-#include <sstream> // std::stringstream
+#include <algorithm>
+#include <errno.h>
 #include <iostream>
+#include <sstream> // std::stringstream
+#include <string>  // std::string
 #include <sys/stat.h>
-#include <errno.h>
+#ifndef _S_ISTYPE
+#define _S_ISTYPE(mode, mask)  (((mode) & _S_IFMT) == (mask))
+#endif
+#ifndef S_ISREG
+#define S_ISREG(mode) _S_ISTYPE((mode), _S_IFREG)
+#endif
+#ifndef S_ISDIR
+#define S_ISDIR(mode) _S_ISTYPE((mode), _S_IFDIR)
+#endif
+
 
 #ifdef WIN32
 #include <direct.h>
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 113377b33d9827c3428eeb0adc92111f75c22abb..04ed58f7e636d6a0d528f1946ead110857312576 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -18,6 +18,7 @@
 #include <typeinfo>
 #include <cassert>
 #include <string>
+#include <typeindex>
 
 #include "aidge/utils/future_std/any.hpp"
 #include "aidge/utils/Attributes.hpp"
@@ -38,6 +39,9 @@ namespace Aidge {
 ///\todo managing complex types or excluding non-trivial, non-aggregate types
 class DynamicAttributes : public Attributes {
 public:
+    DynamicAttributes() = default;
+    DynamicAttributes(const std::map<std::string, future_std::any>& attrs): mAttrs(attrs) {}
+
     /**
      * \brief Returning an Attribute identified by its name
      * \tparam T expected Attribute type
@@ -46,38 +50,51 @@ public:
      *  exist
      * \note at() throws if the Attribute does not exist; use find() to test for Attribute existence
      */
-    template<class T> T& getAttr(const std::string& name)
+    template<class T> const T& getAttr(const std::string& name) const
     {
+        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
+            [](const future_std::any& lhs, const future_std::any& rhs) {
 #ifdef PYBIND
-        // If attribute does not exist in C++, it might have been created or modified in Python
-        auto it = mAttrs.find(name);
-        if (it == mAttrs.end()) {
-            auto itPy = mAttrsPy.find(name);
-            if (itPy != mAttrsPy.end()) {
-                // Insert the attribute back in C++
-                mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
-            }
-        }
+                if (lhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
+                }
+                else if (rhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
+                }
+                else
 #endif
+                {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
+                }
+            }));
 
-        return future_std::any_cast<T&>(mAttrs.at(name));
-    }
-
-    template<class T> const T& getAttr(const std::string& name) const
-    {
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
 #ifdef PYBIND
-        // If attribute does not exist in C++, it might have been created or modified in Python
-        auto it = mAttrs.find(name);
-        if (it == mAttrs.end()) {
-            auto itPy = mAttrsPy.find(name);
-            if (itPy != mAttrsPy.end()) {
-                // Insert the attribute back in C++
-                mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
+            // If attribute does not exist in C++, it might have been created or modified in Python
+            auto it = mAttrs.find(name);
+            if (it == mAttrs.end()) {
+                auto itPy = mAttrsPy.find(name);
+                if (itPy != mAttrsPy.end()) {
+                    // Insert the attribute back in C++
+                    mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
+                }
             }
-        }
 #endif
 
-        return future_std::any_cast<const T&>(mAttrs.at(name));
+            return future_std::any_cast<const T&>(mAttrs.at(name));
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            return future_std::any_cast<const DynamicAttributes&>(mAttrs.at(ns)).getAttr<T>(nsName);
+        }
+    }
+
+    template<class T> T& getAttr(const std::string& name) {
+        // Scott Meyers' solution to avoid code duplication
+        return const_cast<T&>(
+            static_cast<const DynamicAttributes&>(*this).getAttr<T>(name));
     }
 
     ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
@@ -86,16 +103,42 @@ public:
     ///\param value Attribute value
     template<class T> void addAttr(const std::string& name, const T& value)
     {
-        const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
-        AIDGE_ASSERT(res.second, "attribute already exists");
+        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
+            [](const future_std::any& lhs, const future_std::any& rhs) {
+#ifdef PYBIND
+                if (lhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
+                }
+                else if (rhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
+                }
+                else
+#endif
+                {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
+                }
+            }));
+
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
+            AIDGE_ASSERT(res.second, "addAttr(): attribute \"{}\" already exists. Use setAttr() if this is expected.", name);
 
 #ifdef PYBIND
-        // We cannot handle Python object if the Python interpreter is not running
-        if (Py_IsInitialized()) {
-            // Keep a copy of the attribute in py::object that is updated everytime
-            mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
-        }
+            // We cannot handle Python object if the Python interpreter is not running
+            if (Py_IsInitialized()) {
+            // Keep a copy of the attribute in py::object that is updated every time
+                const auto& resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+                AIDGE_ASSERT(resPy.second, "addAttr(): attribute \"{}\" already exists (added in Python). Use setAttr() if this is expected.", name);
+            }
 #endif
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            const auto& res = mAttrs.emplace(std::make_pair(ns, future_std::any(DynamicAttributes())));
+            future_std::any_cast<DynamicAttributes&>(res.first->second).addAttr(nsName, value);
+        }
     }
 
     ///\brief Set an Attribute value, identified by its name. If it already exists, its value (and type, if different) is changed.
@@ -104,77 +147,197 @@ public:
     ///\param value Attribute value
     template<class T> void setAttr(const std::string& name, const T& value)
     {
-        auto res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
-        if (!res.second)
-            res.first->second = future_std::any(value);
+        mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(T),
+            [](const future_std::any& lhs, const future_std::any& rhs) {
+#ifdef PYBIND
+                if (lhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
+                }
+                else if (rhs.type() == typeid(py::object)) {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
+                }
+                else
+#endif
+                {
+                    return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
+                }
+            }));
+
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            auto res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
+            if (!res.second)
+                res.first->second = future_std::any(value);
 
 #ifdef PYBIND
-        // We cannot handle Python object if the Python interpreter is not running
-        if (Py_IsInitialized()) {
-            // Keep a copy of the attribute in py::object that is updated everytime
-            auto resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
-            if (!resPy.second)
-                resPy.first->second = std::move(py::cast(value));
-        }
+            // We cannot handle Python object if the Python interpreter is not running
+            if (Py_IsInitialized()) {
+            // Keep a copy of the attribute in py::object that is updated every time
+                auto resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+                if (!resPy.second)
+                    resPy.first->second = std::move(py::cast(value));
+            }
 #endif
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            auto res = mAttrs.emplace(std::make_pair(ns, future_std::any(DynamicAttributes())));
+            future_std::any_cast<DynamicAttributes&>(res.first->second).setAttr<T>(nsName, value);
+        }
     }
 
     void delAttr(const std::string& name) {
-        mAttrs.erase(name);
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            mAttrs.erase(name);
 #ifdef PYBIND
-        mAttrsPy.erase(name);
+            mAttrsPy.erase(name);
 #endif
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            future_std::any_cast<DynamicAttributes&>(mAttrs.at(ns)).delAttr(nsName);
+        }
     }
 
 #ifdef PYBIND
     void addAttrPy(const std::string& name, py::object&& value)
     {
-        auto it = mAttrs.find(name);
-        AIDGE_ASSERT(it == mAttrs.end(), "attribute already exists");
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            auto it = mAttrs.find(name);
+            AIDGE_ASSERT(it == mAttrs.end(), "add_attr(): attribute \"{}\" already exists (added in C++). Use set_attr() if this is expected.", name);
+
+            const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
+            AIDGE_ASSERT(res.second, "add_attr(): attribute \"{}\" already exists. Use set_attr() if this is expected.", name);
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes()));
 
-        const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
-        AIDGE_ASSERT(res.second, "attribute already exists");
+            future_std::any_cast<DynamicAttributes&>(res.first->second).addAttrPy(nsName, std::move(value));
+        }
     }
 
     void setAttrPy(const std::string& name, py::object&& value) override final
     {
-        auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
-        if (!resPy.second)
-            resPy.first->second = std::move(value);
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
+            if (!resPy.second)
+                resPy.first->second = std::move(value);
+
+            // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
+            mAttrs.erase(name);
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes()));
 
-        // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
-        mAttrs.erase(name);
+            future_std::any_cast<DynamicAttributes&>(res.first->second).setAttrPy(nsName, std::move(value));
+        }
     }
+
+    py::dict dict() const override {
+        py::dict attributes;
+        for (const auto& elt : mAttrs) {
+            if (elt.second.type() == typeid(DynamicAttributes)) {
+                attributes[elt.first.c_str()] = future_std::any_cast<const DynamicAttributes&>(elt.second).dict();
+            }
+        }
+        for (const auto& elt : mAttrsPy) {
+            attributes[elt.first.c_str()] = elt.second;
+        }
+        return attributes;
+    }
+
+    std::string str() const override {
+        return repr();
+    }
+
+    std::string repr() const override {
+        // Use py::str to obtain the string representation of the py::dict
+        return fmt::format("AttrDict({})", static_cast<std::string>(py::str(dict())));
+    }
+
 #endif
 
     //////////////////////////////////////
     ///     Generic Attributes API
     //////////////////////////////////////
     bool hasAttr(const std::string& name) const override final {
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
 #ifdef PYBIND
-        // Attributes might have been created in Python, the second condition is necessary.
-        return (mAttrs.find(name) != mAttrs.end() || mAttrsPy.find(name) != mAttrsPy.end());
+            return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
+
 #else
-        return (mAttrs.find(name) != mAttrs.end());
+            return (mAttrs.find(name) != mAttrs.cend());
 #endif
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto it = mAttrs.find(ns);
+            if (it != mAttrs.cend()) {
+                const auto nsName = name.substr(dot + 1);
+                return future_std::any_cast<const DynamicAttributes&>(it->second).hasAttr(nsName);
+            }
+            else {
+                return false;
+            }
+        }
+    }
+
+#ifdef PYBIND
+    bool hasAttrPy(const std::string& name) const override final {
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            // Attributes might have been created in Python, the second condition is necessary.
+            return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto it = mAttrs.find(ns);
+            if (it != mAttrs.cend()) {
+                const auto nsName = name.substr(dot + 1);
+                return future_std::any_cast<const DynamicAttributes&>(it->second).hasAttrPy(nsName);
+            }
+            else {
+                return false;
+            }
+        }
     }
+#endif
 
     std::string getAttrType(const std::string& name) const override final {
         // In order to remain consistent between C++ and Python, with or without PyBind, the name of the type is:
         // - C-style for C++ created attributes
         // - Python-style for Python created attributes
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
 #ifdef PYBIND
-        // If attribute does not exist in C++, it might have been created in Python
-        auto it = mAttrs.find(name);
-        if (it == mAttrs.end()) {
-            auto itPy = mAttrsPy.find(name);
-            if (itPy != mAttrsPy.end()) {
-                return std::string(Py_TYPE(itPy->second.ptr())->tp_name);
+            // If attribute does not exist in C++, it might have been created in Python
+            auto it = mAttrs.find(name);
+            if (it == mAttrs.end()) {
+                auto itPy = mAttrsPy.find(name);
+                if (itPy != mAttrsPy.end()) {
+                    return std::string(Py_TYPE(itPy->second.ptr())->tp_name);
+                }
             }
-        }
 #endif
 
-        return mAttrs.at(name).type().name();
+            return mAttrs.at(name).type().name();
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            return future_std::any_cast<const DynamicAttributes&>(mAttrs.at(ns)).getAttrType(nsName);
+        }
     }
 
     std::set<std::string> getAttrsName() const override final {
@@ -195,13 +358,67 @@ public:
      * generic type caster for std::any is not feasible.
      * The strategy here is to keep a copy of each attribute in py::object that is updated every time.
     */
-    py::object getAttrPy(const std::string& name) const override final {
-        return mAttrsPy.at(name);
+    inline py::object getAttrPy(const std::string& name) const override final {
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            auto itPy = mAttrsPy.find(name);
+            if (itPy == mAttrsPy.end()) {
+                // Attribute may be a namespace
+                auto it = mAttrs.find(name);
+                AIDGE_ASSERT(it != mAttrs.end() && it->second.type() == typeid(DynamicAttributes), "get_attr(): attribute \"{}\" not found", name);
+                return py::cast(future_std::any_cast<const DynamicAttributes&>(it->second));
+            }
+            else {
+                return itPy->second;
+            }
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            return future_std::any_cast<const DynamicAttributes&>(mAttrs.at(ns)).getAttrPy(nsName);
+        }
     };
 #endif
 
+    future_std::any getAny(const std::string& name) const
+    {
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+#ifdef PYBIND
+            // If attribute does not exist in C++, it might have been created or modified in Python
+            auto it = mAttrs.find(name);
+            if (it == mAttrs.end()) {
+                auto itPy = mAttrsPy.find(name);
+                if (itPy != mAttrsPy.end()) {
+                    // Attribute exists in Python, but its type is not known
+                    // Return a std::any of py::object, which will be comparable
+                    mAnyCompare.emplace(std::make_pair<std::type_index, bool(*)(const future_std::any&, const future_std::any&)>(typeid(py::object),
+                        [](const future_std::any& lhs, const future_std::any& rhs) {
+                            return (future_std::any_cast<py::object>(lhs) < future_std::any_cast<py::object>(rhs));
+                        }));
+
+                    return future_std::any(itPy->second);
+                }
+            }
+#endif
+
+            return mAttrs.at(name);
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            return future_std::any_cast<const DynamicAttributes&>(mAttrs.at(ns)).getAny(nsName);
+        }
+    }
+
+    std::map<std::string, future_std::any> getAttrs() const override {
+        return mAttrs;
+    }
+
     virtual ~DynamicAttributes() {}
 
+    friend bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs);
+
 private:
 #ifdef PYBIND
     // Stores C++ attributes (copy) and Python-only attributes
@@ -217,8 +434,19 @@ private:
 #else
     std::map<std::string, future_std::any> mAttrs;
 #endif
+
+public:
+    // Stores the comparison function for each attribute type ever used
+    static std::map<std::type_index, bool(*)(const future_std::any&, const future_std::any&)> mAnyCompare;
 };
 
+inline bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs) {
+    return (lhs.mAttrs < rhs.mAttrs);
+}
+} // namespace Aidge
+
+namespace future_std {
+bool operator<(const future_std::any& lhs, const future_std::any& rhs);
 }
 
 #endif /* AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_ */
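To make the new dotted-name convention concrete, here is a small, hedged usage sketch (not from the patch): a name containing a dot is split at the first dot, and the prefix addresses a nested DynamicAttributes acting as a namespace.

#include "aidge/utils/DynamicAttributes.hpp"

int main() {
    Aidge::DynamicAttributes attrs;
    attrs.addAttr<int>("mem.size", 1024);             // implicitly creates namespace "mem"
    attrs.setAttr<int>("mem.size", 2048);             // updates the nested value
    const bool found = attrs.hasAttr("mem.size");     // true, recurses into "mem"
    const int size = attrs.getAttr<int>("mem.size");  // 2048
    attrs.delAttr("mem.size");                        // removes only the nested entry
    (void)found; (void)size;
    return 0;
}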
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index a01f81629c8425f9d860bf1ea03bfe421dbd04fa..6b2ace1c6aa013ae81e5144665e2edde830cdc54 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -14,6 +14,7 @@
 #define AIDGE_LOG_H_
 
 #include <memory>
+#include <vector>
 
 #include <fmt/format.h>
 #include <fmt/ranges.h>
@@ -73,7 +74,7 @@ public:
      * inducing no runtime overhead for Release.
     */
     template <typename... Args>
-    constexpr static void debug(Args&&... args) {
+    static void debug(Args&&... args) {
 #ifndef NDEBUG
         // only when compiled in Debug
         log(Debug, fmt::format(std::forward<Args>(args)...));
@@ -89,7 +90,7 @@ public:
      * performed nominally.
     */
     template <typename... Args>
-    constexpr static void info(Args&&... args) {
+    static void info(Args&&... args) {
         log(Info, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -100,7 +101,7 @@ public:
      * performed normally.
     */
     template <typename... Args>
-    constexpr static void notice(Args&&... args) {
+    static void notice(Args&&... args) {
         log(Notice, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -111,7 +112,7 @@ public:
      * still provide an exploitable result.
     */
     template <typename... Args>
-    constexpr static void warn(Args&&... args) {
+    static void warn(Args&&... args) {
         log(Warn, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -122,7 +123,7 @@ public:
      * further operations.
     */
     template <typename... Args>
-    constexpr static void error(Args&&... args) {
+    static void error(Args&&... args) {
         log(Error, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -133,17 +134,25 @@ public:
      * impossible.
     */
     template <typename... Args>
-    constexpr static void fatal(Args&&... args) {
+    static void fatal(Args&&... args) {
         log(Fatal, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
      * Set the minimum log level displayed in the console.
     */
-    constexpr static void setConsoleLevel(Level level) {
+    static void setConsoleLevel(Level level) {
         mConsoleLevel = level;
     }
 
+    /**
+     * Enable or disable colored output on the console.
+     * Colors are enabled by default.
+    */
+    static void setConsoleColor(bool enabled) {
+        mConsoleColor = enabled;
+    }
+
     /**
      * Set the minimum log level saved in the log file.
     */
@@ -172,6 +181,7 @@ private:
     static void initFile(const std::string& fileName);
 
     static Level mConsoleLevel;
+    static bool mConsoleColor;
     static Level mFileLevel;
     static std::string mFileName;
     static std::unique_ptr<FILE, decltype(&std::fclose)> mFile;
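A minimal sketch of the new console-color switch (assuming only the Log API shown in this header):

#include "aidge/utils/Log.hpp"

int main() {
    // Disable ANSI colors, e.g. when the output is redirected to a file
    Aidge::Log::setConsoleColor(false);
    Aidge::Log::setConsoleLevel(Aidge::Log::Info);
    Aidge::Log::info("colored output enabled: {}", false);
    return 0;
}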
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index b0acdaff7cb75afec78f0564fb95c98f2b32f47b..0468ae2616997c306bbd475fe6eb73cc033b0bcc 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -23,7 +23,7 @@
 
 #include <functional>
 #include <map>
-#include <vector>
+#include <set>
 
 namespace Aidge {
 #ifdef PYBIND
@@ -37,21 +37,21 @@ template <class DerivedClass, class Key, class Func> // curiously recurring temp
 class Registrable {
 public:
     typedef Key registrar_key;
-    typedef std::function<Func> registrar_type;
+    typedef Func registrar_type;
 
-    static std::map<Key, std::function<Func>>& registry()
+    static std::map<Key, Func>& registry()
     {
         #ifdef PYBIND
         #define _CRT_SECURE_NO_WARNINGS
         if (Py_IsInitialized()){
             std::string name = std::string("registrar_")+typeid(Registrable<DerivedClass, Key, Func>).name();
-            static auto shared_data = reinterpret_cast<std::map<Key, std::function<Func>> *>(py::get_shared_data(name));
+            static auto shared_data = reinterpret_cast<std::map<Key, Func> *>(py::get_shared_data(name));
             if (!shared_data)
-                shared_data = static_cast<std::map<Key, std::function<Func>> *>(py::set_shared_data(name, new std::map<Key, std::function<Func>>()));
+                shared_data = static_cast<std::map<Key, Func> *>(py::set_shared_data(name, new std::map<Key, Func>()));
             return *shared_data;
         }
         #endif // PYBIND
-        static std::map<Key, std::function<Func>> rMap;
+        static std::map<Key, Func> rMap;
         return rMap;
     }
 
@@ -75,16 +75,14 @@ struct Registrar {
         return (C::registry().find(key) != C::registry().cend());
     }
 
-    static auto create(const registrar_key& key){
-        const auto it = C::registry().find(key);
-        AIDGE_ASSERT(it != C::registry().cend(), "missing or invalid registrar key: {}\nDid you include/import the corresponding module?", key);
-
-        return (*it).second;
+    static auto create(const registrar_key& key) {
+        AIDGE_ASSERT(exists(key), "missing or invalid registrar key: {} for registrable object {}\nDid you include/import the corresponding module?\nIf so, it is possible that the object is not yet supported.", key, typeid(C).name());
+        return C::registry().at(key);
     }
-    static std::vector<registrar_key> getKeys(){
-        std::vector<registrar_key> keys;
+    static std::set<registrar_key> getKeys(){
+        std::set<registrar_key> keys;
         for(const auto& keyValue : C::registry())
-            keys.push_back(keyValue.first);
+            keys.insert(keyValue.first);
         return keys;
     }
 };
@@ -103,11 +101,14 @@ template <class C>
 void declare_registrable(py::module& m, const std::string& class_name){
     typedef typename C::registrar_key registrar_key;
     typedef typename C::registrar_type registrar_type;
-    m.def(("register_"+ class_name).c_str(), [](registrar_key& key, registrar_type function){
+    m.def(("register_"+ class_name).c_str(), [](const registrar_key& key, registrar_type function){
         Registrar<C>(key, function);
     })
     .def(("get_keys_"+ class_name).c_str(), [](){
         return Registrar<C>::getKeys();
+    })
+    .def(("get_key_value_"+ class_name).c_str(), [](const registrar_key& key){
+        return Registrar<C>::create(key);
     });
 }
 #endif
@@ -143,4 +144,13 @@ void declare_registrable(py::module& m, const std::string& class_name){
 
 }
 
+#define CONCAT(a, b) CONCAT_INNER(a, b)
+#define CONCAT_INNER(a, b) a ## b
+
+#define REGISTRAR(cls, ...) \
+    namespace { \
+    static Registrar<cls> CONCAT(CONCAT(aidge_register_ , cls), __COUNTER__)(__VA_ARGS__); \
+    } \
+    static_assert(true, "")
+
 #endif //AIDGE_CORE_UTILS_REGISTRAR_H_
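As an illustration of the new REGISTRAR macro, a hedged, self-contained sketch; `DoubleFn` and its key are made-up placeholders, and the Registrar constructor is assumed to take (key, value) as in declare_registrable above:

#include <functional>
#include <string>
#include "aidge/utils/Registrar.hpp"

namespace Aidge {
// A toy registrable type mapping a string key to an int -> int function
class DoubleFn : public Registrable<DoubleFn, std::string, std::function<int(int)>> {};

// Expands to an anonymous-namespace Registrar<DoubleFn> with a unique name
// thanks to __COUNTER__, registering the lambda under the key "double".
REGISTRAR(DoubleFn, "double", [](int x) { return 2 * x; });
} // namespace Aidge

int main() {
    const int y = Aidge::Registrar<Aidge::DoubleFn>::create("double")(21); // 42
    (void)y;
    return 0;
}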
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 6bf59155373cf73d158fce4eb5bda58f7d279e69..414381891ce52046ee7c2df5b82a17e1314773cd 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -12,11 +12,16 @@
 #ifndef AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
 #define AIDGE_CORE_UTILS_STATICATTRIBUTES_H_
 
-#include <tuple>
+#include <array>
 #include <cassert>
 #include <cstddef>
+#include <string>
+#include <tuple>
 #include <typeinfo>
-#include <array>
+
+#ifdef PYBIND
+#include <fmt/format.h>
+#endif
 
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
@@ -149,18 +154,24 @@ public:
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
     }
 
-    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
-    constexpr typename std::enable_if<(SIZE > 0), const std::type_info&>::type getAttrType(std::size_t i) const {
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value,
+                std::enable_if_t<(SIZE > 0), bool> = true>
+    constexpr const std::type_info& getAttrType(std::size_t i) const {
         if (i == SIZE-1) {
-            return typeid(typename std::tuple_element<SIZE-1,std::tuple<T...>>::type);
+            // Workaround for NVCC from 12.2.1 to 12.4.1
+            // error: no suitable constructor exists to convert from "const char *" to "std::type_info"
+            typename std::tuple_element<SIZE-1,std::tuple<T...>>::type dummy{};
+            return typeid(dummy);
+            //return typeid(typename std::tuple_element<SIZE-1,std::tuple<T...>>::type);
         }
         else {
             return getAttrType<SIZE-1>(i);
         }
     }
 
-    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value>
-    [[noreturn]] typename std::enable_if<(SIZE == 0), const std::type_info&>::type getAttrType(std::size_t /*i*/) const {
+    template <std::size_t SIZE = std::tuple_size<std::tuple<T...>>::value,
+                std::enable_if_t<(SIZE == 0), bool> = true>
+    [[noreturn]] const std::type_info& getAttrType(std::size_t /*i*/) const {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute not found");
     }
 
@@ -168,6 +179,12 @@ public:
         return mAttrs;
     }
 
+    virtual std::map<std::string, future_std::any> getAttrs() const override {
+        std::map<std::string, future_std::any> attrs;
+        appendAttr(mAttrs, attrs);
+        return attrs;
+    }
+
     //////////////////////////////////////
     ///     Generic Attributes API
     //////////////////////////////////////
@@ -182,6 +199,18 @@ public:
         return false;
     }
 
+#ifdef PYBIND
+    bool hasAttrPy(const std::string& name) const override final {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+#endif
+
     // Runtime type access with name
     std::string getAttrType(const std::string& name) const override final {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
@@ -212,22 +241,31 @@ public:
     static std::set<std::string> staticGetAttrsName() {
         std::set<std::string> attrsName;
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            attrsName.insert(EnumStrings<ATTRS_ENUM>::data[i]);
+            attrsName.insert(std::string(EnumStrings<ATTRS_ENUM>::data[i]));
         }
         return attrsName;
     }
 
 
     py::object getAttrPy(const std::string& name) const override {
+        if (name == "__dict__") {
+            return py::none();
+        }
+
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
                 // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
-                // Normal accessor would not work has we convert the tuple to a py::object which can be anything
+                // Normal accessor would not work as we convert the tuple to a py::object which can be anything
                 return py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
             }
         }
-
-        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name);
+        // if (name == "_ipython_canary_method_should_not_exist_") {
+            // fmt::print("dict call {}", py::str(dict().attr("__getitem__")(name)).cast<std::string>());
+        // }
+        // ipython tries special methods and attributes (e.g "_ipython_canary_method_should_not_exist_") that require to throw
+        throw py::attribute_error(fmt::format("attribute \"{}\" not found.", name));
+        // AIDGE_THROW_OR_ABORT(py::key_error, "attribute \"{}\" not found in Python attribute getter", name);
+        // return py::none();
     }
 
 
@@ -242,8 +280,40 @@ public:
                 return;
             }
         }
-        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name);
+        throw py::attribute_error(fmt::format("attribute \"{}\" not found.", name));
+    }
+
+    py::dict dict() const override {
+        py::dict attributes;
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
+            // Normal accessor would not work as we convert the tuple to a py::object which can be anything
+            attributes[EnumStrings<ATTRS_ENUM>::data[i]] = py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
+        }
+        return attributes;
     }
+
+    std::string str() const override {
+        return repr();
+    }
+
+    std::string repr() const override {
+        // Use py::str to obtain the string representation of the py::dict
+        return fmt::format("AttrDict({})", static_cast<std::string>(py::str(dict())));
+    }
+
+    std::size_t len() const {
+        return size(EnumStrings<ATTRS_ENUM>::data);
+    }
+
     #endif
 
 private:
@@ -259,6 +329,15 @@ private:
 
         return false;
     }
+
+    template<std::size_t I = 0, typename... Tp>
+    inline typename std::enable_if<I == sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& /*t*/, std::map<std::string, future_std::any>& /*attrs*/) const {}
+
+    template<std::size_t I = 0, typename... Tp>
+    inline typename std::enable_if<I < sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& t, std::map<std::string, future_std::any>& attrs) const {
+        attrs.insert(std::make_pair(EnumStrings<ATTRS_ENUM>::data[I], future_std::any(std::get<I>(t))));
+        appendAttr<I + 1, Tp...>(t, attrs);
+    }
 
     std::tuple<T...> mAttrs;
 };
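The appendAttr() helper above uses classic enable_if-based tuple recursion; the following standalone sketch (no aidge dependencies; names are illustrative) shows the same technique in isolation:

#include <cstddef>
#include <map>
#include <string>
#include <tuple>
#include <type_traits>

// Base case: past the last tuple element, do nothing
template <std::size_t I = 0, typename... Tp>
typename std::enable_if<I == sizeof...(Tp)>::type
appendAll(const std::tuple<Tp...>&, std::map<std::string, double>&) {}

// Recursive case: record element I, then recurse on I + 1
template <std::size_t I = 0, typename... Tp>
typename std::enable_if<(I < sizeof...(Tp))>::type
appendAll(const std::tuple<Tp...>& t, std::map<std::string, double>& out) {
    out.emplace("attr" + std::to_string(I), static_cast<double>(std::get<I>(t)));
    appendAll<I + 1, Tp...>(t, out);
}

int main() {
    std::map<std::string, double> out;
    appendAll(std::make_tuple(1, 2.5f, 3.0), out); // attr0=1, attr1=2.5, attr2=3
    return 0;
}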
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..b838aca5ee100d182ba88b79f23f3a2ebff9acf3
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,398 @@
+[project]
+name = "aidge_core"
+description="Core algorithms for operators and graph of the AIDGE framework"
+dependencies = [
+    "numpy>=1.21.6",
+    "Jinja2>=3.1.2"
+]
+requires-python = ">= 3.7"
+readme = "README.md"
+license = { file = "LICENSE" }
+classifiers = [
+    "Development Status :: 2 - Pre-Alpha",
+    "Programming Language :: Python :: 3"
+]
+dynamic = ["version"] # defined in tool.setuptools_scm
+
+[project.optional-dependencies]
+test = [
+    "pytest"
+]
+
+[build-system]
+requires = [
+    "setuptools>=64",
+    "setuptools_scm[toml]==7.1.0",
+    "cmake>=3.18.4.post1"
+]
+build-backend = "setuptools.build_meta"
+
+#####################################################
+# SETUPTOOLS
+[tool.setuptools]
+[tool.setuptools.packages.find]
+where = ["."]  # list of folders that contain the packages (["."] by default)
+include = [ # package names should match these glob patterns (["*"] by default)
+    "aidge_core*"
+]
+exclude = [ # exclude packages matching these glob patterns (empty by default)
+    ".unit_tests",
+    ".unit_tests.static",
+    ".aidge_export_aidge.__pycache__",
+    ".aidge_export_aidge.utils.__pycache__",
+]
+
+# SETUPTOOLS_SCM
+[tool.setuptools_scm]
+write_to = "aidge_core/_version.py"
+
+#####################################################
+# CIBUILDWHEEL
+[tool.cibuildwheel]
+build-frontend = "build"
+test-requires = "pytest"
+# WARNING: in the test suite, `test_export.py` used to be skipped
+# because it did not build when the embedded Python interpreter is unavailable,
+# as is the case in cibuildwheel containers.
+# The build system now takes care of this and skips the generation of a standalone
+# executable when it is not possible.
+# The root causes for this conditional build are that 1. the embedded Python interpreter
+# is not always available, and 2. the aidge_core library currently depends on it.
+# This latter dependency may hopefully be removed in the future, simplifying the build.
+test-command = "pytest -v --capture=no {package}/aidge_core/unit_tests"
+# uncomment to run cibuildwheel locally on selected distros
+# build=[
+# "cp38-manylinux_x86_64",
+# "cp39-manylinux_x86_64",
+# "cp38-win_amd64",
+# "cp39-win_amd64",
+# "cp310-win_amd64",
+# ]
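+# For illustration, a hypothetical local invocation (assuming cibuildwheel and
+# a container engine are installed; this is not run by the CI):
+#   pip install cibuildwheel
+#   CIBW_BUILD="cp310-manylinux_x86_64" cibuildwheel --platform linux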
+
+### AIDGE DEPENDENCIES DECLARATION
+[tool.cibuildwheel.environment]
+# aidge_core does not rely on any aidge dependency, hence this string is empty
+AIDGE_DEPENDENCIES = "" # format => "dep_1 dep_2 ... dep_n"
+AIDGE_INSTALL = "/AIDGE_INSTALL_CIBUILDWHEEL"
+[tool.cibuildwheel.linux]
+before-build = [
+    "bash .gitlab/ci/cibuildwheel_build_deps_before_build_wheel.sh /host"
+]
+[tool.cibuildwheel.windows]
+before-build = [
+    "python -m pip debug -v",
+    "powershell -File .\\.gitlab\\ci\\cibuildwheel_build_deps_before_build_wheel.ps1"
+]
+
+
+#####################################################
+# PYLINT
+[tool.pylint.main]
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code.
+extension-pkg-allow-list = ["aidge_core", "torch", "tensorflow"]
+
+# Files or directories to be skipped. They should be base names, not paths.
+ignore = ["CVS"]
+
+# List of module names for which member attributes should not be checked (useful
+# for modules/projects where namespaces are manipulated during runtime and thus
+# existing member attributes cannot be deduced by static analysis). It supports
+# qualified module names, as well as Unix pattern matching.
+ignored-modules = ["aidge_core"]
+
+
+# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
+# number of processors available to use, and will cap the count on Windows to
+# avoid hangs.
+jobs = 1
+
+# Control the amount of potential inferred values when inferring a single object.
+# This can help the performance when dealing with large functions or complex,
+# nested conditions.
+limit-inference-results = 100
+
+# Pickle collected data for later comparisons.
+persistent = true
+
+# Minimum Python version to use for version dependent checks. Will default to the
+# version used to run pylint.
+py-version = "3.7"
+
+# When enabled, pylint would attempt to guess common misconfiguration and emit
+# user-friendly hints instead of false-positive error messages.
+suggestion-mode = true
+
+[tool.pylint.basic]
+# Naming style matching correct argument names.
+argument-naming-style = "snake_case"
+
+# Naming style matching correct attribute names.
+attr-naming-style = "snake_case"
+
+# Bad variable names which should always be refused, separated by a comma.
+bad-names = ["foo", "bar", "baz", "toto", "tutu", "tata"]
+
+# Naming style matching correct class attribute names.
+class-attribute-naming-style = "any"
+
+# Naming style matching correct class constant names.
+class-const-naming-style = "UPPER_CASE"
+
+# Naming style matching correct class names.
+class-naming-style = "PascalCase"
+
+# Naming style matching correct constant names.
+const-naming-style = "UPPER_CASE"
+
+# Minimum line length for functions/classes that require docstrings, shorter ones
+# are exempt.
+docstring-min-length = -1
+
+# Naming style matching correct function names.
+function-naming-style = "snake_case"
+
+# Good variable names which should always be accepted, separated by a comma.
+good-names = ["i", "j", "k", "ex", "Run", "_"]
+
+# Naming style matching correct inline iteration names.
+inlinevar-naming-style = "any"
+
+# Naming style matching correct method names.
+method-naming-style = "snake_case"
+
+# Naming style matching correct module names.
+module-naming-style = "snake_case"
+
+# Regular expression which should only match function or class names that do not
+# require a docstring.
+no-docstring-rgx = "^_"
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties. These
+# decorators are taken in consideration only for invalid-name.
+property-classes = ["abc.abstractproperty"]
+
+# Naming style matching correct variable names.
+variable-naming-style = "snake_case"
+
+[tool.pylint.classes]
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods = ["__init__", "__new__", "setUp", "__post_init__"]
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected = ["_asdict", "_fields", "_replace", "_source", "_make"]
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg = ["cls"]
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg = ["cls"]
+
+[tool.pylint.design]
+# Maximum number of arguments for function / method.
+max-args = 5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes = 7
+
+# Maximum number of boolean expressions in an if statement (see R0916).
+max-bool-expr = 5
+
+# Maximum number of branch for function / method body.
+max-branches = 12
+
+# Maximum number of locals for function / method body.
+max-locals = 15
+
+# Maximum number of parents for a class (see R0901).
+max-parents = 7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods = 20
+
+# Maximum number of return / yield for function / method body.
+max-returns = 6
+
+# Maximum number of statements in function / method body.
+max-statements = 50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods = 2
+
+[tool.pylint.exceptions]
+# Exceptions that will emit a warning when caught.
+overgeneral-exceptions = ["BaseException", "Exception"]
+
+[tool.pylint.format]
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+# expected-line-ending-format =
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines = "^\\s*(# )?<?https?://\\S+>?$"
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren = 4
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string = "    "
+
+# Maximum number of characters on a single line.
+max-line-length = 200
+
+# Maximum number of lines in a module.
+max-module-lines = 1000
+
+[tool.pylint.imports]
+# Force import order to recognize a module as part of a third party library.
+known-third-party = ["enchant"]
+
+[tool.pylint.logging]
+# The type of string formatting that logging methods do. `old` means using %
+# formatting, `new` is for `{}` formatting.
+logging-format-style = "old"
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format.
+logging-modules = ["logging"]
+
+[tool.pylint."messages control"]
+# Only show warnings with the listed confidence levels. Leave empty to show all.
+# Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
+confidence = ["HIGH", "CONTROL_FLOW", "INFERENCE", "INFERENCE_FAILURE", "UNDEFINED"]
+
+# Disable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times (only on the command line, not in the configuration file where
+# it should appear only once). You can also use "--disable=all" to disable
+# everything first and then re-enable specific checks. For example, if you want
+# to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W".
+disable = ["raw-checker-failed", "bad-inline-option", "locally-disabled", "file-ignored", "suppressed-message", "useless-suppression", "deprecated-pragma", "use-symbolic-message-instead", "use-implicit-booleaness-not-comparison-to-string", "use-implicit-booleaness-not-comparison-to-zero", "too-many-locals", "missing-class-docstring", "missing-function-docstring", "too-many-arguments", "protected-access", "too-many-branches", "too-many-ancestors", "wrong-import-order", "wrong-import-position"]
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time (only on the command line, not in the configuration file where it
+# should appear only once). See also the "--disable" option for examples.
+enable = ["c-extension-no-member"]
+
+[tool.pylint.method_args]
+# List of qualified names (i.e., library.method) which require a timeout
+# parameter e.g. 'requests.api.get,requests.api.post'
+timeout-methods = ["requests.api.delete", "requests.api.get", "requests.api.head", "requests.api.options", "requests.api.patch", "requests.api.post", "requests.api.put", "requests.api.request"]
+
+[tool.pylint.miscellaneous]
+# List of note tags to take in consideration, separated by a comma.
+notes = ["FIXME", "XXX", "TODO"]
+
+# Regular expression of note tags to take in consideration.
+# notes-rgx =
+
+[tool.pylint.refactoring]
+# Maximum number of nested blocks for function / method body
+max-nested-blocks = 5
+
+# Complete name of functions that never returns. When checking for inconsistent-
+# return-statements if a never returning function is called then it will be
+# considered as an explicit return statement and no message will be printed.
+never-returning-functions = ["sys.exit", "argparse.parse_error"]
+
+# Let 'consider-using-join' be raised when the separator to join on would be non-
+# empty (resulting in expected fixes of the type: ``"- " + " - ".join(items)``)
+suggest-join-with-non-empty-separator = true
+
+[tool.pylint.reports]
+# Python expression which should return a score less than or equal to 10. You
+# have access to the variables 'fatal', 'error', 'warning', 'refactor',
+# 'convention', and 'info' which contain the number of messages in each category,
+# as well as 'statement' which is the total number of statements analyzed. This
+# score is used by the global evaluation report (RP0004).
+evaluation = "10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)"
+
+# Activate the evaluation score.
+score = true
+
+[tool.pylint.similarities]
+# Comments are removed from the similarity computation
+ignore-comments = true
+
+# Docstrings are removed from the similarity computation
+ignore-docstrings = true
+
+# Minimum lines number of a similarity.
+min-similarity-lines = 4
+
+[tool.pylint.spelling]
+# Limits count of emitted suggestions for spelling mistakes.
+max-spelling-suggestions = 4
+
+# List of comma separated words that should be considered directives if they
+# appear at the beginning of a comment and should not be checked.
+spelling-ignore-comment-directives = "fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:"
+
+[tool.pylint.typecheck]
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators = ["contextlib.contextmanager"]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# class is considered mixin if its name matches the mixin-class-rgx option.
+# Tells whether to warn about missing members when the owner of the attribute is
+# inferred to be None.
+ignore-none = true
+
+# This flag controls whether pylint should warn about no-member and similar
+# checks whenever an opaque object is returned when inferring. The inference can
+# return multiple potential results while evaluating a Python object, but some
+# branches might not be evaluated, which results in partial inference. In that
+# case, it might be useful to still emit no-member and other checks for the rest
+# of the inferred objects.
+ignore-on-opaque-inference = true
+
+# List of symbolic message names to ignore for Mixin members.
+ignored-checks-for-mixins = ["no-member", "not-async-context-manager", "not-context-manager", "attribute-defined-outside-init"]
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes = ["optparse.Values", "thread._local", "_thread._local", "aidge.global_variables", "aidge.cells.abstract_cell.Trainable", "torch", "tensorflow"]
+
+# Show a hint with possible names when a member name was not found. The aspect of
+# finding the hint is based on edit distance.
+missing-member-hint = true
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance = 1
+
+# The total number of similar names that should be taken in consideration when
+# showing a hint for a missing member.
+missing-member-max-choices = 1
+
+# Regex pattern to define which classes are considered mixins.
+mixin-class-rgx = ".*[Mm]ixin"
+
+[tool.pylint.variables]
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables = true
+
+# List of strings which can identify a callback function by name. A callback name
+# must start or end with one of those strings.
+callbacks = ["cb_", "_cb"]
+
+# A regular expression matching the name of dummy variables (i.e. expected to not
+# be used).
+dummy-variables-rgx = "_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_"
+
+# Argument names that match this expression will be ignored.
+ignored-argument-names = "_.*|^ignored_|^unused_"
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules = ["six.moves", "past.builtins", "future.builtins", "builtins", "io"]
diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp
index 6a83805fc1af2e111dd1c9f49c669e0c2f9422aa..04172c3ff68641a9fe0d14f9a326cd17e7002912 100644
--- a/python_binding/backend/pybind_OperatorImpl.cpp
+++ b/python_binding/backend/pybind_OperatorImpl.cpp
@@ -13,6 +13,7 @@
 #include <pybind11/stl.h>
 #include <string>
 
+#include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
 
@@ -31,102 +32,70 @@ public:
         PYBIND11_OVERRIDE(
             void,
             OperatorImpl,
-            forward,
+            forward
 
         );
     }
+
     void backward() override {
         PYBIND11_OVERRIDE(
             void,
             OperatorImpl,
-            backward,
+            backward
 
         );
     }
-    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override {
-        PYBIND11_OVERRIDE_NAME(
-            Elts_t,
-            OperatorImpl,
-            "get_nb_required_data",
-            getNbRequiredData,
-            inputIdx
-        );
-    }
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override {
-        PYBIND11_OVERRIDE_NAME(
-            Elts_t,
-            OperatorImpl,
-            "get_nb_required_protected",
-            getNbRequiredProtected,
-            inputIdx
 
-        );
-    }
-    Elts_t getRequiredMemory(const IOIndex_t outputIdx,
-    const std::vector<DimSize_t> &inputsSize) const override {
+    std::shared_ptr<ProdConso> getProdConso() const override {
         PYBIND11_OVERRIDE_NAME(
-            Elts_t,
+            std::shared_ptr<ProdConso>,
             OperatorImpl,
-            "get_required_memory",
-            getRequiredMemory,
-            outputIdx,
-            inputsSize
-
+            "get_prod_conso",
+            getProdConso
         );
     }
-    Elts_t getNbConsumedData(const IOIndex_t inputIdx) const override {
-        PYBIND11_OVERRIDE_NAME(
-            Elts_t,
-            OperatorImpl,
-            "get_nb_consumed_data",
-            getNbConsumedData,
-            inputIdx
 
-        );
-    }
-    Elts_t getNbProducedData(const IOIndex_t outputIdx) const override {
+    std::set<ImplSpec> getAvailableImplSpecs() const noexcept override {
         PYBIND11_OVERRIDE_NAME(
-            Elts_t,
+            std::set<ImplSpec>,
             OperatorImpl,
-            "get_nb_produced_data",
-            getNbProducedData,
-            outputIdx
-
+            "get_available_impl_specs",
+            getAvailableImplSpecs
         );
     }
-    void updateConsummerProducer() override {
-        PYBIND11_OVERRIDE_NAME(
-            void,
-            OperatorImpl,
-            "update_consummer_producer",
-            updateConsummerProducer,
-
-        );
-    }
-    void resetConsummerProducer() override {
-        PYBIND11_OVERRIDE_NAME(
-            void,
-            OperatorImpl,
-            "reset_consummer_producer",
-            resetConsummerProducer,
+};
 
-        );
-    }
+// See https://pybind11.readthedocs.io/en/stable/advanced/classes.html#binding-protected-member-functions
+class OperatorImpl_Publicist : public OperatorImpl {
+public:
+    using OperatorImpl::getProdConso;
+    using OperatorImpl::getAvailableImplSpecs;
 };
 
 void init_OperatorImpl(py::module& m){
+    py::class_<ImplSpec::IOSpec>(m, "IOSpec")
+    .def(py::init<DataType, DataFormat, const std::vector<std::pair<int, int>>&>(), py::arg("type"), py::arg("format") = DataFormat::Any, py::arg("dims") = std::vector<std::pair<int, int>>{})
+    ;
+
+    py::class_<ImplSpec>(m, "ImplSpec")
+    .def(py::init<const DynamicAttributes&>(), py::arg("attr") = DynamicAttributes())
+    .def(py::init<const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("io"), py::arg("attr") = DynamicAttributes())
+    .def(py::init<const ImplSpec::IOSpec&, const ImplSpec::IOSpec&, const DynamicAttributes&>(), py::arg("i"), py::arg("o"), py::arg("attr") = DynamicAttributes())
+    ;
 
     py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr())
     .def(py::init<const Operator&, const std::string&>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>())
     .def("forward", &OperatorImpl::forward)
     .def("backward", &OperatorImpl::backward)
-    .def("get_nb_required_data", &OperatorImpl::getNbRequiredData)
-    .def("get_nb_required_protected", &OperatorImpl::getNbRequiredProtected)
-    .def("get_required_memory", &OperatorImpl::getRequiredMemory)
-    .def("get_nb_consumed_data", &OperatorImpl::getNbConsumedData)
-    .def("get_nb_produced_data", &OperatorImpl::getNbProducedData)
-    .def("update_consummer_producer", &OperatorImpl::updateConsummerProducer)
-    .def("reset_consummer_producer", &OperatorImpl::resetConsummerProducer)
+    .def("prod_conso", &OperatorImpl::prodConso)
+    .def("backend", &OperatorImpl::backend)
+    .def("get_operator", &OperatorImpl::getOperator)
+    .def("get_required_spec", &OperatorImpl::getRequiredSpec)
+    .def("get_best_match", &OperatorImpl::getBestMatch)
+    .def("get_adaptation", &OperatorImpl::getAdaptation)
+    .def("get_best_adaptation", &OperatorImpl::getBestAdaptation)
+    .def("get_prod_conso", &OperatorImpl_Publicist::getProdConso)
+    .def("get_available_impl_specs", &OperatorImpl_Publicist::getAvailableImplSpecs)
     ;
 }
 }
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index bca246c94434b280a12d070526ad4ffb2c7fbe7b..e91f345d7974cb06aa7aec9e27300b9cf9230985 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -10,27 +10,57 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+
 #include "aidge/data/Data.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
+template <class T>
+void bindEnum(py::module& m, const std::string& name) {
+    // Define enumeration names for python as lowercase type name
+    // This defines enum names compatible with basic numpy type
+    // names such as: float32, float64, [u]int32, [u]int64, ...
+    auto python_enum_name = [](const T& type) {
+        auto str_lower = [](std::string& str) {
+            std::transform(str.begin(), str.end(), str.begin(),
+                           [](unsigned char c){
+                               return std::tolower(c);
+                           });
+        };
+        auto type_name = std::string(Aidge::format_as(type));
+        str_lower(type_name);
+        return type_name;
+    };
+    // Auto generate enumeration names from lowercase type strings
+    std::vector<std::string> enum_names;
+    for (auto type_str : EnumStrings<T>::data) {
+        auto type = static_cast<T>(enum_names.size());
+        auto enum_name = python_enum_name(type);
+        enum_names.push_back(enum_name);
+    }
+
+    // Define python side enumeration aidge_core.type
+    auto e_type = py::enum_<T>(m, name.c_str());
+
+    // Add enum value for each enum name
+    for (std::size_t idx = 0; idx < enum_names.size(); idx++) {
+        e_type.value(enum_names[idx].c_str(), static_cast<T>(idx));
+    }
+
+    // Define __str__() to return the bare enum name value, which allows
+    // comparing directly, for instance, str(tensor.dtype())
+    // with str(nparray.dtype)
+    e_type.def("__str__", [enum_names](const T& type) {
+        return enum_names[static_cast<int>(type)];
+    }, py::prepend());
+}
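+
+// For illustration only (hypothetical usage, assuming the module is imported
+// as aidge_core and numpy as np): the generated names make the enum values
+// comparable with numpy dtype names, e.g.:
+//
+//   assert str(aidge_core.dtype.float32) == str(np.float32(0).dtype)  # "float32"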
+
 void init_Data(py::module& m){
-    // TODO : extend with more values !
-    py::enum_<DataType>(m, "DataType")
-    .value("Float64", DataType::Float64)
-    .value("Float32", DataType::Float32)
-    .value("Float16", DataType::Float16)
-    .value("Int8", DataType::Int8)
-    .value("Int32", DataType::Int32)
-    .value("Int64", DataType::Int64)
-    .value("UInt8", DataType::UInt8)
-    .value("UInt32", DataType::UInt32)
-    .value("UInt64", DataType::UInt64)
-    ;
+    bindEnum<DataType>(m, "dtype");
+    bindEnum<DataFormat>(m, "dformat");
 
     py::class_<Data, std::shared_ptr<Data>>(m,"Data");
 
-
 }
 }
diff --git a/python_binding/data/pybind_DataProvider.cpp b/python_binding/data/pybind_DataProvider.cpp
index 2f652aff5008f8008952ffb1bb6fb1738021b436..77abd1f39bb4d5375d2fc57c5bd5595e79f135fb 100644
--- a/python_binding/data/pybind_DataProvider.cpp
+++ b/python_binding/data/pybind_DataProvider.cpp
@@ -27,7 +27,7 @@ std::vector<std::shared_ptr<Aidge::Tensor>> DataProvider::next() {
 void init_DataProvider(py::module& m){
 
     py::class_<DataProvider, std::shared_ptr<DataProvider>>(m, "DataProvider")
-        .def(py::init<Database&, std::size_t, bool, bool>(), py::arg("database"), py::arg("batch_size"), py::arg("shuffle"), py::arg("drop_last"))
+        .def(py::init<Database&, std::size_t, std::string, bool, bool>(), py::arg("database"), py::arg("batch_size"), py::arg("backend") = "cpu", py::arg("shuffle") = false, py::arg("drop_last") = false)
         .def("__iter__", &DataProvider::iter)
         .def("__next__", &DataProvider::next)
         .def("__len__", &DataProvider::getNbBatch);
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 005175ab613594c48959073c4674e6d69b60b29f..91f13c17dd923d9f4b9ed6e468ce7897059d8749 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -23,17 +23,267 @@
 namespace py = pybind11;
 namespace Aidge {
 
+using registrableTensor = Registrable<Tensor,
+                                      std::tuple<std::string, DataType>,
+                                      std::function<std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>;
+
+using pyTensorClass = py::class_<Tensor,
+                                 std::shared_ptr<Tensor>,
+                                 Data,
+                                 registrableTensor>;
+
+using pyTensorRegistrableClass = py::class_<registrableTensor,
+                                            std::shared_ptr<registrableTensor>>;
+
+using NumpyDType = py::detail::npy_api::constants;
+
+// Map Numpy dtype ids to aidge datatypes.
+// If a numpy dtype is not present, np arrays of this type are rejected.
+static const std::map<NumpyDType, DataType> NumpyTypeNameAsNativeType = {
+    { NumpyDType::NPY_INT8_, NativeType<std::int8_t>::type },
+    { NumpyDType::NPY_INT16_, NativeType<std::int16_t>::type },
+    { NumpyDType::NPY_INT32_, NativeType<std::int32_t>::type },
+    { NumpyDType::NPY_INT64_, NativeType<std::int64_t>::type },
+    { NumpyDType::NPY_UINT8_, NativeType<std::uint8_t>::type },
+    { NumpyDType::NPY_UINT16_, NativeType<std::uint16_t>::type },
+    { NumpyDType::NPY_UINT32_, NativeType<std::uint32_t>::type },
+    { NumpyDType::NPY_UINT64_, NativeType<std::uint64_t>::type },
+    { NumpyDType::NPY_FLOAT_, NativeType<float>::type },
+    { NumpyDType::NPY_DOUBLE_, NativeType<double>::type },
+};
+
+// The Numpy API slots that we need in order to convert bare numpy scalars.
+// They are not exposed by the pybind API, hence we have to redo
+// the API mapping for them.
+// See for instance the merge request proposing to add support
+// for numpy scalars: https://github.com/pybind/pybind11/pull/3544/
+// If it is merged upstream, we will be able to remove this code.
+enum NUMPY_API_Slots {
+    PyArray_GetNDArrayCFeatureVersion = 211,
+    PyArray_TypeObjectFromType = 46,
+    PyArray_ScalarAsCtype = 62,
+};
+
+// Get the Numpy API pointer. We can't reuse pybind's implementation
+// as it is private, so we use the same scheme and return the pointer to the
+// Numpy API function table.
+static void **NumpyAPIPtr() {
+    static void **api_ptr = []() {
+        py::module_ m = py::module_::import("numpy.core.multiarray");
+        auto c = m.attr("_ARRAY_API");
+        void **api_ptr = (void **) PyCapsule_GetPointer(c.ptr(), nullptr);
+        if (api_ptr == nullptr) {
+            AIDGE_THROW_OR_ABORT(py::import_error, "numpy binding: unable to get numpy _ARRAY_API pointer.");
+        }
+        using ftype = unsigned int (*)();
+        auto version = ftype(api_ptr[NUMPY_API_Slots::PyArray_GetNDArrayCFeatureVersion])();
+        if (version < 0x7) {
+            AIDGE_THROW_OR_ABORT(py::import_error, "numpy binding: requires numpy >= 1.7.0");
+        }
+        return api_ptr;
+    }();
+    return api_ptr;
+}
+
+// Wrapper for the Numpy API PyArray_ScalarAsCtype
+static void NumpyScalarAsCtype(const py::object val, void *dst_ptr) {
+    using ftype = void (*)(PyObject *, void *);
+    void **api_ptr = NumpyAPIPtr();
+    ((ftype)api_ptr[NUMPY_API_Slots::PyArray_ScalarAsCtype])(val.ptr(), dst_ptr);
+}
+
+// Wrapper for the Numpy API PyArray_TypeObjectFromType
+static PyObject *NumpyTypeObjectFromType(const NumpyDType npy_dtype) {
+    using ftype = PyObject *(*)(int);
+    void **api_ptr = NumpyAPIPtr();
+    auto obj = ((ftype)api_ptr[NUMPY_API_Slots::PyArray_TypeObjectFromType])(npy_dtype);
+    return obj;
+}
+
+// Detects and converts (without casting) a numpy scalar of npy_dtype, or returns false.
+// If it matches, fills the value and the aidge dtype in the provided pointers.
+static bool NPScalarGetValue(const py::object val_obj, const NumpyDType npy_dtype, void* dst_ptr, DataType* aidge_dtype_ptr) {
+    auto search_datatype = NumpyTypeNameAsNativeType.find(npy_dtype);
+    if (search_datatype == NumpyTypeNameAsNativeType.end()) {
+        return false;
+    }
+    auto pyobj_dtype = NumpyTypeObjectFromType(npy_dtype);
+    if (!isinstance(val_obj, pyobj_dtype)) {
+        return false;
+    }
+    *aidge_dtype_ptr = search_datatype->second;
+    NumpyScalarAsCtype(val_obj, dst_ptr);
+    return true;
+}
+
+using NativeValue = union {
+    std::int8_t i8; std::int16_t i16; std::int32_t i32; std::int64_t i64;
+    std::uint8_t u8; std::uint16_t u16; std::uint32_t u32; std::uint64_t u64;
+    float f32; double f64;
+};
+
+static bool getNPScalarNativeVal(const py::object obj, NativeValue* val_ptr, DataType* aidge_dtype_ptr) {
+    NativeValue native_val;
+    DataType native_dtype;
+    bool found = (NPScalarGetValue(obj, NumpyDType::NPY_INT32_, &native_val.i32, &native_dtype) ||
+                  NPScalarGetValue(obj, NumpyDType::NPY_FLOAT_, &native_val.f32, &native_dtype) ||
+                  NPScalarGetValue(obj, NumpyDType::NPY_INT8_, &native_val.i8, &native_dtype) ||
+                  NPScalarGetValue(obj, NumpyDType::NPY_INT16_, &native_val.i16, &native_dtype) ||
+                  NPScalarGetValue(obj, NumpyDType::NPY_INT64_, &native_val.i64, &native_dtype) ||
+                  NPScalarGetValue(obj, NumpyDType::NPY_UINT8_, &native_val.u8, &native_dtype) ||
+                  NPScalarGetValue(obj, NumpyDType::NPY_UINT16_, &native_val.u16, &native_dtype) ||
+                  NPScalarGetValue(obj, NumpyDType::NPY_UINT32_, &native_val.u32, &native_dtype) ||
+                  NPScalarGetValue(obj, NumpyDType::NPY_UINT64_, &native_val.u64, &native_dtype) ||
+                  NPScalarGetValue(obj, NumpyDType::NPY_DOUBLE_, &native_val.f64, &native_dtype));
+    if (found) {
+        *val_ptr = native_val;
+        *aidge_dtype_ptr = native_dtype;
+    }
+    return found;
+}
+
+static bool getScalarNativeVal(const py::object obj, NativeValue* val_ptr, DataType* aidge_dtype_ptr) {
+    NativeValue native_val;
+    DataType native_dtype;
+    bool found;
+    // Try to match actual numpy scalars first in order to avoid unexpected casting
+    // when matching native python types as numpy does some automatic conversions
+    // behind the scenes.
+    found = getNPScalarNativeVal(obj, &native_val, &native_dtype);
+    if (!found) {
+        // Then try to match int and float python scalar objects
+        if (py::isinstance<py::int_>(obj)) {
+            // Note that we use the following strategy for casting native python int:
+            // in order: int32, int64, or float32, whichever is the first that does not overflow
+            using caster_i32 = py::detail::type_caster<std::int32_t>;
+            using caster_i64 = py::detail::type_caster<std::int64_t>;
+            using caster_f32 = py::detail::type_caster<float>;
+            if (caster_i32().load(obj, false)) {
+                native_dtype = NativeType<std::int32_t>::type;
+                native_val.i32 = py::cast<std::int32_t>(obj);
+            } else if (caster_i64().load(obj, false)) {
+                native_dtype = NativeType<std::int64_t>::type;
+                native_val.i64 = py::cast<std::int64_t>(obj);
+            } else {
+                native_dtype = NativeType<float>::type;
+                native_val.f32 = py::cast<float>(obj);
+            }
+            found = true;
+        } else if (py::isinstance<py::float_>(obj)) {
+            // Note that for native python float, we cast to float32, which may lose
+            // precision as python floats are of type float64.
+            native_dtype = NativeType<float>::type;
+            native_val.f32 = py::cast<float>(obj);
+            found = true;
+        }
+    }
+    if (found) {
+        *val_ptr = native_val;
+        *aidge_dtype_ptr = native_dtype;
+    }
+    return found;
+}
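+
+// For illustration only (hypothetical outcomes of the default strategy above,
+// as used by the Tensor scalar constructor):
+//   Tensor(1.5)    -> float32 (native python float)
+//   Tensor(1)      -> int32   (fits in int32)
+//   Tensor(2**40)  -> int64   (overflows int32, fits in int64)
+//   Tensor(2**70)  -> float32 (overflows int64 as well)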
+
+static void getConservativeNativeVal(const py::object obj, NativeValue *val_ptr, DataType * aidge_dtype_ptr) {
+    NativeValue native_val;
+    DataType native_dtype;
+    bool found;
+    found = getNPScalarNativeVal(obj, &native_val, &native_dtype);
+    if (!found) {
+        if (py::isinstance<py::int_>(obj)) {
+            // Note that for the conservative cast we use our largest int types in order
+            // and otherwise fall back to double, i.e.: int64, then uint64, then double
+            using caster_i64 = py::detail::type_caster<std::int64_t>;
+            using caster_u64 = py::detail::type_caster<std::uint64_t>;
+            if (caster_i64().load(obj, false)) {
+                native_dtype = NativeType<std::int64_t>::type;
+                native_val.i64 = py::cast<std::int64_t>(obj);
+            } else if (caster_u64().load(obj, false)) {
+                native_dtype = NativeType<std::uint64_t>::type;
+                native_val.u64 = py::cast<std::uint64_t>(obj);
+            } else {
+                native_dtype = NativeType<double>::type;
+                native_val.f64 = py::cast<double>(obj);
+            }
+            found = true;
+        } else if (py::isinstance<py::float_>(obj)) {
+            // Note that for the conservative cast we use double, which is our largest float
+            native_dtype = NativeType<double>::type;
+            native_val.f64 = py::cast<double>(obj);
+            found = true;
+        }
+    }
+    if (!found) {
+        AIDGE_THROW_OR_ABORT(py::value_error, "Unsupported python type passed as scalar");
+    }
+    *val_ptr = native_val;
+    *aidge_dtype_ptr = native_dtype;
+}
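+
+// For illustration only (hypothetical outcomes of the conservative strategy,
+// used below when assigning python scalars into an existing tensor):
+//   t[0] = 1      -> read as int64, then cast to the tensor's dtype
+//   t[0] = 2**63  -> read as uint64 (overflows int64), then cast
+//   t[0] = 0.5    -> read as double, then cast to the tensor's dtype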
 
 template<typename T>
-void addCtor(py::class_<Tensor,
-                        std::shared_ptr<Tensor>,
-                        Data,
-                        Registrable<Tensor,
-                                    std::tuple<std::string, DataType>,
-                                    std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>& mTensor){
-    mTensor.def(py::init([](
-        py::array_t<T, py::array::c_style | py::array::forcecast> b,
-        std::string backend = "cpu") {
+static T castToNativeType(const py::object val_obj) {
+    NativeValue val;
+    DataType dtype;
+    getConservativeNativeVal(val_obj, &val, &dtype);
+    switch (dtype) {
+    case DataType::Int8:
+        return (T)val.i8;
+    case DataType::Int16:
+        return (T)val.i16;
+    case DataType::Int32:
+        return (T)val.i32;
+    case DataType::Int64:
+        return (T)val.i64;
+    case DataType::UInt8:
+        return (T)val.u8;
+    case DataType::UInt16:
+        return (T)val.u16;
+    case DataType::UInt32:
+        return (T)val.u32;
+    case DataType::UInt64:
+        return (T)val.u64;
+    case DataType::Float32:
+        return (T)val.f32;
+    case DataType::Float64:
+        return (T)val.f64;
+    }
+    AIDGE_THROW_OR_ABORT(py::cast_error, "Unexpectedly missing conversion to scalar type");
+}
+
+static void addScalarCtor(pyTensorClass& mTensor) {
+    // Constructor based on a bare py::object in order to match either
+    // python scalars (int, float) or numpy scalars (np.int32, np.int64, ...).
+    // There is a merge request to support numpy scalars in pybind through py::numpy_scalar<T>,
+    // though it is not merged yet: https://github.com/pybind/pybind11/pull/3544/.
+    // Hence we use the helper functions defined above to try matching the different numpy scalar types.
+    mTensor.def(py::init([](py::object obj,
+                            const std::string& backend = "cpu") {
+        NativeValue native_val;
+        DataType native_dtype;
+        bool found = getScalarNativeVal(obj, &native_val, &native_dtype);
+        if (!found) {
+            AIDGE_THROW_OR_ABORT(py::value_error, "Unsupported python type passed to Tensor constructor");
+        }
+        Tensor* newTensor = new Tensor();
+        newTensor->setDataType(native_dtype);
+        const std::vector<DimSize_t> input_dims(0);
+        newTensor->resize(input_dims);
+        std::set<std::string> availableBackends = Tensor::getAvailableBackends();
+        if (availableBackends.find(backend) != availableBackends.end()){
+            newTensor->setBackend(backend);
+            newTensor->getImpl()->copyFromHost(static_cast<void *>(&native_val), newTensor->size());
+        }else{
+            AIDGE_THROW_OR_ABORT(py::value_error, "Could not find backend {}, verify you have `import aidge_backend_{}`.\n", backend, backend);
+        }
+
+        return newTensor;
+    }), py::arg("val"), py::arg("backend")="cpu", py::kw_only());
+}
+
+template<typename T>
+void addArrayCtor(pyTensorClass& mTensor) {
+    mTensor.def(py::init([](const py::array_t<T, py::array::c_style|py::array::forcecast> b,
+                            const std::string& backend = "cpu") {
         /* Request a buffer descriptor from Python */
         py::buffer_info info = b.request();
         Tensor* newTensor = new Tensor();
@@ -44,62 +294,56 @@ void addCtor(py::class_<Tensor,
         std::set<std::string> availableBackends = Tensor::getAvailableBackends();
         if (availableBackends.find(backend) != availableBackends.end()){
             newTensor->setBackend(backend);
-            newTensor->getImpl()->copyFromHost(static_cast<T*>(info.ptr), newTensor->size());
+            newTensor->getImpl()->copyFromHost(static_cast<const T*>(info.ptr), newTensor->size());
         }else{
             AIDGE_THROW_OR_ABORT(py::value_error, "Could not find backend {}, verify you have `import aidge_backend_{}`.\n", backend, backend);
         }
 
         return newTensor;
-    }), py::arg("array"), py::arg("backend")="cpu")
-    .def("__setitem__", (void (Tensor::*)(std::size_t, T)) &Tensor::set)
-    .def("__setitem__", (void (Tensor::*)(std::vector<std::size_t>, T)) &Tensor::set)
-    ;
+    }), py::arg("array"), py::arg("backend")="cpu", py::kw_only());
 }
 
 
 void init_Tensor(py::module& m){
-    py::class_<Registrable<Tensor,
-                           std::tuple<std::string, DataType>,
-                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>,
-               std::shared_ptr<Registrable<Tensor,
-                                           std::tuple<std::string, DataType>,
-                                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>>(m,"TensorRegistrable");
-
-    py::class_<Tensor, std::shared_ptr<Tensor>,
-               Data,
-               Registrable<Tensor,
-                           std::tuple<std::string, DataType>,
-                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>> pyClassTensor
+    pyTensorRegistrableClass(m,"TensorRegistrable");
+
+    pyTensorClass pyClassTensor
         (m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
 
-    pyClassTensor.def(py::init<>())
+    pyClassTensor
     .def(py::self + py::self)
     .def(py::self - py::self)
     .def(py::self * py::self)
     .def(py::self / py::self)
+    .def("sqrt", &Tensor::sqrt)
     .def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true)
     .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
     .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
     .def("grad", &Tensor::grad)
     .def("set_grad", &Tensor::setGrad)
     .def("dtype", &Tensor::dataType)
-    .def("init_grad", &Tensor::initGrad)
     .def("size", &Tensor::size)
-    .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize)
+    .def("capacity", &Tensor::capacity)
+    .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize, py::arg("dims"), py::arg("strides") = std::vector<DimSize_t>())
     .def("has_impl", &Tensor::hasImpl)
     .def("get_coord", &Tensor::getCoord)
     .def("get_idx", &Tensor::getIdx)
     .def_static("get_available_backends", &Tensor::getAvailableBackends)
+    .def("undefined", &Tensor::undefined)
     .def("__str__", [](Tensor& b) {
-        return b.toString();
+        if (b.empty()) {
+            return std::string("{}");
+        } else {
+            return b.toString();
+        }
     })
     .def("__repr__", [](Tensor& b) {
-        return "Tensor(dtype = " + std::string(EnumStrings<DataType>::data[static_cast<int>(b.dataType())]) + ",\n" + b.toString() + ")";
+        return fmt::format("Tensor(dims = {}, dtype = {})", b.dims(), std::string(EnumStrings<DataType>::data[static_cast<int>(b.dataType())]));
     })
     .def("__len__", [](Tensor& b) -> size_t{
         return b.size();
     })
-    .def("__getitem__", [](Tensor& b, size_t idx)-> py::object {
+    .def("__getitem__", [](const Tensor& b, const size_t idx)-> py::object {
         if (idx >= b.size()) throw py::index_error();
         switch(b.dataType()){
             case DataType::Float64:
@@ -118,11 +362,15 @@ void init_Tensor(py::module& m){
                 return py::cast(b.get<std::uint8_t>(idx));
             case DataType::UInt16:
                 return py::cast(b.get<std::uint16_t>(idx));
+            case DataType::UInt32:
+                return py::cast(b.get<std::uint32_t>(idx));
+            case DataType::UInt64:
+                return py::cast(b.get<std::uint64_t>(idx));
             default:
                 return py::none();
         }
     })
-    .def("__getitem__", [](Tensor& b, std::vector<size_t> coordIdx)-> py::object {
+    .def("__getitem__", [](const Tensor& b, const std::vector<size_t>& coordIdx)-> py::object {
         if (b.getIdx(coordIdx) >= b.size()) throw py::index_error();
         switch(b.dataType()){
             case DataType::Float64:
@@ -141,10 +389,90 @@ void init_Tensor(py::module& m){
                 return py::cast(b.get<std::uint8_t>(coordIdx));
             case DataType::UInt16:
                 return py::cast(b.get<std::uint16_t>(coordIdx));
+            case DataType::UInt32:
+                return py::cast(b.get<std::uint32_t>(coordIdx));
+            case DataType::UInt64:
+                return py::cast(b.get<std::uint64_t>(coordIdx));
             default:
                 return py::none();
         }
     })
+    .def("__setitem__", [](Tensor& b, const std::size_t idx, const py::object val) {
+        if (idx >= b.size()) throw py::index_error();
+        switch(b.dataType()){
+            case DataType::Float64:
+                b.set(idx, castToNativeType<double>(val));
+                break;
+            case DataType::Float32:
+                b.set(idx, castToNativeType<float>(val));
+                break;
+            case DataType::Int8:
+                b.set(idx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Int16:
+                b.set(idx, castToNativeType<std::int16_t>(val));
+                break;
+            case DataType::Int32:
+                b.set(idx, castToNativeType<std::int32_t>(val));
+                break;
+            case DataType::Int64:
+                b.set(idx, castToNativeType<std::int64_t>(val));
+                break;
+            case DataType::UInt8:
+                b.set(idx, castToNativeType<std::uint8_t>(val));
+                break;
+            case DataType::UInt16:
+                b.set(idx, castToNativeType<std::uint16_t>(val));
+                break;
+            case DataType::UInt32:
+                b.set(idx, castToNativeType<std::uint32_t>(val));
+                break;
+            case DataType::UInt64:
+                b.set(idx, castToNativeType<std::uint64_t>(val));
+                break;
+            default:
+                break;
+
+        }
+    })
+    .def("__setitem__", [](Tensor& b, const std::vector<size_t>& coordIdx, const py::object val) {
+        if (b.getIdx(coordIdx) >= b.size()) throw py::index_error();
+        switch(b.dataType()){
+            case DataType::Float64:
+                b.set(coordIdx, castToNativeType<double>(val));
+                break;
+            case DataType::Float32:
+                b.set(coordIdx, castToNativeType<float>(val));
+                break;
+            case DataType::Int8:
+                b.set(coordIdx, castToNativeType<std::int8_t>(val));
+                break;
+            case DataType::Int16:
+                b.set(coordIdx, castToNativeType<std::int16_t>(val));
+                break;
+            case DataType::Int32:
+                b.set(coordIdx, castToNativeType<std::int32_t>(val));
+                break;
+            case DataType::Int64:
+                b.set(coordIdx, castToNativeType<std::int64_t>(val));
+                break;
+            case DataType::UInt8:
+                b.set(coordIdx, castToNativeType<std::uint8_t>(val));
+                break;
+            case DataType::UInt16:
+                b.set(coordIdx, castToNativeType<std::uint16_t>(val));
+                break;
+            case DataType::UInt32:
+                b.set(coordIdx, castToNativeType<std::uint32_t>(val));
+                break;
+            case DataType::UInt64:
+                b.set(coordIdx, castToNativeType<std::uint64_t>(val));
+                break;
+            default:
+                break;
+
+        }
+    })
     .def_buffer([](Tensor& b) -> py::buffer_info {
         const std::shared_ptr<TensorImpl>& tensorImpl = b.getImpl();
 
@@ -186,6 +514,12 @@ void init_Tensor(py::module& m){
             case DataType::UInt16:
                 dataFormatDescriptor = py::format_descriptor<std::uint16_t>::format();
                 break;
+            case DataType::UInt32:
+                dataFormatDescriptor = py::format_descriptor<std::uint32_t>::format();
+                break;
+            case DataType::UInt64:
+                dataFormatDescriptor = py::format_descriptor<std::uint64_t>::format();
+                break;
             default:
                 throw py::value_error("Unsupported data format");
         }
@@ -200,14 +534,56 @@ void init_Tensor(py::module& m){
         );
     });
 
-    // TODO : If the ctor with the right data type does not exist, pybind will always convert the data to INT !
-    // Need to find a way to avoid this !
-    addCtor<std::int32_t>(pyClassTensor);
-    addCtor<std::int64_t>(pyClassTensor);
-    addCtor<float>(pyClassTensor);
-// #if SIZE_MAX != 0xFFFFFFFF
-    addCtor<double>(pyClassTensor);
-// #endif
+    //
+    // Constructor overloads follow.
+    // The implemented python constructor interface is:
+    // __init__(self, val: float|int|np.ndarray = None, backend: str = "cpu", *, dims: list|tuple = None):
+    //
+    // Where:
+    // - if no arg is specified, we get an undefined Tensor (no dims, no val);
+    // - if only dims is specified, it will create an uninitialized tensor of the given dims and dtype float32;
+    // - otherwise, if val is specified, dims is ignored and if val is a:
+    //   - scalar: it will create a 0-rank scalar tensor of dtype:
+    //     - if val is float: float32
+    //     - if val is int: in this order: int32, int64 or float32 (the first that does not overflow)
+    //   - np.ndarray of a given np.dtype: it will create an equivalent tensor of dtype == np.dtype when supported
+    //   - np.dtype scalar: it will create an equivalent scalar tensor of dtype == np.dtype when supported
+    //
+    // In order to implement this, we provide several overloads which are carefully ordered to fulfill
+    // the above requirements.
+    //
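+    // For illustration, hypothetical usage from Python (assuming the module is
+    // imported as aidge_core and numpy as np):
+    //
+    //   t0 = aidge_core.Tensor()                    # undefined tensor
+    //   t1 = aidge_core.Tensor(dims=[2, 3])         # uninitialized, dtype float32
+    //   t2 = aidge_core.Tensor(1.0)                 # 0-rank scalar, dtype float32
+    //   t3 = aidge_core.Tensor(2**40)               # 0-rank scalar, dtype int64
+    //   t4 = aidge_core.Tensor(np.zeros((2, 3), dtype=np.int8))  # dtype int8
+    //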
+
+    // Undefined Tensor
+    pyClassTensor.def(py::init<>());
+
+    // Uninitialized tensor of given dims and dtype float32
+    // Note that we force dims to be a keyword only argument
+    pyClassTensor.def(py::init<const std::vector<std::size_t>&>(), py::kw_only(), py::arg("dims"));
+
+    // N-D array tensors (including 0-D, from numpy 0-rank arrays)
+    // Note that in this case we have to define all supported Tensor dtypes
+    // otherwise the dtypes will be promoted by pybind unexpectedly.
+    // Note that these overloads must appear before the scalars overloads below
+    // otherwise pybind will try to demote 0-D arrays to scalar without preserving the
+    // np array dtype.
+    // TODO: Note that the list of supported numpy dtypes is possibly incomplete here
+    // TODO: need to add some conversion functions to target dtypes not supported by numpy
+    // such as int4, int7, bfloat, ...
+    addArrayCtor<std::int8_t>(pyClassTensor);
+    addArrayCtor<std::int16_t>(pyClassTensor);
+    addArrayCtor<std::int32_t>(pyClassTensor);
+    addArrayCtor<std::int64_t>(pyClassTensor);
+    addArrayCtor<std::uint8_t>(pyClassTensor);
+    addArrayCtor<std::uint16_t>(pyClassTensor);
+    addArrayCtor<std::uint32_t>(pyClassTensor);
+    addArrayCtor<std::uint64_t>(pyClassTensor);
+    addArrayCtor<float>(pyClassTensor);
+    addArrayCtor<double>(pyClassTensor);
+
+    // Numpy Scalar argument
+    // Handles python scalars and numpy scalars with a single overload
+    addScalarCtor(pyClassTensor);
 
+    declare_registrable<Tensor>(m, "Tensor");
 }
 }
diff --git a/python_binding/data/pybind_TensorImpl.cpp b/python_binding/data/pybind_TensorImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4c664274ec2c33174f51dad34ba1591c323b2d87
--- /dev/null
+++ b/python_binding/data/pybind_TensorImpl.cpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <pybind11/operators.h>
+#include <pybind11/numpy.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/TensorImpl.hpp"
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_TensorImpl(py::module& m){
+  py::class_<TensorImpl, std::shared_ptr<TensorImpl>>(m, "TensorImpl");
+
+  py::class_<TensorImpl_cpu<double>, std::shared_ptr<TensorImpl_cpu<double>>, TensorImpl>(m, "TensorImpl_cpu_float64")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+    
+  py::class_<TensorImpl_cpu<float>, std::shared_ptr<TensorImpl_cpu<float>>, TensorImpl>(m, "TensorImpl_cpu_float32")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<half_float::half>, std::shared_ptr<TensorImpl_cpu<half_float::half>>, TensorImpl>(m, "TensorImpl_cpu_float16")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<int64_t>, std::shared_ptr<TensorImpl_cpu<int64_t>>, TensorImpl>(m, "TensorImpl_cpu_int64")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<int32_t>, std::shared_ptr<TensorImpl_cpu<int32_t>>, TensorImpl>(m, "TensorImpl_cpu_int32")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<int16_t>, std::shared_ptr<TensorImpl_cpu<int16_t>>, TensorImpl>(m, "TensorImpl_cpu_int16")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<int8_t>, std::shared_ptr<TensorImpl_cpu<int8_t>>, TensorImpl>(m, "TensorImpl_cpu_int8")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<uint64_t>, std::shared_ptr<TensorImpl_cpu<uint64_t>>, TensorImpl>(m, "TensorImpl_cpu_uint64")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<uint32_t>, std::shared_ptr<TensorImpl_cpu<uint32_t>>, TensorImpl>(m, "TensorImpl_cpu_uint32")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<uint16_t>, std::shared_ptr<TensorImpl_cpu<uint16_t>>, TensorImpl>(m, "TensorImpl_cpu_uint16")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
+
+  py::class_<TensorImpl_cpu<uint8_t>, std::shared_ptr<TensorImpl_cpu<uint8_t>>, TensorImpl>(m, "TensorImpl_cpu_uint8")
+    .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
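+
+  // For illustration only (hypothetical usage from Python, assuming the module
+  // is imported as aidge_core): construct a raw CPU implementation directly,
+  // e.g. a float32 impl on device 0 with dims [2, 3]:
+  //
+  //   impl = aidge_core.TensorImpl_cpu_float32(0, [2, 3])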
+
+}
+}
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 1000374454020625aada7f2043893b229deec833..c0ee183b072398e2e393bdbd7446de0155519169 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -30,6 +30,12 @@ void init_GraphView(py::module& m) {
           :param path: save location
           :type path: str
           )mydelimiter")
+          .def("in_view", (bool (GraphView::*)(const NodePtr&) const) &GraphView::inView)
+          .def("in_view", (bool (GraphView::*)(const std::string&) const) &GraphView::inView)
+          .def("root_node", &GraphView::rootNode)
+          .def("set_root_node", &GraphView::setRootNode, py::arg("node"))
+          .def("__repr__", &GraphView::repr)
+          .def("__len__", [](const GraphView& g){ return g.getNodes().size(); })
           .def("log_outputs", &GraphView::logOutputs, py::arg("path"))
           .def("get_ordered_inputs", &GraphView::getOrderedInputs)
           .def("get_ordered_outputs", &GraphView::getOrderedOutputs)
@@ -61,13 +67,15 @@ void init_GraphView(py::module& m) {
           :type include_learnable_parameters: bool, optional
           )mydelimiter")
 
-          .def("add", (bool (GraphView::*)(std::shared_ptr<GraphView>)) & GraphView::add,
-               py::arg("other_graph"),
+          .def("add", (bool (GraphView::*)(std::shared_ptr<GraphView>, bool)) & GraphView::add,
+               py::arg("other_graph"), py::arg("include_learnable_parameters") = true,
           R"mydelimiter(
           Include a GraphView to the current GraphView object.
 
           :param other_graph: GraphView to add
           :type other_graph: GraphView
+          :param include_learnable_parameters: include non-data inputs, like weights and biases, default True.
+          :type include_learnable_parameters: bool, optional
           )mydelimiter")
 
           .def("add_child",
@@ -122,6 +130,10 @@ void init_GraphView(py::module& m) {
           .def("__call__", &GraphView::operator(), py::arg("connectors"))
           .def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
           .def("set_backend", &GraphView::setBackend, py::arg("backend"), py::arg("device") = 0)
+          .def("get_ordered_nodes", &GraphView::getOrderedNodes, py::arg("reversed") = false,
+               R"mydelimiter(
+               Get ordered nodes for the graph view
+               )mydelimiter")
           //   .def("__getitem__", [](Tensor& b, size_t idx)-> py::object {
           //      // TODO : Should return error if backend not compatible with get
           //      if (idx >= b.size()) throw py::index_error();
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index b22ebdd0f6cdb5bd738cd164b3fc2e9fe36d9987..d8e77bb259cbcbae7940a09dc405bb8f50b5b79b 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -48,6 +48,18 @@ void init_Node(py::module& m) {
     :rtype: str
     )mydelimiter")
 
+    .def("create_unique_name", &Node::createUniqueName, py::arg("base_name"), 
+    R"mydelimiter(
+    Given a base name, generate a new name which is unique in all the GraphViews containing this node.
+
+    :param base_name: proposed name for the node.
+    :type base_name: str
+    :rtype: str
+    )mydelimiter")
+
+
+    .def("__repr__", &Node::repr)
+
     .def("add_child",
          (void (Node::*)(std::shared_ptr<Node>, const IOIndex_t, IOIndex_t)) &
                  Node::addChild,
@@ -132,11 +144,12 @@ void init_Node(py::module& m) {
     :rtype: int
     )mydelimiter")
 
-    .def("get_nb_data", &Node::nbData,
+    .def("input_category", &Node::inputCategory, py::arg("idx"),
     R"mydelimiter(
-    Number of data inputs.
+    Category of a specific input (Data or Param, optional or not).
+    Data inputs exclude inputs expecting parameters (weights or bias).
 
-    :rtype: int
+    :rtype: InputCategory
     )mydelimiter")
 
     .def("get_nb_outputs", &Node::nbOutputs,
@@ -166,7 +179,11 @@ void init_Node(py::module& m) {
                 if (pybind11::isinstance<Connector>(arg)) {
                     // Convert Python object to C++ object and push it to the vector
                     connectors.push_back(arg.cast<Connector>());
-                } else {
+                }
+                else if (arg.is(py::none())) {
+                    connectors.push_back(Connector());
+                }
+                else {
                     throw std::runtime_error("One of the arguments was not a Connector.");
                 }
             }
diff --git a/python_binding/graph/pybind_OpArgs.cpp b/python_binding/graph/pybind_OpArgs.cpp
index 6ea89f91945ac44f2142c5b9e8440b11ec6a1663..a129ca51c27367ceb1f7518ca85afe134e98cc4a 100644
--- a/python_binding/graph/pybind_OpArgs.cpp
+++ b/python_binding/graph/pybind_OpArgs.cpp
@@ -31,9 +31,9 @@ void init_OpArgs(py::module& m){
     py::implicitly_convertible<Node, OpArgs>();
     py::implicitly_convertible<GraphView, OpArgs>();
 
-    m.def("sequential", &Sequential, py::arg("inputs"));
-    m.def("parallel", &Parallel, py::arg("inputs"));
-    m.def("residual", &Residual, py::arg("inputs"));
+    m.def("sequential", &Sequential, py::arg("inputs"), py::arg("name") =  "");
+    m.def("parallel", &Parallel, py::arg("inputs"), py::arg("name") =  "");
+    m.def("residual", &Residual, py::arg("inputs"), py::arg("name") =  "");
 
 }
 }
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 103e7c1e4db6e197a1dac959a25d266e031d3e55..8a00a1cb4a419f1125411b5b1c823bf91570d62e 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -24,7 +24,8 @@ void declare_Add(py::module &m) {
   py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
     .def(py::init<const IOIndex_t>(), py::arg("nb_inputs"))
     .def_static("get_inputs_name", &Add_Op::getInputsName)
-    .def_static("get_outputs_name", &Add_Op::getOutputsName);
+    .def_static("get_outputs_name", &Add_Op::getOutputsName)
+    .def_readonly_static("Type", &Add_Op::Type);
 
   declare_registrable<Add_Op>(m, "AddOp");
 
diff --git a/python_binding/operator/pybind_And.cpp b/python_binding/operator/pybind_And.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..08dddfc8168bb77086a3dd72aca45b110a4cbce9
--- /dev/null
+++ b/python_binding/operator/pybind_And.cpp
@@ -0,0 +1,34 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/And.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_And(py::module& m) {
+    py::class_<And_Op, std::shared_ptr<And_Op>, OperatorTensor>(m, "AndOp", py::multiple_inheritance(),
+          R"mydelimiter( Initialize an And operator.)mydelimiter")
+    .def(py::init<>())
+    .def_static("get_inputs_name", &And_Op::getInputsName)
+    .def_static("get_outputs_name", &And_Op::getOutputsName);
+    declare_registrable<And_Op>(m, "AndOp");
+    m.def("And", &And, py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing an And operator.
+			:param name: name of the node.
+		)mydelimiter");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_ArgMax.cpp b/python_binding/operator/pybind_ArgMax.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3de54afd7a669347cc2b272cff9b87cf152be09a
--- /dev/null
+++ b/python_binding/operator/pybind_ArgMax.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <array>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/ArgMax.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_ArgMax(py::module &m) {
+  const std::string pyClassName("ArgMaxOp");
+  py::class_<ArgMax_Op, std::shared_ptr<ArgMax_Op>, OperatorTensor>(
+    m, pyClassName.c_str(), py::multiple_inheritance(),
+      R"mydelimiter(
+		Initialize an ArgMax operator.
+			:param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axis: int
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param select_last_index: If True, selects the last index if there are multiple occurrences 
+									of the max value. If False (default), selects the first occurrence.
+			:type select_last_index: bool
+		)mydelimiter")
+    .def(py::init<std::int32_t, bool, bool>(), py::arg("axis"), py::arg("keep_dims"), py::arg("select_last_index"))
+    .def_static("get_inputs_name", &ArgMax_Op::getInputsName)
+    .def_static("get_outputs_name", &ArgMax_Op::getOutputsName)
+    ;
+  declare_registrable<ArgMax_Op>(m, pyClassName);
+
+  m.def("ArgMax", [](std::int32_t axis,
+                    bool keepDims,
+                    bool selectLastIndex,
+                    const std::string& name) {
+        return ArgMax(axis, keepDims, selectLastIndex, name);
+    }, py::arg("axis") = 0,
+       py::arg("keep_dims") = true,
+       py::arg("select_last_index") = false,
+       py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing an ArgMax operator.
+			:param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], 
+						where r is the rank of the input tensor.
+			:type axis: int
+			:param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+							the reduced dimensions are removed.
+			:type keep_dims: bool
+			:param select_last_index: If True, selects the last index if there are multiple occurrences 
+									of the max value. If False (default), selects the first occurrence.
+			:type select_last_index: bool
+			:param name : name of the node.
+		)mydelimiter");
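+
+  // For illustration only (hypothetical usage from Python, assuming the module
+  // is imported as aidge_core):
+  //
+  //   node = aidge_core.ArgMax(axis=1, keep_dims=False, name="argmax0")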
+}
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 966def88033dee8cd6cee06d80dc32114050b430..b98a642111402050fd3cba6dd8a12b11a3bbde8a 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -21,22 +21,32 @@
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/StaticAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
+
   const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D");
-  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
+  const std::string pyStaticAttrClassName("StaticAttributes" + pyClassName);
+//   py::class_<StaticAttributes<AvgPoolingAttr,
+//                                              std::array<DimSize_t, DIM>,
+//                                              std::array<DimSize_t, DIM>>,
+//     std::shared_ptr<StaticAttributes<AvgPoolingAttr,
+//                                              std::array<DimSize_t, DIM>,
+//                                              std::array<DimSize_t, DIM>>>, Attributes>(m, pyStaticAttrClassName.c_str());
+
+  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor>(
         m, pyClassName.c_str(),
         py::multiple_inheritance())
     .def(py::init<const std::array<DimSize_t, DIM> &,
                   const std::array<DimSize_t, DIM> &>(),
             py::arg("kernel_dims"),
-            py::arg("stride_dims"))
+            py::arg("stride_dims") = create_array<DimSize_t,DIM>(1))
     .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
     .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
-    .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
+    .def_readonly_static("Type", &AvgPooling_Op<DIM>::Type);
 
   declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
 
@@ -54,6 +64,9 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
 
 
 void init_AvgPooling(py::module &m) {
+//   py::enum_<AvgPoolingAttr>(m, "_AvgPoolingAttr")
+    // .value("kernel_dims", AvgPoolingAttr::KernelDims)
+    // .value("stride_dims", AvgPoolingAttr::StrideDims);
   declare_AvgPoolingOp<1>(m);
   declare_AvgPoolingOp<2>(m);
   declare_AvgPoolingOp<3>(m);
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 4ec25e02a50330bdf764b598b598836a251d65ea..9a1bdacd169beebc843448d23bdaf8502de437b4 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -9,9 +9,10 @@
  *
  ********************************************************************************/
 
-#include <pybind11/pybind11.h>
 #include <string>
 
+#include <pybind11/pybind11.h>
+
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
@@ -23,13 +24,14 @@ namespace Aidge {
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
     const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), py::multiple_inheritance())
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor>(
+    m, pyClassName.c_str(), py::multiple_inheritance())
         .def(py::init<float, float>(),
             py::arg("epsilon"),
             py::arg("momentum"))
         .def_static("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
         .def_static("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
-        .def_static("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
+        .def_readonly_static("Type", &BatchNorm_Op<DIM>::Type);
 
     declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_BitShift.cpp b/python_binding/operator/pybind_BitShift.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b4f6c90e54e781b011459be6e8e6e252e7347b00
--- /dev/null
+++ b/python_binding/operator/pybind_BitShift.cpp
@@ -0,0 +1,58 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include <string>
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/BitShift.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_BitShift(py::module &m) {
+    // Binding for BitShiftOp class
+    auto pyBitShiftOp = py::class_<BitShift_Op, std::shared_ptr<BitShift_Op>, OperatorTensor>(m, "BitShiftOp", py::multiple_inheritance(),R"mydelimiter(
+        BitShiftOp is a tensor operator that performs bitwise shifts on tensor elements.
+        This class allows shifting tensor values either to the left or right based on the 
+        specified direction. The direction can be accessed and controlled using the 
+        BitShiftDirection enum.
+        :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
+        :type direction: BitShiftDirection
+        :param name: name of the node.
+    )mydelimiter")
+        .def(py::init<BitShift_Op::BitShiftDirection>(), py::arg("direction"))
+        .def("direction", &BitShift_Op::direction, "Get the direction of the bit shift (left or right).")
+        .def_static("get_inputs_name", &BitShift_Op::getInputsName, "Get the names of the input tensors.")
+        .def_static("get_outputs_name", &BitShift_Op::getOutputsName, "Get the names of the output tensors.");
+
+    // Enum binding under BitShiftOp class
+    py::enum_<BitShift_Op::BitShiftDirection>(pyBitShiftOp, "BitShiftDirection")
+        .value("Right", BitShift_Op::BitShiftDirection::right)
+        .value("Left", BitShift_Op::BitShiftDirection::left)
+        .export_values();
+
+    // Binding for the BitShift function
+    m.def("BitShift", &BitShift, py::arg("direction") = BitShift_Op::BitShiftDirection::right, py::arg("name") = "",
+        R"mydelimiter(
+        BitShiftOp is a tensor operator that performs bitwise shifts on tensor elements.
+        This class allows shifting tensor values either to the left or right based on the 
+        specified direction. The direction can be accessed and controlled using the 
+        BitShiftDirection enum.
+        :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
+        :type direction: BitShiftDirection
+        :param name: name of the node.
+    )mydelimiter");
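+
+    // For illustration only (hypothetical usage from Python, assuming the module
+    // is imported as aidge_core; export_values() above also exposes the
+    // direction values directly on BitShiftOp):
+    //
+    //   node = aidge_core.BitShift(aidge_core.BitShiftOp.Left, name="shl")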
+}
+} // namespace Aidge
\ No newline at end of file
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 07bb9f2fc16fcbefb693aeec00c380661f4a6e44..854f3783e9961bb5fd29746b88352438a43dd6e4 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -20,13 +20,13 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Concat(py::module& m) {
-    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, Attributes, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
-        .def(py::init<const IOIndex_t, const DimSize_t>(),
+    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
+        .def(py::init<const IOIndex_t, const int>(),
                 py::arg("nb_inputs"),
                 py::arg("axis"))
         .def_static("get_inputs_name", &Concat_Op::getInputsName)
         .def_static("get_outputs_name", &Concat_Op::getOutputsName)
-        .def_static("attributes_name", &Concat_Op::staticGetAttrsName);
+        .def_readonly_static("Type", &Concat_Op::Type);
 
     declare_registrable<Concat_Op>(m, "ConcatOp");
 
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b0d5ef2ef78380422ca1a137608f5289fa519aed
--- /dev/null
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -0,0 +1,44 @@
+
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/ConstantOfShape.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_ConstantOfShape(py::module &m) {
+  py::class_<ConstantOfShape_Op, std::shared_ptr<ConstantOfShape_Op>, OperatorTensor>(
+      m, "ConstantOfShapeOp", py::multiple_inheritance())
+      // Here we bind the methods of the ConstantOfShape_Op that we will want to access
+      .def("get_inputs_name", &ConstantOfShape_Op::getInputsName)
+      .def("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
+      .def("value", &ConstantOfShape_Op::value);
+  // Here we bind the constructor of the ConstantOfShape Node. We add an argument for
+  // each attribute of the operator (here we only have 'value') and the last
+  // argument is the node's name.
+  m.def("ConstantOfShape", &ConstantOfShape, py::arg("value") = Tensor(0.f),
+        py::arg("name") = "",
+        R"mydelimiter(
+    Initialize a node containing a ConstantOfShape operator.
+	:param value: tensor with a given datatype that contains the value that will fill the output tensor
+	:type value: :py:class: Tensor
+    :param name: name of the node.
+)mydelimiter");
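+
+// For illustration only (hypothetical usage from Python, assuming the module
+// is imported as aidge_core):
+//
+//   node = aidge_core.ConstantOfShape(value=aidge_core.Tensor(3.0), name="cst")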
+}
+} // namespace Aidge
+
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index c1a4f1319e4e715add01417f86d17bddadb992f1..bc72825b2161d8733334817e095c251c788e7eba 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -27,27 +27,25 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
   const std::string pyClassName("ConvOp" + std::to_string(DIM) + "D");
-  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>(
+  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
         .def(py::init([](const std::vector<DimSize_t>& kernel_dims,
                          const std::vector<DimSize_t> &stride_dims,
-                         const std::vector<DimSize_t> &dilation_dims,
-                         bool no_bias) {
+                         const std::vector<DimSize_t> &dilation_dims) {
             AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
             AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
             AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-            return new Conv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+            return new Conv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
         }), py::arg("kernel_dims"),
             py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
-            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-            py::arg("no_bias") = false)
+            py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1))
         .def_static("get_inputs_name", &Conv_Op<DIM>::getInputsName)
         .def_static("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
-        .def_static("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
         .def("in_channels", &Conv_Op<DIM>::inChannels)
         .def("out_channels", &Conv_Op<DIM>::outChannels)
+        .def_readonly_static("Type", &Conv_Op<DIM>::Type)
         ;
 
   declare_registrable<Conv_Op<DIM>>(m, pyClassName);
@@ -75,7 +73,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
 
 
 void init_Conv(py::module &m) {
-//   declare_ConvOp<1>(m);
+  declare_ConvOp<1>(m);
   declare_ConvOp<2>(m);
 //   declare_ConvOp<3>(m);
 }
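With `no_bias` removed from both the constructor and the keyword arguments, the Python call shrinks to the spatial hyper-parameters, and the 1D specialization is now registered. A sketch under the same `aidge_core` assumption:

```python
import aidge_core

# no_bias is no longer a constructor argument; stride and dilation
# default to 1 in every dimension.
op = aidge_core.ConvOp2D(kernel_dims=[3, 3])
print(op.Type)  # static type string, per def_readonly_static above

# Newly enabled 1D variant.
op1d = aidge_core.ConvOp1D(kernel_dims=[5])
```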
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index ce286094d6606d8b7161acf9e3fb3c6cbcbb88c9..377d0fca5d78dff20b8df0cc0d5521eb9a3685a2 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -28,21 +28,19 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
   const std::string pyClassName("ConvDepthWiseOp" + std::to_string(DIM) + "D");
-  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
+  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &,
-                const std::array<DimSize_t, DIM> &,
-                bool>(),
+                const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("dilation_dims"),
-        py::arg("no_bias"))
+        py::arg("dilation_dims"))
   .def_static("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
-  .def_static("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName)
-  .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels);
+  .def("nb_channels", &ConvDepthWise_Op<DIM>::nbChannels)
+  .def_readonly_static("Type", &ConvDepthWise_Op<DIM>::Type);
 
   declare_registrable<ConvDepthWise_Op<DIM>>(m, pyClassName);
   m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
@@ -67,7 +65,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
 
 
 void init_ConvDepthWise(py::module &m) {
-//   declare_ConvDepthWiseOp<1>(m);
+  declare_ConvDepthWiseOp<1>(m);
   declare_ConvDepthWiseOp<2>(m);
 //   declare_ConvDepthWiseOp<3>(m);
 
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index 9dcb98a54596f32525d2880dd6e955d4643f6e7c..d2ad60725533be0b9db269ce5e022ac8560e1d91 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -22,7 +22,8 @@ void init_Div(py::module& m) {
     py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance())
         .def(py::init<>())
         .def_static("get_inputs_name", &Div_Op::getInputsName)
-        .def_static("get_outputs_name", &Div_Op::getOutputsName);
+        .def_static("get_outputs_name", &Div_Op::getOutputsName)
+        .def_readonly_static("Type", &Div_Op::Type);
     declare_registrable<Div_Op>(m, "DivOp");
     m.def("Div", &Div, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index c248753ca8de46293d49ce4dc614ae258c313256..6ca25f9569a53505385f37a02f3ab478a11f82a6 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -22,7 +22,8 @@ void init_Erf(py::module& m) {
     py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance())
         .def(py::init<>())
         .def_static("get_inputs_name", &Erf_Op::getInputsName)
-        .def_static("get_outputs_name", &Erf_Op::getOutputsName);
+        .def_static("get_outputs_name", &Erf_Op::getOutputsName)
+        .def_readonly_static("Type", &Erf_Op::Type);
 
     declare_registrable<Erf_Op>(m, "ErfOp");
 
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 6cff90d0ad3aacf4cf8a465408eb490e3f21abda..2e9c41a16292d1e643415182d660b80105369d33 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -15,18 +15,29 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 namespace Aidge {
 
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, Attributes, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
-    .def(py::init<bool>(), py::arg("no_bias"))
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
+    .def(py::init<>())
     .def_static("get_inputs_name", &FC_Op::getInputsName)
     .def_static("get_outputs_name", &FC_Op::getOutputsName)
-    .def_static("attributes_name", &FC_Op::staticGetAttrsName)
-    .def("out_channels", &FC_Op::outChannels);
+    .def_readonly_static("Type", &FC_Op::Type)
+    .def("out_channels", &FC_Op::outChannels)
+    // .def_property_readonly("a", &FC_Op::get_a)
+    // .def_property_readonly("a", [](const FC_Op& self) {
+    //     const AttrDict a = AttrDict(self.get_a());
+    //     return a;
+    // })
+    .def("__repr__", [](FC_Op& b) {
+        return fmt::format("Operator(type='{}')", b.Type);
+    });
 
   declare_registrable<FC_Op>(m, "FCOp");
 
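The FC binding gains a `__repr__` built with fmt. Roughly what it yields from Python (the exact `Type` string is defined in FC.hpp, so the output below is illustrative):

```python
import aidge_core

fc = aidge_core.FCOp()  # the no_bias constructor argument is gone here too
print(repr(fc))         # something like: Operator(type='FC')
```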
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 83891624deede4b1f6f6f0c649358e9ed8de0a24..0aac0bbad69abb5faaaea3afd0183573db64b31f 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -21,7 +21,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Gather(py::module& m) {
-    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, Attributes, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
+    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
         .def(py::init<std::int8_t,
                       const std::vector<int64_t>,
                       const std::vector<DimSize_t>>(),
@@ -30,7 +30,7 @@ void init_Gather(py::module& m) {
                 py::arg("gathered_shape"))
         .def_static("get_inputs_name", &Gather_Op::getInputsName)
         .def_static("get_outputs_name", &Gather_Op::getOutputsName)
-        .def_static("attributes_name", &Gather_Op::staticGetAttrsName);
+        .def_readonly_static("Type", &Gather_Op::Type);
 
     declare_registrable<Gather_Op>(m, "GatherOp");
 
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 7078ca3b0e84d7251aadbc6035e348ac9cd72571..6af8fef88e411af0a3ecbe5a771bf7af24de411a 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -11,6 +11,7 @@
 
 #include <stdio.h>
 
+#include <memory>
 #include <string>
 
 #include <pybind11/functional.h>
@@ -27,7 +28,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_GenericOperator(py::module& m) {
-    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp",
+    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, OperatorTensor>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
         .def(py::init<const std::string&, IOIndex_t, IOIndex_t, IOIndex_t>(),
                 py::arg("type"),
@@ -54,10 +55,11 @@ void init_GenericOperator(py::module& m) {
             );
             if (kwargs){
                 std::shared_ptr<GenericOperator_Op> gop = std::static_pointer_cast<GenericOperator_Op>(genericNode->getOperator());
+                std::shared_ptr<DynamicAttributes> attr = std::dynamic_pointer_cast<DynamicAttributes>(gop->attributes());
                 for (auto item : kwargs) {
                     std::string key = py::cast<std::string>(item.first);
                     py::object value = py::reinterpret_borrow<py::object>(item.second);
-                    gop->setAttrPy(key, std::move(value));
+                    attr->setAttrPy(key, std::move(value));
                 }
             }
             return genericNode;
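Since `GenericOperator_Op` no longer inherits `DynamicAttributes` directly, the kwargs path now routes through the attributes object returned by `attributes()`. From Python this is transparent; a hedged sketch, with argument order per the binding above (type, then the three I/O counts, then an optional name):

```python
import aidge_core

# Extra keyword arguments are stored as dynamic attributes on the operator.
node = aidge_core.GenericOperator("MyCustomOp", 1, 0, 1, name="custom",
                                  alpha=0.5, mode="fast")

# Attributes now live behind the operator's attributes object rather than
# on the operator itself (see the `attr` property in pybind_Operator.cpp).
print(node.get_operator().attr)
```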
diff --git a/python_binding/operator/pybind_GlobalAveragePooling.cpp b/python_binding/operator/pybind_GlobalAveragePooling.cpp
index d4d2a921addaef676913cee2a16991ad36686767..f37ac11f5c62d0334e34aff59561b2014d1977bd 100644
--- a/python_binding/operator/pybind_GlobalAveragePooling.cpp
+++ b/python_binding/operator/pybind_GlobalAveragePooling.cpp
@@ -25,7 +25,8 @@ void init_GlobalAveragePooling(py::module &m) {
                              py::multiple_inheritance())
       .def(py::init<>())
       .def_static("get_inputs_name", &GlobalAveragePooling_Op::getInputsName)
-      .def_static("get_outputs_name", &GlobalAveragePooling_Op::getOutputsName);
+      .def_static("get_outputs_name", &GlobalAveragePooling_Op::getOutputsName)
+      .def_readonly_static("Type", &GlobalAveragePooling_Op::Type);
 
   declare_registrable<GlobalAveragePooling_Op>(m, pyClassName);
 
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6d6c03b82ad4f905c41bb0cf849fc4e05fda4cb2
--- /dev/null
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -0,0 +1,82 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+#include <array>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/GridSample.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp" // declare_registrable
+
+
+static typename Aidge::GridSample_Op::Mode stringToInterpolationMode(const std::string& mode) {
+    static std::unordered_map<std::string, typename Aidge::GridSample_Op::Mode> map = {
+        {"linear", Aidge::GridSample_Op::Mode::Linear},
+        {"nearest", Aidge::GridSample_Op::Mode::Nearest},
+        {"cubic", Aidge::GridSample_Op::Mode::Cubic}
+    };
+    return map.at(mode); // throw for unknown mode strings instead of inserting a default entry
+}
+
+static typename Aidge::GridSample_Op::PaddingMode stringToPaddingMode(const std::string& mode) {
+    static std::unordered_map<std::string, typename Aidge::GridSample_Op::PaddingMode> map = {
+        {"zeros", Aidge::GridSample_Op::PaddingMode::Zeros},
+        {"border", Aidge::GridSample_Op::PaddingMode::Border},
+        {"reflection", Aidge::GridSample_Op::PaddingMode::Reflection}
+    };
+    return map.at(mode); // throw for unknown padding-mode strings instead of inserting a default entry
+}
+
+namespace py = pybind11;
+namespace Aidge {
+
+void declare_GridSampleOp(py::module &m) {
+  const std::string pyClassName("GridSampleOp");
+  py::class_<GridSample_Op, std::shared_ptr<GridSample_Op>, OperatorTensor>(
+    m, pyClassName.c_str(),
+    py::multiple_inheritance())
+        .def(py::init([](const std::string& mode,
+                         const std::string& padding_mode,
+                         bool align_corners) {
+            return new GridSample_Op(stringToInterpolationMode(mode), stringToPaddingMode(padding_mode), align_corners);
+        }), py::arg("mode") = "linear",
+            py::arg("padding_mode") = "zeros",
+            py::arg("alogn_corners") = false)
+        .def_static("get_inputs_name", &GridSample_Op::getInputsName)
+        .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
+        .def_readonly_static("Type", &GridSample_Op::Type)
+        ;
+
+  declare_registrable<GridSample_Op>(m, pyClassName);
+
+  m.def("GridSample", [](const std::string& mode,
+                        const std::string& padding_mode,
+                        bool align_corners,
+                        const std::string& name) {
+        return GridSample(stringToInterpolationMode(mode), stringToPaddingMode(padding_mode), align_corners, name);
+    }, py::arg("mode"),
+       py::arg("padding_mode"),
+       py::arg("align_corners"),
+       py::arg("name") = "");
+}
+
+
+void init_GridSample(py::module &m) {
+  declare_GridSampleOp(m);
+}
+
+} // namespace Aidge
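A usage sketch for the new GridSample binding: mode strings are translated by the static lookup helpers above, and with `.at()` an unrecognized string surfaces as a Python exception instead of silently picking a default. Assumes the `aidge_core` module name as before:

```python
import aidge_core

node = aidge_core.GridSample(mode="nearest",
                             padding_mode="border",
                             align_corners=False,
                             name="gs0")
```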
diff --git a/python_binding/operator/pybind_Identity.cpp b/python_binding/operator/pybind_Identity.cpp
index 560f2889f20233ef928557aa230e6dab7f0a5d2b..7599197226b2f8734c989755c6e7d3581a52974d 100644
--- a/python_binding/operator/pybind_Identity.cpp
+++ b/python_binding/operator/pybind_Identity.cpp
@@ -22,7 +22,8 @@ void init_Identity(py::module& m) {
     py::class_<Identity_Op, std::shared_ptr<Identity_Op>, OperatorTensor>(m, "IdentityOp", py::multiple_inheritance())
         .def(py::init<>())
         .def_static("get_inputs_name", &Identity_Op::getInputsName)
-        .def_static("get_outputs_name", &Identity_Op::getOutputsName);
+        .def_static("get_outputs_name", &Identity_Op::getOutputsName)
+        .def_readonly_static("Type", &Identity_Op::Type);
 
     m.def("Identity", &Identity, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index b859b3be5b3dd2606d227a3ca26bd1b4eb8e75a9..e031d3dfb3348c5aec5bd497b40ff261528725ad 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -19,11 +19,11 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Attributes, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
         .def(py::init<float>(), py::arg("negative_slope"))
         .def_static("get_inputs_name", &LeakyReLU_Op::getInputsName)
         .def_static("get_outputs_name", &LeakyReLU_Op::getOutputsName)
-        .def_static("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
+        .def_readonly_static("Type", &LeakyReLU_Op::Type);
     declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Ln.cpp b/python_binding/operator/pybind_Ln.cpp
new file mode 100755
index 0000000000000000000000000000000000000000..50aa755821c257c174c4603404144dab4da26296
--- /dev/null
+++ b/python_binding/operator/pybind_Ln.cpp
@@ -0,0 +1,30 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Ln.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Ln(py::module& m) {
+    py::class_<Ln_Op, std::shared_ptr<Ln_Op>, OperatorTensor>(m, "LnOp", py::multiple_inheritance())
+    .def(py::init<>())
+    .def_static("get_inputs_name", &Ln_Op::getInputsName)
+    .def_static("get_outputs_name", &Ln_Op::getOutputsName)
+    .def_readonly_static("Type", &Ln_Op::Type);
+
+    m.def("Ln", &Ln, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index 09e11f89ea579b5a3aa75f177958d981c53f1dce..f4f175afcb35eb1c10dcd1a1d9d2f2b1691dcfc0 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -24,7 +24,8 @@ void init_MatMul(py::module &m) {
   py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &MatMul_Op::getInputsName)
-    .def_static("get_outputs_name", &MatMul_Op::getOutputsName);
+    .def_static("get_outputs_name", &MatMul_Op::getOutputsName)
+    .def_readonly_static("Type", &MatMul_Op::Type);
   declare_registrable<MatMul_Op>(m, "MatMulOp");
   m.def("MatMul", &MatMul, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index befba918dff37e7d47a76c0c71bf48008244c2d0..b59a4c5574ce5e56af13f9aea13e7514c9402c22 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -27,7 +27,7 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
   const std::string pyClassName("MaxPoolingOp" + std::to_string(DIM) + "D");
-  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
@@ -38,7 +38,7 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("ceil_mode"))
   .def_static("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
   .def_static("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
-  .def_static("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);
+  .def_readonly_static("Type", &MaxPooling_Op<DIM>::Type);
   declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
diff --git a/python_binding/operator/pybind_Memorize.cpp b/python_binding/operator/pybind_Memorize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3ac1122111aae1a9b7eb353399e46562ae51b0b1
--- /dev/null
+++ b/python_binding/operator/pybind_Memorize.cpp
@@ -0,0 +1,33 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/Memorize.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Memorize(py::module& m) {
+    py::class_<Memorize_Op, std::shared_ptr<Memorize_Op>, OperatorTensor>(m, "MemorizeOp", py::multiple_inheritance())
+        .def(py::init<const std::uint32_t>(), py::arg("end_step"))
+        .def_static("get_inputs_name", &Memorize_Op::getInputsName)
+        .def_static("get_outputs_name", &Memorize_Op::getOutputsName);
+
+    declare_registrable<Memorize_Op>(m, "MemorizeOp");
+
+    m.def("Memorize", &Memorize, py::arg("end_step"), py::arg("name") = "");
+}
+
+}  // namespace Aidge
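A brief, hedged sketch for the new Memorize binding; `end_step` is its only constructor argument, and the exact recurrence semantics live in Memorize.hpp rather than in this diff:

```python
import aidge_core

# A memory cell for recurrent graphs: end_step bounds how long the
# scheduler keeps cycling the stored value (semantics per Memorize.hpp).
mem = aidge_core.Memorize(end_step=10, name="mem0")
```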
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index ee3f85b6578054512df7b0087d1a972176cd50a3..d021a79c5ff4e337bebf424465458ddabf056a56 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -51,20 +51,18 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
-                                                         const std::vector<DimSize_t> &dilation_dims,
-                                                         bool no_bias)
+                                                         const std::vector<DimSize_t> &dilation_dims)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-        return PaddedConv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+        return PaddedConv_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("kernel_dims"),
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("no_bias")= false);
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
 }
 
 template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
@@ -93,20 +91,18 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
                                                          const std::vector<DimSize_t>& kernel_dims,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims,
-                                                         const std::vector<DimSize_t> &dilation_dims,
-                                                         bool no_bias)
+                                                         const std::vector<DimSize_t> &dilation_dims)
     {
         AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
         AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
         AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
         AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
-        return PaddedConvDepthWise_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()), no_bias);
+        return PaddedConvDepthWise_Op<DIM>(to_array<DIM>(kernel_dims.begin()), to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("kernel_dims"),
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(2*DIM,0),
-       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1),
-       py::arg("no_bias") = false);
+       py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
 
 }
 
@@ -180,15 +176,14 @@ void declare_LSTMOp(py::module &m) {
        py::arg("nobias") = false,
        py::arg("name") = "");
   m.def("LSTMOp", &LSTM_Op,
-       py::arg("seq_length"),
-       py::arg("nobias") = false);
+       py::arg("seq_length"));
 }
 
 void init_MetaOperatorDefs(py::module &m) {
-//   declare_PaddedConvOp<1>(m);
+  declare_PaddedConvOp<1>(m);
   declare_PaddedConvOp<2>(m);
 //   declare_PaddedConvOp<3>(m);
-//   declare_PaddedConvDepthWiseOp<1>(m);
+  declare_PaddedConvDepthWiseOp<1>(m);
   declare_PaddedConvDepthWiseOp<2>(m);
 //   declare_PaddedConvDepthWiseOp<3>(m);
 //   declare_PaddedAvgPoolingOp<1>(m);
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 1658b0d959c0882d53e078f6d68b4474b34c739e..23949b5fe3b22edf5b7105abd0de29b727740e35 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -22,7 +22,8 @@ void init_Mul(py::module& m) {
     py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Mul_Op::getInputsName)
-    .def_static("get_outputs_name", &Mul_Op::getOutputsName);
+    .def_static("get_outputs_name", &Mul_Op::getOutputsName)
+    .def_readonly_static("Type", &Mul_Op::Type);
     declare_registrable<Mul_Op>(m, "MulOp");
     m.def("Mul", &Mul, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index e00f70413614a96919c2a068303b3fbc3f6eca8d..6ffbdd007b9f929ccac18de12f2319dcd68b1eda 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -24,26 +24,47 @@
 namespace py = pybind11;
 namespace Aidge {
 void init_Operator(py::module& m){
+    py::enum_<OperatorType>(m, "OperatorType")
+        .value("Data", OperatorType::Data)
+        .value("Tensor", OperatorType::Tensor);
+
+    py::enum_<InputCategory>(m, "InputCategory")
+        .value("Data", InputCategory::Data)
+        .value("Param", InputCategory::Param)
+        .value("OptionalData", InputCategory::OptionalData)
+        .value("OptionalParam", InputCategory::OptionalParam);
+
     py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
+    .def("__repr__", &Operator::repr)
     .def("backend", &Operator::backend)
-    .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput), py::arg("outputIdx"), py::arg("data"))
+    .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput, py::const_), py::arg("outputIdx"), py::arg("data"))
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
     .def("get_raw_output", &Operator::getRawOutput, py::arg("outputIdx"))
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
     .def("get_raw_input", &Operator::getRawInput, py::arg("inputIdx"))
     .def("nb_inputs", &Operator::nbInputs)
-    .def("nb_data", &Operator::nbData)
-    .def("nb_param", &Operator::nbParam)
     .def("nb_outputs", &Operator::nbOutputs)
+    .def("input_category", &Operator::inputCategory, py::arg("idx"),
+    R"mydelimiter(
+    Category of a specific input (Data or Param, optional or not).
+    Data inputs exclude inputs expecting parameters (weights or bias).
+
+    :rtype: InputCategory
+    )mydelimiter")
     .def("associate_input", &Operator::associateInput, py::arg("inputIdx"), py::arg("data"))
     .def("set_datatype", &Operator::setDataType, py::arg("dataType"))
-    .def("set_backend", &Operator::setBackend, py::arg("name"), py::arg("device") = 0)
+    .def("set_backend", py::overload_cast<const std::string&, DeviceIdx_t>(&Operator::setBackend), py::arg("name"), py::arg("device") = 0)
+    .def("set_backend", py::overload_cast<const std::vector<std::pair<std::string, DeviceIdx_t>>&>(&Operator::setBackend), py::arg("backends"))
     .def("forward", &Operator::forward)
    // py::keep_alive prevents Python from garbage-collecting the implementation lambda as long as the Operator is alive!
     .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>())
+    .def("type", &Operator::type)
     .def("get_impl", &Operator::getImpl)
     .def("get_hook", &Operator::getHook)
     .def("add_hook", &Operator::addHook)
+    .def_property_readonly("attr", &Operator::attributes)
+    .def("set_back_edges", &Operator::setBackEdges, py::arg("input_indexes"))
+    .def("is_back_edge", &Operator::isBackEdge, py::arg("input_index"))
     ;
 }
 }
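The removed `nb_data`/`nb_param` counters are superseded by the per-input `input_category` query together with the new `InputCategory` enum, and `attr` exposes the operator's attributes object. A sketch of the resulting idiom:

```python
import aidge_core

op = aidge_core.FCOp()
for idx in range(op.nb_inputs()):
    cat = op.input_category(idx)
    if cat == aidge_core.InputCategory.Param:
        print(f"input {idx} expects a parameter (weight or bias)")
    elif cat == aidge_core.InputCategory.Data:
        print(f"input {idx} expects data")
```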
diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp
index 4d4541ab36468bc6b531e0242888dd70c5afc71f..8c515e321207605c20acc9e5b02271906c9707d1 100644
--- a/python_binding/operator/pybind_OperatorTensor.cpp
+++ b/python_binding/operator/pybind_OperatorTensor.cpp
@@ -28,7 +28,7 @@ void init_OperatorTensor(py::module& m){
     .def("get_output", &OperatorTensor::getOutput, py::arg("outputIdx"))
     .def("get_input", &OperatorTensor::getInput, py::arg("inputIdx"))
 
-    .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data"))
+    .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&) const) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data"))
     .def("set_input", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setInput, py::arg("outputIdx"), py::arg("data"))
     .def("forward_dims", &OperatorTensor::forwardDims, py::arg("allow_data_dependency") = false)
     .def("dims_forwarded", &OperatorTensor::dimsForwarded)
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 2bb635635c0be456071507aaf0bfbb76590f9a66..04882b7f5b86c7c09ed8b8e5a15c4bfabd03bb55 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -26,7 +26,7 @@ namespace Aidge {
 
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
   const std::string pyClassName("PadOp" + std::to_string(DIM) + "D");
-  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
+  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
@@ -37,7 +37,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("borderValue") = 0.0)
     .def_static("get_inputs_name", &Pad_Op<DIM>::getInputsName)
     .def_static("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
-    .def_static("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
+    .def_readonly_static("Type", &Pad_Op<DIM>::Type)
     ;
   declare_registrable<Pad_Op<DIM>>(m, pyClassName);
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
@@ -63,6 +63,6 @@ void init_Pad(py::module &m) {
     .export_values();
   declare_PadOp<1>(m);
   declare_PadOp<2>(m);
-  declare_PadOp<3>(m);
+  //declare_PadOp<3>(m);
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index d8873636d029435706cfb9766262ae0b8409d8a5..2040f642bbfc0428be48a6f7ec21fa3aed20a371 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -19,10 +19,11 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Pop(py::module& m) {
-    py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor, Attributes>(m, "PopOp", py::multiple_inheritance())
+    py::class_<Pop_Op, std::shared_ptr<Pop_Op>, OperatorTensor>(m, "PopOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Pop_Op::getInputsName)
-    .def_static("get_outputs_name", &Pop_Op::getOutputsName);
+    .def_static("get_outputs_name", &Pop_Op::getOutputsName)
+    .def_readonly_static("Type", &Pop_Op::Type);
 
     m.def("Pop", &Pop, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index e5d67542cd1acc5b2982081e4cf3a91948542147..ec29e3faa7c3efbc2b2dbe23372f57c30568b769 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -22,7 +22,8 @@ void init_Pow(py::module& m) {
     py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Pow_Op::getInputsName)
-    .def_static("get_outputs_name", &Pow_Op::getOutputsName);
+    .def_static("get_outputs_name", &Pow_Op::getOutputsName)
+    .def_readonly_static("Type", &Pow_Op::Type);
     declare_registrable<Pow_Op>(m, "PowOp");
 
     m.def("Pow", &Pow, py::arg("name") = "");
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 71347554fdc9cd937b1f14df16e370db2f77a267..3467ed970c3f830298b46897717d123a0ab11800 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -31,15 +31,15 @@ void declare_Producer(py::module &m) {
 
 
 void init_Producer(py::module &m) {
-    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, Attributes, OperatorTensor>(
+    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, OperatorTensor>(
         m,
         "ProducerOp",
         py::multiple_inheritance())
-    .def(py::init<const std::shared_ptr<Tensor>, bool>(), py::arg("tensor"), py::arg("constant"))
-    .def("dims", &Producer_Op::dims)
-    .def_static("get_inputs_name", &Producer_Op::getInputsName)
-    .def_static("get_outputs_name", &Producer_Op::getOutputsName)
-    .def_static("attributes_name", &Producer_Op::staticGetAttrsName);
+        .def(py::init<const std::shared_ptr<Tensor>, bool>(), py::arg("tensor"), py::arg("constant"))
+        .def("dims", &Producer_Op::dims)
+        .def_static("get_inputs_name", &Producer_Op::getInputsName)
+        .def_static("get_outputs_name", &Producer_Op::getOutputsName)
+        .def_readonly_static("Type", &Producer_Op::Type);
 
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(
                                         const std::shared_ptr<Tensor>,
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index d611523f15a7007b0e9ab9cce323ed9a57d8ecdf..79720845cf21103d3a9257880e8d2068673e36f0 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -22,7 +22,8 @@ void init_ReLU(py::module& m) {
     py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &ReLU_Op::getInputsName)
-    .def_static("get_outputs_name", &ReLU_Op::getOutputsName);
+    .def_static("get_outputs_name", &ReLU_Op::getOutputsName)
+    .def_readonly_static("Type", &ReLU_Op::Type);
     declare_registrable<ReLU_Op>(m, "ReLUOp");
 
     m.def("ReLU", &ReLU, py::arg("name") = "");
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 00201c9bdf4ecd7ad76202c2fe78180317b736dd..028e45755fb10bb01602959f721cf003cb1e5136 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -26,24 +26,51 @@ namespace Aidge {
 
 void declare_ReduceMeanOp(py::module &m) {
   const std::string pyClassName("ReduceMeanOp");
-  py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, Attributes, OperatorTensor>(
-    m, pyClassName.c_str(), py::multiple_inheritance())
-    .def(py::init<std::vector<std::int32_t>, DimSize_t>(), py::arg("axes"), py::arg("keep_dims"))
+  py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, OperatorTensor>(
+    m, pyClassName.c_str(), py::multiple_inheritance(),
+      R"mydelimiter(
+        Initialize a ReduceMean operator.
+            :param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
+                         where r is the rank of the input tensor.
+            :type axes: List[int]
+            :param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+                              the reduced dimensions are removed.
+            :type keep_dims: bool
+            :param noop_with_empty_axes: If True and no axes are given, the operator simply copies its
+                                         input; if False (default), an empty axes list reduces over all dimensions.
+            :type noop_with_empty_axes: bool
+		)mydelimiter")
+    .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes") = std::vector<std::int32_t>(), py::arg("keep_dims") = true, py::arg("noop_with_empty_axes") = false)
     .def_static("get_inputs_name", &ReduceMean_Op::getInputsName)
     .def_static("get_outputs_name", &ReduceMean_Op::getOutputsName)
-    .def_static("attributes_name", &ReduceMean_Op::staticGetAttrsName)
+    .def_readonly_static("Type", &ReduceMean_Op::Type)
     ;
   declare_registrable<ReduceMean_Op>(m, pyClassName);
 
   m.def("ReduceMean", [](const std::vector<int>& axes,
-                                                                DimSize_t keepDims,
-                                                                const std::string& name) {
+                          bool keepDims,
+                          bool noopWithEmptyAxes,
+                          const std::string& name) {
         // AIDGE_ASSERT(axes.size() == DIM, "axes size [{}] does not match DIM [{}]", axes.size(), DIM);
 
-        return ReduceMean(axes, keepDims, name);
-    }, py::arg("axes"),
-       py::arg("keep_dims") = 1,
-       py::arg("name") = "");
+        return ReduceMean(axes, keepDims, noopWithEmptyAxes, name);
+    }, py::arg("axes") = std::vector<std::int32_t>(),
+       py::arg("keep_dims") = true,
+       py::arg("noop_with_empty_axes") = false,
+       py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing a ReduceMean operator.
+            :param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
+                         where r is the rank of the input tensor.
+            :type axes: List[int]
+            :param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+                              the reduced dimensions are removed.
+            :type keep_dims: bool
+            :param noop_with_empty_axes: If True and no axes are given, the operator simply copies its
+                                         input; if False (default), an empty axes list reduces over all dimensions.
+            :type noop_with_empty_axes: bool
+            :param name: name of the node.
+		)mydelimiter");
 }
 
 
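The reworked ReduceMean factory defaults `axes` to an empty list and adds `noop_with_empty_axes`, which decides what an empty axes list means. A sketch:

```python
import aidge_core

# Reduce over axis 1, keeping it as a size-1 dimension.
rm = aidge_core.ReduceMean(axes=[1], keep_dims=True, name="rm0")

# Empty axes: False (default) reduces over all dimensions,
# True turns the operator into a plain copy of its input.
rm_copy = aidge_core.ReduceMean(axes=[], noop_with_empty_axes=True)
```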
diff --git a/python_binding/operator/pybind_ReduceSum.cpp b/python_binding/operator/pybind_ReduceSum.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eaa57ef1c663a03cfd59ce02c13c3c7028b69e01
--- /dev/null
+++ b/python_binding/operator/pybind_ReduceSum.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <array>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/ReduceSum.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_ReduceSum(py::module &m) {
+  const std::string pyClassName("ReduceSumOp");
+  py::class_<ReduceSum_Op, std::shared_ptr<ReduceSum_Op>, OperatorTensor>(
+    m, pyClassName.c_str(), py::multiple_inheritance(),
+      R"mydelimiter(
+        Initialize a ReduceSum operator.
+            :param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
+                         where r is the rank of the input tensor.
+            :type axes: List[int]
+            :param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+                              the reduced dimensions are removed.
+            :type keep_dims: bool
+            :param noop_with_empty_axes: If True and no axes are given, the operator simply copies its
+                                         input; if False (default), an empty axes list reduces over all dimensions.
+            :type noop_with_empty_axes: bool
+		)mydelimiter")
+    .def(py::init<std::vector<std::int32_t>, bool, bool>(), py::arg("axes"), py::arg("keep_dims"), py::arg("noop_with_empty_axes"))
+    .def_static("get_inputs_name", &ReduceSum_Op::getInputsName)
+    .def_static("get_outputs_name", &ReduceSum_Op::getOutputsName)
+    ;
+  declare_registrable<ReduceSum_Op>(m, pyClassName);
+
+  m.def("ReduceSum", [](const std::vector<int>& axes,
+                        bool keepDims,
+                        bool noopWithEmptyAxes,
+                        const std::string& name) {
+        return ReduceSum(axes, keepDims, noopWithEmptyAxes, name);
+    }, py::arg("axes") = std::vector<std::int32_t>(),
+       py::arg("keep_dims") = true,
+       py::arg("noop_with_empty_axes") = false,
+       py::arg("name") = "",
+	   R"mydelimiter(
+        Initialize a node containing a ReduceSum operator.
+            :param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
+                         where r is the rank of the input tensor.
+            :type axes: List[int]
+            :param keep_dims: If True (default), retains the reduced dimensions with size 1. If False,
+                              the reduced dimensions are removed.
+            :type keep_dims: bool
+            :param noop_with_empty_axes: If True and no axes are given, the operator simply copies its
+                                         input; if False (default), an empty axes list reduces over all dimensions.
+            :type noop_with_empty_axes: bool
+            :param name: name of the node.
+		)mydelimiter");
+}
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index 5a07de2f00399b761c0652e5dcdccdc0d49938de..c0b0e8c30ef127d5cdcaf24ded75b83f06c86588 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -19,10 +19,11 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Reshape(py::module& m) {
-    py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, Attributes, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
-    .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
-    .def_static("get_inputs_name", &Reshape_Op::getInputsName)
-    .def_static("get_outputs_name", &Reshape_Op::getOutputsName);
+    py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
+        .def(py::init<const std::vector<std::int64_t>&, bool>(), py::arg("shape"), py::arg("allowzero"))
+        .def_static("get_inputs_name", &Reshape_Op::getInputsName)
+        .def_static("get_outputs_name", &Reshape_Op::getOutputsName)
+        .def_readonly_static("Type", &Reshape_Op::Type);
     declare_registrable<Reshape_Op>(m, "ReshapeOp");
     m.def("Reshape", &Reshape, py::arg("shape") = std::vector<std::int64_t>(), py::arg("allowzero") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..35321f525e486107af3715ce1c09f48b7c5cd60f
--- /dev/null
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -0,0 +1,30 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Resize.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Resize(py::module& m) {
+    py::class_<Resize_Op, std::shared_ptr<Resize_Op>, OperatorTensor>(m, "ResizeOp", py::multiple_inheritance())
+        .def_static("get_inputs_name", &Resize_Op::getInputsName)
+        .def_static("get_outputs_name", &Resize_Op::getOutputsName)
+        .def_readonly_static("Type", &Resize_Op::Type);
+
+    declare_registrable<Resize_Op>(m, "ResizeOp");
+
+    m.def("Resize", &Resize, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
index 0660cdb003ed4d5946f54786c0a51d9051d83d5a..22e8011a9cd37f80a0678f2629809d4412ba6fd2 100644
--- a/python_binding/operator/pybind_Scaling.cpp
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -21,11 +21,11 @@ namespace Aidge {
 
 void init_Scaling(py::module& m)
 {
-    py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, Attributes, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance())
-    .def(py::init<float, size_t, bool>(), py::arg("scaling_factor"), py::arg("nb_bits"), py::arg("is_output_unsigned"))
-    .def_static("get_inputs_name", &Scaling_Op::getInputsName)
-    .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
-    .def_static("attributes_name", &Scaling_Op::staticGetAttrsName);
+    py::class_<Scaling_Op, std::shared_ptr<Scaling_Op>, OperatorTensor>(m, "ScalingOp", py::multiple_inheritance())
+        .def(py::init<float, size_t, bool>(), py::arg("scaling_factor"), py::arg("nb_bits"), py::arg("is_output_unsigned"))
+        .def_static("get_inputs_name", &Scaling_Op::getInputsName)
+        .def_static("get_outputs_name", &Scaling_Op::getOutputsName)
+        .def_readonly_static("Type", &Scaling_Op::Type);
     declare_registrable<Scaling_Op>(m, "ScalingOp");
     m.def("Scaling", &Scaling, py::arg("scaling_factor") = 1.0f, py::arg("nb_bits") = 8, py::arg("is_output_unsigned") = true, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
index dbae1d95d81ef65d27167bcd0774366dcc41b325..b3511f31eeab7d5df679d16c3bfb89f51d75cdbe 100644
--- a/python_binding/operator/pybind_Shape.cpp
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -9,11 +9,10 @@
  *
  ********************************************************************************/
 
+#include <cstdint>  // std::int64_t
+
 #include <pybind11/pybind11.h>
-#include <string>
-#include <vector>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Shape.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 
@@ -21,14 +20,14 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Shape(py::module& m) {
-    py::class_<Shape_Op, std::shared_ptr<Shape_Op>, Attributes, OperatorTensor>(m, "ShapeOp", py::multiple_inheritance())
-        .def(py::init<std::int64_t,
-                      std::int64_t>(),
+    py::class_<Shape_Op, std::shared_ptr<Shape_Op>, OperatorTensor>(m, "ShapeOp", py::multiple_inheritance())
+        .def(py::init<const std::int64_t,
+                      const std::int64_t>(),
                 py::arg("start"),
                 py::arg("end"))
         .def_static("get_inputs_name", &Shape_Op::getInputsName)
         .def_static("get_outputs_name", &Shape_Op::getOutputsName)
-        .def_static("attributes_name", &Shape_Op::staticGetAttrsName);
+        .def_readonly_static("Type", &Shape_Op::Type);
 
     declare_registrable<Shape_Op>(m, "ShapeOp");
 
diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp
index 0ba94c73fcd1fb435194f8485567771a147ec616..db7fc7bfb60ff8360933e5f84ab54d4cec8df724 100644
--- a/python_binding/operator/pybind_Sigmoid.cpp
+++ b/python_binding/operator/pybind_Sigmoid.cpp
@@ -22,7 +22,8 @@ void init_Sigmoid(py::module& m) {
     py::class_<Sigmoid_Op, std::shared_ptr<Sigmoid_Op>, OperatorTensor>(m, "SigmoidOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Sigmoid_Op::getInputsName)
-    .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName);
+    .def_static("get_outputs_name", &Sigmoid_Op::getOutputsName)
+    .def_readonly_static("Type", &Sigmoid_Op::Type);
 
     m.def("Sigmoid", &Sigmoid, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index b87cc8da4874c666de21a6e798a66e3c7fad9c10..c8cae2592b966fff7ebfde1e5905ed31d5b22455 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -30,7 +30,8 @@ void init_Slice(py::module& m) {
                   py::arg("axes"),
                   py::arg("steps"))
     .def_static("get_inputs_name", &Slice_Op::getInputsName)
-    .def_static("get_outputs_name", &Slice_Op::getOutputsName);
+    .def_static("get_outputs_name", &Slice_Op::getOutputsName)
+    .def_readonly_static("Type", &Slice_Op::Type);
     declare_registrable<Slice_Op>(m, "SliceOp");
 
     m.def("Slice",
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index becb6f35fb7413c042f6a902aadb602e4547ee01..3b98ab9dfa1590093c567a363f67d32d613651a2 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -20,11 +20,11 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Softmax(py::module& m) {
-    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Attributes, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
-    .def(py::init<std::size_t>(), py::arg("axis"))
-    .def_static("get_inputs_name", &Softmax_Op::getInputsName)
-    .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
-    .def_static("attributes_name", &Softmax_Op::staticGetAttrsName);
+    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
+        .def(py::init<std::int32_t>(), py::arg("axis"))
+        .def_static("get_inputs_name", &Softmax_Op::getInputsName)
+        .def_static("get_outputs_name", &Softmax_Op::getOutputsName)
+        .def_readonly_static("Type", &Softmax_Op::Type);
     declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
index 6efc123864f21bf8ea02008b29fe59f31685f17c..9b3feda9f791e65a9c32f2bda3da4da450838b40 100644
--- a/python_binding/operator/pybind_Split.cpp
+++ b/python_binding/operator/pybind_Split.cpp
@@ -21,14 +21,14 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Split(py::module& m) {
-    py::class_<Split_Op, std::shared_ptr<Split_Op>, Attributes, OperatorTensor>(m, "SplitOp", py::multiple_inheritance())
+    py::class_<Split_Op, std::shared_ptr<Split_Op>, OperatorTensor>(m, "SplitOp", py::multiple_inheritance())
         .def(py::init<DimSize_t, std::int8_t, std::vector<DimSize_t>&>(),
                 py::arg("nb_outputs"),
                 py::arg("axis"),
                 py::arg("split"))
         .def_static("get_inputs_name", &Split_Op::getInputsName)
         .def_static("get_outputs_name", &Split_Op::getOutputsName)
-        .def_static("attributes_name", &Split_Op::staticGetAttrsName);
+        .def_readonly_static("Type", &Split_Op::Type);
 
     declare_registrable<Split_Op>(m, "SplitOp");
 
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index 9425eba06574c73339e8e4628ffded3449a8b4ab..ba0c5aab02349df4c50f960bbeb7df2082aa9233 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -11,7 +11,6 @@
 
 #include <pybind11/pybind11.h>
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 
@@ -22,7 +21,8 @@ void init_Sqrt(py::module& m) {
     py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Sqrt_Op::getInputsName)
-    .def_static("get_outputs_name", &Sqrt_Op::getOutputsName);
+    .def_static("get_outputs_name", &Sqrt_Op::getOutputsName)
+    .def_readonly_static("Type", &Sqrt_Op::Type);
     declare_registrable<Sqrt_Op>(m, "SqrtOp");
     m.def("Sqrt", &Sqrt, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ca90fb46af40189dbe66c320ecdd237470ffa112
--- /dev/null
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -0,0 +1,52 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+#include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Squeeze.hpp"
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Squeeze(py::module &m) {
+  py::class_<Squeeze_Op, std::shared_ptr<Squeeze_Op>, OperatorTensor>(
+      m, "SqueezeOp", py::multiple_inheritance(),
+		R"mydelimiter(
+        Initialize a Squeeze operator.
+        :param axes: axes to squeeze, each in [-r, r-1]
+                     with r = input_tensor.nbDims();
+                     values must fit in an int8 ([-128, 127])
+        :type axes: List[int]
+		)mydelimiter")
+      .def("get_inputs_name", &Squeeze_Op::getInputsName)
+      .def("get_outputs_name", &Squeeze_Op::getOutputsName)
+      .def("axes", &Squeeze_Op::axes);
+  // Here we bind the constructor of the Squeeze Node. We add an argument
+  // for each attribute of the operator (here we only have 'axes') and
+  // the last argument is the node's name.
+  m.def("Squeeze", &Squeeze, py::arg("axes") = std::vector<int8_t>({}),
+        py::arg("name") = "",
+        R"mydelimiter(
+    Initialize a node containing a Squeeze operator.
+    :param axes: axes to squeeze, each in [-r, r-1]
+                 with r = input_tensor.nbDims();
+                 values must fit in an int8 ([-128, 127])
+    :type axes: List[int]
+    :param name: name of the node.
+)mydelimiter");
+}
+} // namespace Aidge
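A usage sketch for the new Squeeze node. Following the usual ONNX convention (the exact behavior is defined in Squeeze.hpp, not in this diff), an empty `axes` list squeezes every size-1 dimension:

```python
import aidge_core

# Drop dimension 0 if its size is 1; negative indices count from the end.
sq = aidge_core.Squeeze(axes=[0], name="sq0")

# Default (empty) axes: squeeze all size-1 dimensions.
sq_all = aidge_core.Squeeze()
```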
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index 752490a72bc35ec8a0ab08dd8d51a31c887b4dc6..52a622f0fdf6480a375d17c9729017fca32b3092 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -22,7 +22,8 @@ void init_Sub(py::module& m) {
     py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Sub_Op::getInputsName)
-    .def_static("get_outputs_name", &Sub_Op::getOutputsName);
+    .def_static("get_outputs_name", &Sub_Op::getOutputsName)
+    .def_readonly_static("Type", &Sub_Op::Type);
     declare_registrable<Sub_Op>(m, "SubOp");
     m.def("Sub", &Sub, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Tanh.cpp b/python_binding/operator/pybind_Tanh.cpp
index 74cde8dd3831c8d29ca87e2314afc27276ec025f..ded15ee78951d389d614d932e4a9c22bf310b814 100644
--- a/python_binding/operator/pybind_Tanh.cpp
+++ b/python_binding/operator/pybind_Tanh.cpp
@@ -22,7 +22,8 @@ void init_Tanh(py::module& m) {
     py::class_<Tanh_Op, std::shared_ptr<Tanh_Op>, OperatorTensor>(m, "TanhOp", py::multiple_inheritance())
     .def(py::init<>())
     .def_static("get_inputs_name", &Tanh_Op::getInputsName)
-    .def_static("get_outputs_name", &Tanh_Op::getOutputsName);
+    .def_static("get_outputs_name", &Tanh_Op::getOutputsName)
+    .def_readonly_static("Type", &Tanh_Op::Type);
 
     m.def("Tanh", &Tanh, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index f3c000291dfca954bbed93b9400ac0bd8df8025b..930dd95f3c3e4b10d2b4f8b496dfbbbcc6822050 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -27,12 +27,12 @@ namespace Aidge {
 
 void declare_Transpose(py::module &m) {
   const std::string pyClassName("TransposeOp");
-  py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, Attributes, OperatorTensor>(
+  py::class_<Transpose_Op, std::shared_ptr<Transpose_Op>, OperatorTensor>(
     m, "TransposeOp", py::multiple_inheritance())
-  .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order"))
-  .def_static("get_inputs_name", &Transpose_Op::getInputsName)
-  .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
-  .def_static("attributes_name", &Transpose_Op::staticGetAttrsName);
+    .def(py::init<const std::vector<DimSize_t>&>(), py::arg("output_dims_order"))
+    .def_static("get_inputs_name", &Transpose_Op::getInputsName)
+    .def_static("get_outputs_name", &Transpose_Op::getOutputsName)
+    .def_readonly_static("Type", &Transpose_Op::Type);
   declare_registrable<Transpose_Op>(m, pyClassName);
   m.def("Transpose", &Transpose, py::arg("output_dims_order"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..40c179c4064f07896113732a7e3c32db5f19c060
--- /dev/null
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -0,0 +1,48 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Unsqueeze.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Unsqueeze(py::module &m) {
+  py::class_<Unsqueeze_Op, std::shared_ptr<Unsqueeze_Op>, OperatorTensor>(
+      m, "UnsqueezeOp", py::multiple_inheritance(),
+      R"mydelimiter(
+		Initialize an Unsqueeze operator.
+		:param axes: axes to unsqueeze, each in [-r; r-1]
+					 with r = input_tensor.nbDims() + len(axes)
+		:type axes: List[int]
+		)mydelimiter")
+      // Here we bind the methods of Unsqueeze_Op that we will want to access
+      .def_static("get_inputs_name", &Unsqueeze_Op::getInputsName)
+      .def_static("get_outputs_name", &Unsqueeze_Op::getOutputsName)
+      .def("axes", &Unsqueeze_Op::axes);
+  // Here we bind the constructor of the Unsqueeze Node. We add an argument for
+  // each attribute of the operator (in here we only have 'axes') and the last
+  // argument is the node's name.
+  m.def("Unsqueeze", &Unsqueeze, py::arg("axes") = std::vector<int8_t>({}),
+        py::arg("name") = "",
+        R"mydelimiter(
+    Initialize a node containing an Unsqueeze operator.
+	:param axes: axes to unsqueeze, each in [-r; r-1]
+				 with r = input_tensor.nbDims() + len(axes)
+	:type axes: List[int]
+    :param name: name of the node.
+)mydelimiter");
+}
+} // namespace Aidge
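For reference, a short sketch of the axes convention documented above (values are illustrative):

    import aidge_core

    # For a rank-2 input, r = 2 + len(axes) = 3, so valid axes lie in [-3; 2];
    # axes=[1] inserts a new size-1 dimension at position 1.
    unsqueeze = aidge_core.Unsqueeze(axes=[1], name="unsqueeze0")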
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 42e29fd43324d12ea4cac2c16c88a056903b7c54..52c8cc8a0199ac64b0f7bae97442178614ea5622 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -21,16 +21,21 @@ void init_Data(py::module&);
 void init_Database(py::module&);
 void init_DataProvider(py::module&);
 void init_Tensor(py::module&);
-void init_OperatorImpl(py::module&);
+void init_TensorImpl(py::module&);
 void init_Attributes(py::module&);
+void init_OperatorImpl(py::module&);
 void init_Log(py::module&);
 void init_Operator(py::module&);
 void init_OperatorTensor(py::module&);
 
 void init_Add(py::module&);
+void init_And(py::module&);
+void init_ArgMax(py::module&);
 void init_AvgPooling(py::module&);
 void init_BatchNorm(py::module&);
+void init_BitShift(py::module&);
 void init_Concat(py::module&);
+void init_ConstantOfShape(py::module&);
 void init_Conv(py::module&);
 void init_ConvDepthWise(py::module&);
 void init_Div(py::module&);
@@ -39,18 +44,23 @@ void init_FC(py::module&);
 void init_Gather(py::module&);
 void init_GenericOperator(py::module&);
 void init_GlobalAveragePooling(py::module&);
+void init_GridSample(py::module&);
+void init_Identity(py::module&);
 void init_LeakyReLU(py::module&);
 void init_MatMul(py::module&);
 void init_MaxPooling(py::module&);
+void init_Memorize(py::module&);
 void init_MetaOperatorDefs(py::module&);
 void init_Mul(py::module&);
-void init_Producer(py::module&);
 void init_Pad(py::module&);
 void init_Pop(py::module&);
 void init_Pow(py::module&);
-void init_ReduceMean(py::module&);
+void init_Producer(py::module&);
 void init_ReLU(py::module&);
+void init_ReduceMean(py::module&);
+void init_ReduceSum(py::module&);
 void init_Reshape(py::module&);
+void init_Resize(py::module&);
 void init_Scaling(py::module&);
 void init_Shape(py::module&);
 void init_Sigmoid(py::module&);
@@ -58,10 +68,11 @@ void init_Slice(py::module&);
 void init_Softmax(py::module&);
 void init_Split(py::module&);
 void init_Sqrt(py::module&);
+void init_Squeeze(py::module&);
 void init_Sub(py::module&);
 void init_Tanh(py::module&);
 void init_Transpose(py::module&);
-void init_Identity(py::module&);
+void init_Unsqueeze(py::module&);
 
 void init_Node(py::module&);
 void init_GraphView(py::module&);
@@ -76,6 +87,7 @@ void init_GraphViewHelper(py::module&);
 
 void init_Scheduler(py::module&);
 void init_MemoryManager(py::module&);
+void init_ProdConso(py::module&);
 void init_TensorUtils(py::module&);
 void init_Filler(py::module&);
 
@@ -86,6 +98,8 @@ void init_Aidge(py::module& m) {
     init_Database(m);
     init_DataProvider(m);
     init_Tensor(m);
+    init_TensorImpl(m);
+    init_Attributes(m);
 
     init_Node(m);
     init_GraphView(m);
@@ -93,34 +107,42 @@ void init_Aidge(py::module& m) {
     init_Connector(m);
 
     init_OperatorImpl(m);
-    init_Attributes(m);
     init_Log(m);
     init_Operator(m);
     init_OperatorTensor(m);
+
     init_Add(m);
+    init_And(m);
+    init_ArgMax(m);
     init_AvgPooling(m);
     init_BatchNorm(m);
+    init_BitShift(m);
     init_Concat(m);
     init_Conv(m);
     init_ConvDepthWise(m);
+    init_ConstantOfShape(m);
     init_Div(m);
     init_Erf(m);
     init_FC(m);
     init_Gather(m);
     init_GenericOperator(m);
     init_GlobalAveragePooling(m);
+    init_GridSample(m);
+    init_Identity(m);
     init_LeakyReLU(m);
     init_MatMul(m);
     init_MaxPooling(m);
+    init_Memorize(m);
     init_MetaOperatorDefs(m);
     init_Mul(m);
     init_Pad(m);
-
     init_Pop(m);
     init_Pow(m);
-    init_ReduceMean(m);
     init_ReLU(m);
+    init_ReduceMean(m);
+    init_ReduceSum(m);
     init_Reshape(m);
+    init_Resize(m);
     init_Scaling(m);
     init_Shape(m);
     init_Sigmoid(m);
@@ -128,10 +150,11 @@ void init_Aidge(py::module& m) {
     init_Softmax(m);
     init_Split(m);
     init_Sqrt(m);
+    init_Squeeze(m);
     init_Sub(m);
     init_Tanh(m);
     init_Transpose(m);
-    init_Identity(m);
+    init_Unsqueeze(m);
 
     init_Producer(m);
 
@@ -142,9 +165,11 @@ void init_Aidge(py::module& m) {
     init_GraphViewHelper(m);
     init_Scheduler(m);
     init_MemoryManager(m);
+    init_ProdConso(m);
     init_TensorUtils(m);
     init_Filler(m);
 }
 
-PYBIND11_MODULE(aidge_core, m) { init_Aidge(m); }
 }  // namespace Aidge
+
+PYBIND11_MODULE(aidge_core, m) { Aidge::init_Aidge(m); }
diff --git a/python_binding/recipes/pybind_GraphViewHelper.cpp b/python_binding/recipes/pybind_GraphViewHelper.cpp
index e65b790d3eba6072e3e1b112c7d841959d4a5672..ac56fb4b43eb5b0a737157ec9e64c6771a692816 100644
--- a/python_binding/recipes/pybind_GraphViewHelper.cpp
+++ b/python_binding/recipes/pybind_GraphViewHelper.cpp
@@ -24,6 +24,5 @@ namespace py = pybind11;
 namespace Aidge {
 void init_GraphViewHelper(py::module &m) {
     m.def("producers", &producers, py::arg("graphview"));
-    m.def("compile_gradient", &compile_gradient, py::arg("graphview"));
 }
 } // namespace Aidge
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index b85d1c41ed90a5774a9b24062dfda4186c2294d5..6908cbd912b506a7adb7f33a02416d0173174969 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -15,57 +15,76 @@
 #include <cstddef>
 #include <string>
 
+#include "aidge/graph/GraphView.hpp"
 #include "aidge/recipes/Recipes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace py = pybind11;
 
 namespace Aidge {
-void init_Recipes(py::module &m) 
+void init_Recipes(py::module &m)
 {
 
 
-  m.def("fuse_mul_add", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseMulAdd), py::arg("graph_view"), R"mydelimiter(
+  m.def("matmul_to_fc", static_cast<void(*)(std::shared_ptr<GraphView>)>(matMulToFC), py::arg("graph_view"), R"mydelimiter(
    Recipe to fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
     :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
-  // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
+  // m.def("matmul_to_fc", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(matMulToFC), py::arg("nodes"), R"mydelimiter(
   //   recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
   //   :param nodes: The MatMul and Add nodes to fuse.
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
 
-  m.def("remove_dropout",static_cast<void(*)(std::shared_ptr<GraphView>)>(removeDropout), py::arg("graph_view"), R"mydelimiter(
-    Recipe to remove a dropout operator.
+  m.def("remove_node", removeNode, py::arg("graph_view"), py::arg("type"), py::arg("incProducers") = false, R"mydelimiter(
+    Recipe to remove operators of a given type.
 
     :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
+    :param type: Type of the operators to remove
+    :type type: str
+    :param incProducers: If true, also remove attached Producers
+    :type incProducers: bool
+    :return: Number of removed operators.
+    :rtype: int
     )mydelimiter");
 
-  m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
-    Recipe to remove a flatten operator.
+  m.def("remove_dropout", removeDropout, py::arg("graph_view"), R"mydelimiter(
+    Recipe to remove dropout operators.
 
     :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
+    :return: Number of removed operators.
+    :rtype: int
     )mydelimiter");
 
-  // m.def("remove_flatten", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(removeFlatten), py::arg("nodes"), R"mydelimiter(
-  //   Recipe to remove a flatten operator.
+  m.def("remove_identity", removeIdentity, py::arg("graph_view"), R"mydelimiter(
+    Recipe to remove identity operators.
 
-  //   :param nodes: The flatten operator to remove.
-  //   :type nodes: list of :py:class:`aidge_core.Node`
-  //   )mydelimiter");
+    :param graph_view: Graph view on which we want to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    :return: Number of removed operators.
+    :rtype: int
+    )mydelimiter");
 
-  // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
-  //   Recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
+  m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
+    Recipe to remove a Flatten operator if it is followed by an FC or a MatMul.
+    The recipe can remove multiple Flatten operators if they appear one after the other.
 
-  //   :param nodes: The MatMul and Add nodes to fuse.
-  //   :type nodes: list of :py:class:`aidge_core.Node`
-  //   )mydelimiter");
+    :param graph_view: Graph view on which we want to apply the recipe.
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    )mydelimiter");
+
+  m.def("remove_constantOfShape", static_cast<size_t(*)(std::shared_ptr<GraphView>)>(removeConstantOfShape), py::arg("graph_view"), R"mydelimiter(
+    Fuse a constant Producer feeding a Generic or ConstantOfShape operator into a single Producer
+
+    :param graph_view: Graph view on which we want to apply the recipe.
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    )mydelimiter");
 
   m.def("fuse_batchnorm", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseBatchNorm), py::arg("graph_view"), R"mydelimiter(
    Recipe to fuse BatchNorm with the preceding Conv or FC operator.
@@ -84,7 +103,34 @@ void init_Recipes(py::module &m)
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
 
-  m.def("expand_metaops", static_cast<void(*)(std::shared_ptr<GraphView>, bool)>(expandMetaOps), py::arg("graph_view"), py::arg("recursive") = false);
+  m.def("expand_metaops", static_cast<void(*)(std::shared_ptr<GraphView>, bool)>(expandMetaOps), py::arg("graph_view"), py::arg("recursive") = false, R"mydelimiter(
+    Flatten the graph by replacing the meta operators by their micro graph.
+
+    :param graph_view: Graph view on which we want to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    :param recursive: If true, recursively replace meta operators until there is no more meta operator in the graph.
+    :type recursive: bool
+    )mydelimiter");
+
+  m.def("fuse_to_metaops", fuseToMetaOps, py::arg("graph_view"), py::arg("query"), py::arg("type") = "", R"mydelimiter(
+    Fuse each sub-graph matching a query into a Meta Operator.
+
+    :param graph_view: Graph view on which we want to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    :param query: Sub-graph matching query
+    :type query: str
+    :param type: Type name of the resulting meta operators
+    :type type: str, optional
+    :return: Number of sub-graphs actually fused into a Meta Operator.
+    :rtype: int
+    )mydelimiter");
+
+  m.def("adapt_to_backend", adaptToBackend, py::arg("graph_view"), R"mydelimiter(
+    Adapt the graph to a specific backend.
+
+    :param graph_view: Graph view on which we want to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    )mydelimiter");
 }
 
 } // namespace Aidge
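A sketch of how the renamed and new recipes are driven from Python (the graph and the match query are placeholders):

    import aidge_core

    graph = aidge_core.GraphView()
    aidge_core.matmul_to_fc(graph)                    # formerly fuse_mul_add
    n = aidge_core.remove_node(graph, "Dropout", incProducers=True)
    aidge_core.fuse_to_metaops(graph, "MatMul->Add", "FC")  # hypothetical query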
diff --git a/python_binding/scheduler/pybind_ProdConso.cpp b/python_binding/scheduler/pybind_ProdConso.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..abd6d5379178916b5842095d50a1de2155345b6f
--- /dev/null
+++ b/python_binding/scheduler/pybind_ProdConso.cpp
@@ -0,0 +1,116 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+
+#include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+/**
+ * @brief Trampoline class for binding
+ *
+ */
+class pyProdConso: public ProdConso {
+public:
+    using ProdConso::ProdConso; // Inherit constructors
+
+    Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_nb_required_data",
+            getNbRequiredData,
+            inputIdx
+        );
+    }
+    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_nb_required_protected",
+            getNbRequiredProtected,
+            inputIdx
+
+        );
+    }
+    Elts_t getRequiredMemory(const IOIndex_t outputIdx,
+    const std::vector<DimSize_t> &inputsSize) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_required_memory",
+            getRequiredMemory,
+            outputIdx,
+            inputsSize
+
+        );
+    }
+    Elts_t getNbConsumedData(const IOIndex_t inputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_nb_consumed_data",
+            getNbConsumedData,
+            inputIdx
+
+        );
+    }
+    Elts_t getNbProducedData(const IOIndex_t outputIdx) const override {
+        PYBIND11_OVERRIDE_NAME(
+            Elts_t,
+            ProdConso,
+            "get_nb_produced_data",
+            getNbProducedData,
+            outputIdx
+
+        );
+    }
+    void updateConsummerProducer() override {
+        PYBIND11_OVERRIDE_NAME(
+            void,
+            ProdConso,
+            "update_consummer_producer",
+            updateConsummerProducer,
+
+        );
+    }
+    void resetConsummerProducer() override {
+        PYBIND11_OVERRIDE_NAME(
+            void,
+            ProdConso,
+            "reset_consummer_producer",
+            resetConsummerProducer,
+
+        );
+    }
+};
+
+void init_ProdConso(py::module& m){
+
+    py::class_<ProdConso, std::shared_ptr<ProdConso>, pyProdConso>(m, "ProdConso", py::dynamic_attr())
+    .def(py::init<const Operator&, bool>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>())
+    .def_static("default_model", &ProdConso::defaultModel)
+    .def_static("in_place_model", &ProdConso::inPlaceModel)
+    .def("get_nb_required_data", &ProdConso::getNbRequiredData)
+    .def("get_nb_required_protected", &ProdConso::getNbRequiredProtected)
+    .def("get_required_memory", &ProdConso::getRequiredMemory)
+    .def("get_nb_consumed_data", &ProdConso::getNbConsumedData)
+    .def("get_nb_produced_data", &ProdConso::getNbProducedData)
+    .def("update_consummer_producer", &ProdConso::updateConsummerProducer)
+    .def("reset_consummer_producer", &ProdConso::resetConsummerProducer)
+    ;
+}
+}
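The trampoline above is what allows ProdConso to be subclassed from Python; a minimal sketch (the override simply defers to the base class):

    import aidge_core

    class MyProdConso(aidge_core.ProdConso):
        # Called from the C++ scheduler through PYBIND11_OVERRIDE_NAME.
        def get_nb_required_data(self, input_idx):
            return super().get_nb_required_data(input_idx)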
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index b16134da324383a4542965393257288c49dceed0..472af2a9465b121593613492f5120ddc9d7fe254 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -25,16 +25,18 @@ void init_Scheduler(py::module& m){
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("graph_view", &Scheduler::graphView)
     .def("save_scheduling_diagram", &Scheduler::saveSchedulingDiagram, py::arg("file_name"))
+    .def("save_static_scheduling_diagram", &Scheduler::saveStaticSchedulingDiagram, py::arg("file_name"))
     .def("resetScheduling", &Scheduler::resetScheduling)
     .def("generate_scheduling", &Scheduler::generateScheduling)
     .def("get_static_scheduling", &Scheduler::getStaticScheduling, py::arg("step") = 0)
+    .def("graph_view", &Scheduler::graphView)
     .def("generate_memory", &Scheduler::generateMemory, py::arg("inc_producers") = false, py::arg("wrap_around_buffer") = false)
     ;
 
     py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>, Scheduler>(m, "SequentialScheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("data")=std::vector<Tensor>())
-    .def("backward", &SequentialScheduler::backward, py::arg("instanciate_grad")=true)
+    .def("backward", &SequentialScheduler::backward)
     ;
 
     py::class_<ParallelScheduler, std::shared_ptr<ParallelScheduler>, Scheduler>(m, "ParallelScheduler")
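Usage sketch of the updated scheduler bindings ('graph' stands for an existing aidge_core.GraphView; note that backward() no longer takes instanciate_grad):

    import aidge_core

    scheduler = aidge_core.SequentialScheduler(graph)
    scheduler.forward()
    scheduler.save_static_scheduling_diagram("static_schedule")  # new binding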
diff --git a/python_binding/utils/pybind_Attributes.cpp b/python_binding/utils/pybind_Attributes.cpp
index bfce891176822a3b1c07b1ded0c46c9c94a43c0a..bc0ccb3f4053e37c186acd919fcadae9d5d19a40 100644
--- a/python_binding/utils/pybind_Attributes.cpp
+++ b/python_binding/utils/pybind_Attributes.cpp
@@ -1,15 +1,32 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
+
 DynamicAttributes test_DynamicAttributes_binding() {
     DynamicAttributes attrs;
     attrs.addAttr<int>("a", 42);
     attrs.addAttr<std::string>("b", "test");
     attrs.addAttr<std::vector<bool>>("c", {true, false, true});
+    attrs.addAttr("mem.a", 1);
+    attrs.addAttr("mem.data.b", 1.0f);
+    attrs.addAttr("impl.c", std::string("test"));
     return attrs;
 }
 
@@ -19,15 +36,18 @@ double test_DynamicAttributes_binding_check(DynamicAttributes& attrs) {
 
 void init_Attributes(py::module& m){
     py::class_<Attributes, std::shared_ptr<Attributes>>(m, "Attributes")
-    .def("has_attr", &Attributes::hasAttr, py::arg("name"))
-    .def("get_attr_type", &Attributes::getAttrType, py::arg("name"))
-    .def("get_attrs_name", &Attributes::getAttrsName)
+    .def("has_attr", &Attributes::hasAttrPy, py::arg("name"))
     .def("get_attr", &Attributes::getAttrPy, py::arg("name"))
     .def("__getattr__", &Attributes::getAttrPy, py::arg("name"))
     .def("set_attr", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
-    .def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value"));
+    .def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
+    .def("dict", &Attributes::dict)
+    .def("__str__", &Attributes::str)
+    .def("__repr__", &Attributes::repr);
+
 
     py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
+    .def(py::init<>())
     .def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value"))
     .def("del_attr", &DynamicAttributes::delAttr, py::arg("name"));
 
@@ -35,5 +55,4 @@ void init_Attributes(py::module& m){
     m.def("test_DynamicAttributes_binding_check", &test_DynamicAttributes_binding_check, py::arg("attrs"));
 }
 
-}
-
+} // namespace Aidge
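With the new default constructor, dict() and __str__ bindings, DynamicAttributes can be built and inspected directly from Python, e.g.:

    import aidge_core

    attrs = aidge_core.DynamicAttributes()
    attrs.add_attr("mem.a", 1)   # nested attribute, as exercised in the test above
    print(attrs.dict())          # plain Python dict view
    print(attrs)                 # uses the new __str__ binding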
diff --git a/python_binding/utils/pybind_Log.cpp b/python_binding/utils/pybind_Log.cpp
index 7b5e7548b3126ed2ebfe3d9243248dc070c54076..ca8d1f33086fb5093c76826e5a2f53df873badf5 100644
--- a/python_binding/utils/pybind_Log.cpp
+++ b/python_binding/utils/pybind_Log.cpp
@@ -78,13 +78,35 @@ void init_Log(py::module& m){
     .def_static("set_console_level", &Log::setConsoleLevel, py::arg("level"),
           R"mydelimiter(
           Set the minimum log level displayed in the console.
+          Available `Level`s, in ascending order:
+            - Level.Debug
+            - Level.Info
+            - Level.Notice
+            - Level.Warn
+            - Level.Error
+            - Level.Fatal
 
           :param level: Log level.
           :type level: Level
           )mydelimiter")
+    .def_static("set_console_color", &Log::setConsoleColor, py::arg("enabled"),
+          R"mydelimiter(
+          Enable or disable colored output on the console.
+          Colors are enabled by default.
+
+          :param enabled: Activate or deactivate colors on the console.
+          :type enabled: bool
+          )mydelimiter")
     .def_static("set_file_level", &Log::setFileLevel, py::arg("level"),
           R"mydelimiter(
           Set the minimum log level saved in the log file.
+          Available `Level`s, in ascending order:
+            - Level.Debug
+            - Level.Info
+            - Level.Notice
+            - Level.Warn
+            - Level.Error
+            - Level.Fatal
 
           :param level: Log level.
           :type level: Level
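Usage sketch for the documented levels and the new color switch (assuming the Level enum is exposed at module scope, as the docstrings suggest):

    import aidge_core

    aidge_core.Log.set_console_level(aidge_core.Level.Debug)
    aidge_core.Log.set_console_color(False)  # e.g. to keep CI logs clean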
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 32ec29bb9b826038eb21ce2927f2fef08973b2b8..0000000000000000000000000000000000000000
--- a/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-numpy
-Jinja2
diff --git a/setup.py b/setup.py
index 60807df560510ad4cfacfdd2b178aca957306439..4f2e21711f193eb7d5c37ace7b5ad83ac63d3635 100644
--- a/setup.py
+++ b/setup.py
@@ -1,49 +1,33 @@
-#!/usr/bin/env python3
-""" Aidge
-
-#TODO To change
-POC of the next framework named Aidge
-"""
-
-DOCLINES = (__doc__ or '').split("\n")
-
 import sys
 import os
-
-# Python supported version checks
-if sys.version_info[:2] < (3, 7):
-    raise RuntimeError("Python version >= 3.7 required.")
-
-
-CLASSIFIERS = """\
-Development Status :: 2 - Pre-Alpha
-"""
-
 import shutil
 import pathlib
-import subprocess
 import multiprocessing
 
 from math import ceil
 
 from setuptools import setup, Extension
-from setuptools import find_packages
 from setuptools.command.build_ext import build_ext
 
-def get_project_name() -> str:
-    return open(pathlib.Path().absolute() / "project_name.txt", "r").read()
 
-def get_project_version() -> str:
-    aidge_root = pathlib.Path().absolute()
-    version = open(aidge_root / "version.txt", "r").read().strip()
-    return version
+PROJECT_NAME = "aidge_core"
 
+SETUP_DIR = pathlib.Path(__file__).parent
 
 class CMakeExtension(Extension):
     def __init__(self, name):
         super().__init__(name, sources=[])
 
+
 class CMakeBuild(build_ext):
+    def __init__(self, dist, *args, **kwargs):
+        super().__init__(dist, *args, **kwargs)
+        # Detect editable_mode for old versions of setuptools
+        if not hasattr(self, "editable_mode"):
+            if hasattr(dist, "commands"):
+                self.editable_mode = "develop" in dist.commands
+            else:
+                self.editable_mode = False
 
     def run(self):
         # This lists the number of processors available on the machine
@@ -60,54 +44,53 @@ class CMakeBuild(build_ext):
         if not build_lib.exists():
             build_lib.mkdir(parents=True, exist_ok=True)
 
-        os.chdir(str(build_temp))
+        package_prefix = build_lib if not self.editable_mode else SETUP_DIR
+        pybind_install_prefix = (package_prefix / PROJECT_NAME).absolute()
 
-        # Impose to use the executable of the python
-        # used to launch setup.py to setup PythonInterp
-        param_py = "-DPYTHON_EXECUTABLE=" + sys.executable
+        os.chdir(str(build_temp))
 
-        compile_type = 'Debug'
-        install_path = os.path.join(sys.prefix, "lib", "libAidge")  if "AIDGE_INSTALL" not in os.environ else os.environ["AIDGE_INSTALL"]
+        compile_type = os.environ.get("AIDGE_PYTHON_BUILD_TYPE", "Release")
+        install_path = (
+            os.path.join(sys.prefix, "lib", "libAidge")
+            if "AIDGE_INSTALL" not in os.environ
+            else os.environ["AIDGE_INSTALL"]
+        )
+        build_gen = os.environ.get("AIDGE_BUILD_GEN", "")
+        build_gen_opts = (
+            ["-G", build_gen]
+            if build_gen
+            else []
+        )
+        test_onoff = os.environ.get("AIDGE_BUILD_TEST", "OFF")
+
+        self.spawn(
+            [
+                "cmake",
+                *build_gen_opts,
+                str(cwd),
+                f"-DTEST={test_onoff}",
+                f"-DCMAKE_INSTALL_PREFIX:PATH={install_path}",
+                f"-DCMAKE_BUILD_TYPE={compile_type}",
+                "-DPYBIND=ON",
+                f"-DPYBIND_INSTALL_PREFIX:PATH={pybind_install_prefix}",
+                "-DCMAKE_EXPORT_COMPILE_COMMANDS=ON",
+                "-DCOVERAGE=OFF",
+            ]
+        )
 
-        self.spawn(['cmake', str(cwd), param_py, '-DTEST=OFF', f'-DCMAKE_INSTALL_PREFIX:PATH={install_path}', f'-DCMAKE_BUILD_TYPE={compile_type}'])
         if not self.dry_run:
-            self.spawn(['cmake', '--build', '.', '--config', compile_type, '-j', max_jobs])
-            self.spawn(['cmake', '--install', '.', '--config', compile_type])
+            self.spawn(
+                ["cmake", "--build", ".", "--config", compile_type, "-j", max_jobs]
+            )
+            self.spawn(["cmake", "--install", ".", "--config", compile_type])
         os.chdir(str(cwd))
 
-        aidge_package = build_lib / (get_project_name())
-
-        # Get "aidge core" package
-        # ext_lib = build_temp
-        print(build_temp.absolute())
-        # Copy all shared object files from build_temp/lib to aidge_package
-        for root, _, files in os.walk(build_temp.absolute()):
-            for file in files:
-                if (file.endswith('.so') or file.endswith('.pyd')) and (root != str(aidge_package.absolute())):
-                    currentFile=os.path.join(root, file)
-                    shutil.copy(currentFile, str(aidge_package.absolute()))
-
-        # Copy version.txt in aidge_package
-        os.chdir(os.path.dirname(__file__))
-        shutil.copy("version.txt", str(aidge_package.absolute()))
-
-
-if __name__ == '__main__':
 
+if __name__ == "__main__":
     setup(
-        name=get_project_name(),
-        version=get_project_version(),
-        python_requires='>=3.7',
-        description=DOCLINES[0],
-        long_description_content_type="text/markdown",
-        long_description="\n".join(DOCLINES[2:]),
-        classifiers=[c for c in CLASSIFIERS.split('\n') if c],
-        packages=find_packages(where="."),
-        include_package_data=True,
-        ext_modules=[CMakeExtension(get_project_name())],
+        ext_modules=[CMakeExtension(PROJECT_NAME)],
         cmdclass={
-            'build_ext': CMakeBuild,
+            "build_ext": CMakeBuild,
         },
         zip_safe=False,
-
     )
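The rewritten build is now driven entirely by environment variables; for instance (values illustrative):

    AIDGE_PYTHON_BUILD_TYPE=Debug AIDGE_BUILD_TEST=ON pip install -v .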
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index 8a5b40e44308111c5778c5260155b644234103c8..0fa2cfdadb3af350a5668444c0a330e023818a41 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -14,118 +14,345 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Transpose.hpp"
+#include "aidge/operator/Cast.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
+Aidge::ImplSpec::ImplSpec(const DynamicAttributes& attrs_):
+    attrs(attrs_) {}
+Aidge::ImplSpec::ImplSpec(const IOSpec& io, const DynamicAttributes& attrs_):
+    inputs(1, io), outputs(1, io), attrs(attrs_) {}
+Aidge::ImplSpec::ImplSpec(const IOSpec& i, const IOSpec& o, const DynamicAttributes& attrs_):
+    inputs(1, i), outputs(1, o), attrs(attrs_) {}
+Aidge::ImplSpec::ImplSpec(const std::vector<IOSpec>& i, const std::vector<IOSpec>& o, const DynamicAttributes& attrs_):
+    inputs(i), outputs(o), attrs(attrs_) {}
+Aidge::ImplSpec::ImplSpec(const Aidge::ImplSpec&) = default;
+Aidge::ImplSpec::~ImplSpec() noexcept = default;
+
 Aidge::OperatorImpl::OperatorImpl(const Operator& op, const std::string& backend):
     mOp(op),
-    mBackend(backend),
-    mNbConsumedData(mOp.nbInputs(), Elts_t::NoneElts()),
-    mNbProducedData(mOp.nbOutputs(), Elts_t::NoneElts())
+    mBackend(backend)
 {
     //ctor
 }
 
-Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(mOp.getRawInput(inputIdx),
-        "a valid input is required at index {} for operator type {}",
-        inputIdx, mOp.type());
+std::shared_ptr<Aidge::ProdConso> Aidge::OperatorImpl::prodConso() {
+    if (!mProdConso) {
+        mProdConso = getProdConso();
+    }
+    return mProdConso;
+}
+
+Aidge::ImplSpec Aidge::OperatorImpl::getRequiredSpec() const {
+    const auto& opTensor = dynamic_cast<const OperatorTensor&>(mOp);
+
+    ImplSpec requiredSpec;
+    // Inputs specs
+    for (size_t i = 0; i < opTensor.nbInputs(); ++i) {
+        if (opTensor.getInput(i)) {
+            std::vector<std::pair<int, int>> dims;
+            for (auto dim : opTensor.getInput(i)->dims()) {
+                dims.push_back(std::make_pair<int, int>(dim, dim));
+            }
 
-    if (mOp.getRawInput(inputIdx)) {
-        const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
-        if (!input->empty()) {
-            // Known amount of data: requires the whole tensor by default
-            return Elts_t::DataElts(input->size());
+            requiredSpec.inputs.push_back({opTensor.getInput(i)->dataType(), opTensor.getInput(i)->dataFormat(), dims});
         }
         else {
-            // Unknown amount of data: require a single token by default
-            return Elts_t::TokenElts(1);
+            requiredSpec.inputs.push_back({DataType::Any});
         }
     }
+    // Outputs specs
+    for (size_t i = 0; i < opTensor.nbOutputs(); ++i) {
+        std::vector<std::pair<int, int>> dims;
+        for (auto dim : opTensor.getOutput(i)->dims()) {
+            dims.push_back(std::make_pair<int, int>(dim, dim));
+        }
 
-    // Input not connected, meaning it is an optional input: do no require anything!
-    return Elts_t::NoneElts();
+        requiredSpec.outputs.push_back({opTensor.getOutput(i)->dataType(), opTensor.getOutput(i)->dataFormat(), dims});
+    }
+    // Attributes
+    if (!mOp.isAtomic()) {
+        requiredSpec.attrs.setAttr("type:!", mOp.type()); // :! mandatory qualifier
+    }
+    else {
+        requiredSpec.attrs.setAttr("type", mOp.type());
+    }
+    return requiredSpec;
 }
 
-Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(mOp.getRawInput(inputIdx),
-        "a valid input is required at index {} for operator type {}",
-        inputIdx, mOp.type());
+Aidge::ImplSpec Aidge::OperatorImpl::getBestMatch(const ImplSpec& requiredSpecs) const {
+    Log::debug("getBestMatch() for requirements: {}", requiredSpecs);
 
-    if (mOp.getRawInput(inputIdx)) {
-        const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
-        if (!input->empty()) {
-            // Known amount of data: protect the whole tensor by default
-            return Elts_t::DataElts(input->size());
+    const auto availableSpecsSet = getAvailableImplSpecs();
+    const std::vector<ImplSpec> availableSpecs(availableSpecsSet.begin(), availableSpecsSet.end());
+    std::vector<int> matchingSpecs(availableSpecs.size(), -1);
+
+    for (size_t s = 0; s < availableSpecs.size(); ++s) {
+        auto spec = availableSpecs[s];
+        bool match = true;
+        int priority = 0;
+
+        // Check inputs
+        for (size_t i = 0; i < requiredSpecs.inputs.size(); ++i) {
+            const auto inputSpec = (i < spec.inputs.size()) ? spec.inputs[i] : spec.inputs.back();
+            if (!checkIOSpec(requiredSpecs.inputs[i], inputSpec)) {
+                match = false;
+                break;
+            }
         }
-        else {
-            // Unknown amount of data: protect a single token by default
-            // (this does not really make sense for now, as getNbRequiredProtected()
-            // is supposed to give a precise amount of data to protect for
-            // memory management purpose...)
-            return Elts_t::TokenElts(1);
+
+        // Check outputs
+        for (size_t i = 0; i < requiredSpecs.outputs.size(); ++i) {
+            const auto outputSpec = (i < spec.outputs.size()) ? spec.outputs[i] : spec.outputs.back();
+            if (!checkIOSpec(requiredSpecs.outputs[i], outputSpec)) {
+                match = false;
+                break;
+            }
+        }
+
+        // Check attributes
+        for (const auto& attrName : requiredSpecs.attrs.getAttrsName()) {
+            std::string name = attrName;
+            std::string qualifier;
+            const auto qualifierPos = std::find_if(attrName.begin(), attrName.end(),
+                [](char c) { return c == ':'; });
+            if (qualifierPos != attrName.end()) {
+                name = attrName.substr(0, qualifierPos - attrName.begin());
+                // Skip the ':' so that "type:!" yields qualifier "!" and "attr:2" yields "2"
+                qualifier = attrName.substr(qualifierPos - attrName.begin() + 1);
+            }
+
+            const bool mandatory = (qualifier == "!");
+            if (mandatory) {
+                // Required attribute:
+                if (!spec.attrs.hasAttr(name)) {
+                    // Missing attribute
+                    match = false;
+                    break;
+                }
+                else if (requiredSpecs.attrs.getAny(attrName) < spec.attrs.getAny(name)
+                    || spec.attrs.getAny(name) < requiredSpecs.attrs.getAny(attrName))
+                {
+                    // Attribute value mismatch
+                    match = false;
+                    break;
+                }
+            }
+            else {
+                const int attrPriority = (!qualifier.empty()) ? std::stoi(qualifier) : 0;
+
+                if (spec.attrs.hasAttr(name)
+                    && !(requiredSpecs.attrs.getAny(attrName) < spec.attrs.getAny(name))
+                    && !(spec.attrs.getAny(name) < requiredSpecs.attrs.getAny(attrName)))
+                {
+                    // Attribute value match
+                    priority = std::max(priority, attrPriority);
+                }
+            }
         }
+
+        if (match) {
+            matchingSpecs[s] = priority;
+        }
+
+        Log::debug("  {}:{} - {}", (match) ? "MATCH" : "MISMATCH", priority, spec);
+    }
+
+    // Return best match
+    const auto bestMatch = std::max_element(matchingSpecs.begin(), matchingSpecs.end());
+    if (*bestMatch >= 0) {
+        const auto bestSpecIdx = bestMatch - matchingSpecs.begin();
+        return availableSpecs[bestSpecIdx];
     }
 
-    // Input not connected, meaning it is an optional input: do no require anything!
-    return Elts_t::NoneElts();
+    // If there is no match, return the required specs for the registrar, which
+    // will throw a "missing or invalid registrar key"
+    return requiredSpecs;
 }
 
-Aidge::Elts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    AIDGE_ASSERT(mOp.getRawOutput(outputIdx),
-        "a valid output is required at index {} for operator type {}",
-        outputIdx, mOp.type());
+bool Aidge::OperatorImpl::checkIOSpec(const ImplSpec::IOSpec& required, const ImplSpec::IOSpec& spec) const {
+    // Check type
+    if (required.type != DataType::Any
+        && spec.type != DataType::Any
+        && required.type != spec.type)
+    {
+        return false;
+    }
+
+    // Check format
+    if (required.format != DataFormat::Any
+        && spec.format != DataFormat::Any
+        && required.format != spec.format)
+    {
+        const auto transpose = getDataFormatTranspose(required.format, spec.format);
+        std::vector<size_t> identity(transpose.size());
+        std::iota(std::begin(identity), std::end(identity), 0);
 
-    if (mOp.getRawOutput(outputIdx)) {
-        const auto output = std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx));
-        if (!output->empty()) {
-            // Known amount of data: requires the whole tensor by default,
-            // regardless of available data on inputs
-            return Elts_t::DataElts(output->size());
+        if (!std::equal(transpose.begin(), transpose.end(), identity.begin())) {
+            return false;
         }
-        else {
-            // Unknown amount of data: require a single token by default
-            // (this does not really make sense for now, as getRequiredMemory()
-            // is supposed to give a precise amount of data to allocate for
-            // memory management purpose...)
-            return Elts_t::TokenElts(1);
+    }
+
+    // Check dims
+    if (!required.dims.empty() && !spec.dims.empty()) {
+        if (required.dims.size() != spec.dims.size()) {
+            return false;
+        }
+
+        for (size_t dim = 0; dim < required.dims.size(); ++dim) {
+            const auto requiredDim = required.dims[dim];
+            const auto specDim = spec.dims[dim];
+
+            if (requiredDim.first != -1
+                && specDim.first != -1
+                && !(specDim.first <= requiredDim.first && specDim.second >= requiredDim.second))
+            {
+                return false;
+            }
         }
     }
 
-    // Output not set, meaning it is an optional output: do no require anything!
-    return Elts_t::NoneElts();
+    return true;
 }
 
-Aidge::Elts_t Aidge::OperatorImpl::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(),
-        "input index ({}) is out of bound ({}) for operator type {}",
-        inputIdx, mNbConsumedData.size(), mOp.type());
-    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
-}
+std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const {
+    auto op = std::static_pointer_cast<OperatorTensor>(mOp.clone());
+    auto node = std::make_shared<Node>(op);
 
-Aidge::Elts_t Aidge::OperatorImpl::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
-    AIDGE_ASSERT(static_cast<std::size_t>(outputIdx) < mNbProducedData.size(),
-        "output index ({}) is out of bound ({}) for operator type {}",
-        outputIdx, mNbProducedData.size(), mOp.type());
-    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
-}
+    // Adapt inputs
+    for (size_t i = 0; i < requiredSpecs.inputs.size(); ++i) {
+        const auto IOSpec = (i < spec.inputs.size()) ? spec.inputs[i] : spec.inputs.back();
+        const ImplSpec::IOSpec& requiredIOSpec = requiredSpecs.inputs[i];
+        std::shared_ptr<Node> parent = node;
+
+        // Input type
+        if (requiredIOSpec.type != DataType::Any
+            && IOSpec.type != DataType::Any
+            && requiredIOSpec.type != IOSpec.type)
+        {
+            const auto cast = Cast(IOSpec.type);
+            cast->addChild(parent, 0, i);
 
-void Aidge::OperatorImpl::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx) {
-        // each input is consumed by the minimum amount for a forward pass
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));
+            op->getInput(i)->setDataType(IOSpec.type);
+        }
+
+        // Input format
+        if (requiredIOSpec.format != DataFormat::Any
+            && IOSpec.format != DataFormat::Any
+            && requiredIOSpec.format != IOSpec.format)
+        {
+            const auto transpose = getDataFormatTranspose(requiredIOSpec.format, IOSpec.format);
+            auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
+            transposeOp->getOperator()->setDataFormat(IOSpec.format);
+            transposeOp->getOperator()->setDataType(IOSpec.type);
+            transposeOp->addChild(parent, 0, i);
+
+            op->getInput(i)->setDataFormat(IOSpec.format);
+        }
+
+        // Input dims
+        if (!requiredIOSpec.dims.empty() && !IOSpec.dims.empty()) {
+            if (requiredIOSpec.dims.size() != IOSpec.dims.size()) {
+                return nullptr;
+            }
+
+            for (size_t dim = 0; dim < requiredIOSpec.dims.size(); ++dim) {
+                const auto requiredDim = requiredIOSpec.dims[dim];
+                const auto specDim = IOSpec.dims[dim];
+
+                if (requiredDim.first != -1
+                    && specDim.first != -1
+                    && !(specDim.first <= requiredDim.first && specDim.second >= requiredDim.second))
+                {
+                    return nullptr;
+                }
+            }
+        }
     }
 
-    for (std::size_t outputIdx = 0; outputIdx < mNbProducedData.size(); ++outputIdx) {
-        mNbProducedData[outputIdx] += getRequiredMemory(outputIdx, {});
+    // Adapt outputs
+    for (size_t i = 0; i < requiredSpecs.outputs.size(); ++i) {
+        const auto IOSpec = (i < spec.outputs.size()) ? spec.outputs[i] : spec.outputs.back();
+        const ImplSpec::IOSpec& requiredIOSpec = requiredSpecs.outputs[i];
+        std::shared_ptr<Node> parent = node;
+
+        // Output type
+        if (requiredIOSpec.type != DataType::Any
+            && IOSpec.type != DataType::Any
+            && requiredIOSpec.type != IOSpec.type)
+        {
+            const auto cast = Cast(requiredIOSpec.type);
+            parent->addChild(cast, i, 0);
+
+            op->getOutput(i)->setDataType(IOSpec.type);
+        }
+
+        // Output format
+        if (requiredIOSpec.format != DataFormat::Any
+            && IOSpec.format != DataFormat::Any
+            && requiredIOSpec.format != IOSpec.format)
+        {
+            const auto transpose = getDataFormatTranspose(IOSpec.format, requiredIOSpec.format);
+            auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
+            transposeOp->getOperator()->setDataFormat(requiredIOSpec.format);
+            transposeOp->getOperator()->setDataType(requiredIOSpec.type);
+            parent->addChild(transposeOp, i, 0);
+
+            op->getOutput(i)->setDataFormat(IOSpec.format);
+        }
+
+        // Output dims
+        if (!requiredIOSpec.dims.empty() && !IOSpec.dims.empty()) {
+            if (requiredIOSpec.dims.size() != IOSpec.dims.size()) {
+                return nullptr;
+            }
+
+            for (size_t dim = 0; dim < requiredIOSpec.dims.size(); ++dim) {
+                const auto requiredDim = requiredIOSpec.dims[dim];
+                const auto specDim = IOSpec.dims[dim];
+
+                if (requiredDim.first != -1
+                    && specDim.first != -1
+                    && !(specDim.first <= requiredDim.first && specDim.second >= requiredDim.second))
+                {
+                    return nullptr;
+                }
+            }
+        }
     }
+
+    return MetaOperator(std::string("Adapted_" + op->type()).c_str(), getConnectedGraphView(node));
 }
 
-void Aidge::OperatorImpl::resetConsummerProducer(){
-    std::fill(mNbConsumedData.begin(), mNbConsumedData.end(), Elts_t::NoneElts());
-    std::fill(mNbProducedData.begin(), mNbProducedData.end(), Elts_t::NoneElts());
+std::shared_ptr<Aidge::Node> Aidge::OperatorImpl::getBestAdaptation(const ImplSpec& requiredSpecs) const {
+    const auto availableSpecs = getAvailableImplSpecs();
+    Log::debug("Adapt operator type {}: {} impl. available", mOp.type(), availableSpecs.size());
+
+    using AdaptationCost = int;
+    std::map<std::shared_ptr<Node>, AdaptationCost> adaptations;
+
+    for (const auto& availableSpec : availableSpecs) {
+        auto adaptation = getAdaptation(availableSpec, requiredSpecs);
+
+        if (adaptation) {
+            auto microGraph = std::dynamic_pointer_cast<MetaOperator_Op>(adaptation->getOperator())->getMicroGraph();
+            adaptations.insert(std::make_pair(adaptation, microGraph->getNodes().size()));
+        }
+    }
+
+    Log::debug("Adapt operator type {}: found {} possible adaptations", mOp.type(), adaptations.size());
+
+    if (!adaptations.empty()) {
+        // Return best adaptation (with min. AdaptationCost)
+        const auto bestAdaptation = std::min_element(adaptations.begin(), adaptations.end(),
+            [](const auto& lhs, const auto& rhs) { return lhs.second < rhs.second; });
+        return bestAdaptation->first;
+    }
+
+    return nullptr;
 }
 
 void Aidge::OperatorImpl::forward() {
@@ -135,3 +362,11 @@ void Aidge::OperatorImpl::forward() {
 void Aidge::OperatorImpl::backward() {
     AIDGE_THROW_OR_ABORT(std::runtime_error, "backward() not implemented yet for operator of type {}", mOp.type());
 }
+
+std::shared_ptr<Aidge::ProdConso> Aidge::OperatorImpl::getProdConso() const {
+    return std::make_shared<ProdConso>(mOp);
+}
+
+std::set<Aidge::ImplSpec> Aidge::OperatorImpl::getAvailableImplSpecs() const {
+    return std::set<ImplSpec>();
+}
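A plain-Python mirror of the attribute-qualifier convention used by getBestMatch() above — a trailing ':!' marks a mandatory attribute, ':<n>' a priority bonus (a sketch, not the bound API):

    def split_qualifier(attr_name: str):
        """Split 'name:qualifier' the way getBestMatch() does."""
        pos = attr_name.find(":")
        if pos == -1:
            return attr_name, ""
        return attr_name[:pos], attr_name[pos + 1:]

    assert split_qualifier("type:!") == ("type", "!")  # mandatory match
    assert split_qualifier("type:2") == ("type", "2")  # priority 2 on match
    assert split_qualifier("type") == ("type", "")     # no qualifier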
diff --git a/src/backend/cpu/data/TensorImpl.cpp b/src/backend/cpu/data/TensorImpl.cpp
index 39ca71de9ea1bdb89f8138c3e38a016b516686dd..ed3c96f80c1b8bafd70425451d6618428d1888f0 100644
--- a/src/backend/cpu/data/TensorImpl.cpp
+++ b/src/backend/cpu/data/TensorImpl.cpp
@@ -38,12 +38,7 @@ bool Aidge::TensorImpl_cpu<T>::operator==(const Aidge::TensorImpl &other) const
 
 template <typename T>
 void Aidge::TensorImpl_cpu<T>::zeros() {
-    if (mData.empty()) {
-        lazyInit();
-    }
-    for (std::size_t i = 0; i < mData.size(); ++i) {
-        *(mData.data() + i) = T(0);
-    }
+    std::memset(rawPtr(), 0, mNbElts * sizeof(T));
 }
 
 template <typename T>
diff --git a/src/data/Data.cpp b/src/data/Data.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..62a883d08a401e02c86408214a061f893ffbfb4a
--- /dev/null
+++ b/src/data/Data.cpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/data/Data.hpp"
+
+Aidge::DataFormatTranspose Aidge::getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
+    // Permutation array from default format to src format
+    const auto srcDefToFormat = DataFormatTransposeDict[static_cast<int>(src)];
+    // Permutation array from default format to dst format
+    const auto dstDefToFormat = DataFormatTransposeDict[static_cast<int>(dst)];
+    // Compute permutation array from src format to default format:
+    DataFormatTranspose srcFormatToDef{};
+    for (size_t i = 0; i < srcDefToFormat.size(); ++i) {
+        if (srcDefToFormat[i] > 0) {
+            srcFormatToDef[srcDefToFormat[i] - 1] = i;
+        }
+        else {
+            srcFormatToDef[i] = i;
+        }
+    }
+
+    // Compute permutation array from src format to dst format:
+    DataFormatTranspose srcToDst{};
+    for (size_t i = 0; i < dstDefToFormat.size(); ++i) {
+        if (dstDefToFormat[srcFormatToDef[i]] > 0) {
+            srcToDst[i] = dstDefToFormat[srcFormatToDef[i]] - 1;
+        }
+        else {
+            srcToDst[i] = i;
+        }
+    }
+
+    return srcToDst;
+}
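A plain-Python rendering of the permutation composition above, with hypothetical 1-based tables standing in for DataFormatTransposeDict (0 would mean an unset slot):

    DEF_TO_FORMAT = {
        "NCHW": [1, 2, 3, 4],  # assumed default layout (identity)
        "NHWC": [1, 4, 2, 3],  # assumed: default axis i sits at position DEF_TO_FORMAT[i]-1
    }

    def get_data_format_transpose(src, dst):
        src_def, dst_def = DEF_TO_FORMAT[src], DEF_TO_FORMAT[dst]
        # Invert src: position in src layout -> default axis (same loop as above).
        src_to_def = [0] * len(src_def)
        for i, v in enumerate(src_def):
            src_to_def[v - 1 if v > 0 else i] = i
        # Compose with the dst permutation: src position -> dst position.
        return [dst_def[src_to_def[i]] - 1 if dst_def[src_to_def[i]] > 0 else i
                for i in range(len(dst_def))]

    print(get_data_format_transpose("NCHW", "NHWC"))  # [0, 3, 1, 2] under these tables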
diff --git a/src/data/DataProvider.cpp b/src/data/DataProvider.cpp
index fc6b842edef17c80a4ef80667fc814bf85df25a4..7f4eb71aa1f1e05c42aef8090988d0ea05aa6cb2 100644
--- a/src/data/DataProvider.cpp
+++ b/src/data/DataProvider.cpp
@@ -23,9 +23,10 @@
 #include "aidge/utils/Random.hpp"
 
 
-Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize, const bool shuffle, const bool dropLast)
+Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize, const std::string& backend, const bool shuffle, const bool dropLast)
     : mDatabase(database),
       mBatchSize(batchSize),
+      mBackend(backend),
       mShuffle(shuffle),
       mDropLast(dropLast),
       mNumberModality(database.getItem(0).size()),
@@ -63,7 +64,7 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() con
         dataBatchDims[i].insert(dataBatchDims[i].begin(), current_batch_size);
         auto batchData = std::make_shared<Tensor>();
         batchData->resize(dataBatchDims[i]);
-        batchData->setBackend("cpu");
+        batchData->setBackend(mBackend);
         batchData->setDataType(mDataTypes[i]);
         batchTensors.push_back(batchData);
     }
@@ -78,6 +79,8 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() con
 
         // Browse each modularity in the database item
         for (std::size_t j = 0; j < mNumberModality; ++j) {
+
+            dataItem[j]->setBackend(mBackend);
             auto dataSample = dataItem[j];
 
             // Assert tensor sizes
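Usage sketch of the new backend argument ('db' stands for any aidge_core.Database; the keyword names are assumptions about the Python binding):

    import aidge_core

    provider = aidge_core.DataProvider(db, batch_size=32, backend="cpu",
                                       shuffle=True, drop_last=False)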
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 677bd0246e145ebf760f210000728bd2d99a3807..abfc91c6cdf9fd4f6eb46100074b22083514d82e 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -16,8 +16,124 @@
 
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Abs.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Div.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/ReduceMean.hpp"
+#include "aidge/operator/Sub.hpp"
+#include "aidge/operator/Sqrt.hpp"
+#include "aidge/operator/Transpose.hpp"
 #include "aidge/utils/Types.h"
 
+
+Aidge::Tensor::~Tensor() noexcept = default;
+
+
+Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto add_ = Add_Op(2);
+    add_.associateInput(0, std::make_shared<Tensor>(*this));
+    add_.associateInput(1, std::make_shared<Tensor>(other));
+    add_.setDataType(dataType());
+    add_.setDataFormat(dataFormat());
+    add_.setBackend(mImpl->backend());
+    add_.forward();
+    return add_.getOutput(0)->clone();
+}
+
+
+Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto sub_ = Sub_Op();
+    sub_.associateInput(0, std::make_shared<Tensor>(*this));
+    sub_.associateInput(1, std::make_shared<Tensor>(other));
+    sub_.setDataType(dataType());
+    sub_.setDataFormat(dataFormat());
+    sub_.setBackend(mImpl->backend());
+    sub_.forward();
+    return sub_.getOutput(0)->clone();
+}
+
+
+Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto mul_ = Mul_Op();
+    mul_.associateInput(0, std::make_shared<Tensor>(*this));
+    mul_.associateInput(1, std::make_shared<Tensor>(other));
+    mul_.setDataType(dataType());
+    mul_.setDataFormat(dataFormat());
+    mul_.setBackend(mImpl->backend());
+    mul_.forward();
+    return mul_.getOutput(0)->clone();
+}
+
+
+Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto div_ = Div_Op();
+    div_.associateInput(0, std::make_shared<Tensor>(*this));
+    div_.associateInput(1, std::make_shared<Tensor>(other));
+    div_.setDataType(dataType());
+    div_.setDataFormat(dataFormat());
+    div_.setBackend(mImpl->backend());
+    div_.forward();
+    return div_.getOutput(0)->clone();
+}
+
+Aidge::Tensor Aidge::Tensor::sqrt() const {
+    AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
+    auto sqrt_ = Sqrt_Op();
+    sqrt_.associateInput(0, std::make_shared<Tensor>(*this));
+    sqrt_.setDataType(dataType());
+    sqrt_.setDataFormat(dataFormat());
+    sqrt_.setBackend(mImpl->backend());
+    sqrt_.forward();
+    return sqrt_.getOutput(0)->clone();
+}
+
+Aidge::Tensor Aidge::Tensor::abs() const {
+    AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
+    auto abs_ = Abs_Op();
+    abs_.associateInput(0, std::make_shared<Tensor>(*this));
+    abs_.setDataType(dataType());
+    abs_.setDataFormat(dataFormat());
+    abs_.setBackend(mImpl->backend());
+    abs_.forward();
+    return abs_.getOutput(0)->clone();
+}
+
+Aidge::Tensor Aidge::Tensor::mean() const {
+    AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
+    // TODO: should be the default behavior of ReduceMean_Op
+    // No need to specify the list of all axes!
+    std::vector<std::int32_t> axes(nbDims());
+    std::iota(std::begin(axes), std::end(axes), 0);
+    auto mean_ = ReduceMean_Op(axes, false, false);
+    mean_.associateInput(0, std::make_shared<Tensor>(*this));
+    mean_.setDataType(dataType());
+    mean_.setDataFormat(dataFormat());
+    mean_.setBackend(mImpl->backend());
+    mean_.forward();
+    return mean_.getOutput(0)->clone();
+}
+
 Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
     if (this == &other) {
         return *this;
@@ -37,17 +153,32 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
     return *this;
 }
 
-Aidge::Tensor::~Tensor() noexcept = default;
+
+void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t device, bool copyFrom) {
+    if (mImpl) {
+        if (mImpl->device() != std::make_pair(name, device)) {
+            // Backend change: create a new impl, copy the data over if
+            // requested, then replace the old impl
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
+            if (copyFrom) {
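+                // copy mImpl->size() elements from offset mImplOffset of the
+                // old impl to offset 0 of the new one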
+                newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0);
+            }
+            setImpl(newImpl);
+        }
+    }
+    else {
+        mImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
+    }
+}
 
 void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
                            std::vector<Aidge::DimSize_t> strides) {
-    // TODO: scalar Tensor not handled
     if (dims.empty()) {  // scalar
         mDims = std::vector<DimSize_t>(0);
         mStrides = std::vector<DimSize_t>({1});
         mContiguous = true;
 
-        computeSize();
+        computeSize(); // will set mSize to 1
         if (mImpl) {
             mImpl->resize(mDims);
         }
@@ -105,7 +236,7 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
 
 std::string Aidge::Tensor::toString() const {
     AIDGE_ASSERT(
-        mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) ||
+        mImpl && (undefined() || (dims() == std::vector<DimSize_t>({0})) ||
                   (mImpl->hostPtr() != nullptr)),
         "tensor should have a valid host pointer");
 
@@ -319,6 +450,52 @@ void Aidge::Tensor::copyFrom(const Tensor& src) {
                         mImplOffset);
 }
 
+void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) {
+    std::vector<DimSize_t> newDims;
+    for (std::size_t i = 0; i < src.dims().size(); ++i) {
+        newDims.push_back(src.dims()[transpose[i]]);
+    }
+
+    std::vector<std::size_t> newStrides(newDims.size(), 1);
+    for (size_t i = 0; i < newDims.size(); ++i) {
+        for (size_t j = i + 1; j < newDims.size(); ++j) {
+            newStrides[i] *= newDims[j];
+        }
+    }
+
+    std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, newDims);
+
+    std::vector<size_t> indices(newDims.size(), 0);
+    for (size_t i = 0; i < src.size(); ++i) {
+        size_t idx = 0;
+        // Permute the indices according to the requested transpose order
+        for (int j = newDims.size() - 1; j >= 0; --j) {
+            idx += indices[transpose[j]] * newStrides[j];
+        }
+
+        // Copy the value in output
+        newImpl->copy(src.getImpl()->rawPtr(i), 1, idx);
+
+        // Update indices for the next iteration
+        for (int j = newDims.size() - 1; j >= 0; --j) {
+            if (indices[j] < src.dims()[j] - 1) {
+                indices[j]++;
+                break;
+            }
+            else {
+                indices[j] = 0;
+            }
+        }
+    }
+
+    resize(newDims);
+    setImpl(newImpl);
+}
+
+void Aidge::Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) {
+    copyTranspose(src, std::vector<DimSize_t>(transpose.begin(), transpose.end()));
+}
+
 void Aidge::Tensor::copyCastFrom(const Tensor& src,
                                  std::shared_ptr<Tensor>& movedSrcPtr) {
     if (&src == this) {
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 77ca0b00c40e578f45834a16da65ae37ac4b7d3c..ef322fe5b795b9cb9c62c3593abdd330fd471575 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -46,23 +46,29 @@ const std::shared_ptr<Aidge::Node> Aidge::GraphView::operator[](const std::strin
 ///////////////////////////////////////////////////////
 
 Aidge::Connector Aidge::GraphView::operator()(
-    const std::vector<Aidge::Connector> ctors) {
+    const std::vector<Aidge::Connector> ctors)
+{
   // TODO: allow for multiple inputNodes?
-  assert((inputNodes().size() == 1U) && "Too many input Nodes for the GraphView, undefined behaviour");
+  AIDGE_ASSERT(inputNodes().size() == 1U, "Multiple input Nodes for the GraphView are not supported with Connectors");
   std::shared_ptr<Node> inNode = *inputNodes().begin();
-  assert((ctors.size() == static_cast<std::size_t>(inNode->nbData())) && "Wrong number of arguments.\n");
-  for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inNode->inputs()) {
-    assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n");
-    (void)input; // avoid unused warning
-  }
 
-  IOIndex_t inID = 0;
-  for (const Connector &ctor : ctors) {
-    assert((ctor.node() != nullptr) &&
-           "Input Connector must be associated with a node");
-    ctor.node()->addChild(shared_from_this(), static_cast<std::size_t>(ctor.index()),
-                          {inNode, inID++});
+  IOIndex_t ctorIdx = 0;
+  const auto& inputs = inNode->inputs();
+  for (IOIndex_t idx = 0; idx < inNode->nbInputs(); ++idx) {
+    if (inNode->inputCategory(idx) == InputCategory::Data || inNode->inputCategory(idx) == InputCategory::OptionalData) {
+      if (ctorIdx < ctors.size()) {
+        AIDGE_ASSERT(ctors[ctorIdx].node() != nullptr, "Input Connector #{} must be associated with a node", ctorIdx);
+        AIDGE_ASSERT(inputs[idx].second == gk_IODefaultIndex, "Data input#{} connection is not free.", idx);
+        ctors[ctorIdx].node()->addChild(shared_from_this(), static_cast<std::size_t>(ctors[ctorIdx].index()),
+                              {inNode, idx});
+        ++ctorIdx;
+      }
+      else {
+        AIDGE_ASSERT(inNode->inputCategory(idx) == InputCategory::OptionalData, "Missing an input connector for non-optional Data input#{}", idx);
+      }
+    }
   }
+  AIDGE_ASSERT(ctorIdx == ctors.size(), "Too many input Connectors ({}) vs available free data inputs ({}).", ctors.size(), ctorIdx);
   return Connector(*(outputNodes().begin()));
 }
 
@@ -74,6 +80,9 @@ bool Aidge::GraphView::inView(const std::shared_ptr<Aidge::Node>& nodePtr) const
     return mNodes.find(nodePtr) != mNodes.cend();
 }
 
+bool Aidge::GraphView::inView(const std::string& nodeName) const {
+  return mNodeRegistry.find(nodeName) != mNodeRegistry.end();
+}
 
 void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProducers) const {
     auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((path + ".mmd").c_str(), "w"), &std::fclose);
@@ -143,7 +152,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
                 // Add-on to display the operator's output dimensions
                 std::string dims = "";
                 const auto op = std::dynamic_pointer_cast<OperatorTensor>(node_ptr->getOperator());
-                if (op && !op->getOutput(outputIdx)->dims().empty()) {
+                if (op && !op->getOutput(outputIdx)->undefined()) {
                   dims += " " + fmt::format("{}", op->getOutput(outputIdx)->dims());
                 }
 
@@ -166,10 +175,16 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
     }
 
     size_t inputIdx = 0;
-    for (auto input : mInputNodes) {
+    for (const auto& input : mInputNodes) {
       if (input.first != nullptr) {
-        fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|\"&rarr;{}\"|{}_{}\n", inputIdx, inputIdx,
+        const auto& op_ = std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator());
+        if (op_ && op_->getInput(input.second) && (!op_->getInput(input.second)->empty())) {
+            fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|\"&rarr;{}{}\"|{}_{}\n", inputIdx, inputIdx,
+                    input.second, op_->getInput(input.second)->dims(), input.first->type(), namePtrTable.at(input.first));
+        } else {
+            fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|\"&rarr;{}\"|{}_{}\n", inputIdx, inputIdx,
                     input.second, input.first->type(), namePtrTable.at(input.first));
+        }
       }
       else {
         fmt::print(fp.get(), "input{}((in#{})):::inputCls\n", inputIdx, inputIdx);
@@ -183,7 +198,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
         // Add-on to display the operator's output dimensions
         std::string dims = "";
         const auto op = std::dynamic_pointer_cast<OperatorTensor>(output.first->getOperator());
-        if (op && op->getOutput(output.second) && !op->getOutput(output.second)->dims().empty()) {
+        if (op && op->getOutput(output.second) && !op->getOutput(output.second)->undefined()) {
           dims += " " + fmt::format("{}", op->getOutput(output.second)->dims());
         }
 
@@ -210,6 +225,16 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
     fmt::print(fp.get(), "\n");
 }
 
+void Aidge::GraphView::setNodesName() const {
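+    // Rename every node to a unique generated name "<graphName>_<Type>#<occurrence>".
+    // Note: any existing node name is overwritten.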
+    std::map<std::string, std::int32_t> typeIds;
+    for (const auto& nodePtr: getNodes()) {
+        const std::string& t = nodePtr->getOperator()->type();
+        typeIds.emplace(t, 0);
+        const std::string nodeName = name() + std::string("_") + t + std::string("#") + std::to_string(typeIds[t]++);
+        nodePtr->setName(nodeName);
+    }
+}
+
 void Aidge::GraphView::logOutputs(const std::string& dirName) const {
   if (!Aidge::createDirectories(dirName)){
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Failed to create directory: {}.", dirName);
@@ -278,19 +303,20 @@ void Aidge::GraphView::setOrderedInputs(const std::vector<std::pair<NodePtr, IOI
     // it into account.
     if (input.first != nullptr) {
       auto it = std::find(ignoredInputs.begin(), ignoredInputs.end(), input);
-      AIDGE_ASSERT(it != ignoredInputs.end(), "unknown or duplicate input");
+      AIDGE_ASSERT(it != ignoredInputs.end(), "unknown or duplicate input: {} (of type {})", input.first->name(), input.first->type());
       ignoredInputs.erase(it);
       ++nbInputs;
     }
   }
 
-  AIDGE_ASSERT(nbInputs <= mInputNodes.size(), "too many specified number of inputs");
+  AIDGE_ASSERT(nbInputs <= mInputNodes.size(), "too many specified inputs: {} specified vs {} available", nbInputs, mInputNodes.size());
 
   mInputNodes = inputs;
   mInputNodes.insert(mInputNodes.end(), ignoredInputs.begin(), ignoredInputs.end());
 }
 
 void Aidge::GraphView::setOrderedOutputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& outputs) {
+  // Note: one can specify any node as graph output!
   size_t nbOutputs = 0;
   std::vector<std::pair<NodePtr, IOIndex_t>> ignoredOutputs(mOutputNodes);
   for (auto output : outputs) {
@@ -299,14 +325,13 @@ void Aidge::GraphView::setOrderedOutputs(const std::vector<std::pair<NodePtr, IO
     // it into account.
     if (output.first != nullptr) {
       auto it = std::find(ignoredOutputs.begin(), ignoredOutputs.end(), output);
-      AIDGE_ASSERT(it != ignoredOutputs.end(), "unknown or duplicate output");
-      ignoredOutputs.erase(it);
+      if (it != ignoredOutputs.end()) {
+        ignoredOutputs.erase(it);
+      }
       ++nbOutputs;
     }
   }
 
-  AIDGE_ASSERT(nbOutputs <= mOutputNodes.size(), "too many specified number of outputs");
-
   mOutputNodes = outputs;
   mOutputNodes.insert(mOutputNodes.end(), ignoredOutputs.begin(), ignoredOutputs.end());
 }
@@ -395,14 +420,41 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
 }
 
 bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>>& dims, bool allowDataDependency) {
+    // For each provided input dims, check consistency with any existing input
+    // tensor, or create a dummy input tensor to propagate the dimensions
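+    // Usage sketch (hypothetical input shape):
+    //   graph->forwardDims({{1, 3, 224, 224}});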
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
     if (!dims.empty()){
-      AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of given dimensions ({}) and graph inputs ({})", dims.size(), mInputNodes.size());
-      for (std::size_t i = 0; i < dims.size(); ++i){
-        auto tensor = std::make_shared<Tensor>(dims[i]);
-        mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+      Log::debug("forwardDims(): setting graph input dims ({} dims provided).", dims.size());
+
+      std::size_t i = 0;
+      for (auto& input : mInputNodes) {
+        const auto& currentTensorPtr =
+            std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator())->getInput(input.second);
+        if (i < dims.size() && !dims[i].empty()) {
+          if (currentTensorPtr) { // tensor detected
+              AIDGE_ASSERT(currentTensorPtr->dims() == dims[i],
+                "forwardDims(): mismatch between existing and provided size for graph input#{} (existing size: {}, provided size: {})",
+                i, currentTensorPtr->dims(), dims[i])
+          } else {
+              auto tensor = std::make_shared<Tensor>(dims[i]);
+              input.first->getOperator()->setInput(input.second, tensor);
+          }
+        }
+        else {
+          const bool optional = (input.first->inputCategory(input.second) == InputCategory::OptionalData
+            || input.first->inputCategory(input.second) == InputCategory::OptionalParam);
+
+          if (currentTensorPtr) {
+            Log::debug("forwardDims(): existing dims are {} for graph input#{} for input#{} of node {} (of type {})",
+              i, input.second, input.first->name(), input.first->type(), currentTensorPtr->dims());
+          }
+          else if (!optional) {
+            Log::warn("forwardDims(): did not specify dims for mandatory graph input#{} for input#{} of node {} (of type {})",
+              i, input.second, input.first->name(), input.first->type());
+          }
+        }
+        ++i;
       }
     }
 
@@ -415,12 +467,12 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                 AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i) == inputI.first->getOperator()->getRawOutput(inputI.second),
                   "Input#{} for node {} ({}) is not properly connected to output#{} of node {} ({}): Data or Tensor mismatch!",
                     i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
-            } else {
+            } else if (nodePtr->inputCategory(i) != InputCategory::OptionalData && nodePtr->inputCategory(i) != InputCategory::OptionalParam) {
                 // Input is missing
                 AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i),
                   "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
-                AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(),
-                  "Empty input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
+                AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined(),
+                  "Undefined input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
             }
 
         }
@@ -518,6 +570,12 @@ void Aidge::GraphView::setDataType(const Aidge::DataType &datatype) const {
     }
 }
 
+void Aidge::GraphView::setDataFormat(const Aidge::DataFormat &dataformat) const {
+    for (const auto& node : getNodes()) {
+        node->getOperator()->setDataFormat(dataformat);
+    }
+}
+
 std::vector<
     std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>>
 Aidge::GraphView::outputs() const {
@@ -574,15 +632,17 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
 
   // add learnable parameters to the graph
   if (includeLearnableParam) {
-    for (IOIndex_t i = node->nbData(); i < node->nbInputs(); ++i) {
-      std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
-      if (parentNode) {
-          parentNode->addView(shared_from_this());
-          mNodes.insert(parentNode);
-          if (!(parentNode->name()).empty())
-            mNodeRegistry.insert(std::make_pair(parentNode->name(), parentNode));
-          // check if the parentNode is an input/output node
-          updateInputsOutputsNew(parentNode);
+    for (IOIndex_t i = 0; i < node->nbInputs(); ++i) {
+      if (node->inputCategory(i) == InputCategory::Param || node->inputCategory(i) == InputCategory::OptionalParam) {
+        std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
+        if (parentNode) {
+            parentNode->addView(shared_from_this());
+            mNodes.insert(parentNode);
+            if (!(parentNode->name()).empty())
+              mNodeRegistry.insert(std::make_pair(parentNode->name(), parentNode));
+            // check if the parentNode is an input/output node
+            updateInputsOutputsNew(parentNode);
+        }
       }
     }
   }
@@ -590,9 +650,12 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
 
 std::pair<std::vector<Aidge::NodePtr>, size_t> Aidge::GraphView::getRankedNodes() const {
   std::set<NodePtr> nodesToRank(mNodes);
-  nodesToRank.erase(mRootNode);
   std::vector<NodePtr> rankedNodes;
-  rankedNodes.push_back(mRootNode);
+
+  if (mRootNode) {
+    nodesToRank.erase(mRootNode);
+    rankedNodes.push_back(mRootNode);
+  }
 
   for (size_t curNodeIdx = 0; curNodeIdx < rankedNodes.size(); ++curNodeIdx) {
     NodePtr curNode = rankedNodes[curNodeIdx];
@@ -622,6 +685,61 @@ std::pair<std::vector<Aidge::NodePtr>, size_t> Aidge::GraphView::getRankedNodes(
   return std::make_pair(rankedNodes, orderUnicityLimit);
 }
 
+std::vector<Aidge::NodePtr> Aidge::GraphView::getOrderedNodes(bool reversed) const {
+    // The order is computed from a post-DFS walk of the reverse graph,
+    // starting from the ordered output nodes.
+    // The graph is walked upward left-to-right so as to obtain a topological
+    // left-right order when possible.
+    // When reversed is true, the graph is walked upward right-to-left and the
+    // final order is reversed, giving a post-DFS left-right order when possible.
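+    // Usage sketch:
+    //   for (const auto& node : graph->getOrderedNodes()) { /* process node */ }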
+    std::vector<std::pair<NodePtr,std::pair<size_t, std::vector<NodePtr>>>> stack;
+    std::vector<NodePtr> reversePostDfs;
+    std::set<NodePtr> visited;
+    std::vector<NodePtr> outNodes(mNodes.size());
+    auto reverse_if_dfs = [reversed](auto &parents) {
+        if (reversed) std::reverse(parents.begin(), parents.end());
+    };
+    for (const auto& output : mOutputNodes) {
+        outNodes.push_back(output.first);
+    }
+    reverse_if_dfs(outNodes);
+    stack.push_back(std::make_pair(nullptr, std::make_pair(0, std::move(outNodes))));
+    while (!stack.empty()) {
+        auto node = stack.back().first;
+        auto& parentIdx = stack.back().second.first;
+        auto& parents = stack.back().second.second;
+        if (parentIdx == parents.size()) {
+            stack.pop_back();
+            if (node) {
+                reversePostDfs.push_back(node);
+            }
+        } else {
+            auto backEdgeIdx = reversed ? parents.size() - 1 - parentIdx : parentIdx;
+            auto isBackEdge = (node != nullptr) ? node->parentIsBackEdge(backEdgeIdx) : false;
+            auto parent = parents[parentIdx++];
+            if (parent != nullptr && inView(parent) &&
+                visited.find(parent) == visited.end()) {
+                if (isBackEdge) {
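+                    // Defer back-edge parents: queue them on the initial
+                    // (sentinel) stack frame so they are visited later
+                    // without recursing through the cycle.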
+                    stack[0].second.second.push_back(parent);
+                } else {
+                    visited.insert(parent);
+                    auto next_parents = parent->getParents();
+                    reverse_if_dfs(next_parents);
+                    stack.push_back(std::make_pair(parent, std::make_pair(0, std::move(next_parents))));
+                }
+            }
+        }
+    }
+
+    if (reversePostDfs.size() != mNodes.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "Could not enumerate all nodes; set the output nodes such that every node of the graph is reachable from them.");
+    }
+
+    reverse_if_dfs(reversePostDfs);
+    return reversePostDfs;
+}
+
 std::map<Aidge::NodePtr, std::string> Aidge::GraphView::getRankedNodesName(const std::string& format, bool markNonUnicity) const {
   const auto rankedNodes = getRankedNodes();
   std::map<NodePtr, std::string> rankedNodesName;
@@ -652,6 +770,15 @@ bool Aidge::GraphView::add(std::set<std::shared_ptr<Node>> otherNodes, bool incl
   std::set<NodePtr> nodesToAdd;
   std::set_difference(otherNodes.begin(), otherNodes.end(), mNodes.begin(), mNodes.end(), std::inserter(nodesToAdd, nodesToAdd.begin()));
 
+  // Check that no name in the added nodes collides with a name already in this graph
+  for (auto node : nodesToAdd) {
+    if (mNodeRegistry.find(node->name()) != mNodeRegistry.end()) {
+      const std::string newName = node->createUniqueName(node->name());
+      Log::warn("Node name \"{}\" is a duplicate, renaming to {}.", node->name(), newName);
+      node->setName(newName);
+    }
+  }
   // List the nodes to rank, initially all the nodes in the GraphView
   std::set<NodePtr> nodesToRank(mNodes);
   nodesToRank.insert(nodesToAdd.begin(), nodesToAdd.end());
@@ -746,10 +873,10 @@ bool Aidge::GraphView::add(std::pair<NodePtr, std::set<NodePtr>> nodes, bool inc
   return add(nodes.second, includeLearnableParam);
 }
 
-bool Aidge::GraphView::add(std::shared_ptr<GraphView> graph) {
+bool Aidge::GraphView::add(std::shared_ptr<GraphView> graph, bool includeLearnableParam) {
     // set the rootNode to the other graphView rootNode if no rootNode yet
     mRootNode = mRootNode ? mRootNode : graph->rootNode();
-    return add(graph->getNodes(), false);
+    return add(graph->getNodes(), includeLearnableParam);
 }
 
 void Aidge::GraphView::addChild(std::shared_ptr<Node> toOtherNode,
@@ -861,29 +988,31 @@ Aidge::GraphView::getNode(const std::string& nodeName) const {
 void Aidge::GraphView::remove(std::shared_ptr<Node> nodePtr, bool includeLearnableParam) {
   // remove learnable params
   if (includeLearnableParam) {
-    for (IOIndex_t i = nodePtr->nbData(); i < nodePtr->nbInputs(); ++i) {
-      auto inputI = nodePtr->input(i);
-      if (inputI.first != nullptr) {
-        bool removeNode = true;
-        for (const auto& parentOutput : inputI.first->outputs()) {
-          for (const auto& childOfParentOutput : parentOutput) {
-            // only remove the learnable parameter if not related to any other Node in the GraphView
-            if (childOfParentOutput.first != nodePtr) {
-              removeNode = false;
-              break;
+    for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
+      if (nodePtr->inputCategory(i) == InputCategory::Param || nodePtr->inputCategory(i) == InputCategory::OptionalParam) {
+        auto inputI = nodePtr->input(i);
+        if (inputI.first != nullptr) {
+          bool removeNode = true;
+          for (const auto& parentOutput : inputI.first->outputs()) {
+            for (const auto& childOfParentOutput : parentOutput) {
+              // only remove the learnable parameter if not related to any other Node in the GraphView
+              if (childOfParentOutput.first != nodePtr) {
+                removeNode = false;
+                break;
+              }
             }
           }
-        }
-        if (removeNode) {
-          // assert Learnable Parameter in the GraphView scope
-          if (mNodes.find(inputI.first) != mNodes.end()) {
-            mNodes.erase(inputI.first);
-            inputI.first->removeView(shared_from_this());
-          }
-          if (!inputI.first->name().empty()) { mNodeRegistry.erase(inputI.first->name()); }
+          if (removeNode) {
+            // assert Learnable Parameter in the GraphView scope
+            if (mNodes.find(inputI.first) != mNodes.end()) {
+              mNodes.erase(inputI.first);
+              inputI.first->removeView(shared_from_this());
+            }
+            if (!inputI.first->name().empty()) { mNodeRegistry.erase(inputI.first->name()); }
 
-          // check if the node was an input/output node
-          updateInputsOutputsDelete(inputI.first);
+            // check if the node was an input/output node
+            updateInputsOutputsDelete(inputI.first);
+          }
         }
       }
     }
@@ -1027,6 +1156,10 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
                       for (const auto& child : outputChildren[i]) {
                         inputParents[i].first -> addChild(child.first, inputParents[i].second, child.second);
                       }
+                    } else {
+                      for (const auto& child : outputChildren[i]) {
+                        child.first->getOperator()->resetInput(child.second);
+                      }
                     }
                 }
             }
@@ -1046,7 +1179,10 @@ bool Aidge::GraphView::replace(const std::shared_ptr<GraphView>& oldGraph, const
             // Case 2
             if ((oldOIn.size() == 1) && (inputParents[0].first)) {
                 for (std::size_t i = 0; i < newOIn.size(); ++i) {
-                    inputParents[0].first -> addChild(newOIn[i].first, inputParents[0].second, newOIn[i].second);
+                    // Only re-connect the same input category
+                    if (newOIn[i].first->inputCategory(newOIn[i].second) == oldOIn[0].first->inputCategory(oldOIn[0].second)) {
+                      inputParents[0].first -> addChild(newOIn[i].first, inputParents[0].second, newOIn[i].second);
+                    }
                 }
             } else {
                 for (std::size_t i = 0; i < oldOIn.size(); ++i) {
@@ -1332,13 +1468,13 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
     auto clonedNode = cloneNode(node_ptr);
     if (clonedNode == nullptr) {
       AIDGE_ASSERT(node_ptr->getChildren().size() <= 1, "deleted nodes in GraphView::clone() cannot have multiple children");
-      AIDGE_ASSERT(node_ptr->nbData() <= 1, "deleted nodes in GraphView::clone() cannot have multiple data input parents");
+      AIDGE_ASSERT(node_ptr->dataInputs().size() <= 1, "deleted nodes in GraphView::clone() cannot have multiple data input parents");
     }
     oldToNewNodes[node_ptr] = clonedNode;
   }
 
   // For each node, convert old node -> new node connections
-  for (auto &oldToNewNode : oldToNewNodes) {
+  for (const auto &oldToNewNode : oldToNewNodes) {
     if (oldToNewNode.second == nullptr) {
       continue;  // deleted node
     }
@@ -1346,12 +1482,12 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
     // Connect parent nodes. Nodes that were removed with cloneNode() are set to nullptr
     size_t parentId = 0;
     for (auto parent : oldToNewNode.first->inputs()) {
-      if (parent.first != nullptr) {
+      if (parent.first != nullptr && inView(parent.first)) {
         while (oldToNewNodes[parent.first] == nullptr) {
           // Find next valid parent in line, going backward in the graph
           AIDGE_INTERNAL_ASSERT(parent.first->getChildren().size() == 1);
-          AIDGE_INTERNAL_ASSERT(parent.first->nbData() <= 1);
           const auto& parents = parent.first->dataInputs();
+          AIDGE_INTERNAL_ASSERT(parents.size() <= 1);
 
           if (!parents.empty() && parents[0].first != nullptr // a valid parent exists
             && oldToNewNodes.find(parents[0].first) != oldToNewNodes.end()) // parent is in the GraphView
@@ -1382,7 +1518,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
     newGraph->add(oldToNewNodes[mRootNode], false);
   }
 
-  for (auto &oldToNewNode : oldToNewNodes) {
+  for (const auto &oldToNewNode : oldToNewNodes) {
     if (oldToNewNode.second == nullptr)
       continue;  // deleted node
 
@@ -1432,9 +1568,9 @@ std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*clone
   for (auto it = newOutputNodes.begin(); it != newOutputNodes.end(); ) {
     // If output node was removed, find previous valid output
     while (oldToNewNodes[it->first] == nullptr) {
-      // Removed node should have only one connected data input, otherwise cloning is invalid
-      AIDGE_INTERNAL_ASSERT(it->first->nbData() <= 1);
       auto parents = it->first->dataInputs();
+      // Removed node should have only one connected data input, otherwise cloning is invalid
+      AIDGE_INTERNAL_ASSERT(parents.size() <= 1);
 
       if (!parents.empty() && parents[0].first != nullptr // a valid parent exists
         && oldToNewNodes.find(parents[0].first) != oldToNewNodes.end()) // parent is in the GraphView
diff --git a/src/graph/Matching.cpp b/src/graph/Matching.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4a62019a7aa044ebcf2089d91f3ba097d85218e7
--- /dev/null
+++ b/src/graph/Matching.cpp
@@ -0,0 +1,835 @@
+#include "aidge/graph/Matching.hpp"
+
+#include <fmt/color.h>
+
+Aidge::SinglePassGraphMatching::Context::Context() = default;
+Aidge::SinglePassGraphMatching::Context::Context(const Context& other) = default;
+Aidge::SinglePassGraphMatching::Context& Aidge::SinglePassGraphMatching::Context::operator=(const Context& other) = default;
+Aidge::SinglePassGraphMatching::Context::~Context() = default;
+
+////////////////////////////////////////////////////////////
+
+Aidge::SinglePassGraphMatching::MatchingResult::MatchingResult() : graph(std::make_shared<GraphView>()), startNode(nullptr) {}
+Aidge::SinglePassGraphMatching::MatchingResult::MatchingResult(const Aidge::SinglePassGraphMatching::MatchingResult& other) {
+    graph = std::make_shared<GraphView>(*(other.graph.get()));
+    anchors = other.anchors;
+    startNode = other.startNode;
+}
+Aidge::SinglePassGraphMatching::MatchingResult& Aidge::SinglePassGraphMatching::MatchingResult::operator=(const Aidge::SinglePassGraphMatching::MatchingResult& other) {
+    graph = std::make_shared<GraphView>(*(other.graph.get()));
+    anchors = other.anchors;
+    startNode = other.startNode;
+    return *this;
+}
+Aidge::SinglePassGraphMatching::MatchingResult::~MatchingResult() noexcept = default;
+
+//////////////////////////////////////////////////////////
+
+Aidge::SinglePassGraphMatching::SinglePassGraphMatching(const Aidge::SinglePassGraphMatching& other) = default;
+Aidge::SinglePassGraphMatching& Aidge::SinglePassGraphMatching::operator=(const Aidge::SinglePassGraphMatching& other) = default;
+Aidge::SinglePassGraphMatching::~SinglePassGraphMatching() noexcept = default;
+
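+// Illustrative queries, assuming the grammar parsed by the match* functions
+// below (typed nodes linked by edges, with optional quantifiers):
+//   "Conv->ReLU"    a Conv whose output feeds a ReLU through a single edge
+//   "Conv->ReLU?"   same, but the trailing ReLU is optional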
+std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphMatching::match(const std::string& query, bool disjoint) {
+    Context ctx;
+    ctx.query = query;
+    std::set<MatchingResult> matches;
+
+    while (matchSequence(ctx, matches) || matchNodeOrBlock(ctx, matches)) {
+        removeWhiteSpace(ctx.query);
+        if (!ctx.query.empty() && ctx.query[0] == ';') {
+            ctx.query.erase(0, 1);
+        }
+        else {
+            break;
+        }
+    }
+
+    removeWhiteSpace(ctx.query);
+    if (!ctx.query.empty()) {
+        Log::warn("Syntax error, unable to parse remaining query: {}", ctx.query);
+    }
+
+    if (disjoint) {
+        matches = filterLonguestDisjoint(matches);
+    }
+
+    return matches;
+}
+
+Aidge::SinglePassGraphMatching::MatchingResult Aidge::SinglePassGraphMatching::matchFrom(NodePtr startNode, const std::string& query) {
+    Context ctx;
+    ctx.query = query;
+    ctx.startNode = startNode;
+    std::set<MatchingResult> matches;
+
+    while (matchSequence(ctx, matches) || matchNodeOrBlock(ctx, matches)) {
+        removeWhiteSpace(ctx.query);
+        if (!ctx.query.empty() && ctx.query[0] == ';') {
+            ctx.query.erase(0, 1);
+        }
+        else {
+            break;
+        }
+    }
+
+    removeWhiteSpace(ctx.query);
+    if (!ctx.query.empty()) {
+        Log::warn("Syntax error, unable to parse remaining query: {}", ctx.query);
+    }
+
+    AIDGE_INTERNAL_ASSERT(matches.size() <= 1);
+    return (!matches.empty()) ? *matches.begin() : MatchingResult();
+}
+
+std::set<Aidge::SinglePassGraphMatching::MatchingResult> Aidge::SinglePassGraphMatching::filterLonguestDisjoint(const std::set<MatchingResult>& matches) {
+    // Sort matches by decreasing number of nodes, using the CompareMatchingResultSize comparator
+    std::set<MatchingResult, CompareMatchingResultSize> sortedMatches(matches.begin(), matches.end());
+    // Keep all the nodes that are already in previous (selected) matches
+    std::set<NodePtr> selectedNodes;
+    std::set<MatchingResult> filteredMatches;
+
+    for (const auto& match : sortedMatches) {
+        const auto& nodes = match.graph->getNodes();
+        bool isNonOverlapping = true;
+        for (const auto& node : nodes) {
+            if (selectedNodes.find(node) != selectedNodes.end()) {
+                isNonOverlapping = false;
+                break;
+            }
+        }
+
+        if (isNonOverlapping) {
+            // If no node of the current match is already in a previous match,
+            // the match is disjoint from previous matches and can be kept!
+            filteredMatches.insert(match);
+            selectedNodes.insert(nodes.begin(), nodes.end());
+        }
+    }
+
+    return filteredMatches;
+}
+
+bool Aidge::SinglePassGraphMatching::matchNodeOrBlock(Context& ctx, std::set<MatchingResult>& matches) {
+    auto newCtx = ctx;
+    Log::debug("{}node-or-block", std::string(2*newCtx.depth, ' '));
+    auto newMatches = matches;
+    ++newCtx.depth;
+
+    // (BLOCK | NODE)
+    if (!matchBlock(newCtx, newMatches) && !matchNode(newCtx, newMatches)) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    // QUANTIFIER?
+    bool matchMore = false;
+    size_t matchQuantity = 0;
+    removeWhiteSpace(newCtx.query);
+    if (!newCtx.query.empty() && (newCtx.query[0] == '?' || newCtx.query[0] == '*')) {
+        AIDGE_ASSERT(!(ctx.firstSequence && ctx.firstNode),
+            "Ill-formed query; the root node cannot be optional in query at: {}", ctx.query);
+
+        for (const auto& match : matches) {
+            bool found = false;
+            for (const auto& newMatch : newMatches) {
+                if (match.graph->rootNode() == newMatch.graph->rootNode()) {
+                    found = true;
+                }
+            }
+
+            if (!found) {
+                newMatches.insert(match);
+            }
+        }
+
+        if (newCtx.query[0] == '*') {
+            matchMore = true;
+        }
+
+        newCtx.query.erase(0, 1);
+    }
+    else if (!newCtx.query.empty() && newCtx.query[0] == '+') {
+        newCtx.query.erase(0, 1);
+        matchMore = true;
+    }
+    else if (!newCtx.query.empty() && newCtx.query[0] == '{') {
+        newCtx.query.erase(0, 1);
+
+        removeWhiteSpace(newCtx.query);
+        const auto endQuantity = std::find_if(newCtx.query.begin(), newCtx.query.end(),
+            [](char c) { return !isdigit(c); });
+        if (endQuantity != newCtx.query.begin()) {
+            matchQuantity = std::stoi(newCtx.query.substr(0, endQuantity - newCtx.query.begin()));
+            newCtx.query = newCtx.query.substr(endQuantity - newCtx.query.begin());
+        }
+        else {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+
+        if (matchQuantity == 0) {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+
+        removeWhiteSpace(newCtx.query);
+        if (!newCtx.query.empty() && newCtx.query[0] == '}') {
+            newCtx.query.erase(0, 1);
+        }
+        else {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+
+        if (matchQuantity > 1) {
+            matchMore = true;
+        }
+    }
+
+    if (matchMore) {
+        std::set<MatchingResult> additionalMatches;
+
+        do {
+            auto additionalCtx = ctx;
+            additionalCtx.firstNode = newCtx.firstNode;
+            additionalCtx.firstSequence = newCtx.firstSequence;
+            additionalCtx.anchors = newCtx.anchors;
+            ++additionalCtx.depth;
+            additionalMatches = newMatches;
+
+            if (!matchBlock(additionalCtx, additionalMatches) && !matchNode(additionalCtx, additionalMatches)) {
+                Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+                return false;
+            }
+
+            for (const auto& additionalMatch : additionalMatches) {
+                for (auto& match : newMatches) {
+                    if (match.graph->rootNode() == additionalMatch.graph->rootNode()) {
+                        match.graph = std::make_shared<GraphView>(*(additionalMatch.graph.get()));
+                        match.anchors = additionalMatch.anchors;
+                        match.startNode = additionalMatch.startNode;
+                        break;
+                    }
+                }
+            }
+
+            --matchQuantity;
+        }
+        while (!additionalMatches.empty() && matchQuantity > 1);
+    }
+
+    --newCtx.depth;
+    ctx = newCtx;
+    matches = newMatches;
+    return true;
+}
+
+bool Aidge::SinglePassGraphMatching::matchBlock(Context& ctx, std::set<MatchingResult>& matches) {
+    auto newCtx = ctx;
+    Log::debug("{}block", std::string(2*newCtx.depth, ' '));
+    auto newMatches = matches;
+    ++newCtx.depth;
+
+    // '('
+    removeWhiteSpace(newCtx.query);
+    if (!newCtx.query.empty() && newCtx.query[0] == '(') {
+        newCtx.query.erase(0, 1);
+    }
+    else {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    // SEQ | PAR | BLOCK | ALT | NODE
+    if (!matchSequence(newCtx, newMatches)
+        && !matchParallel(newCtx, newMatches)
+        && !matchAlternative(newCtx, newMatches)
+        && !matchBlock(newCtx, newMatches)
+        && !matchNode(newCtx, newMatches))
+    {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    // ')'
+    removeWhiteSpace(newCtx.query);
+    if (!newCtx.query.empty() && newCtx.query[0] == ')') {
+        newCtx.query.erase(0, 1);
+    }
+    else {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    --newCtx.depth;
+    ctx = newCtx;
+    matches = newMatches;
+    return true;
+}
+
+bool Aidge::SinglePassGraphMatching::matchSequence(Context& ctx, std::set<MatchingResult>& matches) {
+    auto newCtx = ctx;
+    Log::debug("{}sequence", std::string(2*newCtx.depth, ' '));
+    auto newMatches = matches;
+    ++newCtx.depth;
+
+    if (!ctx.inSequence) {
+        newCtx.inSequence = true;
+        newCtx.firstNode = true;
+    }
+
+    // NODE_OR_BLOCK
+    if (!matchNodeOrBlock(newCtx, newMatches)) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    newCtx.firstNode = false;
+
+    bool found = false;
+    while (true) {
+        // (EDGE NODE_OR_BLOCK)+
+        //   EDGE
+        if (matchEdge(newCtx, newMatches)) {
+            found = true;
+        }
+        else {
+            break;
+        }
+
+        //   NODE_OR_BLOCK
+        if (!matchNodeOrBlock(newCtx, newMatches)) {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+    }
+
+    if (!found) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    if (!ctx.inSequence) {
+        newCtx.inSequence = false;
+    }
+
+    --newCtx.depth;
+    ctx = newCtx;
+    matches = newMatches;
+    return true;
+}
+
+bool Aidge::SinglePassGraphMatching::matchParallel(Context& ctx, std::set<MatchingResult>& matches) {
+    auto newCtx = ctx;
+    Log::debug("{}parallel", std::string(2*newCtx.depth, ' '));
+    ++newCtx.depth;
+    auto newMatches = matches;
+
+    // NODE_OR_BLOCK
+    auto parCtx = newCtx;
+    if (!matchNodeOrBlock(parCtx, newMatches)) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+    newCtx.query = parCtx.query;
+
+    bool found = false;
+    while (true) {
+        // ('&' NODE_OR_BLOCK)+
+        //   '&'
+        removeWhiteSpace(newCtx.query);
+        if (!newCtx.query.empty() && newCtx.query[0] == '&') {
+            newCtx.query.erase(0, 1);
+            found = true;
+        }
+        else {
+            break;
+        }
+
+        //   NODE_OR_BLOCK
+        // reset the ctx to the beginning
+        parCtx = newCtx;
+        // reset the startNode to the beginning
+        for (auto& newMatch : newMatches) {
+            for (const auto& match : matches) {
+                if (match.graph->rootNode() == newMatch.graph->rootNode()) {
+                    newMatch.startNode = match.startNode;
+                }
+            }
+        }
+
+        if (!matchNodeOrBlock(parCtx, newMatches)) {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+        newCtx.query = parCtx.query;
+    }
+
+    // Keep the last context for further query
+    newCtx = parCtx;
+
+    if (!found) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    --newCtx.depth;
+    ctx = newCtx;
+    matches = newMatches;
+    return true;
+}
+
+bool Aidge::SinglePassGraphMatching::matchAlternative(Context& ctx, std::set<MatchingResult>& matches) {
+    auto newCtx = ctx;
+    Log::debug("{}alternative", std::string(2*newCtx.depth, ' '));
+    ++newCtx.depth;
+    std::set<MatchingResult> newMatches;
+
+    // NODE_OR_BLOCK
+    auto altCtx = newCtx;
+    auto altMatches = matches;
+    if (!matchNodeOrBlock(altCtx, altMatches)) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+    newCtx.query = altCtx.query;
+    newCtx.anchors.insert(altCtx.anchors.begin(), altCtx.anchors.end());
+    bool firstSequence = altCtx.firstSequence;
+    bool firstNode = altCtx.firstNode;
+    newMatches.insert(altMatches.begin(), altMatches.end());
+
+    bool found = false;
+    while (true) {
+        // ('|' NODE_OR_BLOCK)+
+        //    '|'
+        removeWhiteSpace(newCtx.query);
+        if (!newCtx.query.empty() && newCtx.query[0] == '|') {
+            newCtx.query.erase(0, 1);
+            found = true;
+        }
+        else {
+            break;
+        }
+
+        //    NODE_OR_BLOCK
+        altCtx = newCtx;
+        altMatches = matches;
+        if (!matchNodeOrBlock(altCtx, altMatches)) {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+        newCtx.query = altCtx.query;
+        newCtx.anchors.insert(altCtx.anchors.begin(), altCtx.anchors.end());
+        AIDGE_ASSERT(firstSequence == altCtx.firstSequence,
+            "Ill-formed query; inconsistency between alternatives regarding first sequence in query at: {}", ctx.query);
+        AIDGE_ASSERT(firstNode == altCtx.firstNode,
+            "Ill-formed query; inconsistency between alternatives regarding first node in query at: {}", ctx.query);
+        newMatches.insert(altMatches.begin(), altMatches.end());
+    }
+
+    if (!found) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    newCtx.firstSequence = firstSequence;
+    newCtx.firstNode = firstNode;
+
+    --newCtx.depth;
+    ctx = newCtx;
+    matches = newMatches;
+    return true;
+}
+
+bool Aidge::SinglePassGraphMatching::matchEdge(Context& ctx, std::set<MatchingResult>& /*matches*/) {
+    auto newCtx = ctx;
+    Log::debug("{}edge", std::string(2*newCtx.depth, ' '));
+
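+    // Edge syntax sketch: ('-' | '~') [idx] ['-' idx] '>' looks for a child;
+    // the mirrored '<' [idx] ['-' idx] ('-' | '~') looks for a parent.
+    // '-' requires the output to have a single connection, '~' allows several.
+    // E.g. "-0-1>" links output#0 of the left node to input#1 of the right one.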
+    // ('-' | '~') or '<'
+    removeWhiteSpace(newCtx.query);
+    if (!newCtx.query.empty() && (newCtx.query[0] == '-' || newCtx.query[0] == '~')) {
+        newCtx.singleOutput = (newCtx.query[0] == '-');
+        newCtx.query.erase(0, 1); // drop '-'
+        newCtx.lookForChild = true;
+    }
+    else if (!newCtx.query.empty() && newCtx.query[0] == '<') {
+        newCtx.query.erase(0, 1); // drop '<'
+        newCtx.lookForChild = false;
+    }
+    else {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    // optional first IO_INDEX
+    int firstIdx = 0;
+    bool foundFirst = false;
+    const auto endOutputIdx = std::find_if(newCtx.query.begin(), newCtx.query.end(),
+        [](char c) { return !isdigit(c); });
+    if (endOutputIdx != newCtx.query.begin()) {
+        firstIdx = std::stoi(newCtx.query.substr(0, endOutputIdx - newCtx.query.begin()));
+        newCtx.query = newCtx.query.substr(endOutputIdx - newCtx.query.begin());
+        foundFirst = true;
+    }
+    else if (newCtx.query[0] == '*') {
+        newCtx.query.erase(0, 1); // drop '*'
+        firstIdx = -1;
+        foundFirst = true;
+    }
+
+    // optional second IO_INDEX, preceded by '-'
+    int secondIdx = 0;
+    bool foundSecond = false;
+    if (foundFirst && !newCtx.query.empty() && newCtx.query[0] == '-') {
+        auto query = newCtx.query;
+        query.erase(0, 1); // drop '-'
+
+        const auto endInputIdx = std::find_if(query.begin(), query.end(),
+            [](char c) { return !isdigit(c); });
+        if (endInputIdx != query.begin()) {
+            secondIdx = std::stoi(query.substr(0, endInputIdx - query.begin()));
+            query = query.substr(endInputIdx - query.begin());
+            foundSecond = true;
+        }
+        else if (query[0] == '*') {
+            query.erase(0, 1); // drop '*'
+            secondIdx = -1;
+            foundSecond = true;
+        }
+
+        if (foundSecond) {
+            newCtx.query = query;
+        }
+    }
+
+    // '>' or ('-' | '~')
+    if (newCtx.lookForChild && !newCtx.query.empty() && newCtx.query[0] == '>') {
+        newCtx.query.erase(0, 1); // drop '>'
+    }
+    else if (!newCtx.lookForChild && !newCtx.query.empty() && (newCtx.query[0] == '-' || newCtx.query[0] == '~')) {
+        newCtx.singleOutput = (newCtx.query[0] == '-');
+        newCtx.query.erase(0, 1);
+    }
+    else {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    // Parsing is done, update the remaining context
+    newCtx.edgeLeftIdx = 0;
+    newCtx.edgeRightIdx = 0;
+    if (foundFirst && foundSecond) {
+        newCtx.edgeLeftIdx = firstIdx;
+        newCtx.edgeRightIdx = secondIdx;
+    }
+    else if (foundFirst) {
+        if (newCtx.lookForChild) {
+            newCtx.edgeRightIdx = firstIdx;
+        }
+        else {
+            newCtx.edgeLeftIdx = firstIdx;
+        }
+    }
+
+    if (newCtx.lookForChild) {
+        Log::debug("{}-{}-{}>", std::string(2*newCtx.depth + 2, ' '),
+            newCtx.edgeLeftIdx, newCtx.edgeRightIdx);
+    }
+    else {
+        Log::debug("{}<{}-{}-", std::string(2*newCtx.depth + 2, ' '),
+            newCtx.edgeLeftIdx, newCtx.edgeRightIdx);
+    }
+
+    ctx = newCtx;
+    return true;
+}
+
+bool Aidge::SinglePassGraphMatching::matchNode(Context& ctx, std::set<MatchingResult>& matches) {
+    auto newCtx = ctx;
+    Log::debug("{}node", std::string(2*newCtx.depth, ' '));
+    auto newMatches = matches;
+
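+    // Node syntax sketch: a TYPE identifier, '.' (any type) or '$' (end of
+    // graph: no further connected node), optionally followed by '#anchor'
+    // and a '[lambda]' filter registered beforehand.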
+    // (TYPE | '.' | '$')
+    removeWhiteSpace(newCtx.query);
+    if (newCtx.query.empty()) {
+        Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+        return false;
+    }
+
+    std::string type;
+    bool unconnected = false;
+    if (newCtx.query[0] == '.') {
+        // '.'
+        newCtx.query.erase(0, 1); // drop '.'
+    }
+    else if (newCtx.query[0] == '$') {
+        // '$'
+        newCtx.query.erase(0, 1); // drop '$'
+        unconnected = true;
+    }
+    else {
+        // TYPE
+        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(),
+            [](char c) { return (!isalnum(c) && c != '_'); });
+
+        if (endIdentifier == newCtx.query.begin()) {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+
+        type = newCtx.query.substr(0, endIdentifier - newCtx.query.begin());
+        newCtx.query = newCtx.query.substr(endIdentifier - newCtx.query.begin());
+    }
+
+    // ('#' ANCHOR)?
+    std::string anchor = "";
+    if (!newCtx.query.empty() && newCtx.query[0] == '#') {
+        AIDGE_ASSERT(!unconnected,
+            "Ill-formed query; an anchor cannot be specified for end of graph ($) in query at: {}", ctx.query);
+
+        // '#'
+        newCtx.query.erase(0, 1); // drop '#'
+
+        // ANCHOR
+        const auto endAnchor = std::find_if(newCtx.query.begin(), newCtx.query.end(),
+            [](char c) { return (!isalnum(c) && c != '_'); });
+        anchor = "#" + newCtx.query.substr(0, endAnchor - newCtx.query.begin());
+        newCtx.query = newCtx.query.substr(endAnchor - newCtx.query.begin());
+    }
+
+    // ('[' LAMBDA ']')?
+    std::string lambda = "";
+    if (!newCtx.query.empty() && newCtx.query[0] == '[') {
+        AIDGE_ASSERT(!unconnected,
+            "Ill-formed query; a lambda cannot be specified for end of graph ($) in query at: {}", ctx.query);
+
+        // '['
+        newCtx.query.erase(0, 1);
+
+        // LAMBDA
+        const auto endIdentifier = std::find_if(newCtx.query.begin(), newCtx.query.end(),
+            [](char c) { return (!isalnum(c) && c != '_'); });
+
+        if (endIdentifier == newCtx.query.begin()) {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+
+        lambda = newCtx.query.substr(0, endIdentifier - newCtx.query.begin());
+        newCtx.query = newCtx.query.substr(endIdentifier - newCtx.query.begin());
+
+        // ']'
+        if (!newCtx.query.empty() && newCtx.query[0] == ']') {
+            newCtx.query.erase(0, 1);
+        }
+        else {
+            Log::debug("{}{}", std::string(2*ctx.depth, ' '), fmt::styled("×", fmt::fg(fmt::color::red)));
+            return false;
+        }
+    }
+
+    // Parsing is done, try to match the node
+    if (unconnected) {
+        for (auto it = newMatches.begin(); it != newMatches.end(); ) {
+            bool found = false;
+
+            if (newCtx.lookForChild) {
+                const auto outputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
+                    ? ((newCtx.edgeLeftIdx < it->startNode->nbOutputs())
+                        ? std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>(1, std::vector<std::pair<NodePtr, IOIndex_t>>(it->startNode->output(newCtx.edgeLeftIdx)))
+                        : std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>())
+                    : it->startNode->outputs();
+
+                for (const auto& output : outputs) {
+                    for (const auto& node : output) {
+                        if (!node.first) {
+                            continue;
+                        }
+
+                        if (newCtx.edgeRightIdx == gk_IODefaultIndex || node.second == newCtx.edgeRightIdx) {
+                            if (mGraph->inView(node.first) && !it->graph->inView(node.first)) {
+                                found = true;
+                                break;
+                            }
+                        }
+                    }
+
+                    if (found) {
+                        break;
+                    }
+                }
+            }
+            else {
+                const auto inputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
+                    ? ((newCtx.edgeLeftIdx < it->startNode->nbInputs())
+                        ? std::vector<std::pair<NodePtr, IOIndex_t>>(1, it->startNode->input(newCtx.edgeLeftIdx))
+                        : std::vector<std::pair<NodePtr, IOIndex_t>>())
+                    : it->startNode->inputs();
+
+                for (const auto& input : inputs) {
+                    if (!input.first) {
+                        continue;
+                    }
+
+                    if (newCtx.edgeRightIdx == gk_IODefaultIndex || input.second == newCtx.edgeRightIdx) {
+                        if (mGraph->inView(input.first) && !it->graph->inView(input.first)) {
+                            found = true;
+                            break;
+                        }
+                    }
+                }
+            }
+
+            if (found) {
+                it = newMatches.erase(it);
+            }
+            else {
+                ++it;
+            }
+        }
+
+        Log::debug("{}node $, found: {}", std::string(2*newCtx.depth + 2, ' '), newMatches.size());
+    }
+    else if (newCtx.firstSequence && newCtx.firstNode) {
+        // First node of first sequence = root node
+        const auto nodes = (newCtx.startNode) ? std::set<NodePtr>{newCtx.startNode} : mGraph->getNodes();
+
+        for (auto node : nodes) {
+            if ((type.empty() || node->type() == type)
+                && (lambda.empty() || mLambda.at(lambda)(node)))
+            {
+                MatchingResult result;
+                result.graph->add(node, false);
+                if (!anchor.empty()) {
+                    result.anchors[type][anchor] = node;
+                }
+                result.startNode = node;
+                newMatches.insert(result);
+            }
+        }
+        newCtx.firstSequence = false;
+
+        Log::debug("{}root node {}{}, found: {}", std::string(2*newCtx.depth + 2, ' '), fmt::styled(type.empty() ? "." : type, fmt::fg(fmt::color::yellow)), anchor, newMatches.size());
+    }
+    else if (newCtx.firstNode) {
+        // First node of a (new) sequence: it has to be an existing anchor
+        AIDGE_ASSERT(!anchor.empty(),
+            "Ill-formed query; an anchor is expected in query at: {}", ctx.query);
+        AIDGE_ASSERT(newCtx.anchors.find(type + anchor) != newCtx.anchors.end(),
+            "Ill-formed query; the node anchor {} has to be an existing anchor in query at: {}", type + anchor, ctx.query);
+
+        for (auto it = newMatches.begin(); it != newMatches.end(); ) {
+            const auto anchors = it->anchors[type];
+            const auto anchorNode = anchors.find(anchor);
+            if (anchorNode != anchors.end()) {
+                it->startNode = anchorNode->second;
+                ++it;
+            }
+            else {
+                it = newMatches.erase(it);
+            }
+        }
+
+        Log::debug("{}anchor node {}{}, found: {}", std::string(2*newCtx.depth + 2, ' '), fmt::styled(type.empty() ? "." : type, fmt::fg(fmt::color::yellow)), anchor, newMatches.size());
+    }
+    else {
+        for (auto it = newMatches.begin(); it != newMatches.end(); ) {
+            bool found = false;
+
+            if (newCtx.lookForChild) {
+                const auto outputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
+                    ? ((newCtx.edgeLeftIdx < it->startNode->nbOutputs())
+                        ? std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>(1, std::vector<std::pair<NodePtr, IOIndex_t>>(it->startNode->output(newCtx.edgeLeftIdx)))
+                        : std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>())
+                    : it->startNode->outputs();
+
+                for (const auto& output : outputs) {
+                    if (newCtx.singleOutput && output.size() > 1) {
+                        continue;
+                    }
+
+                    for (const auto& node : output) {
+                        if (!node.first) {
+                            continue;
+                        }
+
+                        if ((type.empty() || node.first->type() == type)
+                            && (lambda.empty() || mLambda.at(lambda)(node.first))
+                            && (newCtx.edgeRightIdx == gk_IODefaultIndex || node.second == newCtx.edgeRightIdx))
+                        {
+                            if (mGraph->inView(node.first) && !it->graph->inView(node.first)) {
+                                it->graph->add(node.first, false);
+                                if (!anchor.empty()) {
+                                    it->anchors[type][anchor] = node.first;
+                                }
+                                it->startNode = node.first;
+                                found = true;
+                                break;
+                            }
+                            else if (!anchor.empty() && it->anchors[type].find(anchor) != it->anchors[type].end()) {
+                                it->startNode = node.first;
+                                found = true;
+                                break;
+                            }
+                        }
+                    }
+
+                    if (found) {
+                        break;
+                    }
+                }
+            }
+            else {
+                const auto inputs = (newCtx.edgeLeftIdx != gk_IODefaultIndex)
+                    ? ((newCtx.edgeLeftIdx < it->startNode->nbInputs())
+                        ? std::vector<std::pair<NodePtr, IOIndex_t>>(1, it->startNode->input(newCtx.edgeLeftIdx))
+                        : std::vector<std::pair<NodePtr, IOIndex_t>>())
+                    : it->startNode->inputs();
+
+                for (const auto& input : inputs) {
+                    if (!input.first) {
+                        continue;
+                    }
+
+                    if ((type.empty() || input.first->type() == type)
+                        && (lambda.empty() || mLambda.at(lambda)(input.first))
+                        && (newCtx.edgeRightIdx == gk_IODefaultIndex || input.second == newCtx.edgeRightIdx))
+                    {
+                        if (newCtx.singleOutput && input.first->getChildren(input.second).size() > 1) {
+                            continue;
+                        }
+
+                        if (mGraph->inView(input.first) && !it->graph->inView(input.first)) {
+                            it->graph->add(input.first, false);
+                            if (!anchor.empty()) {
+                                it->anchors[type][anchor] = input.first;
+                            }
+                            it->startNode = input.first;
+                            found = true;
+                            break;
+                        }
+                        else if (!anchor.empty() && it->anchors[type].find(anchor) != it->anchors[type].end()) {
+                            it->startNode = input.first;
+                            found = true;
+                            break;
+                        }
+                    }
+                }
+            }
+
+            if (found) {
+                ++it;
+            }
+            else {
+                it = newMatches.erase(it);
+            }
+        }
+
+        Log::debug("{}node {}{}, found: {}", std::string(2*newCtx.depth + 2, ' '), fmt::styled(type.empty() ? "." : type, fmt::fg(fmt::color::yellow)), anchor, newMatches.size());
+    }
+
+    newCtx.anchors.insert(type + anchor);
+    ctx = newCtx;
+    matches = newMatches;
+    return true;
+}
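+
+// Usage sketch for the lambda syntax parsed above. It assumes the matching
+// class exposes a registration method that fills mLambda; the names below
+// (addNodeLambda, the "3x3" key) are illustrative, not confirmed by this hunk:
+//   SinglePassGraphMatching gm(graphView);
+//   gm.addNodeLambda("3x3", [](const NodePtr& node) { /* inspect node */ return true; });
+//   const auto results = gm.match("Conv[3x3]->ReLU");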
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index d8ae5532239e34874ed4195c5a5be629b064e77d..b2ceb903d51dbb880979cd2191825a6310f9e5ff 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -29,8 +29,13 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
       mIdInChildren(std::vector<std::vector<IOIndex_t>>(static_cast<std::size_t>(op->nbOutputs()),
                                                         std::vector<IOIndex_t>())),
       mIdOutParents(
-              std::vector<IOIndex_t>(static_cast<std::size_t>(op->nbInputs()), gk_IODefaultIndex)) {
+              std::vector<IOIndex_t>(static_cast<std::size_t>(op->nbInputs()), gk_IODefaultIndex))
+{
     // ctor
+    if (op) {
+        mForward.push_back([this](){ this->mOperator->forward(); return true; });
+        mBackward.push_back([this](){ this->mOperator->backward(); return true; });
+    }
 }
 
 ///////////////////////////////////////////////////////
@@ -38,17 +43,24 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
 ///////////////////////////////////////////////////////
 
 Aidge::Connector Aidge::Node::operator()(const std::vector<Connector>& ctors) {
-    assert((ctors.size() == nbData()) && "Wrong number of arguments.\n");
-    for (std::size_t i = 0; i < nbData(); i++) {
-        assert((gk_IODefaultIndex == input(i).second) &&
-               "At least one input connection is not free.\n");
-    }
-    IOIndex_t i = 0;
-    for (const Connector& ctor : ctors) {
+    IOIndex_t idx = 0;
+    for (const auto& ctor : ctors) {
+        // Skip to next possible input idx
+        for (; idx < nbInputs() && (inputCategory(idx) != InputCategory::Data && inputCategory(idx) != InputCategory::OptionalData); ++idx) {}
+
+        AIDGE_ASSERT(idx < nbInputs(), "Too many input connectors ({}) for the available node inputs.", ctors.size());
+        AIDGE_ASSERT(input(idx).second == gk_IODefaultIndex, "Data input#{} connection is not free.", idx);
+
         if (ctor.node() != nullptr) {  // ctor must be associated with a node
-            ctor.node()->addChild(shared_from_this(), ctor.index(), i++);
+            ctor.node()->addChild(shared_from_this(), ctor.index(), idx);
         }
+        ++idx;
     }
+
+    // Skip to next possible input idx
+    for (; idx < nbInputs() && (inputCategory(idx) != InputCategory::Data && inputCategory(idx) != InputCategory::OptionalData); ++idx) {}
+    AIDGE_ASSERT(idx == nbInputs(), "Missing an input connector for Data input#{}", idx);
+
     return Connector(shared_from_this());
 }
 
@@ -57,22 +69,56 @@ Aidge::Connector Aidge::Node::operator()(const std::vector<Connector>& ctors) {
 ///////////////////////////////////////////////////////
 
 void Aidge::Node::setName(const std::string& name) {
-    for (auto graphView : views()) graphView->updateNodeName(mName, name);
+    for (auto graphView : views()) graphView->updateNodeName(shared_from_this(), name);
     mName = name;
 }
 
+std::string Aidge::Node::createUniqueName(std::string baseName)
+{
+    int index = 0;
+    bool nameAlreadyUsed = true;
+    std::string newName;
+    while (nameAlreadyUsed) {
+        std::string suffix = "_" + std::to_string(index);
+        newName = (index == 0) ? baseName : baseName + suffix;
+        nameAlreadyUsed = false;
+        for (auto graphView : views()) {
+            if (graphView->inView(newName)) {
+                nameAlreadyUsed = true;
+                break;
+            }
+        }
+        index++;
+    }
+    return newName;
+}
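+
+// Usage sketch: createUniqueName("conv") returns "conv" when no view of this
+// node contains a node with that name, otherwise the first free suffixed
+// variant "conv_1", "conv_2", ...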
+
 ///////////////////////////////////////////////////////
 //        OPERATORS
 ///////////////////////////////////////////////////////
 
 void Aidge::Node::forward() {
-    assert((mOperator != nullptr) && "No Operator interface provided, can't run forward().\n");
-    mOperator->forward();
+    for (auto it = mForward.begin(); it != mForward.end(); ) {
+        const auto keep = (*it)();
+        if (!keep) {
+            it = mForward.erase(it);
+        }
+        else {
+            ++it;
+        }
+    }
 }
 
 void Aidge::Node::backward() {
-    assert((mOperator != nullptr) && "No Operator interface provided, can't run backward().\n");
-    mOperator->backward();
+    for (auto it = mBackward.begin(); it != mBackward.end(); ) {
+        const auto keep = (*it)();
+        if (!keep) {
+            it = mBackward.erase(it);
+        }
+        else {
+            ++it;
+        }
+    }
 }
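+
+// Hook sketch: the mForward/mBackward entries above run in order, and a
+// callback returning false is erased after its call, enabling one-shot hooks.
+// Assuming a registration helper such as addBeforeForward() (not shown in
+// this hunk), a persistent hook could look like:
+//   node->addBeforeForward([]() { Log::debug("before forward"); return true; });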
 
 ///////////////////////////////////////////////////////
@@ -100,10 +146,11 @@ Aidge::IOIndex_t Aidge::Node::getNbFreeDataInputs() const {
 
 std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> Aidge::Node::dataInputs()
         const {
-    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res =
-            std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>(nbData());
-    for (std::size_t i = 0; i < static_cast<std::size_t>(nbData()); ++i) {
-        res[i] = std::pair<std::shared_ptr<Node>, IOIndex_t>(mParents[i], mIdOutParents[i]);
+    std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> res;
+    for (std::size_t i = 0; i < static_cast<std::size_t>(nbInputs()); ++i) {
+        if (inputCategory(i) == InputCategory::Data || inputCategory(i) == InputCategory::OptionalData) {
+            res.push_back(std::pair<std::shared_ptr<Node>, IOIndex_t>(mParents[i], mIdOutParents[i]));
+        }
     }
     return res;
 }
@@ -175,11 +222,11 @@ void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId)
         "Input index ({}) is out of bound ({}) for node {} (of type {})",
         inId, nbInputs(), name(), type());
     if (mIdOutParents[inId] != gk_IODefaultIndex) {
-        Log::notice("Notice: filling a Tensor already attributed");
+        Log::notice("Filling a Tensor already attributed.");
         auto originalParent = input(inId);
         // remove original parent reference to child
         // find the output ID for original Parent
-        // find first occurence of child in the output's children
+        // find first occurrence of child in the output's children
         originalParent.first->removeChild(shared_from_this(), originalParent.second);
     }
     mIdOutParents[inId] = newNodeoutId;
@@ -243,7 +290,7 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t
 
 void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) {
     if (getParent(inId) != nullptr) {
-        Log::notice("Notice: you are replacing an existing parent for node {} (of type {})", name(), type());
+        Log::notice("You are replacing an existing parent for node {} (of type {}).", name(), type());
     }
     AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(),
         "Input index ({}) is out of bound ({}) for node {} (of type {})",
@@ -319,18 +366,19 @@ bool Aidge::Node::removeChild(const std::shared_ptr<Aidge::Node> nodePtr,
 
 void Aidge::Node::resetConnections(bool includeLearnableParam) {
     // remove every parents reference to it
-    IOIndex_t nbRemovedInputs = includeLearnableParam ? nbInputs() : nbData();
-    for (IOIndex_t i = 0; i < nbRemovedInputs; ++i) {
-        std::pair<std::shared_ptr<Node>, IOIndex_t> parent = input(i);
-        if (parent.first) {
-            // number of children linked to the parent's output
-            while (parent.first->removeChild(shared_from_this(), parent.second) == true) {
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (includeLearnableParam || inputCategory(i) == InputCategory::Data || inputCategory(i) == InputCategory::OptionalData) {
+            std::pair<std::shared_ptr<Node>, IOIndex_t> parent = input(i);
+            if (parent.first) {
+                // number of children linked to the parent's output
+                while (parent.first->removeChild(shared_from_this(), parent.second) == true) {
+                }
             }
+            // every reference to this object as child has been removed
+            // removing reference to parents.
+            mParents[i] = nullptr;
+            mIdOutParents[i] = gk_IODefaultIndex;
         }
-        // every reference to this object as child has been removed
-        // removing reference to parents.
-        mParents[i] = nullptr;
-        mIdOutParents[i] = gk_IODefaultIndex;
     }
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
         for (std::pair<std::shared_ptr<Node>, IOIndex_t> child : output(i)) {
@@ -393,6 +441,9 @@ std::set<Aidge::NodePtr> Aidge::Node::getNodeDelta(int delta, std::set<Aidge::No
     return out;
 }
 
+
+Aidge::Node::~Node() = default;
+
 // namespace Aidge {
 // std::ostream& operator << (std::ostream& os, Aidge::Node& n) {
 //     using namespace std;
diff --git a/src/graph/OpArgs.cpp b/src/graph/OpArgs.cpp
index e1a378c3db0d79d7816e9882f790540cdc26cd88..6fe2320ea0ed6a71b6c4fad6a3fab4e1b6472abf 100644
--- a/src/graph/OpArgs.cpp
+++ b/src/graph/OpArgs.cpp
@@ -9,16 +9,23 @@
  *
  ********************************************************************************/
 
-#include "aidge/graph/Node.hpp"
-#include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
 
+#include <memory>
+#include <string>
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+
+Aidge::OpArgs::OpArgs(const OpArgs&) = default;
+Aidge::OpArgs& Aidge::OpArgs::operator=(const OpArgs&) = default;
+Aidge::OpArgs::~OpArgs() noexcept = default;
 
-std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs) {
-    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
+std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs, std::string name) {
+    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>(name);
     for (const OpArgs& elt : inputs) {
         if(elt.node() != nullptr) {
-            // Connect the first output (ordered) of each output node (ordered) 
+            // Connect the first output (ordered) of each output node (ordered)
             // to the next available input of the input node.
             AIDGE_ASSERT(static_cast<std::size_t>(elt.node()->getNbFreeDataInputs()) >= gv->outputNodes().size(),
                 "Sequential(): not enough free data inputs ({}) for input node {} (of type {}) to connect to all previous output nodes ({})",
@@ -33,7 +40,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs)
             gv->add(elt.node());
         }
         else {
-            // For each input node, connect the first output (ordered) of each 
+            // For each input node, connect the first output (ordered) of each
             // output node (ordered) to the next available input
             std::set<NodePtr> connectedInputs;
             for (const auto& node_in : elt.view()->getOrderedInputs()) {
@@ -58,8 +65,8 @@ std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs)
 }
 
 
-std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::vector<OpArgs> inputs) {
-    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
+std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::vector<OpArgs> inputs, std::string name) {
+    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>(name);
     for(const OpArgs& elt : inputs) {
         if (elt.node()!=nullptr)
             gv->add(elt.node());
@@ -70,8 +77,8 @@ std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::vector<OpArgs> inputs) {
 }
 
 
-std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::vector<OpArgs> inputs) {
-    std::shared_ptr<GraphView> gv = Sequential(inputs);
+std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::vector<OpArgs> inputs, std::string name) {
+    std::shared_ptr<GraphView> gv = Sequential(inputs, name);
     AIDGE_ASSERT(gv->outputNodes().size() == 1U,
         "Residual(): Zero or more than one output Node for the GraphView, don't know which one to choose from for the residual connection");
     std::shared_ptr<Node> lastNode = *gv->outputNodes().begin();
diff --git a/src/graph/Testing.cpp b/src/graph/Testing.cpp
index f30ad6e25b81e1ce7768fcc201ddf00c2226eebf..774ee8912da2ddaa19583debdac063a95b5aa461 100644
--- a/src/graph/Testing.cpp
+++ b/src/graph/Testing.cpp
@@ -45,7 +45,7 @@ std::pair<Aidge::NodePtr, std::set<Aidge::NodePtr>> Aidge::RandomGraph::gen(std:
     std::vector<NodePtr> nodes(nbNodes, nullptr);
     for (auto idx : nodesSeq) {
         const std::string name = nodesType[idx] + std::to_string(idx);
-        nodes[idx] = GenericOperator(nodesType[idx], nbIOs[idx].first, 0, nbIOs[idx].second, name);
+        nodes[idx] = GenericOperator(nodesType[idx], std::vector<InputCategory>(nbIOs[idx].first, InputCategory::Data), nbIOs[idx].second, name);
     }
 
     for (std::size_t i = 0; i < nbNodes; ++i) {
diff --git a/src/operator/Abs.cpp b/src/operator/Abs.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1dd7836ad220d031d60356a5663db84adaa486ec
--- /dev/null
+++ b/src/operator/Abs.cpp
@@ -0,0 +1,29 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Abs.hpp"
+
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Abs_Op::Type = "Abs";
+
+void Aidge::Abs_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Abs_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Abs_Op::getAvailableBackends() const {
+    return Registrar<Abs_Op>::getKeys();
+}
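+
+// Registration sketch: getAvailableBackends() only reflects what backend
+// libraries registered. A backend would typically self-register with
+// something like (AbsImpl_cpu is a hypothetical backend class):
+//   static Registrar<Abs_Op> registrarAbsCpu("cpu", AbsImpl_cpu::create);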
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 9b77ffcbe0117292ed0aa520309febf709e8dd68..033c476c8a9e865fdf9d5670e295c3e4fb6101b3 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -22,6 +22,14 @@
 
 const std::string Aidge::Add_Op::Type = "Add";
 
+Aidge::Add_Op::Add_Op(const IOIndex_t nbIn)
+    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1)
+{
+    if (nbIn == 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
+    }
+}
+
 Aidge::Add_Op::Add_Op(const Add_Op& op)
     : OperatorTensor(op)
 {
@@ -32,16 +40,12 @@ Aidge::Add_Op::Add_Op(const Add_Op& op)
     }
 }
 
+std::shared_ptr<Aidge::Operator> Aidge::Add_Op::clone() const {
+    return std::make_shared<Add_Op>(*this);
+}
+
 bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = (nbInputs() > 0); // do not compute anything if no input
-    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
-        }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
         for (std::size_t i = 0; i < nbInputs(); i++) {
             inputsDims[i] = getInput(i)->dims();
@@ -70,12 +74,21 @@ bool Aidge::Add_Op::forwardDims(bool /*allowDataDependency*/) {
             }
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(Add_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Add_Op::getAvailableBackends() const {
+    return Registrar<Add_Op>::getKeys();
+}
+
+std::shared_ptr<Aidge::Node> Aidge::Add(const IOIndex_t nbIn, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Add_Op>(nbIn), name);
 }
\ No newline at end of file
diff --git a/src/operator/And.cpp b/src/operator/And.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..aebd5a71725f0999635f3844d8b2589bfb885138
--- /dev/null
+++ b/src/operator/And.cpp
@@ -0,0 +1,62 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>    // std::size_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/And.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::And_Op::Type = "And";
+
+bool Aidge::And_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for And Operation: {} for input#0 vs {} for input#1",
+                    inputsDims0, inputsDims1);
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
+        return true;
+    }
+
+    return false;
+}
+
+void Aidge::And_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(And_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::And_Op::getAvailableBackends() const {
+    return Registrar<And_Op>::getKeys();
+}
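+
+// Broadcasting sketch for the alignment loop above: dimensions are compared
+// right-to-left and 1s stretch, so input#0 of shape [2, 1, 4] and input#1 of
+// shape [3, 1] yield an output of shape [2, 3, 4]; mismatched non-1
+// dimensions throw.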
diff --git a/src/operator/ArgMax.cpp b/src/operator/ArgMax.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4808b730d2261ba0c1ea6d0d09871b1f322fc8fb
--- /dev/null
+++ b/src/operator/ArgMax.cpp
@@ -0,0 +1,57 @@
+/********************************************************************************
+ * Copyright (c) 2024 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ArgMax.hpp"
+
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ArgMax_Op::Type = "ArgMax";
+
+bool Aidge::ArgMax_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        // make Axis attribute positive
+        std::int32_t axis = mAttributes->template getAttr<ArgMaxAttr::Axis>();
+        axis = axis >= 0 ? axis: axis+static_cast<std::int32_t>(getInput(0)->nbDims());
+
+        // build output dimensions
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        if (mAttributes->template getAttr<ArgMaxAttr::KeepDims>()) {
+            outDims[axis] = 1;
+        }
+        else {
+            outDims.erase(outDims.begin() + static_cast<std::size_t>(axis));
+        }
+
+        // TODO: change {1} for {} when scalar Tensors are better handled.
+        mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
+        return true;
+    }
+    return false;
+}
+
+void Aidge::ArgMax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(ArgMax_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::ArgMax_Op::getAvailableBackends() const {
+    return Registrar<ArgMax_Op>::getKeys();
+}
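+
+// Shape sketch for forwardDims() above: with input dims [3, 5, 7] and
+// Axis = -2 (normalized to 1), KeepDims = true yields output dims [3, 1, 7]
+// and KeepDims = false yields [3, 7].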
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 07123bc88aa1da22bfa98166d6a01af8d66be98d..f8c8e5e3f32fff8306184dfdf3baa87392479ebf 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -26,8 +26,12 @@
 template <Aidge::DimIdx_t DIM>
 const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling";
 
+
 template <Aidge::DimIdx_t DIM>
-Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op): OperatorTensor(op), Attributes_(op) {
+Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
     if (op.mImpl) {
         SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend());
     } else {
@@ -35,23 +39,24 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op): Operator
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::AvgPooling_Op<DIM>::clone() const {
+    return std::make_shared<AvgPooling_Op<DIM>>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-    if (!(getInput(0)->empty())) {
+    if (inputsAssociated()) {
         std::array<DimSize_t, DIM + 2> outputDims;
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
         outputDims[0] = inputDims[0];
         outputDims[1] = inputDims[1];
 
-        for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                         std::floor(static_cast<float>(inputDims[dim+2] -
-                                                                this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
-                                        static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
+                                                            mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
+                                        static_cast<float>(mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
         }
         getOutput(0)->resize(outputDims);
         return true;
@@ -89,10 +94,10 @@ Aidge::AvgPooling_Op<DIM>::computeReceptiveField(const std::vector<Aidge::DimSiz
 
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
-            inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
+            inputIdxDims[2+i] *= mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
         }
         std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
         res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
@@ -108,7 +113,25 @@ void Aidge::AvgPooling_Op<DIM>::setBackend(const std::string &name, Aidge::Devic
     mOutputs[0]->setBackend(name, device);
 }
 
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::AvgPooling_Op<DIM>::getAvailableBackends() const {
+    return Registrar<AvgPooling_Op<DIM>>::getKeys();
+}
+
 template class Aidge::AvgPooling_Op<1>;
 template class Aidge::AvgPooling_Op<2>;
 template class Aidge::AvgPooling_Op<3>;
-template class Aidge::AvgPooling_Op<4>;
\ No newline at end of file
+template class Aidge::AvgPooling_Op<4>;
+
+////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::AvgPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                           const std::string& name,
+                                           const std::array<Aidge::DimSize_t, DIM> &stride_dims) {
+    AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", AvgPooling_Op<DIM>::Type);
+    return std::make_shared<Node>(std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims), name);
+}
+template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&);
+template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&);
+template std::shared_ptr<Aidge::Node> Aidge::AvgPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&);
\ No newline at end of file
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index 14bf65763c024ffe28d30654a49c9630737a12fd..bcf3b29c45abe2c40788fd1ec0bad87db8ee227b 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -27,7 +27,10 @@ template <Aidge::DimIdx_t DIM>
 const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm";
 
 template <Aidge::DimIdx_t DIM>
-Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op): OperatorTensor(op), Attributes_(op) {
+Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
     if (op.mImpl) {
         SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend());
     } else {
@@ -35,25 +38,26 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op): OperatorTen
     }
 }
 
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::BatchNorm_Op<DIM>::clone() const {
+    return std::make_shared<BatchNorm_Op<DIM>>(*this);
+}
+
 template <Aidge::DimIdx_t DIM>
 bool Aidge::BatchNorm_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = true;
-    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         const DimSize_t nbFeatures =  getInput(0)->dims()[1];
-        for (std::size_t i = nbData(); i < nbInputs(); ++i) {
-            if(getInput(i)->size() != nbFeatures) {
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            if(inputCategory(i) == InputCategory::Param && getInput(i)->size() != nbFeatures) {
                 // /!\ Input size should be handled BEFORE calling this function
                 // This should raise an error
                 getInput(i)->resize({getInput(0)->dims()[1]});
             }
         }
         mOutputs[0]->resize(getInput(0)->dims());
+        return true;
     }
-    return associated;
+    return false;
 }
 
 template <Aidge::DimIdx_t DIM>
@@ -62,10 +66,38 @@ void Aidge::BatchNorm_Op<DIM>::setBackend(const std::string &name, Aidge::Device
     mOutputs[0]->setBackend(name, device);
 
     // By default, automatically set backend for scale, shift, mean and variance
-    getInput(1)->setBackend(name, device);
-    getInput(2)->setBackend(name, device);
-    getInput(3)->setBackend(name, device);
-    getInput(4)->setBackend(name, device);
+    if (getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    else {
+        Log::notice("BatchNorm_Op::setBackend(): could not set backend for scale input, because input is not connected");
+    }
+
+    if (getInput(2)) {
+        getInput(2)->setBackend(name, device);
+    }
+    else {
+        Log::notice("BatchNorm_Op::setBackend(): could not set backend for shift input, because input is not connected");
+    }
+
+    if (getInput(3)) {
+        getInput(3)->setBackend(name, device);
+    }
+    else {
+        Log::notice("BatchNorm_Op::setBackend(): could not set backend for variance input, because input is not connected");
+    }
+
+    if (getInput(4)) {
+        getInput(4)->setBackend(name, device);
+    }
+    else {
+        Log::notice("BatchNorm_Op::setBackend(): could not set backend for mean input, because input is not connected");
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::BatchNorm_Op<DIM>::getAvailableBackends() const {
+    return Registrar<BatchNorm_Op<DIM>>::getKeys();
 }
 
 template class Aidge::BatchNorm_Op<2>;
@@ -73,7 +105,7 @@ template class Aidge::BatchNorm_Op<3>;
 template class Aidge::BatchNorm_Op<4>;
 
 template <Aidge::DimSize_t DIM>
-inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const DimSize_t nbFeatures,
+inline std::shared_ptr<Aidge::Node> Aidge::BatchNorm(const Aidge::DimSize_t nbFeatures,
                                        const float epsilon,
                                        const float momentum,
                                        const std::string& name) {
diff --git a/src/operator/BitShift.cpp b/src/operator/BitShift.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7595590f7811f08eb2b790a259cff6a8ee72ffbf
--- /dev/null
+++ b/src/operator/BitShift.cpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>    // std::size_t
+#include <memory>
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/operator/BitShift.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::BitShift_Op::Type = "BitShift";
+
+bool Aidge::BitShift_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (!inputsAssociated()) {
+        return false;
+    }
+
+    const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+    const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+    std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+    const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+    std::size_t out_id = outDims.size() - 1;
+    std::size_t low_id = lowDims.size() - 1;
+    std::size_t i = 0;
+
+    while (i++ < lowDims.size()) {
+        if (outDims[out_id] == 1) {
+            outDims[out_id] = lowDims[low_id];
+        }
+        else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for BitShift Operation: {} for input#0 vs {} for input#1",
+                inputsDims0, inputsDims1);
+        }
+        --out_id;
+        --low_id;
+    }
+    mOutputs[0]->resize(outDims);
+    return true;
+}
+
+
+void Aidge::BitShift_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(BitShift_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::BitShift_Op::getAvailableBackends() const {
+    return Registrar<BitShift_Op>::getKeys();
+}
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index f1c8e25e17c80d58d444a1ddddbaa428b2fc4c41..54eef17b67b320ef244881cee44ed8cabaa9bf47 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -27,6 +27,16 @@ void Aidge::Cast_OpImpl::forward() {
 
 const std::string Aidge::Cast_Op::Type = "Cast";
 
+Aidge::Cast_Op::Cast_Op(const DataType targetType)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+        attr<CastAttr::TargetType>(targetType)))
+{
+    mImpl = std::make_shared<Cast_OpImpl>(*this);
+    mOutputs[0]->setDataType(targetType);
+}
+
+
 void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Cast_Op>::exists({name})) {
         SET_IMPL_MACRO(Cast_Op, *this, name);
@@ -36,3 +46,11 @@ void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::Cast_Op::getAvailableBackends() const {
+    return Registrar<Cast_Op>::getKeys();
+}
+
+std::shared_ptr<Aidge::Node> Aidge::Cast(const Aidge::DataType targetType, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
+}
\ No newline at end of file
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index ee06ce69b135e11fe3ed5be8fa9f501debb6acd5..55efdd51d56f7db4f64880b967def661e5354af5 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -18,15 +18,43 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
+const std::string Aidge::Concat_Op::Type = "Concat";
+
+Aidge::Concat_Op::Concat_Op(const Aidge::IOIndex_t nbIn, const std::int32_t axis)
+    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ConcatAttr::Axis>(axis)))
+{
+    if (nbIn == 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
+    }
+    mImpl = std::make_shared<Concat_OpImpl>(*this);
+}
+
+Aidge::Concat_Op::Concat_Op(const Aidge::Concat_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Concat_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Concat_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Concat_Op::clone() const {
+    return std::make_shared<Concat_Op>(*this);
+}
+
 void Aidge::Concat_OpImpl::forward() {
     const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
-    const DimSize_t axis = op.template getAttr<DimSize_t>("Axis");
+    const DimSize_t axis = op.axis();
 
     assert(op.getInput(0) && "missing input in Concat operator");
-    DataType datatypeFirstInput = op.getInput(0)->dataType();
     for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
         assert(op.getInput(i) && "missing input in Concat operator");
-        assert(op.getInput(i)->dataType() == datatypeFirstInput);
+        assert(op.getInput(i)->dataType() == op.getInput(0)->dataType());
     }
 
     DimSize_t outputAxisValue = 0;
@@ -57,39 +85,43 @@ void Aidge::Concat_OpImpl::forward() {
     }
 }
 
-const std::string Aidge::Concat_Op::Type = "Concat";
 
 bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
-    // Every input is non-empty with the same number of dimensions
-    bool associated = (getInput(0) != nullptr);
-    associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input
-    auto outputDims =  getInput(0)->dims();
-    const auto firstInputNbDims = getInput(0) -> nbDims();
+    if (!inputsAssociated()) {
+        return false;
+    }
+    const std::size_t nbDimsInput0 = getInput(0)->nbDims();
+    AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is scalar", type());
     for (IOIndex_t i = 1; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-        }
+        AIDGE_ASSERT(nbDimsInput0 == getInput(i)->nbDims(),
+            "Input 0 and input {} in {} Operator have different number of dimensions: {} / {}",
+            i, type(), nbDimsInput0, getInput(i)->nbDims());
+    }
+    // Check validity of attributes with inputs
+    // Axis
+    std::int32_t axis = mAttributes->template getAttr<ConcatAttr::Axis>();
+    axis = (axis < 0) ? axis + static_cast<std::int32_t>(nbDimsInput0) : axis;
+    AIDGE_ASSERT(((axis >= 0) && (axis < static_cast<std::int32_t>(nbDimsInput0))),
+                "'Axis' attribute not compatible with provided inputs.")
+    const std::size_t axis_u64 = static_cast<std::size_t>(axis);
 
-        if (getInput(i)->nbDims() == firstInputNbDims) {
-            for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
-                if (dim == getAttr<ConcatAttr::Axis>()) {
-                    outputDims[dim] += getInput(i)->dims()[dim];
-                }
-                else {
-                    associated &= (getInput(i)->dims()[dim] == outputDims[dim]);
-                }
+    // Check validity of inputs
+    auto outputDims =  getInput(0)->dims();
+    for (IOIndex_t i = 1; i < nbInputs(); ++i) {
+        for (DimSize_t dim = 0; dim < nbDimsInput0; ++dim) {
+            if (dim == axis_u64) {
+                outputDims[axis_u64] += getInput(i)->dims()[axis_u64];
+            }
+            else {
+                AIDGE_ASSERT(getInput(i)->dims()[dim] == outputDims[dim],
+                    "Incomatible dimensions between input 0 {} and input {} {}",
+                    getInput(0)->dims(), i, getInput(i)->dims());
             }
         }
-        else {
-            associated = false;
-            break;
-        }
-    }
-    if (associated) {
-        getOutput(0)->resize(outputDims);
     }
 
-    return associated;
+    getOutput(0)->resize(outputDims);
+    return true;
 }
 
 void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
@@ -101,3 +133,13 @@ void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::Concat_Op::getAvailableBackends() const {
+    return Registrar<Concat_Op>::getKeys();
+}
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Concat(const Aidge::IOIndex_t nbIn, const std::int32_t axis, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
+}
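+
+// Usage sketch: Concat(3, 1, "cat") builds a node concatenating three inputs
+// along axis 1; forwardDims() normalizes negative axes against the inputs'
+// rank, so axis -1 concatenates along the last dimension.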
diff --git a/src/operator/ConstantOfShape.cpp b/src/operator/ConstantOfShape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7fe9dc1309080f844961a8e8a28c4a05964ae741
--- /dev/null
+++ b/src/operator/ConstantOfShape.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ConstantOfShape.hpp"
+
+#include <cstdint>
+#include <fmt/format.h>
+#include <memory>
+#include <stdexcept> // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/half.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+const std::string ConstantOfShape_Op::Type = "ConstantOfShape";
+
+bool ConstantOfShape_Op::forwardDims(bool allowDataDependency) {
+  if (!inputsAssociated()) {
+    return false;
+  }
+
+  if (!allowDataDependency) {
+    Log::warn("{} : unable to forwardDims() because output dims are data "
+              "dependent on input#0",
+              type());
+    return false;
+  }
+
+  AIDGE_ASSERT(getInput(0)->nbDims() == 1,
+               "{} : Input tensor should have only 1 dimension. {} dimensions"
+               "received : {}",
+               __func__, getInput(0)->nbDims(), getInput(0)->dims());
+  AIDGE_ASSERT(getInput(0)->dataType() == DataType::Int64,
+               "{} : Input tensor data type should be int64t, received : {}",
+               __func__, getInput(0)->nbDims(), getInput(0)->dims());
+  std::vector<DimSize_t> output_dims;
+  output_dims.reserve(getInput(0)->size());
+  for (std::size_t i = 0; i < getInput(0)->size(); ++i) {
+    auto temp = getInput(0)->template get<std::int64_t>(i);
+    output_dims.push_back(temp);
+  }
+  mOutputs[0]->resize(output_dims);
+  return true;
+}
+
+void ConstantOfShape_Op::setBackend(const std::string &name,
+                                       Aidge::DeviceIdx_t device) {
+  SET_IMPL_MACRO(ConstantOfShape_Op, *this, name);
+  mOutputs[0]->setBackend(name, device);
+  value().setBackend(name, device);
+}
+
+std::set<std::string> Aidge::ConstantOfShape_Op::getAvailableBackends() const {
+  return Registrar<ConstantOfShape_Op>::getKeys();
+}
+
+} // namespace Aidge
+
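+// Shape sketch for forwardDims() above: an Int64 input holding the values
+// {2, 3, 4} resizes the output to dims [2, 3, 4]; filling it with the
+// operator's value() attribute is left to the backend implementation.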
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 99b40fcb277ce1f22c5cd3a571eaaaa4910b6ba5..e055c7e5ebb9a6cff9f774da444cc582ed7de34c 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -29,7 +29,7 @@ const std::string Aidge::Conv_Op<DIM>::Type = "Conv";
 template <Aidge::DimIdx_t DIM>
 Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
@@ -40,46 +40,40 @@ Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = true;
-    for (IOIndex_t i = 0; i < 3; ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-        }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         // first check weight since it defines inChannels and outChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
-                    "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
+                    "Wrong weight Tensor dimension: {} for Conv{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
         // check data
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
                     (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
-                    "Wrong input size for Conv operator.");
+                    "Wrong input size ({}) for Conv operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), inChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
         // check optional bias
-        if(!this->template getAttr<ConvAttr::NoBias>())
+        if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == outChannels()),
-                    "Wrong bias size for Conv operator.");
+                    "Wrong bias size ({}) for Conv operator. Expected dims are [{}].", getInput(2)->dims(), outChannels());
+
         std::array<DimSize_t, DIM + 2> outputDims{};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
-        for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
-            const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
-                                                    (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvAttr::DilationDims>()[dim] *
+                                                    (mAttributes->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
                                             1;
 
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                     floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                            static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
+                            static_cast<float>(mAttributes->template getAttr<ConvAttr::StrideDims>()[dim])));
         }
 
         outputDims[1] = outChannels();
         outputDims[0] = inputDims[0];
         mOutputs[0]->resize(outputDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 
@@ -113,18 +107,18 @@ Aidge::Conv_Op<DIM>::computeReceptiveField(
         std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-            inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (mAttributes->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
         }
 
         // Weight
         // same output value, every input channel is used
         std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
         for (std::size_t i = 0; i < DIM; ++i) {
-            weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
+            weightDims.push_back(mAttributes->template getAttr<ConvAttr::KernelDims>()[i]);
         }
         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
         weightIdxDims[0] = firstEltDims[1];
@@ -135,7 +129,7 @@ Aidge::Conv_Op<DIM>::computeReceptiveField(
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
 
         // Bias
-        if (! this->template getAttr<ConvAttr::NoBias>()){
+        if (getInput(2)){
             const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
             const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
             res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
@@ -151,8 +145,46 @@ void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 
     // By default, automatically set backend for weight and bias inputs
-    getInput(1)->setBackend(name, device);
-    getInput(2)->setBackend(name, device);
+    if (getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    else {
+        Log::notice("Conv_Op::setBackend(): could not set backend for weight input, because input is not connected");
+    }
+
+    if (getInput(2)) {
+        // Bias is optional
+        getInput(2)->setBackend(name, device);
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::Conv_Op<DIM>::getAvailableBackends() const {
+    return Registrar<Conv_Op<DIM>>::getKeys();
+}
+
+template class Aidge::Conv_Op<1>;
+template class Aidge::Conv_Op<2>;
+
+/////////////////////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Conv(Aidge::DimSize_t inChannels,
+                                  Aidge::DimSize_t outChannels,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilationDims,
+                                  bool noBias) {
+    // FIXME: properly handle default w&b initialization in every cases
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+    addProducer(conv, 1, append(outChannels, append(inChannels, kernelDims)), "w");
+    if (!noBias) {
+        addProducer(conv, 2, {outChannels}, "b"); // already sets bias dims
+    }
+    return conv;
 }
 
-template class Aidge::Conv_Op<2>;
\ No newline at end of file
+template std::shared_ptr<Aidge::Node> Aidge::Conv<1>(Aidge::DimSize_t, Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Conv<2>(Aidge::DimSize_t, Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index 12aa0818b244ef0f3195de49467a464e057f2c73..f4d524356bd207a7ed101c2887c2fcda53f3bb83 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -30,7 +30,7 @@ const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
 template <Aidge::DimIdx_t DIM>
 Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
@@ -41,47 +41,40 @@ Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM
 
 template <Aidge::DimIdx_t DIM>
 bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    // TODO : add a check of inputs dimensions ?
-    bool associated = true;
-    for (IOIndex_t i = 0; i < 3; ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-        }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         // first check weight since it defines nbChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
-                    "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
+                    "Wrong weight Tensor dimension: {} for ConvDepthWise{}D operator. Expected number of dimensions is {}.", getInput(1)->nbDims(), DIM, DIM+2);
         // check data
         AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
                     (getInput(0)->template dims<DIM+2>()[1] == nbChannels()),
-                    "Wrong input size for Conv operator.");
+                    "Wrong input size ({}) for ConvDepthWise operator. Expected dims are [x, {}, {}].", getInput(0)->dims(), nbChannels(), fmt::join(std::vector<std::string>(DIM, "x"), ", "));
         // check optional bias
-        if(!this->template getAttr<ConvDepthWiseAttr::NoBias>())
+        if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == nbChannels()),
-                    "Wrong bias size for Conv operator.");
+                    "Wrong bias size ({}) for ConvDepthWise operator. Expected dims are [{}].", getInput(2)->dims(), nbChannels());
+
         std::array<DimSize_t, DIM + 2> outputDims = {};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
-        for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
-            const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
-                                                    (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
+                                                    (mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
                                             1;
 
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                     floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                            static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
+                            static_cast<float>(mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
         }
 
         outputDims[1] = inputDims[1];
         outputDims[0] = inputDims[0];
         mOutputs[0]->resize(outputDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 
@@ -114,17 +107,17 @@ Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
         std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-            inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
         }
 
         // Weight
         std::vector<DimSize_t> weightDims{outputDims[1], 1};
         for (std::size_t i = 0; i < DIM; ++i) {
-            weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
+            weightDims.push_back(mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
         }
         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
         weightIdxDims[0] = firstEltDims[1];
@@ -135,7 +128,7 @@ Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
         // Bias
-        if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
+        if (getInput(2)){
             const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
             const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
             res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
@@ -151,8 +144,45 @@ void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::De
     mOutputs[0]->setBackend(name, device);
 
     // By default, automatically set backend for weight and bias inputs
-    getInput(1)->setBackend(name, device);
-    getInput(2)->setBackend(name, device);
+    if (getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    else {
+        Log::notice("ConvDepthWise_Op::setBackend(): could not set backend for weight input, because input is not connected");
+    }
+
+    if (getInput(2)) {
+        // Bias is optional
+        getInput(2)->setBackend(name, device);
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::ConvDepthWise_Op<DIM>::getAvailableBackends() const {
+    return Registrar<ConvDepthWise_Op<DIM>>::getKeys();
+}
+
+template class Aidge::ConvDepthWise_Op<1>;
+template class Aidge::ConvDepthWise_Op<2>;
+
+////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise(const Aidge::DimSize_t nbChannels,
+                                           const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                           const std::string& name,
+                                           const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                           const std::array<Aidge::DimSize_t, DIM> &dilationDims,
+                                           bool noBias) {
+    // FIXME: properly handle default w&b initialization in every case
+    AIDGE_ASSERT(DIM <= MaxDim, "Too many kernel dimensions required by {}, not supported", ConvDepthWise_Op<DIM>::Type);
+    auto convDW = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+    addProducer(convDW, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
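+    // depthwise weight producer dims are (nbChannels, 1, kernelDims...)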
+    if (!noBias) {
+        addProducer(convDW, 2, {nbChannels}, "b");
+    }
+    return convDW;
 }
 
-template class Aidge::ConvDepthWise_Op<2>;
\ No newline at end of file
+template std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise<1>(Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise<2>(Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/DepthToSpace.cpp b/src/operator/DepthToSpace.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6b8d05625b99aec05be4f531460a5d25c120a5e0
--- /dev/null
+++ b/src/operator/DepthToSpace.cpp
@@ -0,0 +1,126 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/DepthToSpace.hpp"
+
+#include <array>
+#include <cstddef>  // std::size_t
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::DepthToSpace_OpImpl::forward() {
+    const DepthToSpace_Op& op = dynamic_cast<const DepthToSpace_Op&>(mOp);
+    // suppose an NCHW Tensor format
+
+    // Get input dimensions
+    const auto& dims = op.getInput(0)->dims<4>();
+    // get final output dimension
+    const std::array<DimSize_t, 4> final_dims = op.getOutput(0)->dims<4>();
+
+    std::size_t b = dims[0];
+    std::size_t c = dims[1] / (static_cast<DimSize_t>(op.blockSize()) * static_cast<DimSize_t>(op.blockSize()));
+    std::size_t h = dims[2];
+    std::size_t w = dims[3];
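+    // e.g. blockSize = 2, DCR mode: dims (N, 4*C, H, W) are viewed as
+    // (N, 2, 2, C, H, W), permuted to (N, C, H, 2, W, 2), then reshaped
+    // to the final dims (N, C, 2*H, 2*W)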
+
+    // Copy input tensor to output
+    op.setOutput(0, op.getInput(0));
+
+    // Step 1: Resize
+    const std::vector<DimSize_t> resize_dims =
+        (op.mode() == DepthToSpace_Op::Mode::CRD) ?
+            std::vector<DimSize_t>({b, c, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), h, w}) :
+            std::vector<DimSize_t>({b, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), c, h, w});
+    op.getOutput(0)->resize(resize_dims);
+
+    // Step 2: Transpose
+    const std::vector<DimSize_t> transpose_order =
+        (op.mode() == DepthToSpace_Op::Mode::CRD) ?
+            std::vector<DimSize_t>({0, 1, 4, 2, 5, 3}) :
+            std::vector<DimSize_t>({0, 3, 4, 1, 5, 2});
+    op.getOutput(0)->copyTranspose(*(op.getOutput(0)), transpose_order);
+
+    // Step 3: Final resize
+    op.getOutput(0)->resize(final_dims);
+}
+
+//////////////////////////////////////////////////////
+
+const std::string Aidge::DepthToSpace_Op::Type = "DepthToSpace";
+
+Aidge::DepthToSpace_Op::DepthToSpace_Op(const std::uint32_t blockSize, const Aidge::DepthToSpace_Op::Mode mode)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<DepthToSpaceAttr::BlockSize>(blockSize),
+        attr<DepthToSpaceAttr::Mode>(mode)))
+{
+    // ctor
+}
+
+Aidge::DepthToSpace_Op::DepthToSpace_Op(const Aidge::DepthToSpace_Op& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(DepthToSpace_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::DepthToSpace_Op::clone() const {
+    return std::make_shared<DepthToSpace_Op>(*this);
+}
+
+bool Aidge::DepthToSpace_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(getInput(0)->nbDims() == 4, "{} Operator only accepts 4-D input Tensors.", DepthToSpace_Op::Type);
+        AIDGE_ASSERT(getInput(0)->dims()[1] % (blockSize() * blockSize()) == 0, "Number of channels must be divisible by blocksize squared");
+
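+        // e.g. blockSize = 2: an input of dims (N, 8, H, W) yields output dims (N, 2, 2*H, 2*W)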
+        // Compute output dims
+        const std::array<DimSize_t, 4>& inDims = getInput(0)->dims<4>();
+        const std::vector<DimSize_t> outDims =
+                {inDims[0],
+                 inDims[1] / (static_cast<DimSize_t>(blockSize()) * static_cast<DimSize_t>(blockSize())),
+                 inDims[2] * static_cast<DimSize_t>(blockSize()),
+                 inDims[3] * static_cast<DimSize_t>(blockSize())};
+
+        mOutputs[0]->resize(outDims);
+        return true;
+    }
+
+    return false;
+}
+
+void Aidge::DepthToSpace_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<DepthToSpace_Op>::exists({name})) {
+        SET_IMPL_MACRO(DepthToSpace_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<DepthToSpace_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::DepthToSpace_Op::getAvailableBackends() const {
+    return Registrar<DepthToSpace_Op>::getKeys();
+}
+
+//////////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::DepthToSpace(const std::uint32_t blockSize,
+                                    const Aidge::DepthToSpace_Op::Mode mode,
+                                    const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<DepthToSpace_Op>(blockSize, mode), name);
+}
\ No newline at end of file
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index e6300d08c2c792c8a3eb66b307aca53f9d2acc73..96eea3df966b273445be8a6e9d9a5acf2d6fafb2 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -23,13 +23,7 @@
 const std::string Aidge::Div_Op::Type = "Div";
 
 bool Aidge::Div_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
@@ -62,3 +56,13 @@ void Aidge::Div_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(Div_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::Div_Op::getAvailableBackends() const {
+    return Registrar<Div_Op>::getKeys();
+}
+
+///////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Div(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Div_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp
index 81c87f10b10210c2af203a05df53e3330bb33b72..bd5f76f8aa7c0889311e4f922fec8d20168e24b5 100644
--- a/src/operator/Erf.cpp
+++ b/src/operator/Erf.cpp
@@ -19,7 +19,31 @@
 
 const std::string Aidge::Erf_Op::Type = "Erf";
 
+Aidge::Erf_Op::Erf_Op(const Aidge::Erf_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Erf_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Erf_Op::clone() const {
+    return std::make_shared<Erf_Op>(*this);
+}
+
 void Aidge::Erf_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Erf_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::Erf_Op::getAvailableBackends() const {
+    return Registrar<Erf_Op>::getKeys();
+}
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Erf(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Erf_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index d3bfd4557044c49b452de7690541a1c0a2ac62d9..dd3ed7aba65cf1875d691d9bc2c8c94bb03856c7 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -23,6 +23,10 @@
 
 const std::string Aidge::FC_Op::Type = "FC";
 
+std::shared_ptr<Aidge::Operator> Aidge::FC_Op::clone() const {
+    return std::make_shared<FC_Op>(*this);
+}
+
 void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(inputIdx < 3, "Operators {} supports only {} inputs", type(), nbInputs());
     AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
@@ -37,14 +41,7 @@ void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::sh
 }
 
 bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
-    bool associated = true;
-    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-        }
-        associated &= !(getInput(i)->empty());
-    }
-    if (associated) {
+    if (inputsAssociated()) {
         // first check weight since it defines inChannels and outChannels
         AIDGE_ASSERT((getInput(1)->nbDims() == 2),
                     "Wrong weight Tensor dimension: {} for FC operator (should have 2 dimensions).", getInput(1)->nbDims());
@@ -64,15 +61,16 @@ bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
                     nbInputFeatures, inChannels);
         }
         // check optional bias
-        if(!this->template getAttr<FCAttr::NoBias>())
+        if(getInput(2))
             AIDGE_ASSERT((getInput(2)->nbDims() == 1) &&
                     (getInput(2)->template dims<1>()[0] == outChannels),
                     "Wrong bias size for FC operator.");
         // <batch, OutChannels>
         mOutputs[0]->resize({getInput(0)->dims()[0], outChannels});
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
@@ -80,6 +78,32 @@ void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device
     mOutputs[0]->setBackend(name, device);
 
     // By default, automatically set backend for weight and bias inputs
-    getInput(1)->setBackend(name, device);
-    getInput(2)->setBackend(name, device);
+    if (getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    else {
+        Log::notice("FC_Op::setBackend(): could not set backend for weight input, because input is not connected");
+    }
+
+    if (getInput(2)) {
+        // Bias is optional
+        getInput(2)->setBackend(name, device);
+    }
+}
+
+std::set<std::string> Aidge::FC_Op::getAvailableBackends() const {
+    return Registrar<FC_Op>::getKeys();
+}
+
+std::shared_ptr<Aidge::Node> Aidge::FC(const Aidge::DimSize_t inChannels,
+                                       const Aidge::DimSize_t outChannels,
+                                       bool noBias,
+                                       const std::string& name) {
+    // FIXME: properly handle default w&b initialization in every case
+    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(), name);
+    addProducer(fc, 1, {outChannels, inChannels}, "w");
+    if (!noBias) {
+        addProducer(fc, 2, {outChannels}, "b"); // already sets bias dims
+    }
+    return fc;
 }
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..99ccb7505cd959178e4bd7132e32552ea5a72ecf
--- /dev/null
+++ b/src/operator/Fold.cpp
@@ -0,0 +1,105 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Fold.hpp"
+
+#include <cmath>      // std::floor
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <utility>    // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::Fold_Op<DIM>::Type = "Fold";
+
+template <Aidge::DimIdx_t DIM>
+Aidge::Fold_Op<DIM>::Fold_Op(const Aidge::Fold_Op<DIM> &op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
+    }
+    else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::Fold_Op<DIM>::clone() const {
+    return std::make_shared<Fold_Op<DIM>>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::Fold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        auto dims(getInput(0)->dims());
+        DimSize_t k = 1;
+        DimSize_t l = 1;
+
+        for (std::size_t dim = 0; dim < this->kernelDims().size() ; ++dim) {
+            const DimSize_t kernelExtent = this->dilationDims()[dim] *
+                                                    (this->kernelDims()[dim] - 1) + 1;
+
+            k *= this->kernelDims()[dim];
+            l *= 1 + static_cast<DimSize_t>(
+                    floor(static_cast<float>(this->outputDims()[dim] - kernelExtent) /
+                            static_cast<float>(this->strideDims()[dim])));
+        }
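+        // e.g. outputDims (4,5), kernel (2,2), stride (1,1), dilation (1,1):
+        // k = 4 kernel elements and l = 3*4 = 12 sliding positions, so the
+        // expected input shape is (N, C*4, 12)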
+
+        AIDGE_ASSERT(dims[dims.size() - 2] % k == 0, "Fold: input number of channels ({}) is not divisible by the product of provided kernel dims ({})!",
+            dims[dims.size() - 2], k);
+        AIDGE_ASSERT(dims[dims.size() - 1] == l, "Fold: mismatch between provided input 3rd dim {} and expected input 3rd dim {}",
+            dims[dims.size() - 1], l);
+
+        dims[dims.size() - 2] /= k;
+        dims.pop_back();
+        dims.insert(dims.end(), this->outputDims().begin(), this->outputDims().end());
+        mOutputs[0]->resize(dims);
+        return true;
+    }
+
+    return false;
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::Fold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Fold_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::Fold_Op<DIM>::getAvailableBackends() const {
+    return Registrar<Fold_Op<DIM>>::getKeys();
+}
+
+template class Aidge::Fold_Op<2>;
+
+///////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Fold(const std::array<Aidge::DimSize_t, DIM> &outputDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilationDims) {
+    // FIXME: properly handle default w&b initialization in every case
+    AIDGE_ASSERT(DIM <= MaxDim, "Too many kernel dimensions required by {}, not supported", Fold_Op<DIM>::Type);
+    return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Fold<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index b0b9a0e84882cae55a9a3c336684d43e208cb503..0ebc3e3bc81b15d9414d01f12a2768be6a7ddc42 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -20,11 +20,40 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
+const std::string Aidge::Gather_Op::Type = "Gather";
+
+
+Aidge::Gather_Op::Gather_Op(std::int8_t axis,
+              const std::vector<int64_t>& indices,
+              const std::vector<Aidge::DimSize_t>& gatheredShape)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<GatherAttr::Axis>(axis),
+        attr<GatherAttr::Indices>(indices),
+        attr<GatherAttr::GatheredShape>(gatheredShape)))
+{
+    mImpl = std::make_shared<Gather_OpImpl>(*this);
+}
+
+Aidge::Gather_Op::Gather_Op(const Aidge::Gather_Op& op)
+    : OperatorTensor(op), mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Gather_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Gather_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Gather_Op::clone() const {
+    return std::make_shared<Gather_Op>(*this);
+}
+
 void Aidge::Gather_OpImpl::forward() {
     const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
-    const auto axis = op.template getAttr<std::int8_t>("Axis");
 
-    const std::size_t axisIdx = static_cast<std::size_t>(axis) + (axis >= 0 ? 0 : op.getInput(0)->dims().size());
+    const std::size_t axisIdx = static_cast<std::size_t>(op.axis()) + (op.axis() >= 0 ? 0 : op.getInput(0)->dims().size());
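+    // negative axes wrap around: e.g. axis = -1 on a 3-D input resolves to axisIdx = 2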
 
     std::size_t postAxisElems = 1;
     for (std::size_t i = axisIdx + 1; i < op.getInput(0)->dims().size(); ++i) {
@@ -38,21 +67,19 @@ void Aidge::Gather_OpImpl::forward() {
     std::size_t outputOffset = 0;
     for (std::size_t i=0; i<preAxisElems; ++i)
     {
-        for(std::size_t j=0; j<op.template getAttr<std::vector<int64_t>>("Indices").size(); ++j)
+        for(std::size_t j = 0; j < op.indices().size(); ++j)
         {
-            const std::size_t idx = op.template getAttr<std::vector<int64_t>>("Indices")[j] >= 0 ?
-                                        static_cast<std::size_t>(op.template getAttr<std::vector<int64_t>>("Indices")[j]) :
-                                        static_cast<std::size_t>(op.template getAttr<std::vector<int64_t>>("Indices")[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
+            const std::size_t idx = op.indices()[j] >= 0 ?
+                                        static_cast<std::size_t>(op.indices()[j]) :
+                                        static_cast<std::size_t>(op.indices()[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
             op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i * postAxisElems * op.getInput(0)->dims()[axisIdx] + idx * postAxisElems), postAxisElems, outputOffset);
             outputOffset += postAxisElems;
         }
     }
 }
 
-const std::string Aidge::Gather_Op::Type = "Gather";
-
 bool Aidge::Gather_Op::dimsForwarded() const {
-    if (getInput(1) && !getInput(1)->empty()) {
+    if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
         return false;
     }
@@ -61,51 +88,48 @@ bool Aidge::Gather_Op::dimsForwarded() const {
 }
 
 bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
-    // check data input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute Indices
+        if (getInput(1)) {
+            if (!this->indices().empty()) {
+                Log::notice("Gather_Op: ignoring non-empty Indices attribute because input#1 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("Gather_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
+
+            std::shared_ptr<Tensor> fallback;
+            this->gatheredShape() = getInput(1)->dims();
+            this->indices().clear(); // If both are provided input would override attrs
+            this->indices().reserve(getInput(1)->size());
+            const auto& indices = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(indices.getImpl()->hostPtr()),
+                        indices.size(),
+                        std::back_inserter(this->indices()));
+        }
 
-    if (getInput(0)->empty()) {
-        return false;
-    }
+        AIDGE_ASSERT(!this->indices().empty(), "Missing input#1 or Indices attribute");
 
-    if (getInput(1) && !getInput(1)->empty()) {
-        if (!this->template getAttr<GatherAttr::Indices>().empty()) {
-            Log::notice("Gather_Op: ignoring non-empty Indices attribute because input#1 takes precedence");
-        }
+        // Compute output dims
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
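+        // e.g. input dims (2,3,4) with axis = 1 and gatheredShape (5) give output dims (2,5,4)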
 
-        if (!allowDataDependency) {
-            Log::warn("Gather_Op: unable to forwardDims() because output dims are data dependent on input#1");
-            return false;
+        std::int8_t axisIdx = this->axis() >= 0 ?
+                                this->axis() :
+                                this->axis() + outDims.size();
+        outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
+        if (!this->gatheredShape().empty())
+        {
+            outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx),
+                            this->gatheredShape().begin(),
+                            this->gatheredShape().end());
         }
-
-        std::shared_ptr<Tensor> fallback;
-        this->template getAttr<GatherAttr::GatheredShape>() = getInput(1)->dims();
-        this->template getAttr<GatherAttr::Indices>().clear(); // If both are provided input would override attrs
-        this->template getAttr<GatherAttr::Indices>().reserve(getInput(1)->size());
-        const auto& indices = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(indices.getImpl()->hostPtr()),
-                    indices.size(),
-                    std::back_inserter(this->template getAttr<GatherAttr::Indices>()));
+        mOutputs[0]->resize(outDims);
+        return true;
     }
 
-    AIDGE_ASSERT(!this->template getAttr<GatherAttr::Indices>().empty(), "Missing input#1 or Indices attribute");
-
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
-
-    std::int8_t axisIdx = this->template getAttr<GatherAttr::Axis>()>=0?
-                            this->template getAttr<GatherAttr::Axis>():
-                            this->template getAttr<GatherAttr::Axis>()+outDims.size();
-    outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
-    if( !this->template getAttr<GatherAttr::GatheredShape>().empty())
-    {
-        outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx),
-                        this->template getAttr<GatherAttr::GatheredShape>().begin(),
-                        this->template getAttr<GatherAttr::GatheredShape>().end());
-    }
-    mOutputs[0]->resize(outDims);
-    return true;
+    return false;
 }
 
 void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
@@ -117,3 +141,16 @@ void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::Gather_Op::getAvailableBackends() const {
+    return Registrar<Gather_Op>::getKeys();
+}
+
+/////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Gather(std::int8_t axis,
+                                        const std::vector<int64_t>& indices,
+                                        const std::vector<Aidge::DimSize_t>& gatheredShape,
+                                        const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Gather_Op>(axis, indices, gatheredShape), name);
+}
\ No newline at end of file
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index fdf3036fe7eeccb2dfd9e21faf834e27854e45f3..e8c66085de5bc7c808b7f2307a9a82b22a426bb2 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -18,6 +18,42 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
+
+Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
+                                            const std::vector<Aidge::InputCategory>& inputsCategory,
+                                            Aidge::IOIndex_t nbOut)
+    : OperatorTensor(type, inputsCategory, nbOut)
+{
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
+
+Aidge::GenericOperator_Op::GenericOperator_Op(const std::string& type,
+                                            Aidge::IOIndex_t nbData,
+                                            Aidge::IOIndex_t nbParam,
+                                            Aidge::IOIndex_t nbOut)
+    : OperatorTensor(type, [nbData, nbParam]() {
+                            std::vector<InputCategory> inputsCategory(nbData, InputCategory::Data);
+                            inputsCategory.resize(nbData + nbParam, InputCategory::Param);
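+                            // e.g. nbData = 2, nbParam = 1 gives {Data, Data, Param}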
+                            return inputsCategory;
+                        }(), nbOut),
+        mAttributes(std::make_shared<DynamicAttributes>())
+{
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
+
+Aidge::GenericOperator_Op::GenericOperator_Op(const Aidge::GenericOperator_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
+{
+    mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
+}
+
+Aidge::GenericOperator_Op::~GenericOperator_Op() noexcept = default;
+
+std::shared_ptr<Aidge::Operator> Aidge::GenericOperator_Op::clone() const {
+    return std::make_shared<GenericOperator_Op>(*this);
+}
+
 const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Identity
     = [](const std::vector<std::vector<std::size_t>>& inputsDims) { return inputsDims; };
 
@@ -26,9 +62,10 @@ const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Inpu
 }
 
 bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
-    if (mForwardDims) {
+    if (mForwardDims && inputsAssociated(false)) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs(), std::vector<std::size_t>());
         for (std::size_t i = 0; i < nbInputs(); ++i) {
+            // Check for input, as it may be optional
             if (getInput(i)) {
                 inputsDims[i] = getInput(i)->dims();
             }
@@ -47,12 +84,27 @@ bool Aidge::GenericOperator_Op::forwardDims(bool /*allowDataDependency*/) {
     }
 }
 
-bool Aidge::GenericOperator_Op::dimsForwarded() const {
-    if (mForwardDims) {
-        return !(mOutputs[0]->empty());
-    }
-    else {
-        Log::notice("GenericOperator: not output dims forwarded, no ComputeDimsFunc function provided.");
-        return false;
+void Aidge::GenericOperator_Op::setBackend(const std::string & name, DeviceIdx_t device) {
+    Log::warn("GenericOperator::setBackend(): cannot set backend for a generic operator, as no implementation has been provided!");
+
+    for (std::size_t i = 0; i < nbOutputs(); ++i) {
+        mOutputs[i]->setBackend(name, device);
     }
 }
+
+///////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
+                                            const std::vector<Aidge::InputCategory>& inputCategory,
+                                            Aidge::IOIndex_t nbOut,
+                                            const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, inputCategory, nbOut), name);
+}
+
+std::shared_ptr<Aidge::Node> Aidge::GenericOperator(const std::string& type,
+                                                Aidge::IOIndex_t nbData,
+                                                Aidge::IOIndex_t nbParam,
+                                                Aidge::IOIndex_t nbOut,
+                                                const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbData, nbParam, nbOut), name);
+}
\ No newline at end of file
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index b09426f8f835eda5600b630488ef18c5b08ba32a..bbcfd0d28ca039318647d206af876727793e1bfc 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -21,30 +21,48 @@
 
 const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling";
 
+Aidge::GlobalAveragePooling_Op::GlobalAveragePooling_Op(const Aidge::GlobalAveragePooling_Op &op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::GlobalAveragePooling_Op::clone() const {
+    return std::make_shared<GlobalAveragePooling_Op>(*this);
+}
+
 bool Aidge::GlobalAveragePooling_Op::forwardDims(bool /*allowDataDependency*/) {
-  // error checking
-  if (!getInput(0)) {
-    AIDGE_THROW_OR_ABORT(std::runtime_error,
-                         "GlobalAveragePooling : The input was not connected");
-  }
-  else if (!getInput(0)->empty()) {
-    AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
-                 "GlobalAveragePooling :  needs at least a 3 dimensions input, "
-                 "number of input dim : {}",
-                 getInput(0)->dims().size());
-    // Global average pooling takes each filter, averages its values and uses
-    // it as an output(Much like a fancier flatten). 1st dim is batch 2nd is
-    // number of filter
-    const std::vector<DimSize_t> out_dims{getInput(0)->dims().at(0),
-                                          getInput(0)->dims().at(1)};
-    mOutputs[0]->resize(out_dims);
-    return true;
-  }
-
-  return false;
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
+                    "GlobalAveragePooling: needs an input with at least 3 dimensions, "
+                    "got {} dimensions",
+                    getInput(0)->dims().size());
+        // Global average pooling takes each filter, averages its values and uses
+        // the result as an output (much like a fancier flatten). The 1st dim is
+        // the batch, the 2nd is the number of filters.
+        mOutputs[0]->resize({getInput(0)->dims().at(0),
+                             getInput(0)->dims().at(1)});
+        return true;
+    }
+
+    return false;
 }
 
 void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(GlobalAveragePooling_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
-}
\ No newline at end of file
+}
+
+std::set<std::string> Aidge::GlobalAveragePooling_Op::getAvailableBackends() const {
+    return Registrar<GlobalAveragePooling_Op>::getKeys();
+}
+
+////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::GlobalAveragePooling(const std::string &name) {
+  return std::make_shared<Node>(std::make_shared<GlobalAveragePooling_Op>(), name);
+}
diff --git a/src/operator/GridSample.cpp b/src/operator/GridSample.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d26679f8337390879c8f4c4d10deb883fb40e6da
--- /dev/null
+++ b/src/operator/GridSample.cpp
@@ -0,0 +1,118 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/GridSample.hpp"
+
+#include <cstddef>    // std::size_t
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+
+const std::string Aidge::GridSample_Op::Type = "GridSample";
+
+
+Aidge::GridSample_Op::GridSample_Op(
+    typename Aidge::GridSample_Op::Mode mode,
+    typename Aidge::GridSample_Op::PaddingMode paddingMode,
+    bool alignCorners)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+        attr<GridSampleAttr::Mode>(mode),
+        attr<GridSampleAttr::PaddingMode>(paddingMode),
+        attr<GridSampleAttr::AlignCorners>(alignCorners)))
+{
+    // ctor
+}
+
+
+Aidge::GridSample_Op::GridSample_Op(const Aidge::GridSample_Op& other)
+    : OperatorTensor(other),
+      mAttributes(other.mAttributes)
+{
+    if (other.mImpl) {
+        SET_IMPL_MACRO(GridSample_Op, *this, other.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+
+Aidge::GridSample_Op::~GridSample_Op() noexcept = default;
+
+
+std::shared_ptr<Aidge::Operator> Aidge::GridSample_Op::clone() const {
+    return std::make_shared<GridSample_Op>(*this);
+}
+
+
+bool Aidge::GridSample_Op::forwardDims(bool /*allowDataDependency*/) {
+    // TODO: adapt for other formats than NCHW
+    if (inputsAssociated()) {
+        // check data has batch and channel dimensions: (N, C, D0, D1, ..., DN)
+        AIDGE_ASSERT(getInput(0)->nbDims() > 2, "Input should have at least one spatial dimension.");
+        const std::size_t nbSpatialFeat = getInput(0)->nbDims() - 2; // all except batch and channels
+        // check grid input
+        // expected shape: (N, D0_out, D1_out, ..., DN_out, nbSpatialFeat)
+        AIDGE_ASSERT(((getInput(1)->nbDims() == nbSpatialFeat + 2) &&
+            (getInput(1)->dims()[nbSpatialFeat+1] == nbSpatialFeat) &&
+            (getInput(1)->dims()[0] == getInput(0)->dims()[0])),
+            "Wrong grid size {} for {} operator.", getInput(1)->dims(), type());
+
+        std::vector<DimSize_t> outputDims{};
+        outputDims.reserve(nbSpatialFeat+2);
+        const std::vector<DimSize_t>& inputDims(getInput(1)->dims());
+        outputDims.push_back(inputDims[0]);
+        outputDims.push_back(getInput(0)->dims()[1]);
+        for (std::size_t i = 2; i < nbSpatialFeat+2; ++i) {
+            outputDims.push_back(inputDims[i-1]);
+        }
+
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+
+    return false;
+}
+
+
+
+void Aidge::GridSample_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(GridSample_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::GridSample_Op::getAvailableBackends() const {
+    return Registrar<GridSample_Op>::getKeys();
+}
+
+
+////////////////////////////////////////////////
+
+
+std::shared_ptr<Aidge::Node> Aidge::GridSample(
+                        typename Aidge::GridSample_Op::Mode mode,
+                        typename Aidge::GridSample_Op::PaddingMode paddingMode,
+                        bool alignCorners,
+                        const std::string& name)
+{
+    return std::make_shared<Node>(
+        std::make_shared<GridSample_Op>(
+                mode,
+                paddingMode,
+                alignCorners),
+            name);
+}
diff --git a/src/operator/ILayerNorm.cpp b/src/operator/ILayerNorm.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..daa7ecf86b7ea9a9b10b962d356581f926e92eed
--- /dev/null
+++ b/src/operator/ILayerNorm.cpp
@@ -0,0 +1,56 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 10.09.2024
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ILayerNorm.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ILayerNorm_Op::Type = "ILayerNorm";
+
+void Aidge::ILayerNorm_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
+    AIDGE_ASSERT(inputIdx < 3, "Operators {} supports only {} inputs", type(), nbInputs());
+    AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
+    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
+    if (inputIdx == 0 && getInput(0)->nbDims() == 1)
+        mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
+}
+
+bool Aidge::ILayerNorm_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        const DimSize_t nbFeatures =  getInput(0)->dims()[1];
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            if(inputCategory(i) == InputCategory::Param && getInput(i)->size() != nbFeatures) {
+                getInput(i)->resize({getInput(0)->dims()[1]});
+            }
+        }
+        mOutputs[0]->resize(getInput(0)->dims());
+        return true;
+    }
+    return false;
+}
+
+
+void Aidge::ILayerNorm_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(ILayerNorm_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+    // guard parameter inputs, mirroring the other operators in this change
+    if (getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    if (getInput(2)) {
+        getInput(2)->setBackend(name, device);
+    }
+}
+
+std::set<std::string> Aidge::ILayerNorm_Op::getAvailableBackends() const {
+    return Registrar<ILayerNorm_Op>::getKeys();
+}
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index 2b8107bfc77ef70b33a97032d350a42ec5f3f466..f0b8720bc1e22d8d6308460eabe436db8a4c9f6d 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -13,10 +13,39 @@
 
 #include "aidge/operator/Identity.hpp"
 
+void Aidge::Identity_OpImpl::forward() {
+    const Identity_Op& op = dynamic_cast<const Identity_Op&>(mOp);
+    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
+}
+
+//////////////////////////////////////////////////
+
 const std::string Aidge::Identity_Op::Type = "Identity";
 
-void Aidge::Identity_Op::forward() {
-    // Perform a shallow copy
-    *(mOutputs[0]) = *(mInputs[0]);
-    runHooks();
+Aidge::Identity_Op::Identity_Op()
+    : OperatorTensor(Type, {InputCategory::Data}, 1)
+{
+    mImpl = std::make_shared<Identity_OpImpl>(*this);
+}
+
+Aidge::Identity_Op::Identity_Op(const Aidge::Identity_Op& op)
+    : OperatorTensor(op)
+{
+    mImpl = std::make_shared<Identity_OpImpl>(*this, op.backend());
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Identity_Op::clone() const {
+    return std::make_shared<Identity_Op>(*this);
+}
+
+void Aidge::Identity_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Identity_Op::getAvailableBackends() const {
+    return Registrar<Identity_Op>::getKeys();
+}
+
+std::shared_ptr<Aidge::Node> Aidge::Identity(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Identity_Op>(), name);
 }
diff --git a/src/operator/LeakyReLU.cpp b/src/operator/LeakyReLU.cpp
index 32e050ee1595cf83b5cd0ffbfeba6153dc2243af..dea73f3101887c5213a02b029d344a34f74ba4af 100644
--- a/src/operator/LeakyReLU.cpp
+++ b/src/operator/LeakyReLU.cpp
@@ -9,8 +9,41 @@
  *
  ********************************************************************************/
 
+#include "aidge/operator/LeakyReLU.hpp"
+
+#include <memory>
 #include <string>
 
-#include "aidge/operator/LeakyReLU.hpp"
+#include "aidge/data/Tensor.hpp"
+
+const std::string Aidge::LeakyReLU_Op::Type = "LeakyReLU";
+
+Aidge::LeakyReLU_Op::LeakyReLU_Op(const Aidge::LeakyReLU_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::LeakyReLU_Op::clone() const {
+    return std::make_shared<LeakyReLU_Op>(*this);
+}
+
+void Aidge::LeakyReLU_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::LeakyReLU_Op::getAvailableBackends() const {
+    return Registrar<LeakyReLU_Op>::getKeys();
+}
+
+/////////////////////////////////////
 
-const std::string Aidge::LeakyReLU_Op::Type = "LeakyReLU";
\ No newline at end of file
+std::shared_ptr<Aidge::Node> Aidge::LeakyReLU(float negativeSlope, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
+}
\ No newline at end of file
diff --git a/src/operator/Ln.cpp b/src/operator/Ln.cpp
new file mode 100755
index 0000000000000000000000000000000000000000..90ae8d8c7dac464665828248c923a1f278dad79b
--- /dev/null
+++ b/src/operator/Ln.cpp
@@ -0,0 +1,49 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Ln.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Ln_Op::Type = "Ln";
+
+Aidge::Ln_Op::Ln_Op(const Aidge::Ln_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Ln_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Ln_Op::clone() const {
+    return std::make_shared<Ln_Op>(*this);
+}
+
+void Aidge::Ln_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    mImpl = Registrar<Ln_Op>::create(name)(*this);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Ln_Op::getAvailableBackends() const {
+    return Registrar<Ln_Op>::getKeys();
+}
+
+/////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Ln(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Ln_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 8f7548155cde4c7187f7a7fe96a44c4accd2c302..668ffd04b7acb0e72b4a3313805fa89ca3466f32 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -20,59 +20,75 @@
 
 const std::string Aidge::MatMul_Op::Type = "MatMul";
 
-bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input. Cannot compute output dimensions for MatMul Operator.");
-    }
-    if (getInput(0)->empty() && getInput(1)->empty()) {
-        // both inputs are scalar
-        mOutputs[0]->resize({});
-        return true;
+Aidge::MatMul_Op::MatMul_Op(const Aidge::MatMul_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(MatMul_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
     }
-    else if (!getInput(0)->empty() && !getInput(1)->empty())
-    {
-        std::vector<std::size_t> dims0 = getInput(0)->dims();
-        std::vector<std::size_t> dims1 = getInput(1)->dims();
-
-        // keep second-to-last dimension of dims0
-        const bool keepDim0 = dims0.size() > 1;
-        // keep last dimension of dims1
-        const bool keepDim1 = dims1.size() > 1;
-
-        if (dims0.size() == 1) {
-            dims0.insert(dims0.cbegin(), 1);
-        }
-        if (dims1.size() == 1) {
-            dims1.push_back(1);
-        }
-        const std::size_t dims_size = std::max(dims0.size(), dims1.size());
+}
 
+std::shared_ptr<Aidge::Operator> Aidge::MatMul_Op::clone() const {
+    return std::make_shared<MatMul_Op>(*this);
+}
 
-        if (dims0.size() > dims1.size()) {
-            dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
-        }
-        else if (dims1.size() > dims0.size()) {
-            dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
+bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated(false)) {
+        if (getInput(0)->empty() && getInput(1)->empty()) {
+            // both inputs are scalar
+            mOutputs[0]->resize({});
+            return true;
         }
+        else if (!getInput(0)->empty() && !getInput(1)->empty())
+        {
+            std::vector<std::size_t> dims0 = getInput(0)->dims();
+            std::vector<std::size_t> dims1 = getInput(1)->dims();
 
-        AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes.");
+            // keep second-to-last dimension of dims0
+            const bool keepDim0 = dims0.size() > 1;
+            // keep last dimension of dims1
+            const bool keepDim1 = dims1.size() > 1;
+
+            if (dims0.size() == 1) {
+                dims0.insert(dims0.cbegin(), 1);
+            }
+            if (dims1.size() == 1) {
+                dims1.push_back(1);
+            }
+            const std::size_t dims_size = std::max(dims0.size(), dims1.size());
 
-        std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
-        for (std::size_t i = 0; i < dims_size-2; ++i) {
-            AIDGE_ASSERT((dims0[i] == dims1[i]) || (dims0[i] == 1) || (dims1[i] == 1), "Bad vector dimension.");
-            outDims[i] = std::max(dims0[i], dims1[i]);
-        }
 
-        // use keepDim0 instead of dims0.size() because dims0 has been modified
-        if (keepDim0)
-            outDims.push_back(dims0[dims_size-2]);
-        if (keepDim1)
-            outDims.push_back(dims1[dims_size-1]);
+            if (dims0.size() > dims1.size()) {
+                dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
+            }
+            else if (dims1.size() > dims0.size()) {
+                dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
+            }
+
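+            // broadcasting example: dims0 (2,3,4) and dims1 (4,5) align to
+            // (2,3,4) x (1,4,5); the inner dims match (4 == 4) and the output is (2,3,5)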
+            AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes: {} vs {}.", dims0, dims1);
+
+            std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
+            for (std::size_t i = 0; i < dims_size-2; ++i) {
+                AIDGE_ASSERT((dims0[i] == dims1[i]) || (dims0[i] == 1) || (dims1[i] == 1), "Bad vector dimension.");
+                outDims[i] = std::max(dims0[i], dims1[i]);
+            }
+
+            // use keepDim0 instead of dims0.size() because dims0 has been modified
+            if (keepDim0)
+                outDims.push_back(dims0[dims_size-2]);
+            if (keepDim1)
+                outDims.push_back(dims1[dims_size-1]);
+
+            mOutputs[0]->resize(outDims);
+            return true;
+        } else {
+            AIDGE_ASSERT(false, "MatMul: mixing a scalar input with an N-D input is not supported.");
+        }
 
-        mOutputs[0]->resize(outDims);
-        return true;
     }
-    
+
     return false;
 }
 
@@ -80,3 +96,13 @@ void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     SET_IMPL_MACRO(MatMul_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::MatMul_Op::getAvailableBackends() const {
+    return Registrar<MatMul_Op>::getKeys();
+}
+
+////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::MatMul(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
+}
\ No newline at end of file
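
For reference, the output-shape rule implemented by `MatMul_Op::forwardDims()` above can be checked in isolation. The following standalone sketch (plain C++, no Aidge dependency) mirrors the same promotion, broadcasting, and dimension-dropping steps:

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

// Standalone sketch of the MatMul output-shape rule from forwardDims() above.
std::vector<std::size_t> matmulOutDims(std::vector<std::size_t> dims0,
                                       std::vector<std::size_t> dims1) {
    const bool keepDim0 = dims0.size() > 1;  // keep second-to-last dim of dims0
    const bool keepDim1 = dims1.size() > 1;  // keep last dim of dims1
    if (dims0.size() == 1) dims0.insert(dims0.cbegin(), 1);
    if (dims1.size() == 1) dims1.push_back(1);
    const std::size_t n = std::max(dims0.size(), dims1.size());
    dims1.insert(dims1.cbegin(), n - dims1.size(), 1);  // left-pad with 1s
    dims0.insert(dims0.cbegin(), n - dims0.size(), 1);
    assert(dims0[n-1] == dims1[n-2] && "incompatible matrix sizes");

    std::vector<std::size_t> out(n - 2, 1);
    for (std::size_t i = 0; i < n - 2; ++i) {
        assert(dims0[i] == dims1[i] || dims0[i] == 1 || dims1[i] == 1);
        out[i] = std::max(dims0[i], dims1[i]);  // broadcast batch dims
    }
    if (keepDim0) out.push_back(dims0[n-2]);
    if (keepDim1) out.push_back(dims1[n-1]);
    return out;
}

int main() {
    // {2, 3, 4} x {4, 5} -> {2, 3, 5}; {4} x {2, 4, 5} -> {2, 5}
    for (auto d : matmulOutDims({2, 3, 4}, {4, 5})) std::cout << d << ' ';
    std::cout << '\n';
}
```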
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5ce137fe6b6c0e4b7150bfc0f1182f6f8ee94850
--- /dev/null
+++ b/src/operator/MaxPooling.cpp
@@ -0,0 +1,109 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MaxPooling.hpp"
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::MaxPooling_Op<DIM>::Type = "MaxPooling";
+
+template <Aidge::DimIdx_t DIM>
+Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                            const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                            bool ceil_mode)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+    attr<MaxPoolingAttr::StrideDims>(stride_dims),
+    attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+    attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
+{}
+
+template <Aidge::DimIdx_t DIM>
+Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const Aidge::MaxPooling_Op<DIM>& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::MaxPooling_Op<DIM>::clone() const {
+    return std::make_shared<MaxPooling_Op<DIM>>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::MaxPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        std::array<DimSize_t, DIM + 2> outputDims{};
+        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+
+        std::function<float(float)> roundingFunction;
+        if (mAttributes->template getAttr<MaxPoolingAttr::CeilMode>()) {
+            roundingFunction = [](float x) { return std::ceil(x); };
+        } else {
+            roundingFunction = [](float x) { return std::floor(x); };
+        }
+
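+        // out = 1 + round((in - kernel) / stride), rounded up (CeilMode) or down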
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
+            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                                        roundingFunction(static_cast<float>(inputDims[dim+2] -
+                                                                mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
+                                        static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
+        }
+        outputDims[1] = inputDims[1];
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+    return false;
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::MaxPooling_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::MaxPooling_Op<DIM>::getAvailableBackends() const {
+    return Registrar<MaxPooling_Op<DIM>>::getKeys();
+}
+
+template class Aidge::MaxPooling_Op<1>;
+template class Aidge::MaxPooling_Op<2>;
+template class Aidge::MaxPooling_Op<3>;
+
+///////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::MaxPooling(const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                           const std::string& name,
+                                           const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                           bool ceil_mode)
+{
+    static_assert(DIM <= MaxDim, "Too many kernel dimensions required by MaxPooling, not supported");
+    return std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, ceil_mode), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::MaxPooling<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, const std::array<Aidge::DimSize_t, 3>&, bool);
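
As a sanity check of the rounding behaviour encoded in `MaxPooling_Op::forwardDims()` above, this small standalone snippet (plain C++, no Aidge dependency) evaluates the spatial output size in both modes:

```cpp
#include <cmath>
#include <cstdio>

// out = 1 + round((in - kernel) / stride), per the forwardDims() above.
int poolOutSize(int in, int kernel, int stride, bool ceilMode) {
    const float q = static_cast<float>(in - kernel) / static_cast<float>(stride);
    return 1 + static_cast<int>(ceilMode ? std::ceil(q) : std::floor(q));
}

int main() {
    // in=7, kernel=2, stride=2: floor mode -> 3, ceil mode -> 4
    std::printf("floor: %d, ceil: %d\n",
                poolOutSize(7, 2, 2, false), poolOutSize(7, 2, 2, true));
}
```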
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index e08b5f1054f07a9dcc1722d219ebce022f994d61..61239071a99a9dfca8613ef78eba17757c4276b7 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -20,35 +20,32 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::Memorize_OpImpl::getNbRequiredData(
+Aidge::Elts_t Aidge::Memorize_ProdConso::getNbRequiredData(
     Aidge::IOIndex_t inputIdx) const
 {
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
 
-    if (scheduleStep == 0 && inputIdx == 0) {
+    if (op.scheduleStep() == 0 && inputIdx == 0) {
         // No data input is required for the initial step.
         // Initialization data is required however.
         return Elts_t::NoneElts();
     }
-    else if (scheduleStep > 0 && inputIdx == 1) {
+    else if (op.scheduleStep() > 0 && inputIdx == 1) {
         // No initialization data is required after the initial step.
         return Elts_t::NoneElts();
     }
     else {
-        return OperatorImpl::getNbRequiredData(inputIdx);
+        return ProdConso::getNbRequiredData(inputIdx);
     }
 }
 
-Aidge::Elts_t Aidge::Memorize_OpImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+Aidge::Elts_t Aidge::Memorize_ProdConso::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                          const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
     assert(mOp.getRawOutput(outputIdx) && "requires valid output");
 
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
 
-    if (endStep > 0 && outputIdx == 1 && scheduleStep >= endStep) {
+    if ((op.endStep() > 0) && (outputIdx == 1) && (op.scheduleStep() >= op.endStep())) {
         return Elts_t::NoneElts();
     }
     else {
@@ -56,22 +53,19 @@ Aidge::Elts_t Aidge::Memorize_OpImpl::getRequiredMemory(const Aidge::IOIndex_t o
     }
 }
 
-void Aidge::Memorize_OpImpl::updateConsummerProducer() {
-    OperatorImpl::updateConsummerProducer();
+void Aidge::Memorize_ProdConso::updateConsummerProducer() {
+    ProdConso::updateConsummerProducer();
 
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
-    AIDGE_ASSERT(endStep == 0 || scheduleStep <= endStep, "cannot update consumer producer anymore, number of cycles exceeded");
+    AIDGE_ASSERT(op.endStep() == 0 || op.scheduleStep() <= op.endStep(), "Cannot update consumer/producer anymore: number of cycles exceeded");
 }
 
 void Aidge::Memorize_OpImpl::forward() {
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int forwardStep = op.template getAttr<MemorizeAttr::ForwardStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
-    AIDGE_ASSERT(endStep == 0 || forwardStep <= endStep, "cannot forward anymore, number of cycles exceeded");
 
-    if (forwardStep == 0) {
+    AIDGE_ASSERT((op.endStep() == 0) || (op.forwardStep() <= op.endStep()), "Cannot forward anymore: number of cycles exceeded");
+
+    if (op.forwardStep() == 0) {
         op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size());
     }
     else {
@@ -81,30 +75,55 @@ void Aidge::Memorize_OpImpl::forward() {
 
 const std::string Aidge::Memorize_Op::Type = "Memorize";
 
+Aidge::Memorize_Op::Memorize_Op(const std::uint32_t endStep)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 2),
+        mAttributes(std::make_shared<Attributes_>(
+                    attr<MemorizeAttr::ScheduleStep>(0),
+                    attr<MemorizeAttr::ForwardStep>(0),
+                    attr<MemorizeAttr::EndStep>(endStep)))
+{
+    // Input #0 is a back edge for Memorize; its inputs are (back, init)
+    setBackEdges({0});
+    mOutputs[1] = mOutputs[0];
+}
+
+Aidge::Memorize_Op::Memorize_Op(const Aidge::Memorize_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+    mOutputs[1] = mOutputs[0];
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Memorize_Op::clone() const {
+    return std::make_shared<Memorize_Op>(*this);
+}
+
 void Aidge::Memorize_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
-    ++this->template getAttr<MemorizeAttr::ScheduleStep>();
-    this->template getAttr<MemorizeAttr::ForwardStep>() = 0;
+    ++mAttributes->template getAttr<MemorizeAttr::ScheduleStep>();
+    mAttributes->template getAttr<MemorizeAttr::ForwardStep>() = 0;
 }
 
 bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) {
-    for (size_t i = 0; i < 2; ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+    if (inputsAssociated(false)) {
+        // Only require one of the inputs to have its dims defined.
+        // Otherwise, forwardDims() won't converge!
+        if (!(getInput(0)->undefined())) {
+            const auto expectedDims = getInput(0)->dims();
+            mOutputs[0]->resize(expectedDims);
+            return true;
+        }
+        else if (!(getInput(1)->undefined())) {
+            const auto expectedDims = getInput(1)->dims();
+            mOutputs[0]->resize(expectedDims);
+            return true;
         }
-    }
-
-    // Only require one of the input to have dims defined
-    // Otherwise, forwardDims() won't converge!
-    if (!(getInput(0)->empty())) {
-        const auto expectedDims =  getInput(0)->dims();
-        mOutputs[0]->resize(expectedDims);
-        return true;
-    }
-    else if (!(getInput(1)->empty())) {
-        const auto expectedDims =  getInput(1)->dims();
-        mOutputs[0]->resize(expectedDims);
-        return true;
     }
 
     return false;
@@ -115,7 +134,7 @@ bool Aidge::Memorize_Op::dimsForwarded() const {
     bool forwarded = true;
     // check outputs have been filled
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
-        forwarded &= !(getOutput(i)->empty());
+        forwarded &= !(getOutput(i)->undefined());
     }
     return forwarded;
 }
@@ -132,6 +151,16 @@ void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
 
 void Aidge::Memorize_Op::forward() {
     Operator::forward();
-    ++this->template getAttr<MemorizeAttr::ForwardStep>();
-    this->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
+    ++mAttributes->template getAttr<MemorizeAttr::ForwardStep>();
+    mAttributes->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
+}
+
+std::set<std::string> Aidge::Memorize_Op::getAvailableBackends() const {
+    return Registrar<Memorize_Op>::getKeys();
+}
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Memorize(const std::uint32_t endStep, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
 }
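
A minimal usage sketch of the new `Memorize()` factory; the step-dependent consumption pattern in the comments follows directly from `Memorize_ProdConso::getNbRequiredData()` above:

```cpp
// Hedged sketch, assuming the Aidge headers from this diff.
#include "aidge/operator/Memorize.hpp"

void buildRecurrence() {
    // endStep == 3: the recurrence runs for three scheduling steps.
    auto mem = Aidge::Memorize(3, "mem");
    // Step 0:   only input #1 (init) is consumed; the back edge #0 is NoneElts.
    // Step > 0: only input #0 (back) is consumed; init (#1) is NoneElts.
    (void)mem;
}
```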
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index 1397b69b9c126c0e2d0ec84bf900a320b95f0d80..e3acba9b4cccdf525d80f85344ba500cc7ac885f 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -18,17 +18,25 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
 
 Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph)
-    : OperatorTensor(type, graph->dataInputs().size(), (graph->getOrderedInputs().size() - graph->dataInputs().size()), graph->getOrderedOutputs().size()),
+    : OperatorTensor(type, [graph]() {
+        std::vector<InputCategory> inputsCategory;
+        for (const auto& in : graph->getOrderedInputs()) {
+            if (in.first) {
+                inputsCategory.push_back(in.first->getOperator()->inputCategory(in.second));
+            }
+            else {
+                // Dummy input, default to OptionalData
+                inputsCategory.push_back(InputCategory::OptionalData);
+            }
+        }
+        return inputsCategory;
+    }(), graph->getOrderedOutputs().size()),
         mGraph(graph)
 {
-    mInputs = std::vector<std::shared_ptr<Tensor>>(mGraph->getOrderedInputs().size());
-    for (std::size_t i = 0; i < mInputs.size(); ++i) {
-        mInputs[i] = std::make_shared<Tensor>();
-    }
     // Associate outputs to micro-graph outputs for custom implementation
-    mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->getOrderedOutputs().size());
     for (size_t outputIdx = 0; outputIdx < mOutputs.size(); ++outputIdx) {
         const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
         if (outputOp.first) {
@@ -37,6 +45,10 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar
     }
 }
 
+std::shared_ptr<Aidge::Operator> Aidge::MetaOperator_Op::clone() const {
+    return std::make_shared<MetaOperator_Op>(*this);
+}
+
 void Aidge::MetaOperator_Op::associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) {
     AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type");
     AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx);
@@ -58,9 +70,54 @@ void Aidge::MetaOperator_Op::setInput(const Aidge::IOIndex_t inputIdx, const std
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(inputOp.first->getOperator()->getRawInput(inputOp.second));
 }
 
+std::string Aidge::MetaOperator_Op::backend() const noexcept {
+    return (mImpl)
+        ? mImpl->backend()
+        : mGraph->rootNode()->getOperator()->backend();
+}
+
+void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    if (Registrar<MetaOperator_Op>::exists({name, type()})) {
+        // A custom implementation exists for this meta operator
+        mImpl = Registrar<MetaOperator_Op>::create({name, type()})(*this);
+    }
+
+    // The micro-graph should always be set to the right backend, since it
+    // shares input/output tensors.
+    // Input/output tensor backends are updated here.
+    mGraph->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::MetaOperator_Op::getAvailableBackends() const {
+    std::set<std::string> backendsList;
+    for (const auto& tupleKey : Registrar<MetaOperator_Op>::getKeys()) {
+        if (std::get<1>(tupleKey) == type()) {
+            backendsList.insert(std::get<0>(tupleKey));
+        }
+    }
+    return backendsList;
+}
+
+std::shared_ptr<Aidge::Attributes> Aidge::MetaOperator_Op::attributes() const {
+    auto attrs = std::make_shared<DynamicAttributes>();
+
+    for (const auto& node : mGraph->getRankedNodesName("{3}")) {
+        const auto attributes = node.first->getOperator()->attributes();
+        if (attributes) {
+            const auto nodeAttrs = DynamicAttributes(attributes->getAttrs());
+            attrs->addAttr(node.first->type() + "#" + node.second, nodeAttrs);
+            if (node.second == "0") {
+                attrs->addAttr(node.first->type(), nodeAttrs);
+            }
+        }
+    }
+
+    return attrs;
+}
+
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
     if (mImpl) {
-        return mImpl->getNbRequiredData(inputIdx);
+        return mImpl->prodConso()->getNbRequiredData(inputIdx);
     }
     else {
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
@@ -75,7 +132,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx
 
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredProtected(const IOIndex_t inputIdx) const {
     if (mImpl) {
-        return mImpl->getNbRequiredProtected(inputIdx);
+        return mImpl->prodConso()->getNbRequiredProtected(inputIdx);
     }
     else {
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
@@ -90,7 +147,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbRequiredProtected(const IOIndex_t inp
 
 Aidge::Elts_t Aidge::MetaOperator_Op::getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const {
     if (mImpl) {
-        return mImpl->getRequiredMemory(outputIdx, inputsSize);
+        return mImpl->prodConso()->getRequiredMemory(outputIdx, inputsSize);
     }
     else {
         const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
@@ -105,7 +162,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getRequiredMemory(const IOIndex_t outputId
 
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbConsumedData(IOIndex_t inputIdx) const {
     if (mImpl) {
-        return mImpl->getNbConsumedData(inputIdx);
+        return mImpl->prodConso()->getNbConsumedData(inputIdx);
     }
     else {
         const auto& inputOp = mGraph->getOrderedInputs()[inputIdx];
@@ -120,7 +177,7 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbConsumedData(IOIndex_t inputIdx) cons
 
 Aidge::Elts_t Aidge::MetaOperator_Op::getNbProducedData(IOIndex_t outputIdx) const {
     if (mImpl) {
-        return mImpl->getNbProducedData(outputIdx);
+        return mImpl->prodConso()->getNbProducedData(outputIdx);
     }
     else {
         const auto& outputOp = mGraph->getOrderedOutputs()[outputIdx];
@@ -133,9 +190,23 @@ Aidge::Elts_t Aidge::MetaOperator_Op::getNbProducedData(IOIndex_t outputIdx) con
     }
 }
 
+void Aidge::MetaOperator_Op::resetConsummerProducer() {
+    if (mImpl) {
+        mImpl->prodConso()->resetConsummerProducer();
+    }
+    else {
+        if (!mScheduler) {
+            // Lazy initialization
+            mScheduler = std::make_shared<SequentialScheduler>(mGraph, mUpperNode.lock());
+        }
+
+        mScheduler->resetScheduling();
+    }
+}
+
 void Aidge::MetaOperator_Op::updateConsummerProducer() {
     if (mImpl) {
-        mImpl->updateConsummerProducer();
+        mImpl->prodConso()->updateConsummerProducer();
     }
     else {
         if (!mScheduler) {
@@ -167,3 +238,15 @@ void Aidge::MetaOperator_Op::forward() {
         mScheduler->forward(false);
     }
 }
+
+/////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::MetaOperator(const char *type,
+                                  const std::shared_ptr<Aidge::GraphView>& graph,
+                                  const std::string& name)
+{
+    auto op = std::make_shared<MetaOperator_Op>(type, graph);
+    auto node = std::make_shared<Node>(op, name);
+    op->setUpperNode(node);
+    return node;
+}
\ No newline at end of file
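
The new `attributes()` override surfaces the micro-graph's attributes under `<Type>#<rank>` keys, with a bare `<Type>` alias for the first node of each type. A hedged sketch of how a caller might inspect them:

```cpp
// Hypothetical sketch; the key names below are illustrative.
#include "aidge/operator/MetaOperator.hpp"

void inspect(const std::shared_ptr<Aidge::MetaOperator_Op>& metaOp) {
    const auto attrs = metaOp->attributes();
    // For a PaddedConv micro-graph, expect keys such as "Pad#0"/"Pad" and
    // "Conv#0"/"Conv", each holding that node's DynamicAttributes.
    (void)attrs;
}
```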
diff --git a/src/operator/MetaOperatorDefs/LSTM.cpp b/src/operator/MetaOperatorDefs/LSTM.cpp
index cd993f9e5cd127a005101284b78c416150b3c99a..910e7c67aad0068679ca2d240b23312add3e42d7 100644
--- a/src/operator/MetaOperatorDefs/LSTM.cpp
+++ b/src/operator/MetaOperatorDefs/LSTM.cpp
@@ -38,9 +38,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     auto add = Add(2, (!name.empty()) ? name + "_add" : "");
 
     // Forget gate
-    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_forgetGateX" : "");
+    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateX" : "");
     input->addChild(forgetGateX, 0, 0);
-    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_forgetGateH" : "");
+    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_forgetGateH" : "");
     hiddenState->addChild(forgetGateH, 1, 0);
     auto forgetGate = Add(2, (!name.empty()) ? name + "_forgetGate" : "");
     forgetGateX->addChild(forgetGate, 0, 0);
@@ -53,9 +53,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     cellState->addChild(forgetGateMul, 1, 1);
 
     // Input gate
-    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_inputGateX" : "");
+    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_inputGateX" : "");
     input->addChild(inputGateX, 0, 0);
-    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_inputGateH" : "");
+    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_inputGateH" : "");
     hiddenState->addChild(inputGateH, 1, 0);
     auto inputGate = Add(2, (!name.empty()) ? name + "_inputGate" : "");
     inputGateX->addChild(inputGate, 0, 0);
@@ -67,9 +67,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     inputGateMul->addChild(add, 0, 1);
 
     // Candidate for cell update
-    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_cellCandidateX" : "");
+    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateX" : "");
     input->addChild(cellCandidateX, 0, 0);
-    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_cellCandidateH" : "");
+    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_cellCandidateH" : "");
     hiddenState->addChild(cellCandidateH, 1, 0);
     auto cellCandidate = Add(2, (!name.empty()) ? name + "_cellCandidate" : "");
     cellCandidateX->addChild(cellCandidate, 0, 0);
@@ -79,9 +79,9 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     cellCandidateAct->addChild(inputGateMul, 0, 1);
 
     // Output gate
-    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_outputGateX" : "");
+    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateX" : "");
     input->addChild(outputGateX, 0, 0);
-    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), (!name.empty()) ? name + "_outputGateH" : "");
+    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), (!name.empty()) ? name + "_outputGateH" : "");
     hiddenState->addChild(outputGateH, 1, 0);
     auto outputGate = Add(2, (!name.empty()) ? name + "_outputGate" : "");
     outputGateX->addChild(outputGate, 0, 0);
@@ -124,19 +124,20 @@ std::shared_ptr<Node> LSTM(const DimSize_t inChannel,
     addProducer(metaOp, 6, {hiddenChannel, hiddenChannel}, "ro");
     addProducer(metaOp, 7, {hiddenChannel, hiddenChannel}, "rf");
     addProducer(metaOp, 8, {hiddenChannel, hiddenChannel}, "rc");
-    addProducer(metaOp, 9, {(noBias ? 0 : hiddenChannel)}, "wbi");
-    addProducer(metaOp, 10, {(noBias ? 0 : hiddenChannel)}, "wbo");
-    addProducer(metaOp, 11, {(noBias ? 0 : hiddenChannel)}, "wbf");
-    addProducer(metaOp, 12, {(noBias ? 0 : hiddenChannel)}, "wbc");
-    addProducer(metaOp, 13, {(noBias ? 0 : hiddenChannel)}, "rbi");
-    addProducer(metaOp, 14, {(noBias ? 0 : hiddenChannel)}, "rbo");
-    addProducer(metaOp, 15, {(noBias ? 0 : hiddenChannel)}, "rbf");
-    addProducer(metaOp, 16, {(noBias ? 0 : hiddenChannel)}, "rbc");
+    if (!noBias) {
+        addProducer(metaOp, 9, {hiddenChannel}, "wbi");
+        addProducer(metaOp, 10, {hiddenChannel}, "wbo");
+        addProducer(metaOp, 11, {hiddenChannel}, "wbf");
+        addProducer(metaOp, 12, {hiddenChannel}, "wbc");
+        addProducer(metaOp, 13, {hiddenChannel}, "rbi");
+        addProducer(metaOp, 14, {hiddenChannel}, "rbo");
+        addProducer(metaOp, 15, {hiddenChannel}, "rbf");
+        addProducer(metaOp, 16, {hiddenChannel}, "rbc");
+    }
     return metaOp;
 }
 
-std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
-                                         bool noBias)
+std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength)
 {
     // Construct micro-graph
     auto input = Identity("");
@@ -145,9 +146,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     auto add = Add(2, "");
 
     // Forget gate
-    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto forgetGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(forgetGateX, 0, 0);
-    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto forgetGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(forgetGateH, 1, 0);
     auto forgetGate = Add(2, "");
     forgetGateX->addChild(forgetGate, 0, 0);
@@ -160,9 +161,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     cellState->addChild(forgetGateMul, 1, 1);
 
     // Input gate
-    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto inputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(inputGateX, 0, 0);
-    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto inputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(inputGateH, 1, 0);
     auto inputGate = Add(2, "");
     inputGateX->addChild(inputGate, 0, 0);
@@ -174,9 +175,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     inputGateMul->addChild(add, 0, 1);
 
     // Candidate for cell update
-    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto cellCandidateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(cellCandidateX, 0, 0);
-    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto cellCandidateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(cellCandidateH, 1, 0);
     auto cellCandidate = Add(2, "");
     cellCandidateX->addChild(cellCandidate, 0, 0);
@@ -186,9 +187,9 @@ std::shared_ptr<MetaOperator_Op> LSTM_Op(const DimSize_t seqLength,
     cellCandidateAct->addChild(inputGateMul, 0, 1);
 
     // Output gate
-    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto outputGateX = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     input->addChild(outputGateX, 0, 0);
-    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(noBias), "");
+    auto outputGateH = std::make_shared<Node>(std::make_shared<FC_Op>(), "");
     hiddenState->addChild(outputGateH, 1, 0);
     auto outputGate = Add(2,"");
     outputGateX->addChild(outputGate, 0, 0);
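
With this change, `noBias` controls whether the eight bias producers (inputs 9 to 16) exist at all, instead of attaching them with a zero dimension. A hedged sketch; the parameter order is assumed from the surrounding `LSTM()` definition:

```cpp
// Hypothetical usage; signature assumed as (inChannel, hiddenChannel, seqLength, noBias, name).
#include "aidge/operator/MetaOperatorDefs.hpp"

void makeLstm() {
    auto withBias    = Aidge::LSTM(32, 64, 16, /*noBias=*/false, "lstm_b");
    auto withoutBias = Aidge::LSTM(32, 64, 16, /*noBias=*/true,  "lstm_nb");
    // withBias carries producers for inputs 9..16 ("wbi".."rbc");
    // withoutBias leaves those inputs unconnected instead of 0-sized.
    (void)withBias; (void)withoutBias;
}
```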
diff --git a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
index ad300cd4f98b84d5ac5834370db53017958efaf6..ef319ef38ad18de9eaed0a1d4a92c3877ee7cf8e 100644
--- a/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedAvgPooling.cpp
@@ -46,8 +46,6 @@ std::shared_ptr<Node> PaddedAvgPooling(const std::array<DimSize_t, DIM> &kernel_
 
 template std::shared_ptr<Node> PaddedAvgPooling<1>(const std::array<DimSize_t,1>&, const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
 template std::shared_ptr<Node> PaddedAvgPooling<2>(const std::array<DimSize_t,2>&, const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
-template std::shared_ptr<Node> PaddedAvgPooling<3>(const std::array<DimSize_t,3>&, const std::string&, const std::array<DimSize_t,3>&, const std::array<DimSize_t,6>&);
-template std::shared_ptr<Node> PaddedAvgPooling<4>(const std::array<DimSize_t,4>&, const std::string&, const std::array<DimSize_t,4>&, const std::array<DimSize_t,8>&);
 
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
@@ -61,8 +59,6 @@ std::shared_ptr<Node> PaddedAvgPooling(const DimSize_t (&kernel_dims)[DIM],
 
 template std::shared_ptr<Node> PaddedAvgPooling<1>(const DimSize_t (&kernel_dims)[1], const std::string&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
 template std::shared_ptr<Node> PaddedAvgPooling<2>(const DimSize_t (&kernel_dims)[2], const std::string&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
-template std::shared_ptr<Node> PaddedAvgPooling<3>(const DimSize_t (&kernel_dims)[3], const std::string&, const std::array<DimSize_t,3>&, const std::array<DimSize_t,6>&);
-template std::shared_ptr<Node> PaddedAvgPooling<4>(const DimSize_t (&kernel_dims)[4], const std::string&, const std::array<DimSize_t,4>&, const std::array<DimSize_t,8>&);
 
 
 //////////////////////////////////
@@ -84,8 +80,5 @@ inline std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op(const std::array<Dim
 
 template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<1>(const std::array<DimSize_t,1>&, const std::array<DimSize_t,1>&, const std::array<DimSize_t,2>&);
 template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<2>(const std::array<DimSize_t,2>&, const std::array<DimSize_t,2>&, const std::array<DimSize_t,4>&);
-template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<3>(const std::array<DimSize_t,3>&, const std::array<DimSize_t,3>&, const std::array<DimSize_t,6>&);
-template std::shared_ptr<MetaOperator_Op> PaddedAvgPooling_Op<4>(const std::array<DimSize_t,4>&, const std::array<DimSize_t,4>&, const std::array<DimSize_t,8>&);
-
 
 } // namespace Aidge
diff --git a/src/operator/MetaOperatorDefs/PaddedConv.cpp b/src/operator/MetaOperatorDefs/PaddedConv.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..31b1c675e9d577002350ea11dd0b42601a91ef76
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/PaddedConv.cpp
@@ -0,0 +1,86 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include <array>
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConv(Aidge::DimSize_t in_channels,
+                                  Aidge::DimSize_t out_channels,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+                                  bool no_bias)
+{
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
+        std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "")
+    });
+    auto metaOpNode = MetaOperator("PaddedConv", graph, name);
+    addProducer(metaOpNode, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
+    if (!no_bias) {
+        addProducer(metaOpNode, 2, {out_channels}, "b");
+    }
+    return metaOpNode;
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<1>(const Aidge::DimSize_t, const Aidge::DimSize_t, const std::array<Aidge::DimSize_t,1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<2>(const Aidge::DimSize_t, const Aidge::DimSize_t, const std::array<Aidge::DimSize_t,2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op(
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims)
+{
+    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
+    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
+
+    return std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}));
+}
+template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&);
+template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConv_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
+
+// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+template <Aidge::DimSize_t DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConv(
+    Aidge::DimSize_t in_channels,
+    Aidge::DimSize_t out_channels,
+    Aidge::DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name,
+    const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+    const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+    const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+    bool no_bias)
+{
+    return PaddedConv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<1>(const Aidge::DimSize_t, const Aidge::DimSize_t, const Aidge::DimSize_t (&)[1], const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<2>(const Aidge::DimSize_t, const Aidge::DimSize_t, const Aidge::DimSize_t (&)[2], const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
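
A minimal usage sketch of the helpers above; the C-style array overload lets `DIM` be deduced from the braced kernel list, and `no_bias` is left to its assumed header default:

```cpp
// Hedged sketch, assuming the headers and defaults from this diff.
#include "aidge/operator/MetaOperatorDefs.hpp"

void makePaddedConv() {
    // 3x3 kernel, 16 -> 32 channels, one-pixel zero padding on every border.
    auto conv = Aidge::PaddedConv(16, 32, {3, 3}, "conv1",
                                  {1, 1},           // stride
                                  {1, 1, 1, 1});    // padding: {begin0, end0, begin1, end1}
    // Expands to Pad -> Conv inside a "PaddedConv" meta-operator, with a
    // weight producer ("w") and, unless no_bias is set, a bias producer ("b").
    (void)conv;
}
```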
diff --git a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1c073b78a61763b46e330089cccfcc4bced352a4
--- /dev/null
+++ b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
@@ -0,0 +1,84 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/MetaOperatorDefs.hpp"
+
+#include <array>
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(const Aidge::DimSize_t nb_channels,
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+                                  bool no_bias)
+{
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
+        std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv_depth_wise" : "")
+    });
+    auto metaOpNode = MetaOperator("PaddedConvDepthWise", graph, name);
+    addProducer(metaOpNode, 1, append(nb_channels, append(Aidge::DimSize_t(1), kernel_dims)), "w");
+    if (!no_bias) {
+        addProducer(metaOpNode, 2, {nb_channels}, "b");
+    }
+    return metaOpNode;
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<1>(const Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<2>(const Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
+
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op(
+                                  const std::array<Aidge::DimSize_t, DIM> &kernel_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+                                  const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilation_dims)
+{
+    auto pad = Pad<DIM>(padding_dims, "", PadBorderType::Constant, 0.0);
+    auto conv = std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), "");
+
+    return std::make_shared<MetaOperator_Op>("PaddedConvDepthWise", Sequential({pad, conv}));
+}
+template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<1>(const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&);
+template std::shared_ptr<Aidge::MetaOperator_Op> Aidge::PaddedConvDepthWise_Op<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&);
+
+
+// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+template <Aidge::DimSize_t DIM>
+std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(
+    const Aidge::DimSize_t nb_channels,
+    Aidge::DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name,
+    const std::array<Aidge::DimSize_t, DIM> &stride_dims,
+    const std::array<Aidge::DimSize_t, 2*DIM> &padding_dims,
+    const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
+    bool no_bias)
+{
+    return PaddedConvDepthWise(nb_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims, no_bias);
+}
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<1>(const Aidge::DimSize_t, const Aidge::DimSize_t (&)[1], const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<2>(const Aidge::DimSize_t, const Aidge::DimSize_t (&)[2], const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index 0f635ea655676e488343bb55d9de6423a997af7d..adabcd0d359927693965cec1987d2fad083328b9 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -19,6 +19,27 @@ void Aidge::Move_OpImpl::forward() {
 
 const std::string Aidge::Move_Op::Type = "Move";
 
+Aidge::Move_Op::Move_Op()
+    : OperatorTensor(Type, {InputCategory::Data}, 1)
+{
+    mImpl = std::make_shared<Move_OpImpl>(*this);
+}
+
+Aidge::Move_Op::Move_Op(const Aidge::Move_Op& op)
+    : OperatorTensor(op)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Move_Op, *this, {op.getInput(0)->getImpl()->backend(), op.backend()});
+    }
+    else {
+        mImpl = std::make_shared<Move_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Move_Op::clone() const {
+    return std::make_shared<Move_Op>(*this);
+}
+
 void Aidge::Move_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Move_Op>::exists({mInputs[0]->getImpl()->backend(), name})) {
         SET_IMPL_MACRO(Move_Op, *this, {mInputs[0]->getImpl()->backend(), name});
@@ -28,3 +49,18 @@ void Aidge::Move_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::Move_Op::getAvailableBackends() const {
+    std::set<std::string> backendsList;
+    for (const auto& tupleKey : Registrar<Move_Op>::getKeys()) {
+        backendsList.insert(std::get<0>(tupleKey));
+        backendsList.insert(std::get<1>(tupleKey));
+    }
+    return backendsList;
+}
+
+////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Move(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Move_Op>(), name);
+}
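
`Move_Op` is registered on (source, destination) backend pairs, which is why `getAvailableBackends()` collects both tuple sides into one set. A hedged sketch, assuming a `{"cpu", "cuda"}` implementation is registered somewhere:

```cpp
// Hypothetical sketch; backend names are illustrative.
#include "aidge/operator/Move.hpp"

void moveToGpu() {
    auto move = Aidge::Move("to_gpu");
    // setBackend() selects the {input backend, "cuda"} implementation when it
    // is registered, and falls back to the generic Move_OpImpl copy otherwise.
    move->getOperator()->setBackend("cuda");
}
```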
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index 426de388f31391fb5e59446d50e50de94ca5f8a1..3f163c9d6a572cc488c621a0ec6819ea68143304 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -23,14 +23,22 @@
 
 const std::string Aidge::Mul_Op::Type = "Mul";
 
-bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
+Aidge::Mul_Op::Mul_Op(const Aidge::Mul_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Mul_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
     }
+}
 
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+std::shared_ptr<Aidge::Operator> Aidge::Mul_Op::clone() const {
+    return std::make_shared<Mul_Op>(*this);
+}
 
+bool Aidge::Mul_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
@@ -62,3 +70,13 @@ void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(Mul_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::Mul_Op::getAvailableBackends() const {
+    return Registrar<Mul_Op>::getKeys();
+}
+
+///////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Mul(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Mul_Op>(), name);
+}
\ No newline at end of file
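
The broadcasting step elided from this hunk's context follows the usual numpy-style rule (align trailing dimensions; each pair must be equal or 1). A standalone sketch of that rule, stated as an assumption:

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Numpy-style elementwise broadcasting (assumed behaviour of Mul::forwardDims()).
std::vector<std::size_t> broadcast(std::vector<std::size_t> a,
                                   std::vector<std::size_t> b) {
    if (a.size() < b.size()) std::swap(a, b);
    b.insert(b.cbegin(), a.size() - b.size(), 1);  // left-pad the shorter shape
    std::vector<std::size_t> out(a.size());
    for (std::size_t i = 0; i < a.size(); ++i) {
        assert((a[i] == b[i] || a[i] == 1 || b[i] == 1) && "incompatible dims");
        out[i] = std::max(a[i], b[i]);
    }
    return out;  // e.g. {2, 3, 4} * {3, 1} -> {2, 3, 4}
}
```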
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index 317bbd364572f49a714e328bf33f3cd58c19215f..f15a7dc3899a7bc864e8e76ff0946fb70584bf05 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -16,6 +16,7 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Operator.hpp"
+#include "aidge/scheduler/ProdConso.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
@@ -33,35 +34,35 @@ Aidge::Operator::~Operator() noexcept = default;
 
 Aidge::Elts_t Aidge::Operator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredData(): an implementation is required for {}!", type());
-    return mImpl->getNbRequiredData(inputIdx);
+    return mImpl->prodConso()->getNbRequiredData(inputIdx);
 }
 
 Aidge::Elts_t Aidge::Operator::getNbRequiredProtected(const Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredProtected(): an implementation is required for {}!", type());
-    return mImpl->getNbRequiredProtected(inputIdx);
+    return mImpl->prodConso()->getNbRequiredProtected(inputIdx);
 }
 
 Aidge::Elts_t Aidge::Operator::getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const {
     AIDGE_ASSERT(mImpl != nullptr, "getRequiredMemory(): an implementation is required for {}!", type());
-    return mImpl->getRequiredMemory(outputIdx, inputsSize);
+    return mImpl->prodConso()->getRequiredMemory(outputIdx, inputsSize);
 }
 
 Aidge::Elts_t Aidge::Operator::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbConsumedData(): an implementation is required for {}!", type());
-    return mImpl->getNbConsumedData(inputIdx);
+    return mImpl->prodConso()->getNbConsumedData(inputIdx);
 }
 
 Aidge::Elts_t Aidge::Operator::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbProducedData(): an implementation is required for {}!", type());
-    return mImpl->getNbProducedData(outputIdx);
+    return mImpl->prodConso()->getNbProducedData(outputIdx);
 }
 void Aidge::Operator::updateConsummerProducer(){
     AIDGE_ASSERT(mImpl != nullptr, "updateConsummerProducer(): an implementation is required for {}!", type());
-    mImpl->updateConsummerProducer();
+    mImpl->prodConso()->updateConsummerProducer();
 }
 void Aidge::Operator::resetConsummerProducer(){
     AIDGE_ASSERT(mImpl != nullptr, "resetConsummerProducer(): an implementation is required for {}!", type());
-    mImpl->resetConsummerProducer();
+    mImpl->prodConso()->resetConsummerProducer();
 }
 
 void Aidge::Operator::runHooks() const {
@@ -79,3 +80,17 @@ void Aidge::Operator::backward() {
     AIDGE_ASSERT(mImpl != nullptr, "backward(): an implementation is required for {}!", type());
     mImpl->backward(); 
 }
+
+void Aidge::Operator::setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends) {
+    const auto& availableBackends = getAvailableBackends();
+    // If none of the requested backends is available, fall back to the last one anyway
+    auto selectedBackend = backends.back();
+    for (const auto& backend : backends) {
+        if (availableBackends.find(backend.first) != availableBackends.end()) {
+            selectedBackend = backend;
+            break;
+        }
+    }
+
+    setBackend(selectedBackend.first, selectedBackend.second);
+}
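
The new overload walks an ordered preference list and, when none of the names is registered, still attempts the last entry. A short usage sketch with illustrative backend names:

```cpp
// Hypothetical sketch; "cuda"/"cpu" availability depends on the build.
#include "aidge/operator/Operator.hpp"

void pickBackend(Aidge::Operator& op) {
    // Prefer "cuda" on device 0 when registered for this operator; otherwise "cpu".
    op.setBackend({{"cuda", 0}, {"cpu", 0}});
}
```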
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index af20c1ff4ddd71479fcc899f7fe87be1d0000c72..ff6fb9ce4b6b8596477dfdd1f43f8927e534459b 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -9,7 +9,6 @@
  *
  ********************************************************************************/
 
-#include <cassert>
 #include <memory>
 
 #include "aidge/operator/OperatorTensor.hpp"
@@ -20,11 +19,10 @@
 
 
 Aidge::OperatorTensor::OperatorTensor(const std::string& type,
-                                                            const IOIndex_t nbData,
-                                                            const IOIndex_t nbParam,
+                                      const std::vector<InputCategory>& inputsCategory,
                                                             const IOIndex_t nbOut)
-: Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor),
-        mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)),
+: Operator(type, inputsCategory, nbOut, OperatorType::Tensor),
+        mInputs(std::vector<std::shared_ptr<Tensor>>(inputsCategory.size(), nullptr)),
         mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) {
     for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) {
         mOutputs[i] = std::make_shared<Tensor>();
@@ -51,6 +49,11 @@ void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, cons
     mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
 }
 
+void Aidge::OperatorTensor::resetInput(const Aidge::IOIndex_t inputIdx) {
+    AIDGE_ASSERT(inputIdx < nbInputs(), "Input idx out of range.");
+    mInputs[inputIdx] = nullptr;
+}
+
 void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) {
     AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
     if (getInput(inputIdx)) {
@@ -70,7 +73,7 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidg
     return mInputs[inputIdx];
 }
 
-void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) {
+void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const {
     AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
     AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs());
     const auto& data_tensor = std::dynamic_pointer_cast<Tensor>(data);
@@ -98,9 +101,6 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
     if (outputIdx >= nbOutputs()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range.");
     }
-    if (nbInputs() != nbData()) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overrided function.");
-    }
     if (!dimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
     }
@@ -110,19 +110,28 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
         }
     }
     // return the same Tensor description as given in function parameter for each data input
-    return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims));
+    return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbInputs(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims));
 }
 
-bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    bool associated = (nbInputs() > 0); // do not compute anything if no input
+bool Aidge::OperatorTensor::inputsAssociated(bool checkNonEmpty) const {
+    bool associated = true;
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        if (!getInput(i)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+        if (inputCategory(i) != InputCategory::OptionalData && inputCategory(i) != InputCategory::OptionalParam) {
+            if (!getInput(i)) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
+            }
+        }
+
+        if (checkNonEmpty && getInput(i)) {
+            associated &= !(getInput(i)->undefined());
         }
-        associated &= !(getInput(i)->empty());
     }
-    if (associated) {
+
+    return associated;
+}
+
+bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
         const auto expectedDims =  getInput(0)->dims();
         for (std::size_t i = 1; i < nbInputs(); ++i) {
             if (expectedDims != getInput(i)->dims()) {
@@ -132,21 +141,24 @@ bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
             }
         }
         mOutputs[0]->resize(expectedDims);
+        return true;
     }
 
-    return associated;
+    return false;
 }
 
 bool Aidge::OperatorTensor::dimsForwarded() const {
     bool forwarded = true;
     // check both inputs and outputs have been filled
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-        forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
+        if (inputCategory(i) != InputCategory::OptionalData && inputCategory(i) != InputCategory::OptionalParam) {
+            forwarded &= mInputs[i] ? !(getInput(i)->undefined()) : false;
+        }
     }
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
         // If getOutput(i) is nullptr, ignore this output (it may be a dummy
         // output in a MetaOperator)
-        forwarded &= (getOutput(i)) ? !(getOutput(i)->empty()) : true;
+        forwarded &= (getOutput(i)) ? !(getOutput(i)->undefined()) : true;
     }
     return forwarded;
 }
@@ -156,9 +168,32 @@ void Aidge::OperatorTensor::setDataType(const DataType& dataType) const {
         getOutput(i)->setDataType(dataType);
     }
 
-    for (IOIndex_t i = nbData(); i < nbInputs(); ++i) {
-        AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
-        getInput(i)->setDataType(dataType);
+    // Set data type for parameter inputs only (weights, bias, ...), which are usually Producers
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (inputCategory(i) == InputCategory::Param) {
+            AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
+            getInput(i)->setDataType(dataType);
+        }
+        else if (inputCategory(i) == InputCategory::OptionalParam && getInput(i) != nullptr) {
+            getInput(i)->setDataType(dataType);
+        }
+    }
+}
+
+void Aidge::OperatorTensor::setDataFormat(const DataFormat& dataFormat) const {
+    for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
+        getOutput(i)->setDataFormat(dataFormat);
+    }
+
+    // Set data format for parameter inputs only (weights, biases, ...), which are usually Producers
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (inputCategory(i) == InputCategory::Param) {
+            AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
+            getInput(i)->setDataFormat(dataFormat);
+        }
+        else if (inputCategory(i) == InputCategory::OptionalParam && getInput(i) != nullptr) {
+            getInput(i)->setDataFormat(dataFormat);
+        }
     }
 }
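
The `inputsAssociated()` helper above is the pattern the rest of this patch converges on: mandatory inputs throw if missing, optional inputs are skipped, and shape inference simply waits until every defined input carries dims. A minimal sketch of what a derived operator's `forwardDims()` now looks like, using a hypothetical single-input operator (the name is illustrative, not from the patch):

```cpp
// Sketch only: MyOp_Op is hypothetical; the control flow mirrors the
// refactored operators below (e.g. Pad_Op<DIM>::forwardDims()).
bool MyOp_Op::forwardDims(bool /*allowDataDependency*/) {
    if (inputsAssociated()) {  // throws on missing mandatory inputs,
                               // returns false while an input is undefined
        mOutputs[0]->resize(getInput(0)->dims());  // propagate input geometry
        return true;
    }
    return false;  // dims not computable yet; the scheduler may retry later
}
```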
 
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..39f61e328bd3f98bc836604462bbfc064fbb93be
--- /dev/null
+++ b/src/operator/Pad.cpp
@@ -0,0 +1,76 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Pad.hpp"
+
+#include <array>
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::Pad_Op<DIM>::Type = "Pad";
+
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::Pad_Op<DIM>::clone() const {
+    return std::make_shared<Pad_Op<DIM>>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::Pad_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        std::array<DimSize_t, DIM + 2> outputDims{};
+        const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
+
+        for (std::size_t dim = 0; dim < DIM; ++dim) {
+            outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
+                                + inputDims[dim+2]
+                                + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
+        }
+        outputDims[1] = inputDims[1];
+        outputDims[0] = inputDims[0];
+        mOutputs[0]->resize(outputDims);
+        return true;
+    }
+
+    return false;
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::Pad_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::Pad_Op<DIM>::getAvailableBackends() const {
+    return Registrar<Pad_Op<DIM>>::getKeys();
+}
+
+template class Aidge::Pad_Op<1>;
+template class Aidge::Pad_Op<2>;
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Pad(const std::array<Aidge::DimSize_t, 2*DIM> &beginEndTuples,
+                                           const std::string& name,
+                                           const PadBorderType &borderType,
+                                           double borderValue)
+{
+    AIDGE_ASSERT(DIM<=MaxDim, "Too many kernel dimensions required by {}, not supported", Pad_Op<DIM>::Type);
+    return std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(beginEndTuples, borderType, borderValue), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Pad<1>(const std::array<Aidge::DimSize_t, 2> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
+template std::shared_ptr<Aidge::Node> Aidge::Pad<2>(const std::array<Aidge::DimSize_t, 4> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
+template std::shared_ptr<Aidge::Node> Aidge::Pad<3>(const std::array<Aidge::DimSize_t, 6> &beginEndTuples, const std::string&, const PadBorderType&, double borderValue);
\ No newline at end of file
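
For orientation, the border layout consumed by `Pad_Op<DIM>::forwardDims()` above is `{begin_0, end_0, begin_1, end_1, ...}` over the spatial axes, with batch and channel copied through. A worked example (all dims illustrative):

```cpp
#include "aidge/operator/Pad.hpp"

// Illustrative sketch: with borders {1, 1, 2, 2} and an NCHW input of dims
// {1, 3, 24, 24}, forwardDims() computes
//   H_out = 1 + 24 + 1 = 26   and   W_out = 2 + 24 + 2 = 28,
// so the output resizes to {1, 3, 26, 28}.
auto pad = Aidge::Pad<2>({1, 1, 2, 2}, "pad1");
```
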
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 18325d80a94f35878ededca839ec809000527c39..cd5b18759cdd743f292054bca91ffee5da722ea6 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -20,7 +20,7 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::Pop_OpImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+Aidge::Elts_t Aidge::Pop_ProdConso::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getRawInput(inputIdx) && "requires valid input");
 
     const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
@@ -30,19 +30,40 @@ Aidge::Elts_t Aidge::Pop_OpImpl::getNbRequiredData(const Aidge::IOIndex_t inputI
 
 void Aidge::Pop_OpImpl::forward() {
     const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+
     assert(op.getInput(0) && "missing input #0");
-    const unsigned int forwardStep = op.template getAttr<PopAttr::ForwardStep>();
-    *op.getOutput(0) = op.getInput(0)->extract({forwardStep});
+    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()});
 }
 
+//////////////////////////////////////////////////////////
+
 const std::string Aidge::Pop_Op::Type = "Pop";
 
-bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
+Aidge::Pop_Op::Pop_Op()
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0)))
+{
+    mImpl = std::make_shared<Pop_OpImpl>(*this);
+}
+
+Aidge::Pop_Op::Pop_Op(const Aidge::Pop_Op& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Pop_Op, *this, op.backend());
     }
-    if (!(getInput(0)->empty())) {
+    else {
+        mImpl = std::make_shared<Pop_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Pop_Op::clone() const {
+    return std::make_shared<Pop_Op>(*this);
+}
+
+bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
         auto inputDims = getInput(0)->dims();
         inputDims.erase(inputDims.begin());
         getOutput(0)->resize(inputDims);
@@ -54,7 +75,7 @@ bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
 
 void Aidge::Pop_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
-    this->template getAttr<PopAttr::ForwardStep>() = 0;
+    mAttributes->template getAttr<PopAttr::ForwardStep>() = 0;
 }
 
 void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
@@ -67,7 +88,17 @@ void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Pop_Op::getAvailableBackends() const {
+    return Registrar<Pop_Op>::getKeys();
+}
+
 void Aidge::Pop_Op::forward() {
     Operator::forward();
-    ++this->template getAttr<PopAttr::ForwardStep>();
+    ++mAttributes->template getAttr<PopAttr::ForwardStep>();
 }
+
+///////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Pop(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Pop_Op>(), name);
+}
\ No newline at end of file
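
A usage note on the refactored Pop above: the operator streams slices of its input along the leading axis, one per scheduler step (dims illustrative):

```cpp
#include "aidge/operator/Pop.hpp"

// Illustrative sketch: forwardDims() drops the leading dimension
// ({T, N, C} -> {N, C}); each forward() call emits input[forwardStep()]
// and increments the counter, which updateConsummerProducer() resets to 0.
auto pop = Aidge::Pop("pop1");
```
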
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 135c792345b0caf1166e671a8dad7d5b49b42ee7..ada71d6cc56c6d88ff64bf720595b220b296801d 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -23,13 +23,7 @@
 const std::string Aidge::Pow_Op::Type = "Pow";
 
 bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
-    }
-
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
-
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
@@ -60,4 +54,14 @@ bool Aidge::Pow_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Pow_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Pow_Op::getAvailableBackends() const {
+    return Registrar<Pow_Op>::getKeys();
+}
+
+////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Pow(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Pow_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 7059ea7e989d789b4cff0ed895fc2c5ec0ad81bc..fdba4ac2e22d857a31779df2e5ff789c3eb92f5c 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -26,10 +26,22 @@
 
 const std::string Aidge::Producer_Op::Type = "Producer";
 
+template <std::size_t DIM>
+Aidge::Producer_Op::Producer_Op(
+            const std::array<Aidge::DimSize_t, DIM>& dims,
+            bool constant)
+    : OperatorTensor(Type, {}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ProdAttr::Constant>(constant)))
+{
+    mOutputs[0]->resize(dims);
+    mImpl = std::make_shared<OperatorImpl>(*this);
+}
 
 Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant)
-    : OperatorTensor(Type, 0, 0, 1),
-      Attributes_(attr<ProdAttr::Constant>(constant))
+    : OperatorTensor(Type, {}, 1),
+      mAttributes(std::make_shared<Attributes_>(
+        attr<ProdAttr::Constant>(constant)))
 {
     mOutputs[0] = tensor; // copy the pointer of the Tensor
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
@@ -47,7 +59,7 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
  */
 Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
 {
     mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0)));
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
@@ -58,6 +70,10 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     }
 }
 
+std::shared_ptr<Aidge::Operator> Aidge::Producer_Op::clone() const {
+    return std::make_shared<Producer_Op>(*this);
+}
+
 void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     if (Registrar<Producer_Op>::exists({name})){
         SET_IMPL_MACRO(Producer_Op, *this, name);
@@ -68,6 +84,10 @@ void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
     mOutputs[0]->setBackend(name, device);
 }
 
+std::set<std::string> Aidge::Producer_Op::getAvailableBackends() const {
+    return Registrar<Producer_Op>::getKeys();
+}
+
 void Aidge::Producer_Op::forward() {
     if (!backend().empty()) {
         mImpl->forward();
@@ -75,3 +95,75 @@ void Aidge::Producer_Op::forward() {
 
     runHooks();
 }
+
+void Aidge::Producer_Op::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const {
+    if (mAttributes->template getAttr<ProdAttr::Constant>()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
+    }
+    OperatorTensor::setOutput(outputIdx, data);
+}
+
+/////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Producer(const std::array<Aidge::DimSize_t, DIM> &dims,
+        const std::string& name,
+        bool constant)
+{
+  static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
+  return std::make_shared<Node>(std::make_shared<Producer_Op>(dims, constant), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Producer<1>(const std::array<Aidge::DimSize_t, 1>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<2>(const std::array<Aidge::DimSize_t, 2>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<3>(const std::array<Aidge::DimSize_t, 3>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<4>(const std::array<Aidge::DimSize_t, 4>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<5>(const std::array<Aidge::DimSize_t, 5>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<6>(const std::array<Aidge::DimSize_t, 6>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<7>(const std::array<Aidge::DimSize_t, 7>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<8>(const std::array<Aidge::DimSize_t, 8>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<9>(const std::array<Aidge::DimSize_t, 9>&, const std::string&, bool);
+template std::shared_ptr<Aidge::Node> Aidge::Producer<10>(const std::array<Aidge::DimSize_t, 10>&, const std::string&, bool);
+
+std::shared_ptr<Aidge::Node> Aidge::Producer(const std::shared_ptr<Aidge::Tensor> tensor,
+            const std::string& name,
+            bool constant)
+{
+    return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor, constant), name);
+}
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::addProducer(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, DIM>& dims,
+        const std::string& extension)
+{
+    AIDGE_ASSERT(inputIdx < gk_IODefaultIndex, "Input index too high. Cannot create Producer");
+    static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
+    const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
+    auto prod = Producer(dims, prodName);
+    prod->addChild(otherNode, 0, inputIdx);
+    otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
+    return prod;
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<1>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 1>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<2>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 2>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<3>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 3>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<4>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 4>& dims,
+        const std::string& extension);
+template std::shared_ptr<Aidge::Node> Aidge::addProducer<5>(std::shared_ptr<Aidge::Node>& otherNode,
+        const IOIndex_t inputIdx,
+        const std::array<Aidge::DimSize_t, 5>& dims,
+        const std::string& extension);
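
The now out-of-line `addProducer()` both builds the Producer node and wires it: the Producer's output #0 becomes the target node's input `inputIdx`, and the node is named after its consumer plus the extension suffix. A minimal sketch (node choice and dims are illustrative):

```cpp
#include "aidge/operator/Producer.hpp"
#include "aidge/operator/ReLU.hpp"

// Illustrative sketch: attach a 2-D Producer to a node's input #0.
void wireProducer() {
    auto relu = Aidge::ReLU("relu1");
    auto prod = Aidge::addProducer<2>(relu, 0, {8, 16}, "x");
    // prod is named "relu1_x"; its output tensor (dims {8, 16}) is now
    // associated with relu's input #0.
}
```
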
diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp
index 7b945a7d62ab0ef7f73a25f6f74430e725d17b48..bda26fa3332ee914325820f47d0babcb622905c8 100644
--- a/src/operator/ReLU.cpp
+++ b/src/operator/ReLU.cpp
@@ -19,7 +19,31 @@
 
 const std::string Aidge::ReLU_Op::Type = "ReLU";
 
+Aidge::ReLU_Op::ReLU_Op(const Aidge::ReLU_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(ReLU_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ReLU_Op::clone() const {
+    return std::make_shared<ReLU_Op>(*this);
+}
+
 void Aidge::ReLU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
     SET_IMPL_MACRO(ReLU_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::ReLU_Op::getAvailableBackends() const {
+    return Registrar<ReLU_Op>::getKeys();
+}
+
+/////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ReLU(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 28e39b6d3387a0371c0505dc0a7b350e83a2bbaf..7935edb050824af92a8f130f975aa09e41ca875f 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -15,6 +15,7 @@
 #include <cstddef>    // std::size_t
 #include <cstdint>    // std::int32_t
 #include <memory>
+#include <numeric>    // std::iota
 #include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
@@ -26,13 +27,33 @@
 
 const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 
-bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ReduceMeanAttr::Axes>(axes),
+        attr<ReduceMeanAttr::KeepDims>(keep_dims),
+        attr<ReduceMeanAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
+{}
+
+Aidge::ReduceMean_Op::ReduceMean_Op(const Aidge::ReduceMean_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
     }
-    if (!getInput(0)->empty()) {
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ReduceMean_Op::clone() const {
+    return std::make_shared<ReduceMean_Op>(*this);
+}
+
+bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
         // make Axes attribute positive
-        std::vector<std::int32_t>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
+        std::vector<std::int32_t>& axes = mAttributes->template getAttr<ReduceMeanAttr::Axes>();
         std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
             if (val < 0)
                 val+=static_cast<std::int32_t>(getInput(0)->nbDims());
@@ -41,7 +62,19 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
 
         // build output dimensions
         std::vector<DimSize_t> outDims = getInput(0)->dims();
-        if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
+
+        if (axes.empty())
+        {
+            if(mAttributes->template getAttr<ReduceMeanAttr::NoopWithEmptyAxes>()) {
+                mOutputs[0]->resize(outDims);
+                return true;
+            }
+            // if no axes are provided and NoopWithEmptyAxes is false, reduce on all axes
+            axes.resize(getInput(0)->nbDims());
+            std::iota(axes.begin(), axes.end(), 0);
+        }
+
+        if (mAttributes->template getAttr<ReduceMeanAttr::KeepDims>()) {
             std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
         }
         else {
@@ -59,4 +92,20 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
 void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(ReduceMean_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::ReduceMean_Op::getAvailableBackends() const {
+    return Registrar<ReduceMean_Op>::getKeys();
+}
+
+Aidge::ReduceMean_Op::~ReduceMean_Op() noexcept = default;
+
+////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ReduceMean(const std::vector<std::int32_t> &axes,
+                                        bool keep_dims,
+                                        bool noop_with_empty_axes,
+                                        const std::string& name) {
+    AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported");
+    return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims, noop_with_empty_axes), name);
 }
\ No newline at end of file
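
The new `NoopWithEmptyAxes` attribute changes what an empty `axes` list means: pass-through when set, reduce over every axis otherwise. Worked shapes for the rules above (all dims illustrative):

```cpp
#include "aidge/operator/ReduceMean.hpp"

// Illustrative sketch:
//   input {2, 3, 4, 5}, axes {1, 3}, keep_dims = true  -> output {2, 1, 4, 1}
//   input {2, 3, 4, 5}, axes {1, 3}, keep_dims = false -> output {2, 4}
//   input {2, 3, 4, 5}, axes {}, noop_with_empty_axes = true  -> output {2, 3, 4, 5}
//   input {2, 3, 4, 5}, axes {}, noop_with_empty_axes = false -> every axis reduced
auto mean = Aidge::ReduceMean({1, 3}, /*keep_dims=*/true,
                              /*noop_with_empty_axes=*/false, "mean1");
```
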
diff --git a/src/operator/ReduceSum.cpp b/src/operator/ReduceSum.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0786f53c6b761e5cd9020352a2ecb92469a609d7
--- /dev/null
+++ b/src/operator/ReduceSum.cpp
@@ -0,0 +1,76 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ReduceSum.hpp"
+
+#include <algorithm>  // std::for_each, std::sort
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int32_t
+#include <memory>
+#include <numeric>    // std::iota
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ReduceSum_Op::Type = "ReduceSum";
+
+bool Aidge::ReduceSum_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        // make Axes attribute positive
+        std::vector<std::int32_t>& axes = mAttributes->template getAttr<ReduceSumAttr::Axes>();
+        std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
+            if (val < 0)
+                val+=static_cast<std::int32_t>(getInput(0)->nbDims());
+        });
+        std::sort(axes.begin(), axes.end());
+
+        // build output dimensions
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+
+        if (axes.empty())
+        {
+            if(mAttributes->template getAttr<ReduceSumAttr::NoopWithEmptyAxes>()) {
+                mOutputs[0]->resize(outDims);
+                return true;
+            }
+            // if no axes are provided and NoopWithEmptyAxes is false, reduce on all axes
+            axes.resize(getInput(0)->nbDims());
+            std::iota(axes.begin(), axes.end(), 0);
+        }
+
+        if (mAttributes->template getAttr<ReduceSumAttr::KeepDims>()) {
+            std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
+        }
+        else {
+            for (auto it = axes.crbegin(); it != axes.crend(); ++it)
+                outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
+        }
+
+        // TODO: change {1} to {} once scalar Tensors are better handled.
+        mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
+        return true;
+    }
+    return false;
+}
+
+void Aidge::ReduceSum_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(ReduceSum_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::ReduceSum_Op::getAvailableBackends() const {
+    return Registrar<ReduceSum_Op>::getKeys();
+}
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index adbd5fae8a11bfc5009ed4b920d28624db71bb0d..0fa9a62816a36ad3afece02052224c966ee121a3 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -28,10 +28,37 @@ void Aidge::Reshape_OpImpl::forward() {
     op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(), op.getInput(0)->size());
 }
 
+//////////////////////////////////////////////////
+
 const std::string Aidge::Reshape_Op::Type = "Reshape";
 
+Aidge::Reshape_Op::Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ReshapeAttr::Shape>(shape),
+        attr<ReshapeAttr::AllowZero>(allowzero)))
+{
+    mImpl = std::make_shared<Reshape_OpImpl>(*this);
+}
+
+Aidge::Reshape_Op::Reshape_Op(const Aidge::Reshape_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Reshape_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Reshape_Op::clone() const {
+    return std::make_shared<Reshape_Op>(*this);
+}
+
 bool Aidge::Reshape_Op::dimsForwarded() const {
-    if (getInput(1) && !getInput(1)->empty()) {
+    if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
         return false;
     }
@@ -40,68 +67,63 @@ bool Aidge::Reshape_Op::dimsForwarded() const {
 }
 
 bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
-    // check input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-
-    if (getInput(0)->empty()) {
-        return false;
-    }
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute Shape
+        if (getInput(1)) {
+            if (!this->shape().empty()) {
+                Log::notice("Reshape_Op: ignoring non-empty Shape attribute because input#1 takes precedence");
+            }
 
-    if (getInput(1) && !getInput(1)->empty()) {
-        if (!this->template getAttr<ReshapeAttr::Shape>().empty()) {
-            Log::notice("Reshape_Op: ignoring non-empty Shape attribute because input#1 takes precedence");
-        }
+            if (!allowDataDependency) {
+                Log::warn("Reshape_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
 
-        if (!allowDataDependency) {
-            Log::warn("Reshape_Op: unable to forwardDims() because output dims are data dependent on input#1");
-            return false;
+            std::shared_ptr<Tensor> fallback;
+            this->shape().clear(); // if both are provided, the input takes precedence over the attribute
+            this->shape().reserve(getInput(1)->size());
+            const auto& shape = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(shape.getImpl()->hostPtr()),
+                        shape.size(),
+                        std::back_inserter(this->shape()));
         }
 
-        std::shared_ptr<Tensor> fallback;
-        this->template getAttr<ReshapeAttr::Shape>().clear(); // If both are provided input would override attrs
-        this->template getAttr<ReshapeAttr::Shape>().reserve(getInput(1)->size());
-        const auto& shape = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(shape.getImpl()->hostPtr()),
-                    shape.size(),
-                    std::back_inserter(this->template getAttr<ReshapeAttr::Shape>()));
-    }
+        AIDGE_ASSERT(!this->shape().empty(), "Missing input#1 or Shape attribute");
 
-    AIDGE_ASSERT(!this->template getAttr<ReshapeAttr::Shape>().empty(), "Missing input#1 or Shape attribute");
-
-    std::vector<DimSize_t> outDims;
-    // variables to handle a negative dimension
-    bool foundNegativeDimension = false;
-    std::size_t outSize = 1;
-    DimIdx_t negativeIndex = 0;
-    for(std::size_t i = 0; i < this->template getAttr<ReshapeAttr::Shape>().size(); ++i)
-    {
-        int64_t dimSize = this->template getAttr<ReshapeAttr::Shape>()[i];
-        if (dimSize < 0) {
-            if (foundNegativeDimension) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
-            }
-            foundNegativeDimension = true;
-            dimSize = 1;
-            negativeIndex = static_cast<DimIdx_t>(i);
-        }
-        else if (dimSize == 0 && !this->template getAttr<ReshapeAttr::AllowZero>())
+        // Compute output dims
+        std::vector<DimSize_t> outDims;
+        // variables to handle a negative dimension
+        bool foundNegativeDimension = false;
+        std::size_t outSize = 1;
+        DimIdx_t negativeIndex = 0;
+        for(std::size_t i = 0; i < this->shape().size(); ++i)
         {
-            dimSize = getInput(0) -> dims()[i];
+            int64_t dimSize = this->shape()[i];
+            if (dimSize < 0) {
+                AIDGE_ASSERT(!foundNegativeDimension, "Found more than one negative dimension in Reshape Operator: {}.", this->shape());
+                foundNegativeDimension = true;
+                dimSize = 1;
+                negativeIndex = static_cast<DimIdx_t>(i);
+            }
+            else if (dimSize == 0 && !this->allowZero())
+            {
+                dimSize = getInput(0)->dims()[i];
+            }
+            outDims.push_back(static_cast<DimSize_t>(dimSize));
+            if (dimSize != 0) {
+                outSize *= static_cast<DimSize_t>(dimSize);
+            }
         }
-        outDims.push_back(static_cast<DimSize_t>(dimSize));
-        if (dimSize != 0) {
-            outSize *= static_cast<DimSize_t>(dimSize);
+
+        if (foundNegativeDimension) {
+            outDims[negativeIndex] = (getInput(0)->size()) / outSize;
         }
-    }
 
-    if (foundNegativeDimension) {
-        outDims[negativeIndex] = (getInput(0) -> size()) / outSize;
+        mOutputs[0]->resize(outDims);
+        return true;
     }
 
-    mOutputs[0]->resize(outDims);
-    return true;
+    return false;
 }
 
 void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
@@ -113,3 +135,16 @@ void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t d
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::Reshape_Op::getAvailableBackends() const {
+    return Registrar<Reshape_Op>::getKeys();
+}
+
+//////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Reshape(const std::vector<std::int64_t>& shape,
+                            bool allowzero,
+                            const std::string &name)
+{
+    return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape, allowzero), name);
+}
\ No newline at end of file
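
Two worked cases for the negative/zero handling above (all dims illustrative):

```cpp
#include "aidge/operator/Reshape.hpp"

// Illustrative sketch:
//   input {2, 3, 4} (size 24), shape {4, -1}          -> output {4, 6}
//     (the single -1 entry absorbs size / outSize = 24 / 4)
//   input {2, 3, 4}, shape {0, -1}, allowzero = false -> output {2, 12}
//     (a 0 entry copies the matching input dimension)
auto reshape = Aidge::Reshape({4, -1}, /*allowzero=*/false, "reshape1");
```
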
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9e5762452e382a31c1e5da25708507653da2e474
--- /dev/null
+++ b/src/operator/Resize.cpp
@@ -0,0 +1,160 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Resize.hpp"
+
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int64_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+#include <fmt/core.h>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::Resize_Op::Type = "Resize";
+
+Aidge::Resize_Op::Resize_Op()
+    : OperatorTensor(Type,
+        {InputCategory::Data,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData},
+        1) {}
+
+/**
+ * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+ * but not its input tensors (the new operator has no input associated).
+ * @param op Operator to copy.
+ */
+
+Aidge::Resize_Op::Resize_Op(const Aidge::Resize_Op& op)
+    : OperatorTensor(op)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Resize_Op, *this, op.backend());
+    }
+    else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Resize_Op::clone() const {
+    return std::make_shared<Resize_Op>(*this);
+}
+
+bool Aidge::Resize_Op::dimsForwarded() const {
+    // any defined optional input (ROI, Scales or Sizes) makes the output dims data dependent
+    if ((getInput(1) && !getInput(1)->undefined())
+        || (getInput(2) && !getInput(2)->undefined())
+        || (getInput(3) && !getInput(3)->undefined())
+        )
+    {
+        // output dims are data dependent
+        return false;
+    }
+
+    return OperatorTensor::dimsForwarded();
+}
+
+bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(getInput(0)->nbDims() == 4,
+            "input tensor must have 4 dimensions (batch, channel, height, width).");
+
+        const bool input1ROIPresent           = getInput(1) && !getInput(1)->undefined();
+        const bool input2ScalesPresent        = getInput(2) && !getInput(2)->undefined();
+        const bool input3SizesPresent         = getInput(3) && !getInput(3)->undefined();
+
+        AIDGE_ASSERT(input2ScalesPresent != input3SizesPresent, "Only one of Scales and Sizes can be specified.");
+
+        if (input1ROIPresent) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Input #1 (ROI) is given and it is not supported.");
+        }
+        else if (input2ScalesPresent)  {
+            if (!allowDataDependency) {
+                Log::warn("Resize_Op: cannot execute forwardDims() because the output dimensions depend on input #2");
+                return false;
+            }
+
+            AIDGE_ASSERT(getInput(0)->nbDims() == getInput(2)->size(),
+                "input #2 (Scales) must provide one value per dimension of input #0.");
+
+            std::vector<DimSize_t>      outDims = getInput(0)->dims();
+            const std::vector<DimSize_t> inDims = getInput(0)->dims();
+
+            std::shared_ptr<Tensor> fallback;
+            const auto& scales = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+
+            for (std::size_t dim=0; dim < getInput(2)->size(); ++dim) {
+                outDims[dim] = inDims[dim]*static_cast<int64_t*>(scales.getImpl()->hostPtr())[dim];
+            }
+
+            mOutputs[0]->resize(outDims);
+            return true;
+        }
+        else if (input3SizesPresent) {
+            if (!allowDataDependency) {
+                Log::warn("Resize_Op: cannot execute forwardDims() because the output dimensions depend on input #3");
+                return false;
+            }
+
+            AIDGE_ASSERT(getInput(0)->nbDims() == getInput(3)->size(),
+                "input #3 (Sizes) must provide one value per dimension of input #0.");
+
+            std::vector<DimSize_t> outDims = getInput(0)->dims();
+
+            std::shared_ptr<Tensor> fallback;
+            const auto& sizes = getInput(3)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+
+            for (std::size_t dim=0; dim < getInput(3)->size(); ++dim) {
+                outDims[dim] = static_cast<int64_t*>(sizes.getImpl()->hostPtr())[dim];
+            }
+
+            mOutputs[0]->resize(outDims);
+            return true;
+        }
+        else {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Either input #2 (Scales) or input #3 (Sizes) must be provided.");
+        }
+    }
+
+    return false;
+}
+
+void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Resize_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+
+    // By default, automatically set backend for all inputs: roi, scales and sizes
+    if(getInput(1)) {
+        getInput(1)->setBackend(name, device);
+    }
+    if(getInput(2)) {
+        getInput(2)->setBackend(name, device);
+    }
+    if(getInput(3)) {
+        getInput(3)->setBackend(name, device);
+    }
+}
+
+std::set<std::string> Aidge::Resize_Op::getAvailableBackends() const {
+    return Registrar<Resize_Op>::getKeys();
+}
+
+/////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Resize(const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Resize_Op>(), name);
+}
\ No newline at end of file
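
Because the output geometry is read from the Scales or Sizes input tensors, `forwardDims()` above only succeeds with `allowDataDependency == true`, and exactly one of those two inputs may be defined. Worked shapes (illustrative):

```cpp
#include "aidge/operator/Resize.hpp"

// Illustrative sketch, for an NCHW input of dims {1, 3, 32, 32}:
//   Scales (input #2) = {1, 1, 2, 2}   -> output {1, 3, 64, 64}
//   Sizes  (input #3) = {1, 3, 48, 48} -> output {1, 3, 48, 48}
// Defining both (or neither) of inputs #2/#3 fails, and a defined ROI
// (input #1) is rejected as unsupported.
auto resize = Aidge::Resize("resize1");
```
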
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
index dc5e272210feb09fd5dac6ba4b16f9ba8dc93bf0..5ac08cd2245e0caa3ca7072c70ccc69bcfcf9558 100644
--- a/src/operator/Scaling.cpp
+++ b/src/operator/Scaling.cpp
@@ -20,7 +20,44 @@
 
 const std::string Aidge::Scaling_Op::Type = "Scaling";
 
+Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ScalingAttr::ScalingFactor>(scalingFactor),
+        attr<ScalingAttr::QuantizedNbBits>(nbBits),
+        attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
+{}
+
+Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Scaling_Op::clone() const {
+    return std::make_shared<Scaling_Op>(*this);
+}
+
 void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Scaling_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Scaling_Op::getAvailableBackends() const {
+    return Registrar<Scaling_Op>::getKeys();
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Scaling(float scalingFactor,
+                                     std::size_t quantizedNbBits,
+                                     bool isOutputUnsigned,
+                                     const std::string& name)
+{
+    return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor, quantizedNbBits, isOutputUnsigned), name);
 }
\ No newline at end of file
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index d11cf39e1cd301d49f21863dcb1f250e96c6e502..29a9ee6252a0c2baa6e07bc56e60650685db6bdd 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -21,42 +21,64 @@
 
 void Aidge::Shape_OpImpl::forward() {
     const Shape_Op& op = dynamic_cast<const Shape_Op&>(mOp);
-    const auto start = op.template getAttr<std::int64_t>("Start");
-    const auto end = op.template getAttr<std::int64_t>("End");
+    const auto start = op.start();
+    const auto end = op.end();
 
-    op.getOutput(0)->getImpl()->copyCast(std::next(op.getInput(0)->dims().data(), 
+    op.getOutput(0)->getImpl()->copyCast(std::next(op.getInput(0)->dims().data(),
                                                    start),
                                          DataType::UInt64,
                                          end - start + 1);
 }
 
+///////////////////////////////////////////////
+
 const std::string Aidge::Shape_Op::Type = "Shape";
 
-bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check data input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
+Aidge::Shape_Op::Shape_Op(const std::int64_t start, const std::int64_t end)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<ShapeAttr::Start>(start),
+        attr<ShapeAttr::End>(end)))
+{
+    mImpl = std::make_shared<Shape_OpImpl>(*this);
+}
 
-    if (getInput(0)->empty()) {
-        return false;
+Aidge::Shape_Op::Shape_Op(const Aidge::Shape_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Shape_Op, *this, op.backend());
     }
+    else {
+        mImpl = std::make_shared<Shape_OpImpl>(*this);
+    }
+}
 
-    if (this->template getAttr<std::int64_t>("Start") < 0)
-        this->template getAttr<std::int64_t>("Start") += static_cast<std::int64_t>(getInput(0)->nbDims());
-    if (this->template getAttr<std::int64_t>("End") < 0)
-        this->template getAttr<std::int64_t>("End") += static_cast<std::int64_t>(getInput(0)->nbDims());
+std::shared_ptr<Aidge::Operator> Aidge::Shape_Op::clone() const {
+    return std::make_shared<Shape_Op>(*this);
+}
 
-    const auto start = this->template getAttr<std::int64_t>("Start");
-    const auto end = this->template getAttr<std::int64_t>("End");
-    const auto nbDims = static_cast<std::int64_t>(getInput(0)->nbDims());
-    const DimSize_t roi = end - start + 1;
+bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        if (this->start() < 0)
+            this->start() += static_cast<std::int64_t>(getInput(0)->nbDims());
+        if (this->end() < 0)
+            this->end() += static_cast<std::int64_t>(getInput(0)->nbDims());
+
+        const auto start = this->start();
+        const auto end = this->end();
+        const auto nbDims = static_cast<std::int64_t>(getInput(0)->nbDims());
+        const DimSize_t roi = end - start + 1;
 
-    AIDGE_ASSERT(start < nbDims && end < nbDims, "'Start' and 'End' must be < {}", nbDims);
-    AIDGE_ASSERT(roi> 1, "Unvalid ROI for Shape");
+        AIDGE_ASSERT(start < nbDims && end < nbDims, "'start' and 'end' must be < {}", nbDims);
+        AIDGE_ASSERT(roi > 1, "Invalid ROI for Shape");
+
+        mOutputs[0]->resize({roi});
+        return true;
+    }
 
-    mOutputs[0]->resize({roi});
-    return true;
+    return false;
 }
 
 void Aidge::Shape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
@@ -68,3 +90,13 @@ void Aidge::Shape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::Shape_Op::getAvailableBackends() const {
+    return Registrar<Shape_Op>::getKeys();
+}
+
+//////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Shape(const std::int64_t start, const std::int64_t end, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Shape_Op>(start, end), name);
+}
\ No newline at end of file
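
`Start` and `End` are inclusive and may be negative; negatives are normalized by adding the input rank before the bounds check. Worked example (dims illustrative):

```cpp
#include "aidge/operator/Shape.hpp"

// Illustrative sketch, for an input of dims {2, 3, 4, 5}:
//   Shape(0, 3)   -> output {4} holding [2, 3, 4, 5]
//   Shape(1, 2)   -> output {2} holding [3, 4]
//   Shape(-2, -1) normalizes to (2, 3) -> output {2} holding [4, 5]
auto shape = Aidge::Shape(/*start=*/1, /*end=*/2, "shape1");
```
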
diff --git a/src/operator/ShiftGELU.cpp b/src/operator/ShiftGELU.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..bd229e6cf58a430922d08cff5301aa16ef636d5e
--- /dev/null
+++ b/src/operator/ShiftGELU.cpp
@@ -0,0 +1,53 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ShiftGELU.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ShiftGELU_Op::Type = "ShiftGELU";
+
+Aidge::ShiftGELU_Op::ShiftGELU_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::ShiftGELU_Op::ShiftGELU_Op(const Aidge::ShiftGELU_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(ShiftGELU_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::ShiftGELU_Op::clone() const {
+    return std::make_shared<ShiftGELU_Op>(*this);
+}
+
+void Aidge::ShiftGELU_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(ShiftGELU_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::ShiftGELU_Op::getAvailableBackends() const {
+    return Registrar<ShiftGELU_Op>::getKeys();
+}
+
+///////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ShiftGELU(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ShiftGELU_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/ShiftMax.cpp b/src/operator/ShiftMax.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..58d4bf46100ce116ad4a179e972cbef81bc5b5c1
--- /dev/null
+++ b/src/operator/ShiftMax.cpp
@@ -0,0 +1,57 @@
+/********************************************************************************
+ * Copyright (c) 2024 Thales
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ * Author: Lucas RAKOTOARIVONY, Thales Research & Technology France
+ * Date: 25.06.2024
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/ShiftMax.hpp"
+
+#include <memory>
+#include <string>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Types.h"
+
+const std::string Aidge::ShiftMax_Op::Type = "ShiftMax";
+
+Aidge::ShiftMax_Op::ShiftMax_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::ShiftMax_Op::ShiftMax_Op(const Aidge::ShiftMax_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(ShiftMax_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+/**
+ * @brief Clone the operator using its copy-constructor.
+ * @see Operator::ShiftMax_Op
+ */
+std::shared_ptr<Aidge::Operator> Aidge::ShiftMax_Op::clone() const {
+    return std::make_shared<ShiftMax_Op>(*this);
+}
+
+void Aidge::ShiftMax_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(ShiftMax_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::ShiftMax_Op::getAvailableBackends() const {
+    return Registrar<ShiftMax_Op>::getKeys();
+}
+
+/////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::ShiftMax(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<ShiftMax_Op>(), name);
+}
\ No newline at end of file
diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp
index a6edcf823695f95253d6c56e45975480909679d3..d97f8c52341dee4e6e0840afa6e023d8a4e3fd52 100644
--- a/src/operator/Sigmoid.cpp
+++ b/src/operator/Sigmoid.cpp
@@ -20,7 +20,33 @@
 
 const std::string Aidge::Sigmoid_Op::Type = "Sigmoid";
 
+Aidge::Sigmoid_Op::Sigmoid_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::Sigmoid_Op::Sigmoid_Op(const Aidge::Sigmoid_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl){
+        SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Sigmoid_Op::clone() const {
+    return std::make_shared<Sigmoid_Op>(*this);
+}
+
 void Aidge::Sigmoid_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Sigmoid_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Sigmoid_Op::getAvailableBackends() const {
+    return Registrar<Sigmoid_Op>::getKeys();
+}
+
+///////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Sigmoid(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Sigmoid_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index bc888d419987e5d75c9ceb60e7baf8817bca3d2d..3bdee8c13c1759261140d634940b0a4e81210084 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -11,7 +11,6 @@
 
 #include "aidge/operator/Slice.hpp"
 
-#include <cassert>
 #include <cstddef>
 #include <cstdint>
 #include <string>
@@ -28,10 +27,45 @@
 
 const std::string Aidge::Slice_Op::Type = "Slice";
 
+Aidge::Slice_Op::Slice_Op(const std::vector<std::int64_t>& starts,
+                        const std::vector<std::int64_t>& ends,
+                        const std::vector<std::int8_t>& axes,
+                        const std::vector<std::int64_t>& steps)
+    : OperatorTensor(Type,
+        {InputCategory::Data,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData,
+            InputCategory::OptionalData},
+        1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<SliceAttr::Starts>(starts),
+        attr<SliceAttr::Ends>(ends),
+        attr<SliceAttr::Axes>(axes),
+        attr<SliceAttr::Steps>(steps)))
+{}
+
+Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op &op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Slice_Op, *this, op.backend());
+    }
+    else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Slice_Op::clone() const {
+    return std::make_shared<Slice_Op>(*this);
+}
+
 bool Aidge::Slice_Op::dimsForwarded() const {
-    if ((getInput(1) && !getInput(1)->empty())
-        || (getInput(2) && !getInput(2)->empty())
-        || (getInput(3) && !getInput(3)->empty()))
+    if ((getInput(1) && !getInput(1)->undefined())
+        || (getInput(2) && !getInput(2)->undefined())
+        || (getInput(3) && !getInput(3)->undefined())
+        || (getInput(4) && !getInput(4)->undefined()))
     {
         // output dims are data dependent
         return false;
@@ -41,137 +76,152 @@ bool Aidge::Slice_Op::dimsForwarded() const {
 }
 
 bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-
-    if (getInput(0)->empty()) {
-        return false;
-    }
+    if (inputsAssociated()) {
+        std::shared_ptr<Tensor> fallback;
+        // Copy optional input #1, if present, to attribute Starts
+        if (getInput(1)) {
+            if (!this->starts().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Starts attribute because input#1 takes precedence");
+            }
 
-   std::shared_ptr<Tensor> fallback;
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
 
-    if (getInput(1) && !getInput(1)->empty()) {
-        if (!this->template getAttr<SliceAttr::Starts>().empty()) {
-            Log::notice("Slice_Op: ignoring non-empty Starts attribute because input#1 takes precedence");
+            this->starts().clear(); // if both are provided, the input takes precedence over the attribute
+            this->starts().reserve(getInput(1)->size());
+            const auto& starts = getInput(1)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(starts.getImpl()->hostPtr()),
+                        starts.size(),
+                        std::back_inserter(this->starts()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#1");
-            return false;
-        }
+        AIDGE_ASSERT(!this->starts().empty(), "Missing input#1 or Starts attribute");
 
-        this->template getAttr<SliceAttr::Starts>().clear(); // If both are provided input would override attrs
-        this->template getAttr<SliceAttr::Starts>().reserve(getInput(1)->size());
-        const auto& starts = getInput(1)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(starts.getImpl()->hostPtr()),
-                    starts.size(),
-                    std::back_inserter(this->template getAttr<SliceAttr::Starts>()));
-    }
+        // Copy optional input #2, if present, to attribute Ends
+        if (getInput(2)) {
+            if (!this->ends().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Ends attribute because input#2 takes precedence");
+            }
 
-    AIDGE_ASSERT(!this->template getAttr<SliceAttr::Starts>().empty(), "Missing input#1 or Starts attribute");
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#2");
+                return false;
+            }
 
-    if (getInput(2) && !getInput(2)->empty()) {
-        if (!this->template getAttr<SliceAttr::Ends>().empty()) {
-            Log::notice("Slice_Op: ignoring non-empty Ends attribute because input#2 takes precedence");
+            this->ends().clear(); // if both are provided, the input takes precedence over the attribute
+            this->ends().reserve(getInput(2)->size());
+            const auto& ends = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(ends.getImpl()->hostPtr()),
+                        ends.size(),
+                        std::back_inserter(this->ends()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#2");
-            return false;
-        }
+        AIDGE_ASSERT(!this->ends().empty(), "Missing input#2 or Ends attribute");
 
-        this->template getAttr<SliceAttr::Ends>().clear(); // If both are provided input would override attrs
-        this->template getAttr<SliceAttr::Ends>().reserve(getInput(2)->size());
-        const auto& ends = getInput(2)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(ends.getImpl()->hostPtr()),
-                    ends.size(),
-                    std::back_inserter(this->template getAttr<SliceAttr::Ends>()));
-    }
+        // Copy optional input #3, if present, to attribute Axes
+        if (getInput(3)) {
+            if (!this->axes().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Axes attribute because input#3 takes precedence");
+            }
 
-    AIDGE_ASSERT(!this->template getAttr<SliceAttr::Ends>().empty(), "Missing input#2 or Ends attribute");
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#3");
+                return false;
+            }
 
-    if (getInput(3) && !getInput(3)->empty()) {
-        if (!this->template getAttr<SliceAttr::Axes>().empty()) {
-            Log::notice("Slice_Op: ignoring non-empty Axes attribute because input#3 takes precedence");
+            this->axes().clear(); // if both are provided, the input takes precedence over the attribute
+            this->axes().reserve(getInput(3)->size());
+            const auto& axes = getInput(3)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+            std::copy_n(static_cast<int8_t*>(axes.getImpl()->hostPtr()),
+                        axes.size(),
+                        std::back_inserter(this->axes()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#3");
-            return false;
-        }
+        AIDGE_ASSERT(!this->axes().empty(), "Missing input#3 or Axes attribute");
 
-        this->template getAttr<SliceAttr::Axes>().clear(); // If both are provided input would override attrs
-        this->template getAttr<SliceAttr::Axes>().reserve(getInput(3)->size());
-        const auto& axes = getInput(3)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
-        std::copy_n(static_cast<int8_t*>(axes.getImpl()->hostPtr()),
-                    axes.size(),
-                    std::back_inserter(this->template getAttr<SliceAttr::Axes>()));
-    }
+        // Copy optional input #4, if present, to attribute Steps
+        if (getInput(4)) {
+            if (!this->steps().empty()) {
+                Log::notice("Slice_Op: ignoring non-empty Steps attribute because input#4 takes precedence");
+            }
 
-    AIDGE_ASSERT(!this->template getAttr<SliceAttr::Axes>().empty(), "Missing input#3 or Axes attribute");
+            if (!allowDataDependency) {
+                Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#4");
+                return false;
+            }
 
-    if (getInput(4) && !getInput(4)->empty()) {
-        if (!this->template getAttr<SliceAttr::Steps>().empty()) {
-            Log::notice("Slice_Op: ignoring non-empty Steps attribute because input#4 takes precedence");
+            this->steps().clear(); // If both are provided, the input overrides the attribute
+            this->steps().reserve(getInput(4)->size());
+            const auto& steps = getInput(4)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
+            std::copy_n(static_cast<int64_t*>(steps.getImpl()->hostPtr()),
+                        steps.size(),
+                        std::back_inserter(this->steps()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Slice_Op: unable to forwardDims() because output dims are data dependent on input#4");
-            return false;
+        // Fill Steps attr if empty
+        if (this->steps().empty()) {
+            // If the Steps input is not provided, the default step is 1
+            this->steps() = std::vector<std::int64_t>(this->axes().size(), 1);
         }
 
-        this->template getAttr<SliceAttr::Steps>().clear(); // If both are provided input would override attrs
-        this->template getAttr<SliceAttr::Steps>().reserve(getInput(4)->size());
-        const auto& steps = getInput(4)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
-        std::copy_n(static_cast<int64_t*>(steps.getImpl()->hostPtr()),
-                    steps.size(),
-                    std::back_inserter(this->template getAttr<SliceAttr::Steps>()));
-    }
-    // Fill Steps attr if empty
-    if(this->template getAttr<SliceAttr::Steps>().empty()) {
-        // In case the input Steps is not provided, default value is 1
-        this->template getAttr<SliceAttr::Steps>() = std::vector<std::int64_t>(this->template getAttr<SliceAttr::Axes>().size(), 1);
-    }
-
-    const DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
-    for (std::size_t i = 0; i < nbAxes; ++i) {
-        const DimIdx_t axis = this->template getAttr<SliceAttr::Axes>()[i] >= 0 ?
-                        static_cast<DimIdx_t>(this->template getAttr<SliceAttr::Axes>()[i]) :
-                        static_cast<DimIdx_t>(this->template getAttr<SliceAttr::Axes>()[i] + static_cast<DimIdx_t>(getInput(0)->nbDims()));
-        const DimSize_t start = this->template getAttr<SliceAttr::Starts>()[i] >= 0 ?
-                            static_cast<DimSize_t>(this->template getAttr<SliceAttr::Starts>()[i]) :
-                            static_cast<DimSize_t>(this->template getAttr<SliceAttr::Starts>()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
-        const DimSize_t end = this->template getAttr<SliceAttr::Ends>()[i] >= 0 ?
-                        static_cast<DimSize_t>(this->template getAttr<SliceAttr::Ends>()[i]) :
-                        static_cast<DimSize_t>(this->template getAttr<SliceAttr::Ends>()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
-        const std::int64_t step = this->template getAttr<SliceAttr::Steps>()[i];
-
-        AIDGE_ASSERT(step != 0, "Slice_Op: Step must be a non-zero value!");
-        if(step * (static_cast<int64_t>(end) - static_cast<int64_t>(start)) < 0) {
-            if(step < 0) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step is negative we must have End < Start", type());
-            }
-            else {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step is positive we must have Start < End", type());
+        // Compute output dims
+        const DimSize_t nbAxes = this->axes().size();
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        for (std::size_t i = 0; i < nbAxes; ++i) {
+            const DimIdx_t axis = this->axes()[i] >= 0 ?
+                            static_cast<DimIdx_t>(this->axes()[i]) :
+                            static_cast<DimIdx_t>(this->axes()[i] + static_cast<DimIdx_t>(getInput(0)->nbDims()));
+            const DimSize_t start = this->starts()[i] >= 0 ?
+                                static_cast<DimSize_t>(this->starts()[i]) :
+                                static_cast<DimSize_t>(this->starts()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
+            const DimSize_t end = this->ends()[i] >= 0 ?
+                            static_cast<DimSize_t>(this->ends()[i]) :
+                            static_cast<DimSize_t>(this->ends()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
+            const std::int64_t step = this->steps()[i];
+
+            AIDGE_ASSERT(step != 0, "Slice_Op: Step ({}) must be non-zero on axis {}!", step, axis);
+            if (step * (static_cast<int64_t>(end) - static_cast<int64_t>(start)) < 0) {
+                if (step < 0) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step ({}) is negative, so End ({}) must be lower than Start ({}) on axis {}", type(), step, end, start, axis);
+                }
+                else {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: Step ({}) is positive, so Start ({}) must be lower than End ({}) on axis {}", type(), step, start, end, axis);
+                }
             }
-        }
 
-        const std::size_t sliceLength = static_cast<std::size_t>(std::ceil((static_cast<float>(end) - static_cast<float>(start)) / static_cast<float>(step)));
-        // Check if slice length is valid
-        if (sliceLength > getInput(0)->dims()[axis])
-        {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Slice_Op: ROI of Slice operator out of bounds");
+            const std::size_t sliceLength = static_cast<std::size_t>(std::ceil((static_cast<float>(end) - static_cast<float>(start)) / static_cast<float>(step)));
+            // Check if slice length is valid
+            if (sliceLength > getInput(0)->dims()[axis])
+            {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Slice_Op: ROI length ({}) out of bounds (axis size {}) on axis {}, with (Start, End, Step) = ({}, {}, {})",
+                    sliceLength, getInput(0)->dims()[axis], axis, start, end, step);
+            }
+            outDims[axis] = sliceLength;
         }
-        outDims[axis] = sliceLength;
+        mOutputs[0]->resize(outDims);
+        return true;
     }
-    mOutputs[0]->resize(outDims);
-    return true;
+
+    return false;
 }
 
 void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(Slice_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::Slice_Op::getAvailableBackends() const {
+    return Registrar<Slice_Op>::getKeys();
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Slice(const std::vector<std::int64_t>& starts,
+                                   const std::vector<std::int64_t>& ends,
+                                   const std::vector<std::int8_t>& axes,
+                                   const std::vector<std::int64_t>& steps,
+                                   const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes, steps), name);
+}
\ No newline at end of file
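
Editor's note: for readers skimming the Slice rework above, a minimal usage sketch of the factory defined at the end of this file (a hypothetical call site, assuming only the aidge_core headers and the signature shown above). The per-axis output length follows the ceil((end - start) / step) rule implemented in forwardDims():

```cpp
// Hypothetical usage sketch, not part of the patch.
#include "aidge/operator/Slice.hpp"

// Keep elements [1, 4) of axis 0 with step 1: forwardDims() computes
// ceil((4 - 1) / 1) = 3 elements on that axis.
auto slice = Aidge::Slice(/*starts=*/{1}, /*ends=*/{4},
                          /*axes=*/{0}, /*steps=*/{1}, "slice_example");
```
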
diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp
index 612c61b0f66b97eb4630214538a22154a67b80d8..ad894c5e56a674a452d0388f88a7e4ad268dd216 100644
--- a/src/operator/Softmax.cpp
+++ b/src/operator/Softmax.cpp
@@ -20,7 +20,38 @@
 
 const std::string Aidge::Softmax_Op::Type = "Softmax";
 
+Aidge::Softmax_Op::Softmax_Op(std::int32_t axis)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<SoftmaxAttr::Axis>(axis)))
+{}
+
+Aidge::Softmax_Op::Softmax_Op(const Aidge::Softmax_Op& op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Softmax_Op::clone() const {
+    return std::make_shared<Softmax_Op>(*this);
+}
+
 void Aidge::Softmax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Softmax_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Softmax_Op::getAvailableBackends() const {
+    return Registrar<Softmax_Op>::getKeys();
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Softmax(std::int32_t axis, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
 }
\ No newline at end of file
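
Editor's note: the matching factory call for the new explicit Softmax constructor, as a hypothetical sketch grounded in the signature above:

```cpp
// Hypothetical usage sketch, not part of the patch.
#include "aidge/operator/Softmax.hpp"

// Applies softmax along axis 1 of the input tensor.
auto softmax = Aidge::Softmax(/*axis=*/1, "softmax_example");
```
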
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index 5d0493ea4da0b80bf572a33fa4ee466804d0d270..e3ed13588d8c2b5ddde91d37fc926d675f0666a3 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -28,8 +28,8 @@
 
 void Aidge::Split_OpImpl::forward() {
     const Split_Op& op = dynamic_cast<const Split_Op&>(mOp);
-    const auto axis = op.template getAttr<std::int8_t>("Axis");
-    const auto splits = op.template getAttr<std::vector<DimSize_t>>("Split");
+    const auto axis = op.axis();
+    const auto splits = op.split();
     const auto dims = op.getInput(0)->dims();
 
     //Compute pre/post axis strides
@@ -37,25 +37,54 @@ void Aidge::Split_OpImpl::forward() {
     const std::size_t stride_post = std::accumulate(dims.crbegin(), dims.crbegin() + dims.size() -1 - axis, 1, std::multiplies<std::size_t>());
     for (auto i = 0; i < op.nbOutputs(); ++i)
     {
-        DimIdx_t chunkIdxOnAxis = std::accumulate(splits.cbegin(), splits.cbegin() + i, 0) * stride_post;
-        DimIdx_t offset = 0;
+        DimSize_t chunkIdxOnAxis = std::accumulate(splits.cbegin(), splits.cbegin() + i, DimSize_t(0)) * stride_post;
+        DimSize_t offset = 0;
         for (std::size_t j = 0; j < stride_pre; ++j)
         {
             // Compute chunk position in input tensor
-            DimIdx_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
+            DimSize_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
             // Copy chunk in output
             op.getOutput(i)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(idx),
-                                             splits[i] * stride_post, offset);
+                                            splits[i] * stride_post, offset);
             offset += splits[i] * stride_post;
         }
 
     }
 }
 
+/////////////////////////////////////////////////////
+
 const std::string Aidge::Split_Op::Type = "Split";
 
+Aidge::Split_Op::Split_Op(std::int8_t axis,
+                        Aidge::DimSize_t nbOutputs,
+                        const std::vector<Aidge::DimSize_t>& split)
+    : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
+    mAttributes(std::make_shared<Attributes_>(
+        attr<SplitAttr::Axis>(axis),
+        attr<SplitAttr::Split>(split)))
+{
+    mImpl = std::make_shared<Split_OpImpl>(*this);
+}
+
+Aidge::Split_Op::Split_Op(const Aidge::Split_Op &op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Split_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Split_OpImpl>(*this);
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Split_Op::clone() const {
+    return std::make_shared<Split_Op>(*this);
+}
+
 bool Aidge::Split_Op::dimsForwarded() const {
-    if ((getInput(1) && !getInput(1)->empty()))
+    if (getInput(1) && !getInput(1)->undefined())
     {
         // output dims are data dependent
         return false;
@@ -65,66 +94,63 @@ bool Aidge::Split_Op::dimsForwarded() const {
 }
 
 bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
-    // check inputs have been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
-    }
-
-    if (getInput(0)->empty()) {
-        return false;
-    }
-
-    std::shared_ptr<Tensor> fallback;
-
-    if (getInput(1) && !getInput(1)->empty()) { // Split is given, replace
-        if (!this->template getAttr<SplitAttr::Split>().empty()) {
-            Log::notice("Split_Op: ignoring non-empty Split attribute because input#1 takes precedence");
+    if (inputsAssociated()) {
+        // Copy optional input #1, if present, to attribute Split
+        if (getInput(1)) {
+            if (!this->split().empty()) {
+                Log::notice("Split_Op: ignoring non-empty Split attribute because input#1 takes precedence");
+            }
+
+            if (!allowDataDependency) {
+                Log::warn("Split_Op: unable to forwardDims() because output dims are data dependent on input#1");
+                return false;
+            }
+
+            std::shared_ptr<Tensor> fallback;
+            this->split().clear(); // If both are provided, the input overrides the attribute
+            this->split().reserve(getInput(1)->size());
+            const auto& splits = getInput(1)->refCastFrom(fallback, NativeType<DimSize_t>::type, "cpu");
+            std::copy_n(static_cast<DimSize_t*>(splits.getImpl()->hostPtr()),
+                        splits.size(),
+                        std::back_inserter(this->split()));
         }
 
-        if (!allowDataDependency) {
-            Log::warn("Split_Op: unable to forwardDims() because output dims are data dependent on input#1");
-            return false;
-        }
+        // Compute output dims
+        if (this->axis() < 0)
+            this->axis() += static_cast<std::int8_t>(getInput(0)->nbDims());
 
-        this->template getAttr<SplitAttr::Split>().reserve(getInput(1)->size());
-        const auto& splits = getInput(1)->refCastFrom(fallback, NativeType<DimSize_t>::type, "cpu");
-        std::copy_n(static_cast<DimSize_t*>(splits.getImpl()->hostPtr()),
-                    splits.size(),
-                    std::back_inserter(this->template getAttr<SplitAttr::Split>()));
-    }
+        DimSize_t dimToSplit = getInput(0)->dims()[this->axis()];
+        DimSize_t nbOutput = this->nbOutputs();
+        // Fill Split attr if empty
+        if(this->split().empty()) {
+            // In case the input Split is not provided, divide the dimension of Axis into equal slices
+            AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: the number of outputs ({}) must be smaller than the dimension to split ({}).", nbOutput, dimToSplit);
+            DimSize_t baseSliceSize = dimToSplit / nbOutput;
 
-    if (this->template getAttr<std::int8_t>("Axis") < 0)
-        this->template getAttr<std::int8_t>("Axis") += static_cast<std::int8_t>(getInput(0)->nbDims());
+            DimSize_t remainder = dimToSplit % nbOutput;
 
-    DimSize_t dimToSplit = getInput(0)->dims()[this->template getAttr<std::int8_t>("Axis")];
-    DimSize_t nbOutput = this->nbOutputs();
-    // Fill Split attr if empty
-    if(this->template getAttr<SplitAttr::Split>().empty()) {
-        // In case the input Split is not provided, divide the dimension of Axis into equal slices
-        AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: Output number {} musn't be bigger than dimension {}.", nbOutput, dimToSplit);
-        DimSize_t baseSliceSize = dimToSplit / nbOutput;
+            for (DimSize_t i = 0; i < static_cast<DimSize_t>(nbOutput - 1); ++i) {
+                this->split().push_back(baseSliceSize);
+            }
+            this->split().push_back(baseSliceSize + remainder);
+        }
 
-        DimSize_t remainder = dimToSplit % nbOutput;
+        const auto splits = this->split();
+        AIDGE_ASSERT(splits.size() == nbOutput, "Split_Op: the number of slices ({}) must be equal to the number of outputs ({})", splits.size(), nbOutput);
+        DimSize_t totalSplitSize = std::accumulate(splits.cbegin(), splits.cend(), DimSize_t(0));
+        AIDGE_ASSERT(totalSplitSize == dimToSplit, "Split_Op: total chunks size ({}) is different from the dimension size ({}).", totalSplitSize, dimToSplit);
 
-        for (DimSize_t i = 0; i < static_cast<DimSize_t>(nbOutput -1); ++i) {
-                this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize);
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        for (std::size_t i = 0; i < nbOutput; ++i)
+        {
+            outDims[this->axis()] = this->split()[i];
+            mOutputs[i]->resize(outDims);
         }
-        this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize + remainder);
-    }
 
-    const auto splits = this->template getAttr<SplitAttr::Split>();
-    AIDGE_ASSERT(splits.size() == nbOutput, "Split_Op: number of slices {} must be equal to number of outputs {}", splits, nbOutput);
-    DimSize_t totalSplitSize = std::accumulate(splits.cbegin(), splits.cend(), 0);
-    AIDGE_ASSERT(totalSplitSize == dimToSplit, "Split_Op: Total chunks size {} is different from dimension size {}.", totalSplitSize, dimToSplit);
-
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
-    for (std::size_t i = 0; i < nbOutput; ++i)
-    {
-        outDims[this->template getAttr<std::int8_t>("Axis")] = this->template getAttr<SplitAttr::Split>()[i];
-        mOutputs[i]->resize(outDims);
+        return true;
     }
 
-    return true;
+    return false;
 }
 
 void Aidge::Split_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
@@ -138,5 +164,18 @@ void Aidge::Split_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t dev
     {
         mOutputs[i]->setBackend(name, device);
     }
-    
+
 }
+
+std::set<std::string> Aidge::Split_Op::getAvailableBackends() const {
+    return Registrar<Split_Op>::getKeys();
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Split(Aidge::DimSize_t nbOutput,
+                                   std::int8_t axis,
+                                   const std::vector<Aidge::DimSize_t>& split,
+                                   const std::string &name) {
+    return std::make_shared<Node>(std::make_shared<Split_Op>(axis, nbOutput, split), name);
+}
\ No newline at end of file
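
Editor's note: when neither the Split attribute nor input#1 is provided, the reworked forwardDims() divides the axis evenly and assigns the remainder to the last output. A hypothetical sketch of that default behaviour, using the factory defined above:

```cpp
// Hypothetical usage sketch, not part of the patch.
#include "aidge/operator/Split.hpp"

// Splitting an axis of size 10 into 3 outputs with an empty split vector
// yields chunks {3, 3, 4}: baseSliceSize = 10 / 3 = 3, remainder 1 goes last.
auto split = Aidge::Split(/*nbOutput=*/3, /*axis=*/0, /*split=*/{}, "split_example");
```
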
diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp
index d8ac8b8b0bf28110bd52493d7833f64e9e80fc6a..bd3286f098cd5c6985d7f33f88b723523ef94765 100644
--- a/src/operator/Sqrt.cpp
+++ b/src/operator/Sqrt.cpp
@@ -14,13 +14,39 @@
 #include <memory>
 #include <string>
 
+#include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 const std::string Aidge::Sqrt_Op::Type = "Sqrt";
 
+Aidge::Sqrt_Op::Sqrt_Op(const Aidge::Sqrt_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Sqrt_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Sqrt_Op::clone() const {
+    return std::make_shared<Sqrt_Op>(*this);
+}
+
 void Aidge::Sqrt_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Sqrt_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Sqrt_Op::getAvailableBackends() const {
+    return Registrar<Sqrt_Op>::getKeys();
+}
+
+////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Sqrt(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Sqrt_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a8b20d21ae1f6c7bfba1a9e52d039f292b6aa62e
--- /dev/null
+++ b/src/operator/Squeeze.cpp
@@ -0,0 +1,168 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Squeeze.hpp"
+
+#include <algorithm>
+#include <bitset>
+#include <cstdint>
+#include <fmt/core.h>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Log.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+const std::string Squeeze_Op::Type = "Squeeze";
+
+bool Squeeze_Op::dimsForwarded() const {
+  if (getInput(1) && !getInput(1)->undefined()) {
+    // output dims are data dependent
+    return false;
+  }
+
+  return OperatorTensor::dimsForwarded();
+}
+
+bool Squeeze_Op::forwardDims(bool allowDataDependency) {
+  // error checking
+  if (!inputsAssociated(false) || getInput(0)->undefined()) {
+    return false;
+  }
+
+  std::shared_ptr<Tensor> fallback;
+  // Input 1 is axes to squeeze (can also be given via attribute)
+  if (getInput(1)) {
+    if (!this->axes().empty()) {
+      Log::notice("{} : ignoring non-empty axes attribute because input#1 "
+                  "takes precedence",
+                  type());
+    }
+
+    if (!allowDataDependency) {
+      Log::warn("{} : unable to forwardDims() because output dims are data "
+                "dependent on input#1",
+                type());
+      return false;
+    }
+
+    this->axes().clear(); // If both are provided, the input overrides the attribute
+    this->axes().reserve(getInput(1)->size());
+    const auto &axes =
+        getInput(1)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+    if (axes.nbDims() == 0) {
+      this->axes().clear();
+    } else {
+      AIDGE_ASSERT(
+          axes.nbDims() == 1,
+          "Axes input tensor should be of size 1. Received {} dimensions : {}",
+          axes.nbDims(), axes.dims());
+      std::copy_n(static_cast<int8_t *>(axes.getImpl()->hostPtr()), axes.size(),
+                  std::back_inserter(this->axes()));
+    }
+  }
+
+  std::vector<DimSize_t> input_dims = getInput(0)->dims();
+  std::vector<DimSize_t> output_dims;
+  output_dims.reserve(input_dims.size());
+  std::vector<DimIdx_t> axes_rectified_idx;
+  axes_rectified_idx.reserve(input_dims.size());
+
+  if (this->axes().empty()) { // squeeze() => squeeze all 1-sized dimensions
+    Log::debug("this->axes() is empty, all 1-sized dims will be squeezed. If "
+               "this is an error, ensure that the values are properly set via "
+               "attribute or data input#1.");
+    std::copy_if(input_dims.begin(), input_dims.end(),
+                 std::back_inserter(output_dims),
+                 [](DimSize_t dim) { return dim != 1; });
+  } else { // squeeze({N, ...}) => squeeze all specified dimensions that are
+           // of size 1.
+    // Ensure index validity and convert Python-style negative indexes
+    // to their positive values
+    for (const int8_t &axis : this->axes()) {
+      AIDGE_ASSERT(axis >= static_cast<int8_t>(-input_dims.size()) &&
+                       axis < static_cast<int8_t>(input_dims.size()),
+                   "{} : Axis index OutOfBounds error, expected value "
+                   "within size limits of input tensor : "
+                   "[-{},{}), got {}.",
+                   type(), input_dims.size(), input_dims.size() - 1, axis);
+      auto temp =
+          static_cast<DimIdx_t>(axis >= 0 ? axis : axis + input_dims.size());
+      if (axes_rectified_idx.end() == std::find(axes_rectified_idx.begin(),
+                                                axes_rectified_idx.end(),
+                                                temp)) {
+        axes_rectified_idx.push_back(temp);
+      }
+    }
+
+    // Create output_dims
+    // speeds up binary search
+    std::sort(axes_rectified_idx.begin(), axes_rectified_idx.end());
+    DimSize_t i = 0;
+    std::copy_if(
+        input_dims.begin(), input_dims.end(), std::back_inserter(output_dims),
+        [&axes_rectified_idx, &i, &input_dims](DimSize_t dim) {
+          // if current dim index is found in axes to squeeze
+          // we ensure that this axis is 1 sized, otherwise an error is thrown
+          bool ok = true;
+          if (std::binary_search(axes_rectified_idx.begin(),
+                                 axes_rectified_idx.end(), i)) {
+            AIDGE_ASSERT(dim == 1,
+                         "{} : Tried to squeeze axis nb {} of a tensor of dim "
+                         "{}. Dim to squeeze has to be 1-sized, got size {}."
+                         "Axes to squeeze : {}",
+                         __func__, i, input_dims, input_dims[i],
+                         axes_rectified_idx);
+            ok = false;
+          }
+          i++; // Incrementing counter since there is no enumerate
+               // function (until C++23)
+          return ok;
+        });
+  }
+  mOutputs[0]->resize(output_dims);
+  return true;
+}
+
+void Squeeze_Op::setBackend(const std::string &name,
+                            Aidge::DeviceIdx_t device) {
+  if (Registrar<Squeeze_Op>::exists({name})) {
+    SET_IMPL_MACRO(Squeeze_Op, *this, name);
+  } else {
+    mImpl = std::make_shared<Squeeze_OpImpl>(*this);
+  }
+  mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Squeeze_Op::getAvailableBackends() const {
+  return Registrar<Squeeze_Op>::getKeys();
+}
+
+void Aidge::Squeeze_OpImpl::forward() {
+  const Squeeze_Op &op_ = static_cast<const Squeeze_Op &>(mOp);
+  // Check if input is provided
+  AIDGE_ASSERT(op_.getInput(0), "Squeeze : missing input 0");
+
+  op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
+                                    op_.getInput(0)->size());
+}
+
+} // namespace Aidge
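
Editor's note: a standalone sketch of the dims logic above, to make the axis rectification concrete. This mirrors the computation, it is not Aidge API:

```cpp
// Mirror of the Squeeze_Op::forwardDims() dims logic; illustration only,
// not Aidge API. The real operator also asserts each squeezed dim is 1.
#include <cstdint>
#include <cstddef>
#include <vector>

std::vector<std::size_t> squeezedDims(const std::vector<std::size_t>& dims,
                                      const std::vector<std::int8_t>& axes) {
    std::vector<std::size_t> out;
    if (axes.empty()) {  // squeeze(): drop every 1-sized dimension
        for (const auto d : dims) { if (d != 1) { out.push_back(d); } }
        return out;
    }
    std::vector<bool> drop(dims.size(), false);
    for (const auto a : axes) {  // negative axes count from the end
        const auto idx = static_cast<std::size_t>(
            a >= 0 ? a : a + static_cast<std::int32_t>(dims.size()));
        drop[idx] = true;        // duplicates are naturally ignored here
    }
    for (std::size_t i = 0; i < dims.size(); ++i) {
        if (!drop[i]) { out.push_back(dims[i]); }
    }
    return out;  // {1, 3, 1, 4} with axes {0, -2} -> {3, 4}
}
```
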
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index b977f4ee7ccce32d7f7929cbee99140aea36cd2f..ca7348b3b415375c09ac1cfd69ac3d6f6e3488eb 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -24,14 +24,22 @@
 
 const std::string Aidge::Sub_Op::Type = "Sub";
 
-bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
+Aidge::Sub_Op::Sub_Op(const Aidge::Sub_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Sub_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
     }
+}
 
-    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+std::shared_ptr<Aidge::Operator> Aidge::Sub_Op::clone() const {
+    return std::make_shared<Sub_Op>(*this);
+}
 
+bool Aidge::Sub_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
         const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
         const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
 
@@ -63,3 +71,13 @@ void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
     SET_IMPL_MACRO(Sub_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::Sub_Op::getAvailableBackends() const {
+    return Registrar<Sub_Op>::getKeys();
+}
+
+//////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Sub(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Sub_Op>(), name);
+}
diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp
index c113ee6f2da52f40a66a8df04ca33ec4b85f3387..fe295ab71b67e8e62562066b1464ffba6e8ae404 100644
--- a/src/operator/Tanh.cpp
+++ b/src/operator/Tanh.cpp
@@ -20,7 +20,33 @@
 
 const std::string Aidge::Tanh_Op::Type = "Tanh";
 
+Aidge::Tanh_Op::Tanh_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+
+Aidge::Tanh_Op::Tanh_Op(const Aidge::Tanh_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Tanh_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Tanh_Op::clone() const {
+    return std::make_shared<Tanh_Op>(*this);
+}
+
 void Aidge::Tanh_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Tanh_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Tanh_Op::getAvailableBackends() const {
+    return Registrar<Tanh_Op>::getKeys();
+}
+
+////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Tanh(const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Tanh_Op>(), name);
 }
\ No newline at end of file
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 20b2e5a15508368a7a3ca3bbf80bd4174d98ae4e..0cb1717f1c96c393b8845db129eee1429966cd98 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -25,52 +25,43 @@
 
 void Aidge::TransposeImpl::forward() {
     const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
-    const auto inputDims = op.getInput(0)->dims();
-    const auto outputDims = op.getOutput(0)->dims();
+    op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.outputDimsOrder());
+}
 
-    std::vector<std::size_t> outStrides(outputDims.size(), 1);
-    for (size_t i = 0; i < outputDims.size(); ++i) {
-        for (size_t j = i+1; j < outputDims.size(); ++j)
-        {
-            outStrides[i] *= outputDims[j];
-        }
-    }
+///////////////////////////////////////////////////
 
-    std::vector<size_t> indices(outputDims.size(), 0);
-    for (size_t i = 0; i < op.getInput(0)->size(); ++i) {
-        size_t idx = 0;
-        // Permute indices based on OutputDimsOrder attr
-        for (int j = outputDims.size() -1; j >=0; --j) {
-            idx += indices[op.getAttr<std::vector<DimSize_t>>(0)[j]] * outStrides[j];
-        }
-        // Copy the value in output
-        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i), 1, idx);
+const std::string Aidge::Transpose_Op::Type = "Transpose";
 
-        // Update indices for the next iteration
-        for (int j = outputDims.size() - 1; j >= 0; --j) {
-            if (indices[j] < inputDims[j] - 1) {
-                indices[j]++;
-                break;
-            } else {
-                indices[j] = 0;
-            }
-        }
+Aidge::Transpose_Op::Transpose_Op(const std::vector<Aidge::DimSize_t> &outputDimsOrder)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder)))
+{
+    mImpl = std::make_shared<TransposeImpl>(*this);
+}
+
+Aidge::Transpose_Op::Transpose_Op(const Aidge::Transpose_Op& op)
+    : OperatorTensor(op),
+    mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<TransposeImpl>(*this);
     }
 }
 
-const std::string Aidge::Transpose_Op::Type = "Transpose";
+std::shared_ptr<Aidge::Operator> Aidge::Transpose_Op::clone() const {
+    return std::make_shared<Transpose_Op>(*this);
+}
 
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
-    // check input has been associated
-    if (!getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
-    }
-
-    if (!getInput(0)->empty()) {
-        const auto& outDimsOrder = getAttr<std::vector<DimSize_t>>(0);
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(!getInput(0)->empty(), "Transpose: not applicable on scalars.");
         std::vector<DimSize_t> outputDims;
-        for (std::size_t i = 0; i < outDimsOrder.size(); ++i) {
-            outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
+        for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) {
+            outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]);
         }
         mOutputs[0]->resize(outputDims);
         return true;
@@ -87,3 +78,14 @@ void Aidge::Transpose_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
     }
     mOutputs[0]->setBackend(name, device);
 }
+
+std::set<std::string> Aidge::Transpose_Op::getAvailableBackends() const {
+    return Registrar<Transpose_Op>::getKeys();
+}
+
+//////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Transpose(const std::vector<Aidge::DimSize_t> &outputDimsOrder,
+                                           const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Transpose_Op>(outputDimsOrder), name);
+}
\ No newline at end of file
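
Editor's note: forwardDims() above permutes dims as outputDims[i] = inputDims[order[i]]. A hypothetical sketch for the common NCHW-to-NHWC case, using the factory defined above:

```cpp
// Hypothetical usage sketch, not part of the patch.
#include "aidge/operator/Transpose.hpp"

// A {1, 8, 32, 32} (NCHW) input produces a {1, 32, 32, 8} (NHWC) output.
auto toNhwc = Aidge::Transpose({0, 2, 3, 1}, "to_nhwc");
```
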
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..53b8bd5442081e601a55853115f44067ae17fc2b
--- /dev/null
+++ b/src/operator/Unfold.cpp
@@ -0,0 +1,162 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Unfold.hpp"
+
+#include <cmath>      // std::floor
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <utility>    // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::Unfold_OpImpl<DIM>::forward() {
+    const Unfold_Op<DIM>& op = dynamic_cast<const Unfold_Op<DIM>&>(mOp);
+    const auto kernelDims = op.kernelDims();
+    const auto dilationDims = op.dilationDims();
+    const auto strideDims = op.strideDims();
+    const DimSize_t inHeight = op.getInput(0)->dims()[2];
+    const DimSize_t inWidth = op.getInput(0)->dims()[3];
+    const DimSize_t inChannels = op.getInput(0)->dims()[1];
+
+    const DimSize_t kernelExtentHeight = op.dilationDims()[0] *
+                                            (op.kernelDims()[0] - 1) + 1;
+    const DimSize_t outHeight = 1 + static_cast<DimSize_t>(
+                    floor(static_cast<float>(inHeight - kernelExtentHeight) /
+                            static_cast<float>(op.strideDims()[0])));
+    const DimSize_t kernelExtentWidth = op.dilationDims()[1] *
+                                            (op.kernelDims()[1] - 1) + 1;
+    const DimSize_t outWidth = 1 + static_cast<DimSize_t>(
+                    floor(static_cast<float>(inWidth - kernelExtentWidth) /
+                            static_cast<float>(op.strideDims()[1])));
+    const DimSize_t outChannels = op.getOutput(0)->dims()[1];
+
+    for (DimSize_t n = 0; n < op.getOutput(0)->dims()[0]; ++n) {
+        for (DimSize_t outC = 0; outC < outChannels; ++outC) {
+            const auto inOffsetW = outC % kernelDims[1];
+            const auto inOffsetH = (outC / kernelDims[1]) % kernelDims[0];
+            const auto inC = outC / kernelDims[0] / kernelDims[1];
+
+            for (DimSize_t outH = 0; outH < outHeight; ++outH) {
+                const auto inH = outH * strideDims[0] + inOffsetH * dilationDims[0];
+
+                for (DimSize_t outW = 0; outW < outWidth; ++outW) {
+                    const auto inW = outW * strideDims[1] + inOffsetW * dilationDims[1];
+
+                    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(((n * inChannels + inC) * inHeight + inH) * inWidth + inW), 1,
+                        ((n * outChannels + outC) * outHeight + outH) * outWidth + outW);
+                }
+            }
+        }
+    }
+}
+
+template class Aidge::Unfold_OpImpl<2>;
+
+/////////////////////////////////////////////////////////////
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::Unfold_Op<DIM>::Type = "Unfold";
+
+template <Aidge::DimIdx_t DIM>
+Aidge::Unfold_Op<DIM>::Unfold_Op(const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                    const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                    const std::array<Aidge::DimSize_t, DIM> &dilationDims)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<UnfoldAttr::StrideDims>(strideDims),
+        attr<UnfoldAttr::DilationDims>(dilationDims),
+        attr<UnfoldAttr::KernelDims>(kernelDims)))
+{
+    mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+Aidge::Unfold_Op<DIM>::Unfold_Op(const Aidge::Unfold_Op<DIM> &op)
+    : OperatorTensor(op),
+        mAttributes(op.mAttributes)
+{
+    if (!op.backend().empty()) {
+        SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
+    }
+    else {
+        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+std::shared_ptr<Aidge::Operator> Aidge::Unfold_Op<DIM>::clone() const {
+    return std::make_shared<Unfold_Op>(*this);
+}
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::Unfold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+        DimSize_t k = 1;
+        DimSize_t l = 1;
+
+        for (std::size_t dim = 0; dim < this->kernelDims().size() ; ++dim) {
+            const DimSize_t kernelExtent = this->dilationDims()[dim] *
+                                                    (this->kernelDims()[dim] - 1) + 1;
+
+            k *= this->kernelDims()[dim];
+            l *= 1 + static_cast<DimSize_t>(
+                    floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
+                            static_cast<float>(this->strideDims()[dim])));
+        }
+
+        mOutputs[0]->resize({inputDims[0], inputDims[1] * k, l});
+        return true;
+    }
+
+    return false;
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::Unfold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Unfold_Op<DIM>>::exists({name})){
+        SET_IMPL_MACRO(Unfold_Op<DIM>, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
+
+template <Aidge::DimIdx_t DIM>
+std::set<std::string> Aidge::Unfold_Op<DIM>::getAvailableBackends() const {
+    return Registrar<Unfold_Op<DIM>>::getKeys();
+}
+
+template class Aidge::Unfold_Op<2>;
+
+///////////////////////////////////////////////////////////
+
+template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
+std::shared_ptr<Aidge::Node> Aidge::Unfold(const std::array<Aidge::DimSize_t, DIM> &kernelDims,
+                                  const std::string& name,
+                                  const std::array<Aidge::DimSize_t, DIM> &strideDims,
+                                  const std::array<Aidge::DimSize_t, DIM> &dilationDims) {
+    static_assert(DIM <= MaxDim, "Too many kernel dimensions required by Unfold, not supported");
+    return std::make_shared<Node>(std::make_shared<Unfold_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+}
+
+template std::shared_ptr<Aidge::Node> Aidge::Unfold<2>(const std::array<Aidge::DimSize_t, 2>&,
+                                  const std::string&,
+                                  const std::array<Aidge::DimSize_t, 2>&,
+                                  const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
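
Editor's note: the output shape rule implemented by forwardDims() above is {N, C * kH * kW, L}, where L is the number of sliding-window positions. A worked, hypothetical sketch using the factory declared above:

```cpp
// Hypothetical usage sketch, not part of the patch.
#include "aidge/operator/Unfold.hpp"

// For a {1, 3, 8, 8} input with 3x3 kernels, stride 1, dilation 1:
//   per-dim positions: 1 + (8 - 3) / 1 = 6, so L = 6 * 6 = 36
//   output dims: {1, 3 * 3 * 3, 36} = {1, 27, 36}
auto unfold = Aidge::Unfold<2>({3, 3}, "unfold_example",
                               /*strideDims=*/{1, 1},
                               /*dilationDims=*/{1, 1});
```
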
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..43afd160e03395c65c4dcbe5504cb865da4ed8d8
--- /dev/null
+++ b/src/operator/Unsqueeze.cpp
@@ -0,0 +1,131 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Unsqueeze.hpp"
+
+#include <cstdint>
+#include <fmt/core.h>
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Log.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+const std::string Unsqueeze_Op::Type = "Unsqueeze";
+
+bool Aidge::Unsqueeze_Op::dimsForwarded() const {
+  if (getInput(1) && !getInput(1)->undefined()) {
+    // output dims are data dependent
+    return false;
+  }
+
+  return OperatorTensor::dimsForwarded();
+}
+
+bool Unsqueeze_Op::forwardDims(bool allowDataDependency) {
+  // error checking
+  if (!inputsAssociated(true)) {
+    return false;
+  }
+  std::shared_ptr<Tensor> fallback;
+  // Copy optional input #1, if present, to attribute Axes
+  if (getInput(1)) {
+    if (!this->axes().empty()) {
+      Log::notice("{} : ignoring non-empty \"axes\" attribute because input#1 "
+                  "takes precedence",
+                  type());
+    }
+
+    if (!allowDataDependency) {
+      Log::warn("{} : unable to forwardDims() because output dims are data "
+                "dependent on input#1",
+                type());
+      return false;
+    }
+
+    this->axes().clear(); // If both are provided, the input overrides the attribute
+    this->axes().reserve(getInput(1)->size());
+    const auto &axes =
+        getInput(1)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu");
+    std::copy_n(static_cast<int8_t *>(axes.getImpl()->hostPtr()),
+                axes.size(), std::back_inserter(this->axes()));
+  }
+  AIDGE_ASSERT(!this->axes().empty(),
+               "{} : Axes to unsqueeze can be defined via input#1 or axes "
+               "attribute. None of them were provided.",
+               type());
+
+  std::vector<DimSize_t> input_dims = getInput(0)->dims();
+  std::vector<DimIdx_t> axes_rectified_idx;
+  axes_rectified_idx.reserve(this->axes().size());
+  DimIdx_t output_nb_dims = input_dims.size() + this->axes().size();
+
+  for (const int8_t &axis : this->axes()) {
+    AIDGE_ASSERT(axis >= static_cast<int8_t>(-output_nb_dims) &&
+                     axis < static_cast<int8_t>(output_nb_dims),
+                 "{} : Axis index OutOfBounds enrror, expected value "
+                 "within size limits of input tensor : "
+                 "[-{},{}), got {}.",
+                 type(), output_nb_dims, output_nb_dims - 1, axis);
+    axes_rectified_idx.push_back(
+        static_cast<DimIdx_t>(axis >= 0 ? axis : axis + output_nb_dims));
+  }
+  // sort in ascending order (std::sort default)
+  std::sort(axes_rectified_idx.begin(), axes_rectified_idx.end());
+  // Raise error if duplicate indexes are found
+  const auto &it = std::adjacent_find(axes_rectified_idx.begin(), axes_rectified_idx.end());
+  AIDGE_ASSERT(
+      it == axes_rectified_idx.end(),
+      "{} : The index {} appears multiple times in list of input dims. "
+      "Check positive and negative indexes.\nRaw indexes :\t{}\nRectified "
+      "indexes :\t{}",
+      type(), *it, this->axes(), axes_rectified_idx);
+
+  // computation
+  std::vector<DimSize_t> output_dims(input_dims);
+  output_dims.reserve(input_dims.size() + this->axes().size());
+  for (const DimIdx_t &axis : axes_rectified_idx) {
+    output_dims.insert(output_dims.begin() + axis, 1);
+  }
+  mOutputs[0]->resize(output_dims);
+  return true;
+}
+
+void Unsqueeze_Op::setBackend(const std::string &name,
+                              Aidge::DeviceIdx_t device) {
+  if (Registrar<Unsqueeze_Op>::exists({name})) {
+    SET_IMPL_MACRO(Unsqueeze_Op, *this, name);
+  } else {
+    mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
+  }
+  mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Unsqueeze_Op::getAvailableBackends() const {
+  return Registrar<Unsqueeze_Op>::getKeys();
+}
+
+void Aidge::Unsqueeze_OpImpl::forward() {
+  const Unsqueeze_Op &op_ = static_cast<const Unsqueeze_Op &>(mOp);
+  // Check if input is provided
+  AIDGE_ASSERT(op_.getInput(0), "Unsqueeze : missing input 0");
+  op_.getOutput(0)->getImpl()->copy(op_.getInput(0)->getImpl()->rawPtr(),
+                                    op_.getInput(0)->size());
+}
+
+} // namespace Aidge
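
Editor's note: a standalone sketch (not Aidge API) of the insertion logic above. Axes index the output rank, and sorting them in ascending order keeps later insert positions valid as the vector grows:

```cpp
// Mirror of the Unsqueeze_Op::forwardDims() dims logic; illustration only.
#include <algorithm>
#include <cstdint>
#include <cstddef>
#include <vector>

std::vector<std::size_t> unsqueezedDims(std::vector<std::size_t> dims,
                                        const std::vector<std::int8_t>& axes) {
    const auto outRank = static_cast<std::int32_t>(dims.size() + axes.size());
    std::vector<std::size_t> positions;
    for (const auto a : axes) {  // negative axes count from the output rank
        positions.push_back(static_cast<std::size_t>(a >= 0 ? a : a + outRank));
    }
    std::sort(positions.begin(), positions.end());
    for (const auto p : positions) {
        dims.insert(dims.begin() + static_cast<std::ptrdiff_t>(p), 1);
    }
    return dims;  // {3, 4} with axes {0, -1} -> {1, 3, 4, 1}
}
```
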
diff --git a/src/recipes/AdaptToBackend.cpp b/src/recipes/AdaptToBackend.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e625a52f6545c3b2b34f85745fd88087a1b9883b
--- /dev/null
+++ b/src/recipes/AdaptToBackend.cpp
@@ -0,0 +1,38 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Matching.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+void Aidge::adaptToBackend(std::shared_ptr<GraphView> graphView) {
+    const auto nodes = graphView->getNodes();
+    for (auto node : nodes) {
+        auto impl = node->getOperator()->getImpl();
+        AIDGE_ASSERT(impl, "Missing implementation for node {} (of type {})",
+            node->name(), node->type());
+        auto adaptedNode = impl->getBestAdaptation(impl->getRequiredSpec());
+
+        if (adaptedNode == nullptr) {
+            Log::notice("Unable to adapt node {} (of type {}) to backend {}",
+                node->name(), node->type(), impl->backend());
+        }
+        else if (!adaptedNode->getOperator()->isAtomic()) {
+            Log::info("Adapted node {} (of type {}) to backend {}",
+                node->name(), node->type(), impl->backend());
+            AIDGE_ASSERT(GraphView::replace({node}, {adaptedNode}), "Unable to replace adapted node!");
+        }
+    }
+}
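
Editor's note: a hypothetical sketch of the expected call site for the new recipe. It assumes the usual GraphView::setBackend() call beforehand (any mechanism that sets node implementations would do), since the recipe asserts an implementation exists before querying getBestAdaptation():

```cpp
// Hypothetical usage sketch, not part of the patch.
#include "aidge/graph/GraphView.hpp"
#include "aidge/recipes/Recipes.hpp"

void adaptExample(std::shared_ptr<Aidge::GraphView> graph) {
    graph->setBackend("cpu");      // implementations must exist per node
    Aidge::adaptToBackend(graph);  // replaces nodes with adapted variants
}
```
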
diff --git a/src/recipes/ConstantFolding.cpp b/src/recipes/ConstantFolding.cpp
index 42fb45224614ca2655165a69b974cfe229e27f90..40b0bda766ab243805349b13e93391c5a60df63a 100644
--- a/src/recipes/ConstantFolding.cpp
+++ b/src/recipes/ConstantFolding.cpp
@@ -44,7 +44,7 @@ void Aidge::constantFolding(std::shared_ptr<GraphView> graph) {
                     }
 
                     const auto& producer = std::static_pointer_cast<Producer_Op>(input.first->getOperator());
-                    if (!producer->getAttr<bool>("Constant")) {
+                    if (!producer->constant()) {
                         Log::info("Node {} (of type {}) not foldable because Producer input {} not Constant",
                             node->name(), node->type(), input.first->name());
                         foldable = false;
diff --git a/src/recipes/ConvToMatMul.cpp b/src/recipes/ConvToMatMul.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9b88ffc73204b44cf857213d1fdfff49b3191f73
--- /dev/null
+++ b/src/recipes/ConvToMatMul.cpp
@@ -0,0 +1,114 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Matching.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/Unfold.hpp"
+#include "aidge/operator/Fold.hpp"
+#include "aidge/operator/Reshape.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+size_t Aidge::convToMatMul(std::shared_ptr<GraphView> graphView) {
+    const auto matches = SinglePassGraphMatching(graphView).match("Conv");
+
+    size_t nbReplaced = 0;
+    for (const auto& match : matches) {
+        const auto convNode = match.startNode;
+        const std::shared_ptr<Conv_Op<2>> convOp =
+            std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
+
+        AIDGE_ASSERT(convOp->getOutput(0) && !convOp->getOutput(0)->empty(),
+            "Output dims must have been forwarded in order to apply convToMatMul for Conv {}", convNode->name());
+
+        //const auto nbDims = convOp->getOutput(0)->dims().size();
+        //const std::array<DimSize_t, 2> outputDims = {convOp->getOutput(0)->dims()[nbDims - 2], convOp->getOutput(0)->dims()[nbDims - 1]};
+        const auto wShape = convOp->getInput(1)->dims();
+        const auto wFlattenSize = std::accumulate(wShape.cbegin() + 1, wShape.cend(), DimSize_t(1), std::multiplies<DimSize_t>());
+
+        auto microGraph = std::make_shared<GraphView>();
+        auto unfold = Unfold(convOp->kernelDims(),
+            (!convNode->name().empty()) ? convNode->name() + "_unfold" : "",
+            convOp->strideDims(),
+            convOp->dilationDims());
+        auto wReshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>{{static_cast<int64_t>(convOp->getInput(1)->dims()[0]), static_cast<int64_t>(wFlattenSize)}}),
+            (!convNode->name().empty()) ? convNode->name() + "_w_reshape_shape_prod" : "",
+            true);
+        auto wReshape = Reshape({},
+            false,
+            (!convNode->name().empty()) ? convNode->name() + "_w_reshape" : "");
+        auto matMul = MatMul((!convNode->name().empty()) ? convNode->name() + "_matmul" : "");
+        auto reshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>(convOp->getOutput(0)->dims())),
+            (!convNode->name().empty()) ? convNode->name() + "_reshape_shape_prod" : "",
+            true);
+        auto reshape = Reshape({},
+            false,
+            (!convNode->name().empty()) ? convNode->name() + "_reshape" : "");
+        //auto fold = Fold(outputDims,
+        //    convOp->kernelDims(),
+        //    (!convNode->name().empty()) ? convNode->name() + "_unfold" : "",
+        //    convOp->strideDims(),
+        //    convOp->dilationDims());
+
+        wReshapeProd->addChild(wReshape, 0, 1);
+        wReshape->addChild(matMul, 0, 0);
+        unfold->addChild(matMul, 0, 1);
+        reshapeProd->addChild(reshape, 0, 1);
+        matMul->addChild(reshape, 0, 0);
+        //matMul->addChild(fold, 0, 0);
+        microGraph->add({unfold, wReshapeProd, wReshape, matMul, reshapeProd, reshape}, false);
+        //microGraph->add({unfold, wReshapeProd, wReshape, matMul, fold}, false);
+
+        // Handle bias
+        if (convOp->getInput(2) && !convOp->getInput(2)->empty()) {
+            auto add = Add(2, (!convNode->name().empty()) ? convNode->name() + "_add" : "");
+            auto bReshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>{{1, static_cast<int64_t>(convOp->getInput(2)->size()), 1, 1}}),
+                (!convNode->name().empty()) ? convNode->name() + "_b_reshape_shape_prod" : "",
+                true);
+            auto bReshape = Reshape({},
+                false,
+                (!convNode->name().empty()) ? convNode->name() + "_b_reshape" : "");
+
+            bReshapeProd->addChild(bReshape, 0, 1);
+            bReshape->addChild(add, 0, 1);
+            reshape->addChild(add, 0, 0);
+            //fold->addChild(add, 0, 0);
+            microGraph->add({reshape, add, bReshapeProd, bReshape}, false);
+            //microGraph->add({fold, add}, false);
+            microGraph->setOrderedInputs({{unfold, 0}, {wReshape, 0}, {bReshape, 0}});
+        }
+        else {
+            // Add a dummy 3rd input in order for replace() to work
+            microGraph->setOrderedInputs({{unfold, 0}, {wReshape, 0}, {nullptr, 0}});
+        }
+
+        auto gConv = std::make_shared<GraphView>();
+        gConv->add(convNode, false);
+
+        const auto success = GraphView::replace(gConv, microGraph);
+
+        if (!success) {
+            Log::notice("Could not replace Conv {} with MatMul", convNode->name());
+        }
+        else {
+            ++nbReplaced;
+        }
+    }
+
+    Log::info("Replaced {} (out of {}) matching Conv with MatMul", nbReplaced, matches.size());
+    return nbReplaced;
+}
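
Editor's note: the shape bookkeeping behind the rewrite above, spelled out for a concrete case (an im2col-style lowering; the numbers below are illustrative only, not from the patch):

```cpp
// Illustration only: shapes for Conv(C_in=3, C_out=16, 3x3 kernels)
// applied to a {1, 3, 32, 32} input, following the micrograph above.
//   Unfold output : {1, 3 * 3 * 3, L} = {1, 27, L}  (L = window positions)
//   Weight reshape: {16, 27}                        (wFlattenSize = 3 * 3 * 3)
//   MatMul output : {1, 16, L}, then Reshape back to the Conv output dims;
//   an optional Add broadcasts the bias reshaped to {1, 16, 1, 1}.
```
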
diff --git a/src/recipes/ExplicitCastMove.cpp b/src/recipes/ExplicitCastMove.cpp
index 7d836c3acc835c5ed3fe014db6787029dc318afd..c860b9e8a0e1fcbf467eb13e1366f371d731a47d 100644
--- a/src/recipes/ExplicitCastMove.cpp
+++ b/src/recipes/ExplicitCastMove.cpp
@@ -73,7 +73,7 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
 
         IOIndex_t inputIdx = 0;
         for (auto parent : node->inputs()) {
-            // TODO: possible optimization: currently, a Cast/Move Operator may 
+            // TODO: possible optimization: currently, a Cast/Move Operator may
             // be added several times to the same output, if it has multiple children,
             // even if it is the same conversion each time.
             if (parent.first != nullptr) {
@@ -91,8 +91,8 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {
 
                 if (node->type() != Cast_Op::Type && input->dataType() != output->dataType()) {
                     // Change of data type => a Cast operator is required
-                    castOp = Cast();
-                    castOp->getOperator()->setDataType(output->dataType());
+                    castOp = Cast(output->dataType());
+                    // castOp->getOperator()->setDataType(output->dataType());
                     castOp->getOperator()->setBackend(device.first, device.second);
 
                     if (moveOp == nullptr) {
diff --git a/src/recipes/ExplicitTranspose.cpp b/src/recipes/ExplicitTranspose.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7ff971b7e436219d5dfbb7cbadbaf780d3f1aeda
--- /dev/null
+++ b/src/recipes/ExplicitTranspose.cpp
@@ -0,0 +1,125 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/recipes/Recipes.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Transpose.hpp"
+
+void Aidge::explicitTranspose(std::shared_ptr<GraphView> graph) {
+    // First, remove existing Transpose operators, if not needed anymore
+    auto nodes = graph->getNodes();
+    for (auto node : nodes) {
+        AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+        const auto& output = std::static_pointer_cast<OperatorTensor>(node->getOperator())->getOutput(0);
+
+        if (node->type() == Transpose_Op::Type) {
+            // Remove existing Transpose operators, if not needed anymore
+            AIDGE_INTERNAL_ASSERT(node->inputs().size() == 1);
+            const auto parent = node->inputs()[0];
+            // Check parent is not nullptr, as this Operator may be an entry point of the graph without parent
+            if (parent.first != nullptr) {
+                AIDGE_ASSERT(parent.first->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+                const auto& input = std::static_pointer_cast<OperatorTensor>(parent.first->getOperator())->getOutput(parent.second);
+
+                if (input->dataFormat() != DataFormat::Default
+                    && output->dataFormat() != DataFormat::Default
+                    && input->dataFormat() == output->dataFormat())
+                {
+                    // Add direct connection bypassing Transpose node
+                    const auto childs = node->outputs()[0];
+                    for (const auto& child : childs) {
+                        parent.first->addChild(child.first, parent.second, child.second);
+                    }
+
+                    // Remove all node connections
+                    node->resetConnections();
+                    // Remove node from view
+                    graph->remove(node);
+                }
+            }
+        }
+    }
+
+    // Second, insert Transpose operator between node inputs and parent output, if needed
+    nodes = graph->getNodes();
+    for (auto node : nodes) {
+        // TODO: currently, Operator data type is only reflected in its output tensor data type.
+        // But an Operator might have multiple outputs of different data types(?)
+        const auto& output = std::static_pointer_cast<OperatorTensor>(node->getOperator())->getOutput(0);
+
+        IOIndex_t inputIdx = 0;
+        for (auto parent : node->inputs()) {
+            // TODO: possible optimization: currently, a Transpose Operator may
+            // be added several times to the same output, if it has multiple children,
+            // even if it is the same conversion each time.
+            if (parent.first != nullptr) {
+                const auto& input = std::static_pointer_cast<OperatorTensor>(parent.first->getOperator())->getOutput(parent.second);
+
+                if ((node->type() != Transpose_Op::Type
+                    && input->dataFormat() != DataFormat::Default
+                    && output->dataFormat() != DataFormat::Default
+                    && input->dataFormat() != output->dataFormat()))
+                {
+                    // Allow Transpose fusion only if the data format is fully specified both before and after it in this recipe
+                    bool fuseTranspose = false;
+                    if (parent.first->type() == Transpose_Op::Type && parent.first->getChildren().size() == 1) {
+                        const auto& parentInput = std::static_pointer_cast<OperatorTensor>(parent.first->getOperator())->getInput(0);
+                        if (parentInput->dataFormat() != DataFormat::Default) {
+                            fuseTranspose = true;
+                        }
+                    }
+
+                    if (fuseTranspose) {
+                        // Do not insert a new Transpose if the only parent is already a Transpose!
+                        const auto& parentInput = std::static_pointer_cast<OperatorTensor>(parent.first->getOperator())->getInput(0);
+                        if (parentInput->dataFormat() == output->dataFormat()) {
+                            // Case 1: same data format as before the parent Transpose
+                            // => remove Transpose altogether
+                            const auto parentParent = parent.first->inputs()[0];
+                            // Add direct connection bypassing Transpose node
+                            parentParent.first->addChild(node, parentParent.second, 0);
+                            // Remove all node connections
+                            parent.first->resetConnections();
+                            // Remove node from view
+                            graph->remove(parent.first);
+                        }
+                        else {
+                            // Case 2: change of format
+                            // => compute the new permutation array
+                            const auto transpose = getDataFormatTranspose(parentInput->dataFormat(), output->dataFormat());
+                            auto transposeOp = std::static_pointer_cast<Transpose_Op>(parent.first->getOperator());
+                            transposeOp->setDataFormat(output->dataFormat());
+                            transposeOp->outputDimsOrder() = std::vector<DimSize_t>(transpose.begin(), transpose.end());
+                        }
+                    }
+                    else {
+                        const auto transpose = getDataFormatTranspose(input->dataFormat(), output->dataFormat());
+                        auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
+                        transposeOp->getOperator()->setDataFormat(output->dataFormat());
+                        transposeOp->getOperator()->setDataType(output->dataType());
+                        if (output->getImpl()) {
+                            const auto& device = output->getImpl()->device();
+                            transposeOp->getOperator()->setBackend(device.first, device.second);
+                        }
+                        transposeOp->addChild(node, 0, inputIdx);
+                        parent.first->addChild(transposeOp, parent.second, 0);
+
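+                        // Register the new Transpose in the view and make sure its neighbours still belong to it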
+                        graph->add(transposeOp);
+                        graph->add(parent.first);
+                        graph->add(node);
+                    }
+                }
+            }
+
+            ++inputIdx;
+        }
+    }
+}
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 21009318cddae7ce60a01592b19ab237a77fbd2b..34722c19f8c0fddaffa7357136f1512a027e1617 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -16,6 +16,7 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/graph/Matching.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
@@ -25,17 +26,16 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-// Graph Regex
-#include "aidge/graphRegex/GraphRegex.hpp"
-
 void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
                           std::shared_ptr<Aidge::Node> batchnormNode) {
     // Case: convNode is a MetaOperator ending with a Convolution
     // eg. PaddedConv
+    std::shared_ptr<Node> metaNode;
     if (!(convNode -> getOperator() -> isAtomic())) {
-        const std::shared_ptr<MetaOperator_Op> metaNode = std::static_pointer_cast<MetaOperator_Op>(convNode -> getOperator());
-        const std::shared_ptr<GraphView>  metanodeGraph = metaNode -> getMicroGraph();
-        const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> outputNodes = metanodeGraph -> getOrderedOutputs();
+        metaNode = convNode;
+        const auto metaOp = std::static_pointer_cast<MetaOperator_Op>(convNode -> getOperator());
+        const std::shared_ptr<GraphView>  metaOpGraph = metaOp -> getMicroGraph();
+        const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> outputNodes = metaOpGraph -> getOrderedOutputs();
         if (outputNodes.size() != 1) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Bad MetaOperator argument for fuseBatchNorm recipie.");
         }
@@ -60,14 +60,15 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
             std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
         convNbOutChannels = convOpPtr->outChannels();
         channelsSize = convOpPtr->inChannels();
-        kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+        kernelDims = convOpPtr->kernelDims();
     }
     else if (convNode->type() == ConvDepthWise_Op<2>::Type) {
         const std::shared_ptr<ConvDepthWise_Op<2>> convOpPtr =
             std::static_pointer_cast<ConvDepthWise_Op<2>>(convNode->getOperator());
         convNbOutChannels = convOpPtr->nbChannels();
-        kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+        kernelDims = convOpPtr->kernelDims();
     }
+    AIDGE_ASSERT(kernelDims.size() == 2, "fuseBatchNorm(): only 2D convolutions are supported");
 
     std::shared_ptr<Tensor> scaleBuf, shiftBuf, b_meanBuf, b_varBuf;
     const Tensor& scale = batchOp->getInput(1)->refCastFrom(scaleBuf, DataType::Float32, "cpu");
@@ -75,7 +76,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
     const Tensor& b_mean = batchOp->getInput(3)->refCastFrom(b_meanBuf, DataType::Float32, "cpu");
     const Tensor& b_var = batchOp->getInput(4)->refCastFrom(b_varBuf, DataType::Float32, "cpu");
 
-    const float epsilon = batchOp->getAttr<float>("Epsilon");
+    const float epsilon = batchOp->epsilon();
 
 
     assert(epsilon > 0.0);
@@ -89,13 +90,58 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
             meanVariance += b_var.get<float>(outChId);
             ++count;
         } else {
-            fmt::print("Zero-variance: {} [{}]\n", convNode->name(), outChId);
+            Log::notice("Zero-variance: {} [{}]\n", convNode->name(), outChId);
         }
     }
     if (count > 0)
         meanVariance /= count;
     else {
-        fmt::print("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
+        Log::notice("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
+    }
+
+    // Add a bias if it does not already exist, as there will be one after the fusion
+    if (!convOp->getInput(2)) {
+        if (metaNode) {
+            // Conv is inside a meta-operator, so we add the bias outside of it
+            // Find the correct input index of the meta-operator corresponding
+            // to the bias:
+            const auto metaOp = std::static_pointer_cast<MetaOperator_Op>(metaNode->getOperator());
+            const auto metaOpGraph = metaOp->getMicroGraph();
+            IOIndex_t inputIdx = 0;
+            for (auto input : metaOpGraph->getOrderedInputs()) {
+                if (input.first == convNode && input.second == 2) {
+                    break;
+                }
+                ++inputIdx;
+            }
+
+            auto prod = addProducer(metaNode, inputIdx, {convNbOutChannels}, "b");
+            // Add the new bias node to the same views as the meta node
+            for (auto g : metaNode->views()) {
+                g->add(prod);
+            }
+        }
+        else {
+            auto prod = addProducer(convNode, 2, {convNbOutChannels}, "b");
+            if (convNode->input(1).first) {
+                // Add the new bias node to the same views as the weights node
+                // if possible
+                for (auto g : convNode->input(1).first->views()) {
+                    g->add(prod);
+                }
+            }
+            else {
+                for (auto g : convNode->views()) {
+                    g->add(prod);
+                }
+            }
+        }
+
+        AIDGE_INTERNAL_ASSERT(convOp->getInput(2) != nullptr);
+
+        // Use the same backend for the bias as for the weights
+        convOp->getInput(2)->setBackend(convOp->getInput(1)->backend());
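+        // Start from a zero bias: the batchnorm shift and mean are folded into it below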
+        convOp->getInput(2)->zeros();
     }
 
     std::shared_ptr<Tensor> weightBuf, biasBuf;
@@ -112,7 +158,6 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
                         ? b_var.get<float>(outChId) : meanVariance));
         // Weights adjustments
         for (std::size_t channel = 0; channel < channelsSize; ++channel) {
-            // TODO : Suppose kerneldims = 2
             for (std::size_t k0 = 0; k0 < kernelDims[0]; ++k0) {
                 for (std::size_t k1 = 0; k1 < kernelDims[1]; ++k1) {
                     std::vector<DimSize_t> currentIdx = {outChId, channel, k0, k1};
@@ -122,7 +167,6 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
             }
         }
 
-        // TODO : check if noBias==true is set, then set biasValue to 0
         float biasValue = bias.get<float>(outChId);
 
         biasValue = shift.get<float>(outChId) + (biasValue - b_mean.get<float>(outChId)) * factor;
@@ -145,44 +189,11 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
 
 }
 
-void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::MatchSolution> solution) {
-    assert(solution->at("BatchNorm").size() == 1 && "Wrong number of nodes BatchNorm to replace\n");
-    assert(solution->at("OP").size() == 1 && "Wrong number of nodes OP to replace\n");
-
-    for (const auto& op : solution->at("OP")) {
-        if (op->getOperator()->isAtomic()) {
-            for (const auto& batchNorm : solution->at("BatchNorm")) {
-                fuseBatchNorm(op, batchNorm);
-            }
-        } else {  // op is a MetaOperator
-            auto metaOp = std::dynamic_pointer_cast<MetaOperator_Op>(op->getOperator());
-            if ((metaOp->getMicroGraph()->getOrderedOutputs().size() == 1) &&
-                ((metaOp->getMicroGraph()->getOrderedOutputs()[0].first->type() ==
-                  Conv_Op<2>::Type) ||
-                 (metaOp->getMicroGraph()->getOrderedOutputs()[0].first->type() ==
-                  ConvDepthWise_Op<2>::Type))) {
-                for (const auto& batchNorm : solution->at("BatchNorm")) {
-                    fuseBatchNorm(op, batchNorm);
-                }
-            }
-        }
-    }
-}
-
 void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::GraphView> graphView) {
-    std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
-    regex->setNodeKey("BatchNorm", "getType($) =='BatchNorm'");
-    fmt::print("\n============================\nSearching for solutions\n==============================\n");
-    regex->setNodeKey(
-            "OP",
-            "getType($) =='Conv' || getType($) =='ConvDepthWise' || getType($) =='PaddedConv' || getType($) =='PaddedConvDepthWise'");
-            //  || getType($) =='FC' ");
-
-    regex->addQuery("OP -> BatchNorm");
-
-    for (const auto& solution : regex->match(graphView)) {
-
-        fuseBatchNorm(solution);
+    auto matches = SinglePassGraphMatching(graphView).match("(Conv|ConvDepthWise|PaddedConv|PaddedConvDepthWise)->BatchNorm");
 
+    for (auto match : matches) {
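+        // The root node of each match is the convolution-like node; its single child is the BatchNorm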
+        auto rootNode = match.graph->rootNode();
+        fuseBatchNorm(rootNode, *rootNode->getChildren().begin());
     }
-}
\ No newline at end of file
+}
diff --git a/src/recipes/FuseToMetaOps.cpp b/src/recipes/FuseToMetaOps.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0ad5e5a1da0e6aef74f7e47751dd2d4e8648980b
--- /dev/null
+++ b/src/recipes/FuseToMetaOps.cpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Matching.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+size_t Aidge::fuseToMetaOps(std::shared_ptr<GraphView> graphView, const std::string& query, const std::string& type) {
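+    // If no explicit type name is provided, the query string itself is used as the meta-operator type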
+    const auto metaType = (!type.empty()) ? type : query;
+    const auto matches = SinglePassGraphMatching(graphView).match(query);
+
+    size_t nbReplaced = 0;
+    for (const auto& match : matches) {
+        auto metaOp = MetaOperator(metaType.c_str(), match.graph->clone());
+        // Clone does not clone implementation, which is therefore empty.
+        // Use the root node backend for the meta op backend, even though some
+        // matching nodes might be on a different backend, as nodes in the meta
+        // op are required to share the same backend!
+        const auto backend = match.graph->rootNode()->getOperator()->backend();
+        if (!backend.empty()) {
+            metaOp->getOperator()->setBackend(backend);
+        }
+
+        auto metaOpGraph = std::make_shared<GraphView>();
+        metaOpGraph->add(metaOp, false);
+        const auto success = GraphView::replace(match.graph, metaOpGraph);
+
+        if (!success) {
+            Log::notice("Could not replace sub-graph with meta operator");
+        }
+        else {
+            ++nbReplaced;
+        }
+    }
+
+    Log::info("Replaced {} (out of {}) matching sub-graph with meta operators", nbReplaced, matches.size());
+    return nbReplaced;
+}
diff --git a/src/recipes/GraphViewHelper.cpp b/src/recipes/GraphViewHelper.cpp
index b0c99bffb895dc64b20d76991911ae5f4b604c85..9522c0fe7346e78875a08d3ebf19a04dea2909e1 100644
--- a/src/recipes/GraphViewHelper.cpp
+++ b/src/recipes/GraphViewHelper.cpp
@@ -44,14 +44,3 @@ std::set<std::shared_ptr<Aidge::Tensor>> Aidge::parameters(std::shared_ptr<Aidge
     }
     return res;
 }
-
-void Aidge::compile_gradient(std::shared_ptr<Aidge::GraphView> gv) {
-    for (const auto& node : gv->getNodes()) {
-        // TODO: check that each node is an OperatorTensor
-        AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Cannot instanciate gradient of an Operator ({}) that doesn't use Tensor.", node->getOperator()->type());
-        const std::shared_ptr<OperatorTensor> op = std::dynamic_pointer_cast<OperatorTensor>(node -> getOperator());
-        for (std::size_t o = 0; o < node -> nbOutputs(); ++o) {
-            op->getOutput(o)->initGrad();
-        }
-    }
-}
diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
index 9897549304ee04e8512ab7b4ed9450169c7fc911..88691c26d5d7013874c13000535ec2a3842d47d3 100644
--- a/src/recipes/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -74,10 +74,12 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
     // }
 
     std::vector<std::shared_ptr<Node>> clonedInputs = std::vector<std::shared_ptr<Node>>(node->nbInputs(), nullptr);
-    for (std::size_t i = node->nbData(); i < node ->nbInputs(); ++i) {
-        clonedInputs[i] = node -> getParent(i) -> cloneSharedOperators();
-        clonedInputs[i] -> setName(node -> getParent(i) -> name() + "_0");
-        tiledOperator.insert(clonedInputs[i]);
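+    // Clone only the parameter inputs (e.g. weights and bias); data inputs are sliced below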
+    for (std::size_t i = 0; i < node ->nbInputs(); ++i) {
+        if (node->inputCategory(i) == InputCategory::Param || node->inputCategory(i) == InputCategory::OptionalParam) {
+            clonedInputs[i] = node -> getParent(i) -> cloneSharedOperators();
+            clonedInputs[i] -> setName(node -> getParent(i) -> name() + "_0");
+            tiledOperator.insert(clonedInputs[i]);
+        }
     }
 
     const std::vector<std::string> sliceInputsNames = Slice_Op::getInputsName();
@@ -92,6 +94,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
 
         auto slice = Slice();
         auto backend = outTensor->getImpl()->backend();
+
         // Create Slice's Starts producer node
         std::vector<std::int64_t> inputDimsStart(inputDims[0].first.size());
         for (std::size_t dim = 0; dim < inputDimsStart.size(); ++dim) {
@@ -139,6 +142,8 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         auto stepsNode = Producer(steps, slice->name() + sliceInputsNames[4]);
         stepsNode -> addChild(slice, 0, 4);
 
         slice -> addChild(newNode, 0, 0);
         newNode -> addChild(concat, 0, i);
 
diff --git a/src/recipes/LabelGraph.cpp b/src/recipes/LabelGraph.cpp
index ac0e6bfe197460c8c422a6c1f3b3240518ee1f29..75bcd36bf61f7c23645038bedb060cd13bdce2c5 100644
--- a/src/recipes/LabelGraph.cpp
+++ b/src/recipes/LabelGraph.cpp
@@ -22,7 +22,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == Conv_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<Conv_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvAttr::KernelDims>(), op->template getAttr<ConvAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -30,7 +30,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == ConvDepthWise_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<ConvDepthWise_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvDepthWiseAttr::KernelDims>(), op->template getAttr<ConvDepthWiseAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }
 
@@ -38,7 +38,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {
     if (node->type() == AvgPooling_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<AvgPooling_Op<2>>(node->getOperator());
 
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<AvgPoolingAttr::KernelDims>(), op->template getAttr<AvgPoolingAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }
 
diff --git a/src/recipes/FuseMulAdd.cpp b/src/recipes/MatMulToFC.cpp
similarity index 56%
rename from src/recipes/FuseMulAdd.cpp
rename to src/recipes/MatMulToFC.cpp
index bb4b0e3db1974ccf106699b25fd71fc9cc09654c..9b5addd3bb971b3f61980a582d4cce6435c57219 100644
--- a/src/recipes/FuseMulAdd.cpp
+++ b/src/recipes/MatMulToFC.cpp
@@ -22,28 +22,29 @@
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/operator/MatMul.hpp"
+#include "aidge/graph/Matching.hpp"
 
-//Graph Regex
-#include "aidge/graphRegex/GraphRegex.hpp"
 
-
-void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<Aidge::Node> addNode) { //std::set<std::shared_ptr<Node>> nodes){
+void Aidge::matMulToFC(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<Aidge::Node> addNode) {
     // Fuse MatMul & Add into FC
     // Inputs : old nodes (pointers on mul & add)
-
-    assert((matmulNode->type() == "MatMul" && addNode->type() == "Add") && "Wrong type for the nodes to replace");
+    AIDGE_ASSERT((matmulNode->type() == "MatMul" && (addNode == nullptr || addNode->type() == "Add")),
+        "Wrong type for the nodes to replace: {} and {}",
+        matmulNode->type(), (addNode) ? addNode->type() : "nullptr");
 
 
     // Step 1 : Create FC
     // Fetch the output dimension through the bias size
     std::shared_ptr<Node> bias = nullptr;
-    if (addNode->getParent(0) == matmulNode) {
-        AIDGE_ASSERT(addNode->getParent(1), "No bias detected to produce the fuseMulAdd recipe.");
-        bias = addNode->getParent(1);
-    }
-    else if (addNode->getParent(1) == matmulNode) {
-        AIDGE_ASSERT(addNode->getParent(0), "No bias detected to produce the fuseMulAdd recipe.");
-        bias = addNode->getParent(0);
+    if (addNode) {
+        if (addNode->getParent(0) == matmulNode) {
+            AIDGE_ASSERT(addNode->getParent(1), "No bias detected to produce the matMulToFC recipe.");
+            bias = addNode->getParent(1);
+        }
+        else if (addNode->getParent(1) == matmulNode) {
+            AIDGE_ASSERT(addNode->getParent(0), "No bias detected to produce the matMulToFC recipe.");
+            bias = addNode->getParent(0);
+        }
     }
 
     std::shared_ptr<Node> weight = nullptr;
@@ -52,6 +53,12 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
             && matmulNode->getParent(0) && matmulNode->getParent(0)->getOperator()->type() != Producer_Op::Type))
     {
         weight = matmulNode->getParent(1);
+        // Transpose the weights in place so that they match the FC weight layout
+        auto weightOpTensor = std::static_pointer_cast<OperatorTensor>(weight->getOperator());
+        const std::shared_ptr<Aidge::Tensor>& weightTensor = weightOpTensor->getOutput(0);
+        weightTensor->copyTranspose(*weightTensor, std::vector<Aidge::DimSize_t>({1ul, 0ul}));
     }
     else if ((matmulNode->getParent(0) && !matmulNode->getParent(1))
         || (matmulNode->getParent(0) && matmulNode->getParent(0)->getOperator()->type() == Producer_Op::Type
@@ -69,28 +76,13 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     }
     AIDGE_ASSERT(weight != nullptr, "Could not deduce weight input for MatMul operator.");
 
-    // TODO: find another way to get OutChannels for FC operator.
-    // This poor fix supposes that one of Add inputs is a const and has the same outChannels as the output
-    DimSize_t outSize = 0;
-    AIDGE_ASSERT(addNode->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
-    const auto& op = std::static_pointer_cast<OperatorTensor>(addNode->getOperator());
-    for (size_t i = 0; i < op->nbInputs(); i++)
-    {
-        const auto& inTensor = op->getInput(i);
-        if(inTensor->nbDims() > 0) {
-            outSize = inTensor->dims()[inTensor->nbDims()-1];
-            break;
-        }
-    }
-    AIDGE_ASSERT(outSize, "Couldnt get output number of channels for FC operator.");
-
     // Instantiate FC
     std::string fcName = matmulNode->name();
-    if (!addNode->name().empty()) {
+    if (addNode && !addNode->name().empty()) {
         fcName += "_" + addNode->name();
     }
 
-    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(bias ? false : true), fcName);
+    std::shared_ptr<Node> fc = std::make_shared<Node>(std::make_shared<FC_Op>(), fcName);
 
     // Step 2 : Branch existing producers & create the others
     // link weights & bias
@@ -99,43 +91,26 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
         bias->cloneSharedOperators()->addChild(fc, 0, 2);
     }
 
-
     // Step 3 : Update all graphviews that contains at least one node to replace
         // Case 1 : If all nodes are in a graph view : delete old nodes & branch input & output
         // Case 2 : If not all nodes are in a graph view : only delete the nodes from the graphview
         // Maybe create a central mechanism to automatically update all graph views rather than having each node track its GraphView membership?
-    auto newNodes = std::set<std::shared_ptr<Node>>({fc, fc->getParent(1), fc->getParent(2)});
-    GraphView::replace({matmulNode, addNode, bias, weight}, newNodes);
+    if (addNode) {
+        auto newNodes = std::set<std::shared_ptr<Node>>({fc, fc->getParent(1), fc->getParent(2)});
+        GraphView::replace({matmulNode, addNode, bias, weight}, newNodes);
+    }
+    else {
+        auto newNodes = std::set<std::shared_ptr<Node>>({fc, fc->getParent(1)});
+        GraphView::replace({matmulNode, weight}, newNodes);
+    }
 
 }
 
+void Aidge::matMulToFC(std::shared_ptr<Aidge::GraphView> graphView){
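+    // "Add#?" matches an optional Add node; when present, it is retrieved through the "Add" anchor below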
+    const auto matches = SinglePassGraphMatching(graphView).match("MatMul->Add#?");
 
-void Aidge::fuseMulAdd(std::shared_ptr<Aidge::MatchSolution> solution){
-
-    assert(solution->at("MatMul").size() == 1 && "Wrong number of nodes MatMul to replace\n");
-    assert(solution->at("Add").size() == 1 && "Wrong number of nodes Add to replace\n");
-
-    for (const auto& matmulNode : solution->at("MatMul")) {
-        for (const auto& addNode : solution->at("Add")) {
-            fuseMulAdd(matmulNode,addNode);
-        }
+    for (const auto& match : matches) {
+        const auto it = match.anchors.find("Add");
+        matMulToFC(match.graph->rootNode(), (it != match.anchors.end()) ? it->second.at("#") : nullptr);
     }
 }
-
-
-void Aidge::fuseMulAdd(std::shared_ptr<Aidge::GraphView> graphView){
-
-
-    std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
-    regex->setNodeKey("Add","getType($) =='Add'");
-    regex->setNodeKey("MatMul","getType($) =='MatMul'");
-    regex->addQuery("MatMul -> Add ;");
-
-    for (const auto& solution : regex->match(graphView)) {
-
-        fuseMulAdd(solution);
-
-
-
-    }
-}
\ No newline at end of file
diff --git a/src/recipes/RemoveDropout.cpp b/src/recipes/RemoveDropout.cpp
deleted file mode 100644
index 4f8805845bd1f46fd187cba3564b031c55c4655a..0000000000000000000000000000000000000000
--- a/src/recipes/RemoveDropout.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <memory>
-
-#include "aidge/graph/Node.hpp"
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/recipes/Recipes.hpp"
-
-//Graph Regex
-#include "aidge/graphRegex/GraphRegex.hpp"
-
-
-namespace Aidge {
-    void removeDropout(std::shared_ptr<Node> dropout) {
-
-        std::set<NodePtr> nodesToRemove;
-        for (auto nodePtr: dropout->getParents())
-        {
-            if(nodePtr->type() == "Producer")
-            {
-                nodesToRemove.insert(nodePtr);
-            }
-        }
-        nodesToRemove.insert(dropout);
-        GraphView::replace(nodesToRemove, {});
-    }
-
-    void removeDropout(std::shared_ptr<MatchSolution> solution){
-
-        assert(solution->at("Dropout").size() == 1 && "Wrong number of nodes Dropout to replace\n");
-
-        for (const auto& dropout : solution->at("Dropout")) {
-
-            removeDropout(dropout);
-        }
-    }
-
-    void removeDropout(std::shared_ptr<GraphView> graphView){
-        std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
-        regex->setNodeKey("Dropout","getType($) =='Dropout'");
-        regex->addQuery("Dropout#");
-
-        for (const auto& solution : regex->match(graphView)) {
-            removeDropout(solution);
-        }
-    }
-}
diff --git a/src/recipes/RemoveFlatten.cpp b/src/recipes/RemoveFlatten.cpp
index 8c1bf1bcf0bf79fda275867ff6430d5a937da172..bf80ab51749953a5b72d0e01f186265fdbb72e81 100644
--- a/src/recipes/RemoveFlatten.cpp
+++ b/src/recipes/RemoveFlatten.cpp
@@ -17,38 +17,20 @@
 
 
 //Graph Regex
-#include "aidge/graphRegex/GraphRegex.hpp"
+// #include "aidge/graphRegex/GraphRegex.hpp"
+#include "aidge/graph/Matching.hpp"
 
 
 namespace Aidge {
-    void removeFlatten(std::shared_ptr<Node> flatten) {
-        GraphView::replace({flatten}, {});
-    }
-
-    void removeFlatten(std::shared_ptr<MatchSolution> solution){
-
-        assert(solution->at("FC").size() == 1 && "Wrong number of nodes FC to replace\n");
-        assert(solution->at("Flatten").size() == 1 && "Wrong number of nodes Flatten to replace\n");
-
-        for (const auto& flatten : solution->at("Flatten")) {
-            removeFlatten(flatten);
-        }
-    }
-
-
-
     void removeFlatten(std::shared_ptr<GraphView> graphView){
-      
-
-        std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
-        regex->setNodeKey("Flatten","getType($) =='Flatten'");
-        regex->setNodeKey("FC","getType($) =='FC'");
-        regex->addQuery("Flatten->FC");
-
-        for (const auto& solution : regex->match(graphView)) {
-            removeFlatten(solution);
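+        // Match an FC or MatMul node preceded by one or more Flatten nodes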
+        const auto matches = SinglePassGraphMatching(graphView).match(
+            "(FC|MatMul)<-(Flatten)+"
+        );
+
+        for (const auto& solution : matches) {
+            auto flattenNodes(solution.graph->getNodes());
+            flattenNodes.erase(solution.graph->rootNode());
+            GraphView::replace(flattenNodes, {});
         }
-
-
     }
 }
diff --git a/src/recipes/RemoveNode.cpp b/src/recipes/RemoveNode.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a09c67991409dfe491d46b4ad739f9ddf5b72aef
--- /dev/null
+++ b/src/recipes/RemoveNode.cpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+
+//Graph Regex
+#include "aidge/graphRegex/GraphRegex.hpp"
+
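+// Remove every node of the given type from the view; if incProducers is true, their Producer parents are removed as well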
+size_t Aidge::removeNode(std::shared_ptr<GraphView> graphView, const std::string& type, bool incProducers) {
+    std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
+    regex->setNodeKey(type, "getType($) =='" + type + "'");
+    regex->addQuery(type + "#");
+
+    const auto matches = regex->match(graphView);
+    for (const auto& solution : matches) {
+        assert(solution->at(type).size() == 1 && "Wrong number of nodes to replace\n");
+
+        std::set<NodePtr> nodesToRemove = solution->at(type);
+        if (incProducers) {
+            for (const auto& nodePtr: (*solution->at(type).begin())->getParents()) {
+                if (nodePtr != nullptr && nodePtr->type() == "Producer") {
+                    nodesToRemove.insert(nodePtr);
+                }
+            }
+        }
+        GraphView::replace(nodesToRemove, {});
+    }
+
+    return matches.size();
+}
+
+size_t Aidge::removeDropout(std::shared_ptr<GraphView> graphView) {
+    return removeNode(graphView, "Dropout", true);
+}
+
+size_t Aidge::removeIdentity(std::shared_ptr<GraphView> graphView) {
+    return removeNode(graphView, "Identity");
+}
diff --git a/src/recipes/removeConstantOfShape.cpp b/src/recipes/removeConstantOfShape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5e84f7b494815ecb5a8937bb6f76ba1de80ad3f9
--- /dev/null
+++ b/src/recipes/removeConstantOfShape.cpp
@@ -0,0 +1,128 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#include "aidge/recipes/Recipes.hpp"
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <set>
+#include <stdexcept>
+#include <string>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Matching.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/ConstantOfShape.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+
+namespace Aidge {
+
+size_t removeConstantOfShape(std::shared_ptr<GraphView> graph_view) {
+  const auto matches =
+      SinglePassGraphMatching(graph_view).match("Producer->ConstantOfShape");
+
+  size_t nbReplaced = 0;
+  for (const auto &match : matches) {
+    const auto prod_node = match.graph->rootNode();
+    const auto prod_op =
+        std::static_pointer_cast<Producer_Op>(prod_node->getOperator());
+
+    const NodePtr constantofshape_node =
+        prod_node->getOrderedChildren().at(0).at(0);
+
+    const auto constantofshape_op =
+        std::static_pointer_cast<ConstantOfShape_Op>(
+            constantofshape_node->getOperator());
+
+    if (prod_op->getOutput(0)->nbDims() != 1) {
+      Log::debug("{} : Producer output dimension number is {} != 1 and {} "
+                 "input has to have 1 dim, skipping match.",
+                 __func__, prod_op->getOutput(0)->nbDims(),
+                 ConstantOfShape_Op::Type);
+      continue;
+    }
+    if (!prod_op->constant()) {
+      Log::debug("{} : Producer is not constant, skipping match.", __func__);
+      continue;
+    }
+    if (prod_op->getOutput(0)->dataType() != DataType::Int64) {
+      AIDGE_THROW_OR_ABORT(
+          std::runtime_error,
+          "{} : Producer output dtype is {} != int64 and {} "
+          "input type is restricted to int64_t, this is an error."
+          "Fix your network. skipping match.",
+          __func__, prod_op->getOutput(0)->dataType(),
+          ConstantOfShape_Op::Type);
+      continue;
+    }
+
+    auto graph_to_replace = std::make_shared<GraphView>();
+    auto new_graph = std::make_shared<GraphView>();
+    graph_to_replace->add(constantofshape_node);
+    if (prod_node->getChildren().size() == 1) {
+      graph_to_replace->add(prod_node);
+    } else {
+      Log::debug("{} : Producer node has multiple children, only"
+                 "replacing the {} node.",
+                 __func__, ConstantOfShape_Op::Type);
+    }
+
+    prod_node->forward();
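+    // The Producer output now holds the requested shape as an int64 tensor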
+    std::shared_ptr<Tensor> prod_output = prod_op->getOutput(0);
+    std::vector<DimSize_t> new_input_dims;
+    new_input_dims.reserve(prod_output->dims()[0]);
+    for (DimSize_t i = 0; i < prod_output->size(); ++i) {
+      new_input_dims.push_back(prod_output->get<int64_t>(i));
+    }
+
+    auto new_input = std::make_shared<Tensor>(new_input_dims);
+    new_input->setBackend(prod_op->backend() == "" ? "cpu"
+                                                   : prod_op->backend());
+    new_input->setDataType(constantofshape_op->value().dataType());
+    for (std::size_t i = 0; i < new_input->size(); ++i) {
+      new_input->getImpl()->copy(
+          constantofshape_op->value().getImpl()->rawPtr(), 1, i);
+    }
+    auto new_prod =
+        Producer(new_input, prod_node->name() + "_constant_of_shape", true);
+    new_graph->add(new_prod);
+
+    const auto success = GraphView::replace(graph_to_replace, new_graph);
+    if (!success) {
+      Log::warn("Could not replace Producer({})->ConstantOfShape({}) with"
+                "Producer",
+                prod_node->name(), constantofshape_node->name());
+    } else {
+      ++nbReplaced;
+    }
+  }
+
+  Log::info("Replaced {} (out of {}) matching Producer->ConstantOfShape with "
+            "Producers",
+            nbReplaced, matches.size());
+  return nbReplaced;
+}
+} // namespace Aidge
+
diff --git a/src/scheduler/ParallelScheduler.cpp b/src/scheduler/ParallelScheduler.cpp
index 4e515099006b9e0588eafc7e981c5f5e80bbe97d..1d70646b70091e2e3ff6f03b8ee82ae62aeb1e43 100644
--- a/src/scheduler/ParallelScheduler.cpp
+++ b/src/scheduler/ParallelScheduler.cpp
@@ -127,7 +127,12 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std::
         // in the next step
         for (size_t i = 0; i < staticSchedule.size(); ) {
             auto runnable = staticSchedule[i];
-            if (!pool.busy() && runnable->early <= latest) {
+            if (runnable->early > latest) {
+                // No more node can be run at this step (latest)
+                break;
+            }
+
+            if (!pool.busy()) {
                 // Check that potential preceding non-critical nodes are finished
                 bool ready = true;
                 for (auto elt : runnable->laterThan) {
@@ -168,9 +173,17 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std::
                 }
             }
             else {
-                // Thread pool is already full or no more node can be run at
-                // this step (latest)
-                break;
+                // Thread pool is already full
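+                // Yield until every node that must finish at this step is done, then move on to the next step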
+                bool ready = true;
+                for (auto elt : mustFinish) {
+                    ready = ready && finished.at(elt);
+                }
+                if (!ready) {
+                    std::this_thread::yield();
+                }
+                else {
+                    break;
+                }
             }
         }
 
diff --git a/src/scheduler/ProdConso.cpp b/src/scheduler/ProdConso.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a3bff53c3643a5da361dec5944f47a27f148a995
--- /dev/null
+++ b/src/scheduler/ProdConso.cpp
@@ -0,0 +1,117 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <string>
+
+#include "aidge/scheduler/ProdConso.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+Aidge::ProdConso::ProdConso(const Operator& op, bool inPlace):
+    mOp(op),
+    mInPlace(inPlace),
+    mNbConsumedData(mOp.nbInputs(), Elts_t::NoneElts()),
+    mNbProducedData(mOp.nbOutputs(), Elts_t::NoneElts())
+{
+    // Default producer-consumer model: track the amount of data consumed on each input and produced on each output
+}
+
+Aidge::Elts_t Aidge::ProdConso::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+    if (mOp.getRawInput(inputIdx)) {
+        const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
+        if (!input->undefined()) {
+            // Known amount of data: requires the whole tensor by default
+            return Elts_t::DataElts(input->size());
+        }
+        else {
+            // Unknown amount of data: require a single token by default
+            return Elts_t::TokenElts(1);
+        }
+    }
+
+    // Input not connected, meaning it is an optional input: do not require anything!
+    return Elts_t::NoneElts();
+}
+
+Aidge::Elts_t Aidge::ProdConso::getNbRequiredProtected(IOIndex_t inputIdx) const {
+    if (mOp.getRawInput(inputIdx)) {
+        const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
+        if (!input->undefined()) {
+            // Known amount of data: protect the whole tensor by default
+            return Elts_t::DataElts((mInPlace) ? 0 : input->size());
+        }
+        else {
+            // Unknown amount of data: protect a single token by default
+            // (this does not really make sense for now, as getNbRequiredProtected()
+            // is supposed to give a precise amount of data to protect for
+            // memory management purposes...)
+            return Elts_t::TokenElts((mInPlace) ? 0 : 1);
+        }
+    }
+
+    // Input not connected, meaning it is an optional input: do not require anything!
+    return Elts_t::NoneElts();
+}
+
+Aidge::Elts_t Aidge::ProdConso::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
+    if (mOp.getRawOutput(outputIdx)) {
+        const auto output = std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx));
+        if (!output->undefined()) {
+            // Known amount of data: requires the whole tensor by default,
+            // regardless of available data on inputs
+            return Elts_t::DataElts(output->size());
+        }
+        else {
+            // Unknown amount of data: require a single token by default
+            // (this does not really make sense for now, as getRequiredMemory()
+            // is supposed to give a precise amount of data to allocate for
+            // memory management purposes...)
+            return Elts_t::TokenElts(1);
+        }
+    }
+
+    // Output not set, meaning it is an optional output: do not require anything!
+    return Elts_t::NoneElts();
+}
+
+Aidge::Elts_t Aidge::ProdConso::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
+    AIDGE_ASSERT(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(),
+        "input index ({}) is out of bound ({}) for operator type {}",
+        inputIdx, mNbConsumedData.size(), mOp.type());
+    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
+}
+
+Aidge::Elts_t Aidge::ProdConso::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
+    AIDGE_ASSERT(static_cast<std::size_t>(outputIdx) < mNbProducedData.size(),
+        "output index ({}) is out of bound ({}) for operator type {}",
+        outputIdx, mNbProducedData.size(), mOp.type());
+    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
+}
+
+void Aidge::ProdConso::updateConsummerProducer(){
+    // Update producer-consumer data
+    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx) {
+        // each input is consumed by the minimum amount for a forward pass
+        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));
+    }
+
+    for (std::size_t outputIdx = 0; outputIdx < mNbProducedData.size(); ++outputIdx) {
+        mNbProducedData[outputIdx] += getRequiredMemory(outputIdx, {});
+    }
+}
+
+void Aidge::ProdConso::resetConsummerProducer(){
+    std::fill(mNbConsumedData.begin(), mNbConsumedData.end(), Elts_t::NoneElts());
+    std::fill(mNbProducedData.begin(), mNbProducedData.end(), Elts_t::NoneElts());
+}
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index af10e3dcd3ead044f8619c40570936f53039d9a2..1613450508ea84a230f36ba6526a1322c6a70559 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -33,6 +33,7 @@
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 
 
@@ -197,18 +198,20 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S
             bool isStillConsumer = false;
             // Only look for data inputs. If no data is available on data input,
             // by definition, no parameter can be consumed on parameter inputs.
-            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbData(); ++inputIdx) {
-                AIDGE_LOG_CONTEXT("Consumer node {} input #{}", namePtrTable.at(consumer), inputIdx);
-
-                if (consumer->getOperator()->getNbConsumedData(inputIdx) <
-                            getNbAvailableData(consumer, inputIdx)) {
-                    Log::debug("  still consumer: C{} < P{} for input #{}",
-                        consumer->getOperator()->getNbConsumedData(inputIdx),
-                        getNbAvailableData(consumer, inputIdx), inputIdx);
-
-                    // there is still data to consume
-                    isStillConsumer = true;
-                    break;
+            for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) {
+                if (consumer->inputCategory(inputIdx) == InputCategory::Data) {
+                    AIDGE_LOG_CONTEXT("Consumer node {} input #{}", namePtrTable.at(consumer), inputIdx);
+
+                    if (consumer->getOperator()->getNbConsumedData(inputIdx) <
+                                getNbAvailableData(consumer, inputIdx)) {
+                        Log::debug("  still consumer: C{} < P{} for input #{}",
+                            consumer->getOperator()->getNbConsumedData(inputIdx),
+                            getNbAvailableData(consumer, inputIdx), inputIdx);
+
+                        // there is still data to consume
+                        isStillConsumer = true;
+                        break;
+                    }
                 }
             }
 
@@ -217,7 +220,7 @@ std::vector<std::shared_ptr<Aidge::Scheduler::StaticSchedulingElement>> Aidge::S
             bool isProducer = false;
             for (IOIndex_t outId = 0; outId < consumer->nbOutputs(); ++outId) {
                 for (const auto& child : consumer->getChildren(outId)) {
-                    if (child) {
+                    if (child && mGraphView->inView(child)) {
                         IOIndex_t inputIdx = 0;
                         for (const auto& childParent : child->getParents()) {
                             if (childParent == consumer) {
@@ -524,23 +527,28 @@ void Aidge::Scheduler::connectInputs(const std::vector<std::shared_ptr<Aidge::Te
     // This version of connect inputs only connects tensor inputs in input data producers.
     auto inputNodes = mGraphView->getOrderedInputs();
 
-    // Assert that the number of input data producers corresponds to the number of data input
-    if (data.size() != inputNodes.size()) {
-        const std::map<std::shared_ptr<Node>, std::string> namePtrTable
-            = mGraphView->getRankedNodesName("{0} ({1}#{3})");
-
-        std::vector<std::pair<std::string, IOIndex_t>> inputNodesName;
-        std::transform(inputNodes.begin(), inputNodes.end(),
-            std::back_inserter(inputNodesName),
-            [&namePtrTable](auto val){ return std::make_pair(namePtrTable.at(val.first), val.second); });
-
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Provided {} inputs to the scheduler, but graph has {} inputs (required inputs in order: )",
-            data.size(), inputNodes.size(), inputNodesName);
-    }
-
-    for (std::size_t i = 0; i < data.size(); ++i){
-        // TODO : maybe shallow copy instead of deepcopy
-        inputNodes[i].first->getOperator()->setInput(inputNodes[i].second, data[i]);
+    std::size_t i = 0;
+    for (auto& input : inputNodes) {
+        if (i < data.size() && data[i]) {
+            // TODO : maybe shallow copy instead of deepcopy
+            input.first->getOperator()->setInput(input.second, data[i]);
+        }
+        else {
+            const auto& currentTensorPtr =
+                std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator())->getInput(input.second);
+            const bool optional = (input.first->inputCategory(input.second) == InputCategory::OptionalData
+                || input.first->inputCategory(input.second) == InputCategory::OptionalParam);
+
+            if (currentTensorPtr) {
+                Log::debug("connectInputs(): existing tensor dims are {} for graph input#{} for input#{} of node {} (of type {})",
+                    i, input.second, input.first->name(), input.first->type(), currentTensorPtr->dims());
+            }
+            else if (!optional) {
+                Log::warn("connectInputs(): did not specify tensor for mandatory graph input#{} for input#{} of node {} (of type {})",
+                    i, input.second, input.first->name(), input.first->type());
+            }
+        }
+        ++i;
     }
 }
 
@@ -604,6 +612,9 @@ void Aidge::Scheduler::saveStaticSchedulingDiagram(const std::string& fileName)
 }
 
 std::vector<std::shared_ptr<Aidge::Node>> Aidge::Scheduler::getStaticScheduling(std::size_t step) const {
+    AIDGE_ASSERT(!mStaticSchedule.empty(), "Scheduler::getStaticScheduling(): static scheduling is empty, did you generate scheduling first?");
+    AIDGE_ASSERT(step < mStaticSchedule.size(), "Scheduler::getStaticScheduling(): no static scheduling at step {} (available steps: {})", step, mStaticSchedule.size());
+
     const auto& staticSchedule = mStaticSchedule.at(step);
     std::vector<std::shared_ptr<Node>> schedule;
     std::transform(staticSchedule.begin(), staticSchedule.end(), std::back_inserter(schedule), [](const auto& v) { return v->node; });
@@ -638,32 +649,29 @@ Aidge::Elts_t Aidge::Scheduler::getNbAvailableData(const std::shared_ptr<Node>&
         // We are inside an upper operator (for instance a MetaOperator)
         // We need to connect the "local" producer-consumer model to the upper
         // one, by mapping local node inputs to the upper node inputs.
-        IOIndex_t nodeInputIdx = 0;
+        IOIndex_t upperInputIdx = 0;
         for (const auto& input : mGraphView->getOrderedInputs()) {
-            if (input.first == node) {
+            if (input.first == node && input.second == inputIdx) {
                 // Current node is an input
-                const auto upperInput = upperNode->inputs()[nodeInputIdx];
+                const auto upperInput = upperNode->inputs()[upperInputIdx];
                 if (upperInput.first) {
                     return upperInput.first->getOperator()->getNbProducedData(upperInput.second);
                 }
             }
-            ++nodeInputIdx;
+            ++upperInputIdx;
         }
     }
 
-    // Otherwise, two cases:
+    // Otherwise, it means that the input is not connected. Two cases:
+    // - There is no data, it is assumed to be an optional input
+    // - A valid tensor exists:
     if (node->getOperator()->getRawInput(inputIdx)) {
-        // Input is not connected but a valid tensor exists
         // => This means data was fed manually to the input, without a Producer
         // In this case, we assume a single-use data (unlike a Producer, which
         // keep producing the data each time it is needed).
-        fmt::print("No producer node attached to input#{} for node {} ({})\n", inputIdx, node->name(), node->type());
+        Log::warn("No producer node attached to input#{} for node {} ({})\n", inputIdx, node->name(), node->type());
         return Elts_t::DataElts(std::static_pointer_cast<Tensor>(node->getOperator()->getRawInput(inputIdx))->size());
     }
-    else {
-        // Input is not connected, this is an error
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input#{} for node {} ({})\n", inputIdx, node->name(), node->type());
-    }
 
     return Elts_t::NoneElts();
 }
diff --git a/src/scheduler/SequentialScheduler.cpp b/src/scheduler/SequentialScheduler.cpp
index 74b1b3f0c6e9be164792460669821744661c15b3..88b5e98bc62456bd59dc235c3112396daaeddd24 100644
--- a/src/scheduler/SequentialScheduler.cpp
+++ b/src/scheduler/SequentialScheduler.cpp
@@ -73,10 +73,7 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, const std::vector<std
     }
 }
 
-void Aidge::SequentialScheduler::backward(bool instanciateGrad) {
-    // create ad set Grad values
-    if (instanciateGrad) { compile_gradient(mGraphView); }
-
+void Aidge::SequentialScheduler::backward() {
     // TODO: Check output grad are not empty
 
     // Generate scheduling *only if empty*
diff --git a/src/utils/DynamicAttributes.cpp b/src/utils/DynamicAttributes.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..909d3bb2f5fda977ac497a19e1a1088eb52cfc88
--- /dev/null
+++ b/src/utils/DynamicAttributes.cpp
@@ -0,0 +1,31 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/utils/DynamicAttributes.hpp"
+
+std::map<std::type_index, bool(*)(const future_std::any&, const future_std::any&)> Aidge::DynamicAttributes::mAnyCompare;
+
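+// Strict weak ordering between type-erased values: same-type values use the comparator registered
+// for their type; otherwise, fall back to the ordering of the types themselves.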
+bool future_std::operator<(const future_std::any& lhs, const future_std::any& rhs) {
+    if (lhs.type() == rhs.type()) {
+        return Aidge::DynamicAttributes::mAnyCompare.at(lhs.type())(lhs, rhs);
+    }
+#ifdef PYBIND
+    else if (lhs.type() == typeid(py::object)) {
+        return Aidge::DynamicAttributes::mAnyCompare.at(rhs.type())(lhs, rhs);
+    }
+    else if (rhs.type() == typeid(py::object)) {
+        return Aidge::DynamicAttributes::mAnyCompare.at(lhs.type())(lhs, rhs);
+    }
+#endif
+    else {
+        return (lhs.type().before(rhs.type()));
+    }
+}
diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp
index 54af888caca8dc2c4b512515ff70663f9437dd45..da32a8e0ec6a3c9f27da5c47f9e6166c1fc879bc 100644
--- a/src/utils/Log.cpp
+++ b/src/utils/Log.cpp
@@ -28,6 +28,16 @@ Aidge::Log::Level Aidge::Log::mConsoleLevel = []() {
     }
     return Info;
 }();
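+// Console colors are enabled by default; set AIDGE_LOG_COLOR to "off", "OFF" or "0" to disable them.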
+bool Aidge::Log::mConsoleColor = []() {
+    const char* logColor = std::getenv("AIDGE_LOG_COLOR");
+    if (logColor == nullptr)
+        return true;
+    auto logColorStr = std::string(logColor);
+    if (logColorStr == "off" || logColorStr == "OFF" ||
+        logColorStr == "0")
+        return false;
+    return true;
+}();
 Aidge::Log::Level Aidge::Log::mFileLevel = []() {
     const char* logLevel = std::getenv("AIDGE_LOGLEVEL_FILE");
     if (logLevel != nullptr) {
@@ -55,7 +65,8 @@ void Aidge::Log::log(Level level, const std::string& msg) {
         // Styles that were already applied to msg with fmt are kept also in 
         // the log file.
         const auto modifier
-            = (level == Debug) ? fmt::fg(fmt::color::gray)
+            = !mConsoleColor ? fmt::text_style()
+            : (level == Debug) ? fmt::fg(fmt::color::gray)
             : (level == Notice) ? fmt::fg(fmt::color::medium_purple)
             : (level == Warn) ? fmt::fg(fmt::color::orange)
             : (level == Error) ? fmt::fg(fmt::color::red)
@@ -78,7 +89,7 @@ void Aidge::Log::log(Level level, const std::string& msg) {
             fmt::println("Context: {}", context);
         }
 
-        fmt::println(mFile.get(), msg);
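+        // Pass msg as an argument rather than as the format string, so that
+        // braces inside the message are not interpreted by fmt.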
+        fmt::println(mFile.get(), "{}", msg);
     }
 }
 
diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt
index 9280d5fbdfd0a6a35724e5afd5caf672fefd8bf8..fd96b060630c162e93143e8f51019a0ce3e82cc9 100644
--- a/unit_tests/CMakeLists.txt
+++ b/unit_tests/CMakeLists.txt
@@ -55,7 +55,7 @@ target_link_options(tests${module_name} PUBLIC $<$<OR:$<CXX_COMPILER_ID:Clang>,$
 
 endif()
 
-target_link_libraries(tests${module_name} PUBLIC ${module_name})
+target_link_libraries(tests${module_name} PRIVATE ${module_name})
 
 target_link_libraries(tests${module_name} PRIVATE Catch2::Catch2WithMain)
 
diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
index 655fd725e9d7d913d24c6552571ae3b91e3605b4..d5cd8cdcfb88beee9aab2393b0c5591c79a70b80 100644
--- a/unit_tests/data/Test_Tensor.cpp
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -14,13 +14,14 @@
 #include <cstdint>     // std::uint8_t, std::uint16_t, std::int32_t
 #include <numeric>     // std::accumulate, std::inner_product
 #include <functional>  // std::multiplies
-#include <random>      // std::random_device, std::mt19937,
+#include <random>      // std::mt19937,
                        // std::uniform_int_distribution, std::uniform_real_distribution
 #include <set>
 #include <string>
 #include <vector>
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 
 #include "aidge/backend/cpu/data/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
@@ -36,11 +37,11 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
         Tensor T_default{};
         REQUIRE((
             (T_default.dataType() == DataType::Float32) &&
-            (T_default.size() == 1) &&
+            (T_default.size() == 0) &&
             (T_default.dims() == std::vector<DimSize_t>({})) &&
             (T_default.strides() == std::vector<DimSize_t>({1})) &&
             (T_default.getImpl() == nullptr) &&
-            (T_default.grad() == nullptr) &&
+            (T_default.grad() != nullptr) &&
             (T_default.isContiguous() == true)
         ));
     }
@@ -53,7 +54,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({})) &&
             (T.strides() == std::vector<DimSize_t>({1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
     }
@@ -67,7 +68,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == Tdims) &&
             (T.strides() == std::vector<DimSize_t>({5040,2520,840,210,42,7,1})) &&
             (T.getImpl() == nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
     }
@@ -83,7 +84,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({2})) &&
             (T.strides() == std::vector<DimSize_t>({1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
 
@@ -97,7 +98,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({2,2,2})) &&
             (T.strides() == std::vector<DimSize_t>({4,2,1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
         REQUIRE_NOTHROW(T = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
@@ -113,7 +114,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
             (T.dims() == std::vector<DimSize_t>({2,2,2,2})) &&
             (T.strides() == std::vector<DimSize_t>({8,4,2,1})) &&
             (T.getImpl() != nullptr) &&
-            (T.grad() == nullptr) &&
+            (T.grad() != nullptr) &&
             (T.isContiguous() == true)
         ));
     }
@@ -127,7 +128,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
         constexpr std::uint16_t NBTRIALS = 10;
 
         // Create random number generators
-        std::random_device rd;
+        auto rd = Catch::Generators::Detail::getSeed;
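+        // rd is a pointer to Catch2's getSeed(); seeding from it makes the
+        // test reproducible via the --rng-seed command-line option.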
         std::mt19937 gen(rd());
         std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
         std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
@@ -157,7 +158,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
                 (T.dims() == Tclone.dims()) &&
                 (T.strides() == Tclone.strides()) &&
                 (T.getImpl() != Tclone.getImpl()) &&
-                (Tclone.grad() == nullptr) &&
+                (Tclone.grad() != nullptr) &&
                 (Tclone.isContiguous() == true)
             ));
             REQUIRE(Tclone == T);
@@ -169,7 +170,7 @@ TEST_CASE("[core/data] Tensor(getter/setter)", "[Tensor][Getter][Setter]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create random number generators
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
@@ -261,7 +262,7 @@ TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create random number generators
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
@@ -297,6 +298,73 @@ TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") {
         }
     }
 
+    SECTION("Tensor set/get") {
+        // Test set() and get(), by flat index and by coordinates, on tensors
+        // of various ranks (including rank 0) and with coordinates of various
+        // ranks (including rank 0).
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // Test tensors of rank 0 to 3
+            for (std::size_t nb_dims = 0; nb_dims <= 3; ++nb_dims) {
+                std::vector<std::size_t> dims(nb_dims);
+                for (std::size_t dim = 0; dim < nb_dims; ++dim) {
+                    dims[dim] = dimsDist(gen);
+                }
+
+                const std::size_t size = std::accumulate(dims.begin(), dims.end(), std::size_t(1), std::multiplies<std::size_t>());
+                std::vector<float> values(size);
+                for (auto& valref : values) {
+                    valref = valueDist(gen);
+                }
+
+                std::unique_ptr<float[]> x_array(new float[size]);
+                for (std::size_t i = 0; i < size; ++i) {
+                    x_array[i] = values[i];
+                }
+
+                // Initialize Tensor with a host backend
+                Tensor x{dims};
+                x.setDataType(DataType::Float32);
+                x.setBackend("cpu");
+                x.getImpl()->setRawPtr(x_array.get(), x.size());
+                REQUIRE(x.getImpl()->hostPtr() != nullptr);
+                REQUIRE(x.isContiguous());
+
+                // Test get() and set() values by index
+                for (std::size_t i = 0; i < size; ++i) {
+                    REQUIRE_NOTHROW(x.set(i, values[i]));
+                }
+                for (std::size_t i = 0; i < size; ++i) {
+                    float val;
+                    REQUIRE_NOTHROW(val = x.get<float>(i));
+                    REQUIRE(val == values[i]);
+                }
+
+                // Test get() and set() by coords
+                // We create coords of rank 0 to the number of dimensions
+                for (std::size_t coord_size = 0; coord_size < dims.size(); ++coord_size) {
+                    std::vector<std::size_t> coords(coord_size);
+                    for (std::size_t coord_idx = 0; coord_idx < coord_size; ++coord_idx) {
+                        std::size_t dim_idx = (dimsDist(gen)-1) % dims[coord_idx];
+                        coords[coord_idx] = dim_idx;
+                    }
+                    std::size_t flat_idx, flat_storage_idx;
+                    // As the tensor is contiguous, getIdx() == getStorageIdx()
+                    REQUIRE_NOTHROW(flat_idx = x.getIdx(coords));
+                    REQUIRE_NOTHROW(flat_storage_idx = x.getStorageIdx(coords));
+                    REQUIRE(flat_storage_idx == flat_idx);
+                    float val, val_flat;
+                    // Test get() by index and by coords
+                    REQUIRE_NOTHROW(val_flat = x.get<float>(flat_idx));
+                    REQUIRE_NOTHROW(val = x.get<float>(coords));
+                    REQUIRE(val == val_flat);
+                    REQUIRE(val == values[flat_idx]);
+                    // Test set() by coords, also update the reference array
+                    REQUIRE_NOTHROW(x.set(coords, val + 1));
+                    values[flat_idx] += 1;
+                }
+            }
+        }
+    }
+
     SECTION("Tensor extract") {
         bool equal;
 
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 8403686d16da15e7e8ad4616029a241d6197d450..a08808ee5e6c2657a76213dcff80cec53b23e7ee 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -17,11 +17,15 @@
 #include <string>
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Testing.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Split.hpp"
+#include "aidge/operator/Memorize.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/graph/OpArgs.hpp"
@@ -35,7 +39,7 @@ TEST_CASE("genRandomGraph", "[GraphView][randomGen]") {
     size_t nbUnicity = 0;
 
     for (int test = 0; test < nbTests; ++test) {
-        std::random_device rd;
+        auto rd = Catch::Generators::Detail::getSeed;
         const std::mt19937::result_type seed(rd());
 
         RandomGraph randGraph;
@@ -81,7 +85,7 @@ TEST_CASE("clone", "[GraphView][clone]") {
     const size_t nbTests = 100;
 
     for (int test = 0; test < nbTests; ++test) {
-        std::random_device rd;
+        auto rd = Catch::Generators::Detail::getSeed;
         const std::mt19937::result_type seed(rd());
 
         RandomGraph randGraph;
@@ -107,13 +111,6 @@ TEST_CASE("clone_with_delete", "[GraphView][cloneDelete]") {
     const size_t nbTests = 100;
     size_t nbClonedWithDelete = 0;
 
-    // Note: initial seed is chosen such that for nbTests=100, the generated
-    // graphs keep the same inputs/outputs despites the deleted nodes
-    // (meaning the deleted nodes are not input/output of the graph).
-    // Otherwise, the last two REQUIRE are not garanteed to be true!
-    // Warning: distributions are not required to behave the same way by the standard,
-    // therefore the seed has to work for both GCC and MSVC...
-    // See https://stackoverflow.com/questions/38532927/why-gcc-and-msvc-stdnormal-distribution-are-different
     std::mt19937::result_type seed(243);
 
     for (int test = 0; test < nbTests; ++test) {
@@ -123,7 +120,21 @@ TEST_CASE("clone_with_delete", "[GraphView][cloneDelete]") {
         const auto g1 = std::make_shared<GraphView>("g1");
         const bool unicity1 = g1->add(randGraph.gen(seed, 10));
 
-        if (unicity1) {
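+        // Only run the checks when no "DelFictive" node is an input or output
+        // of the graph: deleting such a node would change the graph interface
+        // and the final REQUIREs would not be guaranteed to hold.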
+        bool stableInOut = true;
+        for (auto node : g1->inputNodes()) {
+            if (node->type() == "DelFictive") {
+                stableInOut = false;
+                break;
+            }
+        }
+        for (auto node : g1->outputNodes()) {
+            if (node->type() == "DelFictive") {
+                stableInOut = false;
+                break;
+            }
+        }
+
+        if (unicity1 && stableInOut) {
             randGraph.omitType = "DelFictive";
             const auto g2 = std::make_shared<GraphView>("g2");
             const bool unicity2 = g2->add(randGraph.gen(seed, 10));
@@ -155,7 +166,7 @@ TEST_CASE("remove", "[GraphView][remove]") {
     size_t nbTested = 0;
 
     for (int test = 0; test < nbTests; ++test) {
-        std::random_device rd;
+        auto rd = Catch::Generators::Detail::getSeed;
         const std::mt19937::result_type seed(rd());
 
         RandomGraph randGraph;
@@ -399,9 +410,7 @@ TEST_CASE("[core/graph] GraphView(resetConnections)") {
         conv1->resetConnections(false);
 
         REQUIRE(conv->output(0).size() == 0);
-        for (std::size_t i = 0; i < conv1->nbData(); ++i) {
-        REQUIRE((conv1->input(i) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
-        }
+        REQUIRE((conv1->input(0) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
         REQUIRE((conv1->input(1) == std::pair<std::shared_ptr<Node>, IOIndex_t>(prod1, 0)));
         REQUIRE((conv1->input(2) == std::pair<std::shared_ptr<Node>, IOIndex_t>(prod2, 0)));
         REQUIRE((conv2->input(0) == std::pair<std::shared_ptr<Node>, IOIndex_t>(nullptr, gk_IODefaultIndex)));
@@ -434,6 +443,107 @@ TEST_CASE("[core/graph] GraphView(resetConnections)") {
     }
 }
 
+TEST_CASE("[core/graph] GraphView(getOrderedNodes)", "[GraphView][getOrderedNodes]") {
+    auto data1 = Producer({2}, "data1");
+    auto data2 = Producer({2}, "data2");
+    auto data3 = Producer({2}, "data3");
+    auto add1 = Add(2, "add1");
+    auto add2 = Add(2, "add2");
+    auto split1 = Split(2, 0, {1, 1}, "split1");
+    auto add3 = Add(3, "add3");
+    auto g = std::make_shared<GraphView>("TestGraph");
+    data1->addChild(add1);
+    data2->addChild(add1);
+    add1->addChild(add2);
+    data3->addChild(add2);
+    add1->addChild(add3);
+    add2->addChild(split1);
+    split1->addChild(add3);
+    g->add(data1);
+    g->add(data2);
+    g->add(data3);
+    g->add(add1);
+    g->add(add2);
+    g->add(split1);
+    g->add(add3);
+    REQUIRE(g->getNodes().size() == 7);
+
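+    // getOrderedNodes() returns a topological ordering of the nodes;
+    // getOrderedNodes(true) returns the post-DFS ordering instead.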
+    auto topo = g->getOrderedNodes();
+    SECTION("Topological order") {
+        REQUIRE(topo[0] == data1);
+        REQUIRE(topo[1] == data2);
+        REQUIRE(topo[2] == add1);
+        REQUIRE(topo[3] == data3);
+        REQUIRE(topo[4] == add2);
+        REQUIRE(topo[5] == split1);
+        REQUIRE(topo[6] == add3);
+    }
+
+    auto pdfs = g->getOrderedNodes(true);
+    SECTION("Post DFS order") {
+        REQUIRE(pdfs[0] == add3);
+        REQUIRE(pdfs[1] == split1);
+        REQUIRE(pdfs[2] == add2);
+        REQUIRE(pdfs[3] == add1);
+        REQUIRE(pdfs[4] == data1);
+        REQUIRE(pdfs[5] == data2);
+        REQUIRE(pdfs[6] == data3);
+    }
+
+    // Invert output order
+    g->setOrderedOutputs({{split1, 1}, {add3, 0}});
+    SECTION("Topological order output reversed") {
+        // As add3 depends upon split1, the order should not be changed
+        auto topo2 = g->getOrderedNodes();
+        REQUIRE(topo2 == topo);
+    }
+
+    SECTION("Post DFS order output reversed") {
+        // As add3 depends upon split1, the order should not be changed
+        auto pdfs2 = g->getOrderedNodes(true);
+        REQUIRE(pdfs2 == pdfs);
+    }
+}
+
+TEST_CASE("[core/graph] GraphView(getOrderedNodes) cyclic", "[GraphView][getOrderedNodes]") {
+    auto data1 = Producer({2}, "data1");
+    auto data2 = Producer({2}, "data2");
+    auto add1 = Add(2, "add1");
+    auto mem1 = Memorize(1, "mem1");
+    auto add2 = Add(2, "add2");
+    auto g = std::make_shared<GraphView>("TestGraph");
+    data1->addChild(add1);
+    data2->addChild(add1);
+    add1->addChild(mem1, 0, 1); // init
+    data1->addChild(add2);
+    mem1->addChild(add2);
+    add2->addChild(mem1); // back edge
+    g->add(data1);
+    g->add(data2);
+    g->add(add1);
+    g->add(mem1);
+    g->add(add2);
+    REQUIRE(g->getNodes().size() == 5);
+
+    auto topo = g->getOrderedNodes();
+    SECTION("Topological order") {
+        REQUIRE(topo[0] == data1);
+        REQUIRE(topo[1] == data2);
+        REQUIRE(topo[2] == add1);
+        REQUIRE(topo[3] == mem1);
+        REQUIRE(topo[4] == add2);
+    }
+
+    auto pdfs = g->getOrderedNodes(true);
+    SECTION("post DFS order") {
+        REQUIRE(pdfs[0] == add2);
+        REQUIRE(pdfs[1] == mem1);
+        REQUIRE(pdfs[2] == add1);
+        REQUIRE(pdfs[3] == data1);
+        REQUIRE(pdfs[4] == data2);
+    }
+}
+
 TEST_CASE("[core/graph] GraphView(forwardDims)", "[GraphView][forwardDims]") {
     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
     auto conv1 = Conv(3, 32, {3, 3}, "conv1");
@@ -554,6 +664,69 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myConv, other2}));
     }
 
+    SECTION("replace same input category 1") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", {}, 1, "other_input");
+        auto other1 = GenericOperator("Other", {InputCategory::Data}, 1, "other1");
+        auto myOld = GenericOperator("myOld", {InputCategory::Data}, 1, "old");
+        auto other2 = GenericOperator("Other", {InputCategory::Data}, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myOld);
+        myOld->addChild(other2);
+        g->add({other1, myOld, other2});
+
+        auto myNew =  GenericOperator("myNew", {InputCategory::Data, InputCategory::OptionalData, InputCategory::OptionalData}, 1, "new");
+
+        GraphView::replace({myOld}, {myNew});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myNew, other2}));
+        REQUIRE(myNew->input(0).first == other1);
+        REQUIRE(myNew->input(1).first == nullptr);
+        REQUIRE(myNew->input(2).first == nullptr);
+    }
+
+    SECTION("replace same input category 2") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", {}, 1, "other_input");
+        auto other1 = GenericOperator("Other", {InputCategory::Data}, 1, "other1");
+        auto myOld = GenericOperator("myOld", {InputCategory::Param}, 1, "old");
+        auto other2 = GenericOperator("Other", {InputCategory::Data}, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myOld, 0, 0);
+        myOld->addChild(other2);
+        g->add({other1, myOld, other2});
+
+        auto myNew =  GenericOperator("myNew", {InputCategory::Data, InputCategory::Param, InputCategory::Data}, 1, "new");
+
+        GraphView::replace({myOld}, {myNew});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myNew, other2}));
+        REQUIRE(myNew->input(0).first == nullptr);
+        REQUIRE(myNew->input(1).first == other1);
+        REQUIRE(myNew->input(2).first == nullptr);
+    }
+
+    SECTION("replace same input category 3") {
+        std::shared_ptr<GraphView> g = std::make_shared<GraphView>("test_graph");
+        auto otherInput = GenericOperator("Producer", {}, 1, "other_input");
+        auto other1 = GenericOperator("Other", {InputCategory::Data}, 1, "other1");
+        auto myOld = GenericOperator("myOld", {InputCategory::Data}, 1, "old");
+        auto other2 = GenericOperator("Other", {InputCategory::Data}, 1, "other2");
+        otherInput->addChild(other1);
+        other1->addChild(myOld);
+        myOld->addChild(other2);
+        g->add({other1, myOld, other2});
+
+        auto myNew =  GenericOperator("myNew", {InputCategory::Data, InputCategory::Data, InputCategory::Data}, 1, "new");
+
+        GraphView::replace({myOld}, {myNew});
+
+        REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({other1, myNew, other2}));
+        REQUIRE(myNew->input(0).first == other1);
+        REQUIRE(myNew->input(1).first == other1);
+        REQUIRE(myNew->input(2).first == other1);
+    }
+
     SECTION("Change every Nodes in a GraphView") {
         auto matmulWeight0 = GenericOperator("Producer", 0, 0, 1, "matmul_w0");
         auto addBias0 = GenericOperator("Producer", 0, 0, 1, "add_b0");
diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2fdcd611d378ceb6c3dbdc853920eecf92c31141
--- /dev/null
+++ b/unit_tests/graph/Test_Matching.cpp
@@ -0,0 +1,473 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include <fmt/chrono.h>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Testing.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/MetaOperatorDefs.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/graph/Matching.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+using namespace Aidge;
+
+void checkMatches(const std::set<SinglePassGraphMatching::MatchingResult>& results, const std::map<std::string, std::set<std::string>>& expected) {
+    CHECK(results.size() == expected.size());
+
+    for (const auto& result : results) {
+        const auto found = nodePtrTo(result.graph->getNodes(), nodePtrToName);
+        fmt::print("Found: {}\n", found);
+
+        const auto rootNode = result.graph->rootNode()->name();
+        const auto expectedSet = expected.at(rootNode);
+        REQUIRE(found == expectedSet);
+    }
+}
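+
+// The SECTION names below are queries in the single-pass matching DSL. A
+// rough reading, inferred from the expected results (Matching.hpp holds the
+// authoritative grammar): '->'/'<-' follow a child/parent edge, '~>' is the
+// relaxed edge variant, variants like '-*>', '<*-' and '<1-' select among
+// multiple outputs/inputs, '.' matches any node, '#' names an anchor shared
+// across ';'-separated statements, '$' marks a graph boundary, '[name]'
+// applies a node lambda registered with addNodeLambda(), and '*', '+', '?'
+// and '{n}' are the usual quantifiers.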
+
+TEST_CASE("[core/graph] Matching") {
+    auto g1 = Sequential({
+        Producer({16, 3, 512, 512}, "dataProvider"),
+        Conv(3, 4, {5, 5}, "conv1"),
+        ReLU("relu1"),
+        PaddedConv(4, 8, {5, 5}, "conv2", {1, 1}, {2, 2, 2, 2}),
+        ReLU("relu2"),
+        PaddedConv(8, 16, {3, 3}, "conv3", {1, 1}, {2, 2, 2, 2}),
+        ReLU("relu3"),
+        PaddedConv(8, 16, {5, 5}, "conv4", {1, 1}, {2, 2, 2, 2}),
+        Add(2, "add"),
+        PaddedConv(8, 16, {5, 5}, "conv5", {1, 1}, {2, 2, 2, 2}),
+        ReLU("relu5"),
+        Add(2, "add2")
+    });
+
+    g1->getNode("relu3")->addChild(g1->getNode("add"), 0, 1);
+    g1->getNode("conv5")->addChild(g1->getNode("add2"), 0, 1);
+    g1->updateInputsOutputs();
+
+    g1->save("Test_examples_before_expand", true);
+    expandMetaOps(g1);
+    g1->save("Test_examples", true);
+
+    SECTION("Conv->(ReLU->Pad->Conv)*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU->Pad->Conv)*");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "relu1", "relu2"}},
+            {"conv2_conv", {"conv2_conv", "conv3_conv", "conv3_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv"}},
+            {"conv4_conv", {"conv4_conv"}},
+            {"conv5_conv", {"conv5_conv"}}
+        });
+    }
+
+    SECTION("Conv->ReLU;ReLU->Pad") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv->ReLU;ReLU->Pad"));
+    }
+
+    SECTION("Conv->ReLU#1;ReLU#2->Pad") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv->ReLU#1;ReLU#2->Pad"));
+    }
+
+    SECTION("Conv?->ReLU") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("Conv?->ReLU"));
+    }
+
+    SECTION("(Add#<*~.)*") {
+        REQUIRE_THROWS(SinglePassGraphMatching(g1).match("(Add#<*~.)*"));
+    }
+
+    SECTION("Conv->(ReLU~>Pad->Conv)*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU~>Pad->Conv)*");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}},
+            {"conv2_conv", {"conv2_conv", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu2", "relu3"}},
+            {"conv3_conv", {"conv3_conv", "conv4_conv", "conv4_pad", "relu3"}},
+            {"conv4_conv", {"conv4_conv"}},
+            {"conv5_conv", {"conv5_conv"}}
+        });
+    }
+
+    SECTION("Conv->(ReLU~>Pad->Conv)* [disjoint]") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv->(ReLU~>Pad->Conv)*", true);
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}},
+            {"conv5_conv", {"conv5_conv"}}
+        });
+    }
+
+    SECTION("Conv~>(ReLU~>Pad->Conv)*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv~>(ReLU~>Pad->Conv)*");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "conv2_conv", "conv2_pad", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu1", "relu2", "relu3"}},
+            {"conv2_conv", {"conv2_conv", "conv3_conv", "conv3_pad", "conv4_conv", "conv4_pad", "relu2", "relu3"}},
+            {"conv3_conv", {"conv3_conv", "conv4_conv", "conv4_pad", "relu3"}},
+            {"conv4_conv", {"conv4_conv"}},
+            {"conv5_conv", {"conv5_conv"}}
+        });
+    }
+
+    SECTION("Pad->Conv#->ReLU;Conv#<1-Producer;Conv#<2-Producer") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;Conv#<1-Producer;Conv#<2-Producer");
+
+        checkMatches(results, {
+            {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
+            {"conv3_pad", {"conv3_b", "conv3_conv", "conv3_pad", "conv3_w", "relu3"}}
+        });
+    }
+
+    SECTION("Pad->Conv#~>ReLU;Conv#<1-Producer;Conv#<2-Producer") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;Conv#<1-Producer;Conv#<2-Producer");
+
+        checkMatches(results, {
+            {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
+            {"conv3_pad", {"conv3_b", "conv3_conv", "conv3_pad", "conv3_w", "relu3"}},
+            {"conv5_pad", {"conv5_b", "conv5_conv", "conv5_pad", "conv5_w", "relu5"}}
+        });
+    }
+
+    SECTION("Pad->Conv#~>ReLU;(Conv#<*-Producer){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;(Conv#<*-Producer){2}");
+
+        checkMatches(results, {
+            {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
+            {"conv3_pad", {"conv3_b", "conv3_conv", "conv3_pad", "conv3_w", "relu3"}},
+            {"conv5_pad", {"conv5_b", "conv5_conv", "conv5_pad", "conv5_w", "relu5"}}
+        });
+    }
+
+    SECTION("Pad->Conv#->ReLU;(Conv#<*-Producer){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;(Conv#<*-Producer){2}");
+
+        checkMatches(results, {
+            {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
+            {"conv3_pad", {"conv3_b", "conv3_conv", "conv3_pad", "conv3_w", "relu3"}}
+        });
+    }
+
+    SECTION("Pad->Conv#~>ReLU;(Conv#<*-.){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#~>ReLU;(Conv#<*-.){2}");
+
+        checkMatches(results, {
+            {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
+            {"conv3_pad", {"conv3_b", "conv3_conv", "conv3_pad", "conv3_w", "relu3"}},
+            {"conv5_pad", {"conv5_b", "conv5_conv", "conv5_pad", "conv5_w", "relu5"}}
+        });
+    }
+
+    SECTION("Pad->Conv#->ReLU;(Conv#<*-.){2}") {
+        const auto results = SinglePassGraphMatching(g1).match("Pad->Conv#->ReLU;(Conv#<*-.){2}");
+
+        checkMatches(results, {
+            {"conv2_pad", {"conv2_b", "conv2_conv", "conv2_pad", "conv2_w", "relu2"}},
+            {"conv3_pad", {"conv3_b", "conv3_conv", "conv3_pad", "conv3_w", "relu3"}}
+        });
+    }
+
+    SECTION("Conv#~>ReLU*;Conv#<-Pad*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU*;Conv#<-Pad*");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv", "conv3_pad", "relu3"}},
+            {"conv4_conv", {"conv4_conv", "conv4_pad"}},
+            {"conv5_conv", {"conv5_conv", "conv5_pad", "relu5"}}
+        });
+    }
+
+    SECTION("Conv#->ReLU*;Conv#<-Pad*") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU*;Conv#<-Pad*");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv", "conv3_pad", "relu3"}},
+            {"conv4_conv", {"conv4_conv", "conv4_pad"}},
+            {"conv5_conv", {"conv5_conv", "conv5_pad"}}
+        });
+    }
+
+    SECTION("Conv#->ReLU?-*>Add#1?->ReLU?;Conv#<-Pad?;(Add#1<*-.)?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU?-*>Add#1?->ReLU?;Conv#<-Pad?;(Add#1<*-.)?");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv", "conv3_pad", "relu3"}},
+            {"conv4_conv", {"add", "conv4_conv", "conv4_pad"}},
+            {"conv5_conv", {"conv5_conv", "conv5_pad"}}
+        });
+    }
+
+    SECTION("Conv#~>ReLU?-*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*-.)?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?-*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*-.)?");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv", "conv3_pad", "relu3"}},
+            {"conv4_conv", {"add", "conv4_conv", "conv4_pad"}},
+            {"conv5_conv", {"add2", "conv5_conv", "conv5_pad", "relu5"}}
+        });
+    }
+
+    SECTION("Conv#~>ReLU?~*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*~.)?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?~*>Add#1?~>ReLU?;Conv#<-Pad?;(Add#1<*~.)?");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"add", "conv3_conv", "conv3_pad", "conv4_conv", "relu3"}},
+            {"conv4_conv", {"add", "conv4_conv", "conv4_pad", "relu3"}},
+            {"conv5_conv", {"add2", "conv5_conv", "conv5_pad", "relu5"}}
+        });
+    }
+
+    SECTION("Conv#->ReLU?;Conv#<-Pad?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#->ReLU?;Conv#<-Pad?");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv", "conv3_pad", "relu3"}},
+            {"conv4_conv", {"conv4_conv", "conv4_pad"}},
+            {"conv5_conv", {"conv5_conv", "conv5_pad"}}
+        });
+    }
+
+    SECTION("Conv#~>ReLU?;Conv#<-Pad?") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv#~>ReLU?;Conv#<-Pad?");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "relu1"}},
+            {"conv2_conv", {"conv2_conv", "conv2_pad", "relu2"}},
+            {"conv3_conv", {"conv3_conv", "conv3_pad", "relu3"}},
+            {"conv4_conv", {"conv4_conv", "conv4_pad"}},
+            {"conv5_conv", {"conv5_conv", "conv5_pad", "relu5"}}
+        });
+    }
+
+    SECTION("(Conv|ReLU)->Add") {
+        const auto results = SinglePassGraphMatching(g1).match("(Conv|ReLU)->Add");
+
+        checkMatches(results, {
+            {"conv4_conv", {"add", "conv4_conv"}},
+            {"relu5", {"add2", "relu5"}}
+        });
+    }
+
+    SECTION("Add<*-.") {
+        const auto results = SinglePassGraphMatching(g1).match("Add<*-.");
+
+        checkMatches(results, {
+            {"add", {"add", "conv4_conv"}},
+            {"add2", {"add2", "relu5"}}
+        });
+    }
+
+    SECTION("(Add#<*~.)+") {
+        const auto results = SinglePassGraphMatching(g1).match("(Add#<*~.)+");
+
+        checkMatches(results, {
+            {"add", {"add", "conv4_conv", "relu3"}},
+            {"add2", {"add2", "conv5_conv", "relu5"}}
+        });
+    }
+
+    SECTION("Conv~*>(ReLU&Add)") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv~*>(ReLU&Add)");
+
+        checkMatches(results, {
+            {"conv5_conv", {"add2", "conv5_conv", "relu5"}}
+        });
+    }
+
+    SECTION("Conv~>(ReLU&Add)") {
+        const auto results = SinglePassGraphMatching(g1).match("Conv~>(ReLU&Add)");
+        REQUIRE(results.size() == 0);
+    }
+
+    SECTION("ReLU~*>((Pad->Conv-*>Add#)&Add#)") {
+        const auto results = SinglePassGraphMatching(g1).match("ReLU~*>((Pad->Conv-*>Add#)&Add#)");
+
+        checkMatches(results, {
+            {"relu3", {"add", "conv4_conv", "conv4_pad", "relu3"}}
+        });
+    }
+
+    SECTION("ReLU-*>((Pad->Conv-*>Add)&Add)") {
+        const auto results = SinglePassGraphMatching(g1).match("ReLU-*>((Pad->Conv-*>Add)&Add)");
+        REQUIRE(results.size() == 0);
+    }
+
+    SECTION("Pad->Conv[3x3]->ReLU") {
+        auto gm = SinglePassGraphMatching(g1);
+        gm.addNodeLambda("3x3", [](const NodePtr& node) {
+            const std::shared_ptr<Conv_Op<2>> op =
+                std::static_pointer_cast<Conv_Op<2>>(node->getOperator());
+            return (op->kernelDims() == std::array<DimSize_t, 2>({3, 3}));
+        });
+
+        const auto results = gm.match("Pad->Conv[3x3]->ReLU");
+
+        checkMatches(results, {
+            {"conv3_pad", {"conv3_conv", "conv3_pad", "relu3"}}
+        });
+    }
+
+    SECTION(".[test]->Pad") {
+        auto gm = SinglePassGraphMatching(g1);
+        gm.addNodeLambda("test", [](const NodePtr& node) {
+            return (node->type() == "Add" || (node->type() == "ReLU" && node->name() == "relu1"));
+        });
+
+        const auto results = gm.match(".[test]->Pad");
+
+        checkMatches(results, {
+            {"add", {"add", "conv5_pad"}},
+            {"relu1", {"relu1", "conv2_pad"}}
+        });
+    }
+
+    auto g2 = Sequential({
+        Producer({16, 3, 512, 512}, "dataProvider"),
+        Conv(3, 4, {5, 5}, "conv1"),
+        BatchNorm<2>(4, 1.0e-5, 0.1, "bn1"),
+        Conv(4, 4, {5, 5}, "conv2"),
+        ReLU("relu2"),
+        Conv(4, 4, {5, 5}, "conv3"),
+        BatchNorm<2>(4, 1.0e-5, 0.1, "bn3"),
+        FC(4, 4, false, "fc1"),
+        FC(4, 4, false, "fc2"),
+        FC(4, 4, false, "fc3"),
+        ReLU("relu3"),
+        Conv(1, 4, {5, 5}, "conv4")
+    });
+
+    SECTION("((Conv#->(.[exBN]|$))|(FC#->(.[exFC])*->$))") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exBN", [](const NodePtr& node) {
+            return (node->type() != "BatchNorm");
+        });
+        gm.addNodeLambda("exFC", [](const NodePtr& node) {
+            return (node->type() != "FC");
+        });
+
+        const auto results = gm.match("((Conv#->(.[exBN]|$))|(FC#->(.[exFC])*->$))");
+
+        checkMatches(results, {
+            {"conv2", {"conv2", "relu2"}},
+            {"conv4", {"conv4"}},
+            {"fc3", {"fc3", "relu3", "conv4"}}
+        });
+    }
+
+    // Find last node of a type
+    SECTION("FC#->(.[exFC])*->$") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exFC", [](const NodePtr& node) {
+            return (node->type() != "FC");
+        });
+
+        const auto results = gm.match("FC#->(.[exFC])*->$");
+
+        checkMatches(results, {
+            {"fc3", {"fc3", "relu3", "conv4"}}
+        });
+    }
+
+    SECTION("Conv#->(.[exConv])*->$") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exConv", [](const NodePtr& node) {
+            return (node->type() != "Conv");
+        });
+
+        const auto results = gm.match("Conv#->(.[exConv])*->$");
+
+        checkMatches(results, {
+            {"conv4", {"conv4"}}
+        });
+    }
+
+    // Find first node of a type
+    SECTION("FC#<-(.[exFC])*<-$") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exFC", [](const NodePtr& node) {
+            return (node->type() != "FC");
+        });
+
+        const auto results = gm.match("FC#<-(.[exFC])*<-$");
+
+        checkMatches(results, {
+            {"fc1", {"fc1", "bn3", "conv3", "relu2", "conv2", "bn1", "conv1", "dataProvider"}}
+        });
+    }
+
+    SECTION("(((FC#|Conv#)<-(.[exParam])*<-$)|((FC#|Conv#)->(.[exParam])*->$));(FC#|Conv#)<1-Producer#") {
+        auto gm = SinglePassGraphMatching(g2);
+        gm.addNodeLambda("exParam", [](const NodePtr& node) {
+            return (node->type() != "FC" && node->type() != "Conv");
+        });
+
+        const auto results = gm.match("(((FC#|Conv#)<-(.[exParam])*<-$)|((FC#|Conv#)->(.[exParam])*->$));(FC#|Conv#)<1-Producer#");
+
+        checkMatches(results, {
+            {"conv1", {"conv1", "conv1_w", "dataProvider"}},
+            {"conv4", {"conv4", "conv4_w"}}
+        });
+    }
+
+    SECTION("Conv->ReLU [perf]") {
+        const size_t nbTests = 3;
+        std::mt19937::result_type seed(1);
+
+        for (int test = 0; test < nbTests; ++test) {
+            RandomGraph randGraph;
+            randGraph.types = {"Conv", "ReLU", "Dummy"};
+            randGraph.typesWeights = {0.4, 0.4, 0.2};
+            randGraph.avgIn = 1;
+            randGraph.maxIn = 1;
+            randGraph.maxOut = 1;
+            randGraph.avgOut = 1;
+            randGraph.density = 0.9;
+            randGraph.acyclic = true;
+            const auto g1 = std::make_shared<GraphView>("g1");
+
+            Log::setConsoleLevel(Log::Warn);
+            g1->add(randGraph.gen(seed, 100));
+            g1->save("graph_single_pass");
+
+            auto gm = SinglePassGraphMatching(g1);
+
+            const auto start = std::chrono::system_clock::now();
+            const auto results = gm.match("Conv->ReLU#;ReLU#->Dummy");
+            const auto end = std::chrono::system_clock::now();
+            const auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
+
+            REQUIRE(results.size() > 0);
+            ++seed;
+
+            fmt::print("Found: {} - duration: {}\n", results.size(), duration);
+        }
+    }
+}
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
index a62b9a8602b494f26fb47061b899eaba41129a1f..68ac509e79e347106a9a132249f125ebe6e39f6a 100644
--- a/unit_tests/graphRegex/Test_GraphRegex.cpp
+++ b/unit_tests/graphRegex/Test_GraphRegex.cpp
@@ -31,11 +31,11 @@ TEST_CASE("GraphRegexUser") {
         g1->addChild(fc, "c");
         g1->addChild(conv2, "c1");
         g1->addChild(fc2, "c2");
-        
+
         ///
         std::shared_ptr<GraphRegex> sut = std::make_shared<GraphRegex>();
         sut->setNodeKey("C",+[](NodePtr NodeOp){return NodeOp->type() == "FC";});
-        
+
         sut->setNodeKey("A","C($)==True");
         sut->addQuery("A");
         auto match = sut->match(g1);
@@ -163,42 +163,32 @@ TEST_CASE("GraphRegexUser") {
         auto w1 = Producer({5,5},"W1");
         auto input = Producer({2,5}, "input");
 
-        input->addChild(matmul0, 0, 0);
-        w0->addChild(matmul0, 0, 1);
+        input->addChild(matmul0, 0, 1);
+        w0->addChild(matmul0, 0, 0);
 
         matmul0->addChild(add0, 0, 0);
         b0->addChild(add0, 0, 1);
 
-        add0->addChild(matmul1, 0, 0);
-        w1->addChild(matmul1, 0, 1);
+        add0->addChild(matmul1, 0, 1);
+        w1->addChild(matmul1, 0, 0);
 
         matmul1->addChild(add1, 0, 0);
         b1->addChild(add1, 0, 1);
 
-        auto fc = GenericOperator("FC", 1, 0, 1, "c");
-        auto fl = GenericOperator("Flatten", 1, 0, 1, "c");
-
-
+        auto fc = GenericOperator("FC", 1, 0, 1, "fc1");
+        auto fl = GenericOperator("Flatten", 1, 0, 1, "flatten0");
+        add1->addChild(fl, 0, 0);
+        fl->addChild(fc, 0, 0);
         auto g = std::make_shared<GraphView>();
-        g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1,fl,fc});
-
-        std::shared_ptr<GraphRegex> kitchenBook = std::make_shared<GraphRegex>();
-
-        kitchenBook->setNodeKey("Add","getType($) =='Add'");
-        kitchenBook->setNodeKey("MatMul","getType($) =='MatMul'");
-        kitchenBook->setNodeKey("Flatten","getType($) =='Flatten'");
-        kitchenBook->setNodeKey("FC","getType($) =='FC'");
-
-        kitchenBook->addQuery("MatMul->Add",static_cast<void(*)(std::shared_ptr<MatchSolution>)>(fuseMulAdd));
-        kitchenBook->addQuery("Flatten->FC",static_cast<void(*)(std::shared_ptr<MatchSolution>)>(removeFlatten));
-
-        kitchenBook->appliedRecipes(g);
+        g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1, fl, fc});
 
+        matMulToFC(g);
+        removeFlatten(g);
         std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
-        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1,fc}));
+        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1,fl,fc}));
 	    //REQUIRE(newNodes.size() == 6);
 
 
     }
 
-}
\ No newline at end of file
+}
diff --git a/unit_tests/operator/Test_BitShift_Op.cpp b/unit_tests/operator/Test_BitShift_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..39916e4e75779ecc63680b43ece8ccd2bdc667c9
--- /dev/null
+++ b/unit_tests/operator/Test_BitShift_Op.cpp
@@ -0,0 +1,133 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/BitShift.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] BitShift_Op(forwardDims)", "[BitShift][forwardDims]") 
+{
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Shift Operator
+    std::shared_ptr<Node> myShift = BitShift(BitShift_Op::BitShiftDirection::right);
+    auto op = std::static_pointer_cast<OperatorTensor>(myShift-> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+     SECTION("BitShifOP Test dimensions [Scalar]") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+
+    SECTION("BitShifOP Test dimensions [Same Size]") {
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+    }
+    SECTION("BitShifOP Test dimensions [Broadcast]") {
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
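+            // Also drop some leading dimensions of input_1 to exercise
+            // broadcasting between tensors of different ranks.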
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+        }
+    }
+    SECTION("BitShifOP Test dimensions [Wrong Dimensions]") {
+        
+       for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->forwardDims());
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_ConcatImpl.cpp b/unit_tests/operator/Test_ConcatImpl.cpp
index 184c02d5208c99b903cf838784bb14fb65799111..fcdf3e8cc1bc07493cfa84608f200f9f334a29cc 100644
--- a/unit_tests/operator/Test_ConcatImpl.cpp
+++ b/unit_tests/operator/Test_ConcatImpl.cpp
@@ -18,6 +18,14 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
+    SECTION("Concat scalar inputs") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(2);
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(4);
+        auto myConcat = Concat(2, 0);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        REQUIRE_THROWS(myConcat->forward());
+    }
     SECTION("Concat 1D inputs") {
         std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array1D<int,2>{{ 2, 3 }});
         std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array1D<int,3>{{ 4, 5, 6 }});
@@ -140,4 +148,4 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
 
         REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
     }
-}
\ No newline at end of file
+}
diff --git a/unit_tests/operator/Test_ConstantOfShape.cpp b/unit_tests/operator/Test_ConstantOfShape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c10d97ce5fb774e051e75f051772e1cbcd41dbea
--- /dev/null
+++ b/unit_tests/operator/Test_ConstantOfShape.cpp
@@ -0,0 +1,85 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <algorithm>
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <cstddef> // std::size_t
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <random> // std::mt19937, std::uniform_int_distribution
+#include <system_error>
+#include <vector>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/filler/Filler.hpp"
+#include "aidge/operator/ConstantOfShape.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+TEST_CASE("[core/operator] ConstantOfShape_Op(forwardDims)",
+          "[ConstantOfShape][forwardDims]") {
+  constexpr std::uint16_t NBTRIALS = 10;
+
+  // Create a random number generator
+  auto random_seed = Catch::Generators::Detail::getSeed;
+  std::mt19937 gen(random_seed());
+  std::uniform_int_distribution<std::size_t> input_tensor_dims_dist(1, 10);
+  std::uniform_int_distribution<std::size_t> input_tensor_value_dist(1, 9);
+  std::uniform_real_distribution<float> op_value_attr_value_dist(1, 10000);
+
+  std::uniform_int_distribution<std::size_t> op_value_attr_type_dist(
+      0, static_cast<int>(Aidge::DataType::UInt64));
+  // TENSORS
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+  input_T->setDataType(Aidge::DataType::Int64);
+  input_T->setBackend("cpu");
+
+  SECTION("operator test") {
+    // Create Operator
+    for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+      std::shared_ptr<Node> node =
+          ConstantOfShape(Tensor(op_value_attr_value_dist(gen)));
+      auto op =
+          std::static_pointer_cast<ConstantOfShape_Op>(node->getOperator());
+      op->associateInput(0, input_T);
+
+      std::vector<DimSize_t> input_dims;
+      input_dims.push_back(input_tensor_dims_dist(gen));
+
+      Log::setConsoleLevel(Log::Debug);
+      const std::size_t input_nb_elems = input_dims.at(0);
+      std::int64_t output_nb_elems = 1;
+      std::unique_ptr<std::int64_t[]> array_in(new std::int64_t[input_nb_elems]);
+      for (std::size_t i = 0; i < input_nb_elems; ++i) {
+        std::int64_t val = input_tensor_value_dist(gen);
+        array_in[i] = val;
+        output_nb_elems *= val;
+      }
+
+      input_T->resize(input_dims);
+      op->setInput(0, input_T);
+      input_T->getImpl()->setRawPtr(array_in.get(), input_nb_elems);
+
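+      // ConstantOfShape reads the output shape from the *values* of its
+      // input: the output rank equals the input size and each output dim
+      // equals the corresponding input value (checked below).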
+      REQUIRE(op->forwardDims(true));
+      REQUIRE(input_T->size() == op->getOutput(0)->nbDims());
+      for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) {
+        CHECK(array_in[i] == op->getOutput(0)->dims().at(i));
+      }
+    }
+  }
+}
+} // namespace Aidge
+
diff --git a/unit_tests/operator/Test_DepthToSpaceImpl.cpp b/unit_tests/operator/Test_DepthToSpaceImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..62f760ce8b3942ab3101ff5e1324307a46048b91
--- /dev/null
+++ b/unit_tests/operator/Test_DepthToSpaceImpl.cpp
@@ -0,0 +1,87 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/DepthToSpace.hpp"
+
+
+namespace Aidge {
+
+TEST_CASE("[core/operator] DepthToSpace_Op", "[DepthToSpace][forwardDims]") {
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+
+    SECTION("Nb dimensions") {
+        // Create DepthToSpace operator with block_size of 1 compatible with any size
+        std::shared_ptr<Node> myDTS = DepthToSpace(1);
+        auto op = std::static_pointer_cast<OperatorTensor>(myDTS -> getOperator());
+
+        SECTION("Scalar") {
+            // input_0
+            std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(9);
+            op -> associateInput(0,T0);
+            REQUIRE_THROWS(op->forwardDims());
+        }
+        SECTION("+1-D") {
+            // input_0
+            std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+            op -> associateInput(0,T0);
+
+            for (std::uint16_t nb_dims = 0; nb_dims < 6; ++nb_dims) {
+
+                std::vector<std::size_t> dims0(nb_dims);
+                for (std::size_t i = 0; i < nb_dims; ++i) {
+                    dims0[i] = dimsDist(gen);
+                }
+                T0->resize(dims0);
+                if (nb_dims == 4) {
+                    REQUIRE_NOTHROW(op->forwardDims());
+                } else {
+                    REQUIRE_THROWS(op->forwardDims());
+                }
+            }
+        }
+    }
+
+    SECTION("Propagation") {
+        // input_0 with 4-D in NCHW format
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(std::vector<DimSize_t>({1, 16, 100, 100}));
+
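+        // DepthToSpace with block_size b maps {N, C, H, W} to
+        // {N, C/(b*b), H*b, W*b}; C must be divisible by b*b, hence
+        // block_size 7 must throw for C = 16 while block_size 4 must not.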
+        DepthToSpace_Op myDTS_should_throw = DepthToSpace_Op(7);
+        myDTS_should_throw.associateInput(0,T0);
+
+        REQUIRE_THROWS(myDTS_should_throw.forwardDims());
+
+        DepthToSpace_Op myDTS_should_not_throw = DepthToSpace_Op(4);
+        myDTS_should_not_throw.associateInput(0,T0);
+
+        REQUIRE_NOTHROW(myDTS_should_not_throw.forwardDims());
+        REQUIRE(myDTS_should_not_throw.getOutput(0)->dims() == std::vector<std::size_t>({1,1,400,400}));
+    }
+}
+
+TEST_CASE("[core/operator] DepthToSpace_Op impl", "[DepthToSpace][forward]") {
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+
+
+}
+
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Div_Op.cpp b/unit_tests/operator/Test_Div_Op.cpp
index d11f72474b0b70bf335dfee95d13a9b41cfe6efb..d35edec17cd9732119cfcaf249b5e7965a14ea65 100644
--- a/unit_tests/operator/Test_Div_Op.cpp
+++ b/unit_tests/operator/Test_Div_Op.cpp
@@ -10,9 +10,10 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 #include <cstddef>  // std::size_t
 #include <memory>
-#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <random>   // std::mt19937, std::uniform_int_distribution
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
@@ -24,7 +25,7 @@ TEST_CASE("[core/operator] Div_Op(forwardDims)", "[Div][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
-    std::random_device rd;
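+    // Seed from Catch2 rather than std::random_device so that a failing run
+    // can be replayed with the --rng-seed command-line option.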
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
@@ -44,54 +45,54 @@ TEST_CASE("[core/operator] Div_Op(forwardDims)", "[Div][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_GenericOperator.cpp b/unit_tests/operator/Test_GenericOperator.cpp
index 8d634cc3a105c423b54b6003f41204aeb1fc5335..41bad69749fd82f892c6faa625739d0493396c73 100644
--- a/unit_tests/operator/Test_GenericOperator.cpp
+++ b/unit_tests/operator/Test_GenericOperator.cpp
@@ -20,7 +20,7 @@ using namespace Aidge;
 TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("INT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        const char* key = "intAttr";
+        const char* key = "IntAttr";
         Testop.addAttr(key, int(5));
         int registeredVal = Testop.getAttr<int>(key);
         REQUIRE(registeredVal == 5);
@@ -28,21 +28,21 @@ TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("LONG") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         long value = 3;
-        const char* key = "longAttr";
+        const char* key = "LongAttr";
         Testop.addAttr(key, value);
         REQUIRE(Testop.getAttr<long>(key) == value);
     }
     SECTION("FLOAT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         float value = 2.0;
-        const char* key = "floatAttr";
+        const char* key = "FloatAttr";
         Testop.addAttr(key, value);
         REQUIRE(Testop.getAttr<float>(key) == value);
     }
      SECTION("VECTOR<BOOL>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<bool> value = {true, false, false, true, true};
-        const char* key = "vect";
+        const char* key = "Vect";
         Testop.addAttr(key, value);
 
         REQUIRE(Testop.getAttr<std::vector<bool>>(key).size() == value.size());
@@ -53,7 +53,7 @@ TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
     SECTION("VECTOR<INT>") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         std::vector<int> value = {1, 2, 3, 4, 5, 6, 7, 8, 9};
-        const char* key = "vect";
+        const char* key = "Vect";
         Testop.addAttr(key, value);
 
         REQUIRE(Testop.getAttr<std::vector<int>>(key).size() == value.size());
@@ -66,23 +66,23 @@ TEST_CASE("[core/operators] GenericOp(add & get attributes)", "[Operator]") {
         Goal : Test that the offsets are well done by adding different attributes with different size.
         */
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addAttr<long>("longAttr", 3);
-        Testop.addAttr<float>("floatAttr", 2.0);
-        Testop.addAttr<uint8_t>("uint8Attr", 5);
-        Testop.addAttr<long long>("llAttr", 10);
-        REQUIRE(Testop.getAttr<long>("longAttr") == 3);
-        REQUIRE(Testop.getAttr<float>("floatAttr") == 2.0);
-        REQUIRE(Testop.getAttr<uint8_t>("uint8Attr") == 5);
-        REQUIRE(Testop.getAttr<long long>("llAttr") == 10);
+        Testop.addAttr<long>("LongAttr", 3);
+        Testop.addAttr<float>("FloatAttr", 2.0);
+        Testop.addAttr<uint8_t>("Uint8Attr", 5);
+        Testop.addAttr<long long>("LlAttr", 10);
+        REQUIRE(Testop.getAttr<long>("LongAttr") == 3);
+        REQUIRE(Testop.getAttr<float>("FloatAttr") == 2.0);
+        REQUIRE(Testop.getAttr<uint8_t>("Uint8Attr") == 5);
+        REQUIRE(Testop.getAttr<long long>("LlAttr") == 10);
     }
 }
 
 TEST_CASE("[core/operator] GenericOp(type check)", "[Operator]") {
     SECTION("WRONG TYPE FOR GETTER") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
-        Testop.addAttr<long>("longAttr", 3);
+        Testop.addAttr<long>("LongAttr", 3);
 
         // This line should raise a failed assertion
-        REQUIRE_THROWS(Testop.getAttr<int>("longAttribute"));
+        REQUIRE_THROWS(Testop.getAttr<int>("LongAttribute"));
     }
 }
diff --git a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
index d20f689aba55d8cbaef553388d4666fd6c1d7172..15c714b63c2b86e156b43cdaec390ddf60eb7353 100644
--- a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
+++ b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
@@ -10,9 +10,10 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 #include <cstddef> // std::size_t
 #include <memory>
-#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <random> // std::mt19937, std::uniform_int_distribution
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
@@ -25,7 +26,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(forwardDims)",
           "[GlobalAveragePooling][forwardDims]") {
   constexpr std::uint16_t NB_TRIALS = 10;
   // Create a random number generator
-  std::random_device rd;
+  auto rd = Catch::Generators::Detail::getSeed;
   std::mt19937 gen(rd());
   std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
   std::uniform_int_distribution<std::size_t> inf3DimsDistribution(1, 2);
@@ -46,9 +47,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(forwardDims)",
   SECTION("Connected Inputs") {
     SECTION("empty tensor") {
       for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
-        const std::size_t nb_dims = 0;
-        std::vector<std::size_t> dims(nb_dims);
-        input_T->resize(dims);
+        // Check that forwardDims() does not fail on an undefined input
         REQUIRE_NOTHROW(op->forwardDims());
       }
     }
diff --git a/unit_tests/operator/Test_GridSample_Op.cpp b/unit_tests/operator/Test_GridSample_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ae38ec7083a0df49fb241509bf52895765ddb0e8
--- /dev/null
+++ b/unit_tests/operator/Test_GridSample_Op.cpp
@@ -0,0 +1,89 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/GridSample.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+
+TEST_CASE("[core/operator] GridSample_Op(forwardDims)", "[GridSample][forwardDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    auto rd = Catch::Generators::Detail::getSeed;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create GridSample Operator
+    std::shared_ptr<Node> myGridSample = GridSample(GridSample_Op::Mode::Cubic, GridSample_Op::PaddingMode::Border, false);
+    auto op = std::static_pointer_cast<OperatorTensor>(myGridSample -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> data_in0 = std::make_shared<Tensor>();
+    op -> associateInput(0,data_in0);
+    // input_1
+    std::shared_ptr<Tensor> grid_in1 = std::make_shared<Tensor>();
+    op -> associateInput(1,grid_in1);
+
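+    // Following the ONNX GridSample convention: data is (N, C, H, W), the grid
+    // is (N, H_out, W_out, 2) and the output is (N, C, H_out, W_out).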
+    SECTION("Valid shape provided") {
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            std::size_t N = dimsDist(gen);
+            std::size_t C = dimsDist(gen);
+            std::size_t H_data_in0 = dimsDist(gen);
+            std::size_t W_data_in0 = dimsDist(gen);
+            std::size_t H_grid_in1 = dimsDist(gen);
+            std::size_t W_grid_in1 = dimsDist(gen);
+
+            data_in0->resize({N, C, H_data_in0, W_data_in0});
+            grid_in1->resize({N, H_grid_in1, W_grid_in1, 2});
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == std::vector<std::size_t>({N, C, H_grid_in1, W_grid_in1}));
+        }
+    }
+    SECTION("Invalid shape provided") {
+        std::size_t N_in = dimsDist(gen);
+        std::size_t C = dimsDist(gen);
+        std::size_t H_data_in0 = dimsDist(gen);
+        std::size_t W_data_in0 = dimsDist(gen);
+        std::size_t H_grid_in1 = dimsDist(gen);
+        std::size_t W_grid_in1 = dimsDist(gen);
+
+        // different batch number
+        std::size_t N_out = N_in+1;
+        data_in0->resize({N_in, C, H_data_in0, W_data_in0});
+        grid_in1->resize({N_out, H_grid_in1, W_grid_in1, 2});
+        REQUIRE_THROWS(op->forwardDims());
+
+        // different number of dimensions
+        data_in0->resize({N_in, C, H_data_in0, W_data_in0});
+        grid_in1->resize({N_in, H_grid_in1, W_grid_in1, 2, 2});
+        REQUIRE_THROWS(op->forwardDims());
+
+        // wrong number of pixel coordinates
+        data_in0->resize({N_in, C, H_data_in0, W_data_in0});
+        grid_in1->resize({N_in, H_grid_in1, W_grid_in1, 2 + dimsDist(gen)});
+        REQUIRE_THROWS(op->forwardDims());
+    }
+}
+
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_MatMul_Op.cpp b/unit_tests/operator/Test_MatMul_Op.cpp
index bdd1de87c27351e943c59fa616c40dc4a0001abc..876c1ac764efe54475f6d45982acca76aacb7528 100644
--- a/unit_tests/operator/Test_MatMul_Op.cpp
+++ b/unit_tests/operator/Test_MatMul_Op.cpp
@@ -10,9 +10,10 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 #include <cstddef>  // std::size_t
 #include <memory>
-#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <random>   // std::mt19937, std::uniform_int_distribution
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
@@ -22,10 +23,11 @@
 namespace Aidge {
 TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
     // Create a random number generator
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dist(1, 10);
 
+    std::cerr << "Test case start, random " << dist(gen) << " " << rd() << std::endl;
     // Create MatMul Operator
     std::shared_ptr<Node> myMatMul = MatMul();
     auto op = std::static_pointer_cast<OperatorTensor>(myMatMul -> getOperator());
@@ -33,24 +35,24 @@ TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
     /** @todo Special case of scalar Tensor objects.
      * Not handled yet.
     */
-    // SECTION("0-D / 0-D") {
-    //     std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
-    //     T0->resize({});
-    //     op -> associateInput(0,T0);
+    SECTION("0-D / 0-D") {
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        T0->resize({});
+        op -> associateInput(0,T0);
 
-    //     // input_1 - right
-    //     std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
-    //     T1->resize({});
-    //     op -> associateInput(1,T1);
+        // input_1 - right
+        std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+        T1->resize({});
+        op -> associateInput(1,T1);
 
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims()).empty());
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims()).empty());
 
-    //     // input_1 - wrong
-    //     T1->resize({dist(gen)});
+        // input_1 - wrong
+        T1->resize({dist(gen)});
 
-    //     REQUIRE_THROWS(op->forwardDims());
-    // }
+        REQUIRE_THROWS(op->forwardDims());
+    }
 
     SECTION("1-D / N-D") {
         // input_0
@@ -193,4 +195,4 @@ TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
         REQUIRE_THROWS(op -> forwardDims());
     }
 }
-} // namespace Aidge
\ No newline at end of file
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index ed4afafe39a367ecabb25ff949eb3d03999d1ea9..d1b4e2e31e8c57e2c3eebd42019ba9f42c4d39e0 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -39,7 +39,9 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         REQUIRE(microGraph->outputNodes().size() == 1);
         REQUIRE((*microGraph->outputNodes().begin())->getOperator()->type() == "Conv");
         REQUIRE(op->nbInputs() == 3);
-        REQUIRE(op->nbData() == 1);
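+        // nbData() is replaced by per-input categories: data, weight (Param)
+        // and an optional bias (OptionalParam)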
+        REQUIRE(op->inputCategory(0) == InputCategory::Data);
+        REQUIRE(op->inputCategory(1) == InputCategory::Param);
+        REQUIRE(op->inputCategory(2) == InputCategory::OptionalParam);
         REQUIRE(op->nbOutputs() == 1);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(std::vector<std::size_t>({2,1,5,5}));
@@ -66,7 +68,13 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         microGraph->save("lstm", false, false);
 
         REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
-        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->inputCategory(0) == InputCategory::Data);
+        for (size_t i = 1; i < 9; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::Param);
+        }
+        for (size_t i = 9; i < 17; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::OptionalParam);
+        }
         REQUIRE(myLSTM->nbOutputs() == 2);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>();
@@ -94,7 +102,13 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         auto op = std::static_pointer_cast<OperatorTensor>(myLSTM->getOperator());
 
         REQUIRE(myLSTM->nbInputs() == 3 + 8 + 8);
-        REQUIRE(myLSTM->nbData() == 1);
+        REQUIRE(myLSTM->inputCategory(0) == InputCategory::Data);
+        for (size_t i = 1; i < 9; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::Param);
+        }
+        for (size_t i = 9; i < 17; ++i) {
+            REQUIRE(myLSTM->inputCategory(i) == InputCategory::OptionalParam);
+        }
         REQUIRE(myLSTM->nbOutputs() == 2);
 
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
@@ -129,6 +143,6 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         REQUIRE(g->rootNode() == pop);
         g->save("lstm_expanded", true, true);
 
-        REQUIRE(g->getNodes().size() == 41);
+        REQUIRE(g->getNodes().size() == 33);
     }
 }
diff --git a/unit_tests/operator/Test_Mul_Op.cpp b/unit_tests/operator/Test_Mul_Op.cpp
index f3f8fb9522943d0a9574cb80cfc228135a973890..bee90d725b25508abf90813532bb5ca754d8fb9a 100644
--- a/unit_tests/operator/Test_Mul_Op.cpp
+++ b/unit_tests/operator/Test_Mul_Op.cpp
@@ -10,9 +10,10 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 #include <cstddef>  // std::size_t
 #include <memory>
-#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <random>   // std::mt19937, std::uniform_int_distribution
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
@@ -24,7 +25,7 @@ TEST_CASE("[core/operator] Mul_Op(forwardDims)", "[Mul][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
@@ -44,54 +45,54 @@ TEST_CASE("[core/operator] Mul_Op(forwardDims)", "[Mul][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_Pow_Op.cpp b/unit_tests/operator/Test_Pow_Op.cpp
index 4a8d242a355cda58c7b36914efdb1304220f713a..274f7c00b9bd3c3ba57f0463dbe3a1b727141013 100644
--- a/unit_tests/operator/Test_Pow_Op.cpp
+++ b/unit_tests/operator/Test_Pow_Op.cpp
@@ -10,9 +10,10 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 #include <cstddef>  // std::size_t
 #include <memory>
-#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <random>   // std::mt19937, std::uniform_int_distribution
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
@@ -24,7 +25,7 @@ TEST_CASE("[core/operator] Pow_Op(forwardDims)", "[Pow][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
@@ -44,54 +45,54 @@ TEST_CASE("[core/operator] Pow_Op(forwardDims)", "[Pow][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_Squeeze_Op.cpp b/unit_tests/operator/Test_Squeeze_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..471a1dcd1e45384b2c65da75ddee9d3ec039dc34
--- /dev/null
+++ b/unit_tests/operator/Test_Squeeze_Op.cpp
@@ -0,0 +1,457 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Squeeze.hpp"
+
+#include <aidge/utils/Types.h>
+#include <algorithm>
+#include <array>
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+#include <chrono>
+#include <cmath>
+#include <cstddef> // std::size_t
+#include <cstdint> // std::uint16_t
+#include <fmt/core.h>
+#include <iostream>
+#include <iterator>
+#include <memory>
+#include <numeric> // std::accumulate
+#include <ostream>
+#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
+  Log::setConsoleLevel(Log::Notice);
+  constexpr std::uint16_t NB_TRIALS = 10;
+  // Create a random number generator
+  auto random_seed = Catch::Generators::Detail::getSeed;
+  std::mt19937 gen(random_seed());
+
+  // Random distributions for tensor values (0.1 to 1.1) and shapes
+  constexpr int8_t max_nb_dims = 7;
+  std::uniform_real_distribution<float> tensor_value_dist(0.1f, 1.1f);
+  std::uniform_int_distribution<std::size_t> tensor_nb_dims_dist(
+      std::size_t(1), std::size_t(max_nb_dims));
+  std::uniform_int_distribution<std::size_t> tensor_dims_size_dist(
+      std::size_t(1), std::size_t(5));
+  std::uniform_int_distribution<std::size_t> nb_dims_to_squeeze_dist(
+      std::size_t(1), std::size_t(2));
+  std::uniform_int_distribution<short> idx_dims_to_squeeze_dist(-9, 8);
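+  // the [-9, 8] range is deliberately wider than the valid axis range for a
+  // tensor of at most max_nb_dims dimensions, so that out-of-bounds indices
+  // are exercised as well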
+
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+
+  SECTION("ERROR : Inputs not ready") {
+    SECTION("unconnected input") {
+      std::shared_ptr<Node> squeeze_node = Squeeze();
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+      REQUIRE_THROWS(op->forwardDims());
+    }
+
+    SECTION("empty tensor") {
+      // Create the Squeeze Operator
+      std::shared_ptr<Node> squeeze_node = Squeeze(std::vector<int8_t>({0}));
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+      op->associateInput(0, input_T);
+
+      CHECK(op->forwardDims() == false);
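+      // an associated but still empty tensor is not an error: forwardDims()
+      // simply reports that the dimensions could not be computed yet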
+    }
+  }
+  SECTION("ERROR : nb_dims_to_squeeze>input.size()") {
+    constexpr size_t nb_dims_to_squeeze = 100;
+
+    std::vector<int8_t> dims_to_squeeze(nb_dims_to_squeeze);
+    std::generate(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                  [&gen, &idx_dims_to_squeeze_dist]() {
+                    return idx_dims_to_squeeze_dist(gen);
+                  });
+    Log::error("dims_to_sqeeze = {}", dims_to_squeeze);
+
+    std::shared_ptr<Node> squeeze_node = Squeeze(dims_to_squeeze);
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+
+    // input tensor
+    const std::size_t nb_dims = tensor_nb_dims_dist(gen);
+    std::vector<std::size_t> dims_in(nb_dims);
+    std::generate(dims_in.begin(), dims_in.end(),
+                  [&tensor_dims_size_dist, &gen]() {
+                    return tensor_dims_size_dist(gen);
+                  });
+
+    // Test
+    input_T->resize(dims_in);
+    op->setInput(0, input_T);
+    REQUIRE_THROWS(op->forwardDims());
+  }
+  SECTION("Compare with reference output") {
+    SECTION("axes is given via attribute") {
+      SECTION("Squeeze a 1-sized-axis") {
+        std::shared_ptr<Node> squeeze_node = Squeeze(std::vector<int8_t>({0}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{1, 2, 3, 4};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims());
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({2, 3, 4}));
+        CHECK((op->getOutput(0)->dims().size()) == 3);
+      }
+      SECTION("Squeeze multiple 1-sized axes") {
+        // test should be successful
+        std::shared_ptr<Node> squeeze_node =
+            Squeeze(std::vector<int8_t>({1, -4}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{1, 1, 13, 200};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims());
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>{13, 200});
+        CHECK((op->getOutput(0)->dims().size()) == 2);
+      }
+      SECTION("Squeeze a non-1-Sized axis") {
+        int8_t nb_dims = 4;
+        std::shared_ptr<Node> squeeze_node = Squeeze(std::vector<int8_t>({3}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{1, 2, 3, 4};
+        input_T->resize(dims_in);
+
+        REQUIRE_THROWS(op->forwardDims());
+      }
+      SECTION("Squeeze multiple non-sized-axes") {
+        std::shared_ptr<Node> squeeze_node =
+            Squeeze(std::vector<int8_t>({1, -2}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            squeeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        std::array<DimSize_t, 3> dims_in{2, 3, 4};
+        input_T->resize(dims_in);
+
+        REQUIRE_THROWS((op->forwardDims()));
+      }
+    }
+    SECTION("axes is given via tensor") {
+      SECTION("tensor is empty") {
+        // attribute arguments here should be overridden by axes_T values
+        std::shared_ptr<Node> mySqueeze =
+            Squeeze(std::vector<std::int8_t>({0, 4}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            mySqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        auto axes_T =
+            std::make_shared<Aidge::Tensor>(std::vector<DimSize_t>({}));
+        axes_T->setDataType(Aidge::DataType::Int8);
+        axes_T->setBackend("cpu");
+
+        std::vector<DimSize_t> dims_in{3, 1, 4, 1, 1, 5};
+        input_T->resize(dims_in);
+        op->associateInput(0, input_T);
+        op->associateInput(1, axes_T);
+
+        CHECK(op->forwardDims(true));
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({3, 4, 5}));
+      }
+      SECTION("tensor not empty") {
+        // attribute arguments here should be overridden by axes_T values
+        std::shared_ptr<Node> mySqueeze =
+            Squeeze(std::vector<std::int8_t>({3, 1}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            mySqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        auto axes_T =
+            std::make_shared<Aidge::Tensor>(Aidge::Array1D<int8_t, 2>({0, 3}));
+        axes_T->setDataType(Aidge::DataType::Int8);
+        axes_T->setBackend("cpu");
+
+        std::vector<DimSize_t> dims_in{1, 3, 4, 1, 5};
+        input_T->resize(dims_in);
+        op->associateInput(0, input_T);
+        op->associateInput(1, axes_T);
+
+        CHECK(op->forwardDims(true) == true);
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({3, 4, 5}));
+      }
+    }
+  }
+  SECTION("Squeeze()") {
+    // Create the Operator
+    std::shared_ptr<Node> squeeze_node = Squeeze();
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+    op->associateInput(0, input_T);
+
+    for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+      // input tensor
+      const std::size_t nb_dims = tensor_nb_dims_dist(gen);
+      std::vector<std::size_t> dims_in(nb_dims);
+
+      std::generate(dims_in.begin(), dims_in.end(),
+                    [&gen, &tensor_dims_size_dist]() {
+                      return tensor_dims_size_dist(gen);
+                    });
+
+      // output tensor
+      std::vector<DimSize_t> dims_out;
+      dims_out.reserve(dims_in.size());
+      std::copy_if(dims_in.begin(), dims_in.end(), std::back_inserter(dims_out),
+                   [](DimSize_t dim) { return dim != 1; });
+      // Test
+      input_T->resize(dims_in);
+      op->setInput(0, input_T);
+      CHECK(op->forwardDims() == true);
+      CHECK(op->getOutput(0)->dims() == dims_out);
+
+      int nb_ones = std::count_if(dims_in.begin(), dims_in.end(),
+                                  [](int8_t dim) { return dim == 1; });
+      CHECK((op->getInput(0)->dims().size() -
+             op->getOutput(0)->dims().size()) == nb_ones);
+    }
+  }
+  SECTION("Squeeze({N,...})") {
+    for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+      // Create the Operator
+      size_t nb_dims_to_squeeze = nb_dims_to_squeeze_dist(gen);
+      std::vector<int8_t> dims_to_squeeze(nb_dims_to_squeeze);
+      std::generate(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                    [&gen, &idx_dims_to_squeeze_dist]() {
+                      return idx_dims_to_squeeze_dist(gen);
+                    });
+      std::shared_ptr<Node> squeeze_node = Squeeze({dims_to_squeeze});
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+      op->associateInput(0, input_T);
+
+      // input tensor
+      const std::size_t nb_dims_tensor = tensor_nb_dims_dist(gen);
+      std::vector<std::size_t> dims_in(nb_dims_tensor);
+      std::generate(dims_in.begin(), dims_in.end(),
+                    [&gen, &tensor_dims_size_dist]() {
+                      return tensor_dims_size_dist(gen);
+                    });
+      input_T->resize(dims_in);
+      op->setInput(0, input_T);
+
+      // rectifying indexes
+      std::transform(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                     dims_to_squeeze.begin(),
+                     [&nb_dims_tensor](int8_t dim_to_squeeze) {
+                       return dim_to_squeeze < 0
+                                  ? dim_to_squeeze + nb_dims_tensor
+                                  : dim_to_squeeze;
+                     });
+      std::sort(dims_to_squeeze.begin(), dims_to_squeeze.end());
+      auto it = std::unique(dims_to_squeeze.begin(), dims_to_squeeze.end());
+      dims_to_squeeze.erase(it, dims_to_squeeze.end());
+
+      // ensuring arguments given to Squeeze are good
+      bool not_in_bounds = false;
+      bool dim_to_squeeze_not_1_sized = false;
+      for (const auto dim_to_squeeze : dims_to_squeeze) {
+        not_in_bounds = dim_to_squeeze >= nb_dims_tensor;
+        if (not_in_bounds) {
+          break;
+        }
+        dim_to_squeeze_not_1_sized = dims_in.at(dim_to_squeeze) != 1;
+        if (dim_to_squeeze_not_1_sized) {
+          break;
+        }
+      }
+
+      if (nb_dims_tensor > max_nb_dims || not_in_bounds ||
+          dim_to_squeeze_not_1_sized) {
+        REQUIRE_THROWS(op->forwardDims());
+      } else {
+        // output tensor
+        int i = 0;
+        std::vector<DimSize_t> dims_out;
+        dims_out.reserve(dims_in.size());
+        std::copy_if(dims_in.begin(), dims_in.end(),
+                     std::back_inserter(dims_out),
+                     [&dims_to_squeeze, &i](DimSize_t dim) {
+                       bool ok = dim != 1 ||
+                                 !std::binary_search(dims_to_squeeze.begin(),
+                                                     dims_to_squeeze.end(), i);
+                       i++; // manual index: C++ has no enumerate function
+                            // (std::views::enumerate arrives in C++23)
+                       return ok;
+                     });
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() == dims_out);
+      }
+    }
+  }
+}
+
+TEST_CASE("[core/operator] Squeeze(forward)", "[Squeeze][forward]") {
+  Log::setConsoleLevel(Log::Notice);
+  constexpr std::uint16_t NB_TRIALS = 10;
+  // Create a random number generator
+  auto random_seed = Catch::Generators::Detail::getSeed;
+  std::mt19937 gen(random_seed());
+
+  constexpr int8_t max_nb_dims = 7;
+  std::uniform_real_distribution<float> tensor_value_dist(0.1f, 1.1f);
+  std::uniform_int_distribution<std::size_t> tensor_nb_dims_dist(
+      std::size_t(1), std::size_t(max_nb_dims));
+  std::uniform_int_distribution<std::size_t> tensor_dims_size_dist(
+      std::size_t(1), std::size_t(5));
+  std::uniform_int_distribution<std::size_t> nb_dims_to_squeeze_dist(
+      std::size_t(1), std::size_t(2));
+  std::uniform_int_distribution<short> idx_dims_to_squeeze_dist(-9, 8);
+
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+
+  // BENCHMARKING
+  std::chrono::time_point<std::chrono::system_clock> start;
+  std::chrono::time_point<std::chrono::system_clock> end;
+  std::chrono::duration<double, std::micro> duration{};
+
+  int number_of_operation{0};
+  for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+    // Create the Operator
+    size_t nb_dims_to_squeeze = nb_dims_to_squeeze_dist(gen);
+    std::vector<int8_t> dims_to_squeeze(nb_dims_to_squeeze);
+    std::generate(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                  [&gen, &idx_dims_to_squeeze_dist]() {
+                    return idx_dims_to_squeeze_dist(gen);
+                  });
+    std::shared_ptr<Node> squeeze_node = Squeeze({dims_to_squeeze});
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(squeeze_node->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+
+    // input tensor
+    const std::size_t nb_dims_tensor = tensor_nb_dims_dist(gen);
+    std::vector<std::size_t> dims_in(nb_dims_tensor);
+    std::generate(dims_in.begin(), dims_in.end(),
+                  [&gen, &tensor_dims_size_dist]() {
+                    return tensor_dims_size_dist(gen);
+                  });
+    input_T->resize(dims_in);
+    op->setInput(0, input_T);
+
+    // rectifying indexes
+    std::transform(dims_to_squeeze.begin(), dims_to_squeeze.end(),
+                   dims_to_squeeze.begin(),
+                   [&nb_dims_tensor](int8_t dim_to_squeeze) {
+                     return dim_to_squeeze < 0 ? dim_to_squeeze + nb_dims_tensor
+                                               : dim_to_squeeze;
+                   });
+
+    // ensuring arguments given to Squeeze are good
+    bool not_in_bounds = false;
+    bool dim_to_squeeze_not_1_sized = false;
+    for (const auto dim_to_squeeze : dims_to_squeeze) {
+      not_in_bounds = dim_to_squeeze >= nb_dims_tensor;
+      if (not_in_bounds) {
+        break;
+      }
+      dim_to_squeeze_not_1_sized = dims_in.at(dim_to_squeeze) != 1;
+      if (dim_to_squeeze_not_1_sized) {
+        break;
+      }
+    }
+    if (nb_dims_tensor > max_nb_dims || not_in_bounds ||
+        dim_to_squeeze_not_1_sized) {
+      REQUIRE_THROWS(op->forwardDims());
+    } else {
+      // output tensor
+      std::vector<DimSize_t> dims_out;
+      dims_out.reserve(dims_in.size());
+      for (DimIdx_t i = 0; i < dims_in.size(); ++i) {
+        if (dims_in[i] == 1 &&
+            std::find(dims_to_squeeze.begin(), dims_to_squeeze.end(), i) !=
+                dims_to_squeeze.end()) {
+          continue;
+        }
+        dims_out.push_back(dims_in[i]);
+      }
+      CHECK(op->forwardDims());
+      CHECK(op->getOutput(0)->dims() == dims_out);
+
+      SECTION("forward") {
+        // Create the input Tensor
+        std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+        input_T->setDataType(DataType::Float32);
+        input_T->setBackend("cpu");
+        op->associateInput(0, input_T);
+
+        // Create results Tensor
+        std::shared_ptr<Tensor> result_T = std::make_shared<Tensor>();
+        result_T->setDataType(DataType::Float32);
+        result_T->setBackend("cpu");
+
+        const std::size_t nb_elems =
+            std::accumulate(dims_in.cbegin(), dims_in.cend(), std::size_t(1),
+                            std::multiplies<std::size_t>());
+        float *array_in = new float[nb_elems];
+        for (std::size_t i = 0; i < nb_elems; ++i) {
+          float val = tensor_value_dist(gen);
+          array_in[i] = val;
+        }
+        number_of_operation += nb_elems; // Copying all values : 1
+                                         // assignation / item in the tensor
+        // input0
+        input_T->resize(dims_in);
+        input_T->getImpl()->setRawPtr(array_in, nb_elems);
+
+        result_T->resize(dims_out);
+        result_T->getImpl()->setRawPtr(array_in, nb_elems);
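+        // the expected result reuses array_in unchanged: Squeeze only removes
+        // 1-sized dimensions, it does not touch the underlying values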
+
+        CHECK(op->forwardDims() == true);
+        start = std::chrono::system_clock::now();
+        REQUIRE_NOTHROW(squeeze_node->forward());
+        end = std::chrono::system_clock::now();
+        duration +=
+            std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+        CHECK(approxEq<float>(*result_T, *(op->getOutput(0))));
+        CHECK(result_T->nbDims() == op->getOutput(0)->nbDims());
+        for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) {
+          CHECK(result_T->dims().at(i) == op->getOutput(0)->dims().at(i));
+        }
+        CHECK(approxEq<float>(*result_T, *(op->getOutput(0))));
+
+        delete[] array_in;
+      }
+      std::cout << "Squeeze total execution time : " << duration.count() << "µs"
+                << std::endl;
+      std::cout << "Number of operations : " << number_of_operation
+                << std::endl;
+      std::cout << "Operation / µs = " << number_of_operation / duration.count()
+                << std::endl;
+    }
+  }
+}
+
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Sub_Op.cpp b/unit_tests/operator/Test_Sub_Op.cpp
index 329f3da798854ddff3d1c1393d60c57ef180c70a..110cbbfe68b723a2a670abe590ca5392881170f3 100644
--- a/unit_tests/operator/Test_Sub_Op.cpp
+++ b/unit_tests/operator/Test_Sub_Op.cpp
@@ -10,9 +10,10 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 #include <cstddef>  // std::size_t
 #include <memory>
-#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <random>   // std::mt19937, std::uniform_int_distribution
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
@@ -24,7 +25,7 @@ TEST_CASE("[core/operator] Sub_Op(forwardDims)", "[Sub][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
@@ -44,54 +45,54 @@ TEST_CASE("[core/operator] Sub_Op(forwardDims)", "[Sub][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_TransposeImpl.cpp b/unit_tests/operator/Test_TransposeImpl.cpp
index 8b6eafc70b7eefec6e1ccab9d0cfcde1eb4a09d5..18f0d68d87ac1ee66ffb1f24c4c130f9b020d56e 100644
--- a/unit_tests/operator/Test_TransposeImpl.cpp
+++ b/unit_tests/operator/Test_TransposeImpl.cpp
@@ -18,6 +18,16 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Transpose(forward)") {
+    SECTION("Scalar Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(2);
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(2);
+        std::shared_ptr<Node> myTranspose = Transpose({});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        REQUIRE_THROWS(myTranspose->forward());
+    }
     SECTION("3D Tensor") {
         std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> {
             {
@@ -120,4 +130,4 @@ TEST_CASE("[cpu/operator] Transpose(forward)") {
 
         REQUIRE(*(op->getOutput(0)) == *output);
     }
-}
\ No newline at end of file
+}
diff --git a/unit_tests/operator/Test_Unsqueeze_Op.cpp b/unit_tests/operator/Test_Unsqueeze_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..79f5b89b1c08f409b214a9439431c2d2a51ddbd2
--- /dev/null
+++ b/unit_tests/operator/Test_Unsqueeze_Op.cpp
@@ -0,0 +1,382 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <algorithm>
+#include <chrono>
+#include <cmath>
+#include <cstddef> // std::size_t
+#include <cstdint> // std::uint16_t
+#include <fmt/core.h>
+#include <iostream>
+#include <memory>
+#include <numeric> // std::accumulate
+#include <ostream>
+#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Unsqueeze.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+bool ensure_axes_validity(std::vector<int8_t> dims_to_unsqueeze,
+                          DimIdx_t nb_dims_input_tensor) {
+
+  bool in_bounds =
+      std::all_of(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end(),
+                  [&nb_dims_input_tensor,
+                   &dims_to_unsqueeze](const int8_t &dim_to_unsqueeze) {
+                    return (dim_to_unsqueeze <
+                            nb_dims_input_tensor + dims_to_unsqueeze.size());
+                  });
+
+  std::sort(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end());
+  bool index_appear_twice =
+      dims_to_unsqueeze.end() !=
+      std::adjacent_find(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end());
+
+  return in_bounds && !index_appear_twice;
+}
+
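+// Builds the expected output shape by inserting a 1 at each (sorted) axis.
+// For example dims_in = {3, 4, 5} with axes {0, 4} yields {1, 3, 4, 5, 1},
+// matching the "Unsqueeze({0,4})" reference section below.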
+std::vector<DimSize_t>
+generate_unsqueeze_output_dims(std::vector<size_t> dims_in,
+                               std::vector<int8_t> dims_to_unsqueeze) {
+
+  std::sort(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end());
+  std::vector<DimSize_t> dims_out(dims_in);
+  dims_out.reserve(dims_in.size() + dims_to_unsqueeze.size());
+  for (const DimIdx_t &dim : dims_to_unsqueeze) {
+    dims_out.insert(dims_out.begin() + dim, 1);
+  }
+  return dims_out;
+}
+
+std::vector<int8_t> rectify_indexes(const std::vector<int8_t> & dims_to_unsqueeze,
+                                    const int8_t offset) {
+  std::vector<int8_t> output;
+  output.reserve(dims_to_unsqueeze.size());
+  for (int8_t dim : dims_to_unsqueeze) {
+    output.push_back(dim >= 0 ? dim : dim + offset);
+  }
+  return output;
+}
+
+TEST_CASE("[core/operator] Unsqueeze(forwardDims)",
+          "[Unsqueeze][forwardDims]") {
+  constexpr std::uint16_t NB_TRIALS = 10;
+  // Create a random number generator
+  auto random_seed = Catch::Generators::Detail::getSeed;
+  std::mt19937 gen(random_seed());
+
+  std::uniform_real_distribution<float> valueDist(0.1f, 1.1f);
+  std::uniform_int_distribution<std::size_t> tensor_dims_size_dist(
+      std::size_t(1), std::size_t(10));
+  std::uniform_int_distribution<std::size_t> tensor_nb_dims_dist(
+      std::size_t(1), std::size_t(7));
+  std::uniform_int_distribution<std::size_t> nb_dims_to_unsqueeze_dist(
+      std::size_t(1), std::size_t(8));
+
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+  std::shared_ptr<Tensor> axes_T = std::make_shared<Tensor>();
+
+  SECTION("ERROR : Inputs not ready") {
+    SECTION("unconnected input") {
+      std::shared_ptr<Node> myUnsqueeze =
+          Unsqueeze(std::vector<std::int8_t>({0}));
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(myUnsqueeze->getOperator());
+      REQUIRE_THROWS(op->forwardDims());
+    }
+
+    std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+
+    SECTION("empty tensor") {
+      // Create the Unsqueeze Operator
+      std::shared_ptr<Node> myUnsqueeze =
+          Unsqueeze(std::vector<std::int8_t>({0}));
+      auto op =
+          std::static_pointer_cast<OperatorTensor>(myUnsqueeze->getOperator());
+      op->associateInput(0, input_T);
+
+      CHECK(op->forwardDims() == false);
+    }
+  }
+  SECTION("Compare with reference output") {
+    int8_t nb_dims = 3;
+    SECTION("axes is given via attribute") {
+      SECTION("unsqueez(0)") {
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({0}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{2, 3, 4};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({1, 2, 3, 4}));
+        CHECK((op->getOutput(0)->dims().size()) == nb_dims + 1);
+      }
+      SECTION("Unsqueeze(1)") {
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({1}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        std::array<DimSize_t, 3> dims_in{2, 3, 4};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({2, 1, 3, 4}));
+        CHECK((op->getOutput(0)->dims().size()) == nb_dims + 1);
+      }
+      SECTION("Unsqueeze(2)") {
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({2}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{2, 3, 4};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({2, 3, 1, 4}));
+        CHECK((op->getOutput(0)->dims().size()) == nb_dims + 1);
+      }
+      SECTION("Unsqueeze({0,4})") {
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({0, 4}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+        op->associateInput(0, input_T);
+
+        std::vector<DimSize_t> dims_in{3, 4, 5};
+        input_T->resize(dims_in);
+
+        CHECK(op->forwardDims() == true);
+        CHECK(op->getOutput(0)->dims() ==
+              std::vector<DimSize_t>({1, 3, 4, 5, 1}));
+      }
+    }
+    SECTION("axes is given via tensor") {
+        // the axes given at construction should be overridden by the values of axes_T
+        std::shared_ptr<Node> myUnsqueeze =
+            Unsqueeze(std::vector<std::int8_t>({0, 4}));
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            myUnsqueeze->getOperator());
+
+        auto axes_T = std::make_shared<Aidge::Tensor>(
+            Aidge::Array1D<int8_t, 3>({1, 3, 4}));
+        axes_T->setDataType(Aidge::DataType::Int8);
+        axes_T->setBackend("cpu");
+
+        std::vector<DimSize_t> dims_in{3, 4, 5};
+        input_T->resize(dims_in);
+        op->associateInput(0, input_T);
+        op->associateInput(1, axes_T);
+
+        CHECK(op->forwardDims(true) == true);
+        CHECK(op->getOutput(0)->dims() ==
+              std::vector<DimSize_t>({3, 1, 4, 1, 1, 5}));
+    }
+  }
+  SECTION("Random testing") {
+    SECTION("Unsqueeze({N,...})") {
+      for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+        const size_t nb_dims_to_unsqueeze = nb_dims_to_unsqueeze_dist(gen);
+        const size_t nb_dims_tensor = tensor_nb_dims_dist(gen);
+        const size_t idx_dims_to_unsqueeze_max =
+            nb_dims_to_unsqueeze + nb_dims_tensor;
+        const int variance_error = 2;
+        std::uniform_int_distribution<short> idx_dims_to_unsqueeze_dist(
+            -static_cast<int>(idx_dims_to_unsqueeze_max) - variance_error,
+            static_cast<int>(idx_dims_to_unsqueeze_max) - 1 + variance_error);
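+        // the range is deliberately widened by variance_error on both sides
+        // so that out-of-bounds axes are sometimes drawn and the throwing
+        // path of forwardDims() is exercised as well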
+        // Create the Operator
+        std::vector<int8_t> dims_to_unsqueeze(nb_dims_to_unsqueeze);
+        std::generate(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end(),
+                      [&gen, &idx_dims_to_unsqueeze_dist]() {
+                        return idx_dims_to_unsqueeze_dist(gen);
+                      });
+        std::shared_ptr<Node> unsqueeze_node = Unsqueeze(dims_to_unsqueeze);
+        auto op = std::static_pointer_cast<OperatorTensor>(
+            unsqueeze_node->getOperator());
+        op->associateInput(0, input_T);
+
+        // input tensor
+        std::vector<std::size_t> dims_in(nb_dims_tensor);
+        std::generate(dims_in.begin(), dims_in.end(),
+                      [&gen, &tensor_dims_size_dist]() {
+                        return tensor_dims_size_dist(gen);
+                      });
+        input_T->resize(dims_in);
+        op->setInput(0, input_T);
+
+        dims_to_unsqueeze = rectify_indexes(
+            dims_to_unsqueeze, input_T->nbDims() + dims_to_unsqueeze.size());
+        bool dims_to_unsqueeze_valid =
+            ensure_axes_validity(dims_to_unsqueeze, input_T->nbDims());
+        Log::warn("raw dims_to_unsqueeze : {}", dims_to_unsqueeze);
+        Log::warn("dims_to_unsqueeze : {}", dims_to_unsqueeze);
+        Log::warn("tensor dims : {}", input_T->dims());
+
+        if (!dims_to_unsqueeze_valid) {
+          REQUIRE_THROWS(op->forwardDims(true));
+        } else {
+          // output tensor
+          std::vector<DimSize_t> dims_out =
+              generate_unsqueeze_output_dims(dims_in, dims_to_unsqueeze);
+          Log::warn("dims_out : {}", dims_out);
+          CHECK(op->forwardDims(true) == true);
+          CHECK(op->getOutput(0)->dims() == dims_out);
+        }
+      }
+    }
+  }
+}
+
+TEST_CASE("[core/operator] Unsqueeze(forward)", "[Unsqueeze][forward]") {
+  constexpr std::uint16_t NB_TRIALS = 10;
+  // Create a random number generator
+  auto random_seed = Catch::Generators::Detail::getSeed;
+  std::mt19937 gen(random_seed());
+  // Random float distribution between 0 and 1
+  std::uniform_real_distribution<float> valueDist(0.1f, 1.1f);
+  std::uniform_int_distribution<std::size_t> tensor_dims_size_dist(
+      std::size_t(1), std::size_t(10));
+  std::size_t min_tensor_nb_dims{1};
+  std::size_t max_tensor_nb_dims{7};
+  std::uniform_int_distribution<std::size_t> tensor_nb_dims_dist(
+      min_tensor_nb_dims, max_tensor_nb_dims);
+  std::uniform_int_distribution<std::size_t> nb_dims_to_unsqueeze_dist(
+      std::size_t(1), std::size_t(8));
+  std::uniform_int_distribution<short> idx_dims_to_unsqueeze_dist(-9, 8);
+
+  std::shared_ptr<Tensor> input_T = std::make_shared<Tensor>();
+  input_T->setDataType(DataType::Float32);
+  input_T->setBackend("cpu");
+  std::shared_ptr<Tensor> result_T = std::make_shared<Tensor>();
+  result_T->setDataType(DataType::Float32);
+  result_T->setBackend("cpu");
+
+  // BENCHMARKING
+  std::chrono::time_point<std::chrono::system_clock> start;
+  std::chrono::time_point<std::chrono::system_clock> end;
+  std::chrono::duration<double, std::micro> duration{};
+
+  int number_of_operation{0};
+  for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
+    // Create the Operator
+    size_t nb_dims_to_unsqueeze = nb_dims_to_unsqueeze_dist(gen);
+    std::vector<int8_t> dims_to_unsqueeze(nb_dims_to_unsqueeze);
+    std::generate(dims_to_unsqueeze.begin(), dims_to_unsqueeze.end(),
+                  [&gen, &idx_dims_to_unsqueeze_dist]() {
+                    return idx_dims_to_unsqueeze_dist(gen);
+                  });
+    std::shared_ptr<Node> unsqueeze_node = Unsqueeze(dims_to_unsqueeze);
+    auto op =
+        std::static_pointer_cast<OperatorTensor>(unsqueeze_node->getOperator());
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+    op->associateInput(0, input_T);
+
+    // input tensor
+    const std::size_t nb_dims_tensor = tensor_nb_dims_dist(gen);
+    std::vector<std::size_t> dims_in(nb_dims_tensor);
+    std::generate(dims_in.begin(), dims_in.end(),
+                  [&gen, &tensor_dims_size_dist]() {
+                    return tensor_dims_size_dist(gen);
+                  });
+    input_T->resize(dims_in);
+    op->setInput(0, input_T);
+
+    // rectifying indexes
+    std::transform(
+        dims_to_unsqueeze.begin(), dims_to_unsqueeze.end(),
+        dims_to_unsqueeze.begin(),
+        [&nb_dims_tensor, &nb_dims_to_unsqueeze](int8_t dim_to_unsqueeze) {
+          return dim_to_unsqueeze < 0
+                     ? dim_to_unsqueeze +
+                           (nb_dims_tensor + nb_dims_to_unsqueeze)
+                     : dim_to_unsqueeze;
+        });
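+    // after this transform every axis is non-negative and indexes into the
+    // output shape of rank nb_dims_tensor + nb_dims_to_unsqueeze
+    // (e.g. rank-4 input with 2 axes: axis -1 is rewritten to 5)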
+
+    // check that the axes given to Unsqueeze are valid
+    bool axes_to_unsqueeze_valid =
+        ensure_axes_validity(dims_to_unsqueeze, input_T->nbDims());
+    if (!axes_to_unsqueeze_valid) {
+      REQUIRE_THROWS(op->forwardDims(true));
+    } else {
+      // output tensor
+      std::vector<DimSize_t> dims_out =
+          generate_unsqueeze_output_dims(dims_in, dims_to_unsqueeze);
+      CHECK(op->forwardDims(true) == true);
+      CHECK(op->getOutput(0)->dims() == dims_out);
+
+      SECTION("forward") {
+        const std::size_t nb_elems =
+            std::accumulate(dims_in.cbegin(), dims_in.cend(), std::size_t(1),
+                            std::multiplies<std::size_t>());
+        float *array_in = new float[nb_elems];
+        for (std::size_t i = 0; i < nb_elems; ++i) {
+          array_in[i] = valueDist(gen);
+        }
+        number_of_operation += nb_elems; // one assignment per tensor item
+
+        // input0
+        input_T->resize(dims_in);
+        input_T->getImpl()->setRawPtr(array_in, nb_elems);
+
+        // results
+        result_T->resize(dims_out);
+        result_T->getImpl()->setRawPtr(array_in, nb_elems);
+
+        CHECK(op->forwardDims(true) == true);
+        start = std::chrono::system_clock::now();
+        REQUIRE_NOTHROW(unsqueeze_node->forward());
+        end = std::chrono::system_clock::now();
+        duration +=
+            std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+        CHECK(result_T->nbDims() == op->getOutput(0)->nbDims());
+        for (DimSize_t i = 0; i < op->getOutput(0)->nbDims(); ++i) {
+          CHECK(result_T->dims().at(i) == op->getOutput(0)->dims().at(i));
+        }
+        CHECK(approxEq<float>(*result_T, *(op->getOutput(0))));
+
+        delete[] array_in;
+      }
+    }
+    std::cout << "Unsqueeze total execution time : " << duration.count() << "µs"
+              << std::endl;
+    std::cout << "Number of operations : " << number_of_operation << std::endl;
+    std::cout << "Operation / µs = " << number_of_operation / duration.count()
+              << std::endl;
+  }
+}
+
+} // namespace Aidge
diff --git a/unit_tests/recipes/Test_ConvToMatMul.cpp b/unit_tests/recipes/Test_ConvToMatMul.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b5ecf7c72804413b620546666c11bc14ad809fbe
--- /dev/null
+++ b/unit_tests/recipes/Test_ConvToMatMul.cpp
@@ -0,0 +1,39 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/recipes/Recipes.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include <cstddef>
+
+using namespace Aidge;
+
+TEST_CASE("[ConvToMatMul] conv") {
+    auto conv1 = Conv(3, 32, {3, 3}, "conv1");
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2", {1, 1}, {1, 1}, true);
+    auto conv3 = Conv(64, 10, {1, 1}, "conv3", {2, 2});
+
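+    // assuming the factory signature Conv(inCh, outCh, kernel, name, stride,
+    // dilation, noBias): conv2 is bias-free and conv3 is strided, so the
+    // recipe is exercised on several Conv configurations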
+    auto g1 = Sequential({
+        Producer({16, 3, 224, 224}, "dataProvider"),
+        conv1,
+        conv2,
+        conv3
+    });
+
+    g1->forwardDims();
+
+    g1->save("convToMatMul_before");
+    REQUIRE(convToMatMul(g1) == 3);
+    g1->save("convToMatMul_after");
+}
diff --git a/unit_tests/recipes/Test_ExplicitTranspose.cpp b/unit_tests/recipes/Test_ExplicitTranspose.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0c0a46710d69606508a22e7b01dac708db9b8f34
--- /dev/null
+++ b/unit_tests/recipes/Test_ExplicitTranspose.cpp
@@ -0,0 +1,54 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/recipes/Recipes.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include <cstddef>
+
+using namespace Aidge;
+
+TEST_CASE("[ExplicitTranspose] conv") {
+    auto conv1 = Conv(3, 32, {3, 3}, "conv1");
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2");
+    auto conv3 = Conv(64, 10, {1, 1}, "conv3", {2, 2});
+
+    auto g1 = Sequential({
+        Producer({16, 3, 224, 224}, "dataProvider"),
+        conv1,
+        conv2,
+        conv3
+    });
+
+    g1->setDataFormat(DataFormat::NCHW);
+    conv2->getOperator()->setDataFormat(DataFormat::NHWC);
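+    // conv2 now disagrees with its NCHW neighbours, so explicitTranspose()
+    // has to insert conversions around it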
+
+    g1->save("explicitTranspose_before");
+    REQUIRE(g1->getNodes().size() == 10);
+    const auto initialNodes = g1->getNodes();
+
+    g1->forwardDims();
+    explicitTranspose(g1);
+
+    // Check that Transpose nodes were inserted
+    g1->save("explicitTranspose_after");
+    REQUIRE(g1->getNodes().size() == 12);
+
+    // Check that Transpose nodes are removed once conv2 is back to NCHW
+    conv2->getOperator()->setDataFormat(DataFormat::NCHW);
+    explicitTranspose(g1);
+
+    REQUIRE(g1->getNodes().size() == 10);
+    REQUIRE(g1->getNodes() == initialNodes);
+}
diff --git a/unit_tests/recipes/Test_FuseMulAdd.cpp b/unit_tests/recipes/Test_FuseMulAdd.cpp
deleted file mode 100644
index 4c6e3f9d563d2e74958e68f8876a49a8323f4403..0000000000000000000000000000000000000000
--- a/unit_tests/recipes/Test_FuseMulAdd.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <catch2/catch_test_macros.hpp>
-#include <set>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/operator/Add.hpp"
-#include "aidge/operator/FC.hpp"
-#include "aidge/operator/MatMul.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/recipes/Recipes.hpp"
-
-namespace Aidge {
-
-
-TEST_CASE("[cpu/recipes] FuseMulAdd", "[FuseMulAdd][recipes]") {
-    // generate the original GraphView
-    auto matmul0 = MatMul("matmul0");
-    auto add0 = Add(2, "add0");
-    auto matmul1 = MatMul("matmul1");
-    auto add1 = Add(2, "add1");
-
-    auto b0 = Producer({5}, "B0");
-    auto w0 = Producer({5, 5}, "W0");
-    auto b1 = Producer({5}, "B1");
-    auto w1 = Producer({5,5},"W1");
-    auto input = Producer({2,5}, "input");
-
-    input->addChild(matmul0, 0, 0);
-    w0->addChild(matmul0, 0, 1);
-
-    matmul0->addChild(add0, 0, 0);
-    b0->addChild(add0, 0, 1);
-
-    add0->addChild(matmul1, 0, 0);
-    w1->addChild(matmul1, 0, 1);
-
-    matmul1->addChild(add1, 0, 0);
-    b1->addChild(add1, 0, 1);
-
-    auto g = std::make_shared<GraphView>();
-    g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1});
-
-    // Check original graph
-    REQUIRE(g->getNodes() ==
-            std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
-    REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
-    REQUIRE(((add0->getParent(0) == matmul0) && (add0->getParent(1) == b0)));
-    REQUIRE(((matmul1->getParent(0) == add0) && (matmul1->getParent(1) == w1)));
-    REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
-
-	// Transform GraphView inplace
-    fuseMulAdd(g);
-
-	// Check new GraphView
-	 std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
-	REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
-	REQUIRE(newNodes.size() == 6);
-	for (const auto& node : newNodes) {
-		REQUIRE(((node->type() == "Producer") || (node->type() == "FC")));
-	}
-}
-
-}  // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/recipes/Test_FuseToMetaOps.cpp b/unit_tests/recipes/Test_FuseToMetaOps.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9fceedf2feef0a3ed79b83a8494a1a2b49f77291
--- /dev/null
+++ b/unit_tests/recipes/Test_FuseToMetaOps.cpp
@@ -0,0 +1,44 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <set>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+namespace Aidge {
+
+
+TEST_CASE("[cpu/recipes] FuseToMetaOps", "[FuseToMetaOps][recipes]") {
+    auto g1 = Sequential({
+        Producer({16, 3, 224, 224}, "dataProvider"),
+        Conv(3, 32, {3, 3}, "conv1"),
+        ReLU("relu1"),
+        Conv(32, 64, {3, 3}, "conv2"),
+        ReLU("relu2"),
+        Conv(64, 10, {1, 1}, "conv3")
+    });
+    g1->save("FuseToMetaOps_before");
+
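+    // two Conv->ReLU pairs exist (conv1/relu1 and conv2/relu2); conv3 has no
+    // following ReLU, hence the expected count of 2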
+    // FIXME: GraphRegex also matches the Conv Producers, which are not in the query!
+    const auto nbFused = fuseToMetaOps(g1, "Conv->ReLU", "ConvReLU");
+    g1->save("FuseToMetaOps_after", true);
+
+    REQUIRE(nbFused == 2);
+}
+
+}  // namespace Aidge
diff --git a/unit_tests/recipes/Test_MatMulToFC.cpp b/unit_tests/recipes/Test_MatMulToFC.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2adf882ca69e0d5ca5f050d1b89cfb09d81b536b
--- /dev/null
+++ b/unit_tests/recipes/Test_MatMulToFC.cpp
@@ -0,0 +1,118 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <set>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+namespace Aidge {
+
+
+TEST_CASE("[cpu/recipes] MatMulToFC", "[MatMulToFC][recipes]") {
+    SECTION("with Add") {
+        // generate the original GraphView
+        auto matmul0 = MatMul("matmul0");
+        auto add0 = Add(2, "add0");
+        auto matmul1 = MatMul("matmul1");
+        auto add1 = Add(2, "add1");
+
+        auto b0 = Producer({5}, "B0");
+        auto w0 = Producer({5, 5}, "W0");
+        auto b1 = Producer({5}, "B1");
+        auto w1 = Producer({5,5},"W1");
+        auto input = Producer({2,5}, "input");
+
+        input->addChild(matmul0, 0, 0);
+        w0->addChild(matmul0, 0, 1);
+
+        matmul0->addChild(add0, 0, 0);
+        b0->addChild(add0, 0, 1);
+
+        add0->addChild(matmul1, 0, 1);
+        w1->addChild(matmul1, 0, 0);
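+        // inputs are deliberately swapped here (weights on input 0), so the
+        // recipe has to recognize the weight Producer on either input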
+
+        matmul1->addChild(add1, 0, 0);
+        b1->addChild(add1, 0, 1);
+
+        auto g = std::make_shared<GraphView>();
+        g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1});
+
+        // Check original graph
+        REQUIRE(g->getNodes() ==
+                std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
+        REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
+        REQUIRE(((add0->getParent(0) == matmul0) && (add0->getParent(1) == b0)));
+        REQUIRE(((matmul1->getParent(1) == add0) && (matmul1->getParent(0) == w1)));
+        REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
+
+        // Transform GraphView inplace
+        matMulToFC(g);
+
+        // Check new GraphView
+        std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
+        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
+        REQUIRE(newNodes.size() == 6);
+        for (const auto& node : newNodes) {
+            REQUIRE(((node->type() == "Producer") || (node->type() == "FC")));
+        }
+    }
+
+    SECTION("without Add") {
+        // generate the original GraphView
+        auto matmul0 = MatMul("matmul0");
+        auto matmul1 = MatMul("matmul1");
+        auto add1 = Add(2, "add1");
+
+        auto w0 = Producer({5, 5}, "W0");
+        auto b1 = Producer({5}, "B1");
+        auto w1 = Producer({5,5},"W1");
+        auto input = Producer({2,5}, "input");
+
+        input->addChild(matmul0, 0, 0);
+        w0->addChild(matmul0, 0, 1);
+
+        matmul0->addChild(matmul1, 0, 1);
+        w1->addChild(matmul1, 0, 0);
+
+        matmul1->addChild(add1, 0, 0);
+        b1->addChild(add1, 0, 1);
+
+        auto g = std::make_shared<GraphView>();
+        g->add({w0, matmul0, w1, matmul1, b1, add1});
+
+        // Check original graph
+        REQUIRE(g->getNodes() ==
+                std::set<std::shared_ptr<Node>>({w0, matmul0, w1, matmul1, b1, add1}));
+        REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
+        REQUIRE(((matmul1->getParent(1) == matmul0) && (matmul1->getParent(0) == w1)));
+        REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
+
+        // Transform GraphView inplace
+        matMulToFC(g);
+
+        // Check new GraphView
+        std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
+        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, w1, matmul1, b1, add1}));
+        REQUIRE(newNodes.size() == 5);
+        for (const auto& node : newNodes) {
+            REQUIRE(((node->type() == "Producer") || (node->type() == "FC")));
+        }
+    }
+}
+
+}  // namespace Aidge
diff --git a/unit_tests/recipes/Test_removeConstantOfShape.cpp b/unit_tests/recipes/Test_removeConstantOfShape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..247149a0fdb1087f14ac17d125659d677ccfb506
--- /dev/null
+++ b/unit_tests/recipes/Test_removeConstantOfShape.cpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/Identity.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/ConstantOfShape.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Types.h"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/recipies] removeConstantOfShape",
+          "[ConstantOfShape][removeConstantOfShape][recipies]") {
+  auto input_T = std::make_shared<Tensor>(Array1D<int64_t, 4>({1, 1, 3, 3}));
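+  // ConstantOfShape reads the values of this tensor as its output shape, so
+  // the generated constant is expected to have dims {1, 1, 3, 3}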
+
+  auto model = std::make_shared<GraphView>();
+  SECTION("Sequential model") {
+    model = Sequential({Producer(input_T, "prod_0", true),
+                        ConstantOfShape(3, "constantOfShape_0"),
+                        Conv(1, 1, {3, 3}, "Conv_0"), ReLU("ReLU_1")});
+    model->save("test_removeConstantOfShape_model_before_1");
+    CHECK(removeConstantOfShape(model) == 1);
+    CHECK(model->forwardDims());
+    model->save("test_removeConstantOfShape_model_after_1");
+  }
+}
+
diff --git a/unit_tests/recipes/Test_removeFlatten.cpp b/unit_tests/recipes/Test_removeFlatten.cpp
index 84099ac0b77a633893af6a7550464e539c95d806..c3b4c08d98115c9f081bbbf8cb677114b66c545a 100644
--- a/unit_tests/recipes/Test_removeFlatten.cpp
+++ b/unit_tests/recipes/Test_removeFlatten.cpp
@@ -27,8 +27,8 @@ namespace Aidge {
 TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
   std::shared_ptr<Node> flatten =
       GenericOperator("Flatten", 1, 0, 1, "myFlatten");
-  std::shared_ptr<Node> fc0 = FC(10, 10, "FC_1");
-  std::shared_ptr<Node> fc1 = FC(10, 10, "FC_2");
+  std::shared_ptr<Node> fc0 = FC(10, 10, false, "FC_1");
+  std::shared_ptr<Node> fc1 = FC(10, 10, false, "FC_2");
   std::shared_ptr<Node> prod = Producer(std::array<DimSize_t, 10>(), "myProd");
 
   SECTION("flatten last layer : nothing removed because pattern searched is "
@@ -42,7 +42,7 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
 
     CHECK(g->getOrderedInputs().size() == 1);
     CHECK(g->getOrderedInputs()[0].first == fc0);
-    
+
     CHECK(fc0->getParent(0) == nullptr);
     CHECK(fc0->getChildren(0).size() == 1);
     CHECK(g->rootNode() == fc0);
@@ -54,10 +54,10 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
 
     CHECK(g->getOrderedInputs().size() == 1);
     CHECK(g->getOrderedInputs()[0].first == fc0);
-    
+
     CHECK(g->getOrderedOutputs().size() == 1);
     CHECK(g->getOrderedOutputs()[0].first == fc0);
-    
+
     CHECK(fc0->getParent(0) == nullptr);
     CHECK(fc0->getChildren(0).size() == 0);
     CHECK(g->rootNode() == fc0);
@@ -73,7 +73,7 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
 
     CHECK(g->getOrderedOutputs().size() == 1);
     CHECK(g->getOrderedOutputs()[0].first == fc1);
-    
+
     CHECK(fc1->getParent(0) == fc0);
     CHECK(fc0->getChildren(0)[0] == fc1);
     CHECK(g->rootNode() == fc0);
@@ -87,10 +87,10 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
     removeFlatten(g);
 
     CHECK(g->getOrderedInputs().size() == 0);
-    
+
     CHECK(g->getOrderedOutputs().size() == 1);
     CHECK(g->getOrderedOutputs()[0].first == fc0);
-    
+
     CHECK(fc0->getParent(0) == prod);
     CHECK(fc0->getChildren(0).size() == 0);
 
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
index ceaa5e301c820ef54970a0e76004ad3467ae66da..3c3026ff09222f9623d886f9c4574bf23667cd9a 100644
--- a/unit_tests/scheduler/Test_Scheduler.cpp
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -17,6 +17,7 @@
 #include <string>
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
@@ -35,7 +36,7 @@ TEST_CASE("randomScheduling", "[Scheduler][randomGen]") {
   std::uniform_int_distribution<std::size_t> nb_nodes_dist(100, 500);
 
   for (int test = 0; test < nbTests; ++test) {
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
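+    // getSeed returns Catch2's session seed (settable via --rng-seed),
+    // making failing runs reproducible, unlike std::random_device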
     const std::mt19937::result_type seed(rd());
     std::mt19937 gen(rd());
 
diff --git a/unit_tests/utils/Test_DynamicAttributes.cpp b/unit_tests/utils/Test_DynamicAttributes.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b8a1264b3ad954e776a5ae4c47f03cd0c3fb82c9
--- /dev/null
+++ b/unit_tests/utils/Test_DynamicAttributes.cpp
@@ -0,0 +1,62 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include <string>
+#include <vector>
+
+#include "aidge/utils/DynamicAttributes.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[core/attributes] DynamicAttributes") {
+    SECTION("TestAttr") {
+        DynamicAttributes attrs;
+        attrs.addAttr("a", 1);
+        attrs.addAttr("b", 1.0f);
+        attrs.addAttr("c", std::string("test"));
+        attrs.addAttr<std::vector<bool>>("d", {false, true, false});
+
+        REQUIRE(attrs.getAttr<int>("a") == 1);
+        REQUIRE(attrs.getAttr<float>("b") == 1.0f);
+        REQUIRE(attrs.getAttr<std::string>("c") == "test");
+        REQUIRE(attrs.getAttr<std::vector<bool>>("d") == std::vector<bool>{{false, true, false}});
+
+        attrs.addAttr("e", DynamicAttributes());
+        attrs.getAttr<DynamicAttributes>("e").addAttr("e1", 1.0f);
+        attrs.getAttr<DynamicAttributes>("e").addAttr("e2", std::string("test"));
+
+        REQUIRE(attrs.getAttr<DynamicAttributes>("e").getAttr<float>("e1") == 1.0f);
+        REQUIRE(attrs.getAttr<DynamicAttributes>("e").getAttr<std::string>("e2") == "test");
+    }
+
+    SECTION("TestAttrNS") {
+        DynamicAttributes attrs;
+        attrs.addAttr("mem.a", 1);
+        attrs.addAttr("mem.data.b", 1.0f);
+        attrs.addAttr("impl.c", std::string("test"));
+        attrs.addAttr<std::vector<bool>>("d", {false, true, false});
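+        // a dotted name such as "mem.data.b" transparently creates the
+        // intermediate DynamicAttributes namespaces "mem" and "mem.data"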
+
+        REQUIRE(attrs.getAttr<int>("mem.a") == 1);
+        REQUIRE(attrs.getAttr<float>("mem.data.b") == 1.0f);
+        REQUIRE(attrs.getAttr<std::string>("impl.c") == "test");
+        REQUIRE(attrs.getAttr<std::vector<bool>>("d") == std::vector<bool>{{false, true, false}});
+
+        attrs.getAttr<DynamicAttributes>("mem.data").addAttr("e", 2.0f);
+        attrs.getAttr<DynamicAttributes>("impl").addAttr("f", std::string("test2"));
+        REQUIRE(attrs.getAttr<float>("mem.data.e") == 2.0f);
+        REQUIRE(attrs.getAttr<std::string>("impl.f") == "test2");
+
+        REQUIRE(attrs.getAttr<DynamicAttributes>("mem.data").getAttr<float>("b") == 1.0f);
+        REQUIRE(attrs.getAttr<DynamicAttributes>("impl").getAttr<std::string>("c") == "test");
+    }
+}
diff --git a/version.txt b/version.txt
index 0c62199f16ac1e2d7f7ae75b420c1231325dff4e..ee1372d33a29e27945406f0527f8af8e6ee119c9 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.2.1
+0.2.2