Commit 4c80e142 authored by Maxence Naud

Change 'weightInterLeaving' for 'weightInterLeaved'

parent 85388447
1 merge request: !132 [UPD] version 0.4.1 -> 0.5.0
@@ -53,7 +53,7 @@
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
 #include "aidge/backend/cpu/operator/TanhImpl.hpp"
-#include "aidge/backend/cpu/operator/WeightInterleavingImpl.hpp"
+#include "aidge/backend/cpu/operator/WeightInterleavedImpl.hpp"
 #include "aidge/backend/cpu/data/TensorImpl.hpp"
@@ -23,7 +23,7 @@
 namespace Aidge {
 // Operator implementation entry point for the backend
-using WeightInterleavingImpl_cpu = OperatorImpl_cpu<WeightInterleaving_Op,
+using WeightInterleavedImpl_cpu = OperatorImpl_cpu<WeightInterleaving_Op,
     void(const DimSize_t,
          const DimSize_t,
          const DimSize_t,
@@ -31,7 +31,7 @@ using WeightInterleavingImpl_cpu = OperatorImpl_cpu<WeightInterleaving_Op,
          void *)>;
 // Implementation entry point registration to Operator
-REGISTRAR(WeightInterleaving_Op, "cpu", Aidge::WeightInterleavingImpl_cpu::create);
+REGISTRAR(WeightInterleaving_Op, "cpu", Aidge::WeightInterleavedImpl_cpu::create);
 } // namespace Aidge
 #endif /* AIDGE_CPU_OPERATOR_WeightInterleavingIMPL_H_ */
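For context on how this registration is exercised: selecting the "cpu" backend on a WeightInterleaving node is what triggers the REGISTRAR lookup above. A minimal sketch, assuming the umbrella header edited in the first hunk is aidge/backend/cpu.hpp, and reusing only API calls that appear in the unit tests further down (the sketch itself is not part of the diff):

    #include <memory>
    #include "aidge/backend/cpu.hpp"                   // assumed umbrella header; pulls in the REGISTRAR above
    #include "aidge/operator/OperatorTensor.hpp"
    #include "aidge/operator/WeightInterleaving.hpp"

    int main() {
        std::shared_ptr<Aidge::Node> node = Aidge::WeightInterleaving();
        auto op = std::static_pointer_cast<Aidge::OperatorTensor>(node->getOperator());
        op->setDataFormat(Aidge::DataFormat::NHWC);
        op->setBackend("cpu");  // resolves WeightInterleaving_Op's "cpu" entry to WeightInterleavedImpl_cpu::create
    }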
-#ifndef AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVINGIMPL_KERNELS_H_
-#define AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVINGIMPL_KERNELS_H_
-
-#include <algorithm>
-
-#include "aidge/backend/cpu/operator/WeightInterleavingImpl.hpp"
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVEDIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVEDIMPL_KERNELS_H_
+
+#include <cstddef>  // std::size_t
+#include <cstdint>  // std::int8_t, std::uint8_t
+
+#include "aidge/backend/cpu/operator/WeightInterleavedImpl.hpp"
+#include "aidge/data/DataType.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 namespace Aidge {

 /**
  * @brief Compacts 8-bit data into a smaller bit-width representation.
  *
  * This function takes an array of 8-bit data and compacts it into smaller chunks
  * based on the specified bit-width `nb_bits`. Each element in `compactData` will
  * store multiple packed `nb_bits` segments extracted from `data`.
  *
  * @param data The input array of 8-bit values to be compacted.
  * @param dataSize The size of the input `data` array.
  * @param compactData The output array storing the compacted data.
@@ -39,14 +51,14 @@ namespace Aidge {
     std::uint8_t shift = 8 / nbSlot;
     const unsigned int nbFullCompactbytes = dataSize / nbSlot;

     // Main loop to process data in groups of `nbSlot`
     for (std::size_t i = 0; i < nbFullCompactbytes; ++i) {
         T compact = 0;

         for (unsigned int j = 0; j < nbSlot; ++j) {
             compact |= (data[i * nbSlot + j] & mask); // Apply mask to keep `nb_bits` only

             // Shift only if not on the last slot to make room for the next `nb_bits`
             if (j < nbSlot - 1) {
                 compact <<= shift;
@@ -55,7 +67,7 @@ namespace Aidge {
         // Store the compacted value in the output array
         compactData[i] = compact;
     }

     // Handle any remaining data elements (if dataSize is not a multiple of nbSlot).
     std::size_t remaining = dataSize % nbSlot;
@@ -63,7 +75,7 @@ namespace Aidge {
         std::int8_t compact = 0;
         for (std::size_t j = 0; j < remaining; ++j) {
             compact |= (data[nbFullCompactbytes*nbSlot + j] & mask);

             if (j < remaining - 1) {
                 compact <<= shift;
             }
@@ -75,7 +87,7 @@ namespace Aidge {
     }
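To sanity-check the packing arithmetic above, here is a small self-contained driver (a sketch, not part of the diff) that replays the main loop for nb_bits = 4 on the same bytes used in the 4-bit unit test below; unsigned storage is used so the left shifts are well defined:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Four signed 4-bit values, one per byte; the payload is the low nibble.
        const std::uint8_t data[4] = {0x0F, 0xF5, 0xB3, 0x9C};
        const std::uint8_t mask    = 0x0F;  // keep nb_bits = 4
        const unsigned     nbSlot  = 2;     // 8 / nb_bits values per output byte
        const std::uint8_t shift   = 4;     // 8 / nbSlot

        std::uint8_t compactData[2] = {0, 0};
        for (unsigned i = 0; i < 2; ++i) {              // dataSize / nbSlot full bytes
            std::uint8_t compact = 0;
            for (unsigned j = 0; j < nbSlot; ++j) {
                compact |= (data[i * nbSlot + j] & mask);
                if (j < nbSlot - 1) compact <<= shift;  // make room for the next nibble
            }
            compactData[i] = compact;
        }
        std::printf("%02X %02X\n", compactData[0], compactData[1]);  // prints: F5 3C
    }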
 template <class I, class O, int nb_bits>
-void WeightInterleavingImpl_cpu_forward_kernel(const DimSize_t input_interleaving,
+void WeightInterleavedImpl_cpu_forward_kernel(const DimSize_t input_interleaving,
     const DimSize_t nb_interleaving,
     const DimSize_t output_interleaving,
     const void* input_,
@@ -91,41 +103,41 @@ void WeightInterleavingImpl_cpu_forward_kernel(const DimSize_t input_interleavin
 }
-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::Int4, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::Int4>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 4>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::Int4, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::Int4>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 4>, nullptr});

-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::Int3, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::Int3>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 3>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::Int3, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::Int3>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 3>, nullptr});

-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::Int2, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::Int2>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 2>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::Int2, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::Int2>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 2>, nullptr});

-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::Binary, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::Binary>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 1>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::Binary, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::Binary>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 1>, nullptr});

-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::UInt4, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::UInt4>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<uint8_t, uint8_t, 4>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::UInt4, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::UInt4>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<uint8_t, uint8_t, 4>, nullptr});

-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::UInt3, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::UInt3>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<uint8_t, uint8_t, 3>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::UInt3, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::UInt3>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<uint8_t, uint8_t, 3>, nullptr});

-REGISTRAR(WeightInterleavingImpl_cpu,
-    {ImplSpec::IOSpec{DataType::UInt2, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavingType<DataType::UInt2>::type, DataFormat::NHWC}},
-    {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<uint8_t, uint8_t, 2>, nullptr});
+REGISTRAR(WeightInterleavedImpl_cpu,
+    {ImplSpec::IOSpec{DataType::UInt2, DataFormat::NHWC}, ImplSpec::IOSpec{WeightInterleavedType_v<DataType::UInt2>, DataFormat::NHWC}},
+    {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<uint8_t, uint8_t, 2>, nullptr});

-// REGISTRAR(WeightInterleavingImpl_cpu,
-//     {ImplSpec::IOSpec{DataType::Int4, DataFormat::NHWC}},
-//     {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 4>, nullptr});
+// REGISTRAR(WeightInterleavedImpl_cpu,
+//     {ImplSpec::IOSpec{DataType::Int4, DataFormat::NHWC}},
+//     {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 4>, nullptr});

-// REGISTRAR(WeightInterleavingImpl_cpu,
-//     {ImplSpec::IOSpec{DataType::Int3, DataFormat::NHWC}},
-//     {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 3>, nullptr});
+// REGISTRAR(WeightInterleavedImpl_cpu,
+//     {ImplSpec::IOSpec{DataType::Int3, DataFormat::NHWC}},
+//     {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 3>, nullptr});

-// REGISTRAR(WeightInterleavingImpl_cpu,
-//     {ImplSpec::IOSpec{DataType::Int2, DataFormat::NHWC}},
-//     {ProdConso::defaultModel, Aidge::WeightInterleavingImpl_cpu_forward_kernel<int8_t, int8_t, 2>, nullptr});
+// REGISTRAR(WeightInterleavedImpl_cpu,
+//     {ImplSpec::IOSpec{DataType::Int2, DataFormat::NHWC}},
+//     {ProdConso::defaultModel, Aidge::WeightInterleavedImpl_cpu_forward_kernel<int8_t, int8_t, 2>, nullptr});

 }

-#endif /* AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVINGIMPL_KERNELS_H_ */
+#endif /* AIDGE_CPU_OPERATOR_WEIGHTINTERLEAVEDIMPL_KERNELS_H_ */
\ No newline at end of file
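One substantive API change rides along with the rename: the packed-type lookup moves from WeightInterleavingType<DT>::type to a WeightInterleavedType_v<DT> variable template. The trait itself is defined in aidge_core and is not shown in this diff; the following is only a plausible sketch of its shape, with Dual_Int4 assumed as the packed storage type for Int4 (two values per byte):

    #include "aidge/data/DataType.hpp"

    // Hypothetical reconstruction; the exact member names and the Dual_Int4
    // mapping are assumptions, not taken from this diff.
    template <Aidge::DataType DT>
    struct WeightInterleavedType;  // primary template intentionally undefined

    template <>
    struct WeightInterleavedType<Aidge::DataType::Int4> {
        static constexpr Aidge::DataType value = Aidge::DataType::Dual_Int4;
    };

    template <Aidge::DataType DT>
    constexpr Aidge::DataType WeightInterleavedType_v = WeightInterleavedType<DT>::value;

The _v form mirrors the standard library's *_v shortcuts (e.g. std::is_same_v) and saves the ::type spelling at every call site, as the registrations above and the tests below show.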
@@ -9,7 +9,7 @@
  *
  ********************************************************************************/

-#include "aidge/backend/cpu/operator/WeightInterleavingImpl.hpp"
+#include "aidge/backend/cpu/operator/WeightInterleavedImpl.hpp"

 #include <cstddef>  // std::size_t
 #include <functional>
@@ -17,19 +17,19 @@
 #include <tuple>

 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include "aidge/backend/cpu/operator/WeightInterleavingImpl_kernels.hpp"
+#include "aidge/backend/cpu/operator/WeightInterleavedImpl_kernels.hpp"
 #include "aidge/operator/WeightInterleaving.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 template <>
-void Aidge::WeightInterleavingImpl_cpu::forward()
+void Aidge::WeightInterleavedImpl_cpu::forward()
 {
     const WeightInterleaving_Op& op_ = dynamic_cast<const WeightInterleaving_Op&>(mOp);

     AIDGE_ASSERT(op_.getInput(0), "missing input #0");

-    const auto impl = Registrar<WeightInterleavingImpl_cpu>::create(getBestMatch(getRequiredSpec()));
+    const auto impl = Registrar<WeightInterleavedImpl_cpu>::create(getBestMatch(getRequiredSpec()));

     // Convert input data (no overhead if not needed!)
     // TODO: right now, if needed, memory will be allocated/deallocated at each
@@ -38,14 +38,14 @@ void Aidge::WeightInterleavingImpl_cpu::forward()
     std::shared_ptr<Tensor> input0Fallback;
     const auto& input0 = op_.getInput(0)->refCastFrom(input0Fallback, *(op_.getOutput(0)));

     // inputInterleaving is the number of consecutive input elements that will be compacted
     // Here the interleaving is the last dimension (cf STM32 low bit kernels)
     std::size_t inputInterleaving = input0.dims().back();

     // The resulting compacted dimension was computed in forwardDims and the output tensor was resized
     std::size_t outputInterleaving = op_.getOutput(0)->dims().back();

     // nb_interleaving is the number of compacted segments
     std::size_t nbInterleaving;

     // Determine the number of segment to compact
@@ -65,11 +65,11 @@ void Aidge::WeightInterleavingImpl_cpu::forward()
         outputInterleaving,
         input0.getImpl()->rawPtr(),
         getCPUPtr(mOp.getRawOutput(0)));
 }
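As a concrete reading of the three quantities above: for the NHWC Int4 weight of shape {2, 3, 3, 4} used in the 4D test further down, inputInterleaving = 4 (the last dimension), the output tensor resized by forwardDims has last dimension 2 (two Int4 values per byte), so outputInterleaving = 2, and the computation of nbInterleaving elided between the hunks should yield 2 * 3 * 3 = 18 segments, one per position along the leading dimensions.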
 template <>
-void Aidge::WeightInterleavingImpl_cpu::backward() {
+void Aidge::WeightInterleavedImpl_cpu::backward() {
     AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for WeightInterleaving_Op on backend cpu");
 }
\ No newline at end of file
@@ -23,7 +23,7 @@
 using namespace Aidge;

 TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
     std::shared_ptr<Node> myWeightInterleaving = WeightInterleaving();
     auto opWeightInterleaving = std::static_pointer_cast<WeightInterleaving_Op>(myWeightInterleaving -> getOperator());
@@ -64,9 +64,9 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
     SECTION("CompactData - 4-bit compaction") {
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 4>{
             {static_cast<std::int8_t>(0x0F),
              static_cast<std::int8_t>(0xF5),
              static_cast<std::int8_t>(0xB3),
              static_cast<std::int8_t>(0x9C)}
         });
@@ -74,17 +74,17 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         weight->setDataType(Aidge::DataType::Int4);

         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array1D<std::int8_t, 2>{
             {static_cast<int8_t>(0xF5),
              static_cast<int8_t>(0x3C)}
         });
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);

         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
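The expected bytes can be verified by hand: (0x0F & 0x0F) << 4 | (0xF5 & 0x0F) = 0xF5 and (0xB3 & 0x0F) << 4 | (0x9C & 0x0F) = 0x3C, matching the expectedWeightInterleaving tensor above.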
@@ -93,8 +93,8 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
     SECTION("CompactData - 3-bit compaction") {
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 4>{
             {static_cast<int8_t>(0x0F),
              static_cast<int8_t>(0x05),
              static_cast<int8_t>(0x04),
              static_cast<int8_t>(0xD3)}
         });
@@ -103,17 +103,17 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         weight->setDataType(Aidge::DataType::Int3);

         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array1D<std::int8_t, 2>{
             {static_cast<int8_t>(0x75),
              static_cast<int8_t>(0x43)}
         });
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int3>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int3>);

         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int3>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int3>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
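A subtlety worth checking by hand: with nb_bits = 3 the mask is 0x07, but since nbSlot = 8/3 = 2 (integer division), the shift is 8/2 = 4 rather than 3. So 0x0F & 0x07 = 7 and 0x05 & 0x07 = 5 pack as (7 << 4) | 5 = 0x75, while 0x04 and 0xD3 & 0x07 = 3 pack as (4 << 4) | 3 = 0x43, as expected above.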
@@ -124,7 +124,7 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 4>{
             {static_cast<std::int8_t>(0x03),
              static_cast<std::int8_t>(0x02),
              static_cast<std::int8_t>(0x01),
              static_cast<std::int8_t>(0x00)}
         });
@@ -136,12 +136,12 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         });
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int2>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int2>);

         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int2>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int2>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -161,12 +161,12 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         });
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);

         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -175,8 +175,8 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
     SECTION("CompactData - Edge Cases - Non-divisible dataSize for nbSlot with nbbits=4") {
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 3>{
             {static_cast<int8_t>(0x0F),
              static_cast<int8_t>(0xA5),
              static_cast<int8_t>(0x34)}
         });
@@ -184,17 +184,17 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         weight->setDataType(Aidge::DataType::Int4);

         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array1D<std::int8_t, 2>{
             {static_cast<int8_t>(0xF5),
              static_cast<int8_t>(0x40)}
         });
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);

         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
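For this odd-length input, 0x0F and 0xA5 pack to 0xF5 as before, while the lone remainder 0x34 & 0x0F = 4 ends up left-aligned as 0x40 with zero padding in the low nibble; the alignment shift for the partial byte sits in the lines elided between the hunks above. The 3-bit edge case below follows the same pattern: 0x75, then 4 << 4 = 0x40.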
@@ -205,8 +205,8 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
     SECTION("CompactData - Edge Cases - Non-divisible dataSize for nbSlot with nbbits=3") {
         std::shared_ptr<Tensor> weight = std::make_shared<Tensor>(Array1D<std::int8_t, 3>{
             {static_cast<int8_t>(0x0F),
              static_cast<int8_t>(0x05),
              static_cast<int8_t>(0x04)}
         });
@@ -214,17 +214,17 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         weight->setDataType(Aidge::DataType::Int3);

         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array1D<std::int8_t, 2>{
             {static_cast<int8_t>(0x75),
              static_cast<int8_t>(0x40)}
         });
         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int3>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int3>);

         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int3>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int3>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -271,9 +271,9 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                     {-1, -6, -3, 0} // 'F' 'A' 'D' '0' in hexadecimal format
                 }
             }
         }
         });

         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array4D<std::int8_t,2,3,3,2> {
             {
                 {
@@ -310,19 +310,19 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                     {static_cast<int8_t>(0xFA), static_cast<int8_t>(0xD0)} // 'F' 'A' 'D' '0' in hexadecimal format
                 }
             }
         }
         });

         weight->setDataFormat(Aidge::DataFormat::NHWC);
         weight->setDataType(Aidge::DataType::Int4);

         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
-        expectedWeightInterleaving->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        expectedWeightInterleaving->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);

         std::shared_ptr<Node> myWeightInterleavingNode = WeightInterleaving();
         auto op = std::static_pointer_cast<OperatorTensor>(myWeightInterleavingNode -> getOperator());
         op->associateInput(0,weight);
-        op->setDataType(WeightInterleavingType<Aidge::DataType::Int4>::type);
+        op->setDataType(WeightInterleavedType_v<Aidge::DataType::Int4>);
         op->setDataFormat(DataFormat::NHWC);
         op->setBackend("cpu");
         myWeightInterleavingNode->forward();
@@ -368,9 +368,9 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                     {-1, -6, -3, 0} // 'F' 'A' 'D' '0' in hexadecimal format
                 }
             }
         }
         });

         std::shared_ptr<Tensor> expectedWeightInterleaving = std::make_shared<Tensor>(Array4D<std::int8_t,2,3,3,2> {
             {
                 {
@@ -407,7 +407,7 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
                     {static_cast<int8_t>(0xFA), static_cast<int8_t>(0xD0)} // 'F' 'A' 'D' '0' in hexadecimal format
                 }
             }
         }
         });

         expectedWeightInterleaving->setDataFormat(Aidge::DataFormat::NHWC);
@@ -415,12 +415,12 @@ TEST_CASE("[cpu/operator] WeightInterleaving", "[WeightInterleaving][CPU]") {
         // Create convolution node
         std::shared_ptr<Node> conv = Conv(4, 2, {3, 3}, "conv1");

         // Place the weight tensor in the weight producer of the conv
         auto weightProducer = conv->getParent(1);
         weightProducer->getOperator()->setOutput(0, weight);

         // Set dataType, dataformat and backend of convolution
         conv->getOperator()->setDataFormat(Aidge::DataFormat::NHWC);
         conv->getOperator()->setDataType(Aidge::DataType::Int4);
         conv->getOperator()->setBackend("cpu");