Commit 57954e16 authored by Jerome Hue

Save changes

parent 720b47c2
Pipeline #71772 failed
@@ -37,6 +37,7 @@
 #include "aidge/backend/cpu/operator/ExpandImpl.hpp"
 #include "aidge/backend/cpu/operator/FaultImpl.hpp"
 #include "aidge/backend/cpu/operator/NBitFlipImpl.hpp"
+#include "aidge/backend/cpu/operator/FixedNBitFlipImpl.hpp"
 #include "aidge/backend/cpu/operator/FCImpl.hpp"
 #include "aidge/backend/cpu/operator/FoldImpl.hpp"
 #include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp"
/********************************************************************************
* Copyright (c) 2025 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_FIXEDNBITFLIPIMPL_H_
#define AIDGE_CPU_OPERATOR_FIXEDNBITFLIPIMPL_H_
#include <memory>
#include <tuple>
#include <vector>
#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
#include "aidge/operator/FixedNBitFlip.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
namespace Aidge {
// Operator implementation entry point for the backend
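// Kernel signature: input pointer, output pointer, number of elements,
// number of bits to flip, weight (element) indices and bit positions to flip.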
using FixedNBitFlipImpl_cpu = OperatorImpl_cpu<FixedNBitFlip_Op,
void(const void *,
void *,
std::size_t,
std::uint32_t,
std::vector<int>, // weights
std::vector<int> // bits
)>;
// Implementation entry point registration to Operator
REGISTRAR(FixedNBitFlip_Op, "cpu", Aidge::FixedNBitFlipImpl_cpu::create);
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_FIXEDNBITFLIPIMPL_H_ */
/********************************************************************************
* Copyright (c) 2025 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_FIXEDNBITFLIPIMPL_KERNELS_H_
#define AIDGE_CPU_OPERATOR_FIXEDNBITFLIPIMPL_KERNELS_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>
#include <vector>
#include "aidge/backend/cpu/operator/FixedNBitFlipImpl.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Log.hpp"
#include "aidge/utils/Registrar.hpp"
namespace Aidge {
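// Compile-time check that reinterpreting the bits of a T1 as a T2 is well defined:
// same size, trivially copyable on both sides, trivially constructible destination.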
template<typename T2, typename T1>
struct BitCastHelper {
static constexpr bool isValid =
sizeof(T1) == sizeof(T2) and
std::is_trivially_copyable<T1>::value and
std::is_trivially_copyable<T2>::value and
std::is_trivially_constructible<T2>::value;
};
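// Bit-level reinterpretation of `src` as a T2 (a pre-C++20 stand-in for std::bit_cast),
// implemented with std::memcpy to avoid undefined behaviour from type punning.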
template<typename T2, typename T1>
typename std::enable_if<BitCastHelper<T2, T1>::isValid, T2>::type
bitCast(const T1& src) {
T2 dst;
std::memcpy(&dst, &src, sizeof(T2));
return dst;
}
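// flipSingleBit: XOR a single bit of the value's unsigned integer representation.
// Overloads are selected on sizeof(T): 4-byte and 8-byte types are supported,
// any other size aborts at runtime.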
template<typename T>
typename std::enable_if<sizeof(T) == 4>::type
flipSingleBit(T& value, std::size_t bitToFlip)
{
std::uint32_t intRepresentation = bitCast<std::uint32_t>(value);
intRepresentation ^= (1u << bitToFlip);
value = bitCast<T>(intRepresentation);
}
template<typename T>
typename std::enable_if<sizeof(T) == 8>::type
flipSingleBit(T& value, std::size_t bitToFlip)
{
std::uint64_t intRepresentation = bitCast<std::uint64_t>(value);
intRepresentation ^= (1ull << bitToFlip);
value = bitCast<T>(intRepresentation);
}
template<typename T>
typename std::enable_if<(sizeof(T) != 4 and sizeof(T) != 8)>::type
flipSingleBit(T& /*value*/, std::size_t /*bitToFlip*/)
{
    AIDGE_THROW_OR_ABORT(std::runtime_error,
                         "Bit flip is not implemented for this data type.");
}
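// Forward kernel: copy the input tensor to the output, then flip bit `bits[i]`
// of the element at index `weights[i]` for each of the `nBitToFlip` requested faults.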
template <class I, class O>
void FixedNBitFlipImpl_cpu_forward_kernel(const void* input_,
void* output_,
std::size_t inputLength,
std::uint32_t nBitToFlip,
std::vector<int> weights,
std::vector<int> bits) {
const I* input = static_cast<const I*>(input_);
O* output = static_cast<O*>(output_);
std::copy(input, input+inputLength, output);
if(nBitToFlip == 0) {
return;
}
Log::notice("Performing {} bit flips on tensor", nBitToFlip);
Log::notice("Size of the weights vector : {} ", weights.size());
Log::notice("Size of the bits vector : {} ", bits.size());
// Debug Print
//for(auto weight : weights)
//{
// Log::notice("Weight {}", weight);
//}
//for(auto bit : bits)
//{
// Log::notice("Bit {}", bit);
//}
// End debug print
AIDGE_ASSERT(weights.size() >= nBitToFlip && bits.size() >= nBitToFlip,
    "FixedNBitFlip: fewer weight indices or bit positions than requested flips");
for(auto i = 0U; i < nBitToFlip; ++i) {
    const auto weightIndex = weights[i];
    const auto bitToFlip = bits[i];
    O oldValue = output[weightIndex];
    flipSingleBit(output[weightIndex], bitToFlip);
    O newValue = output[weightIndex];
    Log::notice("\t Flipping bit {} of weight {}.\tFrom {} to {}",
        bitToFlip,
        weightIndex,
        oldValue,
        newValue);
}
}
// Kernels registration to implementation entry point
REGISTRAR(FixedNBitFlipImpl_cpu,
{DataType::Float32},
{ProdConso::inPlaceModel,
Aidge::FixedNBitFlipImpl_cpu_forward_kernel<float, float>,
nullptr});
REGISTRAR(FixedNBitFlipImpl_cpu,
{DataType::Float64},
{ProdConso::inPlaceModel,
Aidge::FixedNBitFlipImpl_cpu_forward_kernel<double, double>,
nullptr});
REGISTRAR(FixedNBitFlipImpl_cpu,
{DataType::Int32},
{ProdConso::inPlaceModel,
Aidge::FixedNBitFlipImpl_cpu_forward_kernel<std::uint32_t, std::uint32_t>,
nullptr});
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_FIXEDNBITFLIPIMPL_KERNELS_H_ */
@@ -98,7 +98,7 @@ void NBitFlipImpl_cpu_forward_kernel(const void* input_,
     flipSingleBit(output[randomIndex], bitToFlip);
     O newValue = output[randomIndex];
-    Log::info("\t Flipping bit {} of weight {}.\tFrom {} to {}",
+    Log::notice("\t Flipping bit {} of weight {}.\tFrom {} to {}",
                 bitToFlip,
                 randomIndex,
                 oldValue,
/********************************************************************************
* Copyright (c) 2025 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/backend/cpu/operator/FixedNBitFlipImpl.hpp"
#include <stdexcept>
#include <vector>
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/backend/cpu/operator/FixedNBitFlipImpl_kernels.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/NBitFlip.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Log.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/Registrar.hpp"
template <>
void Aidge::FixedNBitFlipImpl_cpu::forward() {
const FixedNBitFlip_Op& op_ = dynamic_cast<const FixedNBitFlip_Op&>(mOp);
std::shared_ptr<Tensor> in0 = op_.getInput(0);
std::shared_ptr<Tensor> out0 = op_.getOutput(0);
AIDGE_ASSERT(in0, "missing input #0");
// Find the correct kernel type
const auto impl = Registrar<FixedNBitFlipImpl_cpu>::create(getBestMatch(getRequiredSpec()));
// Call kernel
impl.forward(
getCPUPtr(mOp.getRawInput(0)),
getCPUPtr(mOp.getRawOutput(0)),
in0->size(),
op_.nBits(),
op_.weights(),
op_.bits());
}
template <>
void Aidge::FixedNBitFlipImpl_cpu::backward() {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Not implemented yet");
}