Skip to content
Snippets Groups Projects
Commit 75968752 authored by Maxence Naud's avatar Maxence Naud
Browse files

Merge branch 'dev' into learning

parents 9280ea46 c0a5d97a
No related branches found
No related tags found
2 merge requests!105version 0.2.0,!88Basic supervised learning
Pipeline #42284 passed
......@@ -23,12 +23,16 @@
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Database.hpp"
#include "aidge/data/DataProvider.hpp"
#include "aidge/graph/Connector.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/graph/OpArgs.hpp"
#include "aidge/graphRegex/GraphRegex.hpp"
#include "aidge/filler/Filler.hpp"
#include "aidge/nodeTester/ConditionalInterpreter.hpp"
#include "aidge/operator/Add.hpp"
......@@ -60,12 +64,7 @@
#include "aidge/operator/Sqrt.hpp"
#include "aidge/operator/Sub.hpp"
#include "aidge/operator/Transpose.hpp"
#include "aidge/optimizer/LR/LRSchedulerList.hpp"
#include "aidge/optimizer/LR/LRScheduler.hpp"
#include "aidge/scheduler/Scheduler.hpp"
#include "aidge/stimuli/Stimulus.hpp"
#include "aidge/recipes/Recipes.hpp"
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_FILLER_H_
#define AIDGE_CORE_FILLER_H_
#include <memory>
#include <random> // normal_distribution, uniform_real_distribution
#include "aidge/data/Tensor.hpp"
namespace Aidge {
/**
 * @brief Compute fan-in and fan-out for a 4-D weight tensor.
 *
 * fanIn  = size / dims[0]  (elements per slice along the first axis)
 * fanOut = size / dims[1]  (elements per slice along the second axis)
 *
 * @param tensor 4-D tensor (asserted); assumes an NCXX-style layout —
 *               dims[0] is treated as the batch/out-channel axis and
 *               dims[1] as the channel axis.
 * @param fanIn  [out] overwritten with the computed fan-in.
 * @param fanOut [out] overwritten with the computed fan-out.
 */
inline void calculateFanInFanOut(std::shared_ptr<Tensor> tensor,
                                 unsigned int& fanIn, unsigned int& fanOut) {
    AIDGE_ASSERT(
        tensor->nbDims() == 4,
        "Tensor need to have 4 dimensions to compute FanIn and FanOut.");
    // Warning: this function assumes an NCXX data layout.
    // Aidge currently only supports NCHW, but this may not be true in the
    // future.
    DimSize_t batchSize = tensor->dims()[0];
    DimSize_t channelSize = tensor->dims()[1];
    // Guard against division by zero below.
    AIDGE_ASSERT(batchSize != 0,
                 "Cannot calculate FanIn if tensor batch size is 0.");
    AIDGE_ASSERT(channelSize != 0,
                 "Cannot calculate FanOut if tensor channel size is 0.");
    fanIn = static_cast<unsigned int>(tensor->size() / batchSize);
    fanOut = static_cast<unsigned int>(tensor->size() / channelSize);
}
/// Normalization mode used by the variance-scaling fillers (Xavier, He):
/// scale by fan-in, fan-out, or their average.
enum VarianceNorm { FanIn, Average, FanOut };

/// @brief Fill every element of @p tensor with @p constantValue.
template <typename T>
void constantFiller(std::shared_ptr<Tensor> tensor, T constantValue);

/// @brief Fill @p tensor with draws from a normal distribution
///        N(@p mean, @p stdDev).
template <typename T>
void normalFiller(std::shared_ptr<Tensor> tensor, double mean = 0.0,
                  double stdDev = 1.0);

/// @brief Fill @p tensor with draws from a uniform distribution on
///        [@p min, @p max).
template <typename T>
void uniformFiller(std::shared_ptr<Tensor> tensor, T min, T max);

/// @brief Xavier (Glorot) uniform initialization, scaled by @p scaling.
/// @param varianceNorm which fan statistic normalizes the variance.
template <typename T>
void xavierUniformFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
                         VarianceNorm varianceNorm = FanIn);

/// @brief Xavier (Glorot) normal initialization, scaled by @p scaling.
/// @param varianceNorm which fan statistic normalizes the variance.
template <typename T>
void xavierNormalFiller(std::shared_ptr<Tensor> tensor, T scaling = 1.0,
                        VarianceNorm varianceNorm = FanIn);

/// @brief He (Kaiming) normal initialization.
/// @param varianceNorm which fan statistic normalizes the variance.
/// @param meanNorm numerator of the distribution mean (divided by the
///        selected fan statistic).
/// @param scaling multiplier applied to every drawn value.
template <typename T>
void heFiller(std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm = FanIn,
              T meanNorm = 0.0, T scaling = 1.0);
} // namespace Aidge
#endif /* AIDGE_CORE_FILLER_H_ */
......@@ -9,23 +9,53 @@
*
********************************************************************************/
#ifndef AIDGE_RANDOM_H_
#define AIDGE_RANDOM_H_
#include <algorithm>
#include <vector>
#include <random>
#include <vector>
namespace Aidge {
namespace Random {
/**
 * @brief Shuffle @p vec in place with a freshly seeded Mersenne Twister.
 *
 * Marked `inline`: this definition lives in a header (inside the
 * AIDGE_RANDOM_H_ guard), so a non-inline definition would violate the ODR
 * and cause multiple-definition link errors as soon as two translation
 * units include it.
 *
 * @param vec vector shuffled in place (element multiset is preserved).
 */
inline void randShuffle(std::vector<unsigned int>& vec) {
    std::random_device rd;
    std::mt19937 g(rd());
    std::shuffle(vec.begin(), vec.end(), g);
}
/**
 * @brief Generator wraps the single Mersenne Twister pseudo-random number
 * generator shared by the whole Aidge framework.
 *
 * All members are static: there is exactly one generator. Use ``setSeed``
 * to make runs reproducible and ``get`` to access the engine.
 */
class Generator {
   public:
    /**
     * @brief Seed the shared pseudo-random number generator and remember
     * the seed so it can be queried with ``getSeed``.
     *
     * @param seed new seed value.
     */
    static void setSeed(unsigned int seed);
    /// @brief Last seed passed to ``setSeed``.
    /// @note NOTE(review): initialized to 0 in the .cpp even though the
    /// generator itself is seeded from std::random_device — before the
    /// first ``setSeed`` call this value may not match the actual seed;
    /// confirm against the definition of ``seed``.
    static unsigned int getSeed() { return seed; };
    /**
     * @brief Access the shared Mersenne Twister engine.
     * Its seed can be set with the ``setSeed`` method.
     *
     * @return std::mt19937&
     */
    static std::mt19937& get() { return generator; };

   private:
    // Mersenne Twister pseudo-random number generator
    static std::mt19937 generator;
    // Last value passed to setSeed (see getSeed note above).
    static unsigned int seed;
};
/// Shuffle @p vec in place using the framework-wide Generator engine,
/// so results are reproducible after Generator::setSeed.
inline void randShuffle(std::vector<unsigned int>& vec) {
    auto& engine = Aidge::Random::Generator::get();
    std::shuffle(vec.begin(), vec.end(), engine);
}
#endif //AIDGE_RANDOM_H_
\ No newline at end of file
} // namespace Random
} // namespace Aidge
#endif // AIDGE_RANDOM_H_
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/data/Tensor.hpp"
#include "aidge/filler/Filler.hpp"
namespace py = pybind11;
namespace Aidge {
/**
 * @brief Register the VarianceNorm enum and the filler free functions on
 * module @p m.
 *
 * Each binding dispatches on the tensor's runtime data type (Float64 /
 * Float32) to the matching C++ template instantiation, and raises a Python
 * ValueError for unsupported types. Error messages name the filler that
 * rejected the type (the xavier/he messages previously all said "Uniform
 * filler" by copy-paste).
 */
void init_Filler(py::module &m) {
    py::enum_<enum VarianceNorm>(m, "VarianceNorm")
        .value("FanIn", VarianceNorm::FanIn)
        .value("Average", VarianceNorm::Average)
        .value("FanOut", VarianceNorm::FanOut)
        .export_values();

    m.def(
         "constant_filler",
         [](std::shared_ptr<Tensor> tensor, py::object value) -> void {
             switch (tensor->dataType()) {
                 case DataType::Float64:
                     constantFiller<double>(tensor, value.cast<double>());
                     break;
                 case DataType::Float32:
                     constantFiller<float>(tensor, value.cast<float>());
                     break;
                 default:
                     AIDGE_THROW_OR_ABORT(
                         py::value_error,
                         "Data type is not supported for Constant filler.");
             }
         },
         py::arg("tensor"), py::arg("value"))
        .def(
            "normal_filler",
            [](std::shared_ptr<Tensor> tensor, double mean,
               double stdDev) -> void {
                switch (tensor->dataType()) {
                    case DataType::Float64:
                        normalFiller<double>(tensor, mean, stdDev);
                        break;
                    case DataType::Float32:
                        normalFiller<float>(tensor, mean, stdDev);
                        break;
                    default:
                        AIDGE_THROW_OR_ABORT(
                            py::value_error,
                            "Data type is not supported for Normal filler.");
                }
            },
            py::arg("tensor"), py::arg("mean") = 0.0, py::arg("stdDev") = 1.0)
        .def(
            "uniform_filler",
            [](std::shared_ptr<Tensor> tensor, double min, double max) -> void {
                switch (tensor->dataType()) {
                    case DataType::Float64:
                        uniformFiller<double>(tensor, min, max);
                        break;
                    case DataType::Float32:
                        uniformFiller<float>(tensor, min, max);
                        break;
                    default:
                        AIDGE_THROW_OR_ABORT(
                            py::value_error,
                            "Data type is not supported for Uniform filler.");
                }
            },
            py::arg("tensor"), py::arg("min"), py::arg("max"))
        .def(
            "xavier_uniform_filler",
            [](std::shared_ptr<Tensor> tensor, py::object scaling,
               VarianceNorm varianceNorm) -> void {
                switch (tensor->dataType()) {
                    case DataType::Float64:
                        xavierUniformFiller<double>(
                            tensor, scaling.cast<double>(), varianceNorm);
                        break;
                    case DataType::Float32:
                        xavierUniformFiller<float>(
                            tensor, scaling.cast<float>(), varianceNorm);
                        break;
                    default:
                        // Fixed: previously reported "Uniform filler".
                        AIDGE_THROW_OR_ABORT(
                            py::value_error,
                            "Data type is not supported for Xavier Uniform "
                            "filler.");
                }
            },
            py::arg("tensor"), py::arg("scaling") = 1.0,
            py::arg("varianceNorm") = VarianceNorm::FanIn)
        .def(
            "xavier_normal_filler",
            [](std::shared_ptr<Tensor> tensor, py::object scaling,
               VarianceNorm varianceNorm) -> void {
                switch (tensor->dataType()) {
                    case DataType::Float64:
                        xavierNormalFiller<double>(
                            tensor, scaling.cast<double>(), varianceNorm);
                        break;
                    case DataType::Float32:
                        xavierNormalFiller<float>(tensor, scaling.cast<float>(),
                                                  varianceNorm);
                        break;
                    default:
                        // Fixed: previously reported "Uniform filler".
                        AIDGE_THROW_OR_ABORT(
                            py::value_error,
                            "Data type is not supported for Xavier Normal "
                            "filler.");
                }
            },
            py::arg("tensor"), py::arg("scaling") = 1.0,
            py::arg("varianceNorm") = VarianceNorm::FanIn)
        .def(
            "he_filler",
            [](std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm,
               py::object meanNorm, py::object scaling) -> void {
                switch (tensor->dataType()) {
                    case DataType::Float64:
                        heFiller<double>(tensor, varianceNorm,
                                         meanNorm.cast<double>(),
                                         scaling.cast<double>());
                        break;
                    case DataType::Float32:
                        heFiller<float>(tensor, varianceNorm,
                                        meanNorm.cast<float>(),
                                        scaling.cast<float>());
                        break;
                    default:
                        // Fixed: previously reported "Uniform filler".
                        AIDGE_THROW_OR_ABORT(
                            py::value_error,
                            "Data type is not supported for He filler.");
                }
            },
            py::arg("tensor"), py::arg("varianceNorm") = VarianceNorm::FanIn,
            py::arg("meanNorm") = 0.0, py::arg("scaling") = 1.0);
}
} // namespace Aidge
......@@ -11,12 +11,12 @@
#include <pybind11/pybind11.h>
#include "aidge/backend/cpu/data/TensorImpl.hpp" // This include add Tensor
#include "aidge/backend/cpu/data/TensorImpl.hpp" // This include add Tensor
namespace py = pybind11;
namespace Aidge {
void init_Random(py::module&);
void init_Data(py::module&);
void init_Database(py::module&);
void init_DataProvider(py::module&);
......@@ -71,9 +71,11 @@ void init_Recipes(py::module&);
void init_Scheduler(py::module&);
void init_TensorUtils(py::module&);
void init_Filler(py::module&);
void init_Aidge(py::module& m) {
init_Random(m);
void init_Aidge(py::module& m){
init_Data(m);
init_Database(m);
init_DataProvider(m);
......@@ -129,9 +131,8 @@ void init_Aidge(py::module& m){
init_Recipes(m);
init_Scheduler(m);
init_TensorUtils(m);
init_Filler(m);
}
PYBIND11_MODULE(aidge_core, m) {
init_Aidge(m);
}
}
PYBIND11_MODULE(aidge_core, m) { init_Aidge(m); }
} // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <pybind11/pybind11.h>
#include "aidge/utils/Random.hpp"
namespace py = pybind11;
namespace Aidge {
/**
 * @brief Register the `random` submodule and the Generator bindings on
 * module @p m.
 *
 * Exposes both `set_seed` and `get_seed`, mirroring the C++
 * Random::Generator static API (get_seed was previously not bound).
 */
void init_Random(py::module &m) {
    auto mRand = m.def_submodule("random", "Random module.");
    py::class_<Random::Generator>(mRand, "Generator")
        .def_static("set_seed", &Random::Generator::setSeed, py::arg("seed"))
        .def_static("get_seed", &Random::Generator::getSeed);
}
} // namespace Aidge
......@@ -41,8 +41,8 @@ Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::si
}
// Compute the number of batches depending on the mDropLast boolean
mNbBatch = (mDropLast) ?
static_cast<std::size_t>(std::floor(mNbItems / mBatchSize)) :
mNbBatch = (mDropLast) ?
static_cast<std::size_t>(std::floor(mNbItems / mBatchSize)) :
static_cast<std::size_t>(std::ceil(mNbItems / mBatchSize));
}
......@@ -98,7 +98,7 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() con
void Aidge::DataProvider::setBatches(){
mBatches.clear();
mBatches.resize(mNbItems);
std::iota(mBatches.begin(),
......@@ -106,7 +106,7 @@ void Aidge::DataProvider::setBatches(){
0U);
if (mShuffle){
Random::randShuffle(mBatches);
Aidge::Random::randShuffle(mBatches);
}
if (mNbItems % mBatchSize !=0){ // The last batch is not full
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <memory>
#include <random> // normal_distribution, uniform_real_distribution
#include "aidge/filler/Filler.hpp"
#include "aidge/data/Tensor.hpp"
/**
 * @brief Fill every element of @p tensor with @p constantValue.
 *
 * The tensor must have an implementation and a data type matching T.
 * Filling is done on a CPU view; the result is copied back only when the
 * tensor lives on another backend.
 */
template <typename T>
void Aidge::constantFiller(std::shared_ptr<Aidge::Tensor> tensor, T constantValue){
    AIDGE_ASSERT(tensor->getImpl(),
                 "Tensor got no implementation, cannot fill it.");
    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");

    std::shared_ptr<Aidge::Tensor> hostCopy;
    // CPU view of the tensor; a copy is made only if it is not already on CPU.
    Aidge::Tensor& hostTensor =
        tensor->refCastFrom(hostCopy, tensor->dataType(), "cpu");

    const std::size_t nbElems = hostTensor.size();
    for (std::size_t i = 0; i < nbElems; ++i) {
        hostTensor.set<T>(i, constantValue);
    }

    // Write the values back to the original tensor (no-op if already on CPU).
    tensor->copyCastFrom(hostTensor);
}

template void Aidge::constantFiller<float>(std::shared_ptr<Aidge::Tensor>, float);
template void Aidge::constantFiller<double>(std::shared_ptr<Aidge::Tensor>, double);
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <memory>
#include <random> // normal_distribution, uniform_real_distribution
#include "aidge/data/Tensor.hpp"
#include "aidge/filler/Filler.hpp"
#include "aidge/utils/Random.hpp"
/**
 * @brief Fill @p tensor in place with He (Kaiming) initialization:
 * draws from N(meanNorm / n, sqrt(2 / n)) multiplied by @p scaling,
 * where n is the fan statistic selected by @p varianceNorm
 * (FanIn, FanOut, or their average).
 *
 * @param tensor 4-D tensor to fill (see calculateFanInFanOut).
 * @param varianceNorm fan statistic normalizing variance and mean.
 * @param meanNorm numerator of the distribution mean.
 * @param scaling multiplier applied to every drawn value.
 */
template <typename T>
void Aidge::heFiller(std::shared_ptr<Aidge::Tensor> tensor,
                     Aidge::VarianceNorm varianceNorm, T meanNorm, T scaling) {
    AIDGE_ASSERT(tensor->getImpl(),
                 "Tensor got no implementation, cannot fill it.");
    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");

    // Fixed: `unsigned int fanIn, fanOut = 0;` left fanIn uninitialized
    // (the initializer applied only to fanOut). Both are overwritten by
    // calculateFanInFanOut, but initialize them explicitly anyway.
    unsigned int fanIn = 0;
    unsigned int fanOut = 0;
    Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);

    const T n((varianceNorm == Aidge::VarianceNorm::FanIn) ? fanIn
              : (varianceNorm == Aidge::VarianceNorm::Average)
                  ? (fanIn + fanOut) / 2.0
                  : fanOut);
    const T stdDev(std::sqrt(2.0 / n));
    const T mean(varianceNorm == Aidge::VarianceNorm::FanIn ? meanNorm / fanIn
                 : (varianceNorm == Aidge::VarianceNorm::Average)
                     ? meanNorm / ((fanIn + fanOut) / 2.0)
                     : meanNorm / fanOut);
    std::normal_distribution<T> normalDist(mean, stdDev);

    std::shared_ptr<Tensor> cpyTensor;
    // CPU view; a copy is made only if the tensor is not already on CPU.
    Tensor& tensorWithValues =
        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");

    // Draw from the framework-wide generator so setSeed makes this
    // reproducible.
    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
        tensorWithValues.set<T>(
            idx, scaling * normalDist(Aidge::Random::Generator::get()));
    }

    // Write the values back to the original tensor (actual copy only if
    // needed).
    tensor->copyCastFrom(tensorWithValues);
}

template void Aidge::heFiller<float>(std::shared_ptr<Aidge::Tensor>,
                                     Aidge::VarianceNorm, float, float);
template void Aidge::heFiller<double>(std::shared_ptr<Aidge::Tensor>,
                                      Aidge::VarianceNorm, double, double);
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <memory>
#include <random> // normal_distribution, uniform_real_distribution
#include "aidge/data/Tensor.hpp"
#include "aidge/filler/Filler.hpp"
#include "aidge/utils/Random.hpp"
/**
 * @brief Fill @p tensor in place with draws from N(@p mean, @p stdDev).
 *
 * Uses the framework-wide random generator, so results are reproducible
 * after Random::Generator::setSeed. The tensor's data type must match T.
 */
template <typename T>
void Aidge::normalFiller(std::shared_ptr<Aidge::Tensor> tensor, double mean,
                         double stdDev) {
    AIDGE_ASSERT(tensor->getImpl(),
                 "Tensor got no implementation, cannot fill it.");
    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");

    std::normal_distribution<T> gaussian(mean, stdDev);
    auto& rng = Aidge::Random::Generator::get();

    std::shared_ptr<Tensor> hostCopy;
    // CPU view; a copy is made only if the tensor is not already on CPU.
    Tensor& hostTensor =
        tensor->refCastFrom(hostCopy, tensor->dataType(), "cpu");

    const std::size_t nbElems = hostTensor.size();
    for (std::size_t i = 0; i < nbElems; ++i) {
        hostTensor.set<T>(i, gaussian(rng));
    }

    // Write the values back to the original tensor (no-op if already on CPU).
    tensor->copyCastFrom(hostTensor);
}

template void Aidge::normalFiller<float>(std::shared_ptr<Aidge::Tensor>, double,
                                         double);
template void Aidge::normalFiller<double>(std::shared_ptr<Aidge::Tensor>,
                                          double, double);
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <memory>
#include <random> // normal_distribution, uniform_real_distribution
#include "aidge/data/Tensor.hpp"
#include "aidge/filler/Filler.hpp"
#include "aidge/utils/Random.hpp"
/**
 * @brief Fill @p tensor in place with draws from a uniform distribution
 * on [@p min, @p max).
 *
 * Uses the framework-wide random generator, so results are reproducible
 * after Random::Generator::setSeed. The tensor's data type must match T.
 */
template <typename T>
void Aidge::uniformFiller(std::shared_ptr<Aidge::Tensor> tensor, T min, T max) {
    AIDGE_ASSERT(tensor->getImpl(),
                 "Tensor got no implementation, cannot fill it.");
    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");

    std::uniform_real_distribution<T> uniform(min, max);
    auto& rng = Aidge::Random::Generator::get();

    std::shared_ptr<Aidge::Tensor> hostCopy;
    // CPU view; a copy is made only if the tensor is not already on CPU.
    Aidge::Tensor& hostTensor =
        tensor->refCastFrom(hostCopy, tensor->dataType(), "cpu");

    const std::size_t nbElems = hostTensor.size();
    for (std::size_t i = 0; i < nbElems; ++i) {
        hostTensor.set<T>(i, uniform(rng));
    }

    // Write the values back to the original tensor (no-op if already on CPU).
    tensor->copyCastFrom(hostTensor);
}

template void Aidge::uniformFiller<float>(std::shared_ptr<Aidge::Tensor>, float,
                                          float);
template void Aidge::uniformFiller<double>(std::shared_ptr<Aidge::Tensor>,
                                           double, double);
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <memory>
#include <random> // normal_distribution, uniform_real_distribution
#include "aidge/data/Tensor.hpp"
#include "aidge/filler/Filler.hpp"
#include "aidge/utils/Random.hpp"
/**
 * @brief Fill @p tensor in place with Xavier (Glorot) uniform
 * initialization: draws from U(-sqrt(3/n), sqrt(3/n)) multiplied by
 * @p scaling, where n is the fan statistic selected by @p varianceNorm.
 *
 * @param tensor 4-D tensor to fill (see calculateFanInFanOut).
 * @param scaling multiplier applied to every drawn value.
 * @param varianceNorm fan statistic normalizing the variance.
 */
template <typename T>
void Aidge::xavierUniformFiller(std::shared_ptr<Aidge::Tensor> tensor,
                                T scaling, Aidge::VarianceNorm varianceNorm) {
    AIDGE_ASSERT(tensor->getImpl(),
                 "Tensor got no implementation, cannot fill it.");
    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");

    // Fixed: `unsigned int fanIn, fanOut = 0;` left fanIn uninitialized
    // (the initializer applied only to fanOut). Both are overwritten by
    // calculateFanInFanOut, but initialize them explicitly anyway.
    unsigned int fanIn = 0;
    unsigned int fanOut = 0;
    Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);

    const T n((varianceNorm == Aidge::VarianceNorm::FanIn) ? fanIn
              : (varianceNorm == Aidge::VarianceNorm::Average)
                  ? (fanIn + fanOut) / 2.0
                  : fanOut);
    const T scale(std::sqrt(3.0 / n));
    std::uniform_real_distribution<T> uniformDist(-scale, scale);

    std::shared_ptr<Aidge::Tensor> cpyTensor;
    // CPU view; a copy is made only if the tensor is not already on CPU.
    Aidge::Tensor& tensorWithValues =
        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");

    // Draw from the framework-wide generator so setSeed makes this
    // reproducible.
    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
        tensorWithValues.set<T>(
            idx, scaling * uniformDist(Aidge::Random::Generator::get()));
    }

    // Write the values back to the original tensor (actual copy only if
    // needed).
    tensor->copyCastFrom(tensorWithValues);
}
/**
 * @brief Fill @p tensor in place with Xavier (Glorot) normal
 * initialization: draws from N(0, sqrt(1/n)) multiplied by @p scaling,
 * where n is the fan statistic selected by @p varianceNorm.
 *
 * @param tensor 4-D tensor to fill (see calculateFanInFanOut).
 * @param scaling multiplier applied to every drawn value.
 * @param varianceNorm fan statistic normalizing the variance.
 */
template <typename T>
void Aidge::xavierNormalFiller(std::shared_ptr<Aidge::Tensor> tensor, T scaling,
                               Aidge::VarianceNorm varianceNorm) {
    AIDGE_ASSERT(tensor->getImpl(),
                 "Tensor got no implementation, cannot fill it.");
    AIDGE_ASSERT(NativeType<T>::type == tensor->dataType(), "Wrong data type");

    // Fixed: `unsigned int fanIn, fanOut = 0;` left fanIn uninitialized
    // (the initializer applied only to fanOut). Both are overwritten by
    // calculateFanInFanOut, but initialize them explicitly anyway.
    unsigned int fanIn = 0;
    unsigned int fanOut = 0;
    Aidge::calculateFanInFanOut(tensor, fanIn, fanOut);

    const T n((varianceNorm == Aidge::VarianceNorm::FanIn) ? fanIn
              : (varianceNorm == Aidge::VarianceNorm::Average)
                  ? (fanIn + fanOut) / 2.0
                  : fanOut);
    const double stdDev(std::sqrt(1.0 / n));
    std::normal_distribution<T> normalDist(0.0, stdDev);

    std::shared_ptr<Aidge::Tensor> cpyTensor;
    // CPU view; a copy is made only if the tensor is not already on CPU.
    Aidge::Tensor& tensorWithValues =
        tensor->refCastFrom(cpyTensor, tensor->dataType(), "cpu");

    // Draw from the framework-wide generator so setSeed makes this
    // reproducible.
    for (std::size_t idx = 0; idx < tensorWithValues.size(); ++idx) {
        tensorWithValues.set<T>(
            idx, scaling * normalDist(Aidge::Random::Generator::get()));
    }

    // Write the values back to the original tensor (actual copy only if
    // needed).
    tensor->copyCastFrom(tensorWithValues);
}

template void Aidge::xavierUniformFiller<float>(std::shared_ptr<Aidge::Tensor>,
                                                float, Aidge::VarianceNorm);
template void Aidge::xavierUniformFiller<double>(std::shared_ptr<Aidge::Tensor>,
                                                 double, Aidge::VarianceNorm);
template void Aidge::xavierNormalFiller<float>(std::shared_ptr<Aidge::Tensor>,
                                               float, Aidge::VarianceNorm);
template void Aidge::xavierNormalFiller<double>(std::shared_ptr<Aidge::Tensor>,
                                                double, Aidge::VarianceNorm);
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/utils/Random.hpp"
#include <random> // normal_distribution, uniform_real_distribution
std::mt19937 Aidge::Random::Generator::generator{std::random_device{}()};
unsigned int Aidge::Random::Generator::seed = 0;
void Aidge::Random::Generator::setSeed(unsigned int new_seed) {
seed = new_seed;
generator.seed(seed);
}
0.1.1
0.2.0
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment