Commit 4352dddc authored by Maxence Naud

Merge branch 'master' into tiling

parents c215a4ca c20897c1
2 merge requests: !22 Update operators implementation, !16 Draft: Tiling
Pipeline #32955 failed
@@ -50,9 +50,9 @@ class SoftmaxImpl_cpu : public OperatorImpl {
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
-    void forward();
+    void forward() override;
-    void backward();
+    void backward() override;
};
namespace {
......
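The only change shown in this hunk is adding `override` to the forward/backward declarations. As a minimal standalone sketch (generic C++, not Aidge code; the class names below are invented), `override` turns a declaration that fails to match a base-class virtual into a compile-time error instead of silently introducing an unrelated function:

#include <cstdio>

struct Base {
    virtual void forward() { std::puts("Base::forward"); }
    virtual ~Base() = default;
};

struct Impl : Base {
    void forward() override { std::puts("Impl::forward"); }  // OK: matches Base::forward exactly
    // void forward() const override {}  // would not compile: no base virtual with this signature
};

int main() {
    Impl i;
    Base& b = i;
    b.forward();  // dispatches to Impl::forward
    return 0;
}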
@@ -70,7 +70,8 @@ class CMakeBuild(build_ext):
self.spawn(['cmake', str(cwd), param_py, '-DTEST=OFF', f'-DCMAKE_INSTALL_PREFIX:PATH={install_path}'])
if not self.dry_run:
-    self.spawn(['make', 'all', 'install', '-j', max_jobs])
+    self.spawn(['cmake', '--build', '.', '--config', 'Debug', '-j', max_jobs])
+    self.spawn(['cmake', '--install', '.', '--config', 'Debug'])
os.chdir(str(cwd))
aidge_package = build_lib / (get_project_name())
@@ -81,7 +82,7 @@ class CMakeBuild(build_ext):
# Copy all shared object files from build_temp/lib to aidge_package
for root, _, files in os.walk(build_temp.absolute()):
for file in files:
-    if file.endswith('.so') and (root != str(aidge_package.absolute())):
+    if (file.endswith('.so') or file.endswith('.pyd')) and (root != str(aidge_package.absolute())):
currentFile=os.path.join(root, file)
shutil.copy(currentFile, str(aidge_package.absolute()))
@@ -100,7 +101,6 @@ if __name__ == '__main__':
long_description_content_type="text/markdown",
long_description="\n".join(DOCLINES[2:]),
classifiers=[c for c in CLASSIFIERS.split('\n') if c],
platforms=["Linux"],
packages=find_packages(where="."),
include_package_data=True,
ext_modules=[CMakeExtension(get_project_name())],
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <chrono> // std::chrono::milliseconds
#include <numeric> // std::accumulate
#include <thread> // std::this_thread::sleep_for
#include <vector>
#include "aidge/utils/Types.h"
#include "aidge/operator/Conv.hpp"
#include "aidge/backend/cpu/operator/PadImpl.hpp"
#include "aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp"
Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(inputIdx == 0 && "operator has only one input");
(void) inputIdx;
    // Requires the whole tensor
const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims();
return std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbRequiredProtected(IOIndex_t inputIdx) const {
assert(inputIdx == 0 && "operator has only one input");
(void) inputIdx;
// Padding cannot be in-place!
    // We must ensure that we do not overwrite data that has not been consumed yet.
const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims();
const size_t inputSize = std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
    const size_t outputSize = std::accumulate(outputDims.begin(), outputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
return (outputSize - inputSize);
}
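As a worked example of the size difference computed above (a standalone sketch under assumed dimensions, not a call into the Aidge API): with the padding configuration used in the PaddedConv "test Padding" case further down, a 2x3x5x5 input padded by {1,1,1,1} yields a 2x3x7x7 output, so 294 - 150 = 144 output elements must not overwrite input data that has not yet been consumed.

#include <cstddef>
#include <cstdio>
#include <functional>
#include <numeric>
#include <vector>

int main() {
    // Dims assumed from the PaddedConv "test Padding" case: NCHW input 2x3x5x5,
    // padding {1,1,1,1} on H and W -> padded output 2x3x7x7.
    const std::vector<std::size_t> inputDims  = {2, 3, 5, 5};
    const std::vector<std::size_t> outputDims = {2, 3, 7, 7};
    const std::size_t inputSize  = std::accumulate(inputDims.begin(),  inputDims.end(),
                                                   std::size_t(1), std::multiplies<std::size_t>());
    const std::size_t outputSize = std::accumulate(outputDims.begin(), outputDims.end(),
                                                   std::size_t(1), std::multiplies<std::size_t>());
    std::printf("protected elements: %zu\n", outputSize - inputSize);  // prints 144
    return 0;
}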
Aidge::NbElts_t Aidge::PadImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
    // Requires the whole tensor, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
(void) outputIdx;
const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
}
Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::PadImpl2D_cpu::updateConsummerProducer(){
// Update producer-consumer data
for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
// amount for a forward pass
mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::PadImpl2D_cpu::forward() {
    // FIXME: uncomment the following code once memory handling works
assert(mOp.getInput(0) && "missing input #0");
// Find the correct kernel type
auto kernelFunc =
Registrar<PadImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
// Call kernel
kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
mOp.getInput(0)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
}
void Aidge::PadImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
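The registered PadImpl2DForward_cpu kernel itself is not part of this commit excerpt. For orientation only, here is a minimal standalone sketch of 2D zero padding on NCHW data (the function name, signature, and float element type are illustrative assumptions, not the Aidge kernel interface):

#include <cstddef>
#include <vector>

// Copies `in` (N x C x H x W, row-major) into a zero-initialized output that is
// enlarged by padTop/padBottom rows and padLeft/padRight columns.
std::vector<float> padNCHW(const std::vector<float>& in,
                           std::size_t N, std::size_t C, std::size_t H, std::size_t W,
                           std::size_t padTop, std::size_t padBottom,
                           std::size_t padLeft, std::size_t padRight) {
    const std::size_t Ho = H + padTop + padBottom;
    const std::size_t Wo = W + padLeft + padRight;
    std::vector<float> out(N * C * Ho * Wo, 0.0f);  // padding value: 0
    for (std::size_t n = 0; n < N; ++n)
        for (std::size_t c = 0; c < C; ++c)
            for (std::size_t h = 0; h < H; ++h)
                for (std::size_t w = 0; w < W; ++w)
                    out[((n * C + c) * Ho + (h + padTop)) * Wo + (w + padLeft)] =
                        in[((n * C + c) * H + h) * W + w];
    return out;
}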
@@ -156,165 +156,8 @@ TEST_CASE("[cpu/operator] Conv(forward)") {
// myConv->getOperator()->getOutput(0)->print();
REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
}
SECTION("test Padding") {
std::shared_ptr<Node> myConv = Conv(3,4,{3,3}, "myconv", {1,1}, {1,1,1,1});
myConv->getOperator()->setDatatype(DataType::Int32);
myConv->getOperator()->setBackend("cpu");
std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
{
{
{{ 0, 1, 2},
{ 3, 4, 5},
{ 6, 7, 8}},
{{ 9, 10, 11},
{ 12, 13, 14},
{ 15, 16, 17}},
{{ 18, 19, 20},
{ 21, 22, 23},
{ 24, 25, 26}}
},
{
{{ 27, 28, 29},
{ 30, 31, 32},
{ 33, 34, 35}},
{{ 36, 37, 38},
{ 39, 40, 41},
{ 42, 43, 44}},
{{ 45, 46, 47},
{ 48, 49, 50},
{ 51, 52, 53}}
},
{
{{ 54, 55, 56},
{ 57, 58, 59},
{ 60, 61, 62}},
{{ 63, 64, 65},
{ 66, 67, 68},
{ 69, 70, 71}},
{{ 72, 73, 74},
{ 75, 76, 77},
{ 78, 79, 80}}
},
{
{{ 81, 82, 83},
{ 84, 85, 86},
{ 87, 88, 89}},
{{ 90, 91, 92},
{ 93, 94, 95},
{ 96, 97, 98}},
{{ 99, 100, 101},
{102, 103, 104},
{105, 106, 107}}
}
}
});
std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
{
{
{{ 0, 1, 2, 3, 4},
{ 5, 6, 7, 8, 9},
{ 10, 11, 12, 13, 14},
{ 15, 16, 17, 18, 19},
{ 20, 21, 22, 23, 24}},
{{ 25, 26, 27, 28, 29},
{ 30, 31, 32, 33, 34},
{ 35, 36, 37, 38, 39},
{ 40, 41, 42, 43, 44},
{ 45, 46, 47, 48, 49}},
{{ 50, 51, 52, 53, 54},
{ 55, 56, 57, 58, 59},
{ 60, 61, 62, 63, 64},
{ 65, 66, 67, 68, 69},
{ 70, 71, 72, 73, 74}}
},
{
{{ 75, 76, 77, 78, 79},
{ 80, 81, 82, 83, 84},
{ 85, 86, 87, 88, 89},
{ 90, 91, 92, 93, 94},
{ 95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}
}
}
});
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,5,5> {
{
{
{{ 6895, 10225, 10486, 10747, 7063},
{ 10303, 15226, 15577, 15928, 10429},
{ 11518, 16981, 17332, 17683, 11554},
{ 12733, 18736, 19087, 19438, 12679},
{ 8047, 11791, 11998, 12205, 7927}},
{{ 15960, 24069, 24816, 25563, 17100},
{ 25119, 37818, 38898, 39978, 26703},
{ 28764, 43218, 44298, 45378, 30258},
{ 32409, 48618, 49698, 50778, 33813},
{ 21972, 32925, 33618, 34311, 22824}},
{{ 25041, 37929, 39162, 40395, 27153},
{ 39951, 60426, 62235, 64044, 42993},
{ 46026, 69471, 71280, 73089, 48978},
{ 52101, 78516, 80325, 82134, 54963},
{ 35913, 54075, 55254, 56433, 37737}},
{{ 34104, 51771, 53490, 55209, 37188},
{ 54765, 83016, 85554, 88092, 59265},
{ 63270, 95706, 98244, 100782, 67680},
{ 71775, 108396, 110934, 113472, 76095},
{ 49836, 75207, 76872, 78537, 52632}}
},
{
{{ 20395, 29800, 30061, 30322, 19663},
{ 28528, 41551, 41902, 42253, 27304},
{ 29743, 43306, 43657, 44008, 28429},
{ 30958, 45061, 45412, 45763, 29554},
{ 18847, 27316, 27523, 27730, 17827}},
{{ 53760, 80094, 80841, 81588, 54000},
{ 79794, 118818, 119898, 120978, 80028},
{ 83439, 124218, 125298, 126378, 83583},
{ 87084, 129618, 130698, 131778, 87138},
{ 57072, 84900, 85593, 86286, 57024}},
{{ 87141, 130404, 131637, 132870, 88353},
{131076, 196101, 197910, 199719, 132768},
{137151, 205146, 206955, 208764, 138753},
{143226, 214191, 216000, 217809, 144738},
{ 95313, 142500, 143679, 144858, 96237}},
{{120504, 180696, 182415, 184134, 122688},
{182340, 273366, 275904, 278442, 185490},
{190845, 286056, 288594, 291132, 193905},
{199350, 298746, 301284, 303822, 202320},
{133536, 200082, 201747, 203412, 135432}}
}
}
});
myConv->getOperator()->associateInput(0,myInput);
myConv->getOperator()->associateInput(1,myWeights);
myConv->getOperator()->associateInput(2,myBias);
myConv->getOperator()->computeOutputDims();
myConv->forward();
REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
}
SECTION("Point-wise") {
-    std::shared_ptr<Node> myConv = Conv(3,4,{1,1}, "myconv", {1,1}, {0,0,0,0});
+    std::shared_ptr<Node> myConv = Conv(3,4,{1,1}, "myconv", {1,1});
myConv->getOperator()->setDatatype(DataType::Float32);
myConv->getOperator()->setBackend("cpu");
myConv->getOperator()->input(0) = Array4D<float,2,3,3,3> {
......
This diff is collapsed.
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstdlib>
#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/MetaOperatorDefs.hpp"
#include "aidge/backend/cpu.hpp"
using namespace Aidge;
TEST_CASE("[cpu/operator] PaddedConv(forward)") {
SECTION("Classic Conv") {
std::shared_ptr<Node> myConv = PaddedConv(3,4,{3,3}, "myconv");
myConv->getOperator()->setDatatype(DataType::Int32);
myConv->getOperator()->setBackend("cpu");
std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
{
{
{{ 0, 1, 2},
{ 3, 4, 5},
{ 6, 7, 8}},
{{ 9, 10, 11},
{ 12, 13, 14},
{ 15, 16, 17}},
{{ 18, 19, 20},
{ 21, 22, 23},
{ 24, 25, 26}}
},
{
{{ 27, 28, 29},
{ 30, 31, 32},
{ 33, 34, 35}},
{{ 36, 37, 38},
{ 39, 40, 41},
{ 42, 43, 44}},
{{ 45, 46, 47},
{ 48, 49, 50},
{ 51, 52, 53}}
},
{
{{ 54, 55, 56},
{ 57, 58, 59},
{ 60, 61, 62}},
{{ 63, 64, 65},
{ 66, 67, 68},
{ 69, 70, 71}},
{{ 72, 73, 74},
{ 75, 76, 77},
{ 78, 79, 80}}
},
{
{{ 81, 82, 83},
{ 84, 85, 86},
{ 87, 88, 89}},
{{ 90, 91, 92},
{ 93, 94, 95},
{ 96, 97, 98}},
{{ 99, 100, 101},
{102, 103, 104},
{105, 106, 107}}
}
}
});
std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
{
{
{{ 0, 1, 2, 3, 4},
{ 5, 6, 7, 8, 9},
{ 10, 11, 12, 13, 14},
{ 15, 16, 17, 18, 19},
{ 20, 21, 22, 23, 24}},
{{ 25, 26, 27, 28, 29},
{ 30, 31, 32, 33, 34},
{ 35, 36, 37, 38, 39},
{ 40, 41, 42, 43, 44},
{ 45, 46, 47, 48, 49}},
{{ 50, 51, 52, 53, 54},
{ 55, 56, 57, 58, 59},
{ 60, 61, 62, 63, 64},
{ 65, 66, 67, 68, 69},
{ 70, 71, 72, 73, 74}}
},
{
{{ 75, 76, 77, 78, 79},
{ 80, 81, 82, 83, 84},
{ 85, 86, 87, 88, 89},
{ 90, 91, 92, 93, 94},
{ 95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}
}
}
});
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> {
{
{
{{ 15226, 15577, 15928},
{ 16981, 17332, 17683},
{ 18736, 19087, 19438}},
{{ 37818, 38898, 39978},
{ 43218, 44298, 45378},
{ 48618, 49698, 50778}},
{{ 60426, 62235, 64044},
{ 69471, 71280, 73089},
{ 78516, 80325, 82134}},
{{ 83016, 85554, 88092},
{ 95706, 98244, 100782},
{108396, 110934, 113472}}
},
{
{{ 41551, 41902, 42253},
{ 43306, 43657, 44008},
{ 45061, 45412, 45763}},
{{118818, 119898, 120978},
{124218, 125298, 126378},
{129618, 130698, 131778}},
{{196101, 197910, 199719},
{205146, 206955, 208764},
{214191, 216000, 217809}},
{{273366, 275904, 278442},
{286056, 288594, 291132},
{298746, 301284, 303822}}
}
}
});
myConv->getOperator()->associateInput(0,myInput);
myConv->getOperator()->associateInput(1,myWeights);
myConv->getOperator()->associateInput(2,myBias);
myConv->getOperator()->computeOutputDims();
myConv->forward();
REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
}
SECTION("test Padding") {
std::shared_ptr<Node> myConv = PaddedConv(3,4,{3,3}, "myconv", {1,1}, {1,1,1,1});
myConv->getOperator()->setDatatype(DataType::Int32);
myConv->getOperator()->setBackend("cpu");
std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
{
{
{{ 0, 1, 2},
{ 3, 4, 5},
{ 6, 7, 8}},
{{ 9, 10, 11},
{ 12, 13, 14},
{ 15, 16, 17}},
{{ 18, 19, 20},
{ 21, 22, 23},
{ 24, 25, 26}}
},
{
{{ 27, 28, 29},
{ 30, 31, 32},
{ 33, 34, 35}},
{{ 36, 37, 38},
{ 39, 40, 41},
{ 42, 43, 44}},
{{ 45, 46, 47},
{ 48, 49, 50},
{ 51, 52, 53}}
},
{
{{ 54, 55, 56},
{ 57, 58, 59},
{ 60, 61, 62}},
{{ 63, 64, 65},
{ 66, 67, 68},
{ 69, 70, 71}},
{{ 72, 73, 74},
{ 75, 76, 77},
{ 78, 79, 80}}
},
{
{{ 81, 82, 83},
{ 84, 85, 86},
{ 87, 88, 89}},
{{ 90, 91, 92},
{ 93, 94, 95},
{ 96, 97, 98}},
{{ 99, 100, 101},
{102, 103, 104},
{105, 106, 107}}
}
}
});
std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
{
{
{{ 0, 1, 2, 3, 4},
{ 5, 6, 7, 8, 9},
{ 10, 11, 12, 13, 14},
{ 15, 16, 17, 18, 19},
{ 20, 21, 22, 23, 24}},
{{ 25, 26, 27, 28, 29},
{ 30, 31, 32, 33, 34},
{ 35, 36, 37, 38, 39},
{ 40, 41, 42, 43, 44},
{ 45, 46, 47, 48, 49}},
{{ 50, 51, 52, 53, 54},
{ 55, 56, 57, 58, 59},
{ 60, 61, 62, 63, 64},
{ 65, 66, 67, 68, 69},
{ 70, 71, 72, 73, 74}}
},
{
{{ 75, 76, 77, 78, 79},
{ 80, 81, 82, 83, 84},
{ 85, 86, 87, 88, 89},
{ 90, 91, 92, 93, 94},
{ 95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}
}
}
});
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,5,5> {
{
{
{{ 6895, 10225, 10486, 10747, 7063},
{ 10303, 15226, 15577, 15928, 10429},
{ 11518, 16981, 17332, 17683, 11554},
{ 12733, 18736, 19087, 19438, 12679},
{ 8047, 11791, 11998, 12205, 7927}},
{{ 15960, 24069, 24816, 25563, 17100},
{ 25119, 37818, 38898, 39978, 26703},
{ 28764, 43218, 44298, 45378, 30258},
{ 32409, 48618, 49698, 50778, 33813},
{ 21972, 32925, 33618, 34311, 22824}},
{{ 25041, 37929, 39162, 40395, 27153},
{ 39951, 60426, 62235, 64044, 42993},
{ 46026, 69471, 71280, 73089, 48978},
{ 52101, 78516, 80325, 82134, 54963},
{ 35913, 54075, 55254, 56433, 37737}},
{{ 34104, 51771, 53490, 55209, 37188},
{ 54765, 83016, 85554, 88092, 59265},
{ 63270, 95706, 98244, 100782, 67680},
{ 71775, 108396, 110934, 113472, 76095},
{ 49836, 75207, 76872, 78537, 52632}}
},
{
{{ 20395, 29800, 30061, 30322, 19663},
{ 28528, 41551, 41902, 42253, 27304},
{ 29743, 43306, 43657, 44008, 28429},
{ 30958, 45061, 45412, 45763, 29554},
{ 18847, 27316, 27523, 27730, 17827}},
{{ 53760, 80094, 80841, 81588, 54000},
{ 79794, 118818, 119898, 120978, 80028},
{ 83439, 124218, 125298, 126378, 83583},
{ 87084, 129618, 130698, 131778, 87138},
{ 57072, 84900, 85593, 86286, 57024}},
{{ 87141, 130404, 131637, 132870, 88353},
{131076, 196101, 197910, 199719, 132768},
{137151, 205146, 206955, 208764, 138753},
{143226, 214191, 216000, 217809, 144738},
{ 95313, 142500, 143679, 144858, 96237}},
{{120504, 180696, 182415, 184134, 122688},
{182340, 273366, 275904, 278442, 185490},
{190845, 286056, 288594, 291132, 193905},
{199350, 298746, 301284, 303822, 202320},
{133536, 200082, 201747, 203412, 135432}}
}
}
});
myConv->getOperator()->associateInput(0,myInput);
myConv->getOperator()->associateInput(1,myWeights);
myConv->getOperator()->associateInput(2,myBias);
myConv->getOperator()->computeOutputDims();
myConv->forward();
REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
}
}
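The expected shapes in both test files follow the standard convolution output-size formula, out = floor((in + padBegin + padEnd - kernel) / stride) + 1 per spatial dimension: with a 5x5 input, 3x3 kernel, and stride 1, the unpadded "Classic Conv" section gives (5 - 3)/1 + 1 = 3 (hence the 2x4x3x3 expected output), while the {1,1,1,1}-padded "test Padding" section gives (5 + 1 + 1 - 3)/1 + 1 = 5 (hence 2x4x5x5).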