Skip to content
Snippets Groups Projects
Commit a0817f6a authored by Olivier BICHLER's avatar Olivier BICHLER
Browse files

Merge branch 'master' of gitlab.eclipse.org:eclipse/aidge/aidge_backend_cpu

parents f19edbda a59450ea
No related branches found
No related tags found
No related merge requests found
Showing
with 139 additions and 82 deletions
......@@ -36,7 +36,54 @@ class test_scheduler(unittest.TestCase):
for i in range(len(expected_out)):
self.assertEqual(expected_out[i], out_tensor[i])
def test_sequential_scheduling(self):
    """A linear chain of FC layers must be scheduled in graph order."""
    # Build the graph first: three fully-connected layers chained sequentially,
    # named after their expected position in the schedule.
    model = aidge_core.sequential([
        aidge_core.FC(50, name='0'),
        aidge_core.FC(50, name='1'),
        aidge_core.FC(10, name='2'),
    ])
    # Feed it from a Producer holding an (empty) float32 tensor; presumably the
    # scheduler only needs the topology here, not real data.
    tensor_in = aidge_core.Tensor(np.array([]).astype(np.float32))
    producer = aidge_core.Producer(tensor_in, "X")
    producer.add_child(model)
    # Configure datatype/backend on both the producer and the graph view.
    producer.get_operator().set_datatype(aidge_core.DataType.Float32)
    producer.get_operator().set_backend("cpu")
    model.set_datatype(aidge_core.DataType.Float32)
    model.set_backend("cpu")
    # Generate the static schedule and compare node names in order.
    scheduler = aidge_core.SequentialScheduler(model)
    scheduler.generate_scheduling()
    schedule_names = [node.name() for node in scheduler.get_static_scheduling()]
    self.assertListEqual(schedule_names, ['0', '1', '2'])
def test_parallel_scheduling(self):
    """Scheduling a fork/join graph: the fork node must come first, the join
    node last, and the two parallel branches may run in either order."""
    # NOTE(review): the Producer is fed an empty tensor; presumably only the
    # graph topology matters for scheduling -- confirm against the scheduler.
    input_data = np.array([]).astype(np.float32)
    input_tensor = aidge_core.Tensor(input_data)
    input_node = aidge_core.Producer(input_tensor, "X")
    graph_view = aidge_core.sequential([
        aidge_core.FC(50, name='0'),
        aidge_core.parallel([aidge_core.FC(50, name='1'), aidge_core.FC(50, name='3')]),
        aidge_core.Add(name='2'),
    ])
    # Both interleavings of the parallel branches are valid schedules.
    EXPECTED_SCHEDULE = [['0', '1', '3', '2'], ['0', '3', '1', '2']]
    input_node.add_child(graph_view)
    input_node.get_operator().set_datatype(aidge_core.DataType.Float32)
    input_node.get_operator().set_backend("cpu")
    graph_view.set_datatype(aidge_core.DataType.Float32)
    graph_view.set_backend("cpu")
    scheduler = aidge_core.SequentialScheduler(graph_view)
    scheduler.generate_scheduling()
    # assertIn reports both operands on failure, unlike assertTrue(x in y)
    # which only prints "False is not true".
    self.assertIn([i.name() for i in scheduler.get_static_scheduling()],
                  EXPECTED_SCHEDULE)
if __name__ == '__main__':
unittest.main()
......@@ -97,6 +97,7 @@ class AddImpl_cpu : public OperatorImpl {
assert(outputIdx < mNbProducedData.size());
return mNbProducedData[outputIdx];
}
void updateConsummerProducer() override final;
void forward() {
// nothing
......@@ -130,6 +131,7 @@ class AddImpl_cpu<1> : public OperatorImpl {
NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final;
NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
void updateConsummerProducer() override final;
void forward();
......@@ -161,6 +163,7 @@ class AddImpl_cpu<2> : public OperatorImpl {
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
void updateConsummerProducer() override final;
void forward();
......@@ -191,6 +194,7 @@ class AddImpl_cpu<3> : public OperatorImpl {
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward();
......@@ -204,4 +208,4 @@ static Registrar<Add_Op<3>> registrarAddImpl3I_cpu("cpu", Aidge::AddImpl_cpu<3>:
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_H_ */
\ No newline at end of file
#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_H_ */
......@@ -54,6 +54,7 @@ class AvgPoolingImpl2D_cpu : public OperatorImpl {
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward();
......@@ -66,4 +67,4 @@ static Registrar<AvgPooling_Op<2>> registrarAvgPoolingImpl2D_cpu("cpu", Aidge::A
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_H_ */
\ No newline at end of file
#endif /* AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_H_ */
......@@ -69,6 +69,7 @@ class BatchNormImpl2D_cpu : public OperatorImpl {
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward();
......@@ -81,4 +82,4 @@ static Registrar<BatchNorm_Op<2>> registrarBatchNormImpl2D_cpu("cpu", Aidge::Bat
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_BATCHNORMIMPL_H_ */
\ No newline at end of file
#endif /* AIDGE_CPU_OPERATOR_BATCHNORMIMPL_H_ */
......@@ -56,6 +56,7 @@ class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward();
......@@ -68,4 +69,4 @@ static Registrar<ConvDepthWise_Op<2>> registrarConvDepthWiseImpl2D_cpu("cpu", Ai
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_H_ */
\ No newline at end of file
#endif /* AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_H_ */
......@@ -56,6 +56,7 @@ class ConvImpl2D_cpu : public OperatorImpl {
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward();
......@@ -68,4 +69,4 @@ static Registrar<Conv_Op<2>> registrarConvImpl2D_cpu("cpu", Aidge::ConvImpl2D_cp
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_CONVIMPL_H_ */
\ No newline at end of file
#endif /* AIDGE_CPU_OPERATOR_CONVIMPL_H_ */
......@@ -50,7 +50,7 @@ class FCImpl_cpu : public OperatorImpl {
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward();
void backward();
......@@ -61,4 +61,4 @@ static Registrar<FC_Op> registrarFCImpl_cpu("cpu", Aidge::FCImpl_cpu::create);
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_FCIMPL_H_ */
\ No newline at end of file
#endif /* AIDGE_CPU_OPERATOR_FCIMPL_H_ */
......@@ -49,7 +49,7 @@ class LeakyReLUImpl_cpu : public OperatorImpl {
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward();
void backward();
......@@ -60,4 +60,4 @@ static Registrar<LeakyReLU_Op> registrarLeakyReLUImpl_cpu("cpu", Aidge::LeakyReL
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_ */
\ No newline at end of file
#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_ */
......@@ -37,6 +37,7 @@ class ProducerImpl_cpu : public OperatorImpl {
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward();
......@@ -48,4 +49,4 @@ static Registrar<Producer_Op> registrarProducer1DImpl_cpu("cpu", Aidge::Producer
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_PRODUCERIMPL_H_ */
\ No newline at end of file
#endif /* AIDGE_CPU_OPERATOR_PRODUCERIMPL_H_ */
......@@ -49,7 +49,7 @@ class ReLUImpl_cpu : public OperatorImpl {
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward();
void backward();
......@@ -60,4 +60,4 @@ static Registrar<ReLU_Op> registrarReLUImpl_cpu("cpu", Aidge::ReLUImpl_cpu::crea
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_H_ */
\ No newline at end of file
#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_H_ */
......@@ -49,7 +49,7 @@ class SoftmaxImpl_cpu : public OperatorImpl {
NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void updateConsummerProducer() override final;
void forward();
void backward();
......@@ -60,4 +60,4 @@ static Registrar<Softmax_Op> registrarSoftmaxImpl_cpu("cpu", Aidge::SoftmaxImpl_
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_SOFTMAXIMPL_H_ */
\ No newline at end of file
#endif /* AIDGE_CPU_OPERATOR_SOFTMAXIMPL_H_ */
......@@ -48,7 +48,13 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbConsumedData(Aidge::IOIndex_t /*inpu
// Returns the running total of elements produced on output 0. The output
// index argument is ignored: only a single output counter is tracked.
Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
    return mNbProducedData[0]; // counter advanced by updateConsummerProducer()
}
// Advance the producer-consumer book-keeping by one forward pass: each input
// is consumed by the minimum amount required, and the single tracked output
// grows by the memory required for one pass.
void Aidge::AddImpl_cpu<1>::updateConsummerProducer() {
    // Consistency fix: index with std::size_t like the other CPU impls and
    // cast to IOIndex_t only at the API call, instead of casting the
    // IOIndex_t counter to NbElts_t on every loop comparison.
    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum amount for a forward pass
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<1>::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
......@@ -63,11 +69,6 @@ void Aidge::AddImpl_cpu<1>::forward() {
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
// Update producer-consumer data
for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<1>::backward() {
......@@ -112,7 +113,13 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbConsumedData(Aidge::IOIndex_t inputI
// Returns the running total of elements produced on output 0. The output
// index argument is ignored: only a single output counter is tracked.
Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
    return mNbProducedData[0]; // counter advanced by updateConsummerProducer()
}
// Advance the producer-consumer book-keeping by one forward pass: each input
// is consumed by the minimum amount required, and the single tracked output
// grows by the memory required for one pass.
void Aidge::AddImpl_cpu<2>::updateConsummerProducer() {
    // Consistency fix: index with std::size_t like the other CPU impls and
    // cast to IOIndex_t only at the API call, instead of casting the
    // IOIndex_t counter to NbElts_t on every loop comparison.
    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum amount for a forward pass
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<2>::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
......@@ -130,11 +137,6 @@ void Aidge::AddImpl_cpu<2>::forward() {
mOp.mInputs[1]->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
// Update producer-consumer data
for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx]+= getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum amount for a forward pass
mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<2>::backward() {
......@@ -180,7 +182,13 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbProducedData(Aidge::IOIndex_t output
assert(static_cast<std::size_t>(outputIdx) < mNbProducedData.size());
return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
// Advance the producer-consumer book-keeping by one forward pass: each input
// is consumed by the minimum amount required, and the single tracked output
// grows by the memory required for one pass.
void Aidge::AddImpl_cpu<3>::updateConsummerProducer() {
    // Consistency fix: index with std::size_t like the other CPU impls and
    // cast to IOIndex_t only at the API call, instead of casting the
    // IOIndex_t counter to NbElts_t on every loop comparison.
    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum amount for a forward pass
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<3>::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
......@@ -201,13 +209,8 @@ void Aidge::AddImpl_cpu<3>::forward() {
mOp.mInputs[2]->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
// Update producer-consumer data
for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<3>::backward() {
printf("Not implemented yet.\n");
}
\ No newline at end of file
}
......@@ -54,7 +54,13 @@ Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t
assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
// Advance the producer-consumer book-keeping by one forward pass.
void Aidge::AvgPoolingImpl2D_cpu::updateConsummerProducer() {
    // Every input is consumed by the minimum amount required for one
    // forward pass.
    const std::size_t nbInputs = mNbConsumedData.size();
    for (std::size_t i = 0; i < nbInputs; ++i) {
        mNbConsumedData[i] += getNbRequiredData(static_cast<IOIndex_t>(i));
    }
    // The single tracked output grows by the memory required for one pass.
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::AvgPoolingImpl2D_cpu::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
......@@ -69,13 +75,6 @@ void Aidge::AvgPoolingImpl2D_cpu::forward() {
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
// Update producer-consumer data
for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
// amount for a forward pass
mNbProducedData[0] += getRequiredMemory(0, {});
}
// Backward pass is not implemented for this CPU operator yet; calling it only
// logs a message (it does not raise or abort).
void Aidge::AvgPoolingImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
......@@ -53,7 +53,14 @@ Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t o
assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
// Advance the producer-consumer book-keeping by one forward pass.
void Aidge::BatchNormImpl2D_cpu::updateConsummerProducer() {
    const std::size_t nbInputs = mNbConsumedData.size();
    // Each input is consumed by the minimum amount required for a forward
    // pass.
    for (std::size_t i = 0; i < nbInputs; ++i) {
        mNbConsumedData[i] += getNbRequiredData(static_cast<IOIndex_t>(i));
    }
    // Output 0 grows by the memory required for one pass.
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::BatchNormImpl2D_cpu::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
......@@ -79,12 +86,7 @@ void Aidge::BatchNormImpl2D_cpu::forward() {
mOp.getOutput(0)->getImpl()->rawPtr(),
true);
// Update producer-consumer data
for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
// amount for a forward pass
mNbProducedData[0] += getRequiredMemory(0, {});
}
// Backward pass is not implemented for this CPU operator yet; calling it only
// logs a message (it does not raise or abort).
void Aidge::BatchNormImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
......@@ -55,7 +55,14 @@ Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbProducedData(Aidge::IOIndex
assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::ConvDepthWiseImpl2D_cpu::updateConsummerProducer(){
// Update producer-consumer data
for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
// amount for a forward pass
mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
......@@ -73,14 +80,6 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
kernelFunc(mOp.getParams(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
// Update producer-consumer data
for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
// amount for a forward pass
mNbProducedData[0] += getRequiredMemory(0, {});
}
// Backward pass is not implemented for this CPU operator yet; calling it only
// logs a message (it does not raise or abort).
void Aidge::ConvDepthWiseImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
......@@ -55,7 +55,14 @@ Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t output
assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::ConvImpl2D_cpu::updateConsummerProducer(){
// Update producer-consumer data
for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
// amount for a forward pass
mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::ConvImpl2D_cpu::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
......@@ -72,15 +79,7 @@ void Aidge::ConvImpl2D_cpu::forward() {
mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
// FIXME: Dummy wait for some earlier scheduler tests
std::this_thread::sleep_for(std::chrono::milliseconds(mOp.get<ConvParam::OutChannels>()));
// Update producer-consumer data
for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
// amount for a forward pass
mNbProducedData[0] += getRequiredMemory(0, {});
}
// Backward pass is not implemented for this CPU operator yet; calling it only
// logs a message (it does not raise or abort).
void Aidge::ConvImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
......@@ -71,6 +71,16 @@ Aidge::NbElts_t Aidge::FCImpl_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx)
return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::FCImpl_cpu::updateConsummerProducer(){
// Update producer-consumer data
for (IOIndex_t inputIdx = 0; static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx]
+= getNbRequiredData(static_cast<std::size_t>(inputIdx)); // each input is consumed by the minimum
// amount for a forward pass
mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::FCImpl_cpu::forward()
{
// FIXME: uncomment the following code once memory handling will work
......@@ -95,7 +105,7 @@ void Aidge::FCImpl_cpu::forward()
// mOp.mInputs[2]->getImpl()->rawPtr(),
// mOp.getOutput(0)->getImpl()->rawPtr());
// }
// else
// else
kernelFunc(
mOp.getParams(),
mOp.getInput(0)->dims()[0],
......@@ -104,19 +114,8 @@ void Aidge::FCImpl_cpu::forward()
mOp.mInputs[1]->getImpl()->rawPtr(),
mOp.mInputs[2]->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
// FIXME: Dummy wait for some earlier scheduler tests
std::this_thread::sleep_for(std::chrono::milliseconds(mOp.get<FCParam::OutChannels>()));
// Update producer-consumer data
for (IOIndex_t inputIdx = 0; static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
mNbConsumedData[inputIdx]
+= getNbRequiredData(static_cast<std::size_t>(inputIdx)); // each input is consumed by the minimum
// amount for a forward pass
mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::FCImpl_cpu::backward()
......
......@@ -50,7 +50,11 @@ Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*i
// Returns the running total of elements produced on output 0. The output
// index argument is ignored: only a single output counter is tracked.
Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
    return mNbProducedData[0]; // counter advanced by updateConsummerProducer()
}
void Aidge::LeakyReLUImpl_cpu::updateConsummerProducer(){
mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::LeakyReLUImpl_cpu::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
......@@ -65,11 +69,6 @@ void Aidge::LeakyReLUImpl_cpu::forward() {
std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::LeakyReLUImpl_cpu::backward() {
......
......@@ -61,7 +61,8 @@ Aidge::DimSize_t Aidge::ProducerImpl_cpu::getNbProducedData(
{
return getRequiredMemory(0, {});
}
// Intentionally a no-op: a Producer has no inputs to consume, so there is no
// consumption counter to advance here.
// NOTE(review): unlike the other impls, the produced-data counter is not
// advanced either -- presumably because getNbProducedData() is computed from
// getRequiredMemory() directly; confirm against the scheduler's expectations.
void Aidge::ProducerImpl_cpu::updateConsummerProducer(){
}
void Aidge::ProducerImpl_cpu::forward()
{
......@@ -71,4 +72,4 @@ void Aidge::ProducerImpl_cpu::forward()
// Backward pass is not implemented for the CPU Producer yet; calling it only
// logs a message (it does not raise or abort).
void Aidge::ProducerImpl_cpu::backward()
{
    printf("Not implemented yet.\n");
}
\ No newline at end of file
}
......@@ -50,7 +50,11 @@ Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inputI
// Returns the running total of elements produced on output 0. The output
// index argument is ignored: only a single output counter is tracked.
Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
    return mNbProducedData[0]; // counter advanced by updateConsummerProducer()
}
void Aidge::ReLUImpl_cpu::updateConsummerProducer(){
mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::ReLUImpl_cpu::forward() {
// FIXME: uncomment the following code once memory handling will work
assert(mOp.getInput(0) && "missing input #0");
......@@ -65,10 +69,6 @@ void Aidge::ReLUImpl_cpu::forward() {
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::ReLUImpl_cpu::backward() {
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment