Commit a59450ea authored by Cyril Moineau

Merge branch 'consumerProducerRefactor' into 'master'

Remove the consumer/producer system from the forward method.

See merge request !5
parents ef96324d 871215bb
Showing 139 additions and 82 deletions
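Across every CPU operator implementation in this merge, the change follows the same pattern: the producer/consumer bookkeeping (advancing mNbConsumedData per input and mNbProducedData per output) that previously ran at the end of each forward() now lives in a dedicated updateConsummerProducer() method (spelling as in the codebase), declared override final in each *Impl_cpu header. The sketch below illustrates the shape of that split with simplified stand-in types; it is not the real Aidge OperatorImpl interface, and the sizing helpers (getNbRequiredData, getRequiredMemory) are placeholders for the actual per-operator queries.

// Illustrative sketch only (not the actual Aidge API): simplified stand-ins
// showing the split between kernel execution and producer/consumer bookkeeping.
#include <cstddef>
#include <vector>

using NbElts_t  = std::size_t;   // stand-in for Aidge::NbElts_t
using IOIndex_t = std::size_t;   // stand-in for Aidge::IOIndex_t

class ExampleImpl_cpu {
public:
    // After the refactor, the scheduler can advance the counters without
    // running the compute kernel.
    void updateConsummerProducer() {
        for (IOIndex_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
            mNbConsumedData[inputIdx] += getNbRequiredData(inputIdx); // minimum consumed per forward pass
        mNbProducedData[0] += getRequiredMemory(0);
    }

    // forward() is now limited to the computation itself; the counter updates
    // that used to sit at the end of this method are gone.
    void forward() {
        // ... run the kernel ...
    }

private:
    // Placeholder sizing queries; the real implementations depend on the operator.
    NbElts_t getNbRequiredData(IOIndex_t) const { return 1; }
    NbElts_t getRequiredMemory(IOIndex_t) const { return 1; }

    std::vector<NbElts_t> mNbConsumedData{0};
    std::vector<NbElts_t> mNbProducedData{0};
};

The call site that invokes updateConsummerProducer() (presumably the scheduler) is outside this diff.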
@@ -36,7 +36,54 @@ class test_scheduler(unittest.TestCase):
        for i in range(len(expected_out)):
            self.assertEqual(expected_out[i], out_tensor[i])

    def test_sequential_scheduling(self):
        input_data = np.array([]).astype(np.float32)
        input_tensor = aidge_core.Tensor(input_data)
        input_node = aidge_core.Producer(input_tensor, "X")

        graph_view = aidge_core.sequential([
            aidge_core.FC(50, name='0'),
            aidge_core.FC(50, name='1'),
            aidge_core.FC(10, name='2'),
        ])
        EXPECTED_SCHEDULE = ['0', '1', '2']

        input_node.add_child(graph_view)
        input_node.get_operator().set_datatype(aidge_core.DataType.Float32)
        input_node.get_operator().set_backend("cpu")
        graph_view.set_datatype(aidge_core.DataType.Float32)
        graph_view.set_backend("cpu")

        scheduler = aidge_core.SequentialScheduler(graph_view)
        scheduler.generate_scheduling()

        self.assertListEqual([i.name() for i in scheduler.get_static_scheduling()], EXPECTED_SCHEDULE)

    def test_parallel_scheduling(self):
        input_data = np.array([]).astype(np.float32)
        input_tensor = aidge_core.Tensor(input_data)
        input_node = aidge_core.Producer(input_tensor, "X")

        graph_view = aidge_core.sequential([
            aidge_core.FC(50, name='0'),
            aidge_core.parallel([aidge_core.FC(50, name='1'), aidge_core.FC(50, name='3')]),
            aidge_core.Add(name='2'),
        ])
        EXPECTED_SCHEDULE = [['0', '1', '3', '2'], ['0', '3', '1', '2']]  # Both schedulings are valid!

        input_node.add_child(graph_view)
        input_node.get_operator().set_datatype(aidge_core.DataType.Float32)
        input_node.get_operator().set_backend("cpu")
        graph_view.set_datatype(aidge_core.DataType.Float32)
        graph_view.set_backend("cpu")

        scheduler = aidge_core.SequentialScheduler(graph_view)
        scheduler.generate_scheduling()

        self.assertTrue([i.name() for i in scheduler.get_static_scheduling()] in EXPECTED_SCHEDULE)

if __name__ == '__main__':
    unittest.main()
@@ -97,6 +97,7 @@ class AddImpl_cpu : public OperatorImpl {
        assert(outputIdx < mNbProducedData.size());
        return mNbProducedData[outputIdx];
    }
    void updateConsummerProducer() override final;
    void forward() {
        // nothing
@@ -130,6 +131,7 @@ class AddImpl_cpu<1> : public OperatorImpl {
    NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final;
    NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
    void updateConsummerProducer() override final;
    void forward();
@@ -161,6 +163,7 @@ class AddImpl_cpu<2> : public OperatorImpl {
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
    void updateConsummerProducer() override final;
    void forward();
@@ -191,6 +194,7 @@ class AddImpl_cpu<3> : public OperatorImpl {
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;
    void forward();
@@ -204,4 +208,4 @@ static Registrar<Add_Op<3>> registrarAddImpl3I_cpu("cpu", Aidge::AddImpl_cpu<3>:
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_H_ */
\ No newline at end of file
@@ -54,6 +54,7 @@ class AvgPoolingImpl2D_cpu : public OperatorImpl {
    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;
    void forward();
@@ -66,4 +67,4 @@ static Registrar<AvgPooling_Op<2>> registrarAvgPoolingImpl2D_cpu("cpu", Aidge::A
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_H_ */
\ No newline at end of file
@@ -69,6 +69,7 @@ class BatchNormImpl2D_cpu : public OperatorImpl {
    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;
    void forward();
@@ -81,4 +82,4 @@ static Registrar<BatchNorm_Op<2>> registrarBatchNormImpl2D_cpu("cpu", Aidge::Bat
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_BATCHNORMIMPL_H_ */
\ No newline at end of file
@@ -56,6 +56,7 @@ class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;
    void forward();
@@ -68,4 +69,4 @@ static Registrar<ConvDepthWise_Op<2>> registrarConvDepthWiseImpl2D_cpu("cpu", Ai
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_H_ */
\ No newline at end of file
@@ -56,6 +56,7 @@ class ConvImpl2D_cpu : public OperatorImpl {
    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;
    void forward();
@@ -68,4 +69,4 @@ static Registrar<Conv_Op<2>> registrarConvImpl2D_cpu("cpu", Aidge::ConvImpl2D_cp
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_CONVIMPL_H_ */
\ No newline at end of file
@@ -50,7 +50,7 @@ class FCImpl_cpu : public OperatorImpl {
    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;
    void forward();
    void backward();
@@ -61,4 +61,4 @@ static Registrar<FC_Op> registrarFCImpl_cpu("cpu", Aidge::FCImpl_cpu::create);
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_FCIMPL_H_ */
\ No newline at end of file
@@ -49,7 +49,7 @@ class LeakyReLUImpl_cpu : public OperatorImpl {
    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;
    void forward();
    void backward();
@@ -60,4 +60,4 @@ static Registrar<LeakyReLU_Op> registrarLeakyReLUImpl_cpu("cpu", Aidge::LeakyReL
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_ */
\ No newline at end of file
@@ -37,6 +37,7 @@ class ProducerImpl_cpu : public OperatorImpl {
    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;
    void forward();
@@ -48,4 +49,4 @@ static Registrar<Producer_Op> registrarProducer1DImpl_cpu("cpu", Aidge::Producer
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_PRODUCERIMPL_H_ */
\ No newline at end of file
@@ -49,7 +49,7 @@ class ReLUImpl_cpu : public OperatorImpl {
    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;
    void forward();
    void backward();
@@ -60,4 +60,4 @@ static Registrar<ReLU_Op> registrarReLUImpl_cpu("cpu", Aidge::ReLUImpl_cpu::crea
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_H_ */
\ No newline at end of file
@@ -49,7 +49,7 @@ class SoftmaxImpl_cpu : public OperatorImpl {
    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
    void updateConsummerProducer() override final;
    void forward();
    void backward();
@@ -60,4 +60,4 @@ static Registrar<Softmax_Op> registrarSoftmaxImpl_cpu("cpu", Aidge::SoftmaxImpl_
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_SOFTMAXIMPL_H_ */
\ No newline at end of file
@@ -48,7 +48,13 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbConsumedData(Aidge::IOIndex_t /*inpu
Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
    return mNbProducedData[0];
}
void Aidge::AddImpl_cpu<1>::updateConsummerProducer(){
    // Update producer-consumer data
    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
    mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<1>::forward() {
    // FIXME: uncomment the following code once memory handling will work
    assert(mOp.getInput(0) && "missing input #0");
@@ -63,11 +69,6 @@ void Aidge::AddImpl_cpu<1>::forward() {
        mOp.getInput(0)->getImpl()->rawPtr(),
        mOp.getOutput(0)->getImpl()->rawPtr());
    // Update producer-consumer data
    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
    mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<1>::backward() {
@@ -112,7 +113,13 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbConsumedData(Aidge::IOIndex_t inputI
Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
    return mNbProducedData[0];
}
void Aidge::AddImpl_cpu<2>::updateConsummerProducer(){
    // Update producer-consumer data
    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
    mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<2>::forward() {
    // FIXME: uncomment the following code once memory handling will work
    assert(mOp.getInput(0) && "missing input #0");
@@ -130,11 +137,6 @@ void Aidge::AddImpl_cpu<2>::forward() {
        mOp.mInputs[1]->getImpl()->rawPtr(),
        mOp.getOutput(0)->getImpl()->rawPtr());
    // Update producer-consumer data
    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx]+= getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum amount for a forward pass
    mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<2>::backward() {
@@ -180,7 +182,13 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbProducedData(Aidge::IOIndex_t output
    assert(static_cast<std::size_t>(outputIdx) < mNbProducedData.size());
    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::AddImpl_cpu<3>::updateConsummerProducer(){
    // Update producer-consumer data
    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
    mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<3>::forward() {
    // FIXME: uncomment the following code once memory handling will work
    assert(mOp.getInput(0) && "missing input #0");
@@ -201,13 +209,8 @@ void Aidge::AddImpl_cpu<3>::forward() {
        mOp.mInputs[2]->getImpl()->rawPtr(),
        mOp.getOutput(0)->getImpl()->rawPtr());
    // Update producer-consumer data
    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
    mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::AddImpl_cpu<3>::backward() {
    printf("Not implemented yet.\n");
}
\ No newline at end of file
@@ -54,7 +54,13 @@ Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t
    assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::AvgPoolingImpl2D_cpu::updateConsummerProducer(){
    // Update producer-consumer data
    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
                                                                                          // amount for a forward pass
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::AvgPoolingImpl2D_cpu::forward() {
    // FIXME: uncomment the following code once memory handling will work
    assert(mOp.getInput(0) && "missing input #0");
@@ -69,13 +75,6 @@ void Aidge::AvgPoolingImpl2D_cpu::forward() {
        mOp.getInput(0)->getImpl()->rawPtr(),
        mOp.getOutput(0)->getImpl()->rawPtr());
    // Update producer-consumer data
    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
                                                                                          // amount for a forward pass
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::AvgPoolingImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
@@ -53,7 +53,14 @@ Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t o
    assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::BatchNormImpl2D_cpu::updateConsummerProducer(){
    // Update producer-consumer data
    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
                                                                                          // amount for a forward pass
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::BatchNormImpl2D_cpu::forward() {
    // FIXME: uncomment the following code once memory handling will work
    assert(mOp.getInput(0) && "missing input #0");
@@ -79,12 +86,7 @@ void Aidge::BatchNormImpl2D_cpu::forward() {
        mOp.getOutput(0)->getImpl()->rawPtr(),
        true);
    // Update producer-consumer data
    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
                                                                                          // amount for a forward pass
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::BatchNormImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
@@ -55,7 +55,14 @@ Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbProducedData(Aidge::IOIndex
    assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::ConvDepthWiseImpl2D_cpu::updateConsummerProducer(){
    // Update producer-consumer data
    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
                                                                                          // amount for a forward pass
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
    // FIXME: uncomment the following code once memory handling will work
    assert(mOp.getInput(0) && "missing input #0");
@@ -73,14 +80,6 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
    kernelFunc(mOp.getParams(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
               mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
               mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
    // Update producer-consumer data
    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
                                                                                          // amount for a forward pass
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::ConvDepthWiseImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
@@ -55,7 +55,14 @@ Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t output
    assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::ConvImpl2D_cpu::updateConsummerProducer(){
    // Update producer-consumer data
    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
                                                                                          // amount for a forward pass
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::ConvImpl2D_cpu::forward() {
    // FIXME: uncomment the following code once memory handling will work
    assert(mOp.getInput(0) && "missing input #0");
@@ -72,15 +79,7 @@ void Aidge::ConvImpl2D_cpu::forward() {
               mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
               mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
    // FIXME: Dummy wait for some earlier scheduler tests
    std::this_thread::sleep_for(std::chrono::milliseconds(mOp.get<ConvParam::OutChannels>()));
    // Update producer-consumer data
    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx)); // each input is consumed by the minimum
                                                                                          // amount for a forward pass
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::ConvImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
@@ -70,6 +70,16 @@ Aidge::NbElts_t Aidge::FCImpl_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx)
    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}
void Aidge::FCImpl_cpu::updateConsummerProducer(){
    // Update producer-consumer data
    for (IOIndex_t inputIdx = 0; static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx]
            += getNbRequiredData(static_cast<std::size_t>(inputIdx)); // each input is consumed by the minimum
                                                                      // amount for a forward pass
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::FCImpl_cpu::forward()
{
    // FIXME: uncomment the following code once memory handling will work
@@ -94,7 +104,7 @@ void Aidge::FCImpl_cpu::forward()
    //     mOp.mInputs[2]->getImpl()->rawPtr(),
    //     mOp.getOutput(0)->getImpl()->rawPtr());
    // }
    // else
    kernelFunc(
        mOp.getParams(),
        mOp.getInput(0)->dims()[0],
@@ -103,19 +113,8 @@ void Aidge::FCImpl_cpu::forward()
        mOp.mInputs[1]->getImpl()->rawPtr(),
        mOp.mInputs[2]->getImpl()->rawPtr(),
        mOp.getOutput(0)->getImpl()->rawPtr());
    // FIXME: Dummy wait for some earlier scheduler tests
    std::this_thread::sleep_for(std::chrono::milliseconds(mOp.get<FCParam::OutChannels>()));
    // Update producer-consumer data
    for (IOIndex_t inputIdx = 0; static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
        mNbConsumedData[inputIdx]
            += getNbRequiredData(static_cast<std::size_t>(inputIdx)); // each input is consumed by the minimum
                                                                      // amount for a forward pass
    mNbProducedData[0] += getRequiredMemory(0, {});
}
void Aidge::FCImpl_cpu::backward()
...
@@ -51,7 +51,11 @@ Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*i
Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
    return mNbProducedData[0];
}
void Aidge::LeakyReLUImpl_cpu::updateConsummerProducer(){
    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
    mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::LeakyReLUImpl_cpu::forward() {
    // FIXME: uncomment the following code once memory handling will work
    assert(mOp.getInput(0) && "missing input #0");
@@ -66,11 +70,6 @@ void Aidge::LeakyReLUImpl_cpu::forward() {
        std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
        mOp.getInput(0)->getImpl()->rawPtr(),
        mOp.getOutput(0)->getImpl()->rawPtr());
    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
    mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::LeakyReLUImpl_cpu::backward() {
...
@@ -61,7 +61,8 @@ Aidge::DimSize_t Aidge::ProducerImpl_cpu::getNbProducedData(
{
    return getRequiredMemory(0, {});
}
void Aidge::ProducerImpl_cpu::updateConsummerProducer(){
}
void Aidge::ProducerImpl_cpu::forward()
{
@@ -71,4 +72,4 @@ void Aidge::ProducerImpl_cpu::forward()
void Aidge::ProducerImpl_cpu::backward()
{
    printf("Not implemented yet.\n");
}
\ No newline at end of file
@@ -51,7 +51,11 @@ Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inputI
Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
    return mNbProducedData[0];
}
void Aidge::ReLUImpl_cpu::updateConsummerProducer(){
    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
    mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::ReLUImpl_cpu::forward() {
    // FIXME: uncomment the following code once memory handling will work
    assert(mOp.getInput(0) && "missing input #0");
@@ -66,10 +70,6 @@ void Aidge::ReLUImpl_cpu::forward() {
        mOp.getInput(0)->getImpl()->rawPtr(),
        mOp.getOutput(0)->getImpl()->rawPtr());
    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
    mNbProducedData[0]+= getRequiredMemory(0, {});
}
void Aidge::ReLUImpl_cpu::backward() {
...