Commit 9dd41e85 authored by Cyril Moineau

Merge branch 'SliceFix' into 'dev'

Fix slice for out of bound start and end attributes

See merge request eclipse/aidge/aidge_core!291
parents 620f9e62 8c69c41b
@@ -43,7 +43,7 @@ public:
    /**
     * @brief Operator tensor constructor. This function is not meant to be called directly but by a derived class constructor;
     * every operator class derives from this class.
     *
     * @param[in] type : type of operator (i.e. "Add", "AveragePool", ...)
     * @param[in] inputsCategory : describes the type of each input.
     * @param[in] nbOut : number of tensors this operator will output
@@ -67,11 +67,14 @@ public:
    // input management
    void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override;
    const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const;
    virtual const std::vector<std::shared_ptr<Tensor>>& getInputs() const;
    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final;

    // output management
    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const override;
    virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const;
    virtual const std::vector<std::shared_ptr<Tensor>>& getOutputs() const;
    std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final;

    ///////////////////////////////////////////////////
@@ -94,7 +97,7 @@ public:
     * - TOKEN mode means that forwardDims will only ensure that all inputs and outputs of the graph the node belongs to are connected.
     * @param[in] allowDataDependency if set to true, the output dimensions of this operator may depend on the dimensions of optional parameter tensors.
     * @return true if dims have been properly forwarded, false otherwise. In the latter case, forwardDims falls back to token mode.
     *
     */
    virtual bool forwardDims(bool allowDataDependency = false);
    virtual bool dimsForwarded() const;
@@ -110,4 +113,4 @@ protected:
};
} // namespace Aidge
#endif // AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_
\ No newline at end of file
@@ -31,6 +31,10 @@ public:
    void forward() override;
};

// Implementation note:
// If start or end is out of bounds, it is clamped to the maximum valid value for the given axis.
// Example: a Slice with start=1, end=1000, axes=0 on the tensor [0, 1, 2, 3]
// will return [1, 2, 3].
enum class SliceAttr { Starts, Ends, Axes, Steps };

class Slice_Op
...
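To make the implementation note above concrete, here is a small standalone sketch (not Aidge code; the helper name `slice1d` is made up for illustration) of the clamping rule: negative indices wrap around, and out-of-bound start/end values are clamped to the axis extent, mirroring the index loop in `Slice_OpImpl::forward` further down in this diff.

```cpp
#include <algorithm>
#include <iostream>
#include <vector>

// Slices a 1-D vector with the same clamping rule as described in the note:
// negative indices count from the end, out-of-bound values are clamped.
std::vector<int> slice1d(const std::vector<int>& in, int start, int end, int step) {
    const int dim = static_cast<int>(in.size());
    if (start < 0) start += dim;
    if (end < 0) end += dim;
    // Clamp to the valid range so out-of-bound values select up to the last element.
    start = std::max(0, std::min(start, dim));
    end = std::max(0, std::min(end, dim));
    std::vector<int> out;
    for (int idx = start; (step > 0) ? (idx < end) : (idx > end); idx += step) {
        out.push_back(in[idx]);
    }
    return out;
}

int main() {
    // start=1, end=1000 on [0, 1, 2, 3] -> prints "1 2 3"
    for (int v : slice1d({0, 1, 2, 3}, 1, 1000, 1)) std::cout << v << ' ';
    std::cout << '\n';
    return 0;
}
```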
@@ -26,7 +26,9 @@ namespace Aidge {
void init_OperatorTensor(py::module& m){
    py::class_<OperatorTensor, std::shared_ptr<OperatorTensor>, Operator>(m, "OperatorTensor")
    .def("get_output", &OperatorTensor::getOutput, py::arg("outputIdx"))
    .def("get_outputs", &OperatorTensor::getOutputs)
    .def("get_input", &OperatorTensor::getInput, py::arg("inputIdx"))
    .def("get_inputs", &OperatorTensor::getInputs)
    .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&) const) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data"))
    .def("set_input", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setInput, py::arg("inputIdx"), py::arg("data"))
...
@@ -92,6 +92,12 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aidge::IOIndex_t outputIdx) const {
    return mOutputs[outputIdx];
}

const std::vector<std::shared_ptr<Aidge::Tensor>>& Aidge::OperatorTensor::getOutputs() const {
    return mOutputs;
}

const std::vector<std::shared_ptr<Aidge::Tensor>>& Aidge::OperatorTensor::getInputs() const {
    return mInputs;
}

std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>> Aidge::OperatorTensor::computeReceptiveField(
    const std::vector<DimSize_t>& firstEltDims,
...
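As a usage illustration of the accessors added above, here is a hedged C++ sketch that walks an operator's input and output tensors. The `getInputs()`/`getOutputs()` signatures and `nbDims()` come from this diff; the include paths and the helper name are assumptions.

```cpp
#include "aidge/data/Tensor.hpp"              // assumed header location
#include "aidge/operator/OperatorTensor.hpp"  // assumed header location

#include <cstddef>
#include <iostream>

// Walks the vectors returned by the accessors added in this merge request.
// Both accessors return const references, so no tensors are copied here.
void printTensorSummary(const Aidge::OperatorTensor& op) {
    const auto& inputs  = op.getInputs();
    const auto& outputs = op.getOutputs();
    std::cout << inputs.size() << " input(s), " << outputs.size() << " output(s)\n";
    for (std::size_t i = 0; i < outputs.size(); ++i) {
        // Entries may still be null before the graph is connected and forwarded.
        if (outputs[i]) {
            std::cout << "  output #" << i << ": " << outputs[i]->nbDims() << " dim(s)\n";
        }
    }
}
```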
@@ -101,8 +101,9 @@ void Aidge::Slice_OpImpl::forward() {
            int step = op.steps()[axisIdx];
            start = start >= 0 ? start : start + inputDims[axisIdx];
            start = std::max(0, std::min(start, static_cast<int>(inputDims[axisIdx])));
            end = end >= 0 ? end : end + inputDims[axisIdx];
            end = std::max(0, std::min(end, static_cast<int>(inputDims[axisIdx])));

            // Generate the range of indices for this axis
            for (int idx = start; (step > 0) ? (idx < end) : (idx > end); idx += step) {
                ranges[axisIdx].push_back(idx);
@@ -253,12 +254,17 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) {
        const DimIdx_t axis = this->axes()[i] >= 0 ?
            static_cast<DimIdx_t>(this->axes()[i]) :
            static_cast<DimIdx_t>(this->axes()[i] + static_cast<DimIdx_t>(getInput(0)->nbDims()));
        DimSize_t start = this->starts()[i] >= 0 ?
            static_cast<DimSize_t>(this->starts()[i]) :
            static_cast<DimSize_t>(this->starts()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
        // Clamp start to the range [0, axis_dim-1]
        start = std::max(static_cast<DimSize_t>(0), std::min(start, getInput(0)->dims()[axis]-1));
        DimSize_t end = this->ends()[i] >= 0 ?
            static_cast<DimSize_t>(this->ends()[i]) :
            static_cast<DimSize_t>(this->ends()[i] + static_cast<DimSize_t>(getInput(0)->dims()[axis]));
        // Clamp end to the range [0, axis_dim]
        end = std::max(static_cast<DimSize_t>(0), std::min(end, getInput(0)->dims()[axis]));
        const std::int64_t step = this->steps()[i];

        AIDGE_ASSERT(step != 0, "Slice_Op: Step ({}) must have a non-zero value on axis {}!", this->steps(), axis);
...
@@ -309,4 +315,4 @@ std::shared_ptr<Aidge::Node> Aidge::Slice(const std::vector<std::int64_t>& starts,
                                          const std::vector<std::int64_t>& steps,
                                          const std::string &name) {
    return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes, steps), name);
}
\ No newline at end of file
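For a worked view of the forwardDims change, here is a minimal sketch of the per-axis arithmetic under the new clamping. Only the clamping mirrors the code above; the ceil-division size formula at the end is an assumption based on common slice semantics, since that computation sits outside this diff.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

// Computes the sliced extent of one axis after clamping, mirroring the
// clamping added in Slice_Op::forwardDims (start clamped to [0, dim-1],
// end clamped to [0, dim]). The size formula below is an assumption.
std::int64_t slicedDim(std::int64_t dim, std::int64_t start, std::int64_t end, std::int64_t step) {
    if (start < 0) start += dim;
    if (end < 0) end += dim;
    start = std::max<std::int64_t>(0, std::min(start, dim - 1));
    end   = std::max<std::int64_t>(0, std::min(end, dim));
    if (step > 0) {
        return (end > start) ? (end - start + step - 1) / step : 0;
    }
    return (start > end) ? (start - end + (-step) - 1) / (-step) : 0;
}

int main() {
    // dims()[axis] = 4, starts = 1, ends = 1000, steps = 1 -> extent 3,
    // matching the [1, 2, 3] example from the implementation note.
    std::cout << slicedDim(4, 1, 1000, 1) << '\n';
    return 0;
}
```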