Commit d79e4eeb authored by Maxence Naud, committed by Maxence Naud

Move generic implementations of Cast, Concat and DepthToSpace to their own files

parent 482557ca
3 merge requests: !414 Update version 0.5.1 -> 0.6.0, !408 [Add] Dropout Operator, !361 Move code from header to source
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_CASTIMPL_H_
#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_CASTIMPL_H_
#include <string>
#include "aidge/backend/OperatorImpl.hpp"
namespace Aidge {
class Cast_OpImpl : public OperatorImpl {
public:
Cast_OpImpl(const Operator& op, const std::string& backend = "")
: OperatorImpl(op, backend)
{}
void forward() override;
};
} // namespace Aidge
#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_CASTIMPL_H_
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_CONCATIMPL_H_
#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_CONCATIMPL_H_
#include <string>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Operator.hpp"
namespace Aidge {
/**
* @class Concat_OpImpl
* @brief Implementation of the Concat operator.
*
* Since Concat operation is backend-agnostic, its implementation is located in aidge_core.
*/
class Concat_OpImpl : public OperatorImpl {
public:
/**
* @brief Constructor for Concat_OpImpl.
* @param[in] op Operator instance.
* @param[in] backend Name of the backend.
*/
Concat_OpImpl(const Operator& op, const std::string& backend = "")
: OperatorImpl(op, backend)
{}
/**
* @brief Perform the forward pass of the Concat operator.
*/
void forward() override;
};
} // namespace Aidge
#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_CONCATIMPL_H_
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_BACKEND_GENERIC_OPERATOR_DEPTHTOSPACEIMPL_H_
#define AIDGE_CORE_BACKEND_GENERIC_OPERATOR_DEPTHTOSPACEIMPL_H_
#include <string>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Operator.hpp"
namespace Aidge {
/**
* @class DepthToSpace_OpImpl
* @brief Implementation of the DepthToSpace operation for rearranging data from depth into spatial dimensions.
*/
class DepthToSpace_OpImpl : public OperatorImpl {
public:
/**
* @brief Constructor for the DepthToSpace_OpImpl.
* @param op Operator containing attributes for DepthToSpace.
* @param backend The backend used for computation.
*/
DepthToSpace_OpImpl(const Operator& op, const std::string& backend = "") : OperatorImpl(op, backend) {}
/**
* @brief Perform the forward computation for DepthToSpace.
*/
void forward() override;
};
} // namespace Aidge
#endif // AIDGE_CORE_BACKEND_GENERIC_OPERATOR_DEPTHTOSPACEIMPL_H_
@@ -23,16 +23,11 @@
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
class Cast_OpImpl : public OperatorImpl {
public:
Cast_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
void forward() override;
};
#define LIST_CAST_ATTR(X) \
X(TargetType, "target_type", DataType)
namespace Aidge {
/**
* @enum CastAttr
* @brief Enum class defining the attributes for the Cast operator.
@@ -53,6 +48,7 @@ constexpr const char* const EnumStrings<Aidge::CastAttr>::data[] = {
GENERATE_LIST_ATTR_STR(LIST_CAST_ATTR)
};
}
namespace Aidge {
/**
* @brief Description of the Cast operation to convert a tensor's data type.
@@ -100,17 +96,7 @@ public:
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Cast_Op(const Cast_Op& op)
: OperatorTensor(op),
mAttributes(op.mAttributes)
{
if (!op.backend().empty()) {
SET_IMPL_MACRO(Cast_Op, *this, op.backend());
}
else {
mImpl = std::make_shared<Cast_OpImpl>(*this);
}
}
Cast_Op(const Cast_Op& op);
/**
* @brief Clone the operator using its copy constructor.
@@ -25,32 +25,6 @@
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
/**
* @class Concat_OpImpl
* @brief Implementation of the Concat operator.
*
* Since Concat operation is backend-agnostic, its implementation is located in aidge_core.
*/
class Concat_OpImpl : public OperatorImpl {
public:
/**
* @brief Constructor for Concat_OpImpl.
* @param[in] op Operator instance.
* @param[in] backend Name of the backend.
*/
Concat_OpImpl(const Operator& op, const std::string& backend = "")
: OperatorImpl(op, backend)
{}
/**
* @brief Perform the forward pass of the Concat operator.
*/
void forward() override;
};
} // namespace Aidge
#define LIST_CONCAT_ATTR(X) \
X(Axis, "axis", std::int32_t)
@@ -22,27 +22,6 @@
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
/**
* @class DepthToSpace_OpImpl
* @brief Implementation of the DepthToSpace operation for rearranging data from depth into spatial dimensions.
*/
class DepthToSpace_OpImpl : public OperatorImpl {
public:
/**
* @brief Constructor for the DepthToSpace_OpImpl.
* @param op Operator containing attributes for DepthToSpace.
* @param backend The backend used for computation.
*/
DepthToSpace_OpImpl(const Operator& op, const std::string& backend = "") : OperatorImpl(op, backend) {}
/**
* @brief Perform the forward computation for DepthToSpace.
*/
void forward() override;
};
} // namespace Aidge
#define LIST_DEPTHTOSPACE_ATTR(X) \
X(BlockSize, "block_size", std::uint32_t), \
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/backend/generic/operator/CastImpl.hpp"
#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Cast.hpp"
namespace Aidge {
void Cast_OpImpl::forward() {
const Cast_Op& op = dynamic_cast<const Cast_Op&>(mOp);
op.getOutput(0)->copyCast(*(op.getInput(0)));
}
} // namespace Aidge
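For readers unfamiliar with Tensor::copyCast, the call above amounts to an elementwise conversion between two buffers of different element types. A minimal standalone sketch of that idea (plain C++, independent of the Aidge Tensor API; the castCopy helper is purely illustrative):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>
// Illustrative only: convert every element of `src` to the destination element type.
template <typename Dst, typename Src>
std::vector<Dst> castCopy(const std::vector<Src>& src) {
    std::vector<Dst> dst(src.size());
    for (std::size_t i = 0; i < src.size(); ++i) {
        dst[i] = static_cast<Dst>(src[i]);
    }
    return dst;
}
int main() {
    const std::vector<float> in{1.5f, -2.25f, 3.0f};
    const auto out = castCopy<std::int32_t>(in); // {1, -2, 3}: conversion truncates toward zero
    for (auto v : out) std::cout << v << ' ';
    std::cout << '\n';
    return 0;
}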
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/backend/generic/operator/ConcatImpl.hpp"
#include <cstddef> // std::size_t
#include <cstdint> // std::int32_t
#include "aidge/operator/Concat.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
void Concat_OpImpl::forward() {
const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
AIDGE_ASSERT(op.getInput(0), "missing input in Concat operator");
const auto nbDimsInput0 = op.getInput(0)->nbDims();
auto axis = op.axis();
axis = (axis < 0) ? axis + static_cast<std::int32_t>(nbDimsInput0) : axis;
for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
AIDGE_ASSERT(op.getInput(i), "missing input in Concat operator");
AIDGE_ASSERT(op.getInput(i)->dataType() == op.getInput(0)->dataType(), "Every input must have the same DataType");
}
DimSize_t outputAxisValue = 0;
for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
outputAxisValue += op.getInput(i)->dims()[axis];
}
DimSize_t prodDimLower = 1;
for (DimIdx_t i = 0; i < axis; ++i) {
prodDimLower *= op.getInput(0)->dims()[i];
}
DimSize_t prodDimHigher = 1;
for (DimIdx_t i = axis + 1; static_cast<std::size_t>(i) < op.getInput(0)->dims().size();
++i) {
prodDimHigher *= op.getInput(0)->dims()[i];
}
std::size_t oIndexStart = 0;
for (std::size_t inputId = 0; inputId < op.nbInputs(); ++inputId) {
const DimSize_t iOffset = prodDimHigher*op.getInput(inputId)->dims()[axis];
for (std::size_t iIndex = 0, oIndex = oIndexStart; iIndex < prodDimLower; ++iIndex) {
op.getOutput(0)->getImpl()->copy(op.getInput(inputId)->getImpl()->rawPtr(iIndex*iOffset), iOffset, oIndex);
oIndex += prodDimHigher*outputAxisValue;
}
oIndexStart += op.getInput(inputId)->dims()[axis]*prodDimHigher;
}
}
} // namespace Aidge
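The index arithmetic above views each tensor as (prodDimLower, dims[axis], prodDimHigher) in row-major order and copies one contiguous chunk of dims[axis] * prodDimHigher elements per outer index, striding the output by prodDimHigher * outputAxisValue. A self-contained sketch of the same scheme on flat buffers (illustrative names and a fixed float element type; not the Aidge API):

#include <algorithm> // std::copy_n
#include <cstddef>
#include <iostream>
#include <vector>
// Illustrative only: concatenate row-major tensors along `axis`; dims[i] holds the shape of inputs[i].
std::vector<float> concatAlongAxis(const std::vector<std::vector<float>>& inputs,
                                   const std::vector<std::vector<std::size_t>>& dims,
                                   std::size_t axis) {
    std::size_t prodDimLower = 1, prodDimHigher = 1;
    for (std::size_t i = 0; i < axis; ++i) prodDimLower *= dims[0][i];
    for (std::size_t i = axis + 1; i < dims[0].size(); ++i) prodDimHigher *= dims[0][i];
    std::size_t outputAxisValue = 0;
    for (const auto& d : dims) outputAxisValue += d[axis];
    std::vector<float> out(prodDimLower * outputAxisValue * prodDimHigher);
    std::size_t oIndexStart = 0;
    for (std::size_t inputId = 0; inputId < inputs.size(); ++inputId) {
        const std::size_t iOffset = prodDimHigher * dims[inputId][axis];
        std::size_t oIndex = oIndexStart;
        for (std::size_t iIndex = 0; iIndex < prodDimLower; ++iIndex) {
            std::copy_n(inputs[inputId].data() + iIndex * iOffset, iOffset, out.data() + oIndex);
            oIndex += prodDimHigher * outputAxisValue;
        }
        oIndexStart += iOffset;
    }
    return out;
}
int main() {
    // Two 2x2 matrices concatenated along axis 1 give a 2x4 matrix: prints 1 2 5 6 3 4 7 8.
    const std::vector<std::vector<float>> inputs{{1, 2, 3, 4}, {5, 6, 7, 8}};
    const std::vector<std::vector<std::size_t>> dims{{2, 2}, {2, 2}};
    for (float v : concatAlongAxis(inputs, dims, 1)) std::cout << v << ' ';
    std::cout << '\n';
}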
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/backend/generic/operator/DepthToSpaceImpl.hpp"
#include <array>
#include <cstddef> // std::size_t
#include <cstdint> // std::int32_t
#include <vector>
#include "aidge/operator/DepthToSpace.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
void DepthToSpace_OpImpl::forward() {
const DepthToSpace_Op& op = dynamic_cast<const DepthToSpace_Op&>(mOp);
// Assumes an NCHW tensor format
// Get input dimensions
const auto& dims = op.getInput(0)->dims<4>();
// get final output dimension
const std::array<DimSize_t, 4> final_dims = op.getOutput(0)->dims<4>();
std::size_t b = dims[0];
std::size_t c = dims[1] / (static_cast<DimSize_t>(op.blockSize()) * static_cast<DimSize_t>(op.blockSize()));
std::size_t h = dims[2];
std::size_t w = dims[3];
// Copy input tensor to output
op.setOutput(0, op.getInput(0));
// Step 1: Resize
const std::vector<DimSize_t> resize_dims =
(op.mode() == DepthToSpace_Op::Mode::CRD) ?
std::vector<DimSize_t>({b, c, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), h, w}) :
std::vector<DimSize_t>({b, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), c, h, w});
op.getOutput(0)->resize(resize_dims);
// Step 2: Transpose
const std::vector<DimSize_t> transpose_order =
(op.mode() == DepthToSpace_Op::Mode::CRD) ?
std::vector<DimSize_t>({0, 1, 4, 2, 5, 3}) :
std::vector<DimSize_t>({0, 3, 4, 1, 5, 2});
op.getOutput(0)->copyTranspose(*(op.getOutput(0)), transpose_order);
// Step 3: Final resize
op.getOutput(0)->resize(final_dims);
}
} // namespace Aidge
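The resize / copyTranspose / resize sequence implements the standard DepthToSpace mapping: with cOut = C / blockSize^2, DCR mode takes output[n][c][h*bs+bh][w*bs+bw] from input[n][(bh*bs+bw)*cOut + c][h][w], while CRD mode reads the input channel as c*bs*bs + bh*bs + bw instead. As an independent cross-check of that mapping, here is a direct-index sketch of the DCR case on a flat NCHW buffer (plain C++, illustrative only):

#include <cstddef>
#include <iostream>
#include <vector>
// Illustrative only: DCR-mode DepthToSpace on a flat NCHW buffer, equivalent to
// reshape(N, bs, bs, C/bs^2, H, W) -> transpose(0, 3, 4, 1, 5, 2) -> reshape(N, C/bs^2, H*bs, W*bs).
std::vector<float> depthToSpaceDCR(const std::vector<float>& in,
                                   std::size_t n, std::size_t c, std::size_t h, std::size_t w,
                                   std::size_t bs) {
    const std::size_t cOut = c / (bs * bs);
    std::vector<float> out(in.size());
    for (std::size_t ni = 0; ni < n; ++ni)
    for (std::size_t ci = 0; ci < cOut; ++ci)
    for (std::size_t hi = 0; hi < h; ++hi)
    for (std::size_t bh = 0; bh < bs; ++bh)
    for (std::size_t wi = 0; wi < w; ++wi)
    for (std::size_t bw = 0; bw < bs; ++bw) {
        const std::size_t cIn = (bh * bs + bw) * cOut + ci;           // DCR channel decomposition
        const std::size_t iIdx = ((ni * c + cIn) * h + hi) * w + wi;  // NCHW input index
        const std::size_t oIdx = ((ni * cOut + ci) * (h * bs) + hi * bs + bh) * (w * bs) + wi * bs + bw;
        out[oIdx] = in[iIdx];
    }
    return out;
}
int main() {
    // A 1x4x1x1 input with blockSize 2 becomes 1x1x2x2: prints 0 1 2 3, i.e. [[0, 1], [2, 3]].
    const std::vector<float> in{0, 1, 2, 3};
    for (float v : depthToSpaceDCR(in, 1, 4, 1, 1, 2)) std::cout << v << ' ';
    std::cout << '\n';
}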
@@ -12,22 +12,23 @@
#include "aidge/operator/Cast.hpp"
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/backend/generic/operator/CastImpl.hpp"
#include "aidge/data/DataType.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
void Aidge::Cast_OpImpl::forward() {
const Cast_Op& op = dynamic_cast<const Cast_Op&>(mOp);
op.getOutput(0)->copyCast(*(op.getInput(0)));
}
namespace Aidge {
const std::string Aidge::Cast_Op::Type = "Cast";
const std::string Cast_Op::Type = "Cast";
Aidge::Cast_Op::Cast_Op(const DataType targetType)
Cast_Op::Cast_Op(const DataType targetType)
: OperatorTensor(Type, {InputCategory::Data}, 1),
mAttributes(std::make_shared<Attributes_>(
attr<CastAttr::TargetType>(targetType)))
@@ -36,8 +37,20 @@ Aidge::Cast_Op::Cast_Op(const DataType targetType)
mOutputs[0]->setDataType(targetType);
}
Cast_Op::Cast_Op(const Cast_Op& op)
: OperatorTensor(op),
mAttributes(op.mAttributes)
{
if (!op.backend().empty()) {
SET_IMPL_MACRO(Cast_Op, *this, op.backend());
}
else {
mImpl = std::make_shared<Cast_OpImpl>(*this);
}
}
void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
void Cast_Op::setBackend(const std::string& name, DeviceIdx_t device) {
if (Registrar<Cast_Op>::exists({name})) {
SET_IMPL_MACRO(Cast_Op, *this, name);
}
@@ -47,10 +60,14 @@ void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devi
mOutputs[0]->setBackend(name, device);
}
std::set<std::string> Aidge::Cast_Op::getAvailableBackends() const {
std::set<std::string> Cast_Op::getAvailableBackends() const {
return Registrar<Cast_Op>::getKeys();
}
std::shared_ptr<Aidge::Node> Aidge::Cast(const Aidge::DataType targetType, const std::string& name) {
////////////////////////////////////////////////////////////////////////////////
std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name) {
return std::make_shared<Node>(std::make_shared<Cast_Op>(targetType), name);
}
}
} // namespace Aidge
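A minimal usage sketch of the Cast factory defined above, assuming a standard aidge_core build; DataType::Float32 and the header paths are assumptions taken from the includes visible in this diff, and the node name is arbitrary:

#include <memory>
#include "aidge/data/DataType.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Cast.hpp"
int main() {
    // Create a Cast node whose output tensor is converted to float32 at forward time.
    std::shared_ptr<Aidge::Node> toFp32 = Aidge::Cast(Aidge::DataType::Float32, "cast_to_fp32");
    // The node can then be wired into a GraphView like any other operator.
    return 0;
}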
@@ -11,11 +11,13 @@
#include "aidge/operator/Concat.hpp"
#include <cstdint> // std::int32_t
#include <string>
#include <vector>
#include "aidge/backend/generic/operator/ConcatImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
const std::string Aidge::Concat_Op::Type = "Concat";
@@ -47,47 +49,6 @@ std::shared_ptr<Aidge::Operator> Aidge::Concat_Op::clone() const {
return std::make_shared<Concat_Op>(*this);
}
void Aidge::Concat_OpImpl::forward() {
const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
auto axis = op.axis();
const auto nbDimsInput0 = op.getInput(0)->nbDims();
axis = (axis < 0) ? axis + static_cast<std::int32_t>(nbDimsInput0) : axis;
assert(op.getInput(0) && "missing input in Concat operator");
for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
assert(op.getInput(i) && "missing input in Concat operator");
assert(op.getInput(i)->dataType() == op.getInput(0)->dataType());
}
DimSize_t outputAxisValue = 0;
for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
outputAxisValue += op.getInput(i)->dims()[axis];
}
DimSize_t prodDimLower = 1;
for (DimIdx_t i = 0; i < axis; ++i) {
prodDimLower *= op.getInput(0)->dims()[i];
}
DimSize_t prodDimHigher = 1;
for (DimIdx_t i = axis + 1; static_cast<std::size_t>(i) < op.getInput(0)->dims().size();
++i) {
prodDimHigher *= op.getInput(0)->dims()[i];
}
std::size_t oIndexStart = 0;
// std::size_t oIndex = 0;
for (std::size_t inputId = 0; inputId < op.nbInputs(); ++inputId) {
// oIndex = oIndexStart;
const DimSize_t iOffset = prodDimHigher*op.getInput(inputId)->dims()[axis];
for (std::size_t iIndex = 0, oIndex = oIndexStart; iIndex < prodDimLower; ++iIndex) {
op.getOutput(0)->getImpl()->copy(op.getInput(inputId)->getImpl()->rawPtr(iIndex*iOffset), iOffset, oIndex);
oIndex += prodDimHigher*outputAxisValue;
}
oIndexStart += op.getInput(inputId)->dims()[axis]*prodDimHigher;
}
}
bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
if (!inputsAssociated()) {
return false;
@@ -101,11 +62,11 @@ bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
}
// Check validity of attributes with inputs
// Axis
std::int32_t axis = mAttributes->template getAttr<ConcatAttr::Axis>();
axis = (axis < 0) ? axis + static_cast<std::int32_t>(nbDimsInput0) : axis;
AIDGE_ASSERT(((axis >= 0) && (axis < static_cast<std::int32_t>(nbDimsInput0))),
std::int32_t concatenationAxis = axis();
concatenationAxis = (concatenationAxis < 0) ? concatenationAxis + static_cast<std::int32_t>(nbDimsInput0) : concatenationAxis;
AIDGE_ASSERT(((concatenationAxis >= 0) && (concatenationAxis < static_cast<std::int32_t>(nbDimsInput0))),
"'Axis' attribute not compatible with provided inputs.")
const std::size_t axis_u64 = static_cast<std::size_t>(axis);
const std::size_t axis_u64 = static_cast<std::size_t>(concatenationAxis);
// Check validity of inputs
auto outputDims = getInput(0)->dims();
@@ -16,46 +16,11 @@
#include <string>
#include <vector>
#include "aidge/backend/generic/operator/DepthToSpaceImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
void Aidge::DepthToSpace_OpImpl::forward() {
const DepthToSpace_Op& op = dynamic_cast<const DepthToSpace_Op&>(mOp);
// suppose an NCHW Tensor format
// Get input dimensions
const auto& dims = op.getInput(0)->dims<4>();
// get final output dimension
const std::array<DimSize_t, 4> final_dims = op.getOutput(0)->dims<4>();
std::size_t b = dims[0];
std::size_t c = dims[1] / (static_cast<DimSize_t>(op.blockSize()) * static_cast<DimSize_t>(op.blockSize()));
std::size_t h = dims[2];
std::size_t w = dims[3];
// Copt input tensor to output
op.setOutput(0, op.getInput(0));
// Step 1: Resize
const std::vector<DimSize_t> resize_dims =
(op.mode() == DepthToSpace_Op::Mode::CRD) ?
std::vector<DimSize_t>({b, c, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), h, w}) :
std::vector<DimSize_t>({b, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), c, h, w});
op.getOutput(0)->resize(resize_dims);
// Step 2: Transpose
const std::vector<DimSize_t> transpose_order =
(op.mode() == DepthToSpace_Op::Mode::CRD) ?
std::vector<DimSize_t>({0, 1, 4, 2, 5, 3}) :
std::vector<DimSize_t>({0, 3, 4, 1, 5, 2});
op.getOutput(0)->copyTranspose(*(op.getOutput(0)), transpose_order);
// Step 3: Final resize
op.getOutput(0)->resize(final_dims);
}
//////////////////////////////////////////////////////
const std::string Aidge::DepthToSpace_Op::Type = "DepthToSpace";