Commit 984eaa14 authored by Olivier BICHLER
Initial commit
parent 1eccdb62
2 merge requests: !212 Version 0.3.0, !153 Im2col
Pipeline #49569 passed
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_FOLD_H_
#define AIDGE_CORE_OPERATOR_FOLD_H_
#include <array>
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <string>
#include <utility> // std::pair
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
enum class FoldAttr { OutputDims, StrideDims, DilationDims, KernelDims };
template <DimIdx_t DIM>
class Fold_Op : public OperatorTensor,
public Registrable<Fold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)>,
public StaticAttributes<FoldAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>> {
public:
static const std::string Type;
Fold_Op() = delete;
using Attributes_ = StaticAttributes<FoldAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>>;
template <FoldAttr e>
using attr = typename Attributes_::template attr<e>;
constexpr Fold_Op(const std::array<DimSize_t, DIM> &outputDims,
const std::array<DimSize_t, DIM> &kernelDims,
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
: OperatorTensor(Type, {InputCategory::Data}, 1),
Attributes_(attr<FoldAttr::OutputDims>(outputDims),
attr<FoldAttr::StrideDims>(strideDims),
attr<FoldAttr::DilationDims>(dilationDims),
attr<FoldAttr::KernelDims>(kernelDims)) {}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
* input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Fold_Op(const Fold_Op<DIM> &op)
: OperatorTensor(op),
Attributes_(op)
{
if (!op.backend().empty()) {
SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
}
else {
mImpl = nullptr;
}
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Fold_Op(const Fold_Op&)
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Fold_Op<DIM>>(*this);
}
bool forwardDims(bool /*allowDataDependency*/ = false) override final;
void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Fold(const std::array<DimSize_t, DIM> &outputDims,
const std::array<DimSize_t, DIM> &kernelDims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in all cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Fold, not supported");
return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
}
template <DimSize_t DIM>
inline std::shared_ptr<Node> Fold(
DimSize_t const (&outputDims)[DIM],
DimSize_t const (&kernelDims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Fold, not supported");
return Fold(to_array(outputDims), to_array(kernelDims), name, strideDims, dilationDims);
}
} // namespace Aidge
extern template class Aidge::Fold_Op<2>;
namespace {
template <>
const char *const EnumStrings<Aidge::FoldAttr>::data[] = {
"OutputDims",
"StrideDims",
"DilationDims",
"KernelDims"
};
}
#endif /* AIDGE_CORE_OPERATOR_FOLD_H_ */
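A minimal usage sketch for the factory declared above (illustrative only: the helper function name is invented here, and only the Fold(...) call itself comes from this header):

#include <memory>
#include "aidge/operator/Fold.hpp"

// Sketch: a 2-D Fold node reassembling 2x2 blocks into a 4x4 spatial output.
// Stride and dilation fall back to the defaults of 1 declared above.
std::shared_ptr<Aidge::Node> makeFold2D() {
    // Following Fold_Op<2>::forwardDims(), this node expects an input of shape
    // (N, C*2*2, L) and produces an output of shape (N, C, 4, 4).
    return Aidge::Fold({4, 4}, {2, 2}, "fold2d");
}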
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_UNFOLD_H_
#define AIDGE_CORE_OPERATOR_UNFOLD_H_
#include <array>
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <string>
#include <utility> // std::pair
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
template <DimIdx_t DIM>
class Unfold_OpImpl : public OperatorImpl {
public:
Unfold_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
void forward() override;
};
enum class UnfoldAttr { StrideDims, DilationDims, KernelDims };
template <DimIdx_t DIM>
class Unfold_Op : public OperatorTensor,
public Registrable<Unfold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM> &)>,
public StaticAttributes<UnfoldAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>> {
public:
static const std::string Type;
Unfold_Op() = delete;
using Attributes_ = StaticAttributes<UnfoldAttr,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>,
std::array<DimSize_t, DIM>>;
template <UnfoldAttr e>
using attr = typename Attributes_::template attr<e>;
constexpr Unfold_Op(const std::array<DimSize_t, DIM> &kernelDims,
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
: OperatorTensor(Type, {InputCategory::Data}, 1),
Attributes_(attr<UnfoldAttr::StrideDims>(strideDims),
attr<UnfoldAttr::DilationDims>(dilationDims),
attr<UnfoldAttr::KernelDims>(kernelDims))
{
mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
}
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
* input tensors (the new operator has no input associated).
* @param op Operator to copy.
*/
Unfold_Op(const Unfold_Op<DIM> &op)
: OperatorTensor(op),
Attributes_(op)
{
if (!op.backend().empty()) {
SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
}
else {
mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
}
}
/**
* @brief Clone the operator using its copy-constructor.
* @see Unfold_Op(const Unfold_Op&)
*/
std::shared_ptr<Operator> clone() const override {
return std::make_shared<Unfold_Op>(*this);
}
bool forwardDims(bool /*allowDataDependency*/ = false) override final;
void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
static const std::vector<std::string> getInputsName(){
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Unfold(const std::array<DimSize_t, DIM> &kernelDims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
// FIXME: properly handle default w&b initialization in all cases
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
return std::make_shared<Node>(std::make_shared<Unfold_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
}
template <DimSize_t DIM>
inline std::shared_ptr<Node> Unfold(
DimSize_t const (&kernelDims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
return Unfold(to_array(kernelDims), name, strideDims, dilationDims);
}
} // namespace Aidge
extern template class Aidge::Unfold_Op<2>;
namespace {
template <>
const char *const EnumStrings<Aidge::UnfoldAttr>::data[] = {
"StrideDims",
"DilationDims",
"KernelDims"
};
}
#endif /* AIDGE_CORE_OPERATOR_UNFOLD_H_ */
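A minimal usage sketch for the factory declared above (illustrative only: the helper function name is invented here, and only the Unfold(...) call itself comes from this header):

#include <memory>
#include "aidge/operator/Unfold.hpp"

// Sketch: a 2-D Unfold (im2col) node with a 3x3 kernel and a stride of 2 on both axes.
std::shared_ptr<Aidge::Node> makeUnfold2D() {
    // Following Unfold_Op<2>::forwardDims(), this node expects an input of shape
    // (N, C, H, W) and produces an output of shape (N, C*3*3, L), where L is the
    // number of sliding-window positions.
    return Aidge::Unfold({3, 3}, "unfold2d", {2, 2});
}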
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/Fold.hpp"
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <stdexcept> // std::runtime_error
#include <string>
#include <utility> // std::pair
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
template <Aidge::DimIdx_t DIM>
const std::string Aidge::Fold_Op<DIM>::Type = "Fold";
template <Aidge::DimIdx_t DIM>
bool Aidge::Fold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
if (inputsAssociated()) {
auto dims(getInput(0)->dims());
DimSize_t k = 1; // product of the kernel dimensions
DimSize_t l = 1; // total number of sliding-block positions
for (std::size_t dim = 0; dim < this->template getAttr<FoldAttr::KernelDims>().size() ; ++dim) {
const DimSize_t kernelExtent = this->template getAttr<FoldAttr::DilationDims>()[dim] *
(this->template getAttr<FoldAttr::KernelDims>()[dim] - 1) + 1;
k *= this->template getAttr<FoldAttr::KernelDims>()[dim];
l *= 1 + static_cast<DimSize_t>(
floor(static_cast<float>(this->template getAttr<FoldAttr::OutputDims>()[dim] - kernelExtent) /
static_cast<float>(this->template getAttr<FoldAttr::StrideDims>()[dim])));
}
AIDGE_ASSERT(dims[dims.size() - 2] % k == 0 , "Fold: input number of channels ({}) is not divisible by the product of provided kernel dims ({})!",
dims[dims.size() - 2], k);
AIDGE_ASSERT(dims[dims.size() - 1] == l, "Fold: mismatch between expected input 3rd dim {} and provided input 3rd dim {}",
l, dims[dims.size() - 1]);
dims[dims.size() - 2] /= k;
dims.pop_back();
dims.insert(dims.end(), this->template getAttr<FoldAttr::OutputDims>().begin(), this->template getAttr<FoldAttr::OutputDims>().end());
mOutputs[0]->resize(dims);
return true;
}
return false;
}
template <Aidge::DimIdx_t DIM>
void Aidge::Fold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
SET_IMPL_MACRO(Fold_Op<DIM>, *this, name);
mOutputs[0]->setBackend(name, device);
}
template class Aidge::Fold_Op<2>;
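For reference, a sketch of the shape rule implemented by Fold_Op<DIM>::forwardDims() above, restated as a free function over plain vectors (not part of the operator; integer division replaces the float floor used above, which is equivalent for these non-negative operands):

#include <cassert>
#include <cstddef>
#include <vector>

std::vector<std::size_t> foldOutputShape(const std::vector<std::size_t>& inDims,      // (N, C*k, l)
                                         const std::vector<std::size_t>& outputDims,  // target spatial dims
                                         const std::vector<std::size_t>& kernelDims,
                                         const std::vector<std::size_t>& strideDims,
                                         const std::vector<std::size_t>& dilationDims) {
    std::size_t k = 1; // product of the kernel dimensions
    std::size_t l = 1; // number of sliding-block positions
    for (std::size_t dim = 0; dim < kernelDims.size(); ++dim) {
        const std::size_t kernelExtent = dilationDims[dim] * (kernelDims[dim] - 1) + 1;
        k *= kernelDims[dim];
        l *= 1 + (outputDims[dim] - kernelExtent) / strideDims[dim];
    }
    assert(inDims[inDims.size() - 2] % k == 0); // channel dim must be divisible by kernel size
    assert(inDims.back() == l);                 // last dim must match the block count
    std::vector<std::size_t> out(inDims.begin(), inDims.end() - 1); // drop l
    out.back() /= k;                                                // C*k -> C
    out.insert(out.end(), outputDims.begin(), outputDims.end());    // append spatial dims
    return out;                                                     // (N, C, outputDims...)
}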
@@ -53,7 +53,7 @@ bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
         dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
     }
-    AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes.");
+    AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes: {} vs {}.", dims0, dims1);
     std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
     for (std::size_t i = 0; i < dims_size-2; ++i) {
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/Unfold.hpp"
#include <cmath> // std::floor
#include <cstddef> // std::size_t
#include <stdexcept> // std::runtime_error
#include <string>
#include <utility> // std::pair
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
template <Aidge::DimIdx_t DIM>
void Aidge::Unfold_OpImpl<DIM>::forward() {
const Unfold_Op<DIM>& op = dynamic_cast<const Unfold_Op<DIM>&>(mOp);
const auto kernelDims = op.template getAttr<UnfoldAttr::KernelDims>();
const auto dilationDims = op.template getAttr<UnfoldAttr::DilationDims>();
const auto strideDims = op.template getAttr<UnfoldAttr::StrideDims>();
const DimSize_t inHeight = op.getInput(0)->dims()[2];
const DimSize_t inWidth = op.getInput(0)->dims()[3];
const DimSize_t kernelExtentHeight = dilationDims[0] * (kernelDims[0] - 1) + 1;
const DimSize_t outHeight = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(inHeight - kernelExtentHeight) /
static_cast<float>(strideDims[0])));
const DimSize_t kernelExtentWidth = dilationDims[1] * (kernelDims[1] - 1) + 1;
const DimSize_t outWidth = 1 + static_cast<DimSize_t>(
floor(static_cast<float>(inWidth - kernelExtentWidth) /
static_cast<float>(strideDims[1])));
const DimSize_t outChannels = op.getOutput(0)->dims()[1];
for (DimSize_t outC = 0; outC < outChannels; ++outC) {
// Decompose the output channel index into the input channel and the offsets inside the kernel window.
const auto inOffsetH = outC % kernelDims[0];
const auto inOffsetW = (outC / kernelDims[0]) % kernelDims[1];
const auto inC = outC / kernelDims[0] / kernelDims[1];
for (DimSize_t outH = 0; outH < outHeight; ++outH) {
const auto inH = outH * strideDims[0] + inOffsetH * dilationDims[0];
for (DimSize_t outW = 0; outW < outWidth; ++outW) {
const auto inW = outW * strideDims[1] + inOffsetW * dilationDims[1];
op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr((inC * inHeight + inH) * inWidth + inW), 1,
(outC * outHeight + outH) * outWidth + outW);
}
}
}
}
template <Aidge::DimIdx_t DIM>
const std::string Aidge::Unfold_Op<DIM>::Type = "Unfold";
template <Aidge::DimIdx_t DIM>
bool Aidge::Unfold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
if (inputsAssociated()) {
const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
DimSize_t k = 1; // product of the kernel dimensions
DimSize_t l = 1; // total number of sliding-window positions
for (std::size_t dim = 0; dim < this->template getAttr<UnfoldAttr::KernelDims>().size() ; ++dim) {
const DimSize_t kernelExtent = this->template getAttr<UnfoldAttr::DilationDims>()[dim] *
(this->template getAttr<UnfoldAttr::KernelDims>()[dim] - 1) + 1;
k *= this->template getAttr<UnfoldAttr::KernelDims>()[dim];
l *= 1 + static_cast<DimSize_t>(
floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
static_cast<float>(this->template getAttr<UnfoldAttr::StrideDims>()[dim])));
}
mOutputs[0]->resize({inputDims[0], inputDims[1] * k, l});
return true;
}
return false;
}
template <Aidge::DimIdx_t DIM>
void Aidge::Unfold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
if (Registrar<Unfold_Op<DIM>>::exists({name})){
SET_IMPL_MACRO(Unfold_Op<DIM>, *this, name);
}
else {
mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
}
mOutputs[0]->setBackend(name, device);
}
template class Aidge::Unfold_OpImpl<2>;
template class Aidge::Unfold_Op<2>;
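A concrete instance of the shape rule in Unfold_Op<2>::forwardDims() above (a standalone sketch with illustrative numbers, not part of the library):

#include <cassert>
#include <cstddef>

int main() {
    // Input (N, C, H, W) = (1, 3, 5, 5), kernel 3x3, stride 2, dilation 1.
    const std::size_t C = 3, H = 5, W = 5;
    const std::size_t kH = 3, kW = 3, sH = 2, sW = 2, dH = 1, dW = 1;

    const std::size_t extentH = dH * (kH - 1) + 1;   // dilated kernel extent: 3
    const std::size_t extentW = dW * (kW - 1) + 1;   // 3
    const std::size_t outH = 1 + (H - extentH) / sH; // 2
    const std::size_t outW = 1 + (W - extentW) / sW; // 2
    const std::size_t k = kH * kW;                   // 9 elements per window
    const std::size_t l = outH * outW;               // 4 window positions

    // Unfold maps (1, 3, 5, 5) to (1, C*k, l) = (1, 27, 4): one 27-element column per window.
    assert(C * k == 27 && l == 4);
    return 0;
}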