/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include "aidge/operator/DepthToSpace.hpp"

#include <array>
#include <cstddef>  // std::size_t
#include <string>
#include <vector>

#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"

void Aidge::DepthToSpace_OpImpl::forward() {
    const DepthToSpace_Op& op = dynamic_cast<const DepthToSpace_Op&>(mOp);
    // Assumes an NCHW tensor format.
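    // DepthToSpace rearranges data from the channel dimension into spatial blocks:
    // [N, C, H, W] -> [N, C / (bs * bs), H * bs, W * bs], where bs = blockSize().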

    // Get input dimensions
    const auto& dims = op.getInput(0)->dims<4>();
    // Get final output dimensions
    const std::array<DimSize_t, 4> final_dims = op.getOutput(0)->dims<4>();

    std::size_t b = dims[0];
    std::size_t c = dims[1] / (static_cast<DimSize_t>(op.blockSize()) * static_cast<DimSize_t>(op.blockSize()));
    std::size_t h = dims[2];
    std::size_t w = dims[3];

    // Copy the input tensor to the output
    op.setOutput(0, op.getInput(0));

    // Step 1: Resize
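    // The channel dimension is split into the two block factors: in CRD
    // (column-row-depth) mode the split is [C', bs, bs]; in the other mode
    // (ONNX's DCR ordering) it is [bs, bs, C'].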
    const std::vector<DimSize_t> resize_dims =
        (op.mode() == DepthToSpace_Op::Mode::CRD) ?
            std::vector<DimSize_t>({b, c, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), h, w}) :
            std::vector<DimSize_t>({b, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), c, h, w});
    op.getOutput(0)->resize(resize_dims);

    // Step 2: Transpose
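    // The permutation moves each block factor next to its spatial axis, so the
    // 6-D tensor becomes [N, C', H, bs, W, bs] in both modes.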
    const std::vector<DimSize_t> transpose_order =
        (op.mode() == DepthToSpace_Op::Mode::CRD) ?
            std::vector<DimSize_t>({0, 1, 4, 2, 5, 3}) :
            std::vector<DimSize_t>({0, 3, 4, 1, 5, 2});
    op.getOutput(0)->copyTranspose(*(op.getOutput(0)), transpose_order);

    // Step 3: Final resize
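    // Collapse the interleaved (H, bs) and (W, bs) pairs into the final
    // [N, C / (bs * bs), H * bs, W * bs] shape.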
    op.getOutput(0)->resize(final_dims);
}

//////////////////////////////////////////////////////

const std::string Aidge::DepthToSpace_Op::Type = "DepthToSpace";

Aidge::DepthToSpace_Op::DepthToSpace_Op(const std::uint32_t blockSize, const Aidge::DepthToSpace_Op::Mode mode)
    : OperatorTensor(Type, {InputCategory::Data}, 1),
        mAttributes(std::make_shared<Attributes_>(
        attr<DepthToSpaceAttr::BlockSize>(blockSize),
        attr<DepthToSpaceAttr::Mode>(mode)))
{
    // ctor
}

Aidge::DepthToSpace_Op::DepthToSpace_Op(const Aidge::DepthToSpace_Op& op)
    : OperatorTensor(op),
      mAttributes(op.mAttributes)
{
    if (op.mImpl) {
        SET_IMPL_MACRO(DepthToSpace_Op, *this, op.backend());
    } else {
        mImpl = nullptr;
    }
}

std::shared_ptr<Aidge::Operator> Aidge::DepthToSpace_Op::clone() const {
    return std::make_shared<DepthToSpace_Op>(*this);
}

bool Aidge::DepthToSpace_Op::forwardDims(bool /*allowDataDependency*/) {
    if (inputsAssociated()) {
        AIDGE_ASSERT(getInput(0)->nbDims() == 4, "{} Operator only accepts 4-D input Tensors.", DepthToSpace_Op::Type);
        AIDGE_ASSERT(getInput(0)->dims()[1] % (blockSize() * blockSize()) == 0, "Number of channels must be divisible by blocksize squared");

        // Compute output dims
        const std::array<DimSize_t, 4>& inDims = getInput(0)->dims<4>();
        const std::vector<DimSize_t> outDims =
                {inDims[0],
                 inDims[1] / (static_cast<DimSize_t>(blockSize()) * static_cast<DimSize_t>(blockSize())),
                 inDims[2] * static_cast<DimSize_t>(blockSize()),
                 inDims[3] * static_cast<DimSize_t>(blockSize())};

        mOutputs[0]->resize(outDims);
        return true;
    }

    return false;
}

void Aidge::DepthToSpace_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
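    // Use a registered backend implementation when one exists; otherwise fall
    // back to the generic, backend-agnostic implementation defined above.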
    if (Registrar<DepthToSpace_Op>::exists({name})) {
        SET_IMPL_MACRO(DepthToSpace_Op, *this, name);
    }
    else {
        mImpl = std::make_shared<DepthToSpace_OpImpl>(*this);
    }
    mOutputs[0]->setBackend(name, device);
}

std::set<std::string> Aidge::DepthToSpace_Op::getAvailableBackends() const {
    return Registrar<DepthToSpace_Op>::getKeys();
}

//////////////////////////////////////////////////////////

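// Factory helper creating a DepthToSpace node. Usage sketch (illustrative only,
// the node name "d2s" is arbitrary):
//   auto d2s = DepthToSpace(2, DepthToSpace_Op::Mode::CRD, "d2s");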
std::shared_ptr<Aidge::Node> Aidge::DepthToSpace(const std::uint32_t blockSize,
                                    const Aidge::DepthToSpace_Op::Mode mode,
                                    const std::string& name) {
    return std::make_shared<Node>(std::make_shared<DepthToSpace_Op>(blockSize, mode), name);
}