// ConvDepthWise.cpp
/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include "aidge/operator/ConvDepthWise.hpp"

#include <array>
#include <cmath>      // std::floor
#include <cstddef>    // std::size_t
#include <memory>     // std::make_shared, std::shared_ptr
#include <set>        // std::set
#include <stdexcept>  // std::runtime_error
#include <string>
#include <utility>    // std::pair
#include <vector>

#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Log.hpp"        // Log::notice
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"

// Registry/type key for this operator, e.g. "ConvDepthWise2D" when DIM == 2.
template <Aidge::DimIdx_t DIM>
const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise" + std::to_string(DIM) + "D";

/**
 * @brief Copy-constructor: shares the attribute set of @p op and, when the
 * source operator already has a backend implementation, re-binds an
 * implementation for the same backend on the new instance.
 */
template <Aidge::DimIdx_t DIM>
Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
    : OperatorTensor(op),
      mAttributes(op.mAttributes)
{
    if (!op.mImpl) {
        // Source has no implementation yet: leave this copy unbound too.
        mImpl = nullptr;
    } else {
        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
    }
}

/**
 * @brief Compute and set the output Tensor dimensions from the input ones.
 *
 * Checks the weight (input 1), data (input 0) and optional bias (input 2)
 * ranks/sizes, then applies the standard convolution arithmetic per spatial
 * dimension. Padding is not handled here (it belongs to a Pad operator).
 *
 * @return true when all inputs are associated and the output was resized,
 *         false otherwise.
 */
template <Aidge::DimIdx_t DIM>
bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
    if (inputsAssociated()) {
        // first check weight since it defines nbChannels
        AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
                    "Wrong weight Tensor dimension: {} for ConvDepthWise{}D operator.", getInput(1)->nbDims(), DIM);
        // check data
        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
                    (getInput(0)->template dims<DIM+2>()[1] == nbChannels()),
                    "Wrong input size for ConvDepthWise operator.");
        // check optional bias
        if(getInput(2))
            AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                    (getInput(2)->template dims<1>()[0] == nbChannels()),
                    "Wrong bias size for ConvDepthWise operator.");

        // Hoist the attribute lookups out of the loop.
        const auto& kernelDims   = mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>();
        const auto& strideDims   = mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>();
        const auto& dilationDims = mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>();

        std::array<DimSize_t, DIM + 2> outputDims = {};
        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());

        for (std::size_t dim = 0; dim < kernelDims.size() ; ++dim) {
            // Effective kernel footprint once dilation is applied.
            const DimSize_t kernelExtent = dilationDims[dim] * (kernelDims[dim] - 1) + 1;

            // out = 1 + floor((in - extent) / stride)
            outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                    std::floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
                            static_cast<float>(strideDims[dim])));
        }

        // Depth-wise convolution: one output channel per input channel;
        // batch dimension is carried through unchanged.
        outputDims[1] = inputDims[1];
        outputDims[0] = inputDims[0];
        mOutputs[0]->resize(outputDims);
        return true;
    }

    return false;
}


/**
 * @brief Compute, for a rectangular region of the output, the region of each
 * input Tensor (data, weight, optional bias) that contributes to it.
 *
 * @param firstEltDims index of the first element of the output region.
 * @param outputDims   extent of the output region along each dimension.
 * @param outputIdx    output index; this operator has a single output (0).
 * @return one (first-element index, dims) pair per connected input, in input
 *         order: data, weight, then bias when present.
 * @throws std::runtime_error on wrong output index, mismatched argument
 *         sizes, out-of-range region, or when dims are not forwarded yet.
 */
template <Aidge::DimIdx_t DIM>
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>
Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
                          const std::vector<Aidge::DimSize_t>& firstEltDims,
                          const std::vector<Aidge::DimSize_t>& outputDims,
                          const Aidge::IOIndex_t outputIdx) const
{
    if (outputIdx != 0) {
        AIDGE_THROW_OR_ABORT(std::runtime_error, "ConvDepthWise_Op Operator has got only one output Tensor.");
    }
    if (firstEltDims.size() != outputDims.size()) {
        AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
    }
    if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
        // Offset
        auto inputIdxDims = firstEltDims; // batch idx is the same

        // Validate that the requested region fits inside the output Tensor.
        for (DimIdx_t i = 0; i < (DIM+2); ++i) {
            if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
                AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
            }
        }

        // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
        // Input
        // same batch value; depth-wise => same channel count as the output
        std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
        for (DimIdx_t i = 0; i < DIM; ++i) {
            // Spatial extent of the input patch feeding `outputDims[2+i]`
            // outputs: (out-1)*stride + 1 + (kernel-1)*dilation.
            inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
                        * mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
                        + 1
                        + (mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
                        * mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
            // First input element along this axis = first output element * stride.
            inputIdxDims[2+i] *= mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
        }

        // Weight: [channels, 1, k0, k1, ...] — one filter per channel.
        std::vector<DimSize_t> weightDims{outputDims[1], 1};
        for (std::size_t i = 0; i < DIM; ++i) {
            weightDims.push_back(mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
        }
        std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
        weightIdxDims[0] = firstEltDims[1];


        // Result
        std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> res;
        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
        // Bias
        if (getInput(2)){
            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
            res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
        }
        return res;
    }
    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
}

/**
 * @brief Bind this operator (and its output Tensor) to the backend @p name on
 * device @p device, and propagate the backend to the weight and optional bias
 * inputs when they are connected.
 */
template <Aidge::DimIdx_t DIM>
void Aidge::ConvDepthWise_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
    SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
    mOutputs[0]->setBackend(name, device);

    // Weight input: forward the backend when connected, warn otherwise.
    if (!getInput(1)) {
        Log::notice("ConvDepthWise_Op::setBackend(): could not set backend for weight input, because input is not connected");
    } else {
        getInput(1)->setBackend(name, device);
    }

    // Bias input is optional: propagate only when present.
    if (getInput(2)) {
        getInput(2)->setBackend(name, device);
    }
}

/**
 * @brief List the backend names registered for this operator specialization.
 */
template <Aidge::DimIdx_t DIM>
std::set<std::string> Aidge::ConvDepthWise_Op<DIM>::getAvailableBackends() const {
    const auto backends = Registrar<ConvDepthWise_Op<DIM>>::getKeys();
    return backends;
}

// Explicit instantiations: only 1D and 2D depth-wise convolutions are supported.
template class Aidge::ConvDepthWise_Op<1>;
template class Aidge::ConvDepthWise_Op<2>;

////////////////////////////////////////////

/**
 * @brief Factory: build a ConvDepthWise node with its weight (and optional
 * bias) Producer inputs attached.
 *
 * @param nbChannels   number of channels (depth-wise: in == out channels).
 * @param kernelDims   kernel size per spatial dimension.
 * @param name         node name (may be empty).
 * @param strideDims   stride per spatial dimension.
 * @param dilationDims dilation per spatial dimension.
 * @param noBias       when true, no bias Producer is created.
 * @return the created Node.
 */
template <std::array<Aidge::DimSize_t, 1>::size_type DIM>
std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise(const Aidge::DimSize_t nbChannels,
                                           const std::array<Aidge::DimSize_t, DIM> &kernelDims,
                                           const std::string& name,
                                           const std::array<Aidge::DimSize_t, DIM> &strideDims,
                                           const std::array<Aidge::DimSize_t, DIM> &dilationDims,
                                           bool noBias) {
    // FIXME: properly handle default w&b initialization in every cases
    AIDGE_ASSERT(DIM<=MaxDim,"Too many kernel dimensions required by {}, not supported", ConvDepthWise_Op<DIM>::Type);
    const auto op = std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims);
    auto node = std::make_shared<Node>(op, name);
    // Weight Producer shape: [nbChannels, 1, k0, k1, ...] — one filter per channel.
    addProducer(node, 1, append(nbChannels, append(DimSize_t(1), kernelDims)), "w");
    if (!noBias) {
        // Bias Producer shape: [nbChannels].
        addProducer(node, 2, {nbChannels}, "b");
    }
    return node;
}

// Explicit factory instantiations matching the 1D and 2D operator instantiations above.
template std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise<1>(Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 1>&, bool);
template std::shared_ptr<Aidge::Node> Aidge::ConvDepthWise<2>(Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, bool);