/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include "aidge/operator/Conv.hpp"

#include <cmath>      // std::floor
#include <cstddef>    // std::size_t
#include <stdexcept>  // std::runtime_error
#include <string>
#include <utility>    // std::pair
#include <vector>

#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"

// Registry key identifying this operator kind (shared by every DIM).
template <Aidge::DimIdx_t DIM>
const std::string Aidge::Conv_Op<DIM>::Type = "Conv";

// Copy constructor: copies the OperatorTensor state and the attribute handle
// (NOTE(review): mAttributes is copied as-is — if it is a shared pointer the
// attributes are shared with the source, not deep-copied; confirm intent).
template <Aidge::DimIdx_t DIM>
Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
    : OperatorTensor(op),
      mAttributes(op.mAttributes)
{
    // The backend implementation is not shared: if the source has one,
    // instantiate a fresh impl for the same backend; otherwise the copy
    // stays without an implementation until setBackend() is called.
    if (op.mImpl) {
        SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
    } else {
        mImpl = nullptr;
    }
}

// Computes the output tensor dimensions from the associated inputs.
// Returns true (and resizes output #0) once every required input has a
// non-empty Tensor; returns false while a required input is still empty.
// Throws std::runtime_error when a required input is not associated at all.
template <Aidge::DimIdx_t DIM>
bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
    // Data (#0) and weight (#1) are always mandatory; the bias (#2) is only
    // required when the NoBias attribute is false. Previously input #2 was
    // checked unconditionally, which made a bias-less Conv either throw
    // (bias not associated) or never forward its dims (bias empty).
    const IOIndex_t nbRequiredInputs =
        mAttributes->template getAttr<ConvAttr::NoBias>() ? 2 : 3;

    // check that the required inputs have been associated
    bool associated = true;
    for (IOIndex_t i = 0; i < nbRequiredInputs; ++i) {
        if (!getInput(i)) {
            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
        }
        associated &= !(getInput(i)->empty());
    }
    if (associated) {
        // first check weight since it defines inChannels and outChannels
        AIDGE_ASSERT((getInput(1)->nbDims() == (DIM+2)),
                    "Wrong weight Tensor dimension: {} for Conv{}D operator.", getInput(1)->nbDims(), DIM);
        // check data: expected layout is [N, C_in, spatial...]
        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)) &&
                    (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
                    "Wrong input size for Conv operator.");
        // check optional bias: one value per output channel
        if(!mAttributes->template getAttr<ConvAttr::NoBias>())
            AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                    (getInput(2)->template dims<1>()[0] == outChannels()),
                    "Wrong bias size for Conv operator.");
        std::array<DimSize_t, DIM + 2> outputDims{};
        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());

        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
            // Effective kernel span once dilation is applied.
            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvAttr::DilationDims>()[dim] *
                                                    (mAttributes->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
                                            1;
            // Guard against unsigned underflow: a kernel larger than the
            // input previously wrapped around and produced huge output dims.
            AIDGE_ASSERT(inputDims[dim+2] >= kernelExtent,
                    "Conv: kernel extent {} does not fit into input dimension {} ({}).",
                    kernelExtent, dim, inputDims[dim+2]);

            // Both operands are non-negative integers, so the floored float
            // quotient equals the unsigned integer division. This replaces an
            // unqualified floor() call (only std::floor is guaranteed by
            // <cmath>) and avoids float precision loss on large dimensions.
            outputDims[dim+2] = 1 + (inputDims[dim+2] - kernelExtent) /
                    mAttributes->template getAttr<ConvAttr::StrideDims>()[dim];
        }

        outputDims[1] = outChannels();
        outputDims[0] = inputDims[0];  // batch size is preserved
        mOutputs[0]->resize(outputDims);
    }

    return associated;
}


// Computes, for a requested output region (offset firstEltDims, extent
// outputDims), the matching region of every contributing input tensor.
// Returns one (firstIdx, dims) pair per input: data, weight, and bias when
// the operator has one. Throws if the region is out of range, empty, or the
// output dimensions have not been forwarded yet.
template <Aidge::DimIdx_t DIM>
std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>
Aidge::Conv_Op<DIM>::computeReceptiveField(
                          const std::vector<Aidge::DimSize_t>& firstEltDims,
                          const std::vector<Aidge::DimSize_t>& outputDims,
                          const Aidge::IOIndex_t outputIdx) const
{
    // Conv has a single output tensor.
    if (outputIdx != 0) {
        AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
    }
    if (firstEltDims.size() != outputDims.size()) {
        AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
    }
    if ((outputDims.size() == (DIM+2)) && dimsForwarded()) {
        // Offset
        auto inputIdxDims = firstEltDims; // batch idx is the same
        inputIdxDims[1] = 0; // each channel is used so start with the first one

        // Bounds check: the requested region must fit inside the forwarded
        // output tensor and be non-empty in every dimension.
        for (DimIdx_t i = 0; i < (DIM+2); ++i) {
            if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
                AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension {} ({} + {})", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
            }
        }

        // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
        // Input
        // same batch value, every input channel is used
        std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
        for (DimIdx_t i = 0; i < DIM; ++i) {
            // Spatial extent of the patch producing outputDims[2+i] outputs:
            // (out - 1) * stride + 1 covers the stride steps, plus
            // (kernel - 1) * dilation for the dilated kernel span.
            inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
                        * mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
                        + 1
                        + (mAttributes->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
                        * mAttributes->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
            // First input element on this axis: output index scaled by stride.
            inputIdxDims[2+i] *= mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
        }

        // Weight
        // same output value, every input channel is used
        std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
        for (std::size_t i = 0; i < DIM; ++i) {
            weightDims.push_back(mAttributes->template getAttr<ConvAttr::KernelDims>()[i]);
        }
        // Weights are read starting at output channel firstEltDims[1]; every
        // other weight dimension is consumed from its beginning.
        std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
        weightIdxDims[0] = firstEltDims[1];

        // Result
        std::vector<std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>> res;
        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
        res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));

        // Bias
        if (!mAttributes->template getAttr<ConvAttr::NoBias>()){
            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
            res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
        }
        return res;
    }
    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
}

// Selects the backend implementation for this operator and propagates the
// backend choice to the output tensor and to the connected parameter inputs.
template <Aidge::DimIdx_t DIM>
void Aidge::Conv_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
    SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
    mOutputs[0]->setBackend(name, device);

    // Weight and bias follow the operator's backend automatically.
    const auto& weight = getInput(1);
    if (!weight) {
        Log::notice("Conv_Op::setBackend(): could not set backend for weight input, because input is not connected");
    } else {
        weight->setBackend(name, device);
    }

    // The bias input is optional: silently skip it when absent.
    if (const auto& bias = getInput(2)) {
        bias->setBackend(name, device);
    }
}

// Explicit instantiation: only the 2-D convolution is emitted by this
// translation unit.
template class Aidge::Conv_Op<2>;