Commit 71259085 (aidge_backend_cpu)
authored 7 months ago by Noam Zerah
Adding ScalingMeta Test
parent 49f72dac
Pipeline #56611 failed, 7 months ago (stages: static_analysis, build, test, coverage)
Showing 1 changed file: unit_tests/operator/Test_ScalingMeta.cpp (new file, mode 100644), +83 additions, −0 deletions
/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include <catch2/catch_test_macros.hpp>
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <memory>

#include "aidge/utils/TensorUtils.hpp"
#include "aidge/backend/cpu/operator/ConvImpl.hpp"
#include "aidge/backend/cpu/operator/PadImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/MetaOperatorDefs.hpp"
#include "aidge/operator/Pad.hpp"
#include "aidge/operator/Pop.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp"
#include "aidge/scheduler/ParallelScheduler.hpp"

using namespace Aidge;

TEST_CASE("ScalingNodeMeta", "[ScalingMeta][CPU]") {
    /*SECTION("Scaling MetaOperator")
    {
        std::shared_ptr<Tensor> t0 = std::make_shared<Tensor>(
            Array2D<float, 3, 3>{{{45, 72, 2},
                                  {84.15, 144.45, 0.01484},
                                  {0.62132, 17.67132, 212.132}}});
        auto scal = ScalingMeta(2, 8, false);
        auto scalop = std::static_pointer_cast<OperatorTensor>(scal->getOperator());
        t0->setBackend("cpu");
        scalop->associateInput(0, t0);
        scalop->setBackend("cpu");
        scalop->forwardDims();
        scalop->forward();
        //auto sf = scalop->getInput(1);
        auto out0 = scalop->getOutput(0);
        auto in0 = scalop->getInput(0);
        auto in1 = scalop->getInput(1);
        std::cout << "in0 is: ";
        in0->print();
        std::cout << "in1 is: ";
        in1->print();
        std::cout << "output is: ";
        out0->print();
    }*/
    SECTION("MulPTQ")
    {
        std::shared_ptr<Tensor> t0 = std::make_shared<Tensor>(
            Array2D<float, 3, 3>{{{45, 72, 2},
                                  {84.15, 144.45, 0.01484},
                                  {0.62132, 17.67132, 212.132}}});
        auto scal = MulPTQ(2.001);
        auto scalop = std::static_pointer_cast<OperatorTensor>(scal->getOperator());
        t0->setBackend("cpu");
        scalop->associateInput(0, t0);
        scalop->setBackend("cpu");
        scalop->forwardDims();
        //scalop->forward();
        scal->forward();
        auto out0 = scalop->getOutput(0);
        auto in0 = scalop->getInput(0);
        auto in1 = scalop->getInput(1);
        std::cout << "in0 is: ";
        in0->print();
        std::cout << "in1 is: ";
        in1->print();
        std::cout << "output is: ";
        out0->print();
    }
}
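As committed, the test only prints the input and output tensors rather than asserting on them. A minimal follow-up sketch (not part of this commit): the already-included aidge/utils/TensorUtils.hpp provides approxEq, which could replace the printouts with a real check inside the "MulPTQ" section. The expected values below assume MulPTQ(2.001) performs an element-wise multiplication of the input by 2.001; if the operator's semantics differ, the expected tensor must be adjusted accordingly.

        // Sketch only: assumes MulPTQ(c) multiplies each input element by c.
        std::shared_ptr<Tensor> expected = std::make_shared<Tensor>(
            Array2D<float, 3, 3>{{{45.0f * 2.001f, 72.0f * 2.001f, 2.0f * 2.001f},
                                  {84.15f * 2.001f, 144.45f * 2.001f, 0.01484f * 2.001f},
                                  {0.62132f * 2.001f, 17.67132f * 2.001f, 212.132f * 2.001f}}});
        expected->setBackend("cpu");
        // approxEq comes from aidge/utils/TensorUtils.hpp, already included above.
        REQUIRE(approxEq<float>(*out0, *expected));

Comparing with approxEq keeps the check robust under floating-point rounding, whereas exact equality on float products would be brittle.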