Commit df167f3f authored by Grégoire Kubler

Merge branch 'feat/release_pip' into 'main'

Feat/release pip

See merge request !12
parents d945030d 3f3be842
Pipeline #53562 passed
Showing 299 additions and 108 deletions
@@ -11,6 +11,8 @@ __pycache__
*.pyc
*.egg-info
dist*/
aidge_export_cpp/_version.py
wheelhouse/*
# Mermaid
*.mmd
@@ -19,4 +21,4 @@ dist*/
xml*/
# ONNX
*.onnx
\ No newline at end of file
*.onnx
###############################################################################
# Aidge Continuous Integration and Deployment #
# #
###############################################################################
stages:
- static_analysis
- build
- test
- coverage
- release
- deploy
include:
- project: 'eclipse/aidge/gitlab_shared_files'
ref: 'main'
file:
# choose which jobs to run by including the corresponding files.
- '.gitlab/ci/ubuntu_python.gitlab-ci.yml'
- '.gitlab/ci/release/pip.gitlab-ci.yml'
# Since aidge_export_cpp is a pure Python package, building on Windows does not differ from building on Ubuntu.
# - '.gitlab/ci/windows_python.gitlab-ci.yml'
#!/bin/bash
set -e
if [[ "$1" == "" ]]; then
echo "build aidge deps in cibuildwheel container before building wheel."
echo "search path defines where the dependencies will be searched."
echo "Hint : In wheel containers, files are mounted on /host by default."
echo "\nusage : ./cibuildwheel_build_deps_before_build_wheel.sh $search_path"
fi
set -x
if [[ $AIDGE_DEPENDENCIES == "" ]]; then # case for aidge_core
mkdir -p build # creating build if its not already there to hold the build of cpp files
rm -rf build/* # build from scratch
else
for repo in $AIDGE_DEPENDENCIES ; do # case for other projects
search_path=$1
REPO_PATH=$(find $search_path ! -writable -prune -o -type d \
-name "$repo" \
-not -path "*/install/*" \
-not -path "*/.git/*" \
-not -path "*/miniconda/*" \
-not -path "*/conda/*" \
-not -path "*/.local/*" \
-not -path "*/lib/*" \
-not -path "*/$repo/$repo/*" \
-not -path "*/proc/*" \
-print -quit)
if [[ -z "$REPO_PATH" ]]; then
echo "ERROR : dependency $repo not found in search_path \"$search_path\". ABORTING."
exit -1
fi
cd $REPO_PATH
mkdir -p build # creating build if its not already there to hold the build of cpp files
rm -rf build/* # build from scratch
pip install . -v
cd -
done
fi
set +x
set +e
include MANIFEST.in
include LICENSE
include README.md
recursive-include aidge_export_cpp *
include setup.py
include version.txt
@@ -9,7 +9,5 @@ import aidge_core
from aidge_export_cpp.utils import ROOT
__version__ = open(ROOT / "version.txt", "r").read().strip()
from .export import *
@@ -90,13 +90,18 @@ def export(export_folder_name, graphview, scheduler):
list_outputs_name.append((export_type, node.name()))
# Generate forward file
# TODO: for now a single mem type is used for all intermediate results; this should change.
# Note that all inputs may be constants, hence we select the output type
assert len(list_outputs_name) >= 1, f"TODO: requires some output to determine mem type"
mem_ctype = list_outputs_name[0][0]
generate_file(
str(dnn_folder / "src" / "forward.cpp"),
str(ROOT / "templates" / "network" / "network_forward.jinja"),
headers=list_configs,
actions=list_actions,
inputs= list_inputs_name,
outputs=list_outputs_name
outputs=list_outputs_name,
mem_ctype=mem_ctype,
)
# Generate dnn API
......
#ifndef __AIDGE_EXPORT_CPP_KERNELS_MATMUL__
#define __AIDGE_EXPORT_CPP_KERNELS_MATMUL__
#include "network/typedefs.hpp"
#include "kernels/activation.hpp"
// Generic function for matmul and activation
template<int M,
int K,
int N,
ActivationFunction_T ACTIVATION,
typename Input_T, typename Output_T,
typename Rescaling_T>
__attribute__((always_inline)) inline
void matmul_forward (
const Input_T* __restrict inputs1,
const Input_T* __restrict inputs2,
Output_T* __restrict outputs,
const Rescaling_T& __restrict rescaling)
{
for (int m = 0; m < M; ++m) {
for (int n = 0; n < N; ++n) {
Output_T sum = Output_T(0);
for (int k = 0; k < K; ++k) {
sum += inputs1[K*m + k] * inputs2[N*k + n];
}
outputs[N*m + n] = activation_forward_value<Output_T>(sum, 0/*not applicable*/, ACTIVATION, rescaling);
}
}
}
#endif // __AIDGE_EXPORT_CPP_KERNELS_MATMUL__
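For readers checking the indexing, the kernel above is a plain row-major (M x K) by (K x N) product with the activation applied to each accumulator. A minimal NumPy sketch (illustrative only, not part of the export) reproducing the same loop nest:

``` python
# Cross-check of the row-major indexing used by matmul_forward:
# inputs1 is M x K stored as inputs1[K*m + k], inputs2 is K x N stored as inputs2[N*k + n].
import numpy as np

M, K, N = 2, 3, 4                               # arbitrary example sizes
a = np.random.rand(M, K).astype(np.float32)
b = np.random.rand(K, N).astype(np.float32)
a_flat, b_flat = a.ravel(), b.ravel()           # row-major flattening, as in the C++ kernel

out = np.zeros((M, N), dtype=np.float32)
for m in range(M):
    for n in range(N):
        acc = np.float32(0)
        for k in range(K):
            acc += a_flat[K * m + k] * b_flat[N * k + n]
        out[m, n] = acc                         # Linear activation, NoScaling

assert np.allclose(out, a @ b)
```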
@@ -238,6 +238,8 @@ class AddCPP(ExportNode):
copyfile(str(ROOT / "kernels" / "elemwise.hpp"),
str(export_folder / "include" / "kernels"))
copyfile(str(ROOT / "kernels" / "activation.hpp"),
str(export_folder / "include" / "kernels"))
generate_file(
str(export_folder / "layers" / f"{self.name}.h"),
@@ -251,13 +253,13 @@ class AddCPP(ExportNode):
return list_configs
def forward(self, list_actions:list):
list_actions.append(set_up_output(self.name, "float"))
if not self.is_last:
list_actions.append(set_up_output(self.name, "float"))
list_actions.append(generate_str(
str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja"),
name=self.name,
inputs1_name=self.parents[0].name() if self.parents[0] else self.name + "_input1",
inputs2_name=self.parents[1].name() if self.parents[1] else self.name + "_input2",
inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
output_name=self.name
))
return list_actions
@@ -272,6 +274,9 @@ class SubCPP(ExportNode):
list_configs.append("kernels/elemwise.hpp")
copyfile(str(ROOT / "kernels" / "elemwise.hpp"),
str(export_folder / "include" / "kernels"))
copyfile(str(ROOT / "kernels" / "activation.hpp"),
str(export_folder / "include" / "kernels"))
generate_file(
str(export_folder / "layers" / f"{self.name}.h"),
str(ROOT / "templates" / "configuration" / "elemwise_config.jinja"),
@@ -284,8 +289,46 @@ class SubCPP(ExportNode):
return list_configs
def forward(self, list_actions:list):
if not self.is_last:
list_actions.append(set_up_output(self.name, "float"))
list_actions.append(generate_str(
str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja"),
name=self.name,
inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
output_name=self.name
))
return list_actions
@operator_register("Mul")
class MulCPP(ExportNode):
def __init__(self, node):
super().__init__(node)
def export(self, export_folder:str, list_configs:list):
list_configs.append(f"layers/{self.name}.h")
list_configs.append("kernels/elemwise.hpp")
copyfile(str(ROOT / "kernels" / "elemwise.hpp"),
str(export_folder / "include" / "kernels"))
copyfile(str(ROOT / "kernels" / "activation.hpp"),
str(export_folder / "include" / "kernels"))
generate_file(
str(export_folder / "layers" / f"{self.name}.h"),
str(ROOT / "templates" / "configuration" / "elemwise_config.jinja"),
name=self.name,
nb_elts=np.prod(self.inputs_dims[0]),
activation="Linear",
elemwise_op="Mul",
rescaling="NoScaling")
return list_configs
def forward(self, list_actions:list):
if not self.is_last:
list_actions.append(set_up_output(self.name, "float"))
list_actions.append(generate_str(
str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja"),
name=self.name,
@@ -410,3 +453,68 @@ class FcCPP(ExportNode):
))
return list_actions
@operator_register("MatMul")
class MatMulCPP(ExportNode):
def __init__(self, node):
super().__init__(node)
dims0, dims1, outdims = [tuple(x) for x in [self.inputs_dims[0], self.inputs_dims[1], self.outputs_dims[0]]]
# TODO: MatMul aidge operator supports N-D multi broadcast dimensions where N > 2
assert len(dims0) <= 2 and len(dims1) <= 2, (
f"MatMul export does not yet support dimensions above 2D: input shapes are {dims0}, {dims1}")
# Cast to at least 1D
# Note that from MatMul::forwardDims(), scalar inputs are supported
# which is actually more general than np.matmul
dims0 = dims0 if len(dims0) >= 1 else (1, 1)
dims1 = dims1 if len(dims1) >= 1 else (1, 1)
# Cast to at least 2D
dims0 = dims0 if len(dims0) >= 2 else (1, dims0[0])
dims1 = dims1 if len(dims1) >= 2 else (dims1[0], 1)
assert dims0[1] == dims1[0], (
f"MatMul input dimensions do not match, expected (m, k), (k, n): input shapes are {dims0}, {dims1}")
outdims = outdims if len(outdims) > 0 else (1, 1)
assert outdims == (dims0[0], dims1[1]), (
f"MatMul output dimensions do not match, expected (m, n) for inputs (m, k), (k, n): output shape is {outdims}, input shapes are {dims0}, {dims1}")
self.matmul_inputs_dims = dims0, dims1
self.matmul_output_dims = outdims
def export(self, export_folder:Path, list_configs:list):
copyfile(str(ROOT / "kernels" / "matmul.hpp"),
str(export_folder / "include" / "kernels"))
copyfile(str(ROOT / "kernels" / "activation.hpp"),
str(export_folder / "include" / "kernels"))
# Add to config list the include of configurations
list_configs.append("kernels/matmul.hpp")
list_configs.append(f"layers/{self.name}.h")
# Export configuration file
generate_file(
str(export_folder / "layers" / f"{self.name}.h"),
str(ROOT / "templates" / "configuration" / "matmul_config.jinja"),
name=self.name,
inputs_dims=self.matmul_inputs_dims,
output_dims=self.matmul_output_dims,
activation="Linear",
rescaling="NoScaling",
)
return list_configs
def forward(self, list_actions:list):
if not self.is_last:
list_actions.append(set_up_output(self.name, "float"))
list_actions.append(generate_str(
str(ROOT / "templates" / "kernel_forward" / "matmul_forward.jinja"),
name=self.name,
inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
outputs_name=self.name
))
return list_actions
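The dimension handling in MatMulCPP.__init__ above promotes scalar and 1-D inputs to 2-D before checking (m, k) x (k, n) compatibility. A standalone sketch of that promotion (illustrative only; plain tuples instead of Aidge tensors):

``` python
# Sketch of the shape promotion done in MatMulCPP.__init__: scalars become (1, 1),
# a 1-D left operand becomes a row vector, a 1-D right operand becomes a column vector,
# then (m, k) x (k, n) compatibility is checked and the output shape is (m, n).
def promote_matmul_dims(dims0, dims1):
    dims0 = tuple(dims0) if len(dims0) >= 1 else (1, 1)
    dims1 = tuple(dims1) if len(dims1) >= 1 else (1, 1)
    dims0 = dims0 if len(dims0) >= 2 else (1, dims0[0])   # row vector
    dims1 = dims1 if len(dims1) >= 2 else (dims1[0], 1)   # column vector
    assert dims0[1] == dims1[0], f"incompatible shapes {dims0} x {dims1}"
    return dims0, dims1, (dims0[0], dims1[1])

print(promote_matmul_dims((3,), (3, 4)))   # ((1, 3), (3, 4), (1, 4))
print(promote_matmul_dims((), ()))         # ((1, 1), (1, 1), (1, 1))
```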
{#- For name header -#}
#ifndef {{ name|upper }}_LAYER_H
#define {{ name|upper }}_LAYER_H
{# For layer configuration -#}
#define {{ name|upper }}_M {{ inputs_dims[0][0] }}
#define {{ name|upper }}_K {{ inputs_dims[0][1] }}
#define {{ name|upper }}_N {{ inputs_dims[1][1] }}
#define {{ name|upper }}_ACTIVATION {{ activation }}
static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
{#- Calculate sizes #}
#endif /* {{ name|upper }}_LAYER_H */
matmul_forward<{{name|upper}}_M,
{{name|upper}}_K,
{{name|upper}}_N,
{{name|upper}}_ACTIVATION>
({{inputs1_name}}, {{inputs2_name}}, {{outputs_name}}, {{name|upper}}_RESCALING);
\ No newline at end of file
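To see what these templates emit for a given node, they can be rendered with plain Jinja2. The export itself goes through the generate_file/generate_str helpers; the node name matmul0 below is hypothetical and used only for illustration:

``` python
# Render the forward-call template above with jinja2 directly (illustration only).
from jinja2 import Template

forward_tpl = Template(
    "matmul_forward<{{name|upper}}_M,\n"
    "               {{name|upper}}_K,\n"
    "               {{name|upper}}_N,\n"
    "               {{name|upper}}_ACTIVATION>\n"
    "               ({{inputs1_name}}, {{inputs2_name}}, {{outputs_name}}, {{name|upper}}_RESCALING);"
)
print(forward_tpl.render(name="matmul0",
                         inputs1_name="matmul0_input1",
                         inputs2_name="matmul0_input2",
                         outputs_name="matmul0"))
# matmul_forward<MATMUL0_M,
#                MATMUL0_K,
#                MATMUL0_N,
#                MATMUL0_ACTIVATION>
#                (matmul0_input1, matmul0_input2, matmul0, MATMUL0_RESCALING);
```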
@@ -11,7 +11,7 @@
{# mem has the datatype of the first input #}
{#- Change here to improve it -#}
static {{inputs[0][0]}} mem[MEMORY_SIZE];
static {{mem_ctype}} mem[MEMORY_SIZE];
{# Forward function #}
{#- Support multiple inputs with different datatypes and multiple outputs with different datatypes -#}
......
%% Cell type:markdown id: tags:
# Add a custom operator in the CPP export
%% Cell type:markdown id: tags:
The main objective of this tutorial is to demonstrate the toolchain to **detect unsupported operators** and **add them** to an export module. <br>
We use the CPP export module ``aidge_export_cpp`` as the running example.
%% Cell type:markdown id: tags:
## Import Aidge
%% Cell type:code id: tags:
``` python
!pip install nbconvert
```
%% Cell type:code id: tags:
``` python
import aidge_core
import aidge_backend_cpu
import aidge_onnx
import numpy as np
import os
import requests
```
%% Cell type:markdown id: tags:
## Load ONNX model
%% Cell type:code id: tags:
``` python
# Download onnx file if it has not been done before
if not os.path.isfile("./lenet_mnist.onnx"):
response = requests.get("https://huggingface.co/vtemplier/LeNet_MNIST/resolve/main/lenet_mnist.onnx?download=true")
if response.status_code == 200:
with open("lenet_mnist.onnx", 'wb') as f:
f.write(response.content)
print("ONNX model downloaded successfully.")
else:
print("Failed to download ONNX model. Status code:", response.status_code)
```
%% Cell type:code id: tags:
``` python
model = aidge_onnx.load_onnx("lenet_mnist.onnx")
```
%% Cell type:code id: tags:
``` python
# Remove the Flatten node, which is useless in the CPP export
aidge_core.remove_flatten(model)
# Freeze the model by setting parameter Producers as constant
for node in model.get_nodes():
if node.type() == "Producer":
node.get_operator().set_attr("Constant", True)
# Create Producer Node for the Graph
input_node = aidge_core.Producer([1, 1, 28, 28], "input")
input_node.add_child(model)
model.add(input_node)
# Configuration for the model + forward dimensions
model.compile("cpu", aidge_core.dtype.float32)
```
%% Cell type:code id: tags:
``` python
# Generate scheduling of the model
scheduler = aidge_core.SequentialScheduler(model)
scheduler.generate_scheduling()
```
%% Cell type:markdown id: tags:
## Replace ReLU operators with Swish operators
Let's say you want to replace ReLU with another activation function, such as Swish.
%% Cell type:code id: tags:
``` python
switch_id = 0 # ID for naming newly created Swish operators
for node in scheduler.get_static_scheduling():
if node.type() == "ReLU":
print(f"{node.name()} will be replaced")
# Swish is not implemented by default in Aidge
# It is inserted in the current model as a GenericOperator that we will customize
node_swish = aidge_core.GenericOperator("Swish", nb_data=1, nb_param=0, nb_out=1, name=f"swish_{switch_id}")
node_swish.get_operator().add_attr("betas", [1.0]*node.get_operator().get_input(0).dims()[1])
aidge_core.GraphView.replace(set([node]), set([node_swish]))
switch_id+=1
```
%% Cell type:code id: tags:
``` python
import base64
from IPython.display import Image, display
import matplotlib.pyplot as plt
# Function to visualize .mmd files
def visualize_mmd(path_to_mmd):
with open(path_to_mmd, "r") as file_mmd:
graph_mmd = file_mmd.read()
graphbytes = graph_mmd.encode("utf-8")
base64_bytes = base64.b64encode(graphbytes)
base64_string = base64_bytes.decode("utf-8")
display(Image(url=f"https://mermaid.ink/img/{base64_string}"))
model.save("myModel")
visualize_mmd("myModel.mmd")
```
%% Cell type:markdown id: tags:
## Schedule the graph
%% Cell type:markdown id: tags:
Add an implementation for Swish. <br>
The implementation is required to perform a sequential scheduling.
%% Cell type:code id: tags:
``` python
class SwishImpl(aidge_core.OperatorImpl): # Inherit OperatorImpl to interface with Aidge !
def __init__(self, op: aidge_core.Operator):
aidge_core.OperatorImpl.__init__(self, op, 'cpu')
# No need to define a forward() function in Python, as we do not intend to run inference on the model
for node in model.get_nodes():
if node.type() == "Swish":
node.get_operator().set_forward_dims(lambda x: x) # to propagate dimensions in the model
node.get_operator().set_impl(SwishImpl(node.get_operator())) # Setting implementation
```
%% Cell type:code id: tags:
``` python
scheduler = aidge_core.SequentialScheduler(model)
model.forward_dims()
scheduler.generate_scheduling()
```
%% Cell type:markdown id: tags:
## Add Swish to the CPP export support
%% Cell type:code id: tags:
``` python
import aidge_export_cpp as cpp
cpp.supported_operators()
```
%% Cell type:code id: tags:
``` python
from aidge_export_cpp.operators import *
# To complete
@operator_register("Swish")
class SwishCPP(ExportNode):
def __init__(self, node):
super().__init__(node)
self.betas: float = self.operator.get_attr("betas")
def export(self, export_folder: str, list_configs: list[str]) -> list[str]:
copyfile("for_export/swish_kernel.hpp",
f"{export_folder}/include/kernels/")
list_configs.append(f"layers/{self.name}.h")
generate_file(
f"{export_folder}/layers/{self.name}.h",
"for_export/swish_config.jinja",
name=self.name,
output_dims=self.outputs_dims[0]
)
return list_configs
def forward(self, list_actions:list[str]) -> list[str]:
if not self.is_last:
list_actions.append(set_up_output(self.name, "float"))
list_actions.append(generate_str(
"for_export/swish_forward.jinja",
name=self.name,
input_name=self.input.name(),
output_name=self.name
))
return list_actions
```
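%% Cell type:markdown id: tags:
For reference, Swish computes ``x * sigmoid(beta * x)`` with a per-channel ``beta`` (the ``betas`` attribute set above). The ``for_export/swish_kernel.hpp`` kernel itself is not shown in this tutorial; the NumPy sketch below only illustrates the math such a kernel would implement.
%% Cell type:code id: tags:
``` python
import numpy as np

def swish_reference(x, betas):
    # x: (N, C, H, W) activations, betas: per-channel coefficients of length C
    betas = np.asarray(betas, dtype=x.dtype).reshape(1, -1, 1, 1)
    return x / (1.0 + np.exp(-betas * x))   # equivalent to x * sigmoid(beta * x)

x = np.random.randn(1, 4, 8, 8).astype("float32")
print(swish_reference(x, [1.0] * 4).shape)
```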
%% Cell type:code id: tags:
``` python
print(cpp.supported_operators())
```
%% Cell type:code id: tags:
``` python
cpp.export("myexport", model, scheduler)
```
%% Cell type:code id: tags:
``` python
!tree myexport
```
%% Cell type:code id: tags:
``` python
digit = np.load("digit.npy")
cpp.generate_input_file(array_name="inputs", array=digit.reshape(-1), folder_path="myexport")
```
%% Cell type:code id: tags:
``` python
!cd myexport && make
```
%% Cell type:code id: tags:
``` python
!./myexport/bin/run_export
```
......
%% Cell type:markdown id: tags:
# Export a MNIST model to a CPP standalone project
%% Cell type:code id: tags:
``` python
%pip install requests numpy ipywidgets ipycanvas
%pip install requests numpy ipywidgets ipycanvas nbconvert
```
%% Cell type:markdown id: tags:
## Download the model
%% Cell type:code id: tags:
``` python
import os
import requests
```
%% Cell type:code id: tags:
``` python
# Download onnx file if it has not been done before
if not os.path.isfile("./lenet_mnist.onnx"):
response = requests.get("https://huggingface.co/vtemplier/LeNet_MNIST/resolve/main/lenet_mnist.onnx?download=true")
if response.status_code == 200:
with open("lenet_mnist.onnx", 'wb') as f:
f.write(response.content)
print("ONNX model downloaded successfully.")
else:
print("Failed to download ONNX model. Status code:", response.status_code)
```
%% Cell type:markdown id: tags:
## Load the model in Aidge and manipulate it
%% Cell type:code id: tags:
``` python
import aidge_core
import aidge_backend_cpu
import aidge_onnx
import aidge_export_cpp
```
%% Cell type:code id: tags:
``` python
model = aidge_onnx.load_onnx("lenet_mnist.onnx")
```
%% Cell type:code id: tags:
``` python
# Remove the Flatten node, which is useless in the CPP export
aidge_core.remove_flatten(model)
# Freeze the model by setting parameter Producers as constant
for node in model.get_nodes():
if node.type() == "Producer":
node.get_operator().set_attr("Constant", True)
# Create Producer Node for the Graph
input_node = aidge_core.Producer([1, 1, 28, 28], "input")
input_node.add_child(model)
model.add(input_node)
# Configuration for the model + forward dimensions
model.compile("cpu", aidge_core.dtype.float32)
```
%% Cell type:code id: tags:
``` python
# Generate scheduling of the model
scheduler = aidge_core.SequentialScheduler(model)
scheduler.generate_scheduling()
```
%% Cell type:code id: tags:
``` python
model.save("test")
```
%% Cell type:markdown id: tags:
## Export the model
%% Cell type:code id: tags:
``` python
aidge_export_cpp.export("lenet_export_fp32", model, scheduler)
```
%% Cell type:markdown id: tags:
### Draw your own number
%% Cell type:code id: tags:
``` python
from ipywidgets import HBox, VBox, Button, Layout
from ipycanvas import RoughCanvas, hold_canvas
img_name = "my_number.png"
canvas = RoughCanvas(width=28, height=28, sync_image_data=True)
button_gen = Button(description="Generate PNG")
button_clear = Button(description="Clear")
drawing = False
position = None
shape = []
def on_erase_button_clicked(b):
canvas.clear()
def on_generate_button_clicked(b):
try:
canvas.to_file(img_name)
print(f"Image generated to {img_name} !")
except:
print("Draw a number before generating the image.")
button_clear.on_click(on_erase_button_clicked)
button_gen.on_click(on_generate_button_clicked)
def on_mouse_down(x, y):
global drawing
global position
global shape
drawing = True
position = (x, y)
shape = [position]
def on_mouse_move(x, y):
global drawing
global position
global shape
if not drawing:
return
with hold_canvas():
canvas.stroke_line(position[0], position[1], x, y)
position = (x, y)
shape.append(position)
def on_mouse_up(x, y):
global drawing
global position
global shape
drawing = False
with hold_canvas():
canvas.stroke_line(position[0], position[1], x, y)
shape = []
canvas.on_mouse_down(on_mouse_down)
canvas.on_mouse_move(on_mouse_move)
canvas.on_mouse_up(on_mouse_up)
canvas.stroke_style = "#000000"
VBox((canvas, HBox((button_gen, button_clear))),
layout=Layout(height='auto', width="300px"))
```
%% Cell type:markdown id: tags:
### Generate inputs for testing the model from your drawing
%% Cell type:code id: tags:
``` python
try:
number_np = canvas.get_image_data()
# We get a NumPy array with shape (28, 28, 4)
# Keep only the alpha channel to obtain a (28, 28) array
x = number_np[:, :, 3].astype("float32")
# Convert from [0, 255] to [0, 1] and export it
aidge_export_cpp.generate_input_file(export_folder="lenet_export_fp32",
array_name="inputs",
array=x / 255)
except:
print("Please draw a number in the previous cell before running this one.")
```
%% Cell type:markdown id: tags:
### Compile the export and test it
%% Cell type:code id: tags:
``` python
!cd lenet_export_fp32 && make
```
%% Cell type:code id: tags:
``` python
!./lenet_export_fp32/bin/run_export
```
......
[project]
name = "aidge_export_cpp"
description="Aidge CPP generic export"
dependencies = [
"numpy>=1.20",
"Jinja2>=3.1.3",
]
requires-python = ">= 3.7"
readme = "README.md"
license = { file = "LICENSE" }
classifiers = [
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)",
"Programming Language :: C++",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development"
]
dynamic = ["version"] # defined in tool.setuptools_scm
[build-system]
requires = [
"setuptools>=64",
"setuptools_scm[toml]==7.1.0",
"toml"
]
build-backend = "setuptools.build_meta"
#####################################################
# SETUPTOOLS
[tool.setuptools]
[tool.setuptools.packages.find]
where = ["."] # list of folders that contain the packages (["."] by default)
include = ["aidge_export_cpp.*"] # package names should match these glob patterns (["*"] by default)
exclude = ["aidge_export_cpp.unit_tests*"] # exclude packages matching these glob patterns (empty by default)
namespaces = false # to disable scanning PEP 420 namespaces (true by default)
# SETUPTOOLS_SCM
[tool.setuptools_scm]
write_to = "aidge_export_cpp/_version.py"
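With this configuration the package version is derived from git tags at build time and written to ``aidge_export_cpp/_version.py`` (hence its addition to .gitignore above). A hedged sketch of how the package could read it, falling back to the in-tree ``version.txt`` for source checkouts (the fallback shown here is illustrative, not the project's current ``__init__.py``):

``` python
# Illustrative only: prefer the setuptools_scm-generated module when installed,
# otherwise fall back to the version.txt file shipped in the source tree.
try:
    from aidge_export_cpp._version import version as __version__
except ImportError:
    from pathlib import Path
    __version__ = (Path(__file__).parent / "version.txt").read_text().strip()
```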
Jinja2
numpy
\ No newline at end of file
#!/usr/bin/env python3
""" Aidge CPP generic export
"""
DOCLINES = (__doc__ or '').split("\n")
import sys
# Python supported version checks
if sys.version_info[:2] < (3, 7):
raise RuntimeError("Python version >= 3.7 required.")
CLASSIFIERS = """\
Development Status :: 2 - Pre-Alpha
Intended Audience :: Developers
Intended Audience :: Education
Intended Audience :: Science/Research
License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)
Programming Language :: C++
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3 :: Only
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Artificial Intelligence
Topic :: Software Development
"""
import os
import shutil
import pathlib
from setuptools import setup, Extension
from setuptools import find_packages
from setuptools.command.build_ext import build_ext
def get_project_version() -> str:
aidge_root = pathlib.Path().absolute()
version = open(aidge_root / "version.txt", "r").read().strip()
return version
class AdditionalExtension(Extension):
def __init__(self, name):
super().__init__(name, sources=[])
class AdditionalBuild(build_ext):
def run(self):
cwd = pathlib.Path().absolute()
build_temp = cwd / "build"
if not build_temp.exists():
build_temp.mkdir(parents=True, exist_ok=True)
build_lib = pathlib.Path(self.build_lib)
if not build_lib.exists():
build_lib.mkdir(parents=True, exist_ok=True)
aidge_package = build_lib / "aidge_export_cpp"
# Copy version.txt in aidge_package
os.chdir(os.path.dirname(__file__))
shutil.copy("version.txt", str(aidge_package.absolute()))
if __name__ == '__main__':
setup(
name="aidge_export_cpp",
version=get_project_version(),
license="Eclipse Public License 2.0 (EPL-2.0)",
python_requires='>=3.7',
description=DOCLINES[0],
long_description_content_type="text/markdown",
long_description="\n".join(DOCLINES[2:]),
classifiers=[c for c in CLASSIFIERS.split('\n') if c],
platforms=["Linux"],
packages=find_packages(where="."),
include_package_data=True,
ext_modules=[AdditionalExtension("aidge_export_cpp")],
cmdclass={
'build_ext': AdditionalBuild,
},
)
0.1.2
\ No newline at end of file
0.1.2