Commit 7e51ee61 authored by Jerome Hue

Add some dummy gradient examples with PyTorch

parent b1d22937
grad.py 0 → 100644
import torch

def print_gradient_formula(operation, grad_formula):
    print(f"Gradient formula for {operation}: {grad_formula}\n")

def compute_gradients():
    # Example 1: Gradient of Element-wise Multiplication
    print("Example 1: Element-wise Multiplication")
    beta = torch.tensor([3.0, 4.0, 5.0], requires_grad=False)
    X1 = torch.tensor([2.0, 3.0, 4.0], requires_grad=True)
    print(f"beta : {beta}")
    print(f"X1 : {X1}")
    Y1 = beta * X1
    Y1.sum().backward()
    print(f"Gradient of Y1 with respect to X1: {X1.grad}")
    print_gradient_formula("β * X", "∂Y/∂X = β")
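    # Added sanity check (a sketch, not part of the original demo): for
    # Y = β * X with β constant, ∂(sum(Y))/∂X is exactly β, so the
    # autograd result should match beta element-wise.
    assert torch.allclose(X1.grad, beta)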
    # Reset gradients for the next operation
    X1.grad.zero_()

    # Example 2: Gradient of Element-wise Division
    print("Example 2: Element-wise Division")
    X2 = torch.tensor([2.0, 4.0, 8.0], requires_grad=True)
    Y2 = torch.tensor([1.0, 2.0, 4.0], requires_grad=False)
    print(f"X2 : {X2}")
    print(f"Y2 : {Y2}")
    Z2 = X2 / Y2
    Z2.sum().backward()
    print(f"Gradient of Z2 with respect to X2: {X2.grad}")
    print_gradient_formula("X / Y", "∂Z/∂X = 1 / Y")
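    # Added sanity check (sketch): for Z = X / Y with Y constant,
    # ∂(sum(Z))/∂X = 1 / Y element-wise.
    assert torch.allclose(X2.grad, 1.0 / Y2)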
    # Reset gradients for the next operation
    X2.grad.zero_()

    # Example 3: Gradient of Element-wise Power
    print("Example 3: Element-wise Power")
    X3 = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
    exponent = torch.tensor([2.0, 3.0, 4.0], requires_grad=False)
    print(f"X3 : {X3}")
    print(f"pow : {exponent}")
    Z3 = X3.pow(exponent)
    Z3.sum().backward()
    print(f"Gradient of Z3 with respect to X3: {X3.grad}")
    print_gradient_formula("X^exponent", "∂Z/∂X = exponent * X^(exponent - 1)")
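    # Added sanity check (sketch): the power rule gives
    # ∂(sum(X^p))/∂X = p * X^(p - 1); X3 is detached so the check itself
    # does not extend the autograd graph.
    assert torch.allclose(X3.grad, exponent * X3.detach().pow(exponent - 1))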
    # Reset gradients for the next operation
    X3.grad.zero_()

    # Example 4: Gradient of Element-wise Square Root
    print("Example 4: Element-wise Square Root")
    X4 = torch.tensor([4.0, 9.0, 16.0], requires_grad=True)
    print(f"X4 : {X4}")
    Z4 = X4.sqrt()
    Z4.sum().backward()
    print(f"Gradient of Z4 with respect to X4: {X4.grad}")
    print_gradient_formula("sqrt(X)", "∂Z/∂X = 1 / (2 * sqrt(X))")
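    # Added sanity check (sketch): ∂(sum(sqrt(X)))/∂X = 1 / (2 * sqrt(X)).
    assert torch.allclose(X4.grad, 1.0 / (2.0 * X4.detach().sqrt()))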
    # Reset gradients for the next operation
    X4.grad.zero_()

    # Example 5: Combining Multiple Operations
    print("Example 5: Combining Multiple Operations")
    X5 = torch.tensor([4.0, 9.0, 16.0], requires_grad=True)
    Y5 = torch.tensor([2.0, 3.0, 4.0], requires_grad=False)
    print(f"X5 : {X5}")
    print(f"Y5 : {Y5}")
    Z5 = (X5.pow(2) / Y5).sqrt()
    Z5.sum().backward()
    print(f"Gradient of Z5 with respect to X5: {X5.grad}")
    print_gradient_formula("sqrt((X^2) / Y)", "∂Z/∂X = (1 / (2 * sqrt((X^2) / Y))) * (2 * X / Y)")
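    # Added sanity check (sketch): applying the chain rule to
    # Z = sqrt(X^2 / Y) and simplifying the printed formula gives
    # ∂Z/∂X = X / (Y * sqrt(X^2 / Y)).
    expected = X5.detach() / (Y5 * (X5.detach().pow(2) / Y5).sqrt())
    assert torch.allclose(X5.grad, expected)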

if __name__ == "__main__":
    compute_gradients()

# Testing Tensor type in aidge
import numpy as np
import aidge_core

array = np.random.rand(16, 28 * 28)
tensor = aidge_core.Tensor(array)
capacity = tensor.capacity()

print(f"Capacity {capacity}")
print(f"Dims {tensor.dims()}")
print(f"dtype {tensor.dtype()}")
print(f"Coords {tensor.get_coord(0)}")
print()