
Commit

end autodiff and test
Tokisakix committed Apr 6, 2024
1 parent bcfe7ac commit 714d624
Showing 3 changed files with 180 additions and 38 deletions.
14 changes: 7 additions & 7 deletions terox/autodiff/scalar_opts.py
@@ -102,7 +102,7 @@ def _forward(self, a:Variable) -> Variable:
     def _backward(self, grad:float, args: Iterable[Variable]) -> Iterable[Variable]:
         (a,) = args
         a_grad = -grad
-        return a_grad
+        return (a_grad,)
 
 class Max(VarFunction):
     def __init__(self) -> None:
@@ -203,7 +203,7 @@ def _forward(self, a:Variable) -> Variable:
     def _backward(self, grad:float, args: Iterable[Variable]) -> Iterable[Variable]:
         (a,) = args
         a_grad = grad if a.item() >= 0.0 else -grad
-        return a_grad
+        return (a_grad,)
 
 class Exp(VarFunction):
     def __init__(self) -> None:
@@ -219,7 +219,7 @@ def _forward(self, a:Variable) -> Variable:
     def _backward(self, grad:float, args: Iterable[Variable]) -> Iterable[Variable]:
         (a,) = args
         a_grad = grad * exp(a.item())
-        return a_grad
+        return (a_grad,)
 
 class Log(VarFunction):
     def __init__(self) -> None:
@@ -235,7 +235,7 @@ def _forward(self, a:Variable) -> Variable:
     def _backward(self, grad:float, args: Iterable[Variable]) -> Iterable[Variable]:
         (a,) = args
         a_grad = grad / a.item()
-        return a_grad
+        return (a_grad,)
 
 class Relu(VarFunction):
     def __init__(self) -> None:
@@ -251,7 +251,7 @@ def _forward(self, a:Variable) -> Variable:
     def _backward(self, grad:float, args: Iterable[Variable]) -> Iterable[Variable]:
         (a,) = args
         a_grad = grad if a.item() >= 0.0 else 0.0
-        return a_grad
+        return (a_grad,)
 
 class Sigmoid(VarFunction):
     def __init__(self) -> None:
@@ -268,7 +268,7 @@ def _backward(self, grad:float, args: Iterable[Variable]) -> Iterable[Variable]:
         (a,) = args
         sigmoid = 1.0 / (1.0 + exp(-a.item()))
         a_grad = grad * sigmoid * (1.0 - sigmoid)
-        return a_grad
+        return (a_grad,)
 
 class Tanh(VarFunction):
     def __init__(self) -> None:
@@ -285,4 +285,4 @@ def _backward(self, grad:float, args: Iterable[Variable]) -> Iterable[Variable]:
         (a,) = args
         tanh = (exp(a.item()) - exp(-a.item())) / (exp(a.item()) + exp(-a.item()))
         a_grad = grad * (1.0 - tanh ** 2)
-        return a_grad
+        return (a_grad,)
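
Note on the scalar_opts.py hunks above: every unary _backward now returns a one-element tuple instead of a bare float, so gradients can be zipped against the saved arguments the same way for unary and binary operators. A minimal sketch of that calling convention follows; apply_chain_rule is an assumed helper for illustration, not a terox API:

# Illustrative sketch only: shows the tuple-return convention of _backward.
# apply_chain_rule is an assumed helper, not part of the repository.
def apply_chain_rule(func, out_grad, args):
    grads = func._backward(out_grad, args)   # always an iterable, e.g. (a_grad,)
    for arg, grad in zip(args, grads):       # uniform for one or more arguments
        arg._gradient += grad                # accumulate, as Variable._chainRule does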
38 changes: 7 additions & 31 deletions terox/autodiff/variable.py
@@ -55,11 +55,12 @@ def _chainRule(self) -> None:
             arg._gradient += grad
         return
 
-    def backward(self):
-        topoList = getTopoList(self)
-        self._oneGrad()
-        for variable in topoList:
-            variable._chainRule()
+    def backward(self, first=True):
+        if first:
+            self._oneGrad()
+        self._chainRule()
+        for parent in self._parent():
+            parent.backward(first=False)
         return
 
     def new(self) -> "Variable":
@@ -81,29 +81,4 @@ def detach(self) -> "Variable":
         raise NotImplementedError
 
     def item(self) -> object:
-        raise NotImplementedError
-
-def _getTopoChain(var:"Variable") -> Iterable["Variable"]:
-    topoChain = []
-    for parent in var._parent():
-        topoChain.append((var, parent))
-        topoChain += _getTopoChain(parent)
-    return topoChain
-
-def getTopoList(var:"Variable") -> Iterable["Variable"]:
-    topoChain = _getTopoChain(var)
-    topoChain = list(set(topoChain))
-    topoDegree = {var:1}
-    for _, parent in topoChain:
-        if not parent in topoDegree:
-            topoDegree[parent] = 0
-        topoDegree[parent] += 1
-    topoList, queue = [], [var]
-    while len(queue) > 0:
-        variable = queue[0]
-        queue += variable._parent()
-        topoDegree[variable] -= 1
-        if topoDegree[variable] == 0:
-            topoList.append(variable)
-        del queue[0]
-    return topoList
+        raise NotImplementedError
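
The variable.py change swaps the topological-sort traversal (getTopoList) for a depth-first recursion: backward() seeds the output gradient once via _oneGrad, then each node applies _chainRule and recurses into its parents with first=False, so gradient contributions accumulate along every path from the output. A standalone sketch of that traversal on a toy node class follows; the Node class is illustrative only, and just the method names mirror the diff:

# Illustrative toy graph, assuming leaf nodes have no parents and gradients
# start at 0.0; Node is not the terox Variable, only the traversal matches.
class Node:
    def __init__(self, parents=()):
        self._gradient = 0.0
        self._parents = tuple(parents)

    def _oneGrad(self):
        self._gradient = 1.0

    def _parent(self):
        return self._parents

    def _chainRule(self):
        # push this node's accumulated gradient to each parent (identity op here)
        for p in self._parents:
            p._gradient += self._gradient

    def backward(self, first=True):
        if first:
            self._oneGrad()
        self._chainRule()
        for parent in self._parent():
            parent.backward(first=False)

a = Node()
b = Node(parents=(a,))
c = Node(parents=(b,))
c.backward()          # a._gradient == 1.0 after the recursive sweep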
166 changes: 166 additions & 0 deletions test/autodiff/test_backward.py
@@ -0,0 +1,166 @@
import pytest
import math

from terox.autodiff.scalar import Scalar

@pytest.mark.test_scalar_overload
def test_add_backward(a:float=2.0, b:float=3.0) -> None:
    A = Scalar(a, None, None)
    B = Scalar(b, None, None)
    C = A + B
    C.backward()
    assert A._gradient == 1.0
    assert B._gradient == 1.0
    return

@pytest.mark.test_scalar_overload
def test_sub(a:float=2.0, b:float=3.0) -> None:
    A = Scalar(a, None, None)
    B = Scalar(b, None, None)
    C = A - B
    C.backward()
    assert A._gradient == 1.0
    assert B._gradient == -1.0
    return

@pytest.mark.test_scalar_overload
def test_mul(a:float=2.0, b:float=3.0) -> None:
    A = Scalar(a, None, None)
    B = Scalar(b, None, None)
    C = A * B
    C.backward()
    assert A._gradient == 3.0
    assert B._gradient == 2.0
    return

@pytest.mark.test_scalar_overload
def test_div(a:float=2.0, b:float=3.0) -> None:
    A = Scalar(a, None, None)
    B = Scalar(b, None, None)
    C = A / B
    C.backward()
    assert A._gradient == 1.0 / 3.0
    assert B._gradient == -1.0 * 2.0 / 9.0
    return

@pytest.mark.test_scalar_overload
def test_neg(a:float=2.0) -> None:
    A = Scalar(a, None, None)
    C = -A
    C.backward()
    assert A._gradient == -1.0
    return

@pytest.mark.test_scalar_overload
def test_max(a:float=2.0, b:float=3.0) -> None:
    A = Scalar(a, None, None)
    B = Scalar(b, None, None)
    C = A.max(A, B)
    C.backward()
    assert A._gradient == 0.0
    assert B._gradient == 1.0
    return

@pytest.mark.test_scalar_overload
def test_min(a:float=2.0, b:float=3.0) -> None:
    A = Scalar(a, None, None)
    B = Scalar(b, None, None)
    C = A.min(A, B)
    C.backward()
    assert A._gradient == 1.0
    assert B._gradient == 0.0
    return

@pytest.mark.test_scalar_overload
def test_eq(a:float=2.0, b:float=3.0) -> None:
    A = Scalar(a, None, None)
    B = Scalar(b, None, None)
    C = A == B
    C.backward()
    assert A._gradient == 0.0
    assert B._gradient == 0.0
    return

@pytest.mark.test_scalar_overload
def test_lt(a:float=2.0, b:float=3.0) -> None:
    A = Scalar(a, None, None)
    B = Scalar(b, None, None)
    C = A < B
    C.backward()
    assert A._gradient == 0.0
    assert B._gradient == 0.0
    return

@pytest.mark.test_scalar_overload
def test_gt(a:float=2.0, b:float=3.0) -> None:
    A = Scalar(a, None, None)
    B = Scalar(b, None, None)
    C = A > B
    C.backward()
    assert A._gradient == 0.0
    assert B._gradient == 0.0
    return

@pytest.mark.test_scalar_overload
def test_abs(a:float=-2.0) -> None:
    A = Scalar(a, None, None)
    C = A.abs(A)
    C.backward()
    assert A._gradient == -1.0
    return

@pytest.mark.test_scalar_overload
def test_exp(a:float=2.0) -> None:
    A = Scalar(a, None, None)
    C = A.exp()
    C.backward()
    assert A._gradient == math.exp(2.0)
    return

@pytest.mark.test_scalar_overload
def test_log(a:float=2.0) -> None:
    A = Scalar(a, None, None)
    C = A.log()
    C.backward()
    assert A._gradient == 1.0 / 2.0
    return

@pytest.mark.test_scalar_overload
def test_relu(a:float=-2.0) -> None:
    A = Scalar(a, None, None)
    C = A.relu()
    C.backward()
    assert A._gradient == 0.0
    return

@pytest.mark.test_scalar_overload
def test_sigmoid(a:float=-2.0) -> None:
    A = Scalar(a, None, None)
    C = A.sigmoid()
    C.backward()
    sigmoid = 1.0 / (1.0 + math.exp(2.0))
    assert A._gradient == sigmoid * (1.0 - sigmoid)
    return

@pytest.mark.test_scalar_overload
def test_tanh(a:float=-2.0) -> None:
    A = Scalar(a, None, None)
    C = A.tanh()
    C.backward()
    tanh = (math.exp(-2.0) - math.exp(2.0)) / (math.exp(-2.0) + math.exp(2.0))
    assert A._gradient == 1.0 - tanh ** 2
    return

@pytest.mark.test_scalar_overload
def test_compose_backward() -> None:
    A = Scalar(1.0, None, None)
    B = Scalar(2.0, None, None)
    C = A + B
    D = A + C
    E = (D - C) * (D - C)
    E.backward()
    assert A._gradient == 5.0
    assert B._gradient == 2.0
    assert C._gradient == 1.0
    assert D._gradient == 2.0
    return
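
All of the new tests use the custom marker pytest.mark.test_scalar_overload. To keep pytest from warning about an unknown mark, the marker would normally be registered in the test setup; one possible sketch, with the file name and wording assumed rather than taken from the commit:

# conftest.py (hypothetical file): registers the custom marker used above
# so pytest does not emit PytestUnknownMarkWarning.
def pytest_configure(config):
    config.addinivalue_line(
        "markers",
        "test_scalar_overload: backward-pass tests for terox Scalar operators",
    )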
