autograd.py
import torch
# x = torch.randn(3, requires_grad=True)
# print(x)
# y = x + 2
# print(y)
# z = y*y*2
# # z = z.mean()
# print(z)
# # z is not a scalar, so backward() needs a vector argument for the vector-Jacobian product
# v = torch.tensor([0.1, 1.0, 0.001], dtype=torch.float32)
# z.backward(v)  # computes dz/dx as the vector-Jacobian product
# print(x.grad)  # prints the gradients accumulated in x
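# # Added note (not in the original file): with y = x + 2 and z = 2*y*y,
# # dz/dx = 4*(x + 2) element-wise, so x.grad above should equal v * 4 * (x + 2).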
# #---------------------------------#
# x = torch.randn(3, requires_grad=True)
# print(x)
# # Three ways to stop PyTorch from tracking gradients for x:
# # x.requires_grad_(False)    # 1) modify the flag in place
# # print(x)
# # y = x.detach()             # 2) new tensor with the same data but no gradient history
# # print(y)
# # with torch.no_grad(): ...  # 3) disable tracking for every operation inside the block
# with torch.no_grad():
#     y = x + 2
#     print(y)
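# # Added note (not in the original file): the y computed inside torch.no_grad()
# # has requires_grad == False and no grad_fn, so it is left out of the autograd graph.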
weights = torch.ones(4, requires_grad=True)

for epoch in range(5):
    print(weights)
    model_output = (weights * 3).sum()
    print(model_output)
    model_output.backward()
    print(weights.grad)
    # backward() accumulates into weights.grad, so reset it each epoch
    weights.grad.zero_()
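
# A minimal sketch (not part of the original file): the same reset-the-gradients
# pattern expressed with an optimizer. torch.optim.SGD, step() and zero_grad()
# are standard PyTorch APIs; the learning rate here is an arbitrary illustration.
# optimizer = torch.optim.SGD([weights], lr=0.01)
# for epoch in range(5):
#     model_output = (weights * 3).sum()
#     model_output.backward()
#     optimizer.step()        # update weights from the accumulated gradients
#     optimizer.zero_grad()   # clear gradients so they do not carry over to the next epoch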