November 6, 2022

Python PyTorch - Tensor Library and (Deep) Neural Networks Package

PyTorch Package in Python

conda install pytorch -c pytorch
conda list pytorch

import torch

print(torch.__version__)

x = torch.Tensor([5.5, 3]) # torch.Tensor is the class constructor (always the default dtype); torch.tensor is a factory that infers dtype -- both copy data to a new object
y = torch.tensor([4., 5., 6.])
V = torch.tensor(Vector_data)
M = torch.tensor(Matrix_data)
T = torch.tensor(Tensor_data)
v = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float) # torch.Tensor does not accept a dtype argument; use torch.tensor
x = torch.tensor([[1, 2],  [3, 4], [5, 6]])
b = torch.tensor([1.], requires_grad=True)
torch.tensor(np.array([1., 2., 3.]))
x = torch.tensor([1., 2., 3.], requires_grad=True)
X_train, Y_train, X_val, Y_val = map(torch.tensor, (X_train, Y_train, X_val, Y_val))
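
torch.Tensor and torch.tensor also differ in dtype handling; a quick check (default settings assumed):

a = torch.Tensor([1, 2, 3]) # constructor: always the default dtype (float32)
b = torch.tensor([1, 2, 3]) # factory: infers dtype from the data (int64)
print(a.dtype, b.dtype) # torch.float32 torch.int64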

T = torch.from_numpy(np_array) # a tensor created with from_numpy shares memory with the NumPy array
x_np = ten.numpy() # likewise, .numpy() on a CPU tensor shares memory

torch.as_tensor(data) # as_tensor avoids a copy when possible, sharing memory with a NumPy array of matching dtype and device
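
A small demonstration of the memory sharing noted above (np_array is a stand-in name):

import numpy as np
np_array = np.array([1., 2., 3.])
T = torch.from_numpy(np_array)
np_array[0] = 99. # mutate the NumPy array in place
print(T) # tensor([99., 2., 3.], dtype=torch.float64) -- same underlying buffer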

x = torch.empty(5, 3)

x = torch.zeros(2, 2, dtype=torch.long)
t = torch.zeros(3, 2)
bias2 = torch.zeros(4, requires_grad=True)
y = torch.zeros_like(x)

a = torch.ones(5)
x = torch.ones(2, 2, requires_grad=True)
y = torch.ones([3, 2])
a = torch.ones(3, 2, device=cuda0)
y = torch.ones_like(x, device=device)

torch.eye(2)
T = torch.eye(4)

x = torch.rand(3, 2)
w = torch.rand([N], requires_grad=True)
w = torch.rand(100000, requires_grad=True, device=cuda0)
x = torch.randn(1)
t = torch.randn(3, 5)
b = torch.randn([100, 100])
x = torch.randn((3, 4, 5))
x = torch.randn(3, requires_grad=True)
T = torch.randn([20, 1], requires_grad=True)
x = torch.randn_like(x, dtype=torch.float)
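
rand samples uniformly on [0, 1) while randn samples a standard normal; a quick sanity check, seeded for reproducibility:

torch.manual_seed(0)
u = torch.rand(2, 3) # uniform on [0, 1)
n = torch.randn(2, 3) # standard normal: mean 0, std 1
print(u.min() >= 0, u.max() < 1) # tensor(True) tensor(True)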

print(x.size())
t.shape
print(x[:, 1]) 
print(x[0, :])
print(x.item()) # works only for a one-element tensor
y = x[1, 1]

print(t.dtype)
torch.get_default_dtype()
print(t.device)
print(t.layout)

print(x.view(2, 12)) # view reshapes without copying (requires contiguous memory)
y = x.view(6, -1)
t.reshape(1, 9)
x.reshape(2, 3, 3)
x.reshape(3, 1, 4, 4)
t.reshape(1, 9).shape
Rank = len(t.shape) # rank = number of dimensions (axes)
t.flatten()
t.flatten(start_dim=1)
t.reshape(1, -1)[0]
x.reshape(-1)
t.view(-1)
t.view(t.numel())

t.numel() # Number of elements
torch.tensor(t.shape).prod()
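
How reshape, the inferred -1 dimension, and numel fit together; a minimal sketch:

t = torch.arange(12) # 12 elements
print(t.numel()) # 12
print(t.reshape(3, 4).shape) # torch.Size([3, 4])
print(t.view(6, -1).shape) # -1 infers the remaining dim: torch.Size([6, 2])
print(t.reshape(3, 4).flatten().shape) # back to torch.Size([12])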

t + 8
Y - 7
x * 2
t1 / 4
T1.add(6)
X6.sub(9999)
t1.mul(123)
t1.div(3)
z = x + y
t1 + torch.tensor(np.broadcast_to(2, t1.shape), dtype=torch.float32)
z = x - y
z = x * y
print(torch.add(x, y))
torch.add(x, y, out=result)
z = y.add(x)
y.add_(x) # in-place: y = y.add(x)
T.add_(2)
t = torch.sum(z)
z_1 = torch.cat([x_1, y_1])
torch.cat((t1, t2), dim=0) # row-wise
torch.cat((x1, x2), dim=1) # column-wise
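
Element-wise arithmetic broadcasts scalars, the trailing-underscore variants mutate in place, and cat joins along an existing dim; for example:

t1 = torch.ones(2, 2)
t2 = torch.zeros(2, 2)
print(t1 + 8) # scalar broadcast to every element
t1.add_(1) # in-place: t1 is now all 2s
print(torch.cat((t1, t2), dim=0).shape) # row-wise: torch.Size([4, 2])
print(torch.cat((t1, t2), dim=1).shape) # column-wise: torch.Size([2, 4])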

t1.eq(0)
t1.gt(6)
t1.ge(4)
t1.le(5)
t1.lt(8)
t1 < t2
T.abs()
T.sqrt()
t.neg()
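
The comparison methods return boolean tensors element by element; for example:

t1 = torch.tensor([1., -2., 3.])
print(t1.gt(0)) # tensor([ True, False,  True])
print(t1.abs()) # tensor([1., 2., 3.])
print(t1.lt(torch.tensor([0., 0., 5.]))) # element-wise against another tensor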

print(a.exp())
print(a.exp().sum(0))
print(a.exp().sum(-1)) # sum over the last axis
r = 1/(1 + torch.exp(-y)) # sigmoid, written out by hand
h2 = a2.exp()/a2.exp().sum(-1).unsqueeze(-1) # softmax over the last axis, written out by hand
print(torch.mean(w).item(), b.item())
torch.argmax(y)
pred = torch.argmax(y_hat, dim=1)
(torch.argmax(y_hat, dim=1) == y).float().mean().item()
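
The argmax accuracy idiom above, spelled out on toy data (y_hat and y are hypothetical):

y_hat = torch.tensor([[0.1, 0.9], [0.8, 0.2]]) # scores for 2 samples, 2 classes
y = torch.tensor([1, 1]) # true labels
pred = torch.argmax(y_hat, dim=1) # tensor([1, 0])
print((pred == y).float().mean().item()) # 0.5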

np.broadcast_to(4, t1.shape)

t.reshape(1, 12).squeeze()
t.reshape(1, -1).squeeze()
t.reshape(1, 12).squeeze().unsqueeze(dim=0)
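
squeeze drops size-1 dimensions and unsqueeze adds one back; the shapes step by step:

t = torch.arange(12)
print(t.reshape(1, 12).shape) # torch.Size([1, 12])
print(t.reshape(1, 12).squeeze().shape) # torch.Size([12])
print(t.reshape(1, 12).squeeze().unsqueeze(dim=0).shape) # torch.Size([1, 12])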

x = x.new_ones(5, 3, dtype=torch.double)      # new_* methods take in sizes

x = torch.linspace(0, 1, steps=5)

torch.manual_seed(2019)

c = torch.matmul(a, b)
torch.matmul(a_cpu, b_cpu)
torch.dot(w, x)
y = torch.dot(torch.ones([N]), x)
y = torch.dot(3*torch.ones([N], device=cuda0), x) - 2

np.add(a, 1, out=a) # NumPy in-place add (a must be a NumPy array here)

torch.stack((t1, t2, t3))
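
matmul is the general matrix product, dot is for 1-D vectors, and stack joins along a new dimension; a small sketch:

a = torch.randn(2, 3)
b = torch.randn(3, 4)
print(torch.matmul(a, b).shape) # torch.Size([2, 4])
w = torch.tensor([1., 2., 3.])
x = torch.tensor([4., 5., 6.])
print(torch.dot(w, x)) # 1*4 + 2*5 + 3*6 = tensor(32.)
print(torch.stack((w, x)).shape) # new leading dim: torch.Size([2, 3])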

import torch.autograd as autograd


x = torch.ones([2, 3], requires_grad=True)
y = x + 6
r = 2/(1 + torch.exp(-y))
print(r)
s = torch.sum(r)
s.backward()
print(x.grad)

t.backward() # backpropagation: computes gradients into the .grad of each leaf tensor
weights1.requires_grad_()
b.grad.zero_()
a1.sigmoid()

with torch.no_grad():
    w -= learning_rate * w.grad
    b -= learning_rate * b.grad
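
Tying the autograd pieces together: one manual gradient-descent step on a toy 1-D fit (all names here are illustrative):

w = torch.randn(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
learning_rate = 0.01
x, y = torch.tensor([2.0]), torch.tensor([5.0]) # a single toy data point
loss = ((w * x + b - y) ** 2).sum()
loss.backward() # fills w.grad and b.grad
with torch.no_grad(): # update without tracking history
    w -= learning_rate * w.grad
    b -= learning_rate * b.grad
w.grad.zero_() # clear accumulated gradients before the next step
b.grad.zero_()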

CUDA (Compute Unified Device Architecture) for GPUs (Graphics Processing Units)
torch.version.cuda
torch.cuda.is_available()
print(torch.cuda.device_count())
print(torch.cuda.device(0))
print(torch.cuda.get_device_name(0))

device = torch.device("cpu")
device = torch.device("cuda") # to use GPUs
cuda0 = torch.device('cuda:0')
b = torch.ones(3, 2, device=cuda0)
Y_train = Y_train.to(device)
fun6.to(device)

t = t.cuda()
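
A common device-agnostic pattern built from the calls above:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x = torch.ones(3, 2, device=device) # create directly on the device
y = torch.ones(3, 2).to(device) # or move an existing tensor
z = (x + y).cpu() # bring results back to the CPU for NumPy/matplotlib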

import torch.nn as nn
nn.Parameter(torch.zeros(2))
self.weights1 = nn.Parameter(torch.randn(2, 2) / math.sqrt(2))
self.lin2 = nn.Linear(2, 4)
self.net = nn.Sequential(nn.Linear(2, 2), nn.Sigmoid(), nn.Linear(2, 4), nn.Softmax(dim=1)) # Softmax needs an explicit dim
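
A minimal nn.Module subclass combining those pieces (TinyNet is a hypothetical name):

import math

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.weights1 = nn.Parameter(torch.randn(2, 2) / math.sqrt(2))
        self.lin2 = nn.Linear(2, 4)

    def forward(self, x):
        return self.lin2(torch.sigmoid(x @ self.weights1))

net = TinyNet()
print(net(torch.randn(5, 2)).shape) # torch.Size([5, 4])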

import torch.nn.functional as F
loss_fn = F.cross_entropy
loss = F.cross_entropy(y_hat, Y_train)

import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
opt = optim.SGD(fn.parameters(), lr=learning_rate)
opt = optim.SGD(fn.parameters(), lr=1)
opt.step()
opt.zero_grad()
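
One hypothetical training step combining the loss and optimizer snippets (model and data are toys):

model = nn.Linear(2, 4)
opt = optim.SGD(model.parameters(), lr=0.01)
X_train = torch.randn(8, 2) # toy inputs
Y_train = torch.randint(0, 4, (8,)) # toy class labels
loss = F.cross_entropy(model(X_train), Y_train)
loss.backward()
opt.step() # apply the update
opt.zero_grad() # reset gradients for the next batch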

import torchvision
import torchvision.transforms as transforms
train_set = torchvision.datasets.FashionMNIST(root='./data/FashionMNIST', train=True, download=True, transform=transforms.Compose([transforms.ToTensor()]))
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(train_set)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=10)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True)
train_set.targets # train_labels is deprecated in newer torchvision
train_set.targets.bincount() # number of samples per class
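
Iterating the loader yields (images, labels) batches; inspecting the first one (shapes assume FashionMNIST with batch_size=10):

for images, labels in train_loader:
    print(images.shape, labels.shape) # torch.Size([10, 1, 28, 28]) torch.Size([10])
    break # just look at the first batch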

image, label = next(iter(train_set))
plt.imshow(image.squeeze(), cmap='gray')

images, labels = next(iter(train_loader))
grid = torchvision.utils.make_grid(images, nrow=10)
plt.figure(figsize=(15,15))
plt.imshow(np.transpose(grid, (1, 2, 0))) # channels-first (C, H, W) to channels-last (H, W, C) for matplotlib
