# Reference: "PyTorch & Deep Learning Programming" (PyTorch & 深層学習プログラミング),
# Masanori Akaishi, Nikkei BP.
# Shell setup before running this script (not Python — run in a terminal):
#   pip3.11 install torchviz
#   pip3.11 install torchinfo
#   python3.11
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
# Select the compute device: first CUDA GPU when available, otherwise CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
print(device)
# ----- Dataset 1: random synthetic data -----
# 600 training / 100 test samples, each 784 uniform features in [-1, 1],
# paired with integer class labels in [0, 10).
#####
rng = np.random.default_rng()

# training images: float32 tensor of shape (600, 784)
image_train = torch.tensor(rng.uniform(-1, 1, (600, 784))).float()
print(image_train)
print(image_train.shape)

# training labels: int64 tensor of shape (600,)
label_train = torch.tensor(rng.integers(10, size=(600))).long()
print(label_train)
print(label_train.shape)

# fix: reuse the generator created above — the original called
# np.random.default_rng() a second time here for no reason.
# test images: float32 tensor of shape (100, 784)
image_test = torch.tensor(rng.uniform(-1, 1, (100, 784))).float()
print(image_test)
print(image_test.shape)

# test labels: int64 tensor of shape (100,)
label_test = torch.tensor(rng.integers(10, size=(100))).long()
print(label_test)
print(label_test.shape)

# zip each image with its label into (image, label) sample tuples
train_set = tuple(zip(image_train, label_train))
test_set = tuple(zip(image_test, label_test))
print(train_set[599])
print(test_set[99])
#####
# ----- Dataset 2: MNIST -----
#####
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from PIL import Image
from torchvision.transforms.functional import pil_to_tensor

data_root = './data'

# Load MNIST with no transform: each sample is a (PIL image, int label) pair.
train_set_none = datasets.MNIST(
    root=data_root, train=True,
    download=True, transform=None)

image, label = train_set_none[0]
print(type(image))
print(type(label))

# Convert the PIL image to a tensor to inspect its raw contents
tensor = pil_to_tensor(image)
print(type(tensor))
print(tensor.shape)
print(tensor)
# ----- Transform pipeline -----
# (1) PIL image -> float tensor with values in [0, 1]
# (2) normalize [0, 1] -> [-1, 1] via (x - 0.5) / 0.5
# (3) flatten shape [1, 28, 28] -> [784]
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(0.5, 0.5),
    transforms.Lambda(lambda x: x.view(-1)),
])

# MNIST train/test splits; the transform is applied lazily on each access
train_set = datasets.MNIST(
    root=data_root, train=True,
    download=True, transform=transform)
test_set = datasets.MNIST(
    root=data_root, train=False,
    download=True, transform=transform)
#####
# ----- DataLoader setup -----
from torch.utils.data import DataLoader

# mini-batch size shared by both loaders
batch_size = 10

# training loader reshuffles every epoch; test loader keeps a fixed order
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)

# number of mini-batches per epoch in each loader
print(len(train_loader))
print(len(test_loader))

# peek at the first training batch to confirm tensor shapes
images, labels = next(iter(train_loader))
print(images.shape)
print(labels.shape)
# ----- Model dimensions -----
n_input = 1 * 28 * 28   # flattened 1x28x28 MNIST image
n_output = 10           # one logit per digit class
n_hidden = 128          # hidden-layer width
class Net(nn.Module):
    """Two-layer fully connected classifier: linear -> ReLU -> linear.

    Returns raw logits (no softmax); pair with nn.CrossEntropyLoss.
    """
    def __init__(self, n_input, n_output, n_hidden):
        super().__init__()
        # attribute names l1/relu/l2 are part of the public surface
        # (callers read net.l1.weight etc.) — keep them stable
        self.l1 = nn.Linear(n_input, n_hidden)
        self.relu = nn.ReLU(inplace=True)
        self.l2 = nn.Linear(n_hidden, n_output)

    def forward(self, x):
        # hidden projection, nonlinearity, output projection
        return self.l2(self.relu(self.l1(x)))
# ----- Instantiate the model and move it to the chosen device -----
net = Net(n_input, n_output, n_hidden).to(device)

# inspect learnable parameters, first with names, then values only
for named_param in net.named_parameters():
    print(named_param)
for param in net.parameters():
    print(param)
print(net)

# layer-by-layer summary for a single flattened-image input
from torchinfo import summary
summary(net, (1 * 28 * 28,))
# Loss function: CrossEntropyLoss applies LogSoftmax + NLLLoss internally,
# so the model's raw logits are passed in directly.
criterion = nn.CrossEntropyLoss()
# Learning rate
lr = 0.01
# Optimizer: plain stochastic gradient descent over all model parameters
optimizer = optim.SGD(net.parameters(), lr=lr)
# Number of training epochs
num_epochs = 5
# ----- Training / validation loop -----
# Per epoch: (1) one backprop pass over train_loader, (2) a forward-only
# pass over test_loader for validation metrics, (3) a summary print.
for epoch in range(num_epochs):
    # correct-prediction counts for this epoch (accuracy numerators)
    n_train_acc, n_val_acc = 0, 0
    # accumulated un-averaged loss for this epoch
    train_loss, val_loss = 0, 0
    # sample counts for this epoch (denominators)
    n_train, n_test = 0, 0

    # --- training phase ---
    net.train()  # no-op for this model, but standard practice
    for inputs, labels in train_loader:
        # samples in this mini-batch (last batch may be smaller)
        train_batch_size = len(labels)
        n_train += train_batch_size
        # move the batch to the same device as the model
        inputs = inputs.to(device)
        labels = labels.to(device)
        # reset gradients accumulated from the previous step
        optimizer.zero_grad()
        # forward, loss, backward, parameter update
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # predicted class = index of the largest logit
        predicted = torch.max(outputs, 1)[1]
        # criterion returns the batch *mean*; scale back to a sum before
        # accumulating so the epoch average is exact
        train_loss += loss.item() * train_batch_size
        n_train_acc += (predicted == labels).sum().item()

    # --- validation phase ---
    net.eval()
    # fix: disable autograd during evaluation — the original built a
    # computation graph for every validation batch, wasting time and memory
    with torch.no_grad():
        for inputs_test, labels_test in test_loader:
            test_batch_size = len(labels_test)
            n_test += test_batch_size
            inputs_test = inputs_test.to(device)
            labels_test = labels_test.to(device)
            # forward-only prediction and loss
            outputs_test = net(inputs_test)
            loss_test = criterion(outputs_test, labels_test)
            predicted_test = torch.max(outputs_test, 1)[1]
            # mean loss -> sum, as in the training phase
            val_loss += loss_test.item() * test_batch_size
            n_val_acc += (predicted_test == labels_test).sum().item()

    # epoch-level metrics
    train_acc = n_train_acc / n_train
    val_acc = n_val_acc / n_test
    ave_train_loss = train_loss / n_train
    ave_val_loss = val_loss / n_test
    print (f'Epoch [{epoch+1}/{num_epochs}], loss: {ave_train_loss:.5f} acc: {train_acc:.5f} val_loss: {ave_val_loss:.5f}, val_acc: {val_acc:.5f}')
# Learned weight matrices (hidden layer l1, output layer l2)
print(net.l1.weight.data)
print(net.l2.weight.data)
# Learned bias vectors
print(net.l1.bias.data)
print(net.l2.bias.data)