# Reference: "PyTorch & Deep Learning Programming" by Masanori Akaishi (Nikkei BP)
# Setup — run these in a shell before starting the interpreter:
#   pip3.11 install torchviz
#   pip3.11 install torchinfo
#   python3.11
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchinfo import summary
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
# Select the compute device: the first CUDA GPU when one is available,
# otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
print(device)
# --- Dataset preparation 2: CIFAR10 ---
# Load one raw (untransformed) sample first, to inspect its type and shape.
data_root = './data'
from PIL import Image
from torchvision.transforms.functional import pil_to_tensor
# transform=None so the dataset yields raw samples (downloads on first run).
train_set_none = datasets.CIFAR10(
    root = data_root, train = True,
    download = True, transform = None)
image, label = train_set_none[0]
print(type(image))  # presumably a PIL image — pil_to_tensor below expects one
print(type(label))  # presumably an int class index
# Convert the PIL image to a tensor; CIFAR10 images are 32x32 RGB,
# so the expected shape is (3, 32, 32) — confirm from the printout below.
tensor = pil_to_tensor(image)
print(type(tensor))
print(tensor.shape)
print(tensor)
# Data-transform pipelines.
# Training pipeline: resize to 224 (ResNet input size), random horizontal
# flip, tensor conversion, normalization to roughly [-1, 1], and random
# erasing as extra augmentation.
_train_steps = [
    transforms.Resize(224),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ToTensor(),
    transforms.Normalize(0.5, 0.5),
    transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3),
                             value=0, inplace=False),
]
transform_train = transforms.Compose(_train_steps)

# Evaluation pipeline: deterministic — resize and normalize only.
_test_steps = [
    transforms.Resize(224),
    transforms.ToTensor(),
    transforms.Normalize(0.5, 0.5),
]
transform_test = transforms.Compose(_test_steps)
# Training dataset (augmenting transforms applied on the fly).
train_set_full = datasets.CIFAR10(
    root = data_root, train = True,
    download = True, transform = transform_train)
# Validation dataset (deterministic transforms only).
test_set_full = datasets.CIFAR10(
    root = data_root, train = False,
    download = True, transform = transform_test)
from torch.utils.data import Subset
# Keep only the first 600 / 100 samples so the demo trains quickly.
train_set = Subset(train_set_full, list(range(600)))
test_set = Subset(test_set_full, list(range(100)))
# --- DataLoader preparation ---
batch_size = 10
# Loader for the training split: reshuffled every epoch.
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
# Loader for the validation split: fixed order.
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)
# Number of mini-batches per epoch in each loader.
print(len(train_loader))
print(len(test_loader))
# Peek at the first training batch to confirm tensor shapes.
images, labels = next(iter(train_loader))
print(images.shape)
print(labels.shape)
# --- Model definition: transfer learning with ResNet-18 ---
n_output = 10  # CIFAR10 has 10 classes
from torchvision import models
# Load the pretrained model.
# pretrained=True downloads the trained parameters along with the architecture.
# NOTE(review): `pretrained=` is deprecated/removed in newer torchvision in
# favor of `weights=models.ResNet18_Weights.DEFAULT` — confirm the installed
# version before changing.
net = models.resnet18(pretrained = True)
# Inspect (name, tensor) pairs of every parameter.
for parameter in net.named_parameters():
    print(parameter)
# Inspect the parameter tensors only.
for parameter in net.parameters():
    print(parameter)
net = net.to(device)
print(net)
summary(net,(10,3,224,224))
# Replace the final fully-connected layer so the head matches n_output classes.
print(net.fc)
print(net.fc.in_features)
fc_in_features = net.fc.in_features
net.fc = nn.Linear(fc_in_features, n_output)
# Move the model (including the freshly created head) to the device again.
net = net.to(device)
print(net)
summary(net,(10,3,224,224))
# Loss function: multi-class cross-entropy (expects raw logits).
criterion = nn.CrossEntropyLoss()
# Learning rate.
lr = 0.001
# Optimizer: SGD with momentum over all (fine-tuned) parameters.
optimizer = optim.SGD(net.parameters(), momentum=0.9, lr=lr)
# Number of training epochs.
num_epochs = 5
# --- Training loop ---
for epoch in range(num_epochs):
    # Correct-prediction counts for this epoch (accuracy numerators).
    n_train_acc, n_val_acc = 0, 0
    # Accumulated (un-averaged) loss for this epoch.
    train_loss, val_loss = 0, 0
    # Number of samples seen this epoch (denominators).
    n_train, n_test = 0, 0

    # Training phase
    net.train()
    for inputs, labels in train_loader:
        # Samples in this mini-batch (the last batch may be smaller).
        train_batch_size = len(labels)
        n_train += train_batch_size
        # Move the batch to the same device as the model.
        inputs = inputs.to(device)
        labels = labels.to(device)
        # Reset gradients accumulated from the previous step.
        optimizer.zero_grad()
        # Forward pass.
        outputs = net(inputs)
        # Loss (CrossEntropyLoss averages over the batch).
        loss = criterion(outputs, labels)
        # Backward pass.
        loss.backward()
        # Parameter update.
        optimizer.step()
        # Predicted class = argmax over the logits.
        predicted = torch.max(outputs, 1)[1]
        # Multiply by batch size to undo the per-batch averaging, so the
        # epoch average below is exact even with a smaller final batch.
        train_loss += loss.item() * train_batch_size
        n_train_acc += (predicted == labels).sum().item()

    # Validation phase
    net.eval()
    # FIX: run validation under torch.no_grad() — the original evaluated
    # without it, building autograd graphs and wasting memory/compute.
    with torch.no_grad():
        for inputs_test, labels_test in test_loader:
            # Samples in this mini-batch.
            test_batch_size = len(labels_test)
            n_test += test_batch_size
            # Move the batch to the same device as the model.
            inputs_test = inputs_test.to(device)
            labels_test = labels_test.to(device)
            # Forward pass only.
            outputs_test = net(inputs_test)
            loss_test = criterion(outputs_test, labels_test)
            predicted_test = torch.max(outputs_test, 1)[1]
            # Undo the batch averaging before accumulating (see above).
            val_loss += loss_test.item() * test_batch_size
            n_val_acc += (predicted_test == labels_test).sum().item()

    # Epoch-level metrics.
    train_acc = n_train_acc / n_train
    val_acc = n_val_acc / n_test
    avg_train_loss = train_loss / n_train
    avg_val_loss = val_loss / n_test
    # Progress report.
    print(f'Epoch [{(epoch+1)}/{num_epochs}], loss: {avg_train_loss:.5f} acc: {train_acc:.5f} val_loss: {avg_val_loss:.5f}, val_acc: {val_acc:.5f}')