# Reference: "PyTorch & Deep Learning Programming" by Masanori Akaishi, Nikkei BP
# Setup (run in the shell before starting `python3.11`):
#   pip3.11 install torchviz
#   pip3.11 install torchinfo
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
# Prepare synthetic data: 100 samples with 2 features and random binary labels.
x_data = torch.randn(100, 2)  # features drawn from N(0, 1), shape (100, 2)
label_rng = np.random.default_rng()
y_data = torch.tensor(label_rng.integers(2, size=(100, 1)))  # labels in {0, 1}

# Peek at the first five rows of each.
print(x_data[:5, :])
print(y_data[:5, :])
# Split into training / validation sets (shuffling happens inside the split).
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(
    x_data,
    y_data,
    train_size=70,
    test_size=30,
    random_state=123,  # fixed seed so the split is reproducible
)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
# Model definition: a single linear layer + sigmoid (logistic regression).
n_input= 2  # number of input features per sample
n_output = 1  # one output: probability of class 1
class Net(nn.Module):
    """Logistic-regression model: one linear layer followed by a sigmoid.

    Maps n_input features to n_output values squashed into (0, 1).
    """

    def __init__(self, n_input, n_output):
        super().__init__()
        # Submodule names kept as 'l1'/'sigmoid' so parameter names printed
        # by named_parameters() (l1.weight, l1.bias) stay the same.
        self.l1 = nn.Linear(n_input, n_output)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Linear projection, then squash to a probability.
        return self.sigmoid(self.l1(x))
# Instantiate the model and inspect it.
net = Net(n_input, n_output)

# Show each parameter with its name (e.g. 'l1.weight'), then without names.
for named_param in net.named_parameters():
    print(named_param)
for param in net.parameters():
    print(param)
print(net)

# Tabular model summary for a single (unbatched) 2-feature input.
from torchinfo import summary
summary(net, (2,))
# Tensorize the training data.
# NOTE: x_train / y_train are already torch tensors (train_test_split only
# indexes them), so re-wrapping with torch.tensor(...) emits a UserWarning
# and makes an unnecessary copy — convert the dtype directly instead.
inputs = x_train.float()
labels = y_train.float()
# Tensorize the validation data.
inputs_test = x_test.float()
labels_test = y_test.float()
# Loss function: binary cross-entropy over sigmoid probabilities.
criterion = nn.BCELoss()
# Learning rate
lr = 0.01
# Optimizer: plain stochastic gradient descent over the model parameters.
optimizer = optim.SGD(net.parameters(), lr=lr)
# Number of training epochs
num_epochs = 100
# Training loop
for epoch in range(num_epochs):
    # ----- training phase -----
    # Reset accumulated gradients (PyTorch accumulates by default).
    optimizer.zero_grad()
    # Forward pass
    outputs = net(inputs)
    # Loss
    loss = criterion(outputs, labels)
    # Backward pass: compute gradients
    loss.backward()
    # Parameter update
    optimizer.step()
    # Scalar loss for logging
    train_loss = loss.item()
    # Hard 0/1 predictions at the 0.5 threshold
    predicted = torch.where(outputs < 0.5, 0, 1)
    # Accuracy as a plain Python float
    train_acc = ((predicted == labels).sum() / len(y_train)).item()

    # ----- validation phase -----
    # no_grad(): the validation pass needs no autograd graph; without it every
    # epoch builds (and keeps, via outputs_test) a useless graph.
    with torch.no_grad():
        # Forward pass on held-out data
        outputs_test = net(inputs_test)
        # Validation loss
        loss_test = criterion(outputs_test, labels_test)
        val_loss = loss_test.item()
        # Hard 0/1 predictions at the 0.5 threshold
        predicted_test = torch.where(outputs_test < 0.5, 0, 1)
        # Validation accuracy as a plain Python float
        val_acc = ((predicted_test == labels_test).sum() / len(y_test)).item()

    if epoch % 10 == 0:
        print(f'Epoch [{epoch}/{num_epochs}], loss: {train_loss:.5f} acc: {train_acc:.5f} val_loss: {val_loss:.5f}, val_acc: {val_acc:.5f}')