# 参考文献: PyTorch & 深層学習プログラミング 赤石雅典著 日経BP社
# Reference: "PyTorch & Deep Learning Programming", Masanori Akaishi, Nikkei BP
#
# Setup (run these in a shell, not inside Python):
#   pip3.11 install torchviz
#   pip3.11 install torchinfo
#   python3.11
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
# --- Data preparation ---
# 150 samples with 2 random features each, plus random class labels in {0, 1, 2}.
x_data = torch.randn(150, 2)
rng = np.random.default_rng()
y_data = torch.tensor(rng.integers(3, size=150))

# Peek at the first five samples and labels.
print(x_data[:5, :])
print(y_data[:5])
# --- Split into training and validation sets (shuffles at the same time) ---
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(
    x_data, y_data,
    train_size=75,
    test_size=75,
    random_state=123,  # fixed seed so the split is reproducible
)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
# --- Model dimensions ---
n_input = 2   # number of input features
n_output = 3  # number of output classes
class Net(nn.Module):
    """Minimal multi-class classifier: a single fully-connected layer.

    Produces raw logits; pair with ``nn.CrossEntropyLoss``, which applies
    the softmax internally.
    """

    def __init__(self, n_input, n_output):
        super().__init__()
        # Single linear layer mapping n_input features to n_output class scores.
        self.l1 = nn.Linear(n_input, n_output)

    def forward(self, x):
        # Return the raw logits directly.
        return self.l1(x)
# --- Instantiate the model and inspect it ---
net = Net(n_input, n_output)

# Parameters with their names, then the raw tensors alone.
for named_param in net.named_parameters():
    print(named_param)
for param in net.parameters():
    print(param)

# Module structure.
print(net)

# Layer-by-layer summary via torchinfo.
from torchinfo import summary
summary(net, (2,))
# --- Convert split data to the dtypes the model expects ---
# x_train / y_train etc. are already torch.Tensors (train_test_split preserves
# the input type), and calling torch.tensor() on an existing tensor emits a
# UserWarning — the documented idiom for copying a tensor is clone().detach().
inputs = x_train.clone().detach().float()        # features as float32
labels = y_train.clone().detach().long()         # class indices as int64
inputs_test = x_test.clone().detach().float()
labels_test = y_test.clone().detach().long()
# --- Training setup ---
# Multi-class loss on raw logits (softmax applied internally).
criterion = nn.CrossEntropyLoss()

# Learning rate.
lr = 0.01

# Plain stochastic gradient descent over all model parameters.
optimizer = optim.SGD(net.parameters(), lr=lr)

# Number of training iterations.
num_epochs = 100
# --- Training loop (full-batch gradient descent) ---
for epoch in range(num_epochs):
    # Training phase
    optimizer.zero_grad()              # reset gradients from the previous step
    outputs = net(inputs)              # forward pass
    loss = criterion(outputs, labels)  # loss on the training set
    loss.backward()                    # back-propagate gradients
    optimizer.step()                   # update parameters

    # Predicted class = index of the largest logit.
    predicted = outputs.argmax(dim=1)
    train_loss = loss.item()
    train_acc = (predicted == labels).sum().item() / len(labels)

    # Validation phase — no_grad() avoids building an autograd graph for a
    # pass we never back-propagate through (the original tracked gradients
    # here unnecessarily).
    with torch.no_grad():
        outputs_test = net(inputs_test)
        loss_test = criterion(outputs_test, labels_test)
        predicted_test = outputs_test.argmax(dim=1)
        val_loss = loss_test.item()
        val_acc = (predicted_test == labels_test).sum().item() / len(labels_test)

    # Report every 10 epochs.
    if epoch % 10 == 0:
        print (f'Epoch [{epoch}/{num_epochs}], loss: {train_loss:.5f} acc: {train_acc:.5f} val_loss: {val_loss:.5f}, val_acc: {val_acc:.5f}')
# --- Learned parameters of the trained linear layer ---
# Weight matrix first, then the bias vector.
for learned in (net.l1.weight.data, net.l1.bias.data):
    print(learned)