Neural Networks

https://docs.pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html

Python 3.11

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


class Net(nn.Module):
  
  def __init__(self):
    super(Net, self).__init__()
    # 1 input image channel, 6 output channels, 5x5 square convolution
    # kernel
    self.conv1 = nn.Conv2d(1, 6, 5)
    self.conv2 = nn.Conv2d(6, 16, 5)
    # an affine operation: y = Wx + b
    self.fc1 = nn.Linear(16 * 5 * 5, 120)  # 5*5 from image dimension
    self.fc2 = nn.Linear(120, 84)
    self.fc3 = nn.Linear(84, 10)
  
  def forward(self, input):
    # Convolution layer C1: 1 input image channel, 6 output channels,
    # 5x5 square convolution, it uses RELU activation function, and
    # outputs a Tensor with size (N, 6, 28, 28), where N is the size of the batch
    c1 = F.relu(self.conv1(input))
    # Subsampling layer S2: 2x2 grid, purely functional,
    # this layer does not have any parameter, and outputs a (N, 6, 14, 14) Tensor
    s2 = F.max_pool2d(c1, (2, 2))
    # Convolution layer C3: 6 input channels, 16 output channels,
    # 5x5 square convolution, it uses RELU activation function, and
    # outputs a (N, 16, 10, 10) Tensor
    c3 = F.relu(self.conv2(s2))
    # Subsampling layer S4: 2x2 grid, purely functional,
    # this layer does not have any parameter, and outputs a (N, 16, 5, 5) Tensor
    s4 = F.max_pool2d(c3, 2)
    # Flatten operation: purely functional, outputs a (N, 400) Tensor
    s4 = torch.flatten(s4, 1)
    # Fully connected layer F5: (N, 400) Tensor input,
    # and outputs a (N, 120) Tensor, it uses RELU activation function
    f5 = F.relu(self.fc1(s4))
    # Fully connected layer F6: (N, 120) Tensor input,
    # and outputs a (N, 84) Tensor, it uses RELU activation function
    f6 = F.relu(self.fc2(f5))
    # Gaussian layer OUTPUT: (N, 84) Tensor input, and
    # outputs a (N, 10) Tensor
    output = self.fc3(f6)
    return output
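
Only forward needs to be defined here; the backward function is derived automatically by autograd from the operations used in forward.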


net = Net()
print(net)
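
On recent PyTorch versions, print(net) should show something like:

Net(
  (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
  (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
  (fc1): Linear(in_features=400, out_features=120, bias=True)
  (fc2): Linear(in_features=120, out_features=84, bias=True)
  (fc3): Linear(in_features=84, out_features=10, bias=True)
)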


params = list(net.parameters())

for p in params:
  print(p.size())


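The loop prints ten sizes, a weight and a bias tensor for each of the five layers. Given the dimensions above they come out as:

torch.Size([6, 1, 5, 5])   # conv1.weight
torch.Size([6])            # conv1.bias
torch.Size([16, 6, 5, 5])  # conv2.weight
torch.Size([16])           # conv2.bias
torch.Size([120, 400])     # fc1.weight
torch.Size([120])          # fc1.bias
torch.Size([84, 120])      # fc2.weight
torch.Size([84])           # fc2.bias
torch.Size([10, 84])       # fc3.weight
torch.Size([10])           # fc3.bias
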
input = torch.randn(1, 1, 32, 32)
print(input)
output = net(input)
print(output)
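
Note that the 32x32 input size is not arbitrary: conv1 (5x5 kernel, no padding) shrinks 32 to 28, max-pooling halves it to 14, conv2 shrinks 14 to 10, pooling halves it to 5, so the flattened tensor has 16 * 5 * 5 = 400 features, exactly what fc1 expects. The output therefore has shape (1, 10): ten scores for the single image in the batch.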


target = torch.randn(10)  # a dummy target, for example
print(target)
target = target.view(1, -1)  # make it the same shape as output
print(target)
criterion = nn.MSELoss()
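
As a quick sanity check (my own sketch, not in the tutorial): with the default reduction='mean', nn.MSELoss is just the mean of the squared element-wise differences.

loss = criterion(output, target)
manual = ((output - target) ** 2).mean()
print(torch.allclose(loss, manual))  # True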


# create your optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)

# in your training loop:
optimizer.zero_grad()   # zero the gradient buffers
output = net(input)
loss = criterion(output, target)
print(loss)
loss.backward()
optimizer.step()    # Does the update
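
For plain SGD with no momentum or weight decay, optimizer.step() subtracts lr times the gradient from each parameter. A minimal hand-rolled sketch of the same update (not meant to run on top of optimizer.step(), just to show what it does):

with torch.no_grad():
  for p in net.parameters():
    p -= 0.01 * p.grad  # lr=0.01, matching the optimizer above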

# Weight matrices
print(net.conv1.weight.data)
print(net.conv2.weight.data)
print(net.fc1.weight.data)
print(net.fc2.weight.data)
print(net.fc3.weight.data)

# Biases
print(net.conv1.bias.data)
print(net.conv2.bias.data)
print(net.fc1.bias.data)
print(net.fc2.bias.data)
print(net.fc3.bias.data)
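
One caveat (my note, not from the tutorial): reading weights through .data still works, but it bypasses autograd's safety checks; net.conv1.weight.detach() is the recommended way to get parameter values in current PyTorch.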
