Let's walk through a linear regression example.
Full code:
import torch
import numpy as np
def get_x_y():
    # Generate 300 points that follow y = 2x + 21 exactly
    x = np.random.randint(0, 50, 300)
    y_values = 2 * x + 21
    x = np.array(x, dtype=np.float32)
    y = np.array(y_values, dtype=np.float32)
    # Reshape into column vectors of shape (300, 1)
    x = x.reshape(-1, 1)
    y = y.reshape(-1, 1)
    return x, y
if __name__ == '__main__':
    train_x, train_y = get_x_y()
    input_size = train_x.shape[1]  # input dimension: a single feature
    output_size = 1                # number of outputs
    batch_size = 16                # samples per mini-batch
    my_nn = torch.nn.Sequential(
        torch.nn.Linear(input_size, output_size),
    )
    cost = torch.nn.MSELoss(reduction='mean')  # use MSE as the loss function
    optimizer = torch.optim.Adam(my_nn.parameters(), lr=0.001)  # optimizer
    # Train the network
    losses = []
    for i in range(1000):
        batch_loss = []
        # Mini-batch training: step through the data batch_size samples at a time
        for start in range(0, len(train_x), batch_size):
            end = min(start + batch_size, len(train_x))
            # Inputs and targets don't need requires_grad; only the model parameters do
            xx = torch.tensor(train_x[start:end], dtype=torch.float)
            yy = torch.tensor(train_y[start:end], dtype=torch.float)
            prediction = my_nn(xx)       # calling the model runs the forward pass
            loss = cost(prediction, yy)  # compute the loss
            optimizer.zero_grad()        # zero the accumulated gradients -- don't forget this!
            loss.backward()              # backpropagate; a fresh graph is built each iteration, so retain_graph is not needed
            optimizer.step()             # update the parameters
            batch_loss.append(loss.item())
        # Print the mean loss every 100 epochs
        if i % 100 == 0:
            losses.append(np.mean(batch_loss))
            print(i, np.mean(batch_loss))
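Because the data was generated from y = 2x + 21, we can sanity-check the fit by reading the learned weight and bias off the Linear layer and comparing a prediction against the true line. A minimal sketch, appended to the end of the script above (note that with lr=0.001 and 1000 epochs the bias may not yet have fully converged to 21):

    # The learned parameters should approach the true values w = 2, b = 21
    layer = my_nn[0]
    print('weight:', layer.weight.item(), 'bias:', layer.bias.item())
    # Predict for a new input without tracking gradients
    with torch.no_grad():
        pred = my_nn(torch.tensor([[10.0]]))
    print('prediction for x=10:', pred.item())  # true value: 2 * 10 + 21 = 41

As a design note, the hand-rolled slicing loop above is the simplest way to form mini-batches; a more idiomatic alternative (not what the original code uses) is torch.utils.data.TensorDataset plus DataLoader, which also shuffles the data between epochs:

    from torch.utils.data import TensorDataset, DataLoader
    dataset = TensorDataset(torch.tensor(train_x), torch.tensor(train_y))
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    for xx, yy in loader:
        pass  # same forward / backward / step as above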