Contents
- Saving
- Loading
- Example
Saving
torch.save({
    'epoch': epoch,  # save the epoch count
    'model_state_dict': model.state_dict(),  # the model's state
    'optimizer_state': optimizer.state_dict(),  # the optimizer's state
}, 'checkpoint')  # path; each save here overwrites the checkpoint file
Of course, the dictionary can hold arbitrary content, and the path can also change with the epoch.
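For instance, a minimal sketch of saving to a per-epoch filename (the naming pattern here is just an illustration, not from the original):

torch.save({
    'epoch': epoch,
    'model_state_dict': model.state_dict(),
    'optimizer_state': optimizer.state_dict(),
}, f'checkpoint_epoch_{epoch}')  # a new file per epoch, so earlier checkpoints are kept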
There is also another way to write it:
torch.save((epoch, model.state_dict(), optimizer.state_dict()), 'checkpoint')
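Since this form pickles a plain tuple, loading it simply unpacks the values in the same order; a minimal sketch:

epoch, model_state, optimizer_state = torch.load('checkpoint')
model.load_state_dict(model_state)
optimizer.load_state_dict(optimizer_state)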
Loading
checkpoint = torch.load('checkpoint')  # load the checkpoint
epoch = checkpoint['epoch']  # read the epoch
model.load_state_dict(checkpoint['model_state_dict'])  # load the model state
optimizer.load_state_dict(checkpoint['optimizer_state'])  # load the optimizer state
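The usual reason to store the epoch is to resume training where it left off. A minimal sketch, assuming a total epoch count epochs and the same training step as in the example below:

start_epoch = checkpoint['epoch'] + 1  # continue from the next epoch
model.train()  # ensure training mode before resuming (matters for dropout/batchnorm)
for epoch in range(start_epoch, epochs + 1):
    ...  # the same forward/backward/step sequence as before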
Example
import torch
import torch.nn as nn

class LinearRegressionModel(nn.Module):
    def __init__(self, input_shape, output_shape):
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(input_shape, output_shape)

    def forward(self, x):
        out = self.linear(x)
        return out

def train_model(x_train, y_train):
    # Set up the model, optimizer, and loss function
    model = LinearRegressionModel(x_train.shape[1], 1)
    epochs = 10  # train for 10 epochs
    learning_rate = 0.01
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)  # optimizer
    criterion = nn.MSELoss()  # MSE loss; the goal is to minimize it
    for epoch in range(1, epochs + 1):
        optimizer.zero_grad()  # clear the gradients
        outputs = model(x_train)  # forward pass
        loss = criterion(outputs, y_train)  # compute the loss
        loss.backward()  # backpropagation
        optimizer.step()  # update the weights
        # Save
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state': optimizer.state_dict(),
        }, 'checkpoint')  # each save here overwrites the checkpoint file
    # Load
    checkpoint = torch.load('checkpoint')  # load the checkpoint
    epoch = checkpoint['epoch']  # read the epoch
    model.load_state_dict(checkpoint['model_state_dict'])  # load the model state
    optimizer.load_state_dict(checkpoint['optimizer_state'])  # load the optimizer state

if __name__ == '__main__':
    x_train = torch.randn(100, 4)  # 100 random 4-dimensional samples as the training X
    y_train = torch.randn(100, 1)  # training labels
    train_model(x_train, y_train)
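One practical caveat not covered by the example above: a checkpoint saved on a GPU will fail to load on a CPU-only machine unless its tensors are remapped, which torch.load supports through its map_location argument:

checkpoint = torch.load('checkpoint', map_location=torch.device('cpu'))  # remap GPU tensors to CPU
model.load_state_dict(checkpoint['model_state_dict'])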