PyTorch Frozen Training (Part 2)

2024-11-15 09:17:46
import torch.nn as nn
import torch.optim as optim
import torch
import numpy as np

np.random.seed(0)
torch.manual_seed(0)
x = torch.randn((3, 8))
label = torch.randint(0, 5, [3]).long()
epoch_N = 10


# Define a simple network
class MyNet(nn.Module):
    def __init__(self, num_class=5):
        super(MyNet, self).__init__()
        # bias=False here; with bias=True the biases would get different random
        # initializations, so the fc weights after demo_01 and demo_02 would not match
        self.fc1 = nn.Linear(8, 4, bias=False)
        self.fc1.weight = nn.Parameter(torch.ones((4, 8), dtype=torch.float32))
        self.fc2 = nn.Linear(4, num_class, bias=False)
        self.fc2.weight = nn.Parameter(torch.ones((num_class, 4), dtype=torch.float32))

    def forward(self, x):
        return self.fc2(self.fc1(x))


def demo_01():
    model = MyNet()
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=1e-2)  # all of the model's parameters are passed in

    for epoch in range(epoch_N):
        output = model(x)
        loss = loss_fn(output, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # model parameters after training
    print("model.fc1.weight", model.fc1.weight)
    print("model.fc2.weight", model.fc2.weight)


def demo_02():
    model = MyNet()
    loss_fn = nn.CrossEntropyLoss()
    # define two optimizers, each optimizing a different layer's parameters
    optimizer1 = optim.SGD(model.fc1.parameters(), lr=1e-2)
    optimizer2 = optim.SGD(model.fc2.parameters(), lr=1e-2)
    for epoch in range(epoch_N):
        output = model(x)
        loss = loss_fn(output, label)
        optimizer1.zero_grad()
        optimizer2.zero_grad()
        loss.backward()
        optimizer2.step()
        optimizer1.step()
    # model parameters after training
    print("model.fc1.weight", model.fc1.weight)
    print("model.fc2.weight", model.fc2.weight)


if __name__ == "__main__":
    demo_01()
    demo_02()

'''
model.fc1.weight Parameter containing:
tensor([[1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997],
        [1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997],
        [1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997],
        [1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997]],
       requires_grad=True)
model.fc2.weight Parameter containing:
tensor([[0.9986, 0.9986, 0.9986, 0.9986],
        [1.0317, 1.0317, 1.0317, 1.0317],
        [0.9986, 0.9986, 0.9986, 0.9986],
        [0.9726, 0.9726, 0.9726, 0.9726],
        [0.9986, 0.9986, 0.9986, 0.9986]], requires_grad=True)
model.fc1.weight Parameter containing:
tensor([[1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997],
        [1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997],
        [1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997],
        [1.0007, 1.0012, 1.0004, 1.0001, 0.9999, 0.9991, 0.9998, 0.9997]],
       requires_grad=True)
model.fc2.weight Parameter containing:
tensor([[0.9986, 0.9986, 0.9986, 0.9986],
        [1.0317, 1.0317, 1.0317, 1.0317],
        [0.9986, 0.9986, 0.9986, 0.9986],
        [0.9726, 0.9726, 0.9726, 0.9726],
        [0.9986, 0.9986, 0.9986, 0.9986]], requires_grad=True)

Conclusion: using one optimizer over all parameters and using two optimizers over different layers' parameters produce exactly the same weight updates.
'''
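
As a follow-up to that conclusion, here is a minimal sketch of actual frozen training with the same network (my own addition, not part of the original post; the name demo_03 and the choice to freeze fc1 are assumptions). It reuses MyNet, x, label, and epoch_N from the script above, freezes fc1 by setting requires_grad=False on its parameters, and hands only the remaining trainable parameters to the optimizer, so fc1.weight keeps its all-ones initialization while fc2.weight is updated.

def demo_03():
    model = MyNet()
    loss_fn = nn.CrossEntropyLoss()
    # Freeze fc1: its parameters receive no gradients and are never updated.
    for p in model.fc1.parameters():
        p.requires_grad = False
    # Hand only the still-trainable parameters (here, fc2) to the optimizer.
    optimizer = optim.SGD([p for p in model.parameters() if p.requires_grad], lr=1e-2)
    for epoch in range(epoch_N):
        output = model(x)
        loss = loss_fn(output, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # fc1.weight should still be all ones; only fc2.weight changes.
    print("model.fc1.weight", model.fc1.weight)
    print("model.fc2.weight", model.fc2.weight)

As a side note on the same conclusion, the two-optimizer setup in demo_02 can also be written as a single optimizer with per-layer parameter groups, which is the more common form when each layer needs its own settings (again my own illustration, not from the original post):

optimizer = optim.SGD(
    [{"params": model.fc1.parameters()}, {"params": model.fc2.parameters()}],
    lr=1e-2,
)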