与模型的超参数设置相关:想要在较短的 epoch 内获得较好的效果,可以将参数初始化为接近目标解的值,并适当降低学习率。可以尝试的设置代码如下:
def train(net, train_features, test_features, train_labels, test_labels,
          num_epochs=200, lr=0.003):
    """Train ``net`` with MSE loss via minibatch SGD and return the test loss.

    Args:
        net: model to train. No bias term is expected here, because the
            polynomial feature matrix already contains the constant term.
        train_features: training design matrix.
        test_features: test design matrix.
        train_labels: training targets (flattened; reshaped to a column here).
        test_labels: test targets (flattened; reshaped to a column here).
        num_epochs: number of passes over the training data.
        lr: SGD learning rate (small default keeps short runs stable).

    Returns:
        The loss evaluated on the test set after training completes.
    """
    loss = nn.MSELoss(reduction='mean')
    # Cap the batch size so tiny training sets still form valid minibatches.
    batch_size = min(10, train_labels.shape[0])
    train_iter = d2l.load_array((train_features, train_labels.reshape(-1, 1)),
                                batch_size)
    test_iter = d2l.load_array((test_features, test_labels.reshape(-1, 1)),
                               batch_size, is_train=False)
    trainer = torch.optim.SGD(net.parameters(), lr=lr)
    for _ in range(num_epochs):
        d2l.train_epoch_ch3(net, train_iter, loss, trainer)
    return evaluate_loss(net, test_iter, loss)
import matplotlib.pyplot as plt

# Sweep the number of polynomial feature dimensions and record the final
# test loss for each model size. Feature i uses the first i columns of
# poly_features, i.e. 1, x, x^2/2!, ..., x^(i-1)/(i-1)!.
degrees = range(3, 20)
test_loss = []
for i in degrees:
    train_features = poly_features[:n_train, :i]
    test_features = poly_features[n_train:, :i]
    input_shape = train_features.shape[-1]
    # bias=False: the constant polynomial feature already acts as the bias.
    net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))
    test_loss.append(train(net, train_features, test_features,
                           labels[:n_train], labels[n_train:]))
plt.plot(list(degrees), test_loss, label='test loss', color='blue', alpha=0.4)
plt.legend()  # without this, the 'test loss' label is never shown
plt.show()