|
|
|
# Reshape the train/eval splits to (seq_size, batch_size, feature_size) as the
# LSTM expects: inputs carry 27 features, targets carry 3.
train_x = train_x.reshape(-1, 1, 27)
train_y = train_y.reshape(-1, 1, 3)
eval_x = eval_x.reshape(-1, 1, 27)
# NOTE(review): removed a stale duplicate `eval_y.reshape(-1, 1, 27)` left over
# from a diff — targets have 3 features, so reshaping to 27 would raise.
eval_y = eval_y.reshape(-1, 1, 3)
# Convert the numpy splits to float32 torch tensors on the target device.
def _as_device_tensor(arr):
    """Return *arr* as a float32 tensor placed on `device`."""
    return torch.from_numpy(arr).to(device).type(torch.float32)

train_x = _as_device_tensor(train_x)
train_y = _as_device_tensor(train_y)
eval_x = _as_device_tensor(eval_x)
eval_y = _as_device_tensor(eval_y)
|
|
|
|
train_ds = TensorDataset(train_x,train_y)
|
|
|
|
|
train_dl = DataLoader(train_ds,batch_size=32,drop_last=True)
|
|
|
|
|
train_dl = DataLoader(train_ds,batch_size=2,drop_last=True)
|
|
|
|
|
eval_ds = TensorDataset(eval_x,eval_y)
|
|
|
|
|
eval_dl = DataLoader(eval_ds,batch_size=64,drop_last=True)
|
|
|
|
|
eval_dl = DataLoader(eval_ds,batch_size=4,drop_last=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
model = LSTM(27, 16, output_size=3, num_layers=2).to(device) # 导入模型并设置模型的参数输入输出层、隐藏层等
|
|
|
|
|
|
|
|
|
|
train_loss = []
|
|
|
|
|
loss_function = nn.MSELoss()
|
|
|
|
|
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
|
|
|
|
|
optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
|
|
|
|
|
min_loss = 1
|
|
|
|
|
# Train for 200 epochs; after each epoch evaluate and remember the best weights.
for i in range(200):
    model.train()
    for j, (x, y) in enumerate(train_dl):
        x, y = x.to(device), y.to(device)
        out = model(x)
        # NOTE(review): the next four lines were elided in the diff this file
        # was reconstructed from — this is the standard step; verify against
        # the complete original file.
        loss = loss_function(out, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss.append(loss.item())
        if (j + 1) % 10 == 0:
            print(f'epoch {i+1}/200 step {j+1}/{len(train_dl)} loss:{loss}' )

    # Evaluation: average MSE over the eval loader, gradients disabled.
    test_running_loss = 0
    model.eval()
    with torch.no_grad():
        for x, y in eval_dl:
            # BUG FIX: the original computed `model(eval_x)` (the whole eval
            # set) and compared it against each mini-batch's `y`; score the
            # batch itself instead.
            pred = model(x)
            loss = loss_function(pred, y)
            test_running_loss += loss.item()
    test_loss = test_running_loss / len(eval_dl)
    if test_loss < min_loss:
        min_loss = test_loss
        # Snapshot (deep-copy) the weights: keeping the live state_dict()
        # reference would silently track every later training update, making
        # "best" weights identical to the final ones.
        best_model_weight = {k: v.detach().clone()
                             for k, v in model.state_dict().items()}
    print(f'epoch {i+1} test_loss:{test_loss}')
|
# Save the best-performing weights (not necessarily the final ones — training
# may have drifted past its best epoch).
# NOTE(review): removed a stale duplicate `torch.save(model.state_dict(), 'dy3.pth')`
# that wrote the final weights to the same path and was immediately overwritten.
torch.save(best_model_weight, 'dy3.pth')
|
|
|
|
|
# Reload the saved weights into a fresh model for inference.
model = LSTM(27, 16, output_size=3, num_layers=2).to(device)
model.load_state_dict(torch.load('dy3.pth'))
model.eval()  # eval() returns the module itself, so re-binding is unnecessary

# Prepare the full dataset for prediction: (seq_size, batch_size, feature_size).
dataset_x = torch.from_numpy(dataset_x.reshape(-1, 1, 27)).to(device).type(torch.float32)
|