Remove duplicate dataset code

main
committed 1 year ago
parent 22f4141a79
commit 3eb184033a

@@ -1,7 +1,7 @@
 import torch
 import pandas as pd
-from 电压等级_输出为5 import LSTM_Regression
-from 电压等级_输出为5 import create_dataset
+from 电压等级_输入10_输出3 import LSTM_Regression
+from 电压等级_输入10_输出3 import create_dataset
 model = LSTM_Regression(10, 32, output_size=5, num_layers=2)
 model.load_state_dict(torch.load('dy5.pth'))
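
Note: the LSTM_Regression class itself lives in the imported module 电压等级_输入10_输出3 and does not appear in this diff. For reference, a minimal sketch consistent with how it is called here (a stacked nn.LSTM feeding a linear head; a hypothetical reconstruction, not the module's actual code):

    import torch
    from torch import nn

    class LSTM_Regression(nn.Module):
        # Hypothetical reconstruction; the real class is defined in 电压等级_输入10_输出3.
        def __init__(self, input_size, hidden_size, output_size=1, num_layers=2):
            super().__init__()
            self.lstm = nn.LSTM(input_size, hidden_size, num_layers)  # expects (seq, batch, feature)
            self.fc = nn.Linear(hidden_size, output_size)

        def forward(self, x):
            out, _ = self.lstm(x)  # (seq, batch, hidden_size)
            return self.fc(out)   # (seq, batch, output_size)

This shape convention matches the reshape(-1, 1, DAYS_FOR_TRAIN) calls later in the file.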

@@ -53,36 +53,36 @@ def data_preprocessing(data):
     return data
 # Concatenate the datasets
-# file_dir = r'./浙江各地市分电压日电量数据'
-# excel = os.listdir(file_dir)[0]
-# data = pd.read_excel(os.path.join(file_dir, excel), sheet_name=0, index_col='stat_date')
-# data.drop(columns='地市',inplace=True)
-# data = data_preprocessing(data)
-#
-# df = data[data.columns[0]]
-# df.dropna(inplace = True)
-# dataset_x, dataset_y = create_dataset(df, DAYS_FOR_TRAIN)
-#
-# for level in data.columns[1:]:
-#     df = data[level]
-#     df.dropna(inplace=True)
-#     x, y = create_dataset(df, DAYS_FOR_TRAIN)
-#     dataset_x = np.concatenate((dataset_x, x))
-#     dataset_y = np.concatenate((dataset_y, y))
-#
-#
-# for excel in os.listdir(file_dir)[1:]:
-#
-#     data = pd.read_excel(os.path.join(file_dir,excel), sheet_name=0,index_col='stat_date')
-#     data.drop(columns='地市', inplace=True)
-#     data = data_preprocessing(data)
-#
-#     for level in data.columns:
-#         df = data[level]
-#         df.dropna(inplace=True)
-#         x,y = create_dataset(df,DAYS_FOR_TRAIN)
-#         dataset_x = np.concatenate((dataset_x,x))
-#         dataset_y = np.concatenate((dataset_y,y))
+file_dir = r'./浙江各地市分电压日电量数据'
+excel = os.listdir(file_dir)[0]
+data = pd.read_excel(os.path.join(file_dir, excel), sheet_name=0, index_col='stat_date')
+data.drop(columns='地市', inplace=True)
+data = data_preprocessing(data)
+
+df = data[data.columns[0]]
+df.dropna(inplace=True)
+dataset_x, dataset_y = create_dataset(df, DAYS_FOR_TRAIN)
+
+for level in data.columns[1:]:
+    df = data[level]
+    df.dropna(inplace=True)
+    x, y = create_dataset(df, DAYS_FOR_TRAIN)
+    dataset_x = np.concatenate((dataset_x, x))
+    dataset_y = np.concatenate((dataset_y, y))
+
+
+for excel in os.listdir(file_dir)[1:]:
+
+    data = pd.read_excel(os.path.join(file_dir, excel), sheet_name=0, index_col='stat_date')
+    data.drop(columns='地市', inplace=True)
+    data = data_preprocessing(data)
+
+    for level in data.columns:
+        df = data[level]
+        df.dropna(inplace=True)
+        x, y = create_dataset(df, DAYS_FOR_TRAIN)
+        dataset_x = np.concatenate((dataset_x, x))
+        dataset_y = np.concatenate((dataset_y, y))
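
Note: create_dataset is imported rather than defined in this file. Judging from the DAYS_FOR_TRAIN window and the output_size=3 / reshape(-1, 1, 3) usage below, it presumably builds sliding-window samples. A plausible sketch (an assumption, not the imported implementation):

    import numpy as np

    def create_dataset(series, days_for_train):
        # Hypothetical sketch: each sample pairs days_for_train consecutive
        # values with the following 3 values as the prediction target.
        values = np.asarray(series, dtype='float32')
        xs, ys = [], []
        for i in range(len(values) - days_for_train - 2):
            xs.append(values[i:i + days_for_train])
            ys.append(values[i + days_for_train:i + days_for_train + 3])
        return np.array(xs), np.array(ys)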
@@ -90,68 +90,76 @@ def data_preprocessing(data):
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 #
 # # Normalize to the 0~1 range
-# max_value = np.max(dataset_x)
-# min_value = np.min(dataset_x)
-# dataset_x = (dataset_x - min_value) / (max_value - min_value)
-# dataset_y = (dataset_y - min_value) / (max_value - min_value)
+max_value = np.max(dataset_x)
+min_value = np.min(dataset_x)
+dataset_x = (dataset_x - min_value) / (max_value - min_value)
+dataset_y = (dataset_y - min_value) / (max_value - min_value)
 #
 # print(max_value,min_value)
 #
 # # Split into training and test sets
-# train_size = int(len(dataset_x)*0.7)
-# train_x = dataset_x[:train_size]
-# train_y = dataset_y[:train_size]
-#
-# # Reshape the data: the RNN reads input of shape (seq_size, batch_size, feature_size)
-# train_x = train_x.reshape(-1, 1, DAYS_FOR_TRAIN)
-# train_y = train_y.reshape(-1, 1, 3)
-#
-# # Convert to PyTorch tensors
-# train_x = torch.from_numpy(train_x).to(device).type(torch.float32)
-# train_y = torch.from_numpy(train_y).to(device).type(torch.float32)
-# train_ds = TensorDataset(train_x,train_y)
-# train_dl = DataLoader(train_ds,batch_size=128,shuffle=True,drop_last=True)
-
-model = LSTM_Regression(DAYS_FOR_TRAIN, 32, output_size=3, num_layers=2).to(device)  # Instantiate the model and set its parameters: input/output sizes, hidden layers, etc.
-
-# train_loss = []
-# loss_function = nn.MSELoss()
-# optimizer = torch.optim.Adam(model.parameters(), lr=0.005, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
-# for i in range(2500):
-#     # for j,(x,y) in enumerate(train_dl):
-#     out = model(train_x)
-#     loss = loss_function(out, train_y)
-#     loss.backward()
-#     optimizer.step()
-#     optimizer.zero_grad()
-#     train_loss.append(loss.item())
-#     if (i+1) % 100 == 0:
-#         print(f'epoch {i+1}/1500 loss:{round(loss.item(),5)}')
-#     # if (j + 1) % 100 == 0:
-#     #     print(f'epoch {i+1}/1500 step {j+1}/{len(train_dl)} loss:{loss}' )
-#
-# # Save the model
-# torch.save(model.state_dict(),'8_dy3.pth')
+train_size = int(len(dataset_x)*0.7)
+train_x = dataset_x[:train_size]
+train_y = dataset_y[:train_size]
+eval_x = dataset_x[train_size:]
+eval_y = dataset_y[train_size:]
+
+# Reshape the data: the RNN reads input of shape (seq_size, batch_size, feature_size)
+train_x = train_x.reshape(-1, 1, DAYS_FOR_TRAIN)
+train_y = train_y.reshape(-1, 1, 3)
+eval_x = eval_x.reshape(-1, 1, DAYS_FOR_TRAIN)
+eval_y = eval_y.reshape(-1, 1, 3)
+
+# Convert to PyTorch tensors
+train_x = torch.from_numpy(train_x).to(device).type(torch.float32)
+train_y = torch.from_numpy(train_y).to(device).type(torch.float32)
+eval_x = torch.from_numpy(eval_x).to(device).type(torch.float32)
+eval_y = torch.from_numpy(eval_y).to(device).type(torch.float32)
+
+model = LSTM_Regression(DAYS_FOR_TRAIN, 32, output_size=3, num_layers=2).to(device)  # Instantiate the model and set its parameters: input/output sizes, hidden layers, etc.
+loss_function = nn.MSELoss()
+optimizer = torch.optim.Adam(model.parameters(), lr=0.005, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
+min_loss = 1
+for i in range(2500):
+    model.train()
+    out = model(train_x)
+    loss = loss_function(out, train_y)
+    loss.backward()
+    optimizer.step()
+    optimizer.zero_grad()
+    model.eval()
+    with torch.no_grad():
+        pred = model(eval_x)
+        eval_loss = loss_function(pred, eval_y)
+        if eval_loss <= min_loss:
+            min_loss = eval_loss
+            best_param = model.state_dict()
+    if (i+1) % 100 == 0:
+        print(f'epoch {i+1}/2500 loss:{round(loss.item(),5)}')
+# Save the model
+torch.save(best_param, 'best_dy3.pth')
 # for test
-# model = model.eval()
-#
-# dataset_x = dataset_x.reshape(-1, 1, DAYS_FOR_TRAIN)  # (seq_size, batch_size, feature_size)
-# dataset_x = torch.from_numpy(dataset_x).to(device).type(torch.float32)
-#
-# pred_test = model(dataset_x)  # full training set
-# # Model output: (seq_size, batch_size, output_size)
-# pred_test = pred_test.view(-1)
-# pred_test = np.concatenate((np.zeros(DAYS_FOR_TRAIN), pred_test.cpu().detach().numpy()))
-#
-# plt.plot(pred_test.reshape(-1), 'r', label='prediction')
-# plt.plot(dataset_y.reshape(-1), 'b', label='real')
-# plt.plot((train_size*3, train_size*3), (0, 1), 'g--')  # divider: training data on the left, test output on the right
-# plt.legend(loc='best')
-# plt.show()
+model = model.eval()
+
+dataset_x = dataset_x.reshape(-1, 1, DAYS_FOR_TRAIN)  # (seq_size, batch_size, feature_size)
+dataset_x = torch.from_numpy(dataset_x).to(device).type(torch.float32)
+
+pred_test = model(dataset_x)
+# Model output: (seq_size, batch_size, output_size)
+pred_test = pred_test.view(-1)
+pred_test = np.concatenate((np.zeros(DAYS_FOR_TRAIN), pred_test.cpu().detach().numpy()))
+
+plt.plot(pred_test.reshape(-1), 'r', label='prediction')
+plt.plot(dataset_y.reshape(-1), 'b', label='real')
+plt.plot((train_size*3, train_size*3), (0, 1), 'g--')  # divider: training data on the left, test output on the right
+plt.legend(loc='best')
+plt.show()
 # Build the test set
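
Note: only the lowest-eval-loss weights (best_param) are written to best_dy3.pth, and the model is trained on min-max-normalized values, so the plots above show the 0~1 scale. Downstream use would reload the checkpoint and invert the scaling. A sketch, assuming min_value/max_value from above are still in scope:

    # Reload the best checkpoint (same architecture as above).
    model = LSTM_Regression(DAYS_FOR_TRAIN, 32, output_size=3, num_layers=2).to(device)
    model.load_state_dict(torch.load('best_dy3.pth', map_location=device))
    model.eval()

    # Predictions come out on the 0~1 scale; map them back to real units.
    with torch.no_grad():
        pred = model(eval_x).cpu().numpy()
    pred_real = pred * (max_value - min_value) + min_value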

@@ -131,37 +131,37 @@ train_loss = []
 loss_function = nn.MSELoss()
 optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
 min_loss = 1
-for i in range(200):
-    model.train()
-    for j,(x,y) in enumerate(train_dl):
-        x,y = x.to(device),y.to(device)
-        out = model(x)
-        loss = loss_function(out, y)
-        loss.backward()
-        optimizer.step()
-        optimizer.zero_grad()
-        train_loss.append(loss.item())
-        # if (i+1) % 100 == 0:
-        #     print(f'epoch {i+1}/1500 loss:{round(loss.item(),5)}')
-        if (j + 1) % 10 == 0:
-            print(f'epoch {i+1}/200 step {j+1}/{len(train_dl)} loss:{loss}')
-    test_running_loss = 0
-    model.eval()
-    with torch.no_grad():
-        for x,y in eval_dl:
-            pred = model(eval_x)
-            loss = loss_function(pred,y)
-            test_running_loss += loss.item()
-    test_loss = test_running_loss/len(eval_dl)
-    if test_loss < min_loss:
-        min_loss = test_loss
-        best_model_weight = model.state_dict()
-    print(f'epoch {i+1} test_loss:{test_loss}')
-
-total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
-print(f"Total parameters in the LSTM model: {total_params}")
-# Save the model
-torch.save(best_model_weight,'dy3.pth')
+# for i in range(200):
+#     model.train()
+#     for j,(x,y) in enumerate(train_dl):
+#         x,y = x.to(device),y.to(device)
+#         out = model(x)
+#         loss = loss_function(out, y)
+#         loss.backward()
+#         optimizer.step()
+#         optimizer.zero_grad()
+#         train_loss.append(loss.item())
+#         # if (i+1) % 100 == 0:
+#         #     print(f'epoch {i+1}/1500 loss:{round(loss.item(),5)}')
+#         if (j + 1) % 10 == 0:
+#             print(f'epoch {i+1}/200 step {j+1}/{len(train_dl)} loss:{loss}')
+#     test_running_loss = 0
+#     model.eval()
+#     with torch.no_grad():
+#         for x,y in eval_dl:
+#             pred = model(eval_x)
+#             loss = loss_function(pred,y)
+#             test_running_loss += loss.item()
+#     test_loss = test_running_loss/len(eval_dl)
+#     if test_loss < min_loss:
+#         min_loss = test_loss
+#         best_model_weight = model.state_dict()
+#     print(f'epoch {i+1} test_loss:{test_loss}')
+#
+# total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
+# print(f"Total parameters in the LSTM model: {total_params}")
+# # Save the model
+# torch.save(best_model_weight,'dy3.pth')
 # Load the model
 model = LSTM(27, 16, output_size=3, num_layers=3).to(device)
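
Note: the hunk is cut off after the LSTM constructor, so the actual load call is not visible here. Completing it in the same style as the earlier loads would presumably look like this (a sketch, assuming the dy3.pth checkpoint referenced above):

    model.load_state_dict(torch.load('dy3.pth', map_location=device))
    model.eval()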
