@@ -17,8 +17,8 @@ class LSTM_Regression(nn.Module):

        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, _x):
        x, _ = self.lstm(_x)   # _x is the input, shape (seq_len, batch, input_size): sequence length, batch size, input feature size per sample (10)
        s, b, h = x.shape      # x is the LSTM output, shape (seq_len, batch, hidden_size): the feature dimension is now the hidden size
        x = x.view(s * b, h)
        x = self.fc(x)
        x = x.view(s, b, -1)   # reshape back to (seq_len, batch, output_size)

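The constructor body falls outside this hunk; only `self.fc` is visible. Judging from `forward()` and the call site `LSTM_Regression(DAYS_FOR_TRAIN, 32, output_size=5, num_layers=2)` further down, it presumably defines an `nn.LSTM` roughly like this sketch (an assumption, not the author's code):

    def __init__(self, input_size, hidden_size, output_size=1, num_layers=2):
        super().__init__()
        # assumed: batch_first is left at False, so inputs are (seq_len, batch, input_size)
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)
        self.fc = nn.Linear(hidden_size, output_size)
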
@@ -52,20 +52,19 @@ def data_preprocessing(data):

    return data

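The body of data_preprocessing lies outside this hunk; only its return is shown. Purely as a hypothetical illustration of the kind of cleanup the rest of the script implies (column names arrive padded, e.g. ' stat_date ', and the index is later treated as dates), it might do something like:

    def data_preprocessing(data):
        data.columns = data.columns.map(lambda x: x.strip())  # hypothetical: trim padded column names
        data.index = pd.to_datetime(data.index)               # hypothetical: parse the date index
        data = data.apply(pd.to_numeric, errors='coerce')     # hypothetical: force numeric values
        return data
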

if __name__ == '__main__':

    # Concatenate the datasets (one Excel file per city)
    file_dir = r'C:\Users\user\Desktop\浙江各地市分电压日电量数据'
    excel = os.listdir(file_dir)[0]
    data = pd.read_excel(os.path.join(file_dir, excel), sheet_name=0, index_col=' stat_date ')
    data = data_preprocessing(data)

    df = data[data.columns[0]]
    df.dropna(inplace=True)
    dataset_x, dataset_y = create_dataset(df, DAYS_FOR_TRAIN)

    for level in data.columns[1:]:
        df = data[level]
        df.dropna(inplace=True)
        x, y = create_dataset(df, DAYS_FOR_TRAIN)

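create_dataset is defined earlier in the file and not shown in this diff. A plausible sliding-window sketch, assuming each sample takes DAYS_FOR_TRAIN consecutive values as input and the following 5 values as the target (consistent with output_size=5 and train_y.reshape(-1, 1, 5) below); the real helper may differ:

    def create_dataset(series, days_for_train, days_to_predict=5):
        values = np.asarray(series, dtype='float32')
        xs, ys = [], []
        for i in range(len(values) - days_for_train - days_to_predict + 1):
            xs.append(values[i:i + days_for_train])                                     # past window
            ys.append(values[i + days_for_train:i + days_for_train + days_to_predict])  # next days
        return np.array(xs), np.array(ys)
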
@@ -73,7 +72,7 @@ if __name__ == '__main__':
        dataset_y = np.concatenate((dataset_y, y))

    for excel in os.listdir(file_dir)[1:]:
        data = pd.read_excel(os.path.join(file_dir, excel), sheet_name=0, index_col=' stat_date ')
        data = data_preprocessing(data)

@@ -84,101 +83,79 @@ if __name__ == '__main__':
            dataset_x = np.concatenate((dataset_x, x))
            dataset_y = np.concatenate((dataset_y, y))

    print(dataset_x, dataset_y, dataset_x.shape, dataset_y.shape)

    # Training
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Scale to the 0~1 range (min-max normalization)
    max_value = np.max(dataset_x)
    min_value = np.min(dataset_x)
    dataset_x = (dataset_x - min_value) / (max_value - min_value)
    dataset_y = (dataset_y - min_value) / (max_value - min_value)

    # Split into training and test sets
    train_size = int(len(dataset_x) * 0.7)
    train_x = dataset_x[:train_size]
    train_y = dataset_y[:train_size]

    # Reshape the data: the RNN reads input of shape (seq_size, batch_size, feature_size)
    train_x = train_x.reshape(-1, 1, DAYS_FOR_TRAIN)
    train_y = train_y.reshape(-1, 1, 5)

    # Convert to PyTorch tensors
    train_x = torch.from_numpy(train_x).to(device).type(torch.float32)
    train_y = torch.from_numpy(train_y).to(device).type(torch.float32)
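    # Note: the whole dataset is scaled with dataset_x's min/max; the same min_value/max_value
    # are reused further down to scale the evaluation data and to de-normalize the predictions.
    # The reshape above follows the (seq_size, batch_size, feature_size) convention with a batch
    # size of 1: every window of DAYS_FOR_TRAIN days becomes one step of a long sequence.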

    model = LSTM_Regression(DAYS_FOR_TRAIN, 32, output_size=5, num_layers=2).to(device)  # instantiate the model: input size, hidden size, output size, number of layers

    train_loss = []
    loss_function = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)

    for i in range(1500):
        out = model(train_x)
        loss = loss_function(out, train_y)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        train_loss.append(loss.item())
        # print(loss)

    # Save the model
    torch.save(model.state_dict(), 'dy5.pth')
    # model.load_state_dict(torch.load('dy5.pth'))  # alternative: load previously saved weights instead of retraining

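train_loss is accumulated during training but not used again in this diff; a minimal sketch (an addition, not in the original) that visualizes it with the matplotlib instance already used below as plt:

    plt.figure()
    plt.plot(train_loss, label='training loss (MSE)')
    plt.xlabel('iteration')
    plt.legend(loc='best')
    plt.show()
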
    # for test
    model = model.eval()  # switch to evaluation mode
    # model.load_state_dict(torch.load(os.path.join(model_save_dir, model_file)))  # load saved parameters

    dataset_x = dataset_x.reshape(-1, 1, DAYS_FOR_TRAIN)  # (seq_size, batch_size, feature_size)
    dataset_x = torch.from_numpy(dataset_x).to(device).type(torch.float32)

    pred_test = model(dataset_x)  # run on the full dataset, training portion included

    # The model output has shape (seq_size, batch_size, output_size)
    pred_test = pred_test.view(-1)
    pred_test = np.concatenate((np.zeros(DAYS_FOR_TRAIN), pred_test.cpu().detach().numpy()))

    plt.plot(pred_test, 'r', label='prediction')
    plt.plot(df, 'b', label='real')
    plt.plot((train_size, train_size), (0, 1), 'g--')  # dividing line: training data to the left, test output to the right
    plt.legend(loc='best')
    plt.show()

    # Build the test set
    # result_list = []
    # Using x as the actual data, roll the prediction forward for the next 3 days
    df_eval = pd.read_excel(r'C:\Users\user\Desktop\浙江各地市分电压日电量数据\杭州.xlsx', index_col=' stat_date ')
    df_eval.columns = df_eval.columns.map(lambda x: x.strip())
    df_eval.index = pd.to_datetime(df_eval.index)
    # x = torch.from_numpy(df[-14:-4]).to(device)
    # pred = model(x.reshape(-1, 1, DAYS_FOR_TRAIN)).view(-1).detach().numpy()

    x, y = create_dataset(df_eval.loc['2023-7']['10kv以下'], 10)
    x = (x - min_value) / (max_value - min_value)
    x = x.reshape(-1, 1, 10)
    x = torch.from_numpy(x).type(torch.float32).to(device)
    pred = model(x)

    # De-normalize (invert the min-max scaling)
    pred = pred * (max_value - min_value) + min_value
    # df = df * (max_value - min_value) + min_value

    print(pred, y)
    # print(pred)
    df = pd.DataFrame({'real': y.reshape(-1), 'pred': pred.view(-1).cpu().detach().numpy()})
    df.to_csv('7月预测.csv', encoding='gbk')

    # Print metrics
    # print(abs(pred - df[-3:]).mean() / df[-3:].mean())
    # result_eight = pd.DataFrame({'pred': np.round(pred, 1), 'real': df[-3:]})
    # target = (result_eight['pred'].sum() - result_eight['real'].sum()) / df[-31:].sum()

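The metric lines above remain commented out in the diff. A minimal, hypothetical way to get a similar relative-error figure from the `df` of real vs. predicted values built just above:

    rel_err = (df['pred'] - df['real']).abs().mean() / df['real'].mean()
    print(f'mean relative error: {rel_err:.3%}')
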