@@ -54,42 +54,42 @@ def data_preprocessing(data):
     return data


 # Concatenate the datasets
-file_dir = './浙江各地市行业电量数据'
-excel = os.listdir(file_dir)[0]
-data = pd.read_excel(os.path.join(file_dir, excel), sheet_name=0, index_col='stat_date')
-data.drop(columns='地市',inplace=True)
-data = data_preprocessing(data)
+# file_dir = './浙江各地市行业电量数据'
+# excel = os.listdir(file_dir)[0]
+# data = pd.read_excel(os.path.join(file_dir, excel), sheet_name=0, index_col='stat_date')
+# data.drop(columns='地市',inplace=True)
+# data = data_preprocessing(data)

 #
-df = data[data.columns[0]]
-df.dropna(inplace = True)
-dataset_x, dataset_y = create_dataset(df, DAYS_FOR_TRAIN)
+# df = data[data.columns[0]]
+# df.dropna(inplace = True)
+# dataset_x, dataset_y = create_dataset(df, DAYS_FOR_TRAIN)

 #
-for level in data.columns[1:]:
-    df = data[level]
-    df.dropna(inplace=True)
-    x, y = create_dataset(df, DAYS_FOR_TRAIN)
-    dataset_x = np.concatenate((dataset_x, x))
-    dataset_y = np.concatenate((dataset_y, y))
+# for level in data.columns[1:]:
+#     df = data[level]
+#     df.dropna(inplace=True)
+#     x, y = create_dataset(df, DAYS_FOR_TRAIN)
+#     dataset_x = np.concatenate((dataset_x, x))
+#     dataset_y = np.concatenate((dataset_y, y))

 #
-for excel in os.listdir(file_dir)[1:]:
+# for excel in os.listdir(file_dir)[1:]:
 #
-    data = pd.read_excel(os.path.join(file_dir,excel), sheet_name=0,index_col='stat_date')
-    data.drop(columns='地市', inplace=True)
-    data = data_preprocessing(data)
+#     data = pd.read_excel(os.path.join(file_dir,excel), sheet_name=0,index_col='stat_date')
+#     data.drop(columns='地市', inplace=True)
+#     data = data_preprocessing(data)
 #
-    for level in data.columns:
-        df = data[level]
-        df.dropna(inplace=True)
-        x,y = create_dataset(df,DAYS_FOR_TRAIN)
-        dataset_x = np.concatenate((dataset_x,x))
-        dataset_y = np.concatenate((dataset_y,y))
+#     for level in data.columns:
+#         df = data[level]
+#         df.dropna(inplace=True)
+#         x,y = create_dataset(df,DAYS_FOR_TRAIN)
+#         dataset_x = np.concatenate((dataset_x,x))
+#         dataset_y = np.concatenate((dataset_y,y))

 #
-df_x_10 = pd.DataFrame(dataset_x)
-df_y_10 = pd.DataFrame(dataset_y)
-df_x_10.to_csv('df_x_10.csv',index=False)
-df_y_10.to_csv('df_y_10.csv',index=False)
+# df_x_10 = pd.DataFrame(dataset_x)
+# df_y_10 = pd.DataFrame(dataset_y)
+# df_x_10.to_csv('df_x_10.csv',index=False)
+# df_y_10.to_csv('df_y_10.csv',index=False)
 dataset_x = pd.read_csv('df_x_10.csv').values
 dataset_y = pd.read_csv('df_y_10.csv').values
 print(dataset_x.shape,dataset_y.shape)
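The helper create_dataset is called throughout this hunk but not defined in any of the hunks shown. A minimal sketch of a sliding-window builder consistent with how it is used (a 1-D series and DAYS_FOR_TRAIN lags in, arrays of inputs and 3-step-ahead targets out; the 3-step target width is an assumption read off output_size=3 and the (-1, 1, 3) reshape below):

    import numpy as np

    def create_dataset(series, days_for_train):
        # Hypothetical sliding-window builder: each sample is `days_for_train`
        # consecutive values, each target is the following 3 values.
        values = np.asarray(series, dtype='float64')
        xs, ys = [], []
        for i in range(len(values) - days_for_train - 2):
            xs.append(values[i:i + days_for_train])
            ys.append(values[i + days_for_train:i + days_for_train + 3])
        return np.array(xs), np.array(ys)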

@@ -115,30 +115,31 @@ train_y = train_y.reshape(-1, 1, 3)
 train_x = torch.from_numpy(train_x).to(device).type(torch.float32)
 train_y = torch.from_numpy(train_y).to(device).type(torch.float32)

 model = LSTM_Regression(DAYS_FOR_TRAIN, 32, output_size=3, num_layers=2).to(device)  # Build the model and set its parameters: input/output sizes, hidden layer, number of layers, etc.

 train_loss = []
 loss_function = nn.MSELoss()
 optimizer = torch.optim.Adam(model.parameters(), lr=0.005, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
 min_loss = 1
-for i in range(500):
-    train_x,train_y = train_x.to(device),train_y.to(device)
-    out = model(train_x)
-    loss = loss_function(out, train_y)
-    loss.backward()
-    optimizer.step()
-    optimizer.zero_grad()
-    train_loss.append(loss.item())
+# for i in range(500):
+#     train_x,train_y = train_x.to(device),train_y.to(device)
+#     out = model(train_x)
+#     loss = loss_function(out, train_y)
+#     loss.backward()
+#     optimizer.step()
+#     optimizer.zero_grad()
+#     train_loss.append(loss.item())
 #
-    if loss <= min_loss:
-        min_loss = loss
-        best_para = model.state_dict()
-    if i % 100 == 0:
-        print(f'epoch {i+1}: loss:{loss}')
+#     if loss <= min_loss:
+#         min_loss = loss
+#         best_para = model.state_dict()
+#     if i % 100 == 0:
+#         print(f'epoch {i+1}: loss:{loss}')

-# Save / load the model
-torch.save(best_para,'hy3.pth')
+# # Save / load the model
+# torch.save(best_para,'hy3.pth')
 model = LSTM_Regression(DAYS_FOR_TRAIN, 32, output_size=3, num_layers=2).to(device)
 model.load_state_dict(torch.load('hy3.pth'))

 # Test
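One note on the checkpoint load kept above: torch.load('hy3.pth') assumes the saved tensors can be placed on the current machine as-is; if the file was written on a GPU host and later read on a CPU-only one, the usual guard is map_location. A sketch using the same names as the script:

    model = LSTM_Regression(DAYS_FOR_TRAIN, 32, output_size=3, num_layers=2).to(device)
    state = torch.load('hy3.pth', map_location=device)  # remap tensors onto the active device
    model.load_state_dict(state)
    model.eval()  # switch off training-only behaviour before inference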

@@ -171,7 +172,7 @@ for excel in os.listdir(file_dir):
     df_city = df_city.loc['2023-9'][:-3]
     city = df_city['地市'].iloc[0]
     result_dict = {}
-    for industry in df_city.columns[2:]:
+    for industry in df_city.columns[1:]:
         df_city[industry] = df_city[industry].astype('float')
         x, y = create_dataset(df_city[industry], 10)
         x = (x - min_value) / (max_value - min_value)
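min_value and max_value are defined outside the hunks shown; the scaling line above and the de-normalization in the next hunk form a min-max round trip. A small sketch of that pair, with the constants recomputed from a placeholder training series (`series` is hypothetical; the real script sets the two scalars earlier):

    min_value, max_value = float(series.min()), float(series.max())   # assumed source of the constants
    x_scaled = (x - min_value) / (max_value - min_value)              # forward: map into [0, 1]
    x_restored = x_scaled * (max_value - min_value) + min_value       # exact inverse, as applied to pred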

@@ -179,30 +180,14 @@ for excel in os.listdir(file_dir):
         x = torch.from_numpy(x).type(torch.float32).to(device)
         pred = model(x).view(-1)
         pred = pred * (max_value - min_value) + min_value
-        result = pred.cpu().detach().numpy()[-5:-2]
+        result = pred.cpu().detach().numpy()[-3:]
         result_dict[industry] = list(result)
     df = pd.DataFrame(result_dict,index=['2023-09-28','2023-09-29','2023-09-30'])
     df.to_excel(fr'C:\Users\user\Desktop\9月行业电量预测28-30\{city} .xlsx')
     print(time.time()-t1)
     print(result_dict)
-
-# De-normalize
-pred = pred * (max_value - min_value) + min_value
-df = df * (max_value - min_value) + min_value
-
-# Print metrics
-print(abs(pred - df[-3:]).mean() / df[-3:].mean())
-result_eight = pd.DataFrame({'pred': np.round(pred,1),'real': df[-3:]})
-target = (result_eight['pred'].sum() - result_eight['real'].sum()) / df[-31:].sum()
-result_eight['loss_rate'] = round(target, 5)
-result_eight['level'] = level
-list_app.append(result_eight)
-print(target)
-print(result_eight)
-
-final_df = pd.concat(list_app,ignore_index=True)
-final_df.to_csv('市行业电量.csv',encoding='gbk')
-print(final_df)
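The block deleted by this hunk scored the forecast against the actuals. Its headline figure, target, is the signed relative error of the predicted 3-day total, normalized by the trailing 31-day actual total:

    target = (sum of the 3 predicted days - sum of the 3 actual days) / sum of the last 31 actual days

so a positive value means the 3-day total was over-predicted, expressed as a share of roughly one month of actual consumption.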