@@ -49,40 +49,39 @@ def data_preprocessing(data):
     data = data.astype(float)
     for col in data.columns:
         data[col] = normal(data[col])

     return data

 # concatenate the datasets
-file_dir = r'./浙江各地市分电压日电量数据'
-excel = os.listdir(file_dir)[0]
-data = pd.read_excel(os.path.join(file_dir, excel), sheet_name=0, index_col='stat_date')
-data.drop(columns='地市',inplace=True)
-data = data_preprocessing(data)
-
-#
-df = data[data.columns[0]]
-df.dropna(inplace = True)
-dataset_x, dataset_y = create_dataset(df, DAYS_FOR_TRAIN)
-
-#
-for level in data.columns[1:]:
-    df = data[level]
-    df.dropna(inplace=True)
-    x, y = create_dataset(df, DAYS_FOR_TRAIN)
-    dataset_x = np.concatenate((dataset_x, x))
-    dataset_y = np.concatenate((dataset_y, y))
-
-#
-for excel in os.listdir(file_dir)[1:]:
-    #
-    data = pd.read_excel(os.path.join(file_dir,excel), sheet_name=0,index_col='stat_date')
-    data.drop(columns='地市', inplace=True)
-    data = data_preprocessing(data)
-
-    #
-    for level in data.columns:
-        df = data[level]
-        df.dropna(inplace=True)
-        x,y = create_dataset(df,DAYS_FOR_TRAIN)
-        dataset_x = np.concatenate((dataset_x,x))
-        dataset_y = np.concatenate((dataset_y,y))
+# file_dir = r'./浙江各地市分电压日电量数据'
+# excel = os.listdir(file_dir)[0]
+# data = pd.read_excel(os.path.join(file_dir, excel), sheet_name=0, index_col='stat_date')
+# data.drop(columns='地市',inplace=True)
+# data = data_preprocessing(data)
+#
+# df = data[data.columns[0]]
+# df.dropna(inplace = True)
+# dataset_x, dataset_y = create_dataset(df, DAYS_FOR_TRAIN)
+#
+# for level in data.columns[1:]:
+#     df = data[level]
+#     df.dropna(inplace=True)
+#     x, y = create_dataset(df, DAYS_FOR_TRAIN)
+#     dataset_x = np.concatenate((dataset_x, x))
+#     dataset_y = np.concatenate((dataset_y, y))
+#
+# for excel in os.listdir(file_dir)[1:]:
+#     data = pd.read_excel(os.path.join(file_dir,excel), sheet_name=0,index_col='stat_date')
+#     data.drop(columns='地市', inplace=True)
+#     data = data_preprocessing(data)
+#
+#     for level in data.columns:
+#         df = data[level]
+#         df.dropna(inplace=True)
+#         x,y = create_dataset(df,DAYS_FOR_TRAIN)
+#         dataset_x = np.concatenate((dataset_x,x))
+#         dataset_y = np.concatenate((dataset_y,y))
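Note on `create_dataset`: it is defined earlier in the file and is not touched by this diff. Judging from the `DAYS_FOR_TRAIN`-wide inputs, the `reshape(-1, 1, 3)` targets and `output_size=3` below, and the `create_dataset(df_city[level], 10)` call in the prediction loop, it is most likely a sliding-window builder along the lines of this sketch (the function and argument names here are illustrative, not the project's actual code):

```python
import numpy as np

def create_dataset_sketch(series, days_for_train=10, days_to_predict=3):
    """Sliding-window split: each sample is `days_for_train` consecutive daily values,
    each target the `days_to_predict` values that follow (matching output_size=3)."""
    values = np.asarray(series, dtype=float)
    xs, ys = [], []
    for i in range(len(values) - days_for_train - days_to_predict + 1):
        xs.append(values[i:i + days_for_train])
        ys.append(values[i + days_for_train:i + days_for_train + days_to_predict])
    return np.array(xs), np.array(ys)
```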
@@ -90,31 +89,31 @@ for excel in os.listdir(file_dir)[1:]:
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 #
 # # scale to the 0~1 range
-max_value = np.max(dataset_x)
-min_value = np.min(dataset_x)
-dataset_x = (dataset_x - min_value) / (max_value - min_value)
-dataset_y = (dataset_y - min_value) / (max_value - min_value)
-#
-# print(max_value,min_value)
-
-# # split into training and test sets
-train_size = int(len(dataset_x)*0.7)
-train_x = dataset_x[:train_size]
-train_y = dataset_y[:train_size]
-eval_x = dataset_x[train_size:]
-eval_y = dataset_y[train_size:]
-
-# reshape the data; the RNN reads input of shape (seq_size, batch_size, feature_size)
-train_x = train_x.reshape(-1, 1, DAYS_FOR_TRAIN)
-train_y = train_y.reshape(-1, 1, 3)
-eval_x = eval_x.reshape(-1, 1, DAYS_FOR_TRAIN)
-eval_y = eval_y.reshape(-1, 1, 3)
-
-# convert to PyTorch tensors
-train_x = torch.from_numpy(train_x).to(device).type(torch.float32)
-train_y = torch.from_numpy(train_y).to(device).type(torch.float32)
-eval_x = torch.from_numpy(eval_x).to(device).type(torch.float32)
-eval_y = torch.from_numpy(eval_y).to(device).type(torch.float32)
+# max_value = np.max(dataset_x)
+# min_value = np.min(dataset_x)
+# dataset_x = (dataset_x - min_value) / (max_value - min_value)
+# dataset_y = (dataset_y - min_value) / (max_value - min_value)
+# #
+# # print(max_value,min_value)
+
+# # # split into training and test sets
+# train_size = int(len(dataset_x)*0.7)
+# train_x = dataset_x[:train_size]
+# train_y = dataset_y[:train_size]
+# eval_x = dataset_x[train_size:]
+# eval_y = dataset_y[train_size:]
+
+# # reshape the data; the RNN reads input of shape (seq_size, batch_size, feature_size)
+# train_x = train_x.reshape(-1, 1, DAYS_FOR_TRAIN)
+# train_y = train_y.reshape(-1, 1, 3)
+# eval_x = eval_x.reshape(-1, 1, DAYS_FOR_TRAIN)
+# eval_y = eval_y.reshape(-1, 1, 3)
+#
+# # convert to PyTorch tensors
+# train_x = torch.from_numpy(train_x).to(device).type(torch.float32)
+# train_y = torch.from_numpy(train_y).to(device).type(torch.float32)
+# eval_x = torch.from_numpy(eval_x).to(device).type(torch.float32)
+# eval_y = torch.from_numpy(eval_y).to(device).type(torch.float32)
 
 model = LSTM_Regression(DAYS_FOR_TRAIN, 32, output_size=3, num_layers=2).to(device)  # instantiate the model and set its input/output sizes, hidden size and number of layers
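`LSTM_Regression` is also defined earlier in the file and unchanged here. Given the call `LSTM_Regression(DAYS_FOR_TRAIN, 32, output_size=3, num_layers=2)` and the `(seq_size, batch_size, feature_size)` reshape above, it is presumably a thin wrapper around `nn.LSTM` with a linear head, roughly like this sketch (class name and internals are assumptions):

```python
import torch
from torch import nn

class LSTMRegressionSketch(nn.Module):
    """Minimal sketch of what LSTM_Regression likely looks like: an nn.LSTM over
    (seq_len, batch, input_size) followed by a per-step linear projection."""
    def __init__(self, input_size, hidden_size, output_size=1, num_layers=2):
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)  # batch_first=False
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):          # x: (seq_len, batch, input_size)
        out, _ = self.lstm(x)      # (seq_len, batch, hidden_size)
        return self.fc(out)        # (seq_len, batch, output_size)
```

With `train_x` reshaped to `(-1, 1, DAYS_FOR_TRAIN)`, every `DAYS_FOR_TRAIN`-day window is fed as one "time step" with batch size 1, which is why `input_size` equals `DAYS_FOR_TRAIN`.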
@@ -164,16 +163,15 @@ model = LSTM_Regression(DAYS_FOR_TRAIN, 32, output_size=3, num_layers=2).to(device)
 
 # build the test set
 max_value,min_value = 192751288.47,0.0
-model.load_state_dict(torch.load('8_dy3.pth'))   # when running on CPU, add map_location=torch.device('cpu')
-file_dir = r'./浙江各地市分电压日电量数据'
-for excel in os.listdir(file_dir):
-    df_city = pd.read_excel(os.path.join(file_dir,excel),index_col='stat_date')
-    df_city.index = pd.to_datetime(df_city.index)
-    df_city = df_city.loc['2023-9'][:-3]
-    df_city.drop(columns=[i for i in df_city.columns if (df_city[i] == 0).sum() / len(df_city) >= 0.5], inplace=True)
-
-    city = df_city['地市'].iloc[0]
+model.load_state_dict(torch.load('best_dy3.pth',map_location=torch.device('cpu')))   # when running on CPU, add map_location=torch.device('cpu')
+# file_dir = r'./浙江各地市分电压日电量数据'
+df = pd.read_excel(r'C:\Users\鸽子\Desktop\浙江电量20231127.xlsx',sheet_name=1)
+df = df[df['county_name'].isnull()]
+
+for city in df['city_name'].drop_duplicates():
+    df_city = df[df['city_name']==city].drop(columns=['county_name','500kv(含330kv)以上']).set_index('pt_date').sort_index()
+    # df_city.drop(columns=[i for i in df_city.columns if (df_city[i] == 0).sum() / len(df_city) >= 0.5], inplace=True)
     result_dict = {}
     for level in df_city.columns[1:]:
         x, y = create_dataset(df_city[level], 10)
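The lines that actually run the model on `x` sit between this hunk and the next, so they are not shown in the diff. Based on the scaling constants above and on `pred.cpu().detach().numpy()[-3:]` in the next hunk, the missing step presumably looks roughly like the following (a hypothetical sketch, not the file's actual code):

```python
# Hypothetical glue between create_dataset(df_city[level], 10) above and the
# `pred = pred * (max_value - min_value) + min_value` line in the next hunk.
x = (x - min_value) / (max_value - min_value)       # same min-max scaling used at training time
x = torch.from_numpy(x.reshape(-1, 1, 10)).type(torch.float32).to(device)
pred = model(x).view(-1)                            # flatten so [-3:] picks the final 3-day forecast
```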
@@ -184,13 +182,11 @@ for excel in os.listdir(file_dir):
         pred = pred * (max_value - min_value) + min_value
         result = pred.cpu().detach().numpy()[-3:]
         result_dict[level] = list(result)
-    df = pd.DataFrame(result_dict,index=['2023-09-28','2023-09-29','2023-09-30'])
-    df.to_excel(fr'C:\Users\user\Desktop\1\9月分压电量预测28-30\{city} .xlsx')
-    # print(result_dict)
+    df1 = pd.DataFrame(result_dict,index=['2023-11-28','2023-11-29','2023-11-30'])
+    df1.to_excel(fr'C:\Users\鸽子\Desktop\11月分压电量预测28-30\{city} .xlsx')
+    print(result_dict)
 
-# # print evaluation metrics
+# print evaluation metrics
 # print(abs(pred - df[-3:]).mean() / df[-3:].mean())
 # result_eight = pd.DataFrame({'pred': np.round(pred,1),'real': df[-3:]})
 # target = (result_eight['pred'].sum() - result_eight['real'].sum()) / df[-31:].sum()
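Two details in this last hunk: `pred * (max_value - min_value) + min_value` simply inverts the min-max scaling, using the hard-coded training extremes from the previous hunk, and the commented-out metric block computes the mean relative deviation of the 3-day forecast from the actuals. A small worked sketch of that metric, with `pred_days` and `real_days` as hypothetical aligned arrays:

```python
import numpy as np

# Hypothetical stand-ins for the last three predicted / actual daily totals.
pred_days = np.array([1.02e7, 0.98e7, 1.05e7])
real_days = np.array([1.00e7, 1.00e7, 1.00e7])

# Mirrors `abs(pred - df[-3:]).mean() / df[-3:].mean()` from the commented block above.
mean_rel_dev = np.abs(pred_days - real_days).mean() / real_days.mean()
print(f"mean relative deviation: {mean_rel_dev:.3%}")
```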