diff --git a/浙江行业电量/行业电量_输出为5.py b/浙江行业电量/行业电量_输出为5.py
index 9f53179..14ade33 100644
--- a/浙江行业电量/行业电量_输出为5.py
+++ b/浙江行业电量/行业电量_输出为5.py
@@ -54,91 +54,91 @@ def data_preprocessing(data):
     return data
 
 # concatenate the datasets
-file_dir = r'C:\Users\user\Desktop\浙江各地市行业电量数据'
-excel = os.listdir(file_dir)[0]
-data = pd.read_excel(os.path.join(file_dir, excel), sheet_name=0, index_col='stat_date')
-data.drop(columns='地市',inplace=True)
-data = data_preprocessing(data)
-
-df = data[data.columns[0]]
-df.dropna(inplace = True)
-dataset_x, dataset_y = create_dataset(df, DAYS_FOR_TRAIN)
-
-for level in data.columns[1:]:
-    df = data[level]
-    df.dropna(inplace=True)
-    x, y = create_dataset(df, DAYS_FOR_TRAIN)
-    dataset_x = np.concatenate((dataset_x, x))
-    dataset_y = np.concatenate((dataset_y, y))
-
-
-for excel in os.listdir(file_dir)[1:]:
-
-    data = pd.read_excel(os.path.join(file_dir,excel), sheet_name=0,index_col='stat_date')
-    data.drop(columns='地市', inplace=True)
-    data = data_preprocessing(data)
-
-    for level in data.columns:
-        df = data[level]
-        df.dropna(inplace=True)
-        x,y = create_dataset(df,DAYS_FOR_TRAIN)
-        dataset_x = np.concatenate((dataset_x,x))
-        dataset_y = np.concatenate((dataset_y,y))
-
-
-print(dataset_x.shape,dataset_y.shape)
-# training
+# file_dir = r'C:\Users\user\Desktop\浙江各地市行业电量数据'
+# excel = os.listdir(file_dir)[0]
+# data = pd.read_excel(os.path.join(file_dir, excel), sheet_name=0, index_col='stat_date')
+# data.drop(columns='地市',inplace=True)
+# data = data_preprocessing(data)
+#
+# df = data[data.columns[0]]
+# df.dropna(inplace = True)
+# dataset_x, dataset_y = create_dataset(df, DAYS_FOR_TRAIN)
+#
+# for level in data.columns[1:]:
+#     df = data[level]
+#     df.dropna(inplace=True)
+#     x, y = create_dataset(df, DAYS_FOR_TRAIN)
+#     dataset_x = np.concatenate((dataset_x, x))
+#     dataset_y = np.concatenate((dataset_y, y))
+#
+#
+# for excel in os.listdir(file_dir)[1:]:
+#
+#     data = pd.read_excel(os.path.join(file_dir,excel), sheet_name=0,index_col='stat_date')
+#     data.drop(columns='地市', inplace=True)
+#     data = data_preprocessing(data)
+#
+#     for level in data.columns:
+#         df = data[level]
+#         df.dropna(inplace=True)
+#         x,y = create_dataset(df,DAYS_FOR_TRAIN)
+#         dataset_x = np.concatenate((dataset_x,x))
+#         dataset_y = np.concatenate((dataset_y,y))
+#
+#
+# print(dataset_x.shape,dataset_y.shape)
+# # training
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-# normalize to the 0~1 range
-max_value = np.max(dataset_x)
-min_value = np.min(dataset_x)
-dataset_x = (dataset_x - min_value) / (max_value - min_value)
-dataset_y = (dataset_y - min_value) / (max_value - min_value)
-
-# split into training and test sets
-train_size = int(len(dataset_x)*0.7)
-train_x = dataset_x[:train_size]
-train_y = dataset_y[:train_size]
-
-# reshape the data; the RNN reads input of shape (seq_size, batch_size, feature_size)
-train_x = train_x.reshape(-1, 1, DAYS_FOR_TRAIN)
-train_y = train_y.reshape(-1, 1, 5)
-
-# convert to PyTorch tensors
-train_x = torch.from_numpy(train_x).to(device).type(torch.float32)
-train_y = torch.from_numpy(train_y).to(device).type(torch.float32)
-
+#
+# # normalize to the 0~1 range
+# max_value = np.max(dataset_x)
+# min_value = np.min(dataset_x)
+# dataset_x = (dataset_x - min_value) / (max_value - min_value)
+# dataset_y = (dataset_y - min_value) / (max_value - min_value)
+# print('max_value:',max_value,'min_value:',min_value)
+# # split into training and test sets
+# train_size = int(len(dataset_x)*0.7)
+# train_x = dataset_x[:train_size]
+# train_y = dataset_y[:train_size]
+#
+# # reshape the data; the RNN reads input of shape (seq_size, batch_size, feature_size)
+# train_x = train_x.reshape(-1, 1, DAYS_FOR_TRAIN)
+# train_y = train_y.reshape(-1, 1, 5)
+#
+# # convert to PyTorch tensors
+# train_x = torch.from_numpy(train_x).to(device).type(torch.float32)
+# train_y = torch.from_numpy(train_y).to(device).type(torch.float32)
+#
 model = LSTM_Regression(DAYS_FOR_TRAIN, 32, output_size=5, num_layers=2).to(device) # instantiate the model and set its input, output and hidden layer sizes
-
-
-# train_loss = []
-# loss_function = nn.MSELoss()
-# optimizer = torch.optim.Adam(model.parameters(), lr=0.005, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
-# for i in range(1500):
-#     out = model(train_x)
-#     loss = loss_function(out, train_y)
-#     loss.backward()
-#     optimizer.step()
-#     optimizer.zero_grad()
-#     train_loss.append(loss.item())
-#     if i % 100 == 0:
-#         print(f'epoch {i+1}: loss:{loss}')
-
-# save / load the model
-# torch.save(model.state_dict(),'hy5.pth')
-
-model.load_state_dict(torch.load('hy5.pth'))
-# for test
-model = model.eval() # switch to evaluation mode
-# model.load_state_dict(torch.load(os.path.join(model_save_dir,model_file))) # load the parameters
-dataset_x = dataset_x.reshape(-1, 1, DAYS_FOR_TRAIN) # (seq_size, batch_size, feature_size)
-dataset_x = torch.from_numpy(dataset_x).to(device).type(torch.float32)
-
-pred_test = model(dataset_x) # full training set
-# model output (seq_size, batch_size, output_size)
-pred_test = pred_test.view(-1)
-pred_test = np.concatenate((np.zeros(DAYS_FOR_TRAIN), pred_test.cpu().detach().numpy()))
+#
+#
+# # train_loss = []
+# # loss_function = nn.MSELoss()
+# # optimizer = torch.optim.Adam(model.parameters(), lr=0.005, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
+# # for i in range(1500):
+# #     out = model(train_x)
+# #     loss = loss_function(out, train_y)
+# #     loss.backward()
+# #     optimizer.step()
+# #     optimizer.zero_grad()
+# #     train_loss.append(loss.item())
+# #     if i % 100 == 0:
+# #         print(f'epoch {i+1}: loss:{loss}')
+#
+# # save / load the model
+# # torch.save(model.state_dict(),'hy5.pth')
+#
+# model.load_state_dict(torch.load('hy5.pth'))
+# # for test
+# model = model.eval() # switch to evaluation mode
+# # model.load_state_dict(torch.load(os.path.join(model_save_dir,model_file))) # load the parameters
+# dataset_x = dataset_x.reshape(-1, 1, DAYS_FOR_TRAIN) # (seq_size, batch_size, feature_size)
+# dataset_x = torch.from_numpy(dataset_x).to(device).type(torch.float32)
+#
+# pred_test = model(dataset_x) # full training set
+# # model output (seq_size, batch_size, output_size)
+# pred_test = pred_test.view(-1)
+# pred_test = np.concatenate((np.zeros(DAYS_FOR_TRAIN), pred_test.cpu().detach().numpy()))
 
 # plt.plot(pred_test.reshape(-1), 'r', label='prediction')
 # plt.plot(dataset_y.reshape(-1), 'b', label='real')
@@ -146,6 +146,9 @@ pred_test = np.concatenate((np.zeros(DAYS_FOR_TRAIN), pred_test.cpu().detach().n
 # plt.legend(loc='best')
 # plt.show()
 
+model.load_state_dict(torch.load('hy5.pth'))
+max_value = 354024930.8
+min_value = 0.0
 
 
 # build the test set
@@ -153,7 +156,7 @@ df_eval = pd.read_excel(r'C:\Users\user\Desktop\浙江各地市行业电量数
 df_eval.columns = df_eval.columns.map(lambda x:x.strip())
 df_eval.index = pd.to_datetime(df_eval.index)
 
-x,y = create_dataset(df_eval.loc['2023-7']['第三产业'],10)
+x,y = create_dataset(df_eval.loc['2023-7']['第二产业'],10)
 x = (x - min_value) / (max_value - min_value)
 x = x.reshape(-1,1,10)
 
@@ -161,13 +164,21 @@ x = x.reshape(-1,1,10)
 x = torch.from_numpy(x).type(torch.float32).to(device)
 pred = model(x)
 
+x2 = np.array([227964890.1,220189256.2,220189256.2,220189256.2,220189256.2,220189256.2,220189256.2,220189256.2,220189256.2,220189256.2])
+x2 = (x2 - min_value) / (max_value - min_value)
+x2 = x2.reshape(-1,1,10)
+print(x2)
+x2 = torch.from_numpy(x2).type(torch.float32).to(device)
+pred2 = model(x2)
 # inverse normalization
 pred = pred * (max_value - min_value) + min_value
+pred2 = pred2 * (max_value - min_value) + min_value
+print('pred2:',pred2.view(-1).cpu().detach().numpy())
 # df = df * (max_value - min_value) + min_value
 df = pd.DataFrame({'real':y.reshape(-1),'pred':pred.view(-1).cpu().detach().numpy()})
 print(df)
-df.to_csv('7月第三产业.csv',encoding='gbk')
+df.to_csv('7月第二产业.csv',encoding='gbk')
 
 # inverse normalization
 # pred = pred * (max_value - min_value) + min_value
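
Note on the hunk at @@ -146,6 +146,9 @@: with the dataset-building block commented out, the normalization constants are now hard-coded at inference time (max_value = 354024930.8, min_value = 0.0). A minimal sketch of an alternative, assuming it is acceptable to change the checkpoint format of hy5.pth: persist the min/max alongside the weights so inference does not depend on magic numbers. The save_checkpoint/load_checkpoint names and the 'state_dict'/'scaler' keys are hypothetical, not part of the existing script.

# Sketch only: bundle model weights and normalization constants in one checkpoint.
import torch

def save_checkpoint(model, max_value, min_value, path='hy5.pth'):
    # store weights and the scaling constants computed on the training data
    torch.save({'state_dict': model.state_dict(),
                'scaler': {'max': float(max_value), 'min': float(min_value)}}, path)

def load_checkpoint(model, path='hy5.pth', device='cpu'):
    # restore weights and return the scaling constants for inference
    ckpt = torch.load(path, map_location=device)
    model.load_state_dict(ckpt['state_dict'])
    return ckpt['scaler']['max'], ckpt['scaler']['min']

# Usage with the existing LSTM_Regression instance (illustrative):
# max_value, min_value = load_checkpoint(model, 'hy5.pth', device)
# x = (x - min_value) / (max_value - min_value)          # normalize inputs
# pred = model(x) * (max_value - min_value) + min_value  # de-normalize outputs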