commit 293adc01f6
parent f6878b8505
commit message: get

@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
-  <component name="ProjectRootManager" version="2" project-jdk-name="C:\anaconda\envs\pytorch" project-jdk-type="Python SDK" />
+  <component name="ProjectRootManager" version="2" project-jdk-name="pytorch_gpu" project-jdk-type="Python SDK" />
 </project>

@@ -2,7 +2,7 @@
 <module type="PYTHON_MODULE" version="4">
   <component name="NewModuleRootManager">
     <content url="file://$MODULE_DIR$" />
-    <orderEntry type="jdk" jdkName="C:\anaconda\envs\pytorch" jdkType="Python SDK" />
+    <orderEntry type="jdk" jdkName="pytorch_gpu" jdkType="Python SDK" />
     <orderEntry type="sourceFolder" forTests="false" />
   </component>
 </module>

Binary file not shown.

@@ -52,7 +52,7 @@ def data_preprocessing(data):
     return data

 # Concatenate the per-city datasets
-file_dir = r'C:\Users\鸽子\Desktop\浙江各地市分电压日电量数据'
+file_dir = r'C:\Users\user\Desktop\浙江各地市分电压日电量数据'
 excel = os.listdir(file_dir)[0]
 data = pd.read_excel(os.path.join(file_dir, excel), sheet_name=0, index_col='stat_date')
 data.drop(columns='地市',inplace=True)
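The hunk above only shows how the per-city Excel files are read and stitched together; the sliding-window step that turns each daily series into (x, y) training pairs is not part of this diff. A minimal sketch of what that helper likely looks like, assuming a function name `create_dataset` and a window length `DAYS_FOR_TRAIN` (the constant appears later in the script; the value 5 here is only a placeholder):

```python
import numpy as np

DAYS_FOR_TRAIN = 5  # placeholder; the real value is defined elsewhere in the script

def create_dataset(series, days_for_train=DAYS_FOR_TRAIN):
    """Hypothetical helper: slide a fixed-length window over a 1-D series and
    return (samples, labels), where each label is the value right after its window."""
    xs, ys = [], []
    for i in range(len(series) - days_for_train):
        xs.append(series[i:i + days_for_train])  # past `days_for_train` days as features
        ys.append(series[i + days_for_train])    # the following day as the target
    return np.array(xs), np.array(ys)
```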
@@ -83,7 +83,7 @@ for excel in os.listdir(file_dir)[1:]:
     dataset_x = np.concatenate((dataset_x,x))
     dataset_y = np.concatenate((dataset_y,y))
-print(dataset_x,dataset_y,dataset_x.shape,dataset_y.shape)

 # Training
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
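The model class itself is defined outside the hunks shown in this commit; given the (seq_size, batch_size, feature_size) input and (seq_size, batch_size, output_size) output shapes referenced below, it is most likely an LSTM regressor. A plausible sketch, where the class name `LSTM_Regression` and all layer sizes are assumptions:

```python
import torch
from torch import nn

class LSTM_Regression(nn.Module):
    """Plausible sketch of the forecasting model: an LSTM followed by a linear head.
    The class name and layer sizes are assumptions, not taken from this commit."""
    def __init__(self, input_size, hidden_size, output_size=1, num_layers=2):
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)  # expects (seq, batch, feature)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):          # x: (seq_size, batch_size, input_size)
        out, _ = self.lstm(x)      # out: (seq_size, batch_size, hidden_size)
        return self.fc(out)        # (seq_size, batch_size, output_size)
```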
@@ -120,7 +120,7 @@ for i in range(1500):
     optimizer.step()
     optimizer.zero_grad()
     train_loss.append(loss.item())
-    print(loss)

 # Save the model
 torch.save(model.state_dict(),'dy5.pth')
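Only the tail of the training loop is visible in this hunk. A minimal sketch of the full iteration it implies, reusing the names shown above and assuming an MSE loss and Adam optimizer (neither is confirmed by the diff):

```python
# Assumes model, dataset_x and dataset_y are already float32 tensors on `device`.
loss_fn = nn.MSELoss()                                     # assumed loss
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)  # assumed optimizer and learning rate

train_loss = []
for i in range(1500):
    out = model(dataset_x)            # forward pass over the full training set
    loss = loss_fn(out, dataset_y)    # shapes assumed to match
    loss.backward()                   # backpropagate
    optimizer.step()                  # update parameters
    optimizer.zero_grad()             # clear gradients for the next iteration
    train_loss.append(loss.item())

torch.save(model.state_dict(), 'dy5.pth')  # save the trained weights, as in the diff
```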
@@ -128,16 +128,16 @@ torch.save(model.state_dict(),'dy5.pth')
 model = model.eval()  # switch to evaluation mode
 # model.load_state_dict(torch.load(os.path.join(model_save_dir,model_file)))  # load saved parameters
 dataset_x = dataset_x.reshape(-1, 1, DAYS_FOR_TRAIN)  # (seq_size, batch_size, feature_size)
-dataset_x = torch.from_numpy(dataset_x).to(device)
+dataset_x = torch.from_numpy(dataset_x).to(device).type(torch.float32)
 pred_test = model(dataset_x)  # full training set
 # model output: (seq_size, batch_size, output_size)
 pred_test = pred_test.view(-1)
 pred_test = np.concatenate((np.zeros(DAYS_FOR_TRAIN), pred_test.cpu().detach().numpy()))
-plt.plot(pred_test, 'r', label='prediction')
-plt.plot(df, 'b', label='real')
-plt.plot((train_size, train_size), (0, 1), 'g--')  # split line: training data on the left, test output on the right
+plt.plot(pred_test.reshape(-1), 'r', label='prediction')
+plt.plot(dataset_y.reshape(-1), 'b', label='real')
+plt.plot((train_size*5, train_size*5), (0, 1), 'g--')  # split line: training data on the left, test output on the right
 plt.legend(loc='best')
 plt.show()
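The functionally important change in this last hunk is the added `.type(torch.float32)`: `torch.from_numpy` preserves NumPy's default float64 dtype, while the model's parameters are float32, so feeding the uncast tensor into the network raises a dtype-mismatch error. (The plotting lines also switch the "real" curve from `df` to `dataset_y` and scale the split line by 5.) A small standalone illustration of the dtype fix, using a placeholder `nn.Linear` layer rather than the script's actual model:

```python
import numpy as np
import torch
from torch import nn

layer = nn.Linear(5, 1)                    # parameters are float32 by default
x = torch.from_numpy(np.zeros((3, 5)))     # float64 tensor straight from NumPy

# layer(x) would fail with a Float/Double dtype mismatch;
# casting first, as the commit does, makes the call work:
y = layer(x.type(torch.float32))
print(y.dtype)                             # torch.float32
```

`x.float()` would be an equivalent, slightly shorter spelling of the same cast.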
