
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from sklearn import preprocessing
data = pd.read_excel(r'C:\Users\user\PycharmProjects\pytorch2\入模数据\杭州数据.xlsx',index_col='dtdate')
print(data.columns)
y = np.array(data['售电量'])  # labels, used to compare against the training output
x = data.drop(columns=['售电量','city_name'])  # drop the label (and city name) from the feature set
# df.drop(labels, axis=0)
# labels: the column(s) or row(s) to delete; pass a list to drop several at once
# axis: 0 drops rows, 1 drops columns (default is 0)
fea_train = np.array(x)  # convert to an ndarray
# Standardization: (x - mean μ) / std σ, centres the data around the origin and speeds up training
input_features = preprocessing.StandardScaler().fit_transform(fea_train)  # fit computes mean and std, transform applies them
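# Note (added, not part of the original script): fit_transform fits the scaler and then discards it.
# To run the trained model on new samples later, keep the fitted scaler, e.g.
# scaler = preprocessing.StandardScaler().fit(fea_train) and then scaler.transform(new_data),
# so inference uses exactly the same mean/std as training. `scaler` and `new_data` are
# illustrative names, not variables defined in this script.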
# Min-max normalise y to prevent exploding gradients
y = (y - np.min(y))/(np.max(y) - np.min(y))
print(y)
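# Note (added, not part of the original script): from here on y lives on a 0-1 scale, so the
# model's outputs will too. To map predictions back to actual 售电量 values you would need the
# original np.min(y) / np.max(y) saved before the normalisation above (hypothetical y_min, y_max
# variables), and then actual = pred * (y_max - y_min) + y_min.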
# Set the number of inputs, hidden-layer neurons and outputs for the network
input_size = input_features.shape[1]  # number of input features
# ndarray.shape: index 0 is the row count, index 1 is the column count
# In this table each row is a sample and each column is a feature,
# so shape[0] is the number of samples and shape[1] the number of features
hidden_size = 64  # 64 neurons per hidden layer
output_size = 1  # a single output value
batch_size = 32  # number of samples per mini-batch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # train on the GPU if available
my_nn = torch.nn.Sequential(
    torch.nn.Linear(input_size, hidden_size),   # input layer → hidden layer 1
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_size, hidden_size),  # hidden layer 1 → hidden layer 2
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_size, hidden_size),  # hidden layer 2 → hidden layer 3
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_size, hidden_size),  # hidden layer 3 → hidden layer 4
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_size, output_size)   # hidden layer 4 → output layer
).to(device)  # a single .to(device) on the Sequential moves every layer
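# Quick sanity check (added sketch, not in the original script): a dummy batch verifies that
# the network maps (batch, input_size) → (batch, 1).
# dummy = torch.randn(4, input_size, device=device)
# print(my_nn(dummy).shape)  # expected: torch.Size([4, 1])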
cost = torch.nn.MSELoss().to(device)
optimizer = torch.optim.Adam(my_nn.parameters(), lr=0.001)
# Train the network
losses = []
for i in range(300):
    batch_loss = []
    # Mini-batch training
    for start in range(0, len(input_features), batch_size):
        end = min(start + batch_size, len(input_features))
        # Inputs and targets do not need requires_grad; only the model parameters are optimised
        x_train = torch.tensor(input_features[start:end], dtype=torch.float32).to(device)
        y_train = torch.tensor(y[start:end], dtype=torch.float32).reshape(-1, 1).to(device)  # match the (batch, 1) output shape
        prediction = my_nn(x_train)
        loss = cost(prediction, y_train)  # MSELoss expects (prediction, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_loss.append(loss.item())
    if i % 10 == 0:
        losses.append(np.mean(batch_loss))
        print(losses)
        print(i, np.mean(batch_loss))
# Save the model
# torch.save(my_nn, 'BP.pt')
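# Reloading sketch (added, not part of the original script; assumes BP.pt was written by the
# torch.save call above, which stores the whole module):
# loaded = torch.load('BP.pt')
# loaded.eval()
# with torch.no_grad():
#     pred = loaded(torch.tensor(input_features, dtype=torch.float32).to(device))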
# Plot the loss curve
dev_x = [i * 10 for i in range(len(losses))]  # one loss value is recorded every 10 epochs
plt.xlabel('epoch')
plt.ylabel('loss')
# Let matplotlib autoscale: the targets are normalised to [0, 1], so fixed limits of
# (0, 200) / (0, 1000) would cut off later epochs and hide the curve.
plt.plot(dev_x, losses)
plt.show()