import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from sklearn import preprocessing
data = pd.read_excel(r'C:\Users\user\PycharmProjects\pytorch2\入模数据\杭州数据.xlsx', index_col='dtdate')  # load the Hangzhou electricity dataset, indexed by date
print(data.columns)
y = np.array(data['售电量'])  # labels: electricity sales volume, used to evaluate the training results
x = data.drop(columns=['售电量', 'city_name'])  # drop the label and the city name from the feature set
# df.drop(labels, axis=0)
# labels: the column(s) or row(s) to drop; pass a list to drop several at once
# axis: 0 drops rows, 1 drops columns (default 0)
fea_train = np.array(x)  # convert the features to an ndarray
# Standardize the features: (x - mean μ) / std σ, centering the data around the origin to speed up training
input_features = preprocessing.StandardScaler().fit_transform(fea_train)  # fit computes the mean and std, transform applies them
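# For reference, the standardization above is equivalent to this manual computation
# (a sketch only; mu / sigma are illustrative names and are not used below):
#   mu = fea_train.mean(axis=0)
#   sigma = fea_train.std(axis=0)
#   input_features_manual = (fea_train - mu) / sigma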
# Min-max normalize y to [0, 1] to guard against exploding gradients
y = (y - np.min(y)) / (np.max(y) - np.min(y))
print(y)
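# Note: y is overwritten in place above, so mapping predictions back to the original
# scale needs the pre-normalization min/max. A sketch (y_min / y_max are illustrative names):
#   y_min, y_max = np.min(data['售电量']), np.max(data['售电量'])
#   pred_original = prediction * (y_max - y_min) + y_min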
# Set the number of network inputs, hidden-layer neurons and outputs
input_size = input_features.shape[1]  # number of input features
# ndarray.shape[1]
# index 0 is the rows, index 1 is the columns
# here each row is a sample and each column a feature, so shape[0] is the sample count and shape[1] the feature count
hidden_size = 64  # 64 neurons per hidden layer
output_size = 1  # a single output value
batch_size = 32  # number of samples per mini-batch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # use the GPU when available
my_nn = torch.nn.Sequential(
    torch.nn.Linear(input_size, hidden_size),   # input layer → hidden layer 1
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_size, hidden_size),  # hidden layer 1 → hidden layer 2
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_size, hidden_size),  # hidden layer 2 → hidden layer 3
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_size, hidden_size),  # hidden layer 3 → hidden layer 4
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_size, output_size)   # hidden layer 4 → output layer
).to(device)  # moving the whole Sequential to the device covers every layer
cost = torch.nn.MSELoss().to(device)
optimizer = torch.optim.Adam(my_nn.parameters(), lr=0.001)
# Train the network
losses = []
for i in range(300):
    batch_loss = []
    # mini-batch training
    for start in range(0, len(input_features), batch_size):
        end = min(start + batch_size, len(input_features))
        x_train = torch.tensor(input_features[start:end], dtype=torch.float32).to(device)
        y_train = torch.tensor(y[start:end], dtype=torch.float32).reshape(-1, 1).to(device)  # match the (batch, 1) shape of the predictions
        prediction = my_nn(x_train)
        loss = cost(prediction, y_train)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_loss.append(loss.item())
    if i % 10 == 0:
        losses.append(np.mean(batch_loss))
        print(losses)
        print(i, np.mean(batch_loss))
# Save the model
# torch.save(my_nn, 'BP.pt')
# Plot the loss curve
dev_x = [i * 10 for i in range(len(losses))]  # one recorded loss every 10 epochs, matching len(losses)
plt.xlabel('step count')
plt.ylabel('loss')
plt.xlim((0, 300))
plt.plot(dev_x, losses)
plt.show()
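
# A minimal usage sketch (assuming the model was saved with the commented-out
# torch.save(my_nn, 'BP.pt') call above; uncomment that line first):
#   loaded = torch.load('BP.pt')
#   loaded.eval()
#   with torch.no_grad():
#       x_all = torch.tensor(input_features, dtype=torch.float32).to(device)
#       pred = loaded(x_all).cpu().numpy().ravel()  # predictions on the normalized [0, 1] scale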