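# Daily electricity-sales (售电量) forecasting for Quzhou (衢州) with XGBoost:
# filter outlier days, add a coarse season feature, train an XGBRegressor on
# temperature/holiday/24ST/season features, evaluate against August 2023,
# and save the fitted model to quzhou.bin.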
import os

import numpy as np
import pandas as pd
import xgboost as xgb
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

mpl.rcParams['font.sans-serif'] = ['kaiti']   # KaiTi font so Chinese labels render in plots
pd.set_option('display.width', None)          # do not truncate wide DataFrames when printing
def season(x):
    """Bucket a date by month: 0 for Jan/Feb, 1 for Mar-Jun and Sep-Dec, 2 for Jul/Aug."""
    month = str(x)[5:7]
    if month in ('01', '02'):
        return 0
    elif month in ('03', '04', '05', '06', '09', '10', '11', '12'):
        return 1
    else:
        return 2
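# Illustrative examples (hypothetical dates):
#   season('2023-01-15') -> 0, season('2023-04-10') -> 1, season('2023-07-20') -> 2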
def normal(nd):
    """Drop outliers with Tukey's fences: keep values strictly inside
    (Q1 - 1.5*IQR, Q3 + 1.5*IQR)."""
    q1, q3 = nd.quantile(0.25), nd.quantile(0.75)
    high = q3 + 1.5 * (q3 - q1)
    low = q1 - 1.5 * (q3 - q1)
    return nd[(nd < high) & (nd > low)]
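# Worked example (hypothetical quartiles): with Q1 = 100 and Q3 = 200 the IQR is 100,
# so only values inside the open interval (-50, 350) are kept.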
# Load the Quzhou data set from the parent directory and drop outlier days.
parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
data = pd.read_excel(os.path.join(parent_dir, '入模数据/衢州.xlsx'), index_col='dtdate')
data.index = pd.to_datetime(data.index, format='%Y-%m-%d')
data = data.loc[normal(data['售电量']).index]
# Exploratory grouping of months by mean daily sales (above Q3 / below Q1 / in between).
# list2 = []
# list0 = []
# list1 = []
# for i in ('01','02','03','04','05','06','07','08','09','10','11','12'):
#     month_index = data.index.strftime('%Y-%m-%d').str[5:7] == f'{i}'
#     if data.loc[month_index]['售电量'].mean() >= data['售电量'].describe()['75%']:
#         list2.append(i)
#     elif data.loc[month_index]['售电量'].mean() <= data['售电量'].describe()['25%']:
#         list0.append(i)
#     else:
#         list1.append(i)
#
# print(list0, list1, list2)
data['season'] = data.index.map(season)
# Evaluate on August 2023; train on rows 450-899 of the outlier-filtered data.
df_eval = data.loc['2023-08']
df_train = data.iloc[450:900]
df_train = df_train[['tem_max', 'tem_min', 'holiday', '24ST', '售电量', 'season']]
X = df_train[['tem_max', 'tem_min', 'holiday', '24ST', 'season']]
X_eval = df_eval[['tem_max', 'tem_min', 'holiday', '24ST', 'season']]
y = df_train['售电量']
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Fit an XGBoost regressor and predict on the held-out test split.
model = xgb.XGBRegressor(max_depth=6, learning_rate=0.1, n_estimators=150)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
result_test = pd.DataFrame({'test': y_test, 'pred': y_pred}, index=y_test.index)
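# Optional fit-quality check on the held-out test split, using the r2_score imported above.
print('test R2:', r2_score(y_test, y_pred))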
# Print evaluation metrics on the August 2023 evaluation month.
eval_pred = model.predict(X_eval)
result_eval = pd.DataFrame({'eval': df_eval['售电量'], 'pred': eval_pred})
# Gap between actual and predicted sales over the last 3 days, relative to the month's total actual sales.
goal = (result_eval['eval'].iloc[-3:].sum() - result_eval['pred'].iloc[-3:].sum()) / result_eval['eval'].sum()
print(goal)
# Same gap over the last 23 days.
goal2 = (result_eval['eval'].iloc[-23:].sum() - result_eval['pred'].iloc[-23:].sum()) / result_eval['eval'].sum()
print(goal2)
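# Optional visual check (a minimal sketch): matplotlib is configured above with the
# KaiTi font, so Chinese labels render; plot actual vs. predicted for the month.
result_eval.plot(title='2023-08 售电量 eval vs. pred')
plt.show()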
# Save the model, then reload it from disk.
model.save_model('quzhou.bin')
loaded_model = xgb.XGBRegressor()
loaded_model.load_model('quzhou.bin')
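# Optional sanity check: the reloaded model should reproduce the same test-set predictions.
print(np.allclose(loaded_model.predict(x_test), y_pred))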
# Quick manual prediction for three hand-entered days; columns must follow the
# training feature order [tem_max, tem_min, holiday, 24ST, season].
X_eval = np.array([[23.69, 16.5, 23, 1, 0],
                   [24.5, 15.19, 23, 0, 0],
                   [25.19, 13.9, 23, 0, 0]])
print(model.predict(X_eval))
# Alternative model (commented out): a small fully-connected PyTorch network
# trained on standardized features.
# import torch
# from torch import nn
# from torch.utils.data import TensorDataset, DataLoader
#
#
# net = nn.Sequential(
#     nn.Linear(6, 32),
#     nn.ReLU(),
#     nn.Linear(32, 64),
#     nn.ReLU(),
#     nn.Linear(64, 64),
#     nn.ReLU(),
#     nn.Linear(64, 1)
# )
# opt = torch.optim.Adam(net.parameters(), lr=0.00005)
# loss_fn = nn.MSELoss()
#
# epochs = 200
#
# x_train = torch.from_numpy(x_train.values).type(torch.float32)
# x_train = (x_train - x_train.mean()) / x_train.std()
#
# y_train = torch.from_numpy(y_train.values).type(torch.float32)
# std1 = y_train.std()
# mean1 = y_train.mean()
# y_train = (y_train - mean1) / std1
#
# X_eval = torch.from_numpy(X_eval.values).type(torch.float32)
# X_eval = (X_eval - X_eval.mean()) / X_eval.std()
#
# y_eval = torch.from_numpy(df_eval['售电量'].values).type(torch.float32)
#
#
# train_ds = TensorDataset(x_train, y_train)
# train_dl = DataLoader(train_ds, shuffle=True, batch_size=64)
#
#
# for i in range(epochs):
#     for x, y in train_dl:
#         y_pred = net(x)
#         loss = loss_fn(y_pred, y)
#
#         opt.zero_grad()
#         loss.backward()
#         opt.step()
#     print(round(loss.item(), 2))
#
# predict = (net(X_eval) * std1 + mean1).detach().numpy()
# print(y_train.std(), y_train.mean())
# print(net(X_eval))
# print(predict)
# print((y_eval.detach().numpy().sum() - predict.sum()) / y_eval.detach().numpy().sum())