Liu/attack/RNN_model_trainging.py


from data_load import data_format
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dropout
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import TensorBoard, LearningRateScheduler


def model_train(X_train, X_test, Y_train, Y_test):
    """Train a two-layer LSTM regressor and evaluate it on the test split.

    Args:
        X_train (np.array): training sequences, shape (samples, timesteps, 1)
        X_test (np.array): test sequences, shape (samples, timesteps, 1)
        Y_train (np.array): training targets
        Y_test (np.array): test targets
    """
    # Shuffle the training data; reseeding with the same value before each
    # shuffle applies the same permutation, so X_train and Y_train stay aligned.
    np.random.seed(7)
    np.random.shuffle(X_train)
    np.random.seed(7)
    np.random.shuffle(Y_train)
    tf.random.set_seed(7)
    # Build the model
    model = tf.keras.models.Sequential([
        tf.keras.layers.LSTM(100, return_sequences=True),  # first LSTM layer
        Dropout(0.2),
        tf.keras.layers.LSTM(80),  # second LSTM layer
        Dropout(0.2),
        tf.keras.layers.Dense(1, kernel_regularizer=regularizers.l2(0.01))
    ])
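    # No input_shape is specified, so Keras builds the layer weights lazily on
    # the first call to fit(); inputs must arrive as (batch, timesteps, 1),
    # which is what the reshape in __main__ below produces.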
    # Loss function
    loss_fn = tf.keras.losses.MeanSquaredError()
    # Compile the model
    model.compile(
        optimizer='SGD',
        loss=loss_fn,
        metrics=[tf.keras.metrics.MeanAbsolutePercentageError()]
    )
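    # Note: the LearningRateScheduler callback below resets the optimizer's
    # learning rate at the start of every epoch, so SGD's default rate is
    # effectively just a placeholder.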
    # Exponential learning-rate decay as a function of the epoch index
    def lr_schedule(epoch):
        initial_learning_rate = 0.01
        decay_rate = 0.1
        decay_steps = 2000
        return initial_learning_rate * decay_rate ** (epoch / decay_steps)
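    # Worked values: lr(0) = 0.01, lr(2000) = 1e-3, lr(4000) = 1e-4,
    # lr(6000) = 1e-5, i.e. the rate drops one order of magnitude
    # every 2000 epochs over the 6000-epoch run.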
    # Learning-rate scheduler callback
    lr_scheduler = LearningRateScheduler(lr_schedule)
    # TensorBoard callback
    log_dir = "logs/fit"
    tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)
    # Train the model with the TensorBoard and learning-rate callbacks
    model.fit(X_train, Y_train, epochs=6000, batch_size=256,
              callbacks=[tensorboard_callback, lr_scheduler])
    loss, mape = model.evaluate(X_test, Y_test)
    print("Test loss:", loss)
    print("Test MAPE:", mape)
    # Save the trained model (TF2 SavedModel format, written to ./model)
    model.save('model')
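    # The saved model can be restored later with, e.g.:
    #   restored = tf.keras.models.load_model('model')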


if __name__ == "__main__":
    X_train, X_test, Y_train, Y_test = data_format(
        'data/archive/PowerQualityDistributionDataset1.csv', md=1)
    # LSTM layers expect 3-D input: (samples, timesteps, features)
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
    model_train(X_train, X_test, Y_train, Y_test)