from data_load import data_format

import numpy as np
import tensorflow as tf
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import LearningRateScheduler, TensorBoard
from tensorflow.keras.layers import Dense, Dropout


def model_train(X_train, X_test, Y_train, Y_test):
    """Train, evaluate, and save a dense classifier.

    Args:
        X_train (np.array): training features
        X_test (np.array): test features
        Y_train (np.array): training labels (integer class indices)
        Y_test (np.array): test labels (integer class indices)
    """
    # Shuffle the training data; reseeding with the same seed before each
    # shuffle applies the same permutation to features and labels, keeping
    # them aligned.
    np.random.seed(7)
    np.random.shuffle(X_train)
    np.random.seed(7)
    np.random.shuffle(Y_train)
    tf.random.set_seed(7)

    # Build the model
    model = tf.keras.models.Sequential([
        Dense(10000, activation='relu'),  # first hidden layer
        Dropout(0.2),
        Dense(800, activation='relu'),    # second hidden layer
        Dropout(0.2),
        # Output layer: no activation, so the model emits raw logits to match
        # from_logits=True in the loss below (a ReLU here would clip negative
        # logits). The extra unit leaves room for labels that start at 1
        # rather than 0.
        Dense(len(np.unique(Y_train)) + 1,
              kernel_regularizer=regularizers.l2(0.01))
    ])

    # Loss function: expects integer labels and raw logits
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

    # Compile the model
    model.compile(optimizer='SGD', loss=loss_fn, metrics=['accuracy'])

    # Exponentially decaying learning-rate schedule
    def lr_schedule(epoch):
        initial_learning_rate = 0.01
        decay_rate = 0.1
        decay_steps = 1500
        return initial_learning_rate * decay_rate ** (epoch / decay_steps)

    # Learning-rate scheduler callback
    lr_scheduler = LearningRateScheduler(lr_schedule)

    # TensorBoard callback
    log_dir = "logs/fit"
    tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)

    # Train the model with the TensorBoard and scheduler callbacks
    model.fit(X_train, Y_train, epochs=1000,
              callbacks=[tensorboard_callback, lr_scheduler],
              batch_size=256)

    loss, accuracy = model.evaluate(X_test, Y_test)
    print("Test accuracy:", accuracy)

    # Save the model
    model.save('model')


if __name__ == "__main__":
    X_train, X_test, Y_train, Y_test = data_format(
        'data/archive/PowerQualityDistributionDataset1.csv')
    model_train(X_train, X_test, Y_train, Y_test)
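
# A minimal inference sketch (not part of the training script above): it
# assumes the saved-model directory 'model' written by model_train() and a
# feature array shaped like X_test. Because the network outputs raw logits,
# argmax over them recovers the predicted class index.
#
#     import numpy as np
#     import tensorflow as tf
#
#     loaded = tf.keras.models.load_model('model')
#     logits = loaded.predict(X_test)          # shape: (n_samples, n_classes)
#     predictions = np.argmax(logits, axis=1)  # predicted class per sample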