Fixed a few bugs, but the results still show no pattern

MuJ 2024-01-07 09:39:05 +08:00
parent 19a079f76a
commit 58e266e184
1 changed file with 28 additions and 25 deletions

main.py

@@ -49,11 +49,13 @@ model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(X_train, Y_train, epochs=3000, validation_split=0.2)
model.fit(X_train, Y_train, epochs=10, validation_split=0.2)
# Evaluate the model
loss, accuracy = model.evaluate(X_test, Y_test)
print(X_train.shape)
# Create the perturbed data
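The code that actually computes `gradients` falls between the two hunks and is not shown in this diff; the second hunk picks up after gradient_magnitudes = tf.norm(gradients, axis=1). Below is a minimal sketch of the usual tf.GradientTape pattern for per-sample input gradients, assuming a model already trained as above. It is illustrative only, not code from this commit.

import tensorflow as tf

# Sketch only: obtain d(loss)/d(X) for every training sample.
X_train_tensor = tf.convert_to_tensor(X_train, dtype=tf.float32)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
with tf.GradientTape() as tape:
    tape.watch(X_train_tensor)           # inputs are not variables, so watch them explicitly
    predictions = model(X_train_tensor)  # forward pass through the trained model
    loss_value = loss_fn(Y_train, predictions)
gradients = tape.gradient(loss_value, X_train_tensor)
gradient_magnitudes = tf.norm(gradients, axis=1)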
@@ -85,48 +87,49 @@ gradient_magnitudes = tf.norm(gradients, axis=1)
# Create a dictionary mapping each gamma to its accuracies
accuracy_per_gamma = {}
# Flatten the gradients
flattened_gradients = tf.reshape(gradients, [-1])
# Select the largest γ * |X| gradients
for gamma in [0.05, 0.1, 0.2, 0.4]:  # choose a suitable value of γ
    num_gradients_to_select = int(
        gamma * tf.size(gradient_magnitudes, out_type=tf.dtypes.float32))
    # Get the indices of the largest gradients
    # tf.argsort with direction='DESCENDING' returns the indices in descending order
    top_gradients_indices = tf.argsort(gradient_magnitudes, direction='DESCENDING')[
        :num_gradients_to_select]
    # Build the set A: the indices of all selected largest gradients
    A = top_gradients_indices.numpy()
for gamma in [0.05, 0.1, 0.2, 0.4]:
    num_gradients_to_select = int(gamma * tf.size(flattened_gradients, out_type=tf.dtypes.float32))
    top_gradients_indices = tf.argsort(flattened_gradients, direction='DESCENDING')[:num_gradients_to_select]
    # Create a new gradient tensor, initialized as a copy of the original gradients
    updated_gradients = tf.identity(gradients)
    updated_gradients = tf.identity(flattened_gradients)
    # Get all indices
    all_indices = tf.range(start=0, limit=tf.size(
        gradient_magnitudes, out_type=tf.dtypes.int32), dtype=tf.int32)
    # Build a boolean mask: False at the selected largest gradients, True everywhere else
    mask = tf.ones_like(updated_gradients, dtype=bool)
    mask = tf.tensor_scatter_nd_update(mask, tf.expand_dims(top_gradients_indices, 1), tf.zeros_like(top_gradients_indices, dtype=bool))
    # Build a boolean mask: False at the indices in set A, True everywhere else
    mask = ~tf.reduce_any(tf.equal(tf.expand_dims(
        all_indices, 1), tf.reshape(A, (1, -1))), axis=1)
    # Use this mask to update the gradients
    updated_gradients = tf.where(mask, tf.zeros_like(updated_gradients), updated_gradients)
    # Reshape the gradients back to the original shape
    updated_gradients = tf.reshape(updated_gradients, tf.shape(gradients))
    # Use the mask to zero out the gradients that are not in set A
    updated_gradients = tf.where(
        mask, updated_gradients, tf.zeros_like(updated_gradients))
    # Create the list of accuracies
    accuracy_list = []
    for learning_rate in [0.1, 0.2, 0.3, 0.4, 0.5]:
        # Update X_train_tensor
        X_train_updated = X_train_tensor - updated_gradients
        # Apply the learning rate to the gradients
        scaled_gradients = learning_rate * updated_gradients
        # Update X_train_tensor with the scaled gradients
        X_train_updated = X_train_tensor - scaled_gradients
        tf.reshape(X_train_updated, (3332,34,1))
        X_train_updated = X_train_updated.numpy()
        # Compile the model
        model.compile(
            optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        # Train the model
        model.fit(X_train, Y_train, epochs=3000, validation_split=0.2)
        model.fit(X_train_updated, Y_train, epochs=1500, validation_split=0.2)
        # Evaluate the model
        loss, accuracy = model.evaluate(X_test, Y_test)
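For reference, the masking the comments above describe is: keep only the γ fraction of gradient entries with the largest magnitude and zero out the rest. In the new code the mask is True at the non-selected positions, so tf.where(mask, updated_gradients, tf.zeros_like(updated_gradients)) keeps the non-selected entries and zeroes the selected ones, which is the opposite of the stated intent and may be worth double-checking given the commit message. A minimal self-contained sketch of the stated intent follows; the helper name keep_top_fraction and the ranking by tf.abs are assumptions for illustration, not code from this commit.

import tensorflow as tf

def keep_top_fraction(grads, gamma):
    # Keep only the gamma fraction of gradient entries with the largest magnitude;
    # zero out everything else, then restore the original shape.
    flat = tf.reshape(grads, [-1])
    k = int(gamma * tf.size(flat, out_type=tf.dtypes.float32))
    top_idx = tf.argsort(tf.abs(flat), direction='DESCENDING')[:k]
    # Boolean mask: True at the kept (top-gamma) positions, False elsewhere
    keep = tf.tensor_scatter_nd_update(
        tf.zeros_like(flat, dtype=tf.bool),
        tf.expand_dims(top_idx, 1),
        tf.ones_like(top_idx, dtype=tf.bool))
    masked = tf.where(keep, flat, tf.zeros_like(flat))
    return tf.reshape(masked, tf.shape(grads))

# Example usage with the loop variables above:
# sparse_grads = keep_top_fraction(gradients, gamma=0.1)
# X_train_updated = X_train_tensor - learning_rate * sparse_grads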