# Liu/attack/attack_craft.py
# (Recovered from a web file-viewer paste; original listing metadata:
#  59 lines, 2.2 KiB, Python.)
import tensorflow as tf
def craft_adv(X, Y, gamma, learning_rate, model, loss_fn, md=0):
    """Craft adversarial examples with one sparse gradient-ascent step.

    Computes the gradient of the loss with respect to the *inputs*, keeps
    only the ``gamma``-fraction of components with the largest gradients,
    and perturbs the inputs along those components, scaled by
    ``learning_rate * 700``.

    Args:
        X: Input samples; converted to a float64 tensor.
        Y: Ground-truth targets for ``X``.
        gamma: Fraction (0..1) of gradient components to keep.
        learning_rate: Step size; internally amplified by a fixed 700.
        model: Keras-style model — callable on a tensor and supporting
            ``evaluate`` (assumed interface; confirm against caller).
        loss_fn: Loss callable ``loss_fn(y_true, y_pred)``.
        md: Task mode — 0 for classification (integer labels; ``evaluate``
            is assumed to return ``(loss, accuracy)``), 1 for regression
            (float targets; ``evaluate`` returns loss only).

    Returns:
        Tuple ``(X_adv, metric)``: the perturbed inputs as a NumPy array,
        plus post-attack accuracy (md == 0) or loss (md == 1).

    Raises:
        ValueError: If ``md`` is neither 0 nor 1.
    """
    # Label dtype depends on the task (int labels vs. float targets).
    X_test_tensor = tf.convert_to_tensor(X, dtype=tf.float64)
    if md == 0:
        Y_test_tensor = tf.convert_to_tensor(Y, dtype=tf.int32)
    elif md == 1:
        Y_test_tensor = tf.convert_to_tensor(Y, dtype=tf.float64)
    else:
        # Original code left Y_test_tensor unbound for any other md,
        # raising a confusing NameError later; fail fast instead.
        raise ValueError(f"md must be 0 (classification) or 1 (regression), got {md}")

    # Differentiate the loss w.r.t. the inputs; they are not model
    # variables, so the tape must watch them explicitly.
    with tf.GradientTape() as tape:
        tape.watch(X_test_tensor)
        predictions = model(X_test_tensor)
        loss = loss_fn(Y_test_tensor, predictions)
    gradients = tape.gradient(loss, X_test_tensor)

    # Flatten so top-k selection runs across all components at once.
    flattened_gradients = tf.reshape(gradients, [-1])

    # Select the largest gamma * |X| gradient components.
    num_gradients_to_select = int(gamma * tf.size(flattened_gradients, out_type=tf.dtypes.float32))
    print(num_gradients_to_select)
    top_gradients_indices = tf.argsort(flattened_gradients, direction='DESCENDING')[:num_gradients_to_select]

    # Build a mask that is True everywhere EXCEPT at the selected (top)
    # positions, then zero every True position — so only the top
    # components of the gradient survive.
    mask = tf.ones_like(flattened_gradients, dtype=bool)
    mask = tf.tensor_scatter_nd_update(
        mask,
        tf.expand_dims(top_gradients_indices, 1),
        tf.zeros_like(top_gradients_indices, dtype=bool),
    )
    updated_gradients = tf.where(mask, tf.zeros_like(flattened_gradients), flattened_gradients)
    # Restore the gradient to the input's original shape.
    updated_gradients = tf.reshape(updated_gradients, tf.shape(gradients))

    # One ascent step; 700 is an empirically chosen amplification factor
    # inherited from the original implementation — TODO confirm.
    scaled_gradients = (learning_rate * 700) * updated_gradients
    X_train_updated = tf.add(X_test_tensor, scaled_gradients).numpy()

    # Evaluate the attacked inputs and return the task-appropriate metric.
    if md == 1:
        loss = model.evaluate(X_train_updated, Y)
        print(f"Accuracy gamma: {gamma},learning:{learning_rate}", loss)
        return X_train_updated, loss
    elif md == 0:
        loss, accuracy = model.evaluate(X_train_updated, Y)
        print(f"Accuracy gamma: {gamma},learning:{learning_rate},accuracy{accuracy}" )
        return X_train_updated, accuracy