"""FGM and PGD adversarial training for NLP (Miyato et al., 2017; Madry et al., 2018).

Both classes perturb the embedding weight matrix in the direction of the loss
gradient, so an extra forward/backward pass can be run on the perturbed weights
before the originals are restored ahead of the optimizer step.
"""
import torch

class FGM:
    """Fast Gradient Method (FGM): adds a single normalized gradient step
    r_at = epsilon * g / ||g||_2 to the embedding weights; undo it with
    restore() after the adversarial backward pass."""

    def __init__(self, model):
        self.model = model
        self.backup = {}

    def attack(self, epsilon=1., emb_name='bert.embeddings.word_embeddings'):
        # Change emb_name to the name of the embedding parameter in your model.
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                self.backup[name] = param.data.clone()  # save the clean weights
                norm = torch.norm(param.grad)
                if norm != 0 and not torch.isnan(norm):
                    r_at = epsilon * param.grad / norm
                    param.data.add_(r_at)

    def restore(self, emb_name='bert.embeddings.word_embeddings'):
        # Must match the emb_name passed to attack(); otherwise the saved
        # weights are never written back.
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                assert name in self.backup
                param.data = self.backup[name]
        self.backup = {}

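# Usage sketch for FGM (illustrative: the function name, the optimizer handling,
# and the model(inputs, labels) -> scalar loss convention are assumptions, not
# part of the snippet above; adapt them to your own training loop).
def fgm_train_step(model, optimizer, fgm, inputs, labels):
    loss = model(inputs, labels)
    loss.backward()      # gradients on the clean batch
    fgm.attack()         # perturb the embedding weights in place
    loss_adv = model(inputs, labels)
    loss_adv.backward()  # accumulate gradients on the adversarial batch
    fgm.restore()        # put the clean embedding weights back
    optimizer.step()
    model.zero_grad()
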
class PGD:
    """Projected Gradient Descent (PGD): takes several small steps of size
    alpha along the gradient, projecting the accumulated perturbation back
    onto the epsilon-ball around the original embedding weights."""

    def __init__(self, model):
        self.model = model
        self.emb_backup = {}
        self.grad_backup = {}

    def attack(self, epsilon=1., alpha=0.3, emb_name='char_emb.', is_first_attack=False):
        # Change emb_name to the name of the embedding parameter in your model.
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                if is_first_attack:
                    self.emb_backup[name] = param.data.clone()  # save clean weights once
                norm = torch.norm(param.grad)
                if norm != 0 and not torch.isnan(norm):
                    r_at = alpha * param.grad / norm
                    param.data.add_(r_at)
                    param.data = self.project(name, param.data, epsilon)

    def restore(self, emb_name='char_emb.'):
        # Must match the emb_name passed to attack().
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                assert name in self.emb_backup
                param.data = self.emb_backup[name]
        self.emb_backup = {}

    def project(self, param_name, param_data, epsilon):
        # Clip the accumulated perturbation r so that ||r||_2 <= epsilon.
        r = param_data - self.emb_backup[param_name]
        if torch.norm(r) > epsilon:
            r = epsilon * r / torch.norm(r)
        return self.emb_backup[param_name] + r

    def backup_grad(self):
        # Save the clean-batch gradients before the attack steps overwrite them.
        # Parameters that received no gradient (grad is None) are skipped.
        for name, param in self.model.named_parameters():
            if param.requires_grad and param.grad is not None:
                self.grad_backup[name] = param.grad.clone()

    def restore_grad(self):
        for name, param in self.model.named_parameters():
            if param.requires_grad and name in self.grad_backup:
                param.grad = self.grad_backup[name]

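# Usage sketch for PGD (illustrative: same hypothetical
# model(inputs, labels) -> scalar loss convention as the FGM sketch above;
# K is the number of inner attack steps).
def pgd_train_step(model, optimizer, pgd, inputs, labels, K=3):
    loss = model(inputs, labels)
    loss.backward()                # gradients on the clean batch
    pgd.backup_grad()              # the inner loop will overwrite .grad
    for t in range(K):
        pgd.attack(is_first_attack=(t == 0))  # back up embeddings on step 0
        if t != K - 1:
            model.zero_grad()      # intermediate steps need only their own grads
        else:
            pgd.restore_grad()     # final step: accumulate onto the clean gradients
        loss_adv = model(inputs, labels)
        loss_adv.backward()
    pgd.restore()                  # restore the clean embedding weights
    optimizer.step()
    model.zero_grad()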