-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathattacks.py
107 lines (91 loc) · 3.91 KB
/
attacks.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
import torch
from utils import one_hot_embedding
from models.model import *
import torch.nn.functional as F
# Valid pixel-value range for input images; attacks keep X + delta inside [0, 1].
lower_limit, upper_limit = 0, 1
def clamp(X, lower_limit, upper_limit):
    """Clamp tensor `X` element-wise into [lower_limit, upper_limit].

    Both bounds are tensors broadcastable against `X` (callers pass
    per-pixel bounds such as `lower_limit - X` and `upper_limit - X`).
    """
    capped_above = torch.minimum(X, upper_limit)
    return torch.maximum(capped_above, lower_limit)
def attack_pgd(prompter, model, add_prompter, criterion, X, target, text_tokens, alpha,
               attack_iters, norm, restarts=1, early_stop=True, epsilon=0):
    """Run a PGD attack on a (prompted) CLIP classifier and return the perturbation.

    Args:
        prompter: optional visual prompter applied to the preprocessed images.
        model: CLIP model passed through to `multiGPU_CLIP`.
        add_prompter: optional callable producing an extra prompt token (or None).
        criterion: classification loss that is maximized w.r.t. the perturbation.
        X: input image batch of shape (N, C, H, W) with values in [0, 1].
        target: labels for `X`, consumed by `criterion`.
        text_tokens: tokenized text prompts for `multiGPU_CLIP`.
        alpha: PGD step size.
        attack_iters: number of gradient-ascent steps.
        norm: threat model, either "l_inf" or "l_2".
        restarts, early_stop: accepted for API compatibility; currently unused.
        epsilon: perturbation budget for the chosen norm.

    Returns:
        `delta`: perturbation tensor with the same shape/device as `X`, satisfying
        the norm constraint and keeping `X + delta` inside [0, 1].
    """
    # zeros_like inherits X's device and dtype, so this works on CPU as well as
    # CUDA (the previous hard-coded .cuda() failed on CPU-only setups).
    delta = torch.zeros_like(X)
    if norm == "l_inf":
        delta.uniform_(-epsilon, epsilon)
    elif norm == "l_2":
        # Sample a random point inside the L2 ball of radius epsilon.
        delta.normal_()
        d_flat = delta.view(delta.size(0), -1)
        n = d_flat.norm(p=2, dim=1).view(delta.size(0), 1, 1, 1)
        r = torch.zeros_like(n).uniform_(0, 1)
        delta *= r / n * epsilon
    else:
        raise ValueError(f"unsupported norm: {norm!r} (expected 'l_inf' or 'l_2')")
    # Project the initial perturbation so X + delta stays in [0, 1].
    delta = clamp(delta, lower_limit - X, upper_limit - X)
    delta.requires_grad = True
    for _ in range(attack_iters):
        _images = clip_img_preprocessing(X + delta)
        prompted_images = prompter(_images) if prompter is not None else _images
        prompt_token = add_prompter() if add_prompter is not None else None
        output, _ = multiGPU_CLIP(model, prompted_images, text_tokens, prompt_token)
        loss = criterion(output, target)
        loss.backward()
        grad = delta.grad.detach()
        if norm == "l_inf":
            # Signed gradient ascent step, then project onto the L-inf ball.
            d = torch.clamp(delta + alpha * torch.sign(grad), min=-epsilon, max=epsilon)
        elif norm == "l_2":
            # Normalized gradient step, then renorm each sample onto the L2 ball.
            g_norm = torch.norm(grad.view(grad.shape[0], -1), dim=1).view(-1, 1, 1, 1)
            scaled_g = grad / (g_norm + 1e-10)
            d = (delta + scaled_g * alpha).view(delta.size(0), -1).renorm(
                p=2, dim=0, maxnorm=epsilon).view_as(delta)
        # Re-project so the adversarial image stays a valid image.
        d = clamp(d, lower_limit - X, upper_limit - X)
        delta.data[:, :, :, :] = d
        delta.grad.zero_()
    return delta
def attack_CW(prompter, model, add_prompter, criterion, X, target, text_tokens, alpha,
              attack_iters, norm, restarts=1, early_stop=True, epsilon=0, margin=50.0):
    """Run a Carlini-Wagner-style margin attack and return the perturbation.

    The step/projection machinery is identical to `attack_pgd`; only the loss
    differs: instead of `criterion`, a hinge on the logit margin
    `relu(correct_logit - max_wrong_logit + margin)` is maximized away, pushing
    the correct class below the best wrong class by at least `margin`.

    Args:
        prompter: optional visual prompter applied to the preprocessed images.
        model: CLIP model passed through to `multiGPU_CLIP`.
        add_prompter: optional callable producing an extra prompt token (or None).
        criterion: accepted for signature parity with `attack_pgd`; unused here.
        X: input image batch of shape (N, C, H, W) with values in [0, 1].
        target: ground-truth class indices for `X`.
        text_tokens: tokenized text prompts for `multiGPU_CLIP`.
        alpha: attack step size.
        attack_iters: number of gradient-ascent steps.
        norm: threat model, either "l_inf" or "l_2".
        restarts, early_stop: accepted for API compatibility; currently unused.
        epsilon: perturbation budget for the chosen norm.
        margin: CW confidence margin (default 50.0, the previous hard-coded value).

    Returns:
        `delta`: perturbation tensor with the same shape/device as `X`, satisfying
        the norm constraint and keeping `X + delta` inside [0, 1].
    """
    # zeros_like inherits X's device and dtype, so this works on CPU as well as
    # CUDA (the previous hard-coded .cuda() failed on CPU-only setups).
    delta = torch.zeros_like(X)
    if norm == "l_inf":
        delta.uniform_(-epsilon, epsilon)
    elif norm == "l_2":
        # Sample a random point inside the L2 ball of radius epsilon.
        delta.normal_()
        d_flat = delta.view(delta.size(0), -1)
        n = d_flat.norm(p=2, dim=1).view(delta.size(0), 1, 1, 1)
        r = torch.zeros_like(n).uniform_(0, 1)
        delta *= r / n * epsilon
    else:
        raise ValueError(f"unsupported norm: {norm!r} (expected 'l_inf' or 'l_2')")
    # Project the initial perturbation so X + delta stays in [0, 1].
    delta = clamp(delta, lower_limit - X, upper_limit - X)
    delta.requires_grad = True
    for _ in range(attack_iters):
        _images = clip_img_preprocessing(X + delta)
        prompted_images = prompter(_images) if prompter is not None else _images
        prompt_token = add_prompter() if add_prompter is not None else None
        output, _ = multiGPU_CLIP(model, prompted_images, text_tokens, prompt_token)
        num_class = output.size(1)
        label_mask = one_hot_embedding(target, num_class)
        # Keep the mask on the same device as the logits (device-agnostic).
        label_mask = label_mask.to(output.device)
        correct_logit = torch.sum(label_mask * output, dim=1)
        # Mask out the true class with a large negative offset before the max.
        wrong_logit, _ = torch.max((1 - label_mask) * output - 1e4 * label_mask, dim=1)
        # CW hinge loss; ascending it drives correct_logit below wrong_logit - margin.
        loss = - torch.sum(F.relu(correct_logit - wrong_logit + margin))
        loss.backward()
        grad = delta.grad.detach()
        if norm == "l_inf":
            # Signed gradient ascent step, then project onto the L-inf ball.
            d = torch.clamp(delta + alpha * torch.sign(grad), min=-epsilon, max=epsilon)
        elif norm == "l_2":
            # Normalized gradient step, then renorm each sample onto the L2 ball.
            g_norm = torch.norm(grad.view(grad.shape[0], -1), dim=1).view(-1, 1, 1, 1)
            scaled_g = grad / (g_norm + 1e-10)
            d = (delta + scaled_g * alpha).view(delta.size(0), -1).renorm(
                p=2, dim=0, maxnorm=epsilon).view_as(delta)
        # Re-project so the adversarial image stays a valid image.
        d = clamp(d, lower_limit - X, upper_limit - X)
        delta.data[:, :, :, :] = d
        delta.grad.zero_()
    return delta